repo_name (string, 6-112 chars) | path (string, 4-204 chars) | copies (string, 1-3 chars) | size (string, 4-6 chars) | content (string, 714-810k chars) | license (15 classes) |
---|---|---|---|---|---|
ALaDyn/tools-piccante | py-tools/time-analysis.py | 1 | 10427 | #!/usr/bin/python
### loading shell commands
import os, os.path, glob, sys, shutil, time
from scipy.fftpack import fftn
#from PyQt4.QtGui import *
import struct
#from scipy import *
import numpy as np
import math
# from matplotlib import *
# from pylab import *
# import matplotlib as plt
# from matplotlib.ticker import MultipleLocator, FormatStrFormatter
### --- ###
class field_analysis:
def __init__(self,tmin, tmax,substeps, base, end):
self.base = base
self.end = end
self.tmin = int(tmin)
self.tmax = int(tmax)
self.substeps = int(substeps)
if 1000%substeps:
print "ERROR: wrong number of timesteps!"
sys.exit()
self.Nt = substeps*(self.tmax-self.tmin)
self.filenames = list()
self.t = np.zeros((self.Nt))
self.chose_file()
self.init_values()
self.set_frequency()
self.basename = ("%s%03d"% (self.base,self.tmin))
def analize_field(self, filename):
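# Parse one binary field dump. Layout, as read below: one int for endianness,
# three ints for the global grid (Nx, Ny, Nz), three ints for the processor
# grid (Npx, Npy, Npz), one int for the number of field components Nc, the
# x/y/z grid coordinates as floats, then one block per processor: six ints
# (i0, j0, k0, li0, lj0, lk0) followed by li0*lj0*lk0*Nc floats of field data.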
f = open(filename ,'rb')
#- endianness 0:small - 1:big -#
endianness = struct.unpack('i', f.read(4))[0]
#- global grid dim -#
Nx = struct.unpack('i', f.read(4))[0]
Ny = struct.unpack('i', f.read(4))[0]
Nz = struct.unpack('i', f.read(4))[0]
self.Nx = Nx
self.Ny = Ny
self.Nz = Nz
Ntot = Nx*Ny*Nz
#- processor grid -#
Npx = struct.unpack('i', f.read(4))[0]
Npy = struct.unpack('i', f.read(4))[0]
Npz = struct.unpack('i', f.read(4))[0]
Nproc = Npx*Npy*Npz
#- field components -#
Nc = struct.unpack('i', f.read(4))[0]
self.Nx = Nx
self.Ny = Ny
self.Nz = Nz
self.Nc = Nc
#- grid -> X -#
#x = np.zeros((Nx))
#for i in range(0,Nx):
x = struct.unpack('f'*Nx, f.read(4*Nx))
#- grid -> Y -#
y = struct.unpack('f'*Ny, f.read(4*Ny))
#- grid -> Z -#
z = struct.unpack('f'*Nz, f.read(4*Nz))
self.x = x
self.y = y
self.z = z
#- loop on processors -#
F = np.zeros((Nz,Ny,Nx,Nc))
counter = 0
prog = 0.0
for nprocessor in range(0,Nproc):
#-processor dims -#
i0 = struct.unpack('i', f.read(4))[0]
j0 = struct.unpack('i', f.read(4))[0]
k0 = struct.unpack('i', f.read(4))[0]
li0 = struct.unpack('i', f.read(4))[0]
lj0 = struct.unpack('i', f.read(4))[0]
lk0 = struct.unpack('i', f.read(4))[0]
#print '>>> ',i0,j0,k0,li0,lj0,lk0
NN=li0*lj0*lk0*Nc
array=np.array(struct.unpack('f'*NN, f.read(4*NN))).reshape(lk0,lj0,li0,Nc)
for k in range(0,lk0):
for j in range(0,lj0):
for i in range(0,li0):
for c in range(0,Nc):
F[k+k0,j+j0,i+i0,c] = array[k,j,i,c]
counter += li0
prog = counter*(100.0/Ntot)
#np.savetxt( nameOutFile ,F[0,:,:,component],fmt='%15.14e')
f.close()
#print "done"
return F
def collect_file(self):
name = "E_FIELD_000.000.bin.000"
for i in os.listdir(os.getcwd()):
if i.endswith(".bin.000") and i.startswith("E_FIELD_"):
#print i
self.Nt +=1
continue
else:
continue
def chose_file(self):
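# Build the list of files to load, one per (timestep, substep); the names
# follow the pattern base + "TTT.SSS" + end, e.g. "E_FIELD_200.000.bin.000".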
base = self.base
end = self.end
tindex = 0
for i in range (self.tmin,self.tmax):
for v in range (0,self.substeps):
t = i + v*1.0/self.substeps
self.t[tindex] = t
name = base + ("%03d.%03d" % (i,v*1000/self.substeps) ) + end
self.filenames.append(name)
#print name
tindex += 1
self.Nt = len(self.filenames)
def collect_data(self):
self.alldata = np.zeros((self.Nt,self.Nz,self.Ny,self.Nx,self.Nc))
for i in range(0,self.Nt):
#print self.filenames[i]
F = self.analize_field(self.filenames[i])
self.alldata[i,:,:,:,:] = F[:,:,:,:]
def init_values(self):
self.analize_field(self.filenames[0])
if(self.Nx>1):
self.dx = self.x[1] - self.x[0]
else:
self.dx = 0
if(self.Ny>1):
self.dy = self.y[1] - self.y[0]
else:
self.dy = 0
if(self.Nz>1):
self.dz = self.z[1] - self.z[0]
else:
self.dz = 0
if(self.Nt>1):
self.dt = self.t[1] - self.t[0]
else:
self.dt = 0
self.kx = np.empty_like(self.x)
self.ky = np.empty_like(self.y)
self.kz = np.empty_like(self.z)
self.kt = np.empty_like(self.t)
self.Lx = self.dx*self.Nx
self.Ly = self.dy*self.Ny
self.Lz = self.dz*self.Nz
self.Lt = self.dt*self.Nt
self.print_parameters()
def print_parameters(self):
print ("SIZE: [ Lx, Ly, Lz ] = [ %f, %f, %f ]" %(self.Lx,self.Ly,self.Lz))
print (" Np : [ Nx, Ny, Nz ] = [ %d, %d, %d ]" %(self.Nx,self.Ny,self.Nz))
print ("time span = [%f:%f] Nt = %d" %(self.tmin, self.tmax,self.Nt))
def set_frequency(self):
if(self.Lx>0):
self.dkx = 1/self.Lx
else:
self.dkx = 0
for i in range(0,self.Nx):
self.kx[i] = i*self.dkx
if(self.Ly>0):
self.dky = 1/self.Ly
else:
self.dky = 0
for i in range(0,self.Ny):
self.ky[i] = i*self.dky
if(self.Lz>0):
self.dkz = 1/self.Lz
else:
self.dkz = 0
for i in range(0,self.Nz):
self.kz[i] = i*self.dkz
if(self.Lt>0):
self.dkt = 1/self.Lt
else:
self.dkt = 0
for i in range(0,self.Nt):
self.kt[i] = i*self.dkt
self.Lkx = self.dkx*(self.Nx/2)
self.Lky = self.dky*(self.Ny/2)
self.Lkz = self.dkz*(self.Nz/2)
self.Lkt = self.dkt*(self.Nt/2)
self.print_frequency()
def print_frequency(self):
print ("SIZE: [ Lkx, Lky, Lkz ] = [ %f, %f, %f ]" %(self.Lkx,self.Lky,self.Lkz))
print (" dk : [ dkx, dky, dkz ] = [ %f, %f, %f ]" %(self.dkx,self.dky,self.dkz))
print ("max_freq = %f domega = %f" %(self.Lkt,self.dkt))
def do_fft(self,zposition,comp):
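# FFT over (time, y, x) of a single z-slice and field component; the result
# self.trasf has shape (Nt, Ny, Nx), i.e. axes (omega, ky, kx).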
self.trasf = np.zeros((self.Nt,self.Ny,self.Nx))
self.trasf = np.fft.fftn(self.alldata[:,zposition,:,:,comp])
def select_omega(self,freq):
ifreq = int(freq/self.dkt)
selected=ifreq*self.dkt
print ("selected ifreq = %d, freq=%4.3f" %(ifreq,selected))
name = ("%s-kx-ky-omega%4.3f.txt"% (self.basename,selected))
f1=open(name, 'w')
for j in range(0,self.Ny/2):
for i in range(0,self.Nx/2):
f1.write("%e %e %e\n" % (self.kx[i], self.ky[j], np.real(self.trasf[ifreq,i,j])))
#np.savetxt( "kx-omega.txt" ,np.real(self.trasf[:,:,0]),fmt='%15.14e')
f1.close()
def kx_ky_oneTime(self,time,zposition,comp):
itime = int((time-self.tmin)/self.dt)
if itime>=self.Nt:
itime = self.Nt-1
selected=itime*self.dt + self.tmin
print ("selected itime = %d time = %4.3f" %(itime,selected))
name = ("%s-kx-ky-time%3.3f.txt"% (self.basename,selected))
onetimetrasf = np.zeros((self.Ny,self.Nx))
onetimetrasf = np.fft.fftn(self.alldata[itime,zposition,:,:,comp])
f1=open(name, 'w')
for j in range(-self.Ny/2,self.Ny/2):
ky = j*self.dky
for i in range(-self.Nx/2,self.Nx/2):
kx = i*self.dkx
f1.write("%e %e %e\n" % (kx, ky, np.real(onetimetrasf[j,i])))
f1.close()
def oneTime(self,time,zposition,comp):
itime = int((time-self.tmin)/self.dt)
if itime>=self.Nt:
itime = self.Nt-1
selected=itime*self.dt + self.tmin
print ("selected itime = %d time = %4.3f" %(itime,selected))
name = ("%s-time%3.3f.txt"% (self.basename,selected))
f1=open(name, 'w')
for j in range(-self.Ny/2,self.Ny/2):
y = j*self.dy
for i in range(-self.Nx/2,self.Nx/2):
x = i*self.dx
f1.write("%e %e %e\n" % (x, y, self.alldata[itime,zposition,j,i,comp] ))
f1.close()
def avg_y(self):
name = ("%s-kx-omega.txt"% (self.basename,))
f1=open(name, 'w')
for j in range(-self.Nt/2,self.Nt/2):
kt = j*self.dkt
for i in range(-self.Nx/2,self.Nx/2):
kx = i*self.dkx
f1.write("%e %e %e\n" % (kx, kt, np.real(np.sum(self.trasf[j,i,:]))))
#np.savetxt( "kx-omega.txt" ,np.real(self.trasf[:,:,0]),fmt='%15.14e')
f1.close()
def run():
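# Driver: load substeps*deltaT field dumps starting at t=basetime, FFT them
# over (t, y, x), then write kx-ky maps at selected frequencies and times plus
# the avg_y kx-omega map to text files.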
print('Number of arguments: %d arguments.' % len(sys.argv))
print('Argument List: %s' % str(sys.argv))
path = os.getcwd()
#f = open(os.path.join(path,'E_FIELD_000.000.bin.000'),'rb')
component = 0
basetime = 200
deltaT = 10
sub = 5
endtime = basetime + deltaT
myname = "E_FIELD_"
#myname = "DENS_eleBulk_"
myanalysis = field_analysis(basetime, endtime,substeps=sub, base=myname, end=".bin.000")
myanalysis.collect_data()
myanalysis.do_fft(zposition=0,comp=component)
myanalysis.select_omega(freq=1.01)
myanalysis.select_omega(freq=2.01)
myanalysis.oneTime(basetime,zposition=0,comp=component)
myanalysis.avg_y()
myanalysis.kx_ky_oneTime(basetime,zposition=0,comp=component)
run()
def old():
if len(sys.argv)<2:
sys.exit('Usage: %s inputFile outputFile' % sys.argv[0])
elif len(sys.argv)<3:
outFile = 'default.dat'
else:
outFile = str(sys.argv[2])
| gpl-3.0 |
herilalaina/scikit-learn | sklearn/neighbors/tests/test_kde.py | 31 | 5537 | import numpy as np
from sklearn.utils.testing import (assert_allclose, assert_raises,
assert_equal)
from sklearn.neighbors import KernelDensity, KDTree, NearestNeighbors
from sklearn.neighbors.ball_tree import kernel_norm
from sklearn.pipeline import make_pipeline
from sklearn.datasets import make_blobs
from sklearn.model_selection import GridSearchCV
from sklearn.preprocessing import StandardScaler
def compute_kernel_slow(Y, X, kernel, h):
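# Brute-force reference implementation: evaluate the kernel density estimate
# built from the samples X at the query points Y, so the tree-based
# KernelDensity results can be checked against an exact computation.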
d = np.sqrt(((Y[:, None, :] - X) ** 2).sum(-1))
norm = kernel_norm(h, X.shape[1], kernel) / X.shape[0]
if kernel == 'gaussian':
return norm * np.exp(-0.5 * (d * d) / (h * h)).sum(-1)
elif kernel == 'tophat':
return norm * (d < h).sum(-1)
elif kernel == 'epanechnikov':
return norm * ((1.0 - (d * d) / (h * h)) * (d < h)).sum(-1)
elif kernel == 'exponential':
return norm * (np.exp(-d / h)).sum(-1)
elif kernel == 'linear':
return norm * ((1 - d / h) * (d < h)).sum(-1)
elif kernel == 'cosine':
return norm * (np.cos(0.5 * np.pi * d / h) * (d < h)).sum(-1)
else:
raise ValueError('kernel not recognized')
def check_results(kernel, bandwidth, atol, rtol, X, Y, dens_true):
kde = KernelDensity(kernel=kernel, bandwidth=bandwidth,
atol=atol, rtol=rtol)
log_dens = kde.fit(X).score_samples(Y)
assert_allclose(np.exp(log_dens), dens_true,
atol=atol, rtol=max(1E-7, rtol))
assert_allclose(np.exp(kde.score(Y)),
np.prod(dens_true),
atol=atol, rtol=max(1E-7, rtol))
def test_kernel_density(n_samples=100, n_features=3):
rng = np.random.RandomState(0)
X = rng.randn(n_samples, n_features)
Y = rng.randn(n_samples, n_features)
for kernel in ['gaussian', 'tophat', 'epanechnikov',
'exponential', 'linear', 'cosine']:
for bandwidth in [0.01, 0.1, 1]:
dens_true = compute_kernel_slow(Y, X, kernel, bandwidth)
for rtol in [0, 1E-5]:
for atol in [1E-6, 1E-2]:
for breadth_first in (True, False):
yield (check_results, kernel, bandwidth, atol, rtol,
X, Y, dens_true)
def test_kernel_density_sampling(n_samples=100, n_features=3):
rng = np.random.RandomState(0)
X = rng.randn(n_samples, n_features)
bandwidth = 0.2
for kernel in ['gaussian', 'tophat']:
# draw a tophat sample
kde = KernelDensity(bandwidth, kernel=kernel).fit(X)
samp = kde.sample(100)
assert_equal(X.shape, samp.shape)
# check that samples are in the right range
nbrs = NearestNeighbors(n_neighbors=1).fit(X)
dist, ind = nbrs.kneighbors(X, return_distance=True)
if kernel == 'tophat':
assert np.all(dist < bandwidth)
elif kernel == 'gaussian':
# 5 standard deviations is safe for 100 samples, but there's a
# very small chance this test could fail.
assert np.all(dist < 5 * bandwidth)
# check unsupported kernels
for kernel in ['epanechnikov', 'exponential', 'linear', 'cosine']:
kde = KernelDensity(bandwidth, kernel=kernel).fit(X)
assert_raises(NotImplementedError, kde.sample, 100)
# non-regression test: used to return a scalar
X = rng.randn(4, 1)
kde = KernelDensity(kernel="gaussian").fit(X)
assert_equal(kde.sample().shape, (1, 1))
def test_kde_algorithm_metric_choice():
# Smoke test for various metrics and algorithms
rng = np.random.RandomState(0)
X = rng.randn(10, 2) # 2 features required for haversine dist.
Y = rng.randn(10, 2)
for algorithm in ['auto', 'ball_tree', 'kd_tree']:
for metric in ['euclidean', 'minkowski', 'manhattan',
'chebyshev', 'haversine']:
if algorithm == 'kd_tree' and metric not in KDTree.valid_metrics:
assert_raises(ValueError, KernelDensity,
algorithm=algorithm, metric=metric)
else:
kde = KernelDensity(algorithm=algorithm, metric=metric)
kde.fit(X)
y_dens = kde.score_samples(Y)
assert_equal(y_dens.shape, Y.shape[:1])
def test_kde_score(n_samples=100, n_features=3):
pass
# FIXME
# rng = np.random.RandomState(0)
# X = rng.random_sample((n_samples, n_features))
# Y = rng.random_sample((n_samples, n_features))
def test_kde_badargs():
assert_raises(ValueError, KernelDensity,
algorithm='blah')
assert_raises(ValueError, KernelDensity,
bandwidth=0)
assert_raises(ValueError, KernelDensity,
kernel='blah')
assert_raises(ValueError, KernelDensity,
metric='blah')
assert_raises(ValueError, KernelDensity,
algorithm='kd_tree', metric='blah')
def test_kde_pipeline_gridsearch():
# test that kde plays nice in pipelines and grid-searches
X, _ = make_blobs(cluster_std=.1, random_state=1,
centers=[[0, 1], [1, 0], [0, 0]])
pipe1 = make_pipeline(StandardScaler(with_mean=False, with_std=False),
KernelDensity(kernel="gaussian"))
params = dict(kerneldensity__bandwidth=[0.001, 0.01, 0.1, 1, 10])
search = GridSearchCV(pipe1, param_grid=params, cv=5)
search.fit(X)
assert_equal(search.best_params_['kerneldensity__bandwidth'], .1)
| bsd-3-clause |
zhengwsh/InplusTrader_Linux | InplusTrader/dataViewer/monitor_tick_main.py | 1 | 3095 | # encoding: UTF-8
import sys,os
from PyQt4 import QtGui,QtCore
from pymongo import MongoClient
import pyqtgraph as pg
import matplotlib.dates as mpd
sys.path.append('..')
from vtConstantMid import *
import datetime as dt
import pytz
from ui.uiCrosshair import Crosshair
"""mid
读取保存在mongodb中的tick数据并图形化,以方便观察某个阶段的tick细节
"""
class TickMonitor(pg.PlotWidget):
#----------------------------------------------------------------------
def __init__(self,host,port,dbName,symbolName,startDatetimeStr,endDatetimeStr):
super(TickMonitor, self).__init__()
self.crosshair = Crosshair(self) #mid enable the crosshair feature
tickMonitor = self.plot(clear=False,pen=(255, 255, 255), name="tickTimeLine")
self.addItem(tickMonitor)
#mid load the data
tickDatetimeNums,tickPrices = self.__loadTicksFromMongo(host,port,dbName,symbolName,startDatetimeStr,endDatetimeStr)
#mid display the data
tickMonitor.setData(tickDatetimeNums,tickPrices,clear=True,)
#----------------------------------------------------------------------
def __loadTicksFromMongo(self,host,port,dbName,symbolName,startDatetimeStr,endDatetimeStr):
"""mid
加载mongodb数据转换并返回数字格式的时间及价格
"""
mongoConnection = MongoClient( host=host,port=port)
collection = mongoConnection[dbName][symbolName]
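# one collection per symbol; each tick document is expected to carry at least
# a 'datetime' and a 'lastPrice' field (see the loop below)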
startDate = dt.datetime.strptime(startDatetimeStr, '%Y-%m-%d %H:%M:%S')
endDate = dt.datetime.strptime(endDatetimeStr, '%Y-%m-%d %H:%M:%S')
cx = collection.find({'datetime': {'$gte': startDate, '$lte': endDate}})
tickDatetimeNums = []
tickPrices = []
for d in cx:
tickDatetimeNums.append(mpd.date2num(d['datetime']))
tickPrices.append(d['lastPrice'])
return tickDatetimeNums,tickPrices
#----------------------------------------------------------------------
def getTickDatetimeByXPosition(self,xAxis):
"""mid
根据传入的x轴坐标值,返回其所代表的时间
"""
tickDatetimeRet = xAxis
minYearDatetimeNum = mpd.date2num(dt.datetime(1900,1,1))
if(xAxis > minYearDatetimeNum):
tickDatetime = mpd.num2date(xAxis).astimezone(pytz.timezone('utc'))
if(tickDatetime.year >=1900):
tickDatetimeRet = tickDatetime
return tickDatetimeRet
if __name__ == "__main__":
app = QtGui.QApplication(sys.argv)
#mid parameters for loading historical ticks
host = '192.168.0.212'
port = 27017
dbName = 'VnTrader_Tick_Db'
symbolName = "EUR.USD.IDEALPRO"
if(True):
startDatetimeStr='2016-11-07 17:40:00'
endDatetimeStr = '2016-11-07 18:25:00'
if(False):
startDatetimeStr='2016-11-07 17:49:00'
endDatetimeStr = '2016-11-07 17:55:00'
main = TickMonitor(host,port,dbName,symbolName,startDatetimeStr,endDatetimeStr)
main.show()
sys.exit(app.exec_())
| mit |
clarkfitzg/dask | dask/dataframe/tests/test_multi.py | 5 | 20818 | import dask.dataframe as dd
import numpy as np
import pandas as pd
from dask.dataframe.multi import (align_partitions, join_indexed_dataframes,
hash_join, concat_indexed_dataframes)
import pandas.util.testing as tm
from dask.async import get_sync
from test_dataframe import eq
def test_align_partitions():
A = pd.DataFrame({'x': [1, 2, 3, 4, 5, 6], 'y': list('abdabd')},
index=[10, 20, 30, 40, 50, 60])
a = dd.repartition(A, [10, 40, 60])
B = pd.DataFrame({'x': [1, 2, 3, 4], 'y': list('abda')},
index=[30, 70, 80, 100])
b = dd.repartition(B, [30, 80, 100])
s = dd.core.Scalar({('s', 0): 10}, 's')
(aa, bb), divisions, L = align_partitions(a, b)
def _check(a, b, aa, bb):
assert isinstance(a, dd.DataFrame)
assert isinstance(b, dd.DataFrame)
assert isinstance(aa, dd.DataFrame)
assert isinstance(bb, dd.DataFrame)
assert eq(a, aa)
assert eq(b, bb)
assert divisions == (10, 30, 40, 60, 80, 100)
assert isinstance(L, list)
assert len(divisions) == 1 + len(L)
_check(a, b, aa, bb)
assert L == [[(aa._name, 0), (bb._name, 0)],
[(aa._name, 1), (bb._name, 1)],
[(aa._name, 2), (bb._name, 2)],
[(aa._name, 3), (bb._name, 3)],
[(aa._name, 4), (bb._name, 4)]]
(aa, ss, bb), divisions, L = align_partitions(a, s, b)
_check(a, b, aa, bb)
assert L == [[(aa._name, 0), None, (bb._name, 0)],
[(aa._name, 1), None, (bb._name, 1)],
[(aa._name, 2), None, (bb._name, 2)],
[(aa._name, 3), None, (bb._name, 3)],
[(aa._name, 4), None, (bb._name, 4)]]
assert eq(ss, 10)
ldf = pd.DataFrame({'a': [1, 2, 3, 4, 5, 6, 7],
'b': [7, 6, 5, 4, 3, 2, 1]})
rdf = pd.DataFrame({'c': [1, 2, 3, 4, 5, 6, 7],
'd': [7, 6, 5, 4, 3, 2, 1]})
for lhs, rhs in [(dd.from_pandas(ldf, 1), dd.from_pandas(rdf, 1)),
(dd.from_pandas(ldf, 2), dd.from_pandas(rdf, 2)),
(dd.from_pandas(ldf, 2), dd.from_pandas(rdf, 3)),
(dd.from_pandas(ldf, 3), dd.from_pandas(rdf, 2))]:
(lresult, rresult), div, parts = dd.multi.align_partitions(lhs, rhs)
assert eq(lresult, ldf)
assert eq(rresult, rdf)
# different index
ldf = pd.DataFrame({'a': [1, 2, 3, 4, 5, 6, 7],
'b': [7, 6, 5, 4, 3, 2, 1]},
index=list('abcdefg'))
rdf = pd.DataFrame({'c': [1, 2, 3, 4, 5, 6, 7],
'd': [7, 6, 5, 4, 3, 2, 1]},
index=list('fghijkl'))
for lhs, rhs in [(dd.from_pandas(ldf, 1), dd.from_pandas(rdf, 1)),
(dd.from_pandas(ldf, 2), dd.from_pandas(rdf, 2)),
(dd.from_pandas(ldf, 2), dd.from_pandas(rdf, 3)),
(dd.from_pandas(ldf, 3), dd.from_pandas(rdf, 2))]:
(lresult, rresult), div, parts = dd.multi.align_partitions(lhs, rhs)
assert eq(lresult, ldf)
assert eq(rresult, rdf)
def test_join_indexed_dataframe_to_indexed_dataframe():
A = pd.DataFrame({'x': [1, 2, 3, 4, 5, 6]},
index=[1, 2, 3, 4, 6, 7])
a = dd.repartition(A, [1, 4, 7])
B = pd.DataFrame({'y': list('abcdef')},
index=[1, 2, 4, 5, 6, 8])
b = dd.repartition(B, [1, 2, 5, 8])
c = join_indexed_dataframes(a, b, how='left')
assert c.divisions[0] == a.divisions[0]
assert c.divisions[-1] == max(a.divisions + b.divisions)
tm.assert_frame_equal(c.compute(), A.join(B))
c = join_indexed_dataframes(a, b, how='right')
assert c.divisions[0] == b.divisions[0]
assert c.divisions[-1] == b.divisions[-1]
tm.assert_frame_equal(c.compute(), A.join(B, how='right'))
c = join_indexed_dataframes(a, b, how='inner')
assert c.divisions[0] == 1
assert c.divisions[-1] == max(a.divisions + b.divisions)
tm.assert_frame_equal(c.compute(), A.join(B, how='inner'))
c = join_indexed_dataframes(a, b, how='outer')
assert c.divisions[0] == 1
assert c.divisions[-1] == 8
tm.assert_frame_equal(c.compute(), A.join(B, how='outer'))
assert sorted(join_indexed_dataframes(a, b, how='inner').dask) == \
sorted(join_indexed_dataframes(a, b, how='inner').dask)
assert sorted(join_indexed_dataframes(a, b, how='inner').dask) != \
sorted(join_indexed_dataframes(a, b, how='outer').dask)
def list_eq(a, b):
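# Order-insensitive comparison of two (dask or pandas) DataFrames: hash-joined
# results carry no guaranteed row order, so compare the sorted row lists after
# filling NaNs with sentinel values.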
if isinstance(a, dd.DataFrame):
a = a.compute(get=get_sync)
if isinstance(b, dd.DataFrame):
b = b.compute(get=get_sync)
assert list(a.columns) == list(b.columns)
# TODO: As of pandas 0.17, tm.assert_numpy_array_equal can
# compare arrays including NaN. This logic can be replaced
for c in a.columns:
if a[c].dtype in (int, float):
a[c] = a[c].fillna(100)
b[c] = b[c].fillna(100)
else:
a[c] = a[c].fillna('NAN')
b[c] = b[c].fillna('NAN')
av = sorted(a.values.tolist())
bv = sorted(b.values.tolist())
assert av == bv, (av, bv)
def test_hash_join():
A = pd.DataFrame({'x': [1, 2, 3, 4, 5, 6], 'y': [1, 1, 2, 2, 3, 4]})
a = dd.repartition(A, [0, 4, 5])
B = pd.DataFrame({'y': [1, 3, 4, 4, 5, 6], 'z': [6, 5, 4, 3, 2, 1]})
b = dd.repartition(B, [0, 2, 5])
for how in ['inner', 'left', 'right', 'outer']:
c = hash_join(a, 'y', b, 'y', how)
result = c.compute()
expected = pd.merge(A, B, how, 'y')
assert list(result.columns) == list(expected.columns)
assert sorted(result.fillna(100).values.tolist()) == \
sorted(expected.fillna(100).values.tolist())
# Different columns and npartitions
c = hash_join(a, 'x', b, 'z', 'outer', npartitions=3)
assert c.npartitions == 3
result = c.compute()
expected = pd.merge(A, B, 'outer', None, 'x', 'z')
assert list(result.columns) == list(expected.columns)
assert sorted(result.fillna(100).values.tolist()) == \
sorted(expected.fillna(100).values.tolist())
assert hash_join(a, 'y', b, 'y', 'inner')._name == \
hash_join(a, 'y', b, 'y', 'inner')._name
assert hash_join(a, 'y', b, 'y', 'inner')._name != \
hash_join(a, 'y', b, 'y', 'outer')._name
def test_indexed_concat():
A = pd.DataFrame({'x': [1, 2, 3, 4, 6, 7], 'y': list('abcdef')},
index=[1, 2, 3, 4, 6, 7])
a = dd.repartition(A, [1, 4, 7])
B = pd.DataFrame({'x': [10, 20, 40, 50, 60, 80]},
index=[1, 2, 4, 5, 6, 8])
b = dd.repartition(B, [1, 2, 5, 8])
for how in ['inner', 'outer']:
c = concat_indexed_dataframes([a, b], join=how)
result = c.compute()
expected = pd.concat([A, B], 0, how)
assert list(result.columns) == list(expected.columns)
assert sorted(zip(result.values.tolist(), result.index.values.tolist())) == \
sorted(zip(expected.values.tolist(), expected.index.values.tolist()))
assert sorted(concat_indexed_dataframes([a, b], join='inner').dask) == \
sorted(concat_indexed_dataframes([a, b], join='inner').dask)
assert sorted(concat_indexed_dataframes([a, b], join='inner').dask) != \
sorted(concat_indexed_dataframes([a, b], join='outer').dask)
def test_merge():
A = pd.DataFrame({'x': [1, 2, 3, 4, 5, 6], 'y': [1, 1, 2, 2, 3, 4]})
a = dd.repartition(A, [0, 4, 5])
B = pd.DataFrame({'y': [1, 3, 4, 4, 5, 6], 'z': [6, 5, 4, 3, 2, 1]})
b = dd.repartition(B, [0, 2, 5])
eq(dd.merge(a, b, left_index=True, right_index=True),
pd.merge(A, B, left_index=True, right_index=True))
for how in ['inner', 'outer', 'left', 'right']:
result = dd.merge(a, b, on='y', how=how)
list_eq(result, pd.merge(A, B, on='y', how=how))
assert all(d is None for d in result.divisions)
list_eq(dd.merge(a, b, left_on='x', right_on='z', how=how),
pd.merge(A, B, left_on='x', right_on='z', how=how))
list_eq(dd.merge(a, b, left_on='x', right_on='z', how=how,
suffixes=('1', '2')),
pd.merge(A, B, left_on='x', right_on='z', how=how,
suffixes=('1', '2')))
list_eq(dd.merge(a, b, how=how), pd.merge(A, B, how=how))
list_eq(dd.merge(a, B, how=how), pd.merge(A, B, how=how))
list_eq(dd.merge(A, b, how=how), pd.merge(A, B, how=how))
list_eq(dd.merge(A, B, how=how), pd.merge(A, B, how=how))
list_eq(dd.merge(a, b, left_index=True, right_index=True, how=how),
pd.merge(A, B, left_index=True, right_index=True, how=how))
list_eq(dd.merge(a, b, left_index=True, right_index=True, how=how,
suffixes=('1', '2')),
pd.merge(A, B, left_index=True, right_index=True, how=how,
suffixes=('1', '2')))
list_eq(dd.merge(a, b, left_on='x', right_index=True, how=how),
pd.merge(A, B, left_on='x', right_index=True, how=how))
list_eq(dd.merge(a, b, left_on='x', right_index=True, how=how,
suffixes=('1', '2')),
pd.merge(A, B, left_on='x', right_index=True, how=how,
suffixes=('1', '2')))
# pandas result looks buggy
# list_eq(dd.merge(a, B, left_index=True, right_on='y'),
# pd.merge(A, B, left_index=True, right_on='y'))
def test_merge_by_index_patterns():
pdf1l = pd.DataFrame({'a': [1, 2, 3, 4, 5, 6, 7],
'b': [7, 6, 5, 4, 3, 2, 1]})
pdf1r = pd.DataFrame({'c': [1, 2, 3, 4, 5, 6, 7],
'd': [7, 6, 5, 4, 3, 2, 1]})
pdf2l = pd.DataFrame({'a': [1, 2, 3, 4, 5, 6, 7],
'b': [7, 6, 5, 4, 3, 2, 1]},
index=list('abcdefg'))
pdf2r = pd.DataFrame({'c': [7, 6, 5, 4, 3, 2, 1],
'd': [7, 6, 5, 4, 3, 2, 1]},
index=list('abcdefg'))
pdf3l = pdf2l
pdf3r = pd.DataFrame({'c': [6, 7, 8, 9],
'd': [5, 4, 3, 2]},
index=list('abdg'))
pdf4l = pdf2l
pdf4r = pd.DataFrame({'c': [9, 10, 11, 12],
'd': [5, 4, 3, 2]},
index=list('abdg'))
# completely different index
pdf5l = pd.DataFrame({'a': [1, 1, 2, 2, 3, 3, 4],
'b': [7, 6, 5, 4, 3, 2, 1]},
index=list('lmnopqr'))
pdf5r = pd.DataFrame({'c': [1, 1, 1, 1],
'd': [5, 4, 3, 2]},
index=list('abcd'))
pdf6l = pd.DataFrame({'a': [1, 1, 2, 2, 3, 3, 4],
'b': [7, 6, 5, 4, 3, 2, 1]},
index=list('cdefghi'))
pdf6r = pd.DataFrame({'c': [1, 2, 1, 2],
'd': [5, 4, 3, 2]},
index=list('abcd'))
pdf7l = pd.DataFrame({'a': [1, 1, 2, 2, 3, 3, 4],
'b': [7, 6, 5, 4, 3, 2, 1]},
index=list('abcdefg'))
pdf7r = pd.DataFrame({'c': [5, 6, 7, 8],
'd': [5, 4, 3, 2]},
index=list('fghi'))
for pdl, pdr in [(pdf1l, pdf1r), (pdf2l, pdf2r), (pdf3l, pdf3r),
(pdf4l, pdf4r), (pdf5l, pdf5r), (pdf6l, pdf6r),
(pdf7l, pdf7r)]:
for lpart, rpart in [(2, 2), # same partition
(3, 2), # left npartition > right npartition
(2, 3)]: # left npartition < right npartition
ddl = dd.from_pandas(pdl, lpart)
ddr = dd.from_pandas(pdr, rpart)
for how in ['inner', 'outer', 'left', 'right']:
eq(dd.merge(ddl, ddr, how=how, left_index=True, right_index=True),
pd.merge(pdl, pdr, how=how, left_index=True, right_index=True))
eq(dd.merge(ddr, ddl, how=how, left_index=True, right_index=True),
pd.merge(pdr, pdl, how=how, left_index=True, right_index=True))
eq(ddr.merge(ddl, how=how, left_index=True, right_index=True),
pdr.merge(pdl, how=how, left_index=True, right_index=True))
eq(ddl.merge(ddr, how=how, left_index=True, right_index=True),
pdl.merge(pdr, how=how, left_index=True, right_index=True))
# hash join
list_eq(dd.merge(ddl, ddr, how=how, left_on='a', right_on='c'),
pd.merge(pdl, pdr, how=how, left_on='a', right_on='c'))
list_eq(dd.merge(ddl, ddr, how=how, left_on='b', right_on='d'),
pd.merge(pdl, pdr, how=how, left_on='b', right_on='d'))
list_eq(dd.merge(ddr, ddl, how=how, left_on='c', right_on='a'),
pd.merge(pdr, pdl, how=how, left_on='c', right_on='a'))
list_eq(dd.merge(ddr, ddl, how=how, left_on='d', right_on='b'),
pd.merge(pdr, pdl, how=how, left_on='d', right_on='b'))
list_eq(ddl.merge(ddr, how=how, left_on='a', right_on='c'),
pdl.merge(pdr, how=how, left_on='a', right_on='c'))
list_eq(ddl.merge(ddr, how=how, left_on='b', right_on='d'),
pdl.merge(pdr, how=how, left_on='b', right_on='d'))
list_eq(ddr.merge(ddl, how=how, left_on='c', right_on='a'),
pdr.merge(pdl, how=how, left_on='c', right_on='a'))
list_eq(ddr.merge(ddl, how=how, left_on='d', right_on='b'),
pdr.merge(pdl, how=how, left_on='d', right_on='b'))
def test_join_by_index_patterns():
# Similar test cases as test_merge_by_index_patterns,
# but columns / index for join have same dtype
pdf1l = pd.DataFrame({'a': list('abcdefg'),
'b': [7, 6, 5, 4, 3, 2, 1]},
index=list('abcdefg'))
pdf1r = pd.DataFrame({'c': list('abcdefg'),
'd': [7, 6, 5, 4, 3, 2, 1]},
index=list('abcdefg'))
pdf2l = pdf1l
pdf2r = pd.DataFrame({'c': list('gfedcba'),
'd': [7, 6, 5, 4, 3, 2, 1]},
index=list('abcdefg'))
pdf3l = pdf1l
pdf3r = pd.DataFrame({'c': list('abdg'),
'd': [5, 4, 3, 2]},
index=list('abdg'))
pdf4l = pd.DataFrame({'a': list('abcabce'),
'b': [7, 6, 5, 4, 3, 2, 1]},
index=list('abcdefg'))
pdf4r = pd.DataFrame({'c': list('abda'),
'd': [5, 4, 3, 2]},
index=list('abdg'))
# completely different index
pdf5l = pd.DataFrame({'a': list('lmnopqr'),
'b': [7, 6, 5, 4, 3, 2, 1]},
index=list('lmnopqr'))
pdf5r = pd.DataFrame({'c': list('abcd'),
'd': [5, 4, 3, 2]},
index=list('abcd'))
pdf6l = pd.DataFrame({'a': list('cdefghi'),
'b': [7, 6, 5, 4, 3, 2, 1]},
index=list('cdefghi'))
pdf6r = pd.DataFrame({'c': list('abab'),
'd': [5, 4, 3, 2]},
index=list('abcd'))
pdf7l = pd.DataFrame({'a': list('aabbccd'),
'b': [7, 6, 5, 4, 3, 2, 1]},
index=list('abcdefg'))
pdf7r = pd.DataFrame({'c': list('aabb'),
'd': [5, 4, 3, 2]},
index=list('fghi'))
for pdl, pdr in [(pdf1l, pdf1r), (pdf2l, pdf2r), (pdf3l, pdf3r),
(pdf4l, pdf4r), (pdf5l, pdf5r), (pdf6l, pdf6r),
(pdf7l, pdf7r)]:
for lpart, rpart in [(2, 2), (3, 2), (2, 3)]:
ddl = dd.from_pandas(pdl, lpart)
ddr = dd.from_pandas(pdr, rpart)
for how in ['inner', 'outer', 'left', 'right']:
eq(ddl.join(ddr, how=how), pdl.join(pdr, how=how))
eq(ddr.join(ddl, how=how), pdr.join(pdl, how=how))
eq(ddl.join(ddr, how=how, lsuffix='l', rsuffix='r'),
pdl.join(pdr, how=how, lsuffix='l', rsuffix='r'))
eq(ddr.join(ddl, how=how, lsuffix='l', rsuffix='r'),
pdr.join(pdl, how=how, lsuffix='l', rsuffix='r'))
"""
# temporary disabled bacause pandas may incorrectly raise
# IndexError for empty DataFrame
# https://github.com/pydata/pandas/pull/10826
list_eq(ddl.join(ddr, how=how, on='a', lsuffix='l', rsuffix='r'),
pdl.join(pdr, how=how, on='a', lsuffix='l', rsuffix='r'))
list_eq(ddr.join(ddl, how=how, on='c', lsuffix='l', rsuffix='r'),
pdr.join(pdl, how=how, on='c', lsuffix='l', rsuffix='r'))
# merge with index and columns
list_eq(ddl.merge(ddr, how=how, left_on='a', right_index=True),
pdl.merge(pdr, how=how, left_on='a', right_index=True))
list_eq(ddr.merge(ddl, how=how, left_on='c', right_index=True),
pdr.merge(pdl, how=how, left_on='c', right_index=True))
list_eq(ddl.merge(ddr, how=how, left_index=True, right_on='c'),
pdl.merge(pdr, how=how, left_index=True, right_on='c'))
list_eq(ddr.merge(ddl, how=how, left_index=True, right_on='a'),
pdr.merge(pdl, how=how, left_index=True, right_on='a'))
"""
def test_merge_by_multiple_columns():
pdf1l = pd.DataFrame({'a': list('abcdefghij'),
'b': list('abcdefghij'),
'c': [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]},
index=list('abcdefghij'))
pdf1r = pd.DataFrame({'d': list('abcdefghij'),
'e': list('abcdefghij'),
'f': [10, 9, 8, 7, 6, 5, 4, 3, 2, 1]},
index=list('abcdefghij'))
pdf2l = pd.DataFrame({'a': list('abcdeabcde'),
'b': list('abcabcabca'),
'c': [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]},
index=list('abcdefghij'))
pdf2r = pd.DataFrame({'d': list('edcbaedcba'),
'e': list('aaabbbcccd'),
'f': [10, 9, 8, 7, 6, 5, 4, 3, 2, 1]},
index=list('fghijklmno'))
pdf3l = pd.DataFrame({'a': list('aaaaaaaaaa'),
'b': list('aaaaaaaaaa'),
'c': [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]},
index=list('abcdefghij'))
pdf3r = pd.DataFrame({'d': list('aaabbbccaa'),
'e': list('abbbbbbbbb'),
'f': [10, 9, 8, 7, 6, 5, 4, 3, 2, 1]},
index=list('ABCDEFGHIJ'))
for pdl, pdr in [(pdf1l, pdf1r), (pdf2l, pdf2r), (pdf3l, pdf3r)]:
for lpart, rpart in [(2, 2), (3, 2), (2, 3)]:
ddl = dd.from_pandas(pdl, lpart)
ddr = dd.from_pandas(pdr, rpart)
for how in ['inner', 'outer', 'left', 'right']:
eq(ddl.join(ddr, how=how), pdl.join(pdr, how=how))
eq(ddr.join(ddl, how=how), pdr.join(pdl, how=how))
eq(dd.merge(ddl, ddr, how=how, left_index=True, right_index=True),
pd.merge(pdl, pdr, how=how, left_index=True, right_index=True))
eq(dd.merge(ddr, ddl, how=how, left_index=True, right_index=True),
pd.merge(pdr, pdl, how=how, left_index=True, right_index=True))
# hash join
list_eq(dd.merge(ddl, ddr, how=how, left_on='a', right_on='d'),
pd.merge(pdl, pdr, how=how, left_on='a', right_on='d'))
list_eq(dd.merge(ddl, ddr, how=how, left_on='b', right_on='e'),
pd.merge(pdl, pdr, how=how, left_on='b', right_on='e'))
list_eq(dd.merge(ddr, ddl, how=how, left_on='d', right_on='a'),
pd.merge(pdr, pdl, how=how, left_on='d', right_on='a'))
list_eq(dd.merge(ddr, ddl, how=how, left_on='e', right_on='b'),
pd.merge(pdr, pdl, how=how, left_on='e', right_on='b'))
list_eq(dd.merge(ddl, ddr, how=how, left_on=['a', 'b'], right_on=['d', 'e']),
pd.merge(pdl, pdr, how=how, left_on=['a', 'b'], right_on=['d', 'e']))
| bsd-3-clause |
jlegendary/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/backends/backend_macosx.py | 69 | 15397 | from __future__ import division
import os
import numpy
from matplotlib._pylab_helpers import Gcf
from matplotlib.backend_bases import RendererBase, GraphicsContextBase,\
FigureManagerBase, FigureCanvasBase, NavigationToolbar2
from matplotlib.cbook import maxdict
from matplotlib.figure import Figure
from matplotlib.path import Path
from matplotlib.mathtext import MathTextParser
from matplotlib.colors import colorConverter
from matplotlib.widgets import SubplotTool
import matplotlib
from matplotlib.backends import _macosx
def show():
"""Show all the figures and enter the Cocoa mainloop.
This function will not return until all windows are closed or
the interpreter exits."""
# Having a Python-level function "show" wrapping the built-in
# function "show" in the _macosx extension module allows us to
# to add attributes to "show". This is something ipython does.
_macosx.show()
class RendererMac(RendererBase):
"""
The renderer handles drawing/rendering operations. Most of the renderer's
methods forwards the command to the renderer's graphics context. The
renderer does not wrap a C object and is written in pure Python.
"""
texd = maxdict(50) # a cache of tex image rasters
def __init__(self, dpi, width, height):
RendererBase.__init__(self)
self.dpi = dpi
self.width = width
self.height = height
self.gc = GraphicsContextMac()
self.mathtext_parser = MathTextParser('MacOSX')
def set_width_height (self, width, height):
self.width, self.height = width, height
def draw_path(self, gc, path, transform, rgbFace=None):
if rgbFace is not None:
rgbFace = tuple(rgbFace)
if gc!=self.gc:
n = self.gc.level() - gc.level()
for i in range(n): self.gc.restore()
self.gc = gc
gc.draw_path(path, transform, rgbFace)
def draw_markers(self, gc, marker_path, marker_trans, path, trans, rgbFace=None):
if rgbFace is not None:
rgbFace = tuple(rgbFace)
if gc!=self.gc:
n = self.gc.level() - gc.level()
for i in range(n): self.gc.restore()
self.gc = gc
gc.draw_markers(marker_path, marker_trans, path, trans, rgbFace)
def draw_path_collection(self, *args):
gc = self.gc
args = args[:13]
gc.draw_path_collection(*args)
def draw_quad_mesh(self, *args):
gc = self.gc
gc.draw_quad_mesh(*args)
def new_gc(self):
self.gc.reset()
return self.gc
def draw_image(self, x, y, im, bbox, clippath=None, clippath_trans=None):
im.flipud_out()
nrows, ncols, data = im.as_rgba_str()
self.gc.draw_image(x, y, nrows, ncols, data, bbox, clippath, clippath_trans)
im.flipud_out()
def draw_tex(self, gc, x, y, s, prop, angle):
if gc!=self.gc:
n = self.gc.level() - gc.level()
for i in range(n): self.gc.restore()
self.gc = gc
# todo, handle props, angle, origins
size = prop.get_size_in_points()
texmanager = self.get_texmanager()
key = s, size, self.dpi, angle, texmanager.get_font_config()
im = self.texd.get(key) # Not sure what this does; just copied from backend_agg.py
if im is None:
Z = texmanager.get_grey(s, size, self.dpi)
Z = numpy.array(255.0 - Z * 255.0, numpy.uint8)
gc.draw_mathtext(x, y, angle, Z)
def _draw_mathtext(self, gc, x, y, s, prop, angle):
if gc!=self.gc:
n = self.gc.level() - gc.level()
for i in range(n): self.gc.restore()
self.gc = gc
size = prop.get_size_in_points()
ox, oy, width, height, descent, image, used_characters = \
self.mathtext_parser.parse(s, self.dpi, prop)
gc.draw_mathtext(x, y, angle, 255 - image.as_array())
def draw_text(self, gc, x, y, s, prop, angle, ismath=False):
if gc!=self.gc:
n = self.gc.level() - gc.level()
for i in range(n): self.gc.restore()
self.gc = gc
if ismath:
self._draw_mathtext(gc, x, y, s, prop, angle)
else:
family = prop.get_family()
size = prop.get_size_in_points()
weight = prop.get_weight()
style = prop.get_style()
gc.draw_text(x, y, unicode(s), family, size, weight, style, angle)
def get_text_width_height_descent(self, s, prop, ismath):
if ismath=='TeX':
# TODO: handle props
size = prop.get_size_in_points()
texmanager = self.get_texmanager()
Z = texmanager.get_grey(s, size, self.dpi)
m,n = Z.shape
# TODO: handle descent; This is based on backend_agg.py
return n, m, 0
if ismath:
ox, oy, width, height, descent, fonts, used_characters = \
self.mathtext_parser.parse(s, self.dpi, prop)
return width, height, descent
family = prop.get_family()
size = prop.get_size_in_points()
weight = prop.get_weight()
style = prop.get_style()
return self.gc.get_text_width_height_descent(unicode(s), family, size, weight, style)
def flipy(self):
return False
def points_to_pixels(self, points):
return points/72.0 * self.dpi
def option_image_nocomposite(self):
return True
class GraphicsContextMac(_macosx.GraphicsContext, GraphicsContextBase):
"""
The GraphicsContext wraps a Quartz graphics context. All methods
are implemented at the C-level in macosx.GraphicsContext. These
methods set drawing properties such as the line style, fill color,
etc. The actual drawing is done by the Renderer, which draws into
the GraphicsContext.
"""
def __init__(self):
GraphicsContextBase.__init__(self)
_macosx.GraphicsContext.__init__(self)
def set_foreground(self, fg, isRGB=False):
if not isRGB:
fg = colorConverter.to_rgb(fg)
_macosx.GraphicsContext.set_foreground(self, fg)
def set_clip_rectangle(self, box):
GraphicsContextBase.set_clip_rectangle(self, box)
if not box: return
_macosx.GraphicsContext.set_clip_rectangle(self, box.bounds)
def set_clip_path(self, path):
GraphicsContextBase.set_clip_path(self, path)
if not path: return
path = path.get_fully_transformed_path()
_macosx.GraphicsContext.set_clip_path(self, path)
########################################################################
#
# The following functions and classes are for pylab and implement
# window/figure managers, etc...
#
########################################################################
def draw_if_interactive():
"""
For performance reasons, we don't want to redraw the figure after
each draw command. Instead, we mark the figure as invalid, so that
it will be redrawn as soon as the event loop resumes via PyOS_InputHook.
This function should be called after each draw event, even if
matplotlib is not running interactively.
"""
figManager = Gcf.get_active()
if figManager is not None:
figManager.canvas.invalidate()
def new_figure_manager(num, *args, **kwargs):
"""
Create a new figure manager instance
"""
FigureClass = kwargs.pop('FigureClass', Figure)
figure = FigureClass(*args, **kwargs)
canvas = FigureCanvasMac(figure)
manager = FigureManagerMac(canvas, num)
return manager
class FigureCanvasMac(_macosx.FigureCanvas, FigureCanvasBase):
"""
The canvas the figure renders into. Calls the draw and print fig
methods, creates the renderers, etc...
Public attribute
figure - A Figure instance
Events such as button presses, mouse movements, and key presses
are handled in the C code and the base class methods
button_press_event, button_release_event, motion_notify_event,
key_press_event, and key_release_event are called from there.
"""
def __init__(self, figure):
FigureCanvasBase.__init__(self, figure)
width, height = self.get_width_height()
self.renderer = RendererMac(figure.dpi, width, height)
_macosx.FigureCanvas.__init__(self, width, height)
def resize(self, width, height):
self.renderer.set_width_height(width, height)
dpi = self.figure.dpi
width /= dpi
height /= dpi
self.figure.set_size_inches(width, height)
def print_figure(self, filename, dpi=None, facecolor='w', edgecolor='w',
orientation='portrait', **kwargs):
if dpi is None: dpi = matplotlib.rcParams['savefig.dpi']
filename = unicode(filename)
root, ext = os.path.splitext(filename)
ext = ext[1:].lower()
if not ext:
ext = "png"
filename = root + "." + ext
if ext=="jpg": ext = "jpeg"
# save the figure settings
origfacecolor = self.figure.get_facecolor()
origedgecolor = self.figure.get_edgecolor()
# set the new parameters
self.figure.set_facecolor(facecolor)
self.figure.set_edgecolor(edgecolor)
if ext in ('jpeg', 'png', 'tiff', 'gif', 'bmp'):
width, height = self.figure.get_size_inches()
width, height = width*dpi, height*dpi
self.write_bitmap(filename, width, height)
elif ext == 'pdf':
self.write_pdf(filename)
elif ext in ('ps', 'eps'):
from backend_ps import FigureCanvasPS
# Postscript backend changes figure.dpi, but doesn't change it back
origDPI = self.figure.dpi
fc = self.switch_backends(FigureCanvasPS)
fc.print_figure(filename, dpi, facecolor, edgecolor,
orientation, **kwargs)
self.figure.dpi = origDPI
self.figure.set_canvas(self)
elif ext=='svg':
from backend_svg import FigureCanvasSVG
fc = self.switch_backends(FigureCanvasSVG)
fc.print_figure(filename, dpi, facecolor, edgecolor,
orientation, **kwargs)
self.figure.set_canvas(self)
else:
raise ValueError("Figure format not available (extension %s)" % ext)
# restore original figure settings
self.figure.set_facecolor(origfacecolor)
self.figure.set_edgecolor(origedgecolor)
class FigureManagerMac(_macosx.FigureManager, FigureManagerBase):
"""
Wrap everything up into a window for the pylab interface
"""
def __init__(self, canvas, num):
FigureManagerBase.__init__(self, canvas, num)
title = "Figure %d" % num
_macosx.FigureManager.__init__(self, canvas, title)
if matplotlib.rcParams['toolbar']=='classic':
self.toolbar = NavigationToolbarMac(canvas)
elif matplotlib.rcParams['toolbar']=='toolbar2':
self.toolbar = NavigationToolbar2Mac(canvas)
else:
self.toolbar = None
if self.toolbar is not None:
self.toolbar.update()
def notify_axes_change(fig):
'this will be called whenever the current axes is changed'
if self.toolbar != None: self.toolbar.update()
self.canvas.figure.add_axobserver(notify_axes_change)
# This is ugly, but this is what tkagg and gtk are doing.
# It is needed to get ginput() working.
self.canvas.figure.show = lambda *args: self.show()
def show(self):
self.canvas.draw()
def close(self):
Gcf.destroy(self.num)
class NavigationToolbarMac(_macosx.NavigationToolbar):
def __init__(self, canvas):
self.canvas = canvas
basedir = os.path.join(matplotlib.rcParams['datapath'], "images")
images = {}
for imagename in ("stock_left",
"stock_right",
"stock_up",
"stock_down",
"stock_zoom-in",
"stock_zoom-out",
"stock_save_as"):
filename = os.path.join(basedir, imagename+".ppm")
images[imagename] = self._read_ppm_image(filename)
_macosx.NavigationToolbar.__init__(self, images)
self.message = None
def _read_ppm_image(self, filename):
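# Minimal PPM (P6) reader: drop '#' comments, then split the header fields
# (magic, width, height, maxcolor) from the raw RGB pixel data.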
data = ""
imagefile = open(filename)
for line in imagefile:
if "#" in line:
i = line.index("#")
line = line[:i] + "\n"
data += line
imagefile.close()
magic, width, height, maxcolor, imagedata = data.split(None, 4)
width, height = int(width), int(height)
assert magic=="P6"
assert len(imagedata)==width*height*3 # 3 colors in RGB
return (width, height, imagedata)
def panx(self, direction):
axes = self.canvas.figure.axes
selected = self.get_active()
for i in selected:
axes[i].xaxis.pan(direction)
self.canvas.invalidate()
def pany(self, direction):
axes = self.canvas.figure.axes
selected = self.get_active()
for i in selected:
axes[i].yaxis.pan(direction)
self.canvas.invalidate()
def zoomx(self, direction):
axes = self.canvas.figure.axes
selected = self.get_active()
for i in selected:
axes[i].xaxis.zoom(direction)
self.canvas.invalidate()
def zoomy(self, direction):
axes = self.canvas.figure.axes
selected = self.get_active()
for i in selected:
axes[i].yaxis.zoom(direction)
self.canvas.invalidate()
def save_figure(self):
filename = _macosx.choose_save_file('Save the figure')
if filename is None: # Cancel
return
self.canvas.print_figure(filename)
class NavigationToolbar2Mac(_macosx.NavigationToolbar2, NavigationToolbar2):
def __init__(self, canvas):
NavigationToolbar2.__init__(self, canvas)
def _init_toolbar(self):
basedir = os.path.join(matplotlib.rcParams['datapath'], "images")
_macosx.NavigationToolbar2.__init__(self, basedir)
def draw_rubberband(self, event, x0, y0, x1, y1):
self.canvas.set_rubberband(x0, y0, x1, y1)
def release(self, event):
self.canvas.remove_rubberband()
def set_cursor(self, cursor):
_macosx.set_cursor(cursor)
def save_figure(self):
filename = _macosx.choose_save_file('Save the figure')
if filename is None: # Cancel
return
self.canvas.print_figure(filename)
def prepare_configure_subplots(self):
toolfig = Figure(figsize=(6,3))
canvas = FigureCanvasMac(toolfig)
toolfig.subplots_adjust(top=0.9)
tool = SubplotTool(self.canvas.figure, toolfig)
return canvas
def set_message(self, message):
_macosx.NavigationToolbar2.set_message(self, message.encode('utf-8'))
########################################################################
#
# Now just provide the standard names that backend.__init__ is expecting
#
########################################################################
FigureManager = FigureManagerMac
| gpl-3.0 |
Rossonero/bmlswp | ch08/norm.py | 23 | 2242 | import numpy as np
class NormalizePositive(object):
def __init__(self, axis=0):
self.axis = axis
def fit(self, features, y=None):
# count features that are greater than zero in axis `self.axis`:
if self.axis == 1:
features = features.T
binary = (features > 0)
count = binary.sum(axis=0)
# to avoid division by zero, set zero counts to one:
count[count == 0] = 1.
self.mean = features.sum(axis=0)/count
# Compute variance by average squared difference to the mean, but only
# consider differences where binary is True (i.e., where there was a
# true rating):
diff = (features - self.mean) * binary
diff **= 2
# regularize the estimate of std by adding 0.1
self.std = np.sqrt(0.1 + diff.sum(axis=0)/count)
return self
def transform(self, features):
if self.axis == 1:
features = features.T
binary = (features > 0)
features = features - self.mean
features /= self.std
features *= binary
if self.axis == 1:
features = features.T
return features
def inverse_transform(self, features, copy=True):
if copy:
features = features.copy()
if self.axis == 1:
features = features.T
features *= self.std
features += self.mean
if self.axis == 1:
features = features.T
return features
def fit_transform(self, features):
return self.fit(features).transform(features)
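# Minimal usage sketch (assumes `ratings` is a nonnegative user x item matrix
# with zeros marking missing entries):
#   norm = NormalizePositive(axis=0)
#   normalized = norm.fit_transform(ratings)
#   restored = norm.inverse_transform(normalized)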
def predict(train):
norm = NormalizePositive()
train = norm.fit_transform(train)
return norm.inverse_transform(train * 0.)
def main(transpose_inputs=False):
from load_ml100k import get_train_test
from sklearn import metrics
train,test = get_train_test(random_state=12)
if transpose_inputs:
train = train.T
test = test.T
predicted = predict(train)
r2 = metrics.r2_score(test[test > 0], predicted[test > 0])
print('R2 score ({} normalization): {:.1%}'.format(
('movie' if transpose_inputs else 'user'),
r2))
if __name__ == '__main__':
main()
main(transpose_inputs=True)
| mit |
frank-tancf/scikit-learn | sklearn/base.py | 18 | 17981 | """Base classes for all estimators."""
# Author: Gael Varoquaux <[email protected]>
# License: BSD 3 clause
import copy
import warnings
import numpy as np
from scipy import sparse
from .externals import six
from .utils.fixes import signature
from .utils.deprecation import deprecated
from .exceptions import ChangedBehaviorWarning as _ChangedBehaviorWarning
@deprecated("ChangedBehaviorWarning has been moved into the sklearn.exceptions"
" module. It will not be available here from version 0.19")
class ChangedBehaviorWarning(_ChangedBehaviorWarning):
pass
##############################################################################
def clone(estimator, safe=True):
"""Constructs a new estimator with the same parameters.
Clone does a deep copy of the model in an estimator
without actually copying attached data. It yields a new estimator
with the same parameters that has not been fit on any data.
Parameters
----------
estimator: estimator object, or list, tuple or set of objects
The estimator or group of estimators to be cloned
safe: boolean, optional
If safe is false, clone will fall back to a deepcopy on objects
that are not estimators.
"""
estimator_type = type(estimator)
# XXX: not handling dictionaries
if estimator_type in (list, tuple, set, frozenset):
return estimator_type([clone(e, safe=safe) for e in estimator])
elif not hasattr(estimator, 'get_params'):
if not safe:
return copy.deepcopy(estimator)
else:
raise TypeError("Cannot clone object '%s' (type %s): "
"it does not seem to be a scikit-learn estimator "
"as it does not implement a 'get_params' methods."
% (repr(estimator), type(estimator)))
klass = estimator.__class__
new_object_params = estimator.get_params(deep=False)
for name, param in six.iteritems(new_object_params):
new_object_params[name] = clone(param, safe=False)
new_object = klass(**new_object_params)
params_set = new_object.get_params(deep=False)
# quick sanity check of the parameters of the clone
for name in new_object_params:
param1 = new_object_params[name]
param2 = params_set[name]
if isinstance(param1, np.ndarray):
# For most ndarrays, we do not test for complete equality
if not isinstance(param2, type(param1)):
equality_test = False
elif (param1.ndim > 0
and param1.shape[0] > 0
and isinstance(param2, np.ndarray)
and param2.ndim > 0
and param2.shape[0] > 0):
equality_test = (
param1.shape == param2.shape
and param1.dtype == param2.dtype
# We have to use '.flat' for 2D arrays
and param1.flat[0] == param2.flat[0]
and param1.flat[-1] == param2.flat[-1]
)
else:
equality_test = np.all(param1 == param2)
elif sparse.issparse(param1):
# For sparse matrices equality doesn't work
if not sparse.issparse(param2):
equality_test = False
elif param1.size == 0 or param2.size == 0:
equality_test = (
param1.__class__ == param2.__class__
and param1.size == 0
and param2.size == 0
)
else:
equality_test = (
param1.__class__ == param2.__class__
and param1.data[0] == param2.data[0]
and param1.data[-1] == param2.data[-1]
and param1.nnz == param2.nnz
and param1.shape == param2.shape
)
else:
new_obj_val = new_object_params[name]
params_set_val = params_set[name]
# The following construct is required to check equality on special
# singletons such as np.nan that are not equal to them-selves:
equality_test = (new_obj_val == params_set_val or
new_obj_val is params_set_val)
if not equality_test:
raise RuntimeError('Cannot clone object %s, as the constructor '
'does not seem to set parameter %s' %
(estimator, name))
return new_object
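# Typical use (sketch): cloned = clone(estimator) returns a new, unfitted
# estimator built with the same constructor parameters; fitted attributes
# such as coef_ are not carried over.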
###############################################################################
def _pprint(params, offset=0, printer=repr):
"""Pretty print the dictionary 'params'
Parameters
----------
params: dict
The dictionary to pretty print
offset: int
The offset in characters to add at the begin of each line.
printer:
The function to convert entries to strings, typically
the builtin str or repr
"""
# Do a multi-line justified repr:
options = np.get_printoptions()
np.set_printoptions(precision=5, threshold=64, edgeitems=2)
params_list = list()
this_line_length = offset
line_sep = ',\n' + (1 + offset // 2) * ' '
for i, (k, v) in enumerate(sorted(six.iteritems(params))):
if type(v) is float:
# use str for representing floating point numbers
# this way we get consistent representation across
# architectures and versions.
this_repr = '%s=%s' % (k, str(v))
else:
# use repr of the rest
this_repr = '%s=%s' % (k, printer(v))
if len(this_repr) > 500:
this_repr = this_repr[:300] + '...' + this_repr[-100:]
if i > 0:
if (this_line_length + len(this_repr) >= 75 or '\n' in this_repr):
params_list.append(line_sep)
this_line_length = len(line_sep)
else:
params_list.append(', ')
this_line_length += 2
params_list.append(this_repr)
this_line_length += len(this_repr)
np.set_printoptions(**options)
lines = ''.join(params_list)
# Strip trailing space to avoid nightmare in doctests
lines = '\n'.join(l.rstrip(' ') for l in lines.split('\n'))
return lines
###############################################################################
class BaseEstimator(object):
"""Base class for all estimators in scikit-learn
Notes
-----
All estimators should specify all the parameters that can be set
at the class level in their ``__init__`` as explicit keyword
arguments (no ``*args`` or ``**kwargs``).
"""
@classmethod
def _get_param_names(cls):
"""Get parameter names for the estimator"""
# fetch the constructor or the original constructor before
# deprecation wrapping if any
init = getattr(cls.__init__, 'deprecated_original', cls.__init__)
if init is object.__init__:
# No explicit constructor to introspect
return []
# introspect the constructor arguments to find the model parameters
# to represent
init_signature = signature(init)
# Consider the constructor parameters excluding 'self'
parameters = [p for p in init_signature.parameters.values()
if p.name != 'self' and p.kind != p.VAR_KEYWORD]
for p in parameters:
if p.kind == p.VAR_POSITIONAL:
raise RuntimeError("scikit-learn estimators should always "
"specify their parameters in the signature"
" of their __init__ (no varargs)."
" %s with constructor %s doesn't "
" follow this convention."
% (cls, init_signature))
# Extract and sort argument names excluding 'self'
return sorted([p.name for p in parameters])
def get_params(self, deep=True):
"""Get parameters for this estimator.
Parameters
----------
deep: boolean, optional
If True, will return the parameters for this estimator and
contained subobjects that are estimators.
Returns
-------
params : mapping of string to any
Parameter names mapped to their values.
"""
out = dict()
for key in self._get_param_names():
# We need deprecation warnings to always be on in order to
# catch deprecated param values.
# This is set in utils/__init__.py but it gets overwritten
# when running under python3 somehow.
warnings.simplefilter("always", DeprecationWarning)
try:
with warnings.catch_warnings(record=True) as w:
value = getattr(self, key, None)
if len(w) and w[0].category == DeprecationWarning:
# if the parameter is deprecated, don't show it
continue
finally:
warnings.filters.pop(0)
# XXX: should we rather test if instance of estimator?
if deep and hasattr(value, 'get_params'):
deep_items = value.get_params().items()
out.update((key + '__' + k, val) for k, val in deep_items)
out[key] = value
return out
def set_params(self, **params):
"""Set the parameters of this estimator.
The method works on simple estimators as well as on nested objects
(such as pipelines). The former have parameters of the form
``<component>__<parameter>`` so that it's possible to update each
component of a nested object.
Returns
-------
self
"""
if not params:
# Simple optimisation to gain speed (inspect is slow)
return self
valid_params = self.get_params(deep=True)
for key, value in six.iteritems(params):
split = key.split('__', 1)
if len(split) > 1:
# nested objects case
name, sub_name = split
if name not in valid_params:
raise ValueError('Invalid parameter %s for estimator %s. '
'Check the list of available parameters '
'with `estimator.get_params().keys()`.' %
(name, self))
sub_object = valid_params[name]
sub_object.set_params(**{sub_name: value})
else:
# simple objects case
if key not in valid_params:
raise ValueError('Invalid parameter %s for estimator %s. '
'Check the list of available parameters '
'with `estimator.get_params().keys()`.' %
(key, self.__class__.__name__))
setattr(self, key, value)
return self
def __repr__(self):
class_name = self.__class__.__name__
return '%s(%s)' % (class_name, _pprint(self.get_params(deep=False),
offset=len(class_name),),)
###############################################################################
class ClassifierMixin(object):
"""Mixin class for all classifiers in scikit-learn."""
_estimator_type = "classifier"
def score(self, X, y, sample_weight=None):
"""Returns the mean accuracy on the given test data and labels.
In multi-label classification, this is the subset accuracy
which is a harsh metric since you require for each sample that
each label set be correctly predicted.
Parameters
----------
X : array-like, shape = (n_samples, n_features)
Test samples.
y : array-like, shape = (n_samples) or (n_samples, n_outputs)
True labels for X.
sample_weight : array-like, shape = [n_samples], optional
Sample weights.
Returns
-------
score : float
Mean accuracy of self.predict(X) wrt. y.
"""
from .metrics import accuracy_score
return accuracy_score(y, self.predict(X), sample_weight=sample_weight)
###############################################################################
class RegressorMixin(object):
"""Mixin class for all regression estimators in scikit-learn."""
_estimator_type = "regressor"
def score(self, X, y, sample_weight=None):
"""Returns the coefficient of determination R^2 of the prediction.
The coefficient R^2 is defined as (1 - u/v), where u is the regression
sum of squares ((y_true - y_pred) ** 2).sum() and v is the residual
sum of squares ((y_true - y_true.mean()) ** 2).sum().
Best possible score is 1.0 and it can be negative (because the
model can be arbitrarily worse). A constant model that always
predicts the expected value of y, disregarding the input features,
would get a R^2 score of 0.0.
Parameters
----------
X : array-like, shape = (n_samples, n_features)
Test samples.
y : array-like, shape = (n_samples) or (n_samples, n_outputs)
True values for X.
sample_weight : array-like, shape = [n_samples], optional
Sample weights.
Returns
-------
score : float
R^2 of self.predict(X) wrt. y.
"""
from .metrics import r2_score
return r2_score(y, self.predict(X), sample_weight=sample_weight,
multioutput='variance_weighted')
###############################################################################
class ClusterMixin(object):
"""Mixin class for all cluster estimators in scikit-learn."""
_estimator_type = "clusterer"
def fit_predict(self, X, y=None):
"""Performs clustering on X and returns cluster labels.
Parameters
----------
X : ndarray, shape (n_samples, n_features)
Input data.
Returns
-------
y : ndarray, shape (n_samples,)
cluster labels
"""
# non-optimized default implementation; override when a better
# method is possible for a given clustering algorithm
self.fit(X)
return self.labels_
class BiclusterMixin(object):
"""Mixin class for all bicluster estimators in scikit-learn"""
@property
def biclusters_(self):
"""Convenient way to get row and column indicators together.
Returns the ``rows_`` and ``columns_`` members.
"""
return self.rows_, self.columns_
def get_indices(self, i):
"""Row and column indices of the i'th bicluster.
Only works if ``rows_`` and ``columns_`` attributes exist.
Returns
-------
row_ind : np.array, dtype=np.intp
Indices of rows in the dataset that belong to the bicluster.
col_ind : np.array, dtype=np.intp
Indices of columns in the dataset that belong to the bicluster.
"""
rows = self.rows_[i]
columns = self.columns_[i]
return np.nonzero(rows)[0], np.nonzero(columns)[0]
def get_shape(self, i):
"""Shape of the i'th bicluster.
Returns
-------
shape : (int, int)
Number of rows and columns (resp.) in the bicluster.
"""
indices = self.get_indices(i)
return tuple(len(i) for i in indices)
def get_submatrix(self, i, data):
"""Returns the submatrix corresponding to bicluster `i`.
Works with sparse matrices. Only works if ``rows_`` and
``columns_`` attributes exist.
"""
from .utils.validation import check_array
data = check_array(data, accept_sparse='csr')
row_ind, col_ind = self.get_indices(i)
return data[row_ind[:, np.newaxis], col_ind]
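# A minimal sketch of the indicator convention assumed by BiclusterMixin (hypothetical
# fitted model): rows_ and columns_ are boolean masks of shape (n_biclusters, n_rows)
# and (n_biclusters, n_columns).
#
#   model.rows_    = np.array([[True, False, True]])
#   model.columns_ = np.array([[False, True]])
#   model.get_indices(0)          # -> (array([0, 2]), array([1]))
#   model.get_shape(0)            # -> (2, 1)
#   model.get_submatrix(0, data)  # -> data restricted to rows {0, 2} and column {1}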
###############################################################################
class TransformerMixin(object):
"""Mixin class for all transformers in scikit-learn."""
def fit_transform(self, X, y=None, **fit_params):
"""Fit to data, then transform it.
Fits transformer to X and y with optional parameters fit_params
and returns a transformed version of X.
Parameters
----------
X : numpy array of shape [n_samples, n_features]
Training set.
y : numpy array of shape [n_samples]
Target values.
Returns
-------
X_new : numpy array of shape [n_samples, n_features_new]
Transformed array.
"""
# non-optimized default implementation; override when a better
# method is possible for a given clustering algorithm
if y is None:
# fit method of arity 1 (unsupervised transformation)
return self.fit(X, **fit_params).transform(X)
else:
# fit method of arity 2 (supervised transformation)
return self.fit(X, y, **fit_params).transform(X)
###############################################################################
class MetaEstimatorMixin(object):
"""Mixin class for all meta estimators in scikit-learn."""
# this is just a tag for the moment
###############################################################################
def is_classifier(estimator):
"""Returns True if the given estimator is (probably) a classifier."""
return getattr(estimator, "_estimator_type", None) == "classifier"
def is_regressor(estimator):
"""Returns True if the given estimator is (probably) a regressor."""
return getattr(estimator, "_estimator_type", None) == "regressor"
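# Usage sketch (hypothetical estimator instance): any estimator inheriting
# ClassifierMixin or RegressorMixin carries the corresponding _estimator_type tag, so
#   is_classifier(SomeClassifier())  # -> True
#   is_regressor(SomeClassifier())   # -> False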
| bsd-3-clause |
erp12/pyshgp | setup.py | 1 | 1228 | """Pyshgp setup file."""
import os
from setuptools import setup, find_packages
exec(open("pyshgp/__init__.py").read())
def read(fname):
"""Read a file to a string."""
return open(os.path.join(os.path.dirname(__file__), fname)).read()
setup(
name="pyshgp",
version=__version__,
description="Push Genetic Programming in Python",
long_description=read('README.md'),
long_description_content_type="text/markdown",
keywords=["push gp", "genetic programming", "pushgp", "gp", "push"],
author="Eddie Pantridge",
author_email="[email protected]",
license="MIT",
url="https://github.com/erp12/pyshgp",
packages=find_packages(
exclude=('examples', 'examples.*', 'tests', 'tests.*', 'docs', 'docs_source')
),
classifiers=[
"Development Status :: 4 - Beta",
'Programming Language :: Python :: 3',
"Intended Audience :: Science/Research",
"Intended Audience :: Developers",
"Topic :: Scientific/Engineering :: Artificial Intelligence",
],
install_requires=[
"numpy>=1.12.0",
"scipy>=0.18.0",
"pandas>=0.23.4",
"pyrsistent>=0.16.0",
],
tests_require=[
"pytest"
],
)
| mit |
dsquareindia/scikit-learn | sklearn/utils/tests/test_class_weight.py | 55 | 9891 | import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.datasets import make_blobs
from sklearn.utils.class_weight import compute_class_weight
from sklearn.utils.class_weight import compute_sample_weight
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_equal
def test_compute_class_weight():
# Test (and demo) compute_class_weight.
y = np.asarray([2, 2, 2, 3, 3, 4])
classes = np.unique(y)
cw = compute_class_weight("balanced", classes, y)
# total effect of samples is preserved
class_counts = np.bincount(y)[2:]
assert_almost_equal(np.dot(cw, class_counts), y.shape[0])
assert_true(cw[0] < cw[1] < cw[2])
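# Worked arithmetic for the test above, assuming the usual "balanced" heuristic
# n_samples / (n_classes * bincount(y)): the counts are [3, 2, 1] for classes [2, 3, 4],
# so cw = [6/(3*3), 6/(3*2), 6/(3*1)] = [0.667, 1.0, 2.0], which is increasing and
# satisfies dot(cw, counts) = 2 + 2 + 2 = 6 = n_samples.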
def test_compute_class_weight_not_present():
# Raise error when y does not contain all class labels
classes = np.arange(4)
y = np.asarray([0, 0, 0, 1, 1, 2])
assert_raises(ValueError, compute_class_weight, "balanced", classes, y)
# Fix exception in error message formatting when missing label is a string
# https://github.com/scikit-learn/scikit-learn/issues/8312
assert_raise_message(ValueError,
'Class label label_not_present not present',
compute_class_weight,
{'label_not_present': 1.}, classes, y)
# Raise error when y has items not in classes
classes = np.arange(2)
assert_raises(ValueError, compute_class_weight, "balanced", classes, y)
assert_raises(ValueError, compute_class_weight, {0: 1., 1: 2.}, classes, y)
def test_compute_class_weight_dict():
classes = np.arange(3)
class_weights = {0: 1.0, 1: 2.0, 2: 3.0}
y = np.asarray([0, 0, 1, 2])
cw = compute_class_weight(class_weights, classes, y)
# When the user specifies class weights, compute_class_weights should just
# return them.
assert_array_almost_equal(np.asarray([1.0, 2.0, 3.0]), cw)
# When a class weight is specified that isn't in classes, a ValueError
# should get raised
msg = 'Class label 4 not present.'
class_weights = {0: 1.0, 1: 2.0, 2: 3.0, 4: 1.5}
assert_raise_message(ValueError, msg, compute_class_weight, class_weights,
classes, y)
msg = 'Class label -1 not present.'
class_weights = {-1: 5.0, 0: 1.0, 1: 2.0, 2: 3.0}
assert_raise_message(ValueError, msg, compute_class_weight, class_weights,
classes, y)
def test_compute_class_weight_invariance():
# Test that results with class_weight="balanced" is invariant wrt
# class imbalance if the number of samples is identical.
# The test uses a balanced two class dataset with 100 datapoints.
# It creates three versions, one where class 1 is duplicated
# resulting in 150 points of class 1 and 50 of class 0,
# one where there are 50 points in class 1 and 150 in class 0,
# and one where there are 100 points of each class (this one is balanced
# again).
# With balancing class weights, all three should give the same model.
X, y = make_blobs(centers=2, random_state=0)
# create dataset where class 1 is duplicated twice
X_1 = np.vstack([X] + [X[y == 1]] * 2)
y_1 = np.hstack([y] + [y[y == 1]] * 2)
# create dataset where class 0 is duplicated twice
X_0 = np.vstack([X] + [X[y == 0]] * 2)
y_0 = np.hstack([y] + [y[y == 0]] * 2)
# duplicate everything
X_ = np.vstack([X] * 2)
y_ = np.hstack([y] * 2)
# results should be identical
logreg1 = LogisticRegression(class_weight="balanced").fit(X_1, y_1)
logreg0 = LogisticRegression(class_weight="balanced").fit(X_0, y_0)
logreg = LogisticRegression(class_weight="balanced").fit(X_, y_)
assert_array_almost_equal(logreg1.coef_, logreg0.coef_)
assert_array_almost_equal(logreg.coef_, logreg0.coef_)
def test_compute_class_weight_balanced_negative():
# Test compute_class_weight when labels are negative
# Test with balanced class labels.
classes = np.array([-2, -1, 0])
y = np.asarray([-1, -1, 0, 0, -2, -2])
cw = compute_class_weight("balanced", classes, y)
assert_equal(len(cw), len(classes))
assert_array_almost_equal(cw, np.array([1., 1., 1.]))
# Test with unbalanced class labels.
y = np.asarray([-1, 0, 0, -2, -2, -2])
cw = compute_class_weight("balanced", classes, y)
assert_equal(len(cw), len(classes))
class_counts = np.bincount(y + 2)
assert_almost_equal(np.dot(cw, class_counts), y.shape[0])
assert_array_almost_equal(cw, [2. / 3, 2., 1.])
def test_compute_class_weight_balanced_unordered():
# Test compute_class_weight when classes are unordered
classes = np.array([1, 0, 3])
y = np.asarray([1, 0, 0, 3, 3, 3])
cw = compute_class_weight("balanced", classes, y)
class_counts = np.bincount(y)[classes]
assert_almost_equal(np.dot(cw, class_counts), y.shape[0])
assert_array_almost_equal(cw, [2., 1., 2. / 3])
def test_compute_sample_weight():
# Test (and demo) compute_sample_weight.
# Test with balanced classes
y = np.asarray([1, 1, 1, 2, 2, 2])
sample_weight = compute_sample_weight("balanced", y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
# Test with user-defined weights
sample_weight = compute_sample_weight({1: 2, 2: 1}, y)
assert_array_almost_equal(sample_weight, [2., 2., 2., 1., 1., 1.])
# Test with column vector of balanced classes
y = np.asarray([[1], [1], [1], [2], [2], [2]])
sample_weight = compute_sample_weight("balanced", y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
# Test with unbalanced classes
y = np.asarray([1, 1, 1, 2, 2, 2, 3])
sample_weight = compute_sample_weight("balanced", y)
expected_balanced = np.array([0.7777, 0.7777, 0.7777, 0.7777, 0.7777,
0.7777, 2.3333])
assert_array_almost_equal(sample_weight, expected_balanced, decimal=4)
# Test with `None` weights
sample_weight = compute_sample_weight(None, y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1., 1.])
# Test with multi-output of balanced classes
y = np.asarray([[1, 0], [1, 0], [1, 0], [2, 1], [2, 1], [2, 1]])
sample_weight = compute_sample_weight("balanced", y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
# Test with multi-output with user-defined weights
y = np.asarray([[1, 0], [1, 0], [1, 0], [2, 1], [2, 1], [2, 1]])
sample_weight = compute_sample_weight([{1: 2, 2: 1}, {0: 1, 1: 2}], y)
assert_array_almost_equal(sample_weight, [2., 2., 2., 2., 2., 2.])
# Test with multi-output of unbalanced classes
y = np.asarray([[1, 0], [1, 0], [1, 0], [2, 1], [2, 1], [2, 1], [3, -1]])
sample_weight = compute_sample_weight("balanced", y)
assert_array_almost_equal(sample_weight, expected_balanced ** 2, decimal=3)
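# Where the expected numbers above come from, assuming per-class "balanced" weights
# n_samples / (n_classes * count): for y = [1, 1, 1, 2, 2, 2, 3], 7/(3*3) ~= 0.7777 for
# classes 1 and 2 and 7/(3*1) ~= 2.3333 for class 3; in the multi-output case the
# per-output weights are combined multiplicatively, hence expected_balanced ** 2.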
def test_compute_sample_weight_with_subsample():
# Test compute_sample_weight with subsamples specified.
# Test with balanced classes and all samples present
y = np.asarray([1, 1, 1, 2, 2, 2])
sample_weight = compute_sample_weight("balanced", y, range(6))
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
# Test with column vector of balanced classes and all samples present
y = np.asarray([[1], [1], [1], [2], [2], [2]])
sample_weight = compute_sample_weight("balanced", y, range(6))
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
# Test with a subsample
y = np.asarray([1, 1, 1, 2, 2, 2])
sample_weight = compute_sample_weight("balanced", y, range(4))
assert_array_almost_equal(sample_weight, [2. / 3, 2. / 3,
2. / 3, 2., 2., 2.])
# Test with a bootstrap subsample
y = np.asarray([1, 1, 1, 2, 2, 2])
sample_weight = compute_sample_weight("balanced", y, [0, 1, 1, 2, 2, 3])
expected_balanced = np.asarray([0.6, 0.6, 0.6, 3., 3., 3.])
assert_array_almost_equal(sample_weight, expected_balanced)
# Test with a bootstrap subsample for multi-output
y = np.asarray([[1, 0], [1, 0], [1, 0], [2, 1], [2, 1], [2, 1]])
sample_weight = compute_sample_weight("balanced", y, [0, 1, 1, 2, 2, 3])
assert_array_almost_equal(sample_weight, expected_balanced ** 2)
# Test with a missing class
y = np.asarray([1, 1, 1, 2, 2, 2, 3])
sample_weight = compute_sample_weight("balanced", y, range(6))
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1., 0.])
# Test with a missing class for multi-output
y = np.asarray([[1, 0], [1, 0], [1, 0], [2, 1], [2, 1], [2, 1], [2, 2]])
sample_weight = compute_sample_weight("balanced", y, range(6))
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1., 0.])
def test_compute_sample_weight_errors():
# Test compute_sample_weight raises errors expected.
# Invalid preset string
y = np.asarray([1, 1, 1, 2, 2, 2])
y_ = np.asarray([[1, 0], [1, 0], [1, 0], [2, 1], [2, 1], [2, 1]])
assert_raises(ValueError, compute_sample_weight, "ni", y)
assert_raises(ValueError, compute_sample_weight, "ni", y, range(4))
assert_raises(ValueError, compute_sample_weight, "ni", y_)
assert_raises(ValueError, compute_sample_weight, "ni", y_, range(4))
# Not "balanced" for subsample
assert_raises(ValueError,
compute_sample_weight, {1: 2, 2: 1}, y, range(4))
# Not a list or preset for multi-output
assert_raises(ValueError, compute_sample_weight, {1: 2, 2: 1}, y_)
# Incorrect length list for multi-output
assert_raises(ValueError, compute_sample_weight, [{1: 2, 2: 1}], y_)
| bsd-3-clause |
fidelram/deepTools | deeptools/plotEnrichment.py | 1 | 25048 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import argparse
import numpy as np
import matplotlib
matplotlib.use('Agg')
matplotlib.rcParams['pdf.fonttype'] = 42
matplotlib.rcParams['svg.fonttype'] = 'none'
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import plotly.offline as py
import plotly.graph_objs as go
from deeptools.mapReduce import mapReduce, getUserRegion, blSubtract
from deeptools.getFragmentAndReadSize import get_read_and_fragment_length
from deeptools.utilities import getCommonChrNames, mungeChromosome, getTLen, smartLabels
from deeptools.bamHandler import openBam
from deeptoolsintervals import Enrichment, GTF
from deeptools.countReadsPerBin import CountReadsPerBin as cr
from deeptools import parserCommon
old_settings = np.seterr(all='ignore')
def parse_arguments(args=None):
basic_args = plot_enrichment_args()
# --region, --blackListFileName, -p and -v
parent_parser = parserCommon.getParentArgParse(binSize=False)
# --extend reads and such
read_options = parserCommon.read_options()
parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter,
description="""
Tool for calculating and plotting the signal enrichment in either regions in BED
format or feature types (column 3) in GTF format. The underlying datapoints can also be output.
Metrics are plotted as a fraction of total reads. Regions in a BED file are assigned to the 'peak' feature.
detailed help:
plotEnrichment -h
""",
epilog='example usages:\n'
'plotEnrichment -b file1.bam file2.bam --BED peaks.bed -o enrichment.png\n\n'
' \n\n',
parents=[basic_args, parent_parser, read_options])
return parser
def plot_enrichment_args():
parser = argparse.ArgumentParser(add_help=False)
required = parser.add_argument_group('Required arguments')
# define the arguments
required.add_argument('--bamfiles', '-b',
metavar='file1.bam file2.bam',
help='List of indexed bam files separated by spaces.',
nargs='+',
required=True)
required.add_argument('--BED',
help='Limits the enrichment analysis to '
'the regions specified in these BED/GTF files. Enrichment '
'is calculated as the number of reads overlapping each '
'feature type. The feature type is column 3 in a GTF file '
'and "peak" for BED files.',
metavar='FILE1.bed FILE2.bed',
nargs='+',
required=True)
optional = parser.add_argument_group('Optional arguments')
optional.add_argument('--plotFile', '-o',
help='File to save the plot to. The file extension determines the format, '
'so heatmap.pdf will save the heatmap in PDF format. '
'The available formats are: .png, '
'.eps, .pdf and .svg.',
type=parserCommon.writableFile,
metavar='FILE')
optional.add_argument('--attributeKey',
help='Instead of deriving labels from the feature column in a GTF file, '
'use the given attribute key, such as gene_biotype. For BED files or '
'entries without the attribute key, None is used as the label.')
optional.add_argument('--labels', '-l',
metavar='sample1 sample2',
help='User defined labels instead of default labels from '
'file names. '
'Multiple labels have to be separated by spaces, e.g. '
'--labels sample1 sample2 sample3',
nargs='+')
optional.add_argument('--smartLabels',
action='store_true',
help='Instead of manually specifying labels for the input '
'BAM/BED/GTF files, this causes deepTools to use the file name '
'after removing the path and extension. For BED/GTF files, the '
                          'eventual region name will be overridden if specified inside '
'the file.')
optional.add_argument('--regionLabels',
metavar="region1 region2",
help="For BED files, the label given to its region is "
"the file name, but this can be overridden by providing "
"a custom label. For GTF files this is ignored. Note "
"that if you provide labels, you MUST provide one for each "
"BED/GTF file, even though it will be ignored for GTF files.",
nargs='+')
optional.add_argument('--plotTitle', '-T',
help='Title of the plot, to be printed on top of '
'the generated image. Leave blank for no title. (Default: %(default)s)',
default='')
optional.add_argument('--plotFileFormat',
metavar='FILETYPE',
help='Image format type. If given, this option '
'overrides the image format based on the plotFile '
'ending. The available options are: png, '
'eps, pdf, plotly and svg.',
choices=['png', 'pdf', 'svg', 'eps', 'plotly'])
optional.add_argument('--outRawCounts',
help='Save the counts per region to a tab-delimited file.',
type=parserCommon.writableFile,
metavar='FILE')
optional.add_argument('--perSample',
help='Group the plots by sample, rather than by feature type (the default).',
action='store_true')
optional.add_argument('--variableScales',
help='By default, the y-axes are always 0-100. This allows the axis range to be restricted.',
action='store_true')
optional.add_argument('--plotHeight',
help='Plot height in cm. (Default: %(default)s)',
type=float,
default=20)
optional.add_argument('--plotWidth',
help='Plot width in cm. The minimum value is 1 cm. (Default: %(default)s)',
type=float,
default=20)
optional.add_argument('--colors',
help='List of colors to use '
'for the plotted lines. Color names '
'and html hex strings (e.g., #eeff22) '
'are accepted. The color names should '
'be space separated. For example, '
'--colors red blue green ',
nargs='+')
optional.add_argument('--numPlotsPerRow',
help='Number of plots per row (Default: %(default)s)',
type=int,
default=4)
optional.add_argument('--alpha',
default=0.9,
type=parserCommon.check_float_0_1,
help='The alpha channel (transparency) to use for the bars. '
'The default is 0.9 and values must be between 0 and 1.')
optional.add_argument('--Offset',
help='Uses this offset inside of each read as the signal. This is useful in '
'cases like RiboSeq or GROseq, where the signal is 12, 15 or 0 bases past the '
'start of the read. This can be paired with the --filterRNAstrand option. '
'Note that negative values indicate offsets from the end of each read. A value '
'of 1 indicates the first base of the alignment (taking alignment orientation '
'into account). Likewise, a value of -1 is the last base of the alignment. An '
'offset of 0 is not permitted. If two values are specified, then they will be '
'used to specify a range of positions. Note that specifying something like '
'--Offset 5 -1 will result in the 5th through last position being used, which '
'is equivalent to trimming 4 bases from the 5-prime end of alignments.',
metavar='INT',
type=int,
nargs='+',
required=False)
bed12 = parser.add_argument_group('BED12 arguments')
bed12.add_argument('--keepExons',
help="For BED12 files, use each exon as a region, rather than columns 2/3",
action="store_true")
return parser
def getBAMBlocks(read, defaultFragmentLength, centerRead, offset=None):
"""
This is basically get_fragment_from_read from countReadsPerBin
"""
blocks = None
maxPairedFragmentLength = 0
if defaultFragmentLength != "read length":
maxPairedFragmentLength = 4 * defaultFragmentLength
if defaultFragmentLength == 'read length':
blocks = read.get_blocks()
else:
if cr.is_proper_pair(read, maxPairedFragmentLength):
if read.is_reverse:
fragmentStart = read.next_reference_start
fragmentEnd = read.reference_end
else:
fragmentStart = read.reference_start
# the end of the fragment is defined as
# the start of the forward read plus the insert length
fragmentEnd = read.reference_start + abs(read.template_length)
# Extend using the default fragment length
else:
if read.is_reverse:
fragmentStart = read.reference_end - defaultFragmentLength
fragmentEnd = read.reference_end
else:
fragmentStart = read.reference_start
fragmentEnd = read.reference_start + defaultFragmentLength
if centerRead:
fragmentCenter = fragmentEnd - (fragmentEnd - fragmentStart) / 2
fragmentStart = fragmentCenter - read.infer_query_length(always=False) / 2
fragmentEnd = fragmentStart + read.infer_query_length(always=False)
            assert fragmentStart < fragmentEnd, "fragment start greater than fragment " \
                "end for read {}".format(read.query_name)
blocks = [(int(fragmentStart), int(fragmentEnd))]
# Handle read offsets, if needed
if offset is not None:
rv = [(None, None)]
if len(offset) > 1:
if offset[0] > 0:
offset[0] -= 1
if offset[1] < 0:
offset[1] += 1
else:
if offset[0] > 0:
offset[0] -= 1
offset = [offset[0], offset[0] + 1]
else:
offset = [offset[0], None]
if offset[1] == 0:
# -1 gets switched to 0, which screws things up
offset = (offset[0], None)
stretch = []
# For the sake of simplicity, convert [(10, 20), (30, 40)] to [10, 11, 12, 13, ..., 40]
# Then subset accordingly
for block in blocks:
stretch.extend(range(block[0], block[1]))
if read.is_reverse:
stretch = stretch[::-1]
try:
foo = stretch[offset[0]:offset[1]]
except:
return rv
if len(foo) == 0:
return rv
if read.is_reverse:
foo = foo[::-1]
# Convert the stretch back to a list of tuples
foo = np.array(foo)
d = foo[1:] - foo[:-1]
idx = np.argwhere(d > 1).flatten().tolist() # This now holds the interval bounds as a list
idx.append(-1)
last = 0
blocks = []
for i in idx:
blocks.append((foo[last].astype("int"), foo[i].astype("int") + 1))
last = i + 1
return blocks
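# A minimal standalone sketch of the offset logic above (not part of the deepTools API;
# the name below is illustrative only): blocks are flattened to per-base positions,
# sliced, and rebuilt into (start, end) tuples.
def _offset_blocks_sketch(blocks, start, stop):
    """Restrict half-open (start, end) blocks to the [start:stop] slice of covered bases."""
    stretch = []
    for b0, b1 in blocks:
        stretch.extend(range(b0, b1))
    stretch = stretch[start:stop]
    if not stretch:
        return [(None, None)]
    out = []
    run_start = prev = stretch[0]
    for p in stretch[1:]:
        if p != prev + 1:
            # gap between covered bases -> close the current block
            out.append((run_start, prev + 1))
            run_start = p
        prev = p
    out.append((run_start, prev + 1))
    return out
# e.g. _offset_blocks_sketch([(10, 20), (30, 40)], 5, None) -> [(15, 20), (30, 40)]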
def getEnrichment_worker(arglist):
"""
This is the worker function of plotEnrichment.
In short, given a region, iterate over all reads **starting** in it.
Filter/extend them as requested and check each for an overlap with
findOverlaps. For each overlap, increment the counter for that feature.
"""
chrom, start, end, args, defaultFragmentLength = arglist
if args.verbose:
sys.stderr.write("Processing {}:{}-{}\n".format(chrom, start, end))
olist = []
total = [0] * len(args.bamfiles)
for idx, f in enumerate(args.bamfiles):
odict = dict()
for x in gtf.features:
odict[x] = 0
fh = openBam(f)
chrom = mungeChromosome(chrom, fh.references)
lpos = None
prev_pos = set()
for read in fh.fetch(chrom, start, end):
# Filter
if read.pos < start:
# Ensure that a given alignment is processed only once
continue
if read.flag & 4:
continue
if args.minMappingQuality and read.mapq < args.minMappingQuality:
continue
if args.samFlagInclude and read.flag & args.samFlagInclude != args.samFlagInclude:
continue
if args.samFlagExclude and read.flag & args.samFlagExclude != 0:
continue
tLen = getTLen(read)
if args.minFragmentLength > 0 and tLen < args.minFragmentLength:
continue
if args.maxFragmentLength > 0 and tLen > args.maxFragmentLength:
continue
if args.ignoreDuplicates:
# Assuming more or less concordant reads, use the fragment bounds, otherwise the start positions
if tLen >= 0:
s = read.pos
e = s + tLen
else:
s = read.pnext
e = s - tLen
if read.reference_id != read.next_reference_id:
e = read.pnext
if lpos is not None and lpos == read.reference_start \
and (s, e, read.next_reference_id, read.is_reverse) in prev_pos:
continue
if lpos != read.reference_start:
prev_pos.clear()
lpos = read.reference_start
prev_pos.add((s, e, read.next_reference_id, read.is_reverse))
total[idx] += 1
# Get blocks, possibly extending
features = gtf.findOverlaps(chrom, getBAMBlocks(read, defaultFragmentLength, args.centerReads, args.Offset))
if features is not None and len(features) > 0:
for x in features:
odict[x] += 1
olist.append(odict)
return olist, gtf.features, total
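# Shape of the worker's return value (descriptive note): olist holds one dict per BAM
# file mapping each feature label to the number of filtered alignments overlapping it in
# this chunk, and total holds the per-BAM count of alignments passing the filters; main()
# later sums these per-chunk results produced via mapReduce before plotting.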
def plotEnrichment(args, featureCounts, totalCounts, features):
# get the number of rows and columns
if args.perSample:
totalPlots = len(args.bamfiles)
barsPerPlot = len(features)
else:
totalPlots = len(features)
barsPerPlot = len(args.bamfiles)
cols = min(args.numPlotsPerRow, totalPlots)
rows = np.ceil(totalPlots / float(args.numPlotsPerRow)).astype(int)
# Handle the colors
if not args.colors:
cmap_plot = plt.get_cmap('jet')
args.colors = cmap_plot(np.arange(barsPerPlot, dtype=float) / float(barsPerPlot))
if args.plotFileFormat == 'plotly':
args.colors = range(barsPerPlot)
elif len(args.colors) < barsPerPlot:
sys.exit("Error: {0} colors were requested, but {1} were needed!".format(len(args.colors), barsPerPlot))
data = []
if args.plotFileFormat == 'plotly':
fig = go.Figure()
fig['layout'].update(title=args.plotTitle)
domainWidth = .9 / cols
domainHeight = .9 / rows
bufferHeight = 0.0
if rows > 1:
bufferHeight = 0.1 / (rows - 1)
bufferWidth = 0.0
if cols > 1:
bufferWidth = 0.1 / (cols - 1)
else:
grids = gridspec.GridSpec(rows, cols)
plt.rcParams['font.size'] = 10.0
# convert cm values to inches
fig = plt.figure(figsize=(args.plotWidth / 2.54, args.plotHeight / 2.54))
fig.suptitle(args.plotTitle, y=(1 - (0.06 / args.plotHeight)))
for i in range(totalPlots):
col = i % cols
row = np.floor(i / float(args.numPlotsPerRow)).astype(int)
if args.perSample:
xlabels = features
ylabel = "% alignments in {0}".format(args.labels[i])
vals = [featureCounts[i][foo] for foo in features]
vals = 100 * np.array(vals, dtype='float64') / totalCounts[i]
else:
xlabels = args.labels
ylabel = "% {0}".format(features[i])
vals = [foo[features[i]] for foo in featureCounts]
vals = 100 * np.array(vals, dtype='float64') / np.array(totalCounts, dtype='float64')
if args.plotFileFormat == 'plotly':
xanchor = 'x{}'.format(i + 1)
yanchor = 'y{}'.format(i + 1)
base = row * (domainHeight + bufferHeight)
domain = [base, base + domainHeight]
fig['layout']['xaxis{}'.format(i + 1)] = {'domain': domain, 'anchor': yanchor}
base = col * (domainWidth + bufferWidth)
domain = [base, base + domainWidth]
fig['layout']['yaxis{}'.format(i + 1)] = {'domain': domain, 'anchor': xanchor, 'title': ylabel}
if args.variableScales is False:
fig['layout']['yaxis{}'.format(i + 1)].update(range=[0, 100])
trace = go.Bar(x=xlabels,
y=vals,
opacity=args.alpha,
orientation='v',
showlegend=False,
xaxis=xanchor,
yaxis=yanchor,
name=ylabel,
marker={'color': args.colors, 'line': {'color': args.colors}})
data.append(trace)
else:
ax = plt.subplot(grids[row, col])
ax.bar(np.arange(vals.shape[0]), vals, width=1.0, bottom=0.0, align='center', color=args.colors, edgecolor=args.colors, alpha=args.alpha)
ax.set_ylabel(ylabel)
ax.set_xticks(np.arange(vals.shape[0]))
ax.set_xticklabels(xlabels, rotation='vertical')
if args.variableScales is False:
ax.set_ylim(0.0, 100.0)
if args.plotFileFormat == 'plotly':
fig['data'] = data
py.plot(fig, filename=args.plotFile, auto_open=False)
# colors
else:
plt.subplots_adjust(wspace=0.05, hspace=0.3, bottom=0.15, top=0.80)
plt.tight_layout()
plt.savefig(args.plotFile, dpi=200, format=args.plotFileFormat)
plt.close()
def getChunkLength(args, chromSize):
"""
    There's no point in parsing the GTF file over and over again needlessly.
    Empirically, it seems that using roughly 4 chunks per worker is ideal, since
    coverage is non-uniform. This is a heuristic way of approximating that.
    Note that if there are MANY small contigs and a few large ones (e.g., the
    max and median lengths are >10x different), then it's best to take a
    different tack.
if args.region:
chromSize, region_start, region_end, genomeChunkLength = getUserRegion(chromSize, args.region)
        rv = np.ceil((region_end - region_start) / float(4 * args.numberOfProcessors)).astype(int)
return max(1, rv)
bl = None
if args.blackListFileName:
bl = GTF(args.blackListFileName)
lengths = []
for k, v in chromSize:
regs = blSubtract(bl, k, [0, v])
for reg in regs:
lengths.append(reg[1] - reg[0])
if len(lengths) >= 4 * args.numberOfProcessors:
rv = np.median(lengths).astype(int)
# In cases like dm6 or GRCh38, there are a LOT of really small contigs, which will cause the median to be small and performance to tank
if np.max(lengths) >= 10 * rv:
rv = np.ceil(np.sum(lengths) / (4.0 * args.numberOfProcessors)).astype(int)
else:
rv = np.ceil(np.sum(lengths) / (4.0 * args.numberOfProcessors)).astype(int)
return max(1, rv)
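# Rough arithmetic for the heuristic above (hypothetical numbers): with 25 contigs
# totalling 120 Mb after blacklist subtraction and 8 workers, 25 < 4 * 8, so the chunk
# length is ceil(120e6 / (4 * 8)) = 3,750,000 bp, i.e. roughly 4 chunks per worker.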
def main(args=None):
args = parse_arguments().parse_args(args)
if not args.outRawCounts and not args.plotFile:
sys.exit("Error: You need to specify at least one of --plotFile or --outRawCounts!\n")
if args.labels is None:
args.labels = args.bamfiles
if args.smartLabels:
args.labels = smartLabels(args.bamfiles)
if len(args.labels) != len(args.bamfiles):
sys.exit("Error: The number of labels ({0}) does not match the number of BAM files ({1})!".format(len(args.labels), len(args.bamfiles)))
# Ensure that if we're given an attributeKey that it's not empty
    if args.attributeKey == "":
args.attributeKey = None
global gtf
if not args.regionLabels and args.smartLabels:
args.regionLabels = smartLabels(args.BED)
gtf = Enrichment(args.BED, keepExons=args.keepExons, labels=args.regionLabels, attributeKey=args.attributeKey)
# Get fragment size and chromosome dict
fhs = [openBam(x) for x in args.bamfiles]
chromSize, non_common_chr = getCommonChrNames(fhs, verbose=args.verbose)
for fh in fhs:
fh.close()
frag_len_dict, read_len_dict = get_read_and_fragment_length(args.bamfiles[0],
return_lengths=False,
blackListFileName=args.blackListFileName,
numberOfProcessors=args.numberOfProcessors,
verbose=args.verbose)
if args.extendReads:
if args.extendReads is True:
# try to guess fragment length if the bam file contains paired end reads
if frag_len_dict:
defaultFragmentLength = frag_len_dict['median']
else:
sys.exit("*ERROR*: library is not paired-end. Please provide an extension length.")
if args.verbose:
print("Fragment length based on paired en data "
"estimated to be {0}".format(frag_len_dict['median']))
elif args.extendReads < read_len_dict['median']:
sys.stderr.write("*WARNING*: read extension is smaller than read length (read length = {}). "
"Reads will not be extended.\n".format(int(read_len_dict['median'])))
defaultFragmentLength = 'read length'
elif args.extendReads > 2000:
sys.exit("*ERROR*: read extension must be smaller that 2000. Value give: {} ".format(args.extendReads))
else:
defaultFragmentLength = args.extendReads
else:
defaultFragmentLength = 'read length'
# Get the chunkLength
chunkLength = getChunkLength(args, chromSize)
# Map reduce to get the counts/file/feature
res = mapReduce([args, defaultFragmentLength],
getEnrichment_worker,
chromSize,
genomeChunkLength=chunkLength,
region=args.region,
blackListFileName=args.blackListFileName,
numberOfProcessors=args.numberOfProcessors,
verbose=args.verbose)
features = res[0][1]
featureCounts = []
for i in list(range(len(args.bamfiles))):
d = dict()
for x in features:
d[x] = 0
featureCounts.append(d)
# res is a list, with each element a list (length len(args.bamfiles)) of dicts
totalCounts = [0] * len(args.bamfiles)
for x in res:
for i, y in enumerate(x[2]):
totalCounts[i] += y
for i, y in enumerate(x[0]):
for k, v in y.items():
featureCounts[i][k] += v
# Make a plot
if args.plotFile:
plotEnrichment(args, featureCounts, totalCounts, features)
# Raw counts
if args.outRawCounts:
of = open(args.outRawCounts, "w")
of.write("file\tfeatureType\tpercent\tfeatureReadCount\ttotalReadCount\n")
for i, x in enumerate(args.labels):
for k, v in featureCounts[i].items():
of.write("{0}\t{1}\t{2:5.2f}\t{3}\t{4}\n".format(x, k, (100.0 * v) / totalCounts[i], v, totalCounts[i]))
of.close()
| gpl-3.0 |
niketanpansare/systemml | src/main/python/tests/test_mllearn_df.py | 2 | 5767 | #!/usr/bin/python
#-------------------------------------------------------------
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
#-------------------------------------------------------------
# To run:
# - Python 2: `PYSPARK_PYTHON=python2 spark-submit --master local[*] --driver-class-path SystemML.jar test_mllearn_df.py`
# - Python 3: `PYSPARK_PYTHON=python3 spark-submit --master local[*] --driver-class-path SystemML.jar test_mllearn_df.py`
# Make the `systemml` package importable
import os
import sys
path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "../")
sys.path.insert(0, path)
import unittest
import numpy as np
from pyspark.ml import Pipeline
from pyspark.ml.feature import HashingTF, Tokenizer
from pyspark.sql import SparkSession
from sklearn import datasets, metrics, neighbors
from sklearn.datasets import fetch_20newsgroups
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn import linear_model
from sklearn.metrics import accuracy_score, r2_score
from systemml.mllearn import LinearRegression, LogisticRegression, NaiveBayes, SVM
sparkSession = SparkSession.builder.getOrCreate()
def test_accuracy_score(sklearn_predicted, mllearn_predicted, y_test, threshold):
if accuracy_score(sklearn_predicted, mllearn_predicted) > threshold:
# Our results match that of scikit-learn. No need to measure with the ground truth
return True
elif accuracy_score(y_test, mllearn_predicted) > accuracy_score(y_test, sklearn_predicted):
# We perform better than scikit-learn, ignore the threshold
return True
else:
return False
# Currently not integrated with JUnit test
# ~/spark-1.6.1-scala-2.11/bin/spark-submit --master local[*] --driver-class-path SystemML.jar test.py
class TestMLLearn(unittest.TestCase):
def test_logistic_sk2(self):
digits = datasets.load_digits()
X_digits = digits.data
y_digits = digits.target
n_samples = len(X_digits)
X_train = X_digits[:int(.9 * n_samples)]
y_train = y_digits[:int(.9 * n_samples)]
X_test = X_digits[int(.9 * n_samples):]
y_test = y_digits[int(.9 * n_samples):]
# Convert to DataFrame for i/o: current way to transfer data
logistic = LogisticRegression(sparkSession, transferUsingDF=True)
logistic.fit(X_train, y_train)
mllearn_predicted = logistic.predict(X_test)
sklearn_logistic = linear_model.LogisticRegression()
sklearn_logistic.fit(X_train, y_train)
self.failUnless(test_accuracy_score(sklearn_logistic.predict(X_test), mllearn_predicted, y_test, 0.95))
def test_linear_regression(self):
diabetes = datasets.load_diabetes()
diabetes_X = diabetes.data[:, np.newaxis, 2]
diabetes_X_train = diabetes_X[:-20]
diabetes_X_test = diabetes_X[-20:]
diabetes_y_train = diabetes.target[:-20]
diabetes_y_test = diabetes.target[-20:]
regr = LinearRegression(sparkSession, solver='direct-solve', transferUsingDF=True)
regr.fit(diabetes_X_train, diabetes_y_train)
mllearn_predicted = regr.predict(diabetes_X_test)
sklearn_regr = linear_model.LinearRegression()
sklearn_regr.fit(diabetes_X_train, diabetes_y_train)
self.failUnless(r2_score(sklearn_regr.predict(diabetes_X_test), mllearn_predicted) > 0.95) # We are comparable to a similar algorithm in scikit learn
def test_linear_regression_cg(self):
diabetes = datasets.load_diabetes()
diabetes_X = diabetes.data[:, np.newaxis, 2]
diabetes_X_train = diabetes_X[:-20]
diabetes_X_test = diabetes_X[-20:]
diabetes_y_train = diabetes.target[:-20]
diabetes_y_test = diabetes.target[-20:]
regr = LinearRegression(sparkSession, solver='newton-cg', transferUsingDF=True)
regr.fit(diabetes_X_train, diabetes_y_train)
mllearn_predicted = regr.predict(diabetes_X_test)
sklearn_regr = linear_model.LinearRegression()
sklearn_regr.fit(diabetes_X_train, diabetes_y_train)
self.failUnless(r2_score(sklearn_regr.predict(diabetes_X_test), mllearn_predicted) > 0.95) # We are comparable to a similar algorithm in scikit learn
def test_svm_sk2(self):
digits = datasets.load_digits()
X_digits = digits.data
y_digits = digits.target
n_samples = len(X_digits)
X_train = X_digits[:int(.9 * n_samples)]
y_train = y_digits[:int(.9 * n_samples)]
X_test = X_digits[int(.9 * n_samples):]
y_test = y_digits[int(.9 * n_samples):]
svm = SVM(sparkSession, is_multi_class=True, transferUsingDF=True)
mllearn_predicted = svm.fit(X_train, y_train).predict(X_test)
from sklearn import linear_model, svm
clf = svm.LinearSVC()
sklearn_predicted = clf.fit(X_train, y_train).predict(X_test)
self.failUnless(test_accuracy_score(sklearn_predicted, mllearn_predicted, y_test, 0.95))
if __name__ == '__main__':
unittest.main()
| apache-2.0 |
roxyboy/bokeh | examples/compat/mpl/listcollection.py | 34 | 1602 | from matplotlib.collections import LineCollection
import matplotlib.pyplot as plt
import numpy as np
from bokeh import mpl
from bokeh.plotting import output_file, show
def make_segments(x, y):
'''
Create list of line segments from x and y coordinates.
'''
points = np.array([x, y]).T.reshape(-1, 1, 2)
segments = np.concatenate([points[:-1], points[1:]], axis=1)
return segments
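# Shape bookkeeping for make_segments (hypothetical N-point input): np.array([x, y]).T
# has shape (N, 2), reshape(-1, 1, 2) gives points of shape (N, 1, 2), and concatenating
# points[:-1] with points[1:] along axis 1 yields segments of shape (N - 1, 2, 2), where
# segment i is [[x[i], y[i]], [x[i+1], y[i+1]]] -- the format LineCollection expects.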
def colorline(x, y, colors=None, linewidth=3, alpha=1.0):
'''
    Plot a line with segments.
    Optionally, specify segment colors and segment widths.
'''
# Make a list of colors cycling through the rgbcmyk series.
# You have several ways to input the colors:
# colors = ['r','g','b','c','y','m','k']
# colors = ['red','green','blue','cyan','yellow','magenta','black']
# colors = ['#ff0000', '#008000', '#0000ff', '#00bfbf', '#bfbf00', '#bf00bf', '#000000']
# colors = [(1.0, 0.0, 0.0, 1.0), (0.0, 0.5, 0.0, 1.0), (0.0, 0.0, 1.0, 1.0), (0.0, 0.75, 0.75, 1.0),
# (0.75, 0.75, 0, 1.0), (0.75, 0, 0.75, 1.0), (0.0, 0.0, 0.0, 1.0)]
colors = ['r', 'g', 'b', 'c', 'y', 'm', 'k']
widths = [5, 10, 20, 40, 20, 10, 5]
segments = make_segments(x, y)
lc = LineCollection(segments, colors=colors, linewidth=widths, alpha=alpha)
ax = plt.gca()
ax.add_collection(lc)
return lc
# Colored sine wave
x = np.linspace(0, 4 * np.pi, 100)
y = np.sin(x)
colorline(x, y)
plt.title("MPL support for ListCollection in Bokeh")
plt.xlim(x.min(), x.max())
plt.ylim(-1.0, 1.0)
output_file("listcollection.html")
show(mpl.to_bokeh())
| bsd-3-clause |
cpcloud/dask | dask/dataframe/tests/test_dataframe.py | 1 | 95806 | import sys
from copy import copy
from operator import getitem, add
from itertools import product
import pandas as pd
import pandas.util.testing as tm
import numpy as np
import pytest
import dask
from dask.async import get_sync
from dask import delayed
from dask.utils import ignoring, put_lines
import dask.dataframe as dd
from dask.dataframe.core import repartition_divisions, aca, _concat, Scalar
from dask.dataframe.methods import boundary_slice
from dask.dataframe.utils import assert_eq, make_meta, assert_max_deps
dsk = {('x', 0): pd.DataFrame({'a': [1, 2, 3], 'b': [4, 5, 6]},
index=[0, 1, 3]),
('x', 1): pd.DataFrame({'a': [4, 5, 6], 'b': [3, 2, 1]},
index=[5, 6, 8]),
('x', 2): pd.DataFrame({'a': [7, 8, 9], 'b': [0, 0, 0]},
index=[9, 9, 9])}
meta = make_meta({'a': 'i8', 'b': 'i8'}, index=pd.Index([], 'i8'))
d = dd.DataFrame(dsk, 'x', meta, [0, 5, 9, 9])
full = d.compute()
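# Descriptive note on the fixture above: dsk maps graph keys ('x', partition_number) to
# pandas partitions, and the divisions tuple (0, 5, 9, 9) records the index boundaries --
# partition 0 holds index values in [0, 5), partition 1 in [5, 9), and the final
# partition holds [9, 9] (the last division is inclusive).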
def test_Dataframe():
expected = pd.Series([2, 3, 4, 5, 6, 7, 8, 9, 10],
index=[0, 1, 3, 5, 6, 8, 9, 9, 9],
name='a')
assert_eq(d['a'] + 1, expected)
tm.assert_index_equal(d.columns, pd.Index(['a', 'b']))
assert_eq(d[d['b'] > 2], full[full['b'] > 2])
assert_eq(d[['a', 'b']], full[['a', 'b']])
assert_eq(d.a, full.a)
assert d.b.mean().compute() == full.b.mean()
assert np.allclose(d.b.var().compute(), full.b.var())
assert np.allclose(d.b.std().compute(), full.b.std())
assert d.index._name == d.index._name # this is deterministic
assert repr(d)
def test_head_tail():
assert_eq(d.head(2), full.head(2))
assert_eq(d.head(3), full.head(3))
assert_eq(d.head(2), dsk[('x', 0)].head(2))
assert_eq(d['a'].head(2), full['a'].head(2))
assert_eq(d['a'].head(3), full['a'].head(3))
assert_eq(d['a'].head(2), dsk[('x', 0)]['a'].head(2))
assert (sorted(d.head(2, compute=False).dask) ==
sorted(d.head(2, compute=False).dask))
assert (sorted(d.head(2, compute=False).dask) !=
sorted(d.head(3, compute=False).dask))
assert_eq(d.tail(2), full.tail(2))
assert_eq(d.tail(3), full.tail(3))
assert_eq(d.tail(2), dsk[('x', 2)].tail(2))
assert_eq(d['a'].tail(2), full['a'].tail(2))
assert_eq(d['a'].tail(3), full['a'].tail(3))
assert_eq(d['a'].tail(2), dsk[('x', 2)]['a'].tail(2))
assert (sorted(d.tail(2, compute=False).dask) ==
sorted(d.tail(2, compute=False).dask))
assert (sorted(d.tail(2, compute=False).dask) !=
sorted(d.tail(3, compute=False).dask))
def test_head_npartitions():
assert_eq(d.head(5, npartitions=2), full.head(5))
assert_eq(d.head(5, npartitions=2, compute=False), full.head(5))
assert_eq(d.head(5, npartitions=-1), full.head(5))
assert_eq(d.head(7, npartitions=-1), full.head(7))
assert_eq(d.head(2, npartitions=-1), full.head(2))
with pytest.raises(ValueError):
d.head(2, npartitions=5)
@pytest.mark.skipif(sys.version_info[:2] == (3, 3),
reason="Python3.3 uses pytest2.7.2, w/o warns method")
def test_head_npartitions_warn():
with pytest.warns(None):
d.head(100)
with pytest.warns(None):
d.head(7)
with pytest.warns(None):
d.head(7, npartitions=2)
def test_index_head():
assert_eq(d.index.head(2), full.index[:2])
assert_eq(d.index.head(3), full.index[:3])
def test_Series():
assert isinstance(d.a, dd.Series)
assert isinstance(d.a + 1, dd.Series)
assert_eq((d + 1), full + 1)
def test_Index():
for case in [pd.DataFrame(np.random.randn(10, 5), index=list('abcdefghij')),
pd.DataFrame(np.random.randn(10, 5),
index=pd.date_range('2011-01-01', freq='D',
periods=10))]:
ddf = dd.from_pandas(case, 3)
assert_eq(ddf.index, case.index)
pytest.raises(AttributeError, lambda: ddf.index.index)
def test_Scalar():
val = np.int64(1)
s = Scalar({('a', 0): val}, 'a', 'i8')
assert hasattr(s, 'dtype')
assert 'dtype' in dir(s)
assert_eq(s, val)
assert repr(s) == "dd.Scalar<a, dtype=int64>"
val = pd.Timestamp('2001-01-01')
s = Scalar({('a', 0): val}, 'a', val)
assert not hasattr(s, 'dtype')
assert 'dtype' not in dir(s)
assert_eq(s, val)
assert repr(s) == "dd.Scalar<a, type=Timestamp>"
def test_attributes():
assert 'a' in dir(d)
assert 'foo' not in dir(d)
pytest.raises(AttributeError, lambda: d.foo)
df = dd.from_pandas(pd.DataFrame({'a b c': [1, 2, 3]}), npartitions=2)
assert 'a b c' not in dir(df)
df = dd.from_pandas(pd.DataFrame({'a': [1, 2], 5: [1, 2]}), npartitions=2)
assert 'a' in dir(df)
assert 5 not in dir(df)
df = dd.from_pandas(tm.makeTimeDataFrame(), npartitions=3)
pytest.raises(AttributeError, lambda: df.foo)
def test_column_names():
tm.assert_index_equal(d.columns, pd.Index(['a', 'b']))
tm.assert_index_equal(d[['b', 'a']].columns, pd.Index(['b', 'a']))
assert d['a'].name == 'a'
assert (d['a'] + 1).name == 'a'
assert (d['a'] + d['b']).name is None
def test_index_names():
assert d.index.name is None
idx = pd.Index([0, 1, 2, 3, 4, 5, 6, 7, 8, 9], name='x')
df = pd.DataFrame(np.random.randn(10, 5), idx)
ddf = dd.from_pandas(df, 3)
assert ddf.index.name == 'x'
assert ddf.index.compute().name == 'x'
def test_set_index():
dsk = {('x', 0): pd.DataFrame({'a': [1, 2, 3], 'b': [4, 2, 6]},
index=[0, 1, 3]),
('x', 1): pd.DataFrame({'a': [4, 5, 6], 'b': [3, 5, 8]},
index=[5, 6, 8]),
('x', 2): pd.DataFrame({'a': [7, 8, 9], 'b': [9, 1, 8]},
index=[9, 9, 9])}
d = dd.DataFrame(dsk, 'x', meta, [0, 4, 9, 9])
full = d.compute()
d2 = d.set_index('b', npartitions=3)
assert d2.npartitions == 3
assert d2.index.name == 'b'
assert_eq(d2, full.set_index('b'))
d3 = d.set_index(d.b, npartitions=3)
assert d3.npartitions == 3
assert d3.index.name == 'b'
assert_eq(d3, full.set_index(full.b))
d4 = d.set_index('b')
assert d4.index.name == 'b'
assert_eq(d4, full.set_index('b'))
def test_set_index_interpolate():
df = pd.DataFrame({'x': [4, 1, 1, 3, 3], 'y': [1., 1, 1, 1, 2]})
d = dd.from_pandas(df, 2)
d1 = d.set_index('x', npartitions=3)
assert d1.npartitions == 3
assert set(d1.divisions) == set([1, 2, 3, 4])
d2 = d.set_index('y', npartitions=3)
assert d2.divisions[0] == 1.
assert 1. < d2.divisions[1] < d2.divisions[2] < 2.
assert d2.divisions[3] == 2.
def test_set_index_interpolate_int():
L = sorted(list(range(0, 200, 10)) * 2)
df = pd.DataFrame({'x': 2 * L})
d = dd.from_pandas(df, 2)
d1 = d.set_index('x', npartitions=10)
assert all(np.issubdtype(type(x), np.integer) for x in d1.divisions)
def test_set_index_timezone():
s_naive = pd.Series(pd.date_range('20130101', periods=3))
s_aware = pd.Series(pd.date_range('20130101', periods=3, tz='US/Eastern'))
df = pd.DataFrame({'tz': s_aware, 'notz': s_naive})
d = dd.from_pandas(df, 2)
d1 = d.set_index('notz', npartitions=2)
s1 = pd.DatetimeIndex(s_naive.values, dtype=s_naive.dtype)
assert d1.divisions[0] == s_naive[0] == s1[0]
assert d1.divisions[-1] == s_naive[2] == s1[2]
# We currently lose "freq". Converting data with pandas-defined dtypes
# to numpy or pure Python can be lossy like this.
d2 = d.set_index('tz', npartitions=2)
s2 = pd.DatetimeIndex(s_aware, dtype=s_aware.dtype)
assert d2.divisions[0] == s2[0]
assert d2.divisions[-1] == s2[2]
assert d2.divisions[0].tz == s2[0].tz
assert d2.divisions[0].tz is not None
s2badtype = pd.DatetimeIndex(s_aware.values, dtype=s_naive.dtype)
with pytest.raises(TypeError):
d2.divisions[0] == s2badtype[0]
@pytest.mark.parametrize(
'npartitions',
[1, pytest.mark.xfail(2, reason='pandas join removes freq')]
)
def test_timezone_freq(npartitions):
s_naive = pd.Series(pd.date_range('20130101', periods=10))
s_aware = pd.Series(pd.date_range('20130101', periods=10, tz='US/Eastern'))
pdf = pd.DataFrame({'tz': s_aware, 'notz': s_naive})
ddf = dd.from_pandas(pdf, npartitions=npartitions)
assert pdf.tz[0].freq == ddf.compute().tz[0].freq == ddf.tz.compute()[0].freq
@pytest.mark.parametrize('drop', [True, False])
def test_set_index_drop(drop):
pdf = pd.DataFrame({'A': list('ABAABBABAA'),
'B': [1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
'C': [1, 2, 3, 2, 1, 3, 2, 4, 2, 3]})
ddf = dd.from_pandas(pdf, 3)
assert_eq(ddf.set_index('A', drop=drop),
pdf.set_index('A', drop=drop))
assert_eq(ddf.set_index('B', drop=drop),
pdf.set_index('B', drop=drop))
assert_eq(ddf.set_index('C', drop=drop),
pdf.set_index('C', drop=drop))
assert_eq(ddf.set_index(ddf.A, drop=drop),
pdf.set_index(pdf.A, drop=drop))
assert_eq(ddf.set_index(ddf.B, drop=drop),
pdf.set_index(pdf.B, drop=drop))
assert_eq(ddf.set_index(ddf.C, drop=drop),
pdf.set_index(pdf.C, drop=drop))
# numeric columns
pdf = pd.DataFrame({0: list('ABAABBABAA'),
1: [1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
2: [1, 2, 3, 2, 1, 3, 2, 4, 2, 3]})
ddf = dd.from_pandas(pdf, 3)
assert_eq(ddf.set_index(0, drop=drop),
pdf.set_index(0, drop=drop))
assert_eq(ddf.set_index(2, drop=drop),
pdf.set_index(2, drop=drop))
def test_set_index_raises_error_on_bad_input():
df = pd.DataFrame({'a': [1, 2, 3, 4, 5, 6, 7],
'b': [7, 6, 5, 4, 3, 2, 1]})
ddf = dd.from_pandas(df, 2)
msg = r"Dask dataframe does not yet support multi-indexes"
with tm.assertRaisesRegexp(NotImplementedError, msg):
ddf.set_index(['a', 'b'])
def test_rename_columns():
# GH 819
df = pd.DataFrame({'a': [1, 2, 3, 4, 5, 6, 7],
'b': [7, 6, 5, 4, 3, 2, 1]})
ddf = dd.from_pandas(df, 2)
ddf.columns = ['x', 'y']
df.columns = ['x', 'y']
tm.assert_index_equal(ddf.columns, pd.Index(['x', 'y']))
tm.assert_index_equal(ddf._meta.columns, pd.Index(['x', 'y']))
assert_eq(ddf, df)
msg = r"Length mismatch: Expected axis has 2 elements, new values have 4 elements"
with tm.assertRaisesRegexp(ValueError, msg):
ddf.columns = [1, 2, 3, 4]
# Multi-index columns
df = pd.DataFrame({('A', '0') : [1, 2, 2, 3], ('B', 1) : [1, 2, 3, 4]})
ddf = dd.from_pandas(df, npartitions=2)
df.columns = ['x', 'y']
ddf.columns = ['x', 'y']
tm.assert_index_equal(ddf.columns, pd.Index(['x', 'y']))
tm.assert_index_equal(ddf._meta.columns, pd.Index(['x', 'y']))
assert_eq(ddf, df)
def test_rename_series():
# GH 819
s = pd.Series([1, 2, 3, 4, 5, 6, 7], name='x')
ds = dd.from_pandas(s, 2)
s.name = 'renamed'
ds.name = 'renamed'
assert s.name == 'renamed'
assert_eq(ds, s)
ind = s.index
dind = ds.index
ind.name = 'renamed'
dind.name = 'renamed'
assert ind.name == 'renamed'
assert_eq(dind, ind)
def test_describe():
# prepare test case which approx quantiles will be the same as actuals
s = pd.Series(list(range(20)) * 4)
df = pd.DataFrame({'a': list(range(20)) * 4, 'b': list(range(4)) * 20})
ds = dd.from_pandas(s, 4)
ddf = dd.from_pandas(df, 4)
assert_eq(s.describe(), ds.describe())
assert_eq(df.describe(), ddf.describe())
assert_eq(s.describe(), ds.describe(split_every=2))
assert_eq(df.describe(), ddf.describe(split_every=2))
assert ds.describe(split_every=2)._name != ds.describe()._name
assert ddf.describe(split_every=2)._name != ddf.describe()._name
# remove string columns
df = pd.DataFrame({'a': list(range(20)) * 4, 'b': list(range(4)) * 20,
'c': list('abcd') * 20})
ddf = dd.from_pandas(df, 4)
assert_eq(df.describe(), ddf.describe())
assert_eq(df.describe(), ddf.describe(split_every=2))
def test_cumulative():
df = pd.DataFrame(np.random.randn(100, 5), columns=list('abcde'))
ddf = dd.from_pandas(df, 5)
assert_eq(ddf.cumsum(), df.cumsum())
assert_eq(ddf.cumprod(), df.cumprod())
assert_eq(ddf.cummin(), df.cummin())
assert_eq(ddf.cummax(), df.cummax())
assert_eq(ddf.cumsum(axis=1), df.cumsum(axis=1))
assert_eq(ddf.cumprod(axis=1), df.cumprod(axis=1))
assert_eq(ddf.cummin(axis=1), df.cummin(axis=1))
assert_eq(ddf.cummax(axis=1), df.cummax(axis=1))
assert_eq(ddf.a.cumsum(), df.a.cumsum())
assert_eq(ddf.a.cumprod(), df.a.cumprod())
assert_eq(ddf.a.cummin(), df.a.cummin())
assert_eq(ddf.a.cummax(), df.a.cummax())
# With NaNs
df = pd.DataFrame({'a': [1, 2, np.nan, 4, 5, 6, 7, 8],
'b': [1, 2, np.nan, np.nan, np.nan, 5, np.nan, np.nan],
'c': [np.nan] * 8})
ddf = dd.from_pandas(df, 3)
assert_eq(df.cumsum(), ddf.cumsum())
assert_eq(df.cummin(), ddf.cummin())
assert_eq(df.cummax(), ddf.cummax())
assert_eq(df.cumprod(), ddf.cumprod())
assert_eq(df.cumsum(skipna=False), ddf.cumsum(skipna=False))
assert_eq(df.cummin(skipna=False), ddf.cummin(skipna=False))
assert_eq(df.cummax(skipna=False), ddf.cummax(skipna=False))
assert_eq(df.cumprod(skipna=False), ddf.cumprod(skipna=False))
assert_eq(df.cumsum(axis=1), ddf.cumsum(axis=1))
assert_eq(df.cummin(axis=1), ddf.cummin(axis=1))
assert_eq(df.cummax(axis=1), ddf.cummax(axis=1))
assert_eq(df.cumprod(axis=1), ddf.cumprod(axis=1))
assert_eq(df.cumsum(axis=1, skipna=False), ddf.cumsum(axis=1, skipna=False))
assert_eq(df.cummin(axis=1, skipna=False), ddf.cummin(axis=1, skipna=False))
assert_eq(df.cummax(axis=1, skipna=False), ddf.cummax(axis=1, skipna=False))
assert_eq(df.cumprod(axis=1, skipna=False), ddf.cumprod(axis=1, skipna=False))
def test_dropna():
df = pd.DataFrame({'x': [np.nan, 2, 3, 4, np.nan, 6],
'y': [1, 2, np.nan, 4, np.nan, np.nan],
'z': [1, 2, 3, 4, np.nan, np.nan]},
index=[10, 20, 30, 40, 50, 60])
ddf = dd.from_pandas(df, 3)
assert_eq(ddf.x.dropna(), df.x.dropna())
assert_eq(ddf.y.dropna(), df.y.dropna())
assert_eq(ddf.z.dropna(), df.z.dropna())
assert_eq(ddf.dropna(), df.dropna())
assert_eq(ddf.dropna(how='all'), df.dropna(how='all'))
assert_eq(ddf.dropna(subset=['x']), df.dropna(subset=['x']))
assert_eq(ddf.dropna(subset=['y', 'z']), df.dropna(subset=['y', 'z']))
assert_eq(ddf.dropna(subset=['y', 'z'], how='all'),
df.dropna(subset=['y', 'z'], how='all'))
@pytest.mark.parametrize('lower, upper', [(2, 5), (2.5, 3.5)])
def test_clip(lower, upper):
df = pd.DataFrame({'a': [1, 2, 3, 4, 5, 6, 7, 8, 9],
'b': [3, 5, 2, 5, 7, 2, 4, 2, 4]})
ddf = dd.from_pandas(df, 3)
s = pd.Series([1, 2, 3, 4, 5, 6, 7, 8, 9])
ds = dd.from_pandas(s, 3)
assert_eq(ddf.clip(lower=lower, upper=upper),
df.clip(lower=lower, upper=upper))
assert_eq(ddf.clip(lower=lower), df.clip(lower=lower))
assert_eq(ddf.clip(upper=upper), df.clip(upper=upper))
assert_eq(ds.clip(lower=lower, upper=upper),
s.clip(lower=lower, upper=upper))
assert_eq(ds.clip(lower=lower), s.clip(lower=lower))
assert_eq(ds.clip(upper=upper), s.clip(upper=upper))
assert_eq(ddf.clip_lower(lower), df.clip_lower(lower))
assert_eq(ddf.clip_lower(upper), df.clip_lower(upper))
assert_eq(ddf.clip_upper(lower), df.clip_upper(lower))
assert_eq(ddf.clip_upper(upper), df.clip_upper(upper))
assert_eq(ds.clip_lower(lower), s.clip_lower(lower))
assert_eq(ds.clip_lower(upper), s.clip_lower(upper))
assert_eq(ds.clip_upper(lower), s.clip_upper(lower))
assert_eq(ds.clip_upper(upper), s.clip_upper(upper))
def test_where_mask():
pdf1 = pd.DataFrame({'a': [1, 2, 3, 4, 5, 6, 7, 8, 9],
'b': [3, 5, 2, 5, 7, 2, 4, 2, 4]})
ddf1 = dd.from_pandas(pdf1, 2)
pdf2 = pd.DataFrame({'a': [True, False, True] * 3,
'b': [False, False, True] * 3})
ddf2 = dd.from_pandas(pdf2, 2)
# different index
pdf3 = pd.DataFrame({'a': [1, 2, 3, 4, 5, 6, 7, 8, 9],
'b': [3, 5, 2, 5, 7, 2, 4, 2, 4]},
index=[0, 1, 2, 3, 4, 5, 6, 7, 8])
ddf3 = dd.from_pandas(pdf3, 2)
pdf4 = pd.DataFrame({'a': [True, False, True] * 3,
'b': [False, False, True] * 3},
index=[5, 6, 7, 8, 9, 10, 11, 12, 13])
ddf4 = dd.from_pandas(pdf4, 2)
# different columns
pdf5 = pd.DataFrame({'a': [1, 2, 3, 4, 5, 6, 7, 8, 9],
'b': [9, 4, 2, 6, 2, 3, 1, 6, 2],
'c': [5, 6, 7, 8, 9, 10, 11, 12, 13]},
index=[0, 1, 2, 3, 4, 5, 6, 7, 8])
ddf5 = dd.from_pandas(pdf5, 2)
pdf6 = pd.DataFrame({'a': [True, False, True] * 3,
'b': [False, False, True] * 3,
'd': [False] * 9,
'e': [True] * 9},
index=[5, 6, 7, 8, 9, 10, 11, 12, 13])
ddf6 = dd.from_pandas(pdf6, 2)
cases = [(ddf1, ddf2, pdf1, pdf2),
(ddf1.repartition([0, 3, 6, 8]), ddf2, pdf1, pdf2),
(ddf1, ddf4, pdf3, pdf4),
(ddf3.repartition([0, 4, 6, 8]), ddf4.repartition([5, 9, 10, 13]),
pdf3, pdf4),
(ddf5, ddf6, pdf5, pdf6),
(ddf5.repartition([0, 4, 7, 8]), ddf6, pdf5, pdf6),
# use pd.DataFrame as cond
(ddf1, pdf2, pdf1, pdf2),
(ddf1, pdf4, pdf3, pdf4),
(ddf5, pdf6, pdf5, pdf6)]
for ddf, ddcond, pdf, pdcond in cases:
assert isinstance(ddf, dd.DataFrame)
assert isinstance(ddcond, (dd.DataFrame, pd.DataFrame))
assert isinstance(pdf, pd.DataFrame)
assert isinstance(pdcond, pd.DataFrame)
assert_eq(ddf.where(ddcond), pdf.where(pdcond))
assert_eq(ddf.mask(ddcond), pdf.mask(pdcond))
assert_eq(ddf.where(ddcond, -ddf), pdf.where(pdcond, -pdf))
assert_eq(ddf.mask(ddcond, -ddf), pdf.mask(pdcond, -pdf))
# ToDo: Should work on pandas 0.17
# https://github.com/pydata/pandas/pull/10283
# assert_eq(ddf.where(ddcond.a, -ddf), pdf.where(pdcond.a, -pdf))
# assert_eq(ddf.mask(ddcond.a, -ddf), pdf.mask(pdcond.a, -pdf))
assert_eq(ddf.a.where(ddcond.a), pdf.a.where(pdcond.a))
assert_eq(ddf.a.mask(ddcond.a), pdf.a.mask(pdcond.a))
assert_eq(ddf.a.where(ddcond.a, -ddf.a), pdf.a.where(pdcond.a, -pdf.a))
assert_eq(ddf.a.mask(ddcond.a, -ddf.a), pdf.a.mask(pdcond.a, -pdf.a))
def test_map_partitions_multi_argument():
assert_eq(dd.map_partitions(lambda a, b: a + b, d.a, d.b),
full.a + full.b)
assert_eq(dd.map_partitions(lambda a, b, c: a + b + c, d.a, d.b, 1),
full.a + full.b + 1)
def test_map_partitions():
assert_eq(d.map_partitions(lambda df: df, meta=d), full)
assert_eq(d.map_partitions(lambda df: df), full)
result = d.map_partitions(lambda df: df.sum(axis=1))
assert_eq(result, full.sum(axis=1))
assert_eq(d.map_partitions(lambda df: 1), pd.Series([1, 1, 1], dtype=np.int64),
check_divisions=False)
x = Scalar({('x', 0): 1}, 'x', int)
result = dd.map_partitions(lambda x: 2, x)
assert result.dtype in (np.int32, np.int64) and result.compute() == 2
result = dd.map_partitions(lambda x: 4.0, x)
assert result.dtype == np.float64 and result.compute() == 4.0
def test_map_partitions_names():
func = lambda x: x
assert (sorted(dd.map_partitions(func, d, meta=d).dask) ==
sorted(dd.map_partitions(func, d, meta=d).dask))
assert (sorted(dd.map_partitions(lambda x: x, d, meta=d, token=1).dask) ==
sorted(dd.map_partitions(lambda x: x, d, meta=d, token=1).dask))
func = lambda x, y: x
assert (sorted(dd.map_partitions(func, d, d, meta=d).dask) ==
sorted(dd.map_partitions(func, d, d, meta=d).dask))
def test_map_partitions_column_info():
df = pd.DataFrame({'x': [1, 2, 3, 4], 'y': [5, 6, 7, 8]})
a = dd.from_pandas(df, npartitions=2)
b = dd.map_partitions(lambda x: x, a, meta=a)
tm.assert_index_equal(b.columns, a.columns)
assert_eq(df, b)
b = dd.map_partitions(lambda x: x, a.x, meta=a.x)
assert b.name == a.x.name
assert_eq(df.x, b)
b = dd.map_partitions(lambda x: x, a.x, meta=a.x)
assert b.name == a.x.name
assert_eq(df.x, b)
b = dd.map_partitions(lambda df: df.x + df.y, a)
assert isinstance(b, dd.Series)
assert b.dtype == 'i8'
b = dd.map_partitions(lambda df: df.x + 1, a, meta=('x', 'i8'))
assert isinstance(b, dd.Series)
assert b.name == 'x'
assert b.dtype == 'i8'
def test_map_partitions_method_names():
df = pd.DataFrame({'x': [1, 2, 3, 4], 'y': [5, 6, 7, 8]})
a = dd.from_pandas(df, npartitions=2)
b = a.map_partitions(lambda x: x)
assert isinstance(b, dd.DataFrame)
tm.assert_index_equal(b.columns, a.columns)
b = a.map_partitions(lambda df: df.x + 1)
assert isinstance(b, dd.Series)
assert b.dtype == 'i8'
b = a.map_partitions(lambda df: df.x + 1, meta=('x', 'i8'))
assert isinstance(b, dd.Series)
assert b.name == 'x'
assert b.dtype == 'i8'
def test_map_partitions_keeps_kwargs_in_dict():
df = pd.DataFrame({'x': [1, 2, 3, 4], 'y': [5, 6, 7, 8]})
a = dd.from_pandas(df, npartitions=2)
def f(s, x=1):
return s + x
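    # the keyword is expected to be stored as a literal dict entry in the
    # task graph (not closed over), which the substring check on the graph
    # repr below relies on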
b = a.x.map_partitions(f, x=5)
assert "'x': 5" in str(b.dask)
assert_eq(df.x + 5, b)
assert a.x.map_partitions(f, x=5)._name != a.x.map_partitions(f, x=6)._name
def test_drop_duplicates():
res = d.drop_duplicates()
res2 = d.drop_duplicates(split_every=2)
sol = full.drop_duplicates()
assert_eq(res, sol)
assert_eq(res2, sol)
assert res._name != res2._name
res = d.a.drop_duplicates()
res2 = d.a.drop_duplicates(split_every=2)
sol = full.a.drop_duplicates()
assert_eq(res, sol)
assert_eq(res2, sol)
assert res._name != res2._name
res = d.index.drop_duplicates()
res2 = d.index.drop_duplicates(split_every=2)
sol = full.index.drop_duplicates()
assert_eq(res, sol)
assert_eq(res2, sol)
assert res._name != res2._name
with pytest.raises(NotImplementedError):
d.drop_duplicates(keep=False)
def test_drop_duplicates_subset():
df = pd.DataFrame({'x': [1, 2, 3, 1, 2, 3],
'y': ['a', 'a', 'b', 'b', 'c', 'c']})
ddf = dd.from_pandas(df, npartitions=2)
for kwarg in [{'keep': 'first'}, {'keep': 'last'}]:
assert_eq(df.x.drop_duplicates(**kwarg),
ddf.x.drop_duplicates(**kwarg))
for ss in [['x'], 'y', ['x', 'y']]:
assert_eq(df.drop_duplicates(subset=ss, **kwarg),
ddf.drop_duplicates(subset=ss, **kwarg))
def test_set_partition():
d2 = d.set_index('b', divisions=[0, 2, 9])
assert d2.divisions == (0, 2, 9)
expected = full.set_index('b')
assert_eq(d2, expected)
def test_set_partition_compute():
d2 = d.set_index('b', divisions=[0, 2, 9], compute=False)
d3 = d.set_index('b', divisions=[0, 2, 9], compute=True)
assert_eq(d2, d3)
assert_eq(d2, full.set_index('b'))
assert_eq(d3, full.set_index('b'))
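    # compute=True presumably does part of the shuffle work eagerly,
    # which is why its graph ends up smaller than the lazy variant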
assert len(d2.dask) > len(d3.dask)
d4 = d.set_index(d.b, divisions=[0, 2, 9], compute=False)
d5 = d.set_index(d.b, divisions=[0, 2, 9], compute=True)
exp = full.copy()
exp.index = exp.b
assert_eq(d4, d5)
assert_eq(d4, exp)
assert_eq(d5, exp)
assert len(d4.dask) > len(d5.dask)
def test_get_partition():
pdf = pd.DataFrame(np.random.randn(10, 5), columns=list('abcde'))
ddf = dd.from_pandas(pdf, 3)
assert ddf.divisions == (0, 4, 8, 9)
# DataFrame
div1 = ddf.get_partition(0)
assert isinstance(div1, dd.DataFrame)
assert_eq(div1, pdf.loc[0:3])
div2 = ddf.get_partition(1)
assert_eq(div2, pdf.loc[4:7])
div3 = ddf.get_partition(2)
assert_eq(div3, pdf.loc[8:9])
assert len(div1) + len(div2) + len(div3) == len(pdf)
# Series
div1 = ddf.a.get_partition(0)
assert isinstance(div1, dd.Series)
assert_eq(div1, pdf.a.loc[0:3])
div2 = ddf.a.get_partition(1)
assert_eq(div2, pdf.a.loc[4:7])
div3 = ddf.a.get_partition(2)
assert_eq(div3, pdf.a.loc[8:9])
assert len(div1) + len(div2) + len(div3) == len(pdf.a)
with tm.assertRaises(ValueError):
ddf.get_partition(-1)
with tm.assertRaises(ValueError):
ddf.get_partition(3)
def test_ndim():
assert (d.ndim == 2)
assert (d.a.ndim == 1)
assert (d.index.ndim == 1)
def test_dtype():
assert (d.dtypes == full.dtypes).all()
def test_cache():
d2 = d.cache()
assert all(task[0] == getitem for task in d2.dask.values())
assert_eq(d2.a, d.a)
def test_value_counts():
df = pd.DataFrame({'x': [1, 2, 1, 3, 3, 1, 4]})
ddf = dd.from_pandas(df, npartitions=3)
result = ddf.x.value_counts()
expected = df.x.value_counts()
assert_eq(result, expected)
result2 = ddf.x.value_counts(split_every=2)
assert_eq(result2, expected)
assert result._name != result2._name
def test_unique():
pdf = pd.DataFrame({'x': [1, 2, 1, 3, 3, 1, 4, 2, 3, 1],
'y': ['a', 'c', 'b', np.nan, 'c',
'b', 'a', 'd', np.nan, 'a']})
ddf = dd.from_pandas(pdf, npartitions=3)
assert_eq(ddf.x.unique(), pd.Series(pdf.x.unique(), name='x'))
assert_eq(ddf.y.unique(), pd.Series(pdf.y.unique(), name='y'))
assert_eq(ddf.x.unique(split_every=2),
pd.Series(pdf.x.unique(), name='x'))
assert_eq(ddf.y.unique(split_every=2),
pd.Series(pdf.y.unique(), name='y'))
assert ddf.x.unique(split_every=2)._name != ddf.x.unique()._name
def test_isin():
assert_eq(d.a.isin([0, 1, 2]), full.a.isin([0, 1, 2]))
assert_eq(d.a.isin(pd.Series([0, 1, 2])),
full.a.isin(pd.Series([0, 1, 2])))
def test_len():
assert len(d) == len(full)
assert len(d.a) == len(full.a)
def test_size():
assert_eq(d.size, full.size)
assert_eq(d.a.size, full.a.size)
assert_eq(d.index.size, full.index.size)
def test_nbytes():
assert_eq(d.a.nbytes, full.a.nbytes)
assert_eq(d.index.nbytes, full.index.nbytes)
def test_quantile():
# series / multiple
result = d.b.quantile([.3, .7])
    exp = full.b.quantile([.3, .7])  # results may differ (dask quantiles are approximate)
assert len(result) == 2
assert result.divisions == (.3, .7)
assert_eq(result.index, exp.index)
assert isinstance(result, dd.Series)
result = result.compute()
assert isinstance(result, pd.Series)
assert result.iloc[0] == 0
assert 5 < result.iloc[1] < 6
# index
s = pd.Series(np.arange(10), index=np.arange(10))
ds = dd.from_pandas(s, 2)
result = ds.index.quantile([.3, .7])
exp = s.quantile([.3, .7])
assert len(result) == 2
assert result.divisions == (.3, .7)
assert_eq(result.index, exp.index)
assert isinstance(result, dd.Series)
result = result.compute()
assert isinstance(result, pd.Series)
assert 1 < result.iloc[0] < 2
assert 7 < result.iloc[1] < 8
# series / single
result = d.b.quantile(.5)
    exp = full.b.quantile(.5)  # results may differ (dask quantiles are approximate)
assert isinstance(result, dd.core.Scalar)
result = result.compute()
assert 4 < result < 6
def test_empty_quantile():
result = d.b.quantile([])
exp = full.b.quantile([])
assert result.divisions == (None, None)
# because of a pandas bug, name is not preserved
# https://github.com/pydata/pandas/pull/10881
assert result.name == 'b'
assert result.compute().name == 'b'
assert_eq(result, exp, check_names=False)
def test_dataframe_quantile():
    # column X is used to test column order and the result divisions
df = pd.DataFrame({'A': np.arange(20),
'X': np.arange(20, 40),
'B': np.arange(10, 30),
'C': ['a', 'b', 'c', 'd'] * 5},
columns=['A', 'X', 'B', 'C'])
ddf = dd.from_pandas(df, 3)
result = ddf.quantile()
assert result.npartitions == 1
assert result.divisions == ('A', 'X')
result = result.compute()
assert isinstance(result, pd.Series)
tm.assert_index_equal(result.index, pd.Index(['A', 'X', 'B']))
assert (result > pd.Series([16, 36, 26], index=['A', 'X', 'B'])).all()
assert (result < pd.Series([17, 37, 27], index=['A', 'X', 'B'])).all()
result = ddf.quantile([0.25, 0.75])
assert result.npartitions == 1
assert result.divisions == (0.25, 0.75)
result = result.compute()
assert isinstance(result, pd.DataFrame)
tm.assert_index_equal(result.index, pd.Index([0.25, 0.75]))
tm.assert_index_equal(result.columns, pd.Index(['A', 'X', 'B']))
minexp = pd.DataFrame([[1, 21, 11], [17, 37, 27]],
index=[0.25, 0.75], columns=['A', 'X', 'B'])
assert (result > minexp).all().all()
maxexp = pd.DataFrame([[2, 22, 12], [18, 38, 28]],
index=[0.25, 0.75], columns=['A', 'X', 'B'])
assert (result < maxexp).all().all()
assert_eq(ddf.quantile(axis=1), df.quantile(axis=1))
pytest.raises(ValueError, lambda: ddf.quantile([0.25, 0.75], axis=1))
def test_index():
assert_eq(d.index, full.index)
def test_assign():
d_unknown = dd.from_pandas(full, npartitions=3, sort=False)
assert not d_unknown.known_divisions
res = d.assign(c=1,
d='string',
e=d.a.sum(),
f=d.a + d.b)
res_unknown = d_unknown.assign(c=1,
d='string',
e=d_unknown.a.sum(),
f=d_unknown.a + d_unknown.b)
sol = full.assign(c=1,
d='string',
e=full.a.sum(),
f=full.a + full.b)
assert_eq(res, sol)
assert_eq(res_unknown, sol)
res = d.assign(c=full.a + 1)
assert_eq(res, full.assign(c=full.a + 1))
# divisions unknown won't work with pandas
with pytest.raises(ValueError):
d_unknown.assign(c=full.a + 1)
# unsupported type
with pytest.raises(TypeError):
d.assign(c=list(range(9)))
# Fails when assigning known divisions to unknown divisions
with pytest.raises(ValueError):
d_unknown.assign(foo=d.a)
# Fails when assigning unknown divisions to known divisions
with pytest.raises(ValueError):
d.assign(foo=d_unknown.a)
def test_map():
assert_eq(d.a.map(lambda x: x + 1), full.a.map(lambda x: x + 1))
lk = dict((v, v + 1) for v in full.a.values)
assert_eq(d.a.map(lk), full.a.map(lk))
assert_eq(d.b.map(lk), full.b.map(lk))
lk = pd.Series(lk)
assert_eq(d.a.map(lk), full.a.map(lk))
assert_eq(d.b.map(lk), full.b.map(lk))
assert_eq(d.b.map(lk, meta=d.b), full.b.map(lk))
assert_eq(d.b.map(lk, meta=('b', 'i8')), full.b.map(lk))
pytest.raises(TypeError, lambda: d.a.map(d.b))
def test_concat():
x = _concat([pd.DataFrame(columns=['a', 'b']),
pd.DataFrame(columns=['a', 'b'])])
assert list(x.columns) == ['a', 'b']
assert len(x) == 0
def test_args():
e = d.assign(c=d.a + 1)
f = type(e)(*e._args)
assert_eq(e, f)
assert_eq(d.a, type(d.a)(*d.a._args))
assert_eq(d.a.sum(), type(d.a.sum())(*d.a.sum()._args))
def test_known_divisions():
assert d.known_divisions
df = dd.DataFrame(dsk, 'x', meta, divisions=[None, None, None])
assert not df.known_divisions
def test_unknown_divisions():
dsk = {('x', 0): pd.DataFrame({'a': [1, 2, 3], 'b': [4, 5, 6]}),
('x', 1): pd.DataFrame({'a': [4, 5, 6], 'b': [3, 2, 1]}),
('x', 2): pd.DataFrame({'a': [7, 8, 9], 'b': [0, 0, 0]})}
meta = make_meta({'a': 'i8', 'b': 'i8'})
d = dd.DataFrame(dsk, 'x', meta, [None, None, None, None])
full = d.compute(get=dask.get)
assert_eq(d.a.sum(), full.a.sum())
assert_eq(d.a + d.b + 1, full.a + full.b + 1)
@pytest.mark.parametrize('join', ['inner', 'outer', 'left', 'right'])
def test_align(join):
df1a = pd.DataFrame({'A': np.random.randn(10),
'B': np.random.randn(10)},
index=[1, 12, 5, 6, 3, 9, 10, 4, 13, 11])
df1b = pd.DataFrame({'A': np.random.randn(10),
'B': np.random.randn(10)},
index=[0, 3, 2, 10, 5, 6, 7, 8, 12, 13])
ddf1a = dd.from_pandas(df1a, 3)
ddf1b = dd.from_pandas(df1b, 3)
# DataFrame
res1, res2 = ddf1a.align(ddf1b, join=join)
exp1, exp2 = df1a.align(df1b, join=join)
assert assert_eq(res1, exp1)
assert assert_eq(res2, exp2)
# Series
res1, res2 = ddf1a['A'].align(ddf1b['B'], join=join)
exp1, exp2 = df1a['A'].align(df1b['B'], join=join)
assert assert_eq(res1, exp1)
assert assert_eq(res2, exp2)
# DataFrame with fill_value
res1, res2 = ddf1a.align(ddf1b, join=join, fill_value=1)
exp1, exp2 = df1a.align(df1b, join=join, fill_value=1)
assert assert_eq(res1, exp1)
assert assert_eq(res2, exp2)
# Series
res1, res2 = ddf1a['A'].align(ddf1b['B'], join=join, fill_value=1)
exp1, exp2 = df1a['A'].align(df1b['B'], join=join, fill_value=1)
assert assert_eq(res1, exp1)
assert assert_eq(res2, exp2)
@pytest.mark.parametrize('join', ['inner', 'outer', 'left', 'right'])
def test_align_axis(join):
df1a = pd.DataFrame({'A': np.random.randn(10),
'B': np.random.randn(10),
'C': np.random.randn(10)},
index=[1, 12, 5, 6, 3, 9, 10, 4, 13, 11])
df1b = pd.DataFrame({'B': np.random.randn(10),
'C': np.random.randn(10),
'D': np.random.randn(10)},
index=[0, 3, 2, 10, 5, 6, 7, 8, 12, 13])
ddf1a = dd.from_pandas(df1a, 3)
ddf1b = dd.from_pandas(df1b, 3)
res1, res2 = ddf1a.align(ddf1b, join=join, axis=0)
exp1, exp2 = df1a.align(df1b, join=join, axis=0)
assert assert_eq(res1, exp1)
assert assert_eq(res2, exp2)
res1, res2 = ddf1a.align(ddf1b, join=join, axis=1)
exp1, exp2 = df1a.align(df1b, join=join, axis=1)
assert assert_eq(res1, exp1)
assert assert_eq(res2, exp2)
res1, res2 = ddf1a.align(ddf1b, join=join, axis='index')
exp1, exp2 = df1a.align(df1b, join=join, axis='index')
assert assert_eq(res1, exp1)
assert assert_eq(res2, exp2)
res1, res2 = ddf1a.align(ddf1b, join=join, axis='columns')
exp1, exp2 = df1a.align(df1b, join=join, axis='columns')
assert assert_eq(res1, exp1)
assert assert_eq(res2, exp2)
# invalid
with tm.assertRaises(ValueError):
ddf1a.align(ddf1b, join=join, axis='XXX')
with tm.assertRaises(ValueError):
ddf1a['A'].align(ddf1b['B'], join=join, axis=1)
def test_combine():
df1 = pd.DataFrame({'A': np.random.choice([1, 2, np.nan], 100),
'B': np.random.choice(['a', 'b', np.nan], 100)})
df2 = pd.DataFrame({'A': np.random.choice([1, 2, 3], 100),
'B': np.random.choice(['a', 'b', 'c'], 100)})
ddf1 = dd.from_pandas(df1, 4)
ddf2 = dd.from_pandas(df2, 5)
first = lambda a, b: a
# DataFrame
for da, db, a, b in [(ddf1, ddf2, df1, df2),
(ddf1.A, ddf2.A, df1.A, df2.A),
(ddf1.B, ddf2.B, df1.B, df2.B)]:
for func, fill_value in [(add, None), (add, 100), (first, None)]:
sol = a.combine(b, func, fill_value=fill_value)
assert_eq(da.combine(db, func, fill_value=fill_value), sol)
assert_eq(da.combine(b, func, fill_value=fill_value), sol)
assert_eq(ddf1.combine(ddf2, add, overwrite=False),
df1.combine(df2, add, overwrite=False))
assert da.combine(db, add)._name == da.combine(db, add)._name
def test_combine_first():
df1 = pd.DataFrame({'A': np.random.choice([1, 2, np.nan], 100),
'B': np.random.choice(['a', 'b', np.nan], 100)})
df2 = pd.DataFrame({'A': np.random.choice([1, 2, 3], 100),
'B': np.random.choice(['a', 'b', 'c'], 100)})
ddf1 = dd.from_pandas(df1, 4)
ddf2 = dd.from_pandas(df2, 5)
# DataFrame
assert_eq(ddf1.combine_first(ddf2), df1.combine_first(df2))
assert_eq(ddf1.combine_first(df2), df1.combine_first(df2))
# Series
assert_eq(ddf1.A.combine_first(ddf2.A), df1.A.combine_first(df2.A))
assert_eq(ddf1.A.combine_first(df2.A), df1.A.combine_first(df2.A))
assert_eq(ddf1.B.combine_first(ddf2.B), df1.B.combine_first(df2.B))
assert_eq(ddf1.B.combine_first(df2.B), df1.B.combine_first(df2.B))
def test_dataframe_picklable():
from pickle import loads, dumps
cloudpickle = pytest.importorskip('cloudpickle')
cp_dumps = cloudpickle.dumps
d = tm.makeTimeDataFrame()
df = dd.from_pandas(d, npartitions=3)
df = df + 2
# dataframe
df2 = loads(dumps(df))
assert_eq(df, df2)
df2 = loads(cp_dumps(df))
assert_eq(df, df2)
# series
a2 = loads(dumps(df.A))
assert_eq(df.A, a2)
a2 = loads(cp_dumps(df.A))
assert_eq(df.A, a2)
# index
i2 = loads(dumps(df.index))
assert_eq(df.index, i2)
i2 = loads(cp_dumps(df.index))
assert_eq(df.index, i2)
# scalar
# lambdas are present, so only test cloudpickle
s = df.A.sum()
s2 = loads(cp_dumps(s))
assert_eq(s, s2)
def test_random_partitions():
a, b = d.random_split([0.5, 0.5], 42)
assert isinstance(a, dd.DataFrame)
assert isinstance(b, dd.DataFrame)
assert a._name != b._name
assert len(a.compute()) + len(b.compute()) == len(full)
a2, b2 = d.random_split([0.5, 0.5], 42)
assert a2._name == a._name
assert b2._name == b._name
parts = d.random_split([0.4, 0.5, 0.1], 42)
names = set([p._name for p in parts])
names.update([a._name, b._name])
assert len(names) == 5
with pytest.raises(ValueError):
d.random_split([0.4, 0.5], 42)
def test_series_round():
ps = pd.Series([1.123, 2.123, 3.123, 1.234, 2.234, 3.234], name='a')
s = dd.from_pandas(ps, npartitions=3)
assert_eq(s.round(), ps.round())
def test_set_partition_2():
df = pd.DataFrame({'x': [1, 2, 3, 4, 5, 6], 'y': list('abdabd')})
ddf = dd.from_pandas(df, 2)
result = ddf.set_index('y', divisions=['a', 'c', 'd'])
assert result.divisions == ('a', 'c', 'd')
assert list(result.compute(get=get_sync).index[-2:]) == ['d', 'd']
@pytest.mark.slow
def test_repartition():
def _check_split_data(orig, d):
"""Check data is split properly"""
keys = [k for k in d.dask if k[0].startswith('repartition-split')]
keys = sorted(keys)
sp = pd.concat([d._get(d.dask, k) for k in keys])
assert_eq(orig, sp)
assert_eq(orig, d)
df = pd.DataFrame({'x': [1, 2, 3, 4, 5, 6], 'y': list('abdabd')},
index=[10, 20, 30, 40, 50, 60])
a = dd.from_pandas(df, 2)
b = a.repartition(divisions=[10, 20, 50, 60])
assert b.divisions == (10, 20, 50, 60)
assert_eq(a, b)
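    # with divisions (10, 20, 50, 60) the first output partition covers
    # [10, 20), i.e. just the first row of df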
assert_eq(a._get(b.dask, (b._name, 0)), df.iloc[:1])
for div in [[20, 60], [10, 50], [1], # first / last element mismatch
[0, 60], [10, 70], # do not allow to expand divisions by default
[10, 50, 20, 60], # not sorted
[10, 10, 20, 60]]: # not unique (last element can be duplicated)
pytest.raises(ValueError, lambda: a.repartition(divisions=div))
pdf = pd.DataFrame(np.random.randn(7, 5), columns=list('abxyz'))
for p in range(1, 7):
ddf = dd.from_pandas(pdf, p)
assert_eq(ddf, pdf)
for div in [[0, 6], [0, 6, 6], [0, 5, 6], [0, 4, 6, 6],
[0, 2, 6], [0, 2, 6, 6],
[0, 2, 3, 6, 6], [0, 1, 2, 3, 4, 5, 6, 6]]:
rddf = ddf.repartition(divisions=div)
_check_split_data(ddf, rddf)
assert rddf.divisions == tuple(div)
assert_eq(pdf, rddf)
rds = ddf.x.repartition(divisions=div)
_check_split_data(ddf.x, rds)
assert rds.divisions == tuple(div)
assert_eq(pdf.x, rds)
# expand divisions
for div in [[-5, 10], [-2, 3, 5, 6], [0, 4, 5, 9, 10]]:
rddf = ddf.repartition(divisions=div, force=True)
_check_split_data(ddf, rddf)
assert rddf.divisions == tuple(div)
assert_eq(pdf, rddf)
rds = ddf.x.repartition(divisions=div, force=True)
_check_split_data(ddf.x, rds)
assert rds.divisions == tuple(div)
assert_eq(pdf.x, rds)
pdf = pd.DataFrame({'x': [0, 1, 2, 3, 4, 5, 6, 7, 8, 9],
'y': [9, 8, 7, 6, 5, 4, 3, 2, 1, 0]},
index=list('abcdefghij'))
for p in range(1, 7):
ddf = dd.from_pandas(pdf, p)
assert_eq(ddf, pdf)
for div in [list('aj'), list('ajj'), list('adj'),
list('abfj'), list('ahjj'), list('acdj'), list('adfij'),
list('abdefgij'), list('abcdefghij')]:
rddf = ddf.repartition(divisions=div)
_check_split_data(ddf, rddf)
assert rddf.divisions == tuple(div)
assert_eq(pdf, rddf)
rds = ddf.x.repartition(divisions=div)
_check_split_data(ddf.x, rds)
assert rds.divisions == tuple(div)
assert_eq(pdf.x, rds)
# expand divisions
for div in [list('Yadijm'), list('acmrxz'), list('Yajz')]:
rddf = ddf.repartition(divisions=div, force=True)
_check_split_data(ddf, rddf)
assert rddf.divisions == tuple(div)
assert_eq(pdf, rddf)
rds = ddf.x.repartition(divisions=div, force=True)
_check_split_data(ddf.x, rds)
assert rds.divisions == tuple(div)
assert_eq(pdf.x, rds)
def test_repartition_divisions():
result = repartition_divisions([0, 6], [0, 6, 6], 'a', 'b', 'c')
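    # each task is (boundary_slice, input_key, low, high, flag); the trailing
    # flag appears to mark whether the right boundary is inclusive (only the
    # last output partition keeps its right edge)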
assert result == {('b', 0): (boundary_slice, ('a', 0), 0, 6, False),
('b', 1): (boundary_slice, ('a', 0), 6, 6, True),
('c', 0): ('b', 0),
('c', 1): ('b', 1)}
result = repartition_divisions([1, 3, 7], [1, 4, 6, 7], 'a', 'b', 'c')
assert result == {('b', 0): (boundary_slice, ('a', 0), 1, 3, False),
('b', 1): (boundary_slice, ('a', 1), 3, 4, False),
('b', 2): (boundary_slice, ('a', 1), 4, 6, False),
('b', 3): (boundary_slice, ('a', 1), 6, 7, True),
('c', 0): (pd.concat, [('b', 0), ('b', 1)]),
('c', 1): ('b', 2),
('c', 2): ('b', 3)}
def test_repartition_on_pandas_dataframe():
df = pd.DataFrame({'x': [1, 2, 3, 4, 5, 6], 'y': list('abdabd')},
index=[10, 20, 30, 40, 50, 60])
ddf = dd.repartition(df, divisions=[10, 20, 50, 60])
assert isinstance(ddf, dd.DataFrame)
assert ddf.divisions == (10, 20, 50, 60)
assert_eq(ddf, df)
ddf = dd.repartition(df.y, divisions=[10, 20, 50, 60])
assert isinstance(ddf, dd.Series)
assert ddf.divisions == (10, 20, 50, 60)
assert_eq(ddf, df.y)
@pytest.mark.parametrize('use_index', [True, False])
@pytest.mark.parametrize('n', [1, 2, 4, 5])
@pytest.mark.parametrize('k', [1, 2, 4, 5])
@pytest.mark.parametrize('dtype', [int, float, 'M8[ns]'])
@pytest.mark.parametrize('transform', [lambda df: df, lambda df: df.x])
def test_repartition_npartitions(use_index, n, k, dtype, transform):
df = pd.DataFrame({'x': [1, 2, 3, 4, 5, 6] * 10,
'y': list('abdabd') * 10},
index=pd.Series([10, 20, 30, 40, 50, 60] * 10, dtype=dtype))
df = transform(df)
a = dd.from_pandas(df, npartitions=n, sort=use_index)
b = a.repartition(npartitions=k)
assert_eq(a, b)
assert b.npartitions == k
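    # no output partition should come back empty, whichever way we repartition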
parts = dask.get(b.dask, b._keys())
assert all(map(len, parts))
def test_repartition_object_index():
df = pd.DataFrame({'x': [1, 2, 3, 4, 5, 6] * 10},
index=list('abdabd') * 10)
a = dd.from_pandas(df, npartitions=5)
b = a.repartition(npartitions=2)
assert b.npartitions == 2
assert_eq(b, df)
b = a.repartition(npartitions=10)
assert b.npartitions == 10
assert_eq(b, df)
assert not b.known_divisions
@pytest.mark.slow
@pytest.mark.parametrize('npartitions', [1, 20, 243])
@pytest.mark.parametrize('freq', ['1D', '7D', '28h'])
@pytest.mark.parametrize('end', ['2000-04-15', '2000-04-15 12:37:01'])
@pytest.mark.parametrize('start', ['2000-01-01', '2000-01-01 12:30:00'])
def test_repartition_freq(npartitions, freq, start, end):
start = pd.Timestamp(start)
end = pd.Timestamp(end)
ind = pd.DatetimeIndex(start=start, end=end, freq='60s')
df = pd.DataFrame({'x': np.arange(len(ind))}, index=ind)
ddf = dd.from_pandas(df, npartitions=npartitions, name='x')
ddf2 = ddf.repartition(freq=freq)
assert_eq(ddf2, df)
def test_repartition_freq_divisions():
df = pd.DataFrame({'x': np.random.random(10)},
index=pd.DatetimeIndex(np.random.random(10) * 100e9))
ddf = dd.from_pandas(df, npartitions=3)
ddf2 = ddf.repartition(freq='15s')
for div in ddf2.divisions[1:-1]:
assert div == div.round('15s')
assert ddf2.divisions[0] == df.index.min()
assert ddf2.divisions[-1] == df.index.max()
assert_eq(ddf2, ddf2)
def test_repartition_freq_errors():
df = pd.DataFrame({'x': [1, 2, 3]})
ddf = dd.from_pandas(df, npartitions=1)
with pytest.raises(TypeError) as info:
ddf.repartition(freq='1s')
assert 'only' in str(info.value)
assert 'timeseries' in str(info.value)
def test_embarrassingly_parallel_operations():
df = pd.DataFrame({'x': [1, 2, 3, 4, None, 6], 'y': list('abdabd')},
index=[10, 20, 30, 40, 50, 60])
a = dd.from_pandas(df, 2)
assert_eq(a.x.astype('float32'), df.x.astype('float32'))
assert a.x.astype('float32').compute().dtype == 'float32'
assert_eq(a.x.dropna(), df.x.dropna())
assert_eq(a.x.between(2, 4), df.x.between(2, 4))
assert_eq(a.x.clip(2, 4), df.x.clip(2, 4))
assert_eq(a.x.notnull(), df.x.notnull())
assert_eq(a.x.isnull(), df.x.isnull())
assert_eq(a.notnull(), df.notnull())
assert_eq(a.isnull(), df.isnull())
assert len(a.sample(0.5).compute()) < len(df)
def test_fillna():
df = tm.makeMissingDataframe(0.8, 42)
ddf = dd.from_pandas(df, npartitions=5, sort=False)
assert_eq(ddf.fillna(100), df.fillna(100))
assert_eq(ddf.A.fillna(100), df.A.fillna(100))
assert_eq(ddf.fillna(method='pad'), df.fillna(method='pad'))
assert_eq(ddf.A.fillna(method='pad'), df.A.fillna(method='pad'))
assert_eq(ddf.fillna(method='bfill'), df.fillna(method='bfill'))
assert_eq(ddf.A.fillna(method='bfill'), df.A.fillna(method='bfill'))
assert_eq(ddf.fillna(method='pad', limit=2),
df.fillna(method='pad', limit=2))
assert_eq(ddf.A.fillna(method='pad', limit=2),
df.A.fillna(method='pad', limit=2))
assert_eq(ddf.fillna(method='bfill', limit=2),
df.fillna(method='bfill', limit=2))
assert_eq(ddf.A.fillna(method='bfill', limit=2),
df.A.fillna(method='bfill', limit=2))
assert_eq(ddf.fillna(100, axis=1), df.fillna(100, axis=1))
assert_eq(ddf.fillna(method='pad', axis=1), df.fillna(method='pad', axis=1))
assert_eq(ddf.fillna(method='pad', limit=2, axis=1),
df.fillna(method='pad', limit=2, axis=1))
pytest.raises(ValueError, lambda: ddf.A.fillna(0, axis=1))
pytest.raises(NotImplementedError, lambda: ddf.fillna(0, limit=10))
pytest.raises(NotImplementedError, lambda: ddf.fillna(0, limit=10, axis=1))
df = tm.makeMissingDataframe(0.2, 42)
ddf = dd.from_pandas(df, npartitions=5, sort=False)
pytest.raises(ValueError, lambda: ddf.fillna(method='pad').compute())
assert_eq(df.fillna(method='pad', limit=3),
ddf.fillna(method='pad', limit=3))
def test_fillna_multi_dataframe():
df = tm.makeMissingDataframe(0.8, 42)
ddf = dd.from_pandas(df, npartitions=5, sort=False)
assert_eq(ddf.A.fillna(ddf.B), df.A.fillna(df.B))
assert_eq(ddf.B.fillna(ddf.A), df.B.fillna(df.A))
def test_ffill_bfill():
df = tm.makeMissingDataframe(0.8, 42)
ddf = dd.from_pandas(df, npartitions=5, sort=False)
assert_eq(ddf.ffill(), df.ffill())
assert_eq(ddf.bfill(), df.bfill())
assert_eq(ddf.ffill(axis=1), df.ffill(axis=1))
assert_eq(ddf.bfill(axis=1), df.bfill(axis=1))
def test_sample():
df = pd.DataFrame({'x': [1, 2, 3, 4, None, 6], 'y': list('abdabd')},
index=[10, 20, 30, 40, 50, 60])
a = dd.from_pandas(df, 2)
b = a.sample(0.5)
assert_eq(b, b)
c = a.sample(0.5, random_state=1234)
d = a.sample(0.5, random_state=1234)
assert_eq(c, d)
assert a.sample(0.5)._name != a.sample(0.5)._name
def test_sample_without_replacement():
df = pd.DataFrame({'x': [1, 2, 3, 4, None, 6], 'y': list('abdabd')},
index=[10, 20, 30, 40, 50, 60])
a = dd.from_pandas(df, 2)
b = a.sample(0.7, replace=False)
bb = b.index.compute()
assert len(bb) == len(set(bb))
def test_datetime_accessor():
df = pd.DataFrame({'x': [1, 2, 3, 4]})
df['x'] = df.x.astype('M8[us]')
a = dd.from_pandas(df, 2)
assert 'date' in dir(a.x.dt)
# pandas loses Series.name via datetime accessor
# see https://github.com/pydata/pandas/issues/10712
assert_eq(a.x.dt.date, df.x.dt.date, check_names=False)
# to_pydatetime returns a numpy array in pandas, but a Series in dask
assert_eq(a.x.dt.to_pydatetime(),
pd.Series(df.x.dt.to_pydatetime(), index=df.index, dtype=object))
assert set(a.x.dt.date.dask) == set(a.x.dt.date.dask)
assert set(a.x.dt.to_pydatetime().dask) == set(a.x.dt.to_pydatetime().dask)
def test_str_accessor():
df = pd.DataFrame({'x': ['a', 'b', 'c', 'D'], 'y': [1, 2, 3, 4]},
index=['e', 'f', 'g', 'H'])
a = dd.from_pandas(df, 2, sort=False)
# Check that str not in dir/hasattr for non-object columns
assert 'str' not in dir(a.y)
assert not hasattr(a.y, 'str')
assert 'upper' in dir(a.x.str)
assert_eq(a.x.str.upper(), df.x.str.upper())
assert set(a.x.str.upper().dask) == set(a.x.str.upper().dask)
assert 'upper' in dir(a.index.str)
assert_eq(a.index.str.upper(), df.index.str.upper())
assert set(a.index.str.upper().dask) == set(a.index.str.upper().dask)
    # make sure args & kwargs are passed through
assert 'contains' in dir(a.x.str)
assert_eq(a.x.str.contains('a'), df.x.str.contains('a'))
assert set(a.x.str.contains('a').dask) == set(a.x.str.contains('a').dask)
assert_eq(a.x.str.contains('d', case=False), df.x.str.contains('d', case=False))
assert set(a.x.str.contains('d', case=False).dask) == set(a.x.str.contains('d', case=False).dask)
for na in [True, False]:
assert_eq(a.x.str.contains('a', na=na), df.x.str.contains('a', na=na))
assert set(a.x.str.contains('a', na=na).dask) == set(a.x.str.contains('a', na=na).dask)
for regex in [True, False]:
assert_eq(a.x.str.contains('a', regex=regex), df.x.str.contains('a', regex=regex))
assert set(a.x.str.contains('a', regex=regex).dask) == set(a.x.str.contains('a', regex=regex).dask)
def test_empty_max():
meta = make_meta({'x': 'i8'})
a = dd.DataFrame({('x', 0): pd.DataFrame({'x': [1]}),
('x', 1): pd.DataFrame({'x': []})}, 'x',
meta, [None, None, None])
assert_eq(a.x.max(), 1)
def test_query():
df = pd.DataFrame({'x': [1, 2, 3, 4], 'y': [5, 6, 7, 8]})
a = dd.from_pandas(df, npartitions=2)
q = a.query('x**2 > y')
with ignoring(ImportError):
assert_eq(q, df.query('x**2 > y'))
def test_eval():
p = pd.DataFrame({'x': [1, 2, 3, 4], 'y': [5, 6, 7, 8]})
d = dd.from_pandas(p, npartitions=2)
with ignoring(ImportError):
assert_eq(p.eval('x + y'), d.eval('x + y'))
assert_eq(p.eval('z = x + y', inplace=False),
d.eval('z = x + y', inplace=False))
with pytest.raises(NotImplementedError):
d.eval('z = x + y', inplace=True)
if p.eval('z = x + y', inplace=None) is None:
with pytest.raises(NotImplementedError):
d.eval('z = x + y', inplace=None)
@pytest.mark.parametrize('include, exclude', [
([int], None),
(None, [int]),
([np.number, object], [float]),
(['datetime'], None)
])
def test_select_dtypes(include, exclude):
n = 10
df = pd.DataFrame({'cint': [1] * n,
'cstr': ['a'] * n,
                       'cfloat': [1.] * n,
'cdt': pd.date_range('2016-01-01', periods=n)})
a = dd.from_pandas(df, npartitions=2)
result = a.select_dtypes(include=include, exclude=exclude)
expected = df.select_dtypes(include=include, exclude=exclude)
assert_eq(result, expected)
# count dtypes
tm.assert_series_equal(a.get_dtype_counts(), df.get_dtype_counts())
tm.assert_series_equal(a.get_ftype_counts(), df.get_ftype_counts())
tm.assert_series_equal(result.get_dtype_counts(),
expected.get_dtype_counts())
tm.assert_series_equal(result.get_ftype_counts(),
expected.get_ftype_counts())
def test_deterministic_apply_concat_apply_names():
df = pd.DataFrame({'x': [1, 2, 3, 4], 'y': [5, 6, 7, 8]})
a = dd.from_pandas(df, npartitions=2)
assert sorted(a.x.nlargest(2).dask) == sorted(a.x.nlargest(2).dask)
assert sorted(a.x.nlargest(2).dask) != sorted(a.x.nlargest(3).dask)
assert (sorted(a.x.drop_duplicates().dask) ==
sorted(a.x.drop_duplicates().dask))
assert (sorted(a.groupby('x').y.mean().dask) ==
sorted(a.groupby('x').y.mean().dask))
# Test aca without passing in token string
f = lambda a: a.nlargest(5)
f2 = lambda a: a.nlargest(3)
assert (sorted(aca(a.x, f, f, a.x._meta).dask) !=
sorted(aca(a.x, f2, f2, a.x._meta).dask))
assert (sorted(aca(a.x, f, f, a.x._meta).dask) ==
sorted(aca(a.x, f, f, a.x._meta).dask))
# Test aca with keywords
def chunk(x, c_key=0, both_key=0):
return x.sum() + c_key + both_key
def agg(x, a_key=0, both_key=0):
return pd.Series(x).sum() + a_key + both_key
c_key = 2
a_key = 3
both_key = 4
res = aca(a.x, chunk=chunk, aggregate=agg, chunk_kwargs={'c_key': c_key},
aggregate_kwargs={'a_key': a_key}, both_key=both_key)
assert (sorted(res.dask) ==
sorted(aca(a.x, chunk=chunk, aggregate=agg,
chunk_kwargs={'c_key': c_key},
aggregate_kwargs={'a_key': a_key},
both_key=both_key).dask))
assert (sorted(res.dask) !=
sorted(aca(a.x, chunk=chunk, aggregate=agg,
chunk_kwargs={'c_key': c_key},
aggregate_kwargs={'a_key': a_key},
both_key=0).dask))
assert_eq(res, df.x.sum() + 2 * (c_key + both_key) + a_key + both_key)
def test_aca_meta_infer():
df = pd.DataFrame({'x': [1, 2, 3, 4],
'y': [5, 6, 7, 8]})
ddf = dd.from_pandas(df, npartitions=2)
def chunk(x, y, constant=1.0):
return (x + y + constant).head()
def agg(x):
return x.head()
res = aca([ddf, 2.0], chunk=chunk, aggregate=agg,
chunk_kwargs=dict(constant=2.0))
sol = (df + 2.0 + 2.0).head()
assert_eq(res, sol)
# Should infer as a scalar
res = aca([ddf.x], chunk=lambda x: pd.Series([x.sum()]),
aggregate=lambda x: x.sum())
assert isinstance(res, Scalar)
assert res.compute() == df.x.sum()
def test_aca_split_every():
df = pd.DataFrame({'x': [1] * 60})
ddf = dd.from_pandas(df, npartitions=15)
def chunk(x, y, constant=0):
return x.sum() + y + constant
def combine(x, constant=0):
return x.sum() + constant + 1
def agg(x, constant=0):
return x.sum() + constant + 2
f = lambda n: aca([ddf, 2.0], chunk=chunk, aggregate=agg, combine=combine,
chunk_kwargs=dict(constant=1.0),
combine_kwargs=dict(constant=2.0),
aggregate_kwargs=dict(constant=3.0),
split_every=n)
assert_max_deps(f(3), 3)
assert_max_deps(f(4), 4, False)
assert_max_deps(f(5), 5)
assert set(f(15).dask.keys()) == set(f(ddf.npartitions).dask.keys())
r3 = f(3)
r4 = f(4)
assert r3._name != r4._name
# Only intersect on reading operations
assert len(set(r3.dask.keys()) & set(r4.dask.keys())) == len(ddf.dask.keys())
# Keywords are different for each step
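    # 15 chunk calls each add (2 + 1); with split_every=3 the combine tree over
    # 15 partitions is 15 -> 5 -> 2, i.e. 7 combine calls each adding (2 + 1),
    # and the final aggregate adds (3 + 2)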
assert f(3).compute() == 60 + 15 * (2 + 1) + 7 * (2 + 1) + (3 + 2)
# Keywords are same for each step
res = aca([ddf, 2.0], chunk=chunk, aggregate=agg, combine=combine,
constant=3.0, split_every=3)
assert res.compute() == 60 + 15 * (2 + 3) + 7 * (3 + 1) + (3 + 2)
# No combine provided, combine is agg
res = aca([ddf, 2.0], chunk=chunk, aggregate=agg, constant=3, split_every=3)
assert res.compute() == 60 + 15 * (2 + 3) + 8 * (3 + 2)
# split_every must be >= 2
with pytest.raises(ValueError):
f(1)
# combine_kwargs with no combine provided
with pytest.raises(ValueError):
aca([ddf, 2.0], chunk=chunk, aggregate=agg, split_every=3,
chunk_kwargs=dict(constant=1.0),
combine_kwargs=dict(constant=2.0),
aggregate_kwargs=dict(constant=3.0))
def test_reduction_method():
df = pd.DataFrame({'x': range(50), 'y': range(50, 100)})
ddf = dd.from_pandas(df, npartitions=4)
chunk = lambda x, val=0: (x >= val).sum()
agg = lambda x: x.sum()
# Output of chunk is a scalar
res = ddf.x.reduction(chunk, aggregate=agg)
assert_eq(res, df.x.count())
# Output of chunk is a series
res = ddf.reduction(chunk, aggregate=agg)
assert res._name == ddf.reduction(chunk, aggregate=agg)._name
assert_eq(res, df.count())
# Test with keywords
res2 = ddf.reduction(chunk, aggregate=agg, chunk_kwargs={'val': 25})
    assert res2._name == ddf.reduction(chunk, aggregate=agg,
                                       chunk_kwargs={'val': 25})._name
assert res2._name != res._name
assert_eq(res2, (df >= 25).sum())
# Output of chunk is a dataframe
def sum_and_count(x):
return pd.DataFrame({'sum': x.sum(), 'count': x.count()})
res = ddf.reduction(sum_and_count,
aggregate=lambda x: x.groupby(level=0).sum())
assert_eq(res, pd.DataFrame({'sum': df.sum(), 'count': df.count()}))
def test_reduction_method_split_every():
df = pd.Series([1] * 60)
ddf = dd.from_pandas(df, npartitions=15)
def chunk(x, constant=0):
return x.sum() + constant
def combine(x, constant=0):
return x.sum() + constant + 1
def agg(x, constant=0):
return x.sum() + constant + 2
f = lambda n: ddf.reduction(chunk, aggregate=agg, combine=combine,
chunk_kwargs=dict(constant=1.0),
combine_kwargs=dict(constant=2.0),
aggregate_kwargs=dict(constant=3.0),
split_every=n)
assert_max_deps(f(3), 3)
assert_max_deps(f(4), 4, False)
assert_max_deps(f(5), 5)
assert set(f(15).dask.keys()) == set(f(ddf.npartitions).dask.keys())
r3 = f(3)
r4 = f(4)
assert r3._name != r4._name
# Only intersect on reading operations
assert len(set(r3.dask.keys()) & set(r4.dask.keys())) == len(ddf.dask.keys())
# Keywords are different for each step
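    # 15 chunk calls add constant=1 each (the +15); the 7 combines
    # (15 -> 5 -> 2) add (2 + 1) each, and the aggregate adds (3 + 2)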
assert f(3).compute() == 60 + 15 + 7 * (2 + 1) + (3 + 2)
# Keywords are same for each step
res = ddf.reduction(chunk, aggregate=agg, combine=combine, constant=3.0,
split_every=3)
assert res.compute() == 60 + 15 * 3 + 7 * (3 + 1) + (3 + 2)
# No combine provided, combine is agg
res = ddf.reduction(chunk, aggregate=agg, constant=3.0, split_every=3)
assert res.compute() == 60 + 15 * 3 + 8 * (3 + 2)
# split_every must be >= 2
with pytest.raises(ValueError):
f(1)
# combine_kwargs with no combine provided
with pytest.raises(ValueError):
ddf.reduction(chunk, aggregate=agg, split_every=3,
chunk_kwargs=dict(constant=1.0),
combine_kwargs=dict(constant=2.0),
aggregate_kwargs=dict(constant=3.0))
def test_pipe():
df = pd.DataFrame({'x': range(50), 'y': range(50, 100)})
ddf = dd.from_pandas(df, npartitions=4)
def f(x, y, z=0):
return x + y + z
assert_eq(ddf.pipe(f, 1, z=2), f(ddf, 1, z=2))
assert_eq(ddf.x.pipe(f, 1, z=2), f(ddf.x, 1, z=2))
def test_gh_517():
arr = np.random.randn(100, 2)
df = pd.DataFrame(arr, columns=['a', 'b'])
ddf = dd.from_pandas(df, 2)
assert ddf.index.nunique().compute() == 100
ddf2 = dd.from_pandas(pd.concat([df, df]), 5)
assert ddf2.index.nunique().compute() == 100
def test_drop_axis_1():
df = pd.DataFrame({'x': [1, 2, 3, 4],
'y': [5, 6, 7, 8],
'z': [9, 10, 11, 12]})
ddf = dd.from_pandas(df, npartitions=2)
assert_eq(ddf.drop('y', axis=1), df.drop('y', axis=1))
assert_eq(ddf.drop(['y', 'z'], axis=1), df.drop(['y', 'z'], axis=1))
with pytest.raises(ValueError):
ddf.drop(['a', 'x'], axis=1)
assert_eq(ddf.drop(['a', 'x'], axis=1, errors='ignore'),
df.drop(['a', 'x'], axis=1, errors='ignore'))
def test_gh580():
df = pd.DataFrame({'x': np.arange(10, dtype=float)})
ddf = dd.from_pandas(df, 2)
assert_eq(np.cos(df['x']), np.cos(ddf['x']))
assert_eq(np.cos(df['x']), np.cos(ddf['x']))
def test_rename_dict():
renamer = {'a': 'A', 'b': 'B'}
assert_eq(d.rename(columns=renamer),
full.rename(columns=renamer))
def test_rename_function():
renamer = lambda x: x.upper()
assert_eq(d.rename(columns=renamer),
full.rename(columns=renamer))
def test_rename_index():
renamer = {0: 1}
pytest.raises(ValueError, lambda: d.rename(index=renamer))
def test_to_timestamp():
index = pd.PeriodIndex(freq='A', start='1/1/2001', end='12/1/2004')
df = pd.DataFrame({'x': [1, 2, 3, 4], 'y': [10, 20, 30, 40]}, index=index)
ddf = dd.from_pandas(df, npartitions=3)
assert_eq(ddf.to_timestamp(), df.to_timestamp())
assert_eq(ddf.to_timestamp(freq='M', how='s').compute(),
df.to_timestamp(freq='M', how='s'))
assert_eq(ddf.x.to_timestamp(), df.x.to_timestamp())
assert_eq(ddf.x.to_timestamp(freq='M', how='s').compute(),
df.x.to_timestamp(freq='M', how='s'))
def test_to_frame():
s = pd.Series([1, 2, 3], name='foo')
a = dd.from_pandas(s, npartitions=2)
assert_eq(s.to_frame(), a.to_frame())
assert_eq(s.to_frame('bar'), a.to_frame('bar'))
def test_apply():
df = pd.DataFrame({'x': [1, 2, 3, 4], 'y': [10, 20, 30, 40]})
ddf = dd.from_pandas(df, npartitions=2)
func = lambda row: row['x'] + row['y']
assert_eq(ddf.x.apply(lambda x: x + 1),
df.x.apply(lambda x: x + 1))
# specify columns
assert_eq(ddf.apply(lambda xy: xy[0] + xy[1], axis=1, columns=None),
df.apply(lambda xy: xy[0] + xy[1], axis=1))
assert_eq(ddf.apply(lambda xy: xy[0] + xy[1], axis='columns', columns=None),
df.apply(lambda xy: xy[0] + xy[1], axis='columns'))
# inference
assert_eq(ddf.apply(lambda xy: xy[0] + xy[1], axis=1),
df.apply(lambda xy: xy[0] + xy[1], axis=1))
assert_eq(ddf.apply(lambda xy: xy, axis=1),
df.apply(lambda xy: xy, axis=1))
# result will be dataframe
func = lambda x: pd.Series([x, x])
assert_eq(ddf.x.apply(func, name=[0, 1]), df.x.apply(func))
# inference
assert_eq(ddf.x.apply(func), df.x.apply(func))
# axis=0
with tm.assertRaises(NotImplementedError):
ddf.apply(lambda xy: xy, axis=0)
with tm.assertRaises(NotImplementedError):
ddf.apply(lambda xy: xy, axis='index')
def test_applymap():
df = pd.DataFrame({'x': [1, 2, 3, 4], 'y': [10, 20, 30, 40]})
ddf = dd.from_pandas(df, npartitions=2)
assert_eq(ddf.applymap(lambda x: x + 1), df.applymap(lambda x: x + 1))
assert_eq(ddf.applymap(lambda x: (x, x)), df.applymap(lambda x: (x, x)))
def test_abs():
df = pd.DataFrame({'A': [1, -2, 3, -4, 5],
'B': [-6., -7, -8, -9, 10],
'C': ['a', 'b', 'c', 'd', 'e']})
ddf = dd.from_pandas(df, npartitions=2)
assert_eq(ddf.A.abs(), df.A.abs())
assert_eq(ddf[['A', 'B']].abs(), df[['A', 'B']].abs())
pytest.raises(TypeError, lambda: ddf.C.abs())
pytest.raises(TypeError, lambda: ddf.abs())
def test_round():
df = pd.DataFrame({'col1': [1.123, 2.123, 3.123],
'col2': [1.234, 2.234, 3.234]})
ddf = dd.from_pandas(df, npartitions=2)
assert_eq(ddf.round(), df.round())
assert_eq(ddf.round(2), df.round(2))
def test_cov():
# DataFrame
df = pd.util.testing.makeMissingDataframe(0.3, 42)
ddf = dd.from_pandas(df, npartitions=6)
res = ddf.cov()
res2 = ddf.cov(split_every=2)
res3 = ddf.cov(10)
res4 = ddf.cov(10, split_every=2)
sol = df.cov()
sol2 = df.cov(10)
assert_eq(res, sol)
assert_eq(res2, sol)
assert_eq(res3, sol2)
assert_eq(res4, sol2)
assert res._name == ddf.cov()._name
assert res._name != res2._name
assert res3._name != res4._name
assert res._name != res3._name
# Series
a = df.A
b = df.B
da = dd.from_pandas(a, npartitions=6)
db = dd.from_pandas(b, npartitions=7)
res = da.cov(db)
res2 = da.cov(db, split_every=2)
res3 = da.cov(db, 10)
res4 = da.cov(db, 10, split_every=2)
sol = a.cov(b)
sol2 = a.cov(b, 10)
assert_eq(res, sol)
assert_eq(res2, sol)
assert_eq(res3, sol2)
assert_eq(res4, sol2)
assert res._name == da.cov(db)._name
assert res._name != res2._name
assert res3._name != res4._name
assert res._name != res3._name
def test_corr():
# DataFrame
df = pd.util.testing.makeMissingDataframe(0.3, 42)
ddf = dd.from_pandas(df, npartitions=6)
res = ddf.corr()
res2 = ddf.corr(split_every=2)
res3 = ddf.corr(min_periods=10)
res4 = ddf.corr(min_periods=10, split_every=2)
sol = df.corr()
sol2 = df.corr(min_periods=10)
assert_eq(res, sol)
assert_eq(res2, sol)
assert_eq(res3, sol2)
assert_eq(res4, sol2)
assert res._name == ddf.corr()._name
assert res._name != res2._name
assert res3._name != res4._name
assert res._name != res3._name
pytest.raises(NotImplementedError, lambda: ddf.corr(method='spearman'))
# Series
a = df.A
b = df.B
da = dd.from_pandas(a, npartitions=6)
db = dd.from_pandas(b, npartitions=7)
res = da.corr(db)
res2 = da.corr(db, split_every=2)
res3 = da.corr(db, min_periods=10)
res4 = da.corr(db, min_periods=10, split_every=2)
sol = da.corr(db)
sol2 = da.corr(db, min_periods=10)
assert_eq(res, sol)
assert_eq(res2, sol)
assert_eq(res3, sol2)
assert_eq(res4, sol2)
assert res._name == da.corr(db)._name
assert res._name != res2._name
assert res3._name != res4._name
assert res._name != res3._name
pytest.raises(NotImplementedError, lambda: da.corr(db, method='spearman'))
pytest.raises(TypeError, lambda: da.corr(ddf))
def test_cov_corr_meta():
df = pd.DataFrame({'a': np.array([1, 2, 3]),
'b': np.array([1.0, 2.0, 3.0], dtype='f4'),
'c': np.array([1.0, 2.0, 3.0])},
index=pd.Index([1, 2, 3], name='myindex'))
ddf = dd.from_pandas(df, npartitions=2)
assert_eq(ddf.corr(), df.corr())
assert_eq(ddf.cov(), df.cov())
assert ddf.a.cov(ddf.b)._meta.dtype == 'f8'
assert ddf.a.corr(ddf.b)._meta.dtype == 'f8'
@pytest.mark.slow
def test_cov_corr_stable():
df = pd.DataFrame(np.random.random((20000000, 2)) * 2 - 1, columns=['a', 'b'])
ddf = dd.from_pandas(df, npartitions=50)
assert_eq(ddf.cov(split_every=8), df.cov())
assert_eq(ddf.corr(split_every=8), df.corr())
def test_autocorr():
x = pd.Series(np.random.random(100))
dx = dd.from_pandas(x, npartitions=10)
assert_eq(dx.autocorr(2), x.autocorr(2))
assert_eq(dx.autocorr(0), x.autocorr(0))
assert_eq(dx.autocorr(-2), x.autocorr(-2))
assert_eq(dx.autocorr(2, split_every=3), x.autocorr(2))
pytest.raises(TypeError, lambda: dx.autocorr(1.5))
def test_apply_infer_columns():
df = pd.DataFrame({'x': [1, 2, 3, 4], 'y': [10, 20, 30, 40]})
ddf = dd.from_pandas(df, npartitions=2)
def return_df(x):
        # will create a new DataFrame whose columns are ['sum', 'mean']
return pd.Series([x.sum(), x.mean()], index=['sum', 'mean'])
# DataFrame to completely different DataFrame
result = ddf.apply(return_df, axis=1)
assert isinstance(result, dd.DataFrame)
tm.assert_index_equal(result.columns, pd.Index(['sum', 'mean']))
assert_eq(result, df.apply(return_df, axis=1))
# DataFrame to Series
result = ddf.apply(lambda x: 1, axis=1)
assert isinstance(result, dd.Series)
assert result.name is None
assert_eq(result, df.apply(lambda x: 1, axis=1))
def return_df2(x):
return pd.Series([x * 2, x * 3], index=['x2', 'x3'])
# Series to completely different DataFrame
result = ddf.x.apply(return_df2)
assert isinstance(result, dd.DataFrame)
tm.assert_index_equal(result.columns, pd.Index(['x2', 'x3']))
assert_eq(result, df.x.apply(return_df2))
# Series to Series
result = ddf.x.apply(lambda x: 1)
assert isinstance(result, dd.Series)
assert result.name == 'x'
assert_eq(result, df.x.apply(lambda x: 1))
def test_index_time_properties():
i = tm.makeTimeSeries()
a = dd.from_pandas(i, npartitions=3)
assert 'day' in dir(a.index)
    # returns a numpy array in pandas, but an Index in dask
assert_eq(a.index.day, pd.Index(i.index.day))
assert_eq(a.index.month, pd.Index(i.index.month))
def test_nlargest_nsmallest():
from string import ascii_lowercase
df = pd.DataFrame({'a': np.random.permutation(20),
'b': list(ascii_lowercase[:20]),
'c': np.random.permutation(20).astype('float64')})
ddf = dd.from_pandas(df, npartitions=3)
for m in ['nlargest', 'nsmallest']:
f = lambda df, *args, **kwargs: getattr(df, m)(*args, **kwargs)
res = f(ddf, 5, 'a')
res2 = f(ddf, 5, 'a', split_every=2)
sol = f(df, 5, 'a')
assert_eq(res, sol)
assert_eq(res2, sol)
assert res._name != res2._name
res = f(ddf, 5, ['a', 'b'])
res2 = f(ddf, 5, ['a', 'b'], split_every=2)
sol = f(df, 5, ['a', 'b'])
assert_eq(res, sol)
assert_eq(res2, sol)
assert res._name != res2._name
res = f(ddf.a, 5)
res2 = f(ddf.a, 5, split_every=2)
sol = f(df.a, 5)
assert_eq(res, sol)
assert_eq(res2, sol)
assert res._name != res2._name
def test_reset_index():
df = pd.DataFrame({'x': [1, 2, 3, 4], 'y': [10, 20, 30, 40]})
ddf = dd.from_pandas(df, npartitions=2)
sol = df.reset_index()
res = ddf.reset_index()
assert all(d is None for d in res.divisions)
assert_eq(res, sol, check_index=False)
sol = df.reset_index(drop=True)
res = ddf.reset_index(drop=True)
assert all(d is None for d in res.divisions)
assert_eq(res, sol, check_index=False)
sol = df.x.reset_index()
res = ddf.x.reset_index()
assert all(d is None for d in res.divisions)
assert_eq(res, sol, check_index=False)
sol = df.x.reset_index(drop=True)
res = ddf.x.reset_index(drop=True)
assert all(d is None for d in res.divisions)
assert_eq(res, sol, check_index=False)
def test_dataframe_compute_forward_kwargs():
x = dd.from_pandas(pd.DataFrame({'a': range(10)}), npartitions=2).a.sum()
x.compute(bogus_keyword=10)
def test_series_iteritems():
df = pd.DataFrame({'x': [1, 2, 3, 4]})
ddf = dd.from_pandas(df, npartitions=2)
for (a, b) in zip(df['x'].iteritems(), ddf['x'].iteritems()):
assert a == b
def test_dataframe_iterrows():
df = pd.DataFrame({'x': [1, 2, 3, 4], 'y': [10, 20, 30, 40]})
ddf = dd.from_pandas(df, npartitions=2)
for (a, b) in zip(df.iterrows(), ddf.iterrows()):
tm.assert_series_equal(a[1], b[1])
def test_dataframe_itertuples():
df = pd.DataFrame({'x': [1, 2, 3, 4], 'y': [10, 20, 30, 40]})
ddf = dd.from_pandas(df, npartitions=2)
for (a, b) in zip(df.itertuples(), ddf.itertuples()):
assert a == b
def test_astype():
df = pd.DataFrame({'x': [1, 2, 3, None], 'y': [10, 20, 30, 40]},
index=[10, 20, 30, 40])
a = dd.from_pandas(df, 2)
assert_eq(a.astype(float), df.astype(float))
assert_eq(a.x.astype(float), df.x.astype(float))
def test_astype_categoricals():
df = pd.DataFrame({'x': ['a', 'b', 'c', 'b', 'c'],
'y': [1, 2, 3, 4, 5]})
ddf = dd.from_pandas(df, 2)
ddf2 = ddf.astype({'x': 'category'})
assert not ddf2.x.cat.known
assert ddf2.x.dtype == 'category'
assert ddf2.compute().x.dtype == 'category'
dx = ddf.x.astype('category')
assert not dx.cat.known
assert dx.dtype == 'category'
assert dx.compute().dtype == 'category'
def test_groupby_callable():
a = pd.DataFrame({'x': [1, 2, 3, None], 'y': [10, 20, 30, 40]},
index=[1, 2, 3, 4])
b = dd.from_pandas(a, 2)
def iseven(x):
return x % 2 == 0
assert_eq(a.groupby(iseven).y.sum(),
b.groupby(iseven).y.sum())
assert_eq(a.y.groupby(iseven).sum(),
b.y.groupby(iseven).sum())
def test_set_index_sorted_true():
df = pd.DataFrame({'x': [1, 2, 3, 4],
'y': [10, 20, 30, 40],
'z': [4, 3, 2, 1]})
a = dd.from_pandas(df, 2, sort=False)
assert not a.known_divisions
b = a.set_index('x', sorted=True)
assert b.known_divisions
assert set(a.dask).issubset(set(b.dask))
for drop in [True, False]:
assert_eq(a.set_index('x', drop=drop),
df.set_index('x', drop=drop))
assert_eq(a.set_index(a.x, sorted=True, drop=drop),
df.set_index(df.x, drop=drop))
assert_eq(a.set_index(a.x + 1, sorted=True, drop=drop),
df.set_index(df.x + 1, drop=drop))
with pytest.raises(ValueError):
a.set_index(a.z, sorted=True)
def test_compute_divisions():
from dask.dataframe.shuffle import compute_divisions
df = pd.DataFrame({'x': [1, 2, 3, 4],
'y': [10, 20, 30, 40],
'z': [4, 3, 2, 1]},
index=[1, 3, 10, 20])
a = dd.from_pandas(df, 2, sort=False)
assert not a.known_divisions
divisions = compute_divisions(a)
b = copy(a)
b.divisions = divisions
assert_eq(a, b, check_divisions=False)
assert b.known_divisions
def test_methods_tokenize_differently():
df = pd.DataFrame({'x': [1, 2, 3, 4]})
df = dd.from_pandas(df, npartitions=1)
assert (df.x.map_partitions(lambda x: pd.Series(x.min()))._name !=
df.x.map_partitions(lambda x: pd.Series(x.max()))._name)
def test_sorted_index_single_partition():
df = pd.DataFrame({'x': [1, 2, 3, 4], 'y': [1, 0, 1, 0]})
ddf = dd.from_pandas(df, npartitions=1)
assert_eq(ddf.set_index('x', sorted=True),
df.set_index('x'))
def _assert_info(df, ddf, memory_usage=True):
from io import StringIO
assert isinstance(df, pd.DataFrame)
assert isinstance(ddf, dd.DataFrame)
buf_pd, buf_da = StringIO(), StringIO()
df.info(buf=buf_pd, memory_usage=memory_usage)
ddf.info(buf=buf_da, verbose=True, memory_usage=memory_usage)
stdout_pd = buf_pd.getvalue()
stdout_da = buf_da.getvalue()
stdout_da = stdout_da.replace(str(type(ddf)), str(type(df)))
assert stdout_pd == stdout_da
def test_info():
from io import StringIO
from dask.compatibility import unicode
from pandas.formats import format
format._put_lines = put_lines
test_frames = [
pd.DataFrame({'x': [1, 2, 3, 4], 'y': [1, 0, 1, 0]}, index=pd.Int64Index(range(4))), # No RangeIndex in dask
pd.DataFrame()
]
for df in test_frames:
ddf = dd.from_pandas(df, npartitions=4)
_assert_info(df, ddf)
buf = StringIO()
ddf = dd.from_pandas(pd.DataFrame({'x': [1, 2, 3, 4], 'y': [1, 0, 1, 0]}, index=range(4)), npartitions=4)
# Verbose=False
ddf.info(buf=buf, verbose=False)
assert buf.getvalue() == unicode("<class 'dask.dataframe.core.DataFrame'>\n"
"Columns: 2 entries, x to y\n"
"dtypes: int64(2)")
# buf=None
assert ddf.info(buf=None) is None
def test_groupby_multilevel_info():
# GH 1844
from io import StringIO
from dask.compatibility import unicode
from pandas.formats import format
format._put_lines = put_lines
df = pd.DataFrame({'A': [1, 1, 2, 2],
'B': [1, 2, 3, 4],
'C': [1, 2, 3, 4]})
ddf = dd.from_pandas(df, npartitions=2)
g = ddf.groupby(['A', 'B']).sum()
    # slight difference in the memory repr (a single additional space)
_assert_info(g.compute(), g, memory_usage=False)
buf = StringIO()
g.info(buf, verbose=False)
assert buf.getvalue() == unicode("""<class 'dask.dataframe.core.DataFrame'>
Columns: 1 entries, C to C
dtypes: int64(1)""")
# multilevel
g = ddf.groupby(['A', 'B']).agg(['count', 'sum'])
_assert_info(g.compute(), g, memory_usage=False)
buf = StringIO()
g.info(buf, verbose=False)
assert buf.getvalue() == unicode("""<class 'dask.dataframe.core.DataFrame'>
Columns: 2 entries, (C, count) to (C, sum)
dtypes: int64(2)""")
def test_categorize_info():
# assert that we can call info after categorize
# workaround for: https://github.com/pydata/pandas/issues/14368
from io import StringIO
from dask.compatibility import unicode
from pandas.formats import format
format._put_lines = put_lines
df = pd.DataFrame({'x': [1, 2, 3, 4],
'y': pd.Series(list('aabc')),
'z': pd.Series(list('aabc'))},
index=pd.Int64Index(range(4))) # No RangeIndex in dask
ddf = dd.from_pandas(df, npartitions=4).categorize(['y'])
    # verbose=True
buf = StringIO()
ddf.info(buf=buf, verbose=True)
assert buf.getvalue() == unicode("<class 'dask.dataframe.core.DataFrame'>\n"
"Int64Index: 4 entries, 0 to 3\n"
"Data columns (total 3 columns):\n"
"x 4 non-null int64\n"
"y 4 non-null category\n"
"z 4 non-null object\n"
"dtypes: category(1), object(1), int64(1)")
def test_gh_1301():
df = pd.DataFrame([['1', '2'], ['3', '4']])
ddf = dd.from_pandas(df, npartitions=2)
ddf2 = ddf.assign(y=ddf[1].astype(int))
assert_eq(ddf2, df.assign(y=df[1].astype(int)))
assert ddf2.dtypes['y'] == np.dtype(int)
def test_timeseries_sorted():
df = tm.makeTimeDataFrame()
ddf = dd.from_pandas(df.reset_index(), npartitions=2)
df.index.name = 'index'
assert_eq(ddf.set_index('index', sorted=True, drop=True), df)
def test_column_assignment():
df = pd.DataFrame({'x': [1, 2, 3, 4], 'y': [1, 0, 1, 0]})
ddf = dd.from_pandas(df, npartitions=2)
orig = ddf.copy()
ddf['z'] = ddf.x + ddf.y
df['z'] = df.x + df.y
assert_eq(df, ddf)
assert 'z' not in orig.columns
def test_columns_assignment():
df = pd.DataFrame({'x': [1, 2, 3, 4]})
ddf = dd.from_pandas(df, npartitions=2)
df2 = df.assign(y=df.x + 1, z=df.x - 1)
df[['a', 'b']] = df2[['y', 'z']]
ddf2 = ddf.assign(y=ddf.x + 1, z=ddf.x - 1)
ddf[['a', 'b']] = ddf2[['y', 'z']]
assert_eq(df, ddf)
def test_attribute_assignment():
df = pd.DataFrame({'x': [1, 2, 3, 4, 5],
'y': [1., 2., 3., 4., 5.]})
ddf = dd.from_pandas(df, npartitions=2)
ddf.y = ddf.x + ddf.y
assert_eq(ddf, df.assign(y=df.x + df.y))
def test_inplace_operators():
df = pd.DataFrame({'x': [1, 2, 3, 4, 5],
'y': [1., 2., 3., 4., 5.]})
ddf = dd.from_pandas(df, npartitions=2)
ddf.y **= 0.5
assert_eq(ddf.y, df.y ** 0.5)
assert_eq(ddf, df.assign(y=df.y ** 0.5))
@pytest.mark.parametrize("skipna", [True, False])
@pytest.mark.parametrize("idx", [
np.arange(100),
sorted(np.random.random(size=100)),
pd.date_range('20150101', periods=100)
])
def test_idxmaxmin(idx, skipna):
df = pd.DataFrame(np.random.randn(100, 5), columns=list('abcde'), index=idx)
df.b.iloc[31] = np.nan
df.d.iloc[78] = np.nan
ddf = dd.from_pandas(df, npartitions=3)
assert_eq(df.idxmax(axis=1, skipna=skipna),
ddf.idxmax(axis=1, skipna=skipna))
assert_eq(df.idxmin(axis=1, skipna=skipna),
ddf.idxmin(axis=1, skipna=skipna))
assert_eq(df.idxmax(skipna=skipna), ddf.idxmax(skipna=skipna))
assert_eq(df.idxmax(skipna=skipna),
ddf.idxmax(skipna=skipna, split_every=2))
assert (ddf.idxmax(skipna=skipna)._name !=
ddf.idxmax(skipna=skipna, split_every=2)._name)
assert_eq(df.idxmin(skipna=skipna), ddf.idxmin(skipna=skipna))
assert_eq(df.idxmin(skipna=skipna),
ddf.idxmin(skipna=skipna, split_every=2))
assert (ddf.idxmin(skipna=skipna)._name !=
ddf.idxmin(skipna=skipna, split_every=2)._name)
assert_eq(df.a.idxmax(skipna=skipna), ddf.a.idxmax(skipna=skipna))
assert_eq(df.a.idxmax(skipna=skipna),
ddf.a.idxmax(skipna=skipna, split_every=2))
assert (ddf.a.idxmax(skipna=skipna)._name !=
ddf.a.idxmax(skipna=skipna, split_every=2)._name)
assert_eq(df.a.idxmin(skipna=skipna), ddf.a.idxmin(skipna=skipna))
assert_eq(df.a.idxmin(skipna=skipna),
ddf.a.idxmin(skipna=skipna, split_every=2))
assert (ddf.a.idxmin(skipna=skipna)._name !=
ddf.a.idxmin(skipna=skipna, split_every=2)._name)
def test_getitem_meta():
data = {'col1': ['a', 'a', 'b'],
'col2': [0, 1, 0]}
df = pd.DataFrame(data=data, columns=['col1', 'col2'])
ddf = dd.from_pandas(df, npartitions=1)
assert_eq(df.col2[df.col1 == 'a'], ddf.col2[ddf.col1 == 'a'])
def test_getitem_multilevel():
    pdf = pd.DataFrame({('A', '0'): [1, 2, 2], ('B', '1'): [1, 2, 3]})
ddf = dd.from_pandas(pdf, npartitions=3)
assert_eq(pdf['A', '0'], ddf['A', '0'])
assert_eq(pdf[[('A', '0'), ('B', '1')]], ddf[[('A', '0'), ('B', '1')]])
def test_set_index_sorted_min_max_same():
a = pd.DataFrame({'x': [1, 2, 3], 'y': [0, 0, 0]})
b = pd.DataFrame({'x': [1, 2, 3], 'y': [1, 1, 1]})
aa = delayed(a)
bb = delayed(b)
df = dd.from_delayed([aa, bb], meta=a)
assert not df.known_divisions
df2 = df.set_index('y', sorted=True)
assert df2.divisions == (0, 1, 1)
def test_diff():
df = pd.DataFrame(np.random.randn(100, 5), columns=list('abcde'))
ddf = dd.from_pandas(df, 5)
assert_eq(ddf.diff(), df.diff())
assert_eq(ddf.diff(0), df.diff(0))
assert_eq(ddf.diff(2), df.diff(2))
assert_eq(ddf.diff(-2), df.diff(-2))
assert_eq(ddf.diff(2, axis=1), df.diff(2, axis=1))
assert_eq(ddf.a.diff(), df.a.diff())
assert_eq(ddf.a.diff(0), df.a.diff(0))
assert_eq(ddf.a.diff(2), df.a.diff(2))
assert_eq(ddf.a.diff(-2), df.a.diff(-2))
assert ddf.diff(2)._name == ddf.diff(2)._name
assert ddf.diff(2)._name != ddf.diff(3)._name
pytest.raises(TypeError, lambda: ddf.diff(1.5))
def test_shift():
df = tm.makeTimeDataFrame()
ddf = dd.from_pandas(df, npartitions=4)
# DataFrame
assert_eq(ddf.shift(), df.shift())
assert_eq(ddf.shift(0), df.shift(0))
assert_eq(ddf.shift(2), df.shift(2))
assert_eq(ddf.shift(-2), df.shift(-2))
assert_eq(ddf.shift(2, axis=1), df.shift(2, axis=1))
# Series
assert_eq(ddf.A.shift(), df.A.shift())
assert_eq(ddf.A.shift(0), df.A.shift(0))
assert_eq(ddf.A.shift(2), df.A.shift(2))
assert_eq(ddf.A.shift(-2), df.A.shift(-2))
with pytest.raises(TypeError):
ddf.shift(1.5)
def test_shift_with_freq():
df = tm.makeTimeDataFrame(30)
# DatetimeIndex
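    # divs flags below: as a rough rule, shifting by a fixed-size offset
    # (seconds, hours, Timedeltas) keeps divisions known, while
    # calendar-dependent offsets ('B', 'W') make them unknown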
for data_freq, divs1 in [('B', False), ('D', True), ('H', True)]:
df = df.set_index(tm.makeDateIndex(30, freq=data_freq))
ddf = dd.from_pandas(df, npartitions=4)
for freq, divs2 in [('S', True), ('W', False),
(pd.Timedelta(10, unit='h'), True)]:
for d, p in [(ddf, df), (ddf.A, df.A), (ddf.index, df.index)]:
res = d.shift(2, freq=freq)
assert_eq(res, p.shift(2, freq=freq))
assert res.known_divisions == divs2
# Index shifts also work with freq=None
res = ddf.index.shift(2)
assert_eq(res, df.index.shift(2))
assert res.known_divisions == divs1
# PeriodIndex
for data_freq, divs in [('B', False), ('D', True), ('H', True)]:
df = df.set_index(pd.period_range('2000-01-01', periods=30,
freq=data_freq))
ddf = dd.from_pandas(df, npartitions=4)
for d, p in [(ddf, df), (ddf.A, df.A)]:
res = d.shift(2, freq=data_freq)
assert_eq(res, p.shift(2, freq=data_freq))
assert res.known_divisions == divs
        # PeriodIndex.shift doesn't have a `freq` parameter
res = ddf.index.shift(2)
assert_eq(res, df.index.shift(2))
assert res.known_divisions == divs
with pytest.raises(ValueError):
ddf.index.shift(2, freq='D') # freq keyword not supported
# TimedeltaIndex
for data_freq in ['T', 'D', 'H']:
df = df.set_index(tm.makeTimedeltaIndex(30, freq=data_freq))
ddf = dd.from_pandas(df, npartitions=4)
for freq in ['S', pd.Timedelta(10, unit='h')]:
for d, p in [(ddf, df), (ddf.A, df.A), (ddf.index, df.index)]:
res = d.shift(2, freq=freq)
assert_eq(res, p.shift(2, freq=freq))
assert res.known_divisions
# Index shifts also work with freq=None
res = ddf.index.shift(2)
assert_eq(res, df.index.shift(2))
assert res.known_divisions
# Other index types error
df = tm.makeDataFrame()
ddf = dd.from_pandas(df, npartitions=4)
pytest.raises(NotImplementedError, lambda: ddf.shift(2, freq='S'))
pytest.raises(NotImplementedError, lambda: ddf.A.shift(2, freq='S'))
pytest.raises(NotImplementedError, lambda: ddf.index.shift(2))
@pytest.mark.parametrize('method', ['first', 'last'])
def test_first_and_last(method):
f = lambda x, offset: getattr(x, method)(offset)
freqs = ['12h', 'D']
offsets = ['0d', '100h', '20d', '20B', '3W', '3M', '400d', '13M']
for freq in freqs:
index = pd.date_range('1/1/2000', '1/1/2001', freq=freq)[::4]
df = pd.DataFrame(np.random.random((len(index), 4)), index=index,
columns=['A', 'B', 'C', 'D'])
ddf = dd.from_pandas(df, npartitions=10)
for offset in offsets:
assert_eq(f(ddf, offset), f(df, offset))
assert_eq(f(ddf.A, offset), f(df.A, offset))
@pytest.mark.parametrize('npartitions', [1, 4, 20])
@pytest.mark.parametrize('split_every', [2, 5])
@pytest.mark.parametrize('split_out', [None, 1, 5, 20])
def test_hash_split_unique(npartitions, split_every, split_out):
from string import ascii_lowercase
s = pd.Series(np.random.choice(list(ascii_lowercase), 1000, replace=True))
ds = dd.from_pandas(s, npartitions=npartitions)
dropped = ds.unique(split_every=split_every, split_out=split_out)
dsk = dropped._optimize(dropped.dask, dropped._keys())
from dask.core import get_deps
dependencies, dependents = get_deps(dsk)
assert len([k for k, v in dependencies.items() if not v]) == npartitions
assert dropped.npartitions == (split_out or 1)
assert sorted(dropped.compute(get=dask.get)) == sorted(s.unique())
@pytest.mark.parametrize('split_every', [None, 2])
def test_split_out_drop_duplicates(split_every):
x = np.concatenate([np.arange(10)] * 100)[:, None]
y = x.copy()
z = np.concatenate([np.arange(20)] * 50)[:, None]
rs = np.random.RandomState(1)
rs.shuffle(x)
rs.shuffle(y)
rs.shuffle(z)
df = pd.DataFrame(np.concatenate([x, y, z], axis=1), columns=['x', 'y', 'z'])
ddf = dd.from_pandas(df, npartitions=20)
for subset, keep in product([None, ['x', 'z']], ['first', 'last']):
sol = df.drop_duplicates(subset=subset, keep=keep)
res = ddf.drop_duplicates(subset=subset, keep=keep,
split_every=split_every, split_out=10)
assert res.npartitions == 10
assert_eq(sol, res)
@pytest.mark.parametrize('split_every', [None, 2])
def test_split_out_value_counts(split_every):
df = pd.DataFrame({'x': [1, 2, 3] * 100})
ddf = dd.from_pandas(df, npartitions=5)
assert ddf.x.value_counts(split_out=10, split_every=split_every).npartitions == 10
assert_eq(ddf.x.value_counts(split_out=10, split_every=split_every), df.x.value_counts())
def test_values():
from dask.array.utils import assert_eq
df = pd.DataFrame({'x': ['a', 'b', 'c', 'd'],
'y': [2, 3, 4, 5]},
index=pd.Index([1., 2., 3., 4.], name='ind'))
ddf = dd.from_pandas(df, 2)
assert_eq(df.values, ddf.values)
assert_eq(df.x.values, ddf.x.values)
assert_eq(df.y.values, ddf.y.values)
assert_eq(df.index.values, ddf.index.values)
def test_copy():
df = pd.DataFrame({'x': [1, 2, 3]})
a = dd.from_pandas(df, npartitions=2)
b = a.copy()
a['y'] = a.x * 2
assert_eq(b, df)
df['y'] = df.x * 2
def test_del():
df = pd.DataFrame({'x': ['a', 'b', 'c', 'd'],
'y': [2, 3, 4, 5]},
index=pd.Index([1., 2., 3., 4.], name='ind'))
a = dd.from_pandas(df, 2)
b = a.copy()
del a['x']
assert_eq(b, df)
del df['x']
assert_eq(a, df)
@pytest.mark.parametrize('index', [True, False])
@pytest.mark.parametrize('deep', [True, False])
def test_memory_usage(index, deep):
df = pd.DataFrame({'x': [1, 2, 3],
'y': [1.0, 2.0, 3.0],
'z': ['a', 'b', 'c']})
ddf = dd.from_pandas(df, npartitions=2)
assert_eq(df.memory_usage(index=index, deep=deep),
ddf.memory_usage(index=index, deep=deep))
assert (df.x.memory_usage(index=index, deep=deep) ==
ddf.x.memory_usage(index=index, deep=deep).compute())
@pytest.mark.parametrize('reduction', ['sum', 'mean', 'std', 'var', 'count',
'min', 'max', 'idxmin', 'idxmax',
'prod', 'all', 'sem'])
def test_dataframe_reductions_arithmetic(reduction):
df = pd.DataFrame({'x': [1, 2, 3, 4, 5],
'y': [1.1, 2.2, 3.3, 4.4, 5.5]})
ddf = dd.from_pandas(df, npartitions=3)
assert_eq(ddf - (getattr(ddf, reduction)() + 1),
df - (getattr(df, reduction)() + 1))
def test_datetime_loc_open_slicing():
dtRange = pd.date_range('01.01.2015','05.05.2015')
df = pd.DataFrame(np.random.random((len(dtRange), 2)), index=dtRange)
ddf = dd.from_pandas(df, npartitions=5)
assert_eq(df.loc[:'02.02.2015'], ddf.loc[:'02.02.2015'])
assert_eq(df.loc['02.02.2015':], ddf.loc['02.02.2015':])
assert_eq(df[0].loc[:'02.02.2015'], ddf[0].loc[:'02.02.2015'])
assert_eq(df[0].loc['02.02.2015':], ddf[0].loc['02.02.2015':])
def test_to_datetime():
df = pd.DataFrame({'year': [2015, 2016],
'month': [2, 3],
'day': [4, 5]})
ddf = dd.from_pandas(df, npartitions=2)
assert_eq(pd.to_datetime(df), dd.to_datetime(ddf))
s = pd.Series(['3/11/2000', '3/12/2000', '3/13/2000'] * 100)
ds = dd.from_pandas(s, npartitions=10)
assert_eq(pd.to_datetime(s, infer_datetime_format=True),
dd.to_datetime(ds, infer_datetime_format=True))
@pytest.mark.parametrize('drop', [0, 9])
def test_slice_on_filtered_boundary(drop):
# https://github.com/dask/dask/issues/2211
x = np.arange(10)
x[[5, 6]] -= 2
df = pd.DataFrame({"A": x, "B": np.arange(len(x))})
pdf = df.set_index("A").query("B != {}".format(drop))
ddf = dd.from_pandas(df, 1).set_index("A").query("B != {}".format(drop))
result = dd.concat([ddf, ddf.rename(columns={"B": "C"})], axis=1)
expected = pd.concat([pdf, pdf.rename(columns={"B": "C"})], axis=1)
assert_eq(result, expected)
assert not result.compute().index.is_monotonic # didn't accidentally sort
def test_boundary_slice_nonmonotonic():
x = np.array([-1, -2, 2, 4, 3])
df = pd.DataFrame({"B": range(len(x))}, index=x)
result = boundary_slice(df, 0, 4)
expected = df.iloc[2:]
tm.assert_frame_equal(result, expected)
result = boundary_slice(df, -1, 4)
expected = df.drop(-2)
tm.assert_frame_equal(result, expected)
result = boundary_slice(df, -2, 3)
expected = df.drop(4)
tm.assert_frame_equal(result, expected)
result = boundary_slice(df, -2, 3.5)
expected = df.drop(4)
tm.assert_frame_equal(result, expected)
result = boundary_slice(df, -2, 4)
expected = df
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize('index, left, right', [
(range(10), 0, 9),
(range(10), -1, None),
(range(10), None, 10),
([-1, 0, 2, 1], None, None),
([-1, 0, 2, 1], -1, None),
([-1, 0, 2, 1], None, 2),
([-1, 0, 2, 1], -2, 3),
(pd.date_range("2017", periods=10), None, None),
(pd.date_range("2017", periods=10), pd.Timestamp("2017"), None),
(pd.date_range("2017", periods=10), None, pd.Timestamp("2017-01-10")),
(pd.date_range("2017", periods=10), pd.Timestamp("2016"), None),
(pd.date_range("2017", periods=10), None, pd.Timestamp("2018")),
])
def test_boundary_slice_same(index, left, right):
df = pd.DataFrame({"A": range(len(index))}, index=index)
result = boundary_slice(df, left, right)
tm.assert_frame_equal(result, df)
| bsd-3-clause |
Alex-Ian-Hamilton/sunpy | sunpy/lightcurve/lightcurve.py | 1 | 15090 | """
LightCurve is a generic LightCurve class from which all other LightCurve
classes inherit from.
"""
from __future__ import absolute_import
import os.path
import shutil
import warnings
from datetime import datetime
from collections import OrderedDict
import numpy as np
import matplotlib.pyplot as plt
import pandas
from sunpy import config
from sunpy.time import is_time, TimeRange, parse_time
from sunpy.util.cond_dispatch import ConditionalDispatch, run_cls
from sunpy.extern.six.moves import urllib
from sunpy.extern import six
# pylint: disable=E1101,E1121,W0404,W0612,W0613
__authors__ = ["Keith Hughitt"]
__email__ = "[email protected]"
__all__ = ['LightCurve']
class LightCurve(object):
"""
LightCurve(filepath)
A generic light curve object.
Attributes
----------
meta : `str` or `dict`
The comment string or header associated with the data.
data : `~pandas.DataFrame`
        A pandas DataFrame representing one or more fields as a function of time.
Examples
--------
>>> from sunpy.lightcurve import LightCurve
>>> import datetime
>>> import numpy as np
>>> base = datetime.datetime.today()
>>> dates = [base - datetime.timedelta(minutes=x) for x in range(0, 24 * 60)]
>>> intensity = np.sin(np.arange(0, 12 * np.pi, step=(12 * np.pi) / 24 * 60))
>>> light_curve = LightCurve.create({"param1": intensity}, index=dates)
>>> light_curve.peek() # doctest: +SKIP
References
----------
* `Pandas Documentation <http://pandas.pydata.org/pandas-docs/dev/dsintro.html>`_
"""
_cond_dispatch = ConditionalDispatch()
create = classmethod(_cond_dispatch.wrapper())
def __init__(self, data, meta=None):
self.data = pandas.DataFrame(data)
if meta == '' or meta is None:
self.meta = OrderedDict()
self.meta.update({'name':None})
else:
self.meta = OrderedDict(meta)
@property
def header(self):
"""
        Return the lightcurve's metadata
.. deprecated:: 0.4.0
Use .meta instead
"""
warnings.warn("""lightcurve.header has been renamed to lightcurve.meta
for compatibility with map, please use meta instead""", Warning)
return self.meta
@classmethod
def from_time(cls, time, **kwargs):
"""
Called by Conditional Dispatch object when valid time is passed as
input to create method.
"""
date = parse_time(time)
url = cls._get_url_for_date(date, **kwargs)
filepath = cls._download(
url, kwargs, err="Unable to download data for specified date"
)
return cls.from_file(filepath)
@classmethod
def from_range(cls, start, end, **kwargs):
"""Called by Conditional Dispatch object when start and end time are
passed as input to create method.
:param start:
:param end:
:param kwargs:
:return:
"""
url = cls._get_url_for_date_range(parse_time(start), parse_time(end), **kwargs)
filepath = cls._download(
url, kwargs,
err="Unable to download data for specified date range"
)
result = cls.from_file(filepath)
result.data = result.data.truncate(start,end)
return result
@classmethod
def from_timerange(cls, timerange, **kwargs):
"""
Called by Conditional Dispatch object when time range is passed as
input to create method.
"""
url = cls._get_url_for_date_range(timerange, **kwargs)
filepath = cls._download(
url, kwargs,
err = "Unable to download data for specified date range"
)
result = cls.from_file(filepath)
result.data = result.data.truncate(timerange.start, timerange.end)
return result
@classmethod
def from_file(cls, filename):
"""Used to return Light Curve object by reading the given filename.
Parameters
----------
filename: `str`
Path of the file to be read.
Returns
-------
Lightcurve object.
"""
filename = os.path.expanduser(filename)
meta, data = cls._parse_filepath(filename)
if data.empty:
raise ValueError("No data found!")
else:
return cls(data, meta)
@classmethod
def from_url(cls, url, **kwargs):
"""
Called by Conditional Dispatch object to create Light Curve object when
        given a url. Downloads a file from the given url, attempts to read it
and returns a Light Curve object.
Parameters
----------
url : str
A url given as a string.
"""
try:
filepath = cls._download(url, kwargs)
except (urllib.error.HTTPError, urllib.error.URLError, ValueError):
err = "Unable to read location {!s}.".format(url)
raise ValueError(err)
return cls.from_file(filepath)
@classmethod
def from_data(cls, data, index=None, meta=None):
"""
Called by Conditional Dispatch object to create Light Curve object when
corresponding data is passed to create method.
Parameters
----------
data : `~numpy.ndarray`
The data array
index : `~datetime.datetime` array
The time values
"""
return cls(
pandas.DataFrame(data, index=index),
meta
)
@classmethod
def from_yesterday(cls):
"""
Called by Conditional Dispatch object if no input if given
"""
return cls.from_url(cls._get_default_uri())
@classmethod
def from_dataframe(cls, dataframe, meta=None):
"""
Called by Conditional Dispatch object to create Light Curve object when
Pandas DataFrame is passed to create method.
Parameters
----------
dataframe : `~pandas.DataFrame`
The data.
meta : `str` or `dict`
The metadata.
"""
return cls(dataframe, meta)
def plot(self, axes=None, **plot_args):
"""Plot a plot of the light curve
Parameters
----------
axes : `~matplotlib.axes.Axes` or None
If provided the image will be plotted on the given axes. Otherwise
the current axes will be used.
**plot_args : `dict`
Any additional plot arguments that should be used
when plotting.
Returns
-------
axes : `~matplotlib.axes.Axes`
The plot axes.
"""
# Get current axes
if axes is None:
axes = plt.gca()
axes = self.data.plot(ax=axes, **plot_args)
return axes
def peek(self, **kwargs):
"""Displays the light curve in a new figure.
Parameters
----------
**kwargs : `dict`
Any additional plot arguments that should be used
when plotting.
Returns
-------
fig : `~matplotlib.Figure`
A plot figure.
"""
figure = plt.figure()
self.plot(**kwargs)
figure.show()
return figure
@staticmethod
def _download(uri, kwargs,
err='Unable to download data at specified URL'):
"""Attempts to download data at the specified URI.
Parameters
----------
        uri : str
            A url to download data from.
"""
_filename = os.path.basename(uri).split("?")[0]
# user specifies a download directory
if "directory" in kwargs:
download_dir = os.path.expanduser(kwargs["directory"])
else:
download_dir = config.get("downloads", "download_dir")
# overwrite the existing file if the keyword is present
if "overwrite" in kwargs:
overwrite = kwargs["overwrite"]
else:
overwrite = False
# If the file is not already there, download it
filepath = os.path.join(download_dir, _filename)
if not(os.path.isfile(filepath)) or (overwrite and
os.path.isfile(filepath)):
try:
response = urllib.request.urlopen(uri)
except (urllib.error.HTTPError, urllib.error.URLError):
raise urllib.error.URLError(err)
with open(filepath, 'wb') as fp:
shutil.copyfileobj(response, fp)
else:
warnings.warn("Using existing file rather than downloading, use "
"overwrite=True to override.", RuntimeWarning)
return filepath
@classmethod
def _get_default_uri(cls):
"""Default data to load when none is specified."""
msg = "No default action set for {}"
raise NotImplementedError(msg.format(cls.__name__))
@classmethod
def _get_url_for_date(cls, date, **kwargs):
"""Returns a URL to the data for the specified date."""
        msg = "Date-based downloads not supported for {}"
raise NotImplementedError(msg.format(cls.__name__))
@classmethod
def _get_url_for_date_range(cls, *args, **kwargs):
"""Returns a URL to the data for the specified date range."""
        msg = "Date-range based downloads not supported for {}"
raise NotImplementedError(msg.format(cls.__name__))
@staticmethod
def _parse_csv(filepath):
"""Place holder method to parse CSV files."""
msg = "Generic CSV parsing not yet implemented for LightCurve"
raise NotImplementedError(msg)
@staticmethod
def _parse_fits(filepath):
"""Place holder method to parse FITS files."""
msg = "Generic FITS parsing not yet implemented for LightCurve"
raise NotImplementedError(msg)
@classmethod
def _parse_filepath(cls, filepath):
"""Check the file extension to see how to parse the file."""
filename, extension = os.path.splitext(filepath)
if extension.lower() in (".csv", ".txt"):
return cls._parse_csv(filepath)
else:
return cls._parse_fits(filepath)
def truncate(self, a, b=None):
"""Returns a truncated version of the lightcurve object.
Parameters
----------
a : `sunpy.time.TimeRange`
A time range to truncate to.
Returns
-------
newlc : `~sunpy.lightcurve.LightCurve`
A new lightcurve with only the selected times.
"""
if isinstance(a, TimeRange):
time_range = a
else:
time_range = TimeRange(a,b)
truncated = self.data.truncate(time_range.start, time_range.end)
return self.__class__.create(truncated, self.meta.copy())
def extract(self, column_name):
"""Returns a new lightcurve with the chosen column.
Parameters
----------
column_name : `str`
A valid column name
Returns
-------
newlc : `~sunpy.lightcurve.LightCurve`
A new lightcurve with only the selected column.
"""
# TODO allow the extract function to pick more than one column
if isinstance(self, pandas.Series):
return self
else:
return LightCurve(self.data[column_name], self.meta.copy())
def time_range(self):
"""Returns the start and end times of the LightCurve as a `~sunpy.time.TimeRange`
object"""
return TimeRange(self.data.index[0], self.data.index[-1])
def concatenate(self, otherlightcurve):
"""Concatenate another light curve. This function will check and remove
any duplicate times. It will keep the column values from the original
lightcurve to which the new lightcurve is being added.
Parameters
----------
otherlightcurve : `~sunpy.lightcurve.LightCurve`
Another lightcurve of the same type.
Returns
-------
newlc : `~sunpy.lightcurve.LightCurve`
A new lightcurve.
"""
if not isinstance(otherlightcurve, self.__class__):
raise TypeError("Lightcurve classes must match.")
meta = OrderedDict()
meta.update({str(self.data.index[0]):self.meta.copy()})
meta.update({str(otherlightcurve.data.index[0]):otherlightcurve.meta.copy()})
data = self.data.copy().append(otherlightcurve.data)
data['index'] = data.index
        # default behavior of drop_duplicates is to keep the first occurrence.
data = data.drop_duplicates(subset='index')
        data = data.set_index(data['index'])
data.drop('index', axis=1, inplace=True)
return self.__class__.create(data, meta)
# What's happening here is the following: The ConditionalDispatch is just an
# unbound callable object, that is, it does not know which class it is attached
# to. What we do against that is return a wrapper and make that a classmethod -
# thus we get the class as whose member it is called as as the first argument,
# this is why in the type signature we always have type as the first type.
# We then use run_cls, which just returns a wrapper that interprets the first
# argument as the class the function should be called of. So,
# x = run_cls("foo") returns something that turns x(cls, 1) into cls.foo(1).
# Because this has *args, **kwargs as its signature we need to disable the
# check of ConditionalDispatch that makes sure the function and the
# conditional need to have the same signature - but they still do have to.
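# A rough sketch of the mechanism (illustrative only -- the real run_cls lives
# in sunpy.util.cond_dispatch and does more bookkeeping):
#
#     def run_cls(name):
#         # wrapper that treats its first argument as the class to call into
#         return lambda cls, *args, **kwargs: getattr(cls, name)(*args, **kwargs)
#
# so LightCurve.create(time) is routed to LightCurve.from_time(time) once the
# matching condition registered below evaluates to True.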
LightCurve._cond_dispatch.add(
run_cls("from_time"),
lambda cls, time, **kwargs: is_time(time),
# type is here because the class parameter is a class,
# i.e. an instance of type (which is the base meta-class).
[type, (six.string_types, datetime, tuple)],
False
)
LightCurve._cond_dispatch.add(
run_cls("from_range"),
lambda cls, time1, time2, **kwargs: is_time(time1) and is_time(time2),
[type, (six.string_types, datetime, tuple),
(six.string_types, datetime, tuple)],
False
)
LightCurve._cond_dispatch.add(
run_cls("from_timerange"),
lambda cls, timerange, **kwargs: True,
[type, TimeRange],
False
)
LightCurve._cond_dispatch.add(
run_cls("from_file"),
lambda cls, filename: os.path.exists(os.path.expanduser(filename)),
[type, six.string_types],
False
)
LightCurve._cond_dispatch.add(
run_cls("from_url"),
lambda cls, url, **kwargs: True,
[type, six.string_types],
False
)
LightCurve._cond_dispatch.add(
run_cls("from_data"),
lambda cls, data, index=None, meta=None: True,
[type, (list, dict, np.ndarray, pandas.Series), object, object],
False
)
LightCurve._cond_dispatch.add(
run_cls("from_dataframe"),
lambda cls, dataframe, meta=None: True,
[type, pandas.DataFrame, object],
False
)
LightCurve._cond_dispatch.add(
run_cls("from_yesterday"),
lambda cls: True,
[type],
False
)
| bsd-2-clause |
Lawrence-Liu/scikit-learn | examples/covariance/plot_outlier_detection.py | 235 | 3891 | """
==========================================
Outlier detection with several methods.
==========================================
When the amount of contamination is known, this example illustrates two
different ways of performing :ref:`outlier_detection`:
- based on a robust estimator of covariance, which is assuming that the
data are Gaussian distributed and performs better than the One-Class SVM
in that case.
- using the One-Class SVM and its ability to capture the shape of the
data set, hence performing better when the data is strongly
non-Gaussian, i.e. with two well-separated clusters;
The ground truth about inliers and outliers is given by the points colors
while the orange-filled area indicates which points are reported as inliers
by each method.
Here, we assume that we know the fraction of outliers in the datasets.
Thus rather than using the 'predict' method of the objects, we set the
threshold on the decision_function to separate out the corresponding
fraction.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.font_manager
from scipy import stats
from sklearn import svm
from sklearn.covariance import EllipticEnvelope
# Example settings
n_samples = 200
outliers_fraction = 0.25
clusters_separation = [0, 1, 2]
# define two outlier detection tools to be compared
classifiers = {
"One-Class SVM": svm.OneClassSVM(nu=0.95 * outliers_fraction + 0.05,
kernel="rbf", gamma=0.1),
"robust covariance estimator": EllipticEnvelope(contamination=.1)}
# Compare given classifiers under given settings
xx, yy = np.meshgrid(np.linspace(-7, 7, 500), np.linspace(-7, 7, 500))
n_inliers = int((1. - outliers_fraction) * n_samples)
n_outliers = int(outliers_fraction * n_samples)
ground_truth = np.ones(n_samples, dtype=int)
ground_truth[-n_outliers:] = 0
# Fit the problem with varying cluster separation
for i, offset in enumerate(clusters_separation):
np.random.seed(42)
# Data generation
    X1 = 0.3 * np.random.randn(n_inliers // 2, 2) - offset
    X2 = 0.3 * np.random.randn(n_inliers // 2, 2) + offset
X = np.r_[X1, X2]
# Add outliers
X = np.r_[X, np.random.uniform(low=-6, high=6, size=(n_outliers, 2))]
# Fit the model with the One-Class SVM
plt.figure(figsize=(10, 5))
for i, (clf_name, clf) in enumerate(classifiers.items()):
# fit the data and tag outliers
clf.fit(X)
y_pred = clf.decision_function(X).ravel()
threshold = stats.scoreatpercentile(y_pred,
100 * outliers_fraction)
y_pred = y_pred > threshold
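        # decision scores at or below the outliers_fraction percentile are
        # flagged as outliers (False); the remaining points are kept as inliers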
n_errors = (y_pred != ground_truth).sum()
# plot the levels lines and the points
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
subplot = plt.subplot(1, 2, i + 1)
subplot.set_title("Outlier detection")
subplot.contourf(xx, yy, Z, levels=np.linspace(Z.min(), threshold, 7),
cmap=plt.cm.Blues_r)
a = subplot.contour(xx, yy, Z, levels=[threshold],
linewidths=2, colors='red')
subplot.contourf(xx, yy, Z, levels=[threshold, Z.max()],
colors='orange')
b = subplot.scatter(X[:-n_outliers, 0], X[:-n_outliers, 1], c='white')
c = subplot.scatter(X[-n_outliers:, 0], X[-n_outliers:, 1], c='black')
subplot.axis('tight')
subplot.legend(
[a.collections[0], b, c],
['learned decision function', 'true inliers', 'true outliers'],
prop=matplotlib.font_manager.FontProperties(size=11))
subplot.set_xlabel("%d. %s (errors: %d)" % (i + 1, clf_name, n_errors))
subplot.set_xlim((-7, 7))
subplot.set_ylim((-7, 7))
plt.subplots_adjust(0.04, 0.1, 0.96, 0.94, 0.1, 0.26)
plt.show()
| bsd-3-clause |
hrjn/scikit-learn | examples/cluster/plot_face_segmentation.py | 71 | 2839 | """
===================================================
Segmenting the picture of a raccoon face in regions
===================================================
This example uses :ref:`spectral_clustering` on a graph created from
voxel-to-voxel difference on an image to break this image into multiple
partly-homogeneous regions.
This procedure (spectral clustering on an image) is an efficient
approximate solution for finding normalized graph cuts.
There are two options to assign labels:
* with 'kmeans' spectral clustering will cluster samples in the embedding space
using a kmeans algorithm
* whereas 'discrete' will iteratively search for the closest partition
space to the embedding space.
"""
print(__doc__)
# Author: Gael Varoquaux <[email protected]>, Brian Cheung
# License: BSD 3 clause
import time
import numpy as np
import scipy as sp
import matplotlib.pyplot as plt
from sklearn.feature_extraction import image
from sklearn.cluster import spectral_clustering
from sklearn.utils.testing import SkipTest
from sklearn.utils.fixes import sp_version
if sp_version < (0, 12):
raise SkipTest("Skipping because SciPy version earlier than 0.12.0 and "
"thus does not include the scipy.misc.face() image.")
# load the raccoon face as a numpy array
try:
face = sp.face(gray=True)
except AttributeError:
# Newer versions of scipy have face in misc
from scipy import misc
face = misc.face(gray=True)
# Resize it to 10% of the original size to speed up the processing
face = sp.misc.imresize(face, 0.10) / 255.
# Convert the image into a graph with the value of the gradient on the
# edges.
graph = image.img_to_graph(face)
# Take a decreasing function of the gradient: an exponential
# The smaller beta is, the more independent the segmentation is of the
# actual image. For beta=1, the segmentation is close to a voronoi
beta = 5
eps = 1e-6
graph.data = np.exp(-beta * graph.data / graph.data.std()) + eps
# Apply spectral clustering (this step goes much faster if you have pyamg
# installed)
N_REGIONS = 25
#############################################################################
# Visualize the resulting regions
for assign_labels in ('kmeans', 'discretize'):
t0 = time.time()
labels = spectral_clustering(graph, n_clusters=N_REGIONS,
assign_labels=assign_labels, random_state=1)
t1 = time.time()
labels = labels.reshape(face.shape)
plt.figure(figsize=(5, 5))
plt.imshow(face, cmap=plt.cm.gray)
for l in range(N_REGIONS):
        plt.contour(labels == l,
                    colors=[plt.cm.spectral(l / float(N_REGIONS))])
plt.xticks(())
plt.yticks(())
title = 'Spectral clustering: %s, %.2fs' % (assign_labels, (t1 - t0))
print(title)
plt.title(title)
plt.show()
| bsd-3-clause |
abhisg/scikit-learn | examples/covariance/plot_outlier_detection.py | 235 | 3891 | """
==========================================
Outlier detection with several methods.
==========================================
When the amount of contamination is known, this example illustrates two
different ways of performing :ref:`outlier_detection`:
- based on a robust estimator of covariance, which is assuming that the
data are Gaussian distributed and performs better than the One-Class SVM
in that case.
- using the One-Class SVM and its ability to capture the shape of the
data set, hence performing better when the data is strongly
non-Gaussian, i.e. with two well-separated clusters;
The ground truth about inliers and outliers is given by the points colors
while the orange-filled area indicates which points are reported as inliers
by each method.
Here, we assume that we know the fraction of outliers in the datasets.
Thus rather than using the 'predict' method of the objects, we set the
threshold on the decision_function to separate out the corresponding
fraction.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.font_manager
from scipy import stats
from sklearn import svm
from sklearn.covariance import EllipticEnvelope
# Example settings
n_samples = 200
outliers_fraction = 0.25
clusters_separation = [0, 1, 2]
# define two outlier detection tools to be compared
classifiers = {
"One-Class SVM": svm.OneClassSVM(nu=0.95 * outliers_fraction + 0.05,
kernel="rbf", gamma=0.1),
"robust covariance estimator": EllipticEnvelope(contamination=.1)}
# Compare given classifiers under given settings
xx, yy = np.meshgrid(np.linspace(-7, 7, 500), np.linspace(-7, 7, 500))
n_inliers = int((1. - outliers_fraction) * n_samples)
n_outliers = int(outliers_fraction * n_samples)
ground_truth = np.ones(n_samples, dtype=int)
ground_truth[-n_outliers:] = 0
# Fit the problem with varying cluster separation
for i, offset in enumerate(clusters_separation):
np.random.seed(42)
# Data generation
    X1 = 0.3 * np.random.randn(n_inliers // 2, 2) - offset
    X2 = 0.3 * np.random.randn(n_inliers // 2, 2) + offset
X = np.r_[X1, X2]
# Add outliers
X = np.r_[X, np.random.uniform(low=-6, high=6, size=(n_outliers, 2))]
# Fit the model with the One-Class SVM
plt.figure(figsize=(10, 5))
for i, (clf_name, clf) in enumerate(classifiers.items()):
# fit the data and tag outliers
clf.fit(X)
y_pred = clf.decision_function(X).ravel()
threshold = stats.scoreatpercentile(y_pred,
100 * outliers_fraction)
y_pred = y_pred > threshold
n_errors = (y_pred != ground_truth).sum()
# plot the levels lines and the points
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
subplot = plt.subplot(1, 2, i + 1)
subplot.set_title("Outlier detection")
subplot.contourf(xx, yy, Z, levels=np.linspace(Z.min(), threshold, 7),
cmap=plt.cm.Blues_r)
a = subplot.contour(xx, yy, Z, levels=[threshold],
linewidths=2, colors='red')
subplot.contourf(xx, yy, Z, levels=[threshold, Z.max()],
colors='orange')
b = subplot.scatter(X[:-n_outliers, 0], X[:-n_outliers, 1], c='white')
c = subplot.scatter(X[-n_outliers:, 0], X[-n_outliers:, 1], c='black')
subplot.axis('tight')
subplot.legend(
[a.collections[0], b, c],
['learned decision function', 'true inliers', 'true outliers'],
prop=matplotlib.font_manager.FontProperties(size=11))
subplot.set_xlabel("%d. %s (errors: %d)" % (i + 1, clf_name, n_errors))
subplot.set_xlim((-7, 7))
subplot.set_ylim((-7, 7))
plt.subplots_adjust(0.04, 0.1, 0.96, 0.94, 0.1, 0.26)
plt.show()
| bsd-3-clause |
almarklein/scikit-image | doc/source/conf.py | 3 | 9573 | # -*- coding: utf-8 -*-
#
# skimage documentation build configuration file, created by
# sphinx-quickstart on Sat Aug 22 13:00:30 2009.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.append(os.path.abspath('.'))
curpath = os.path.dirname(__file__)
sys.path.append(os.path.join(curpath, '..', 'ext'))
# -- General configuration -----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.pngmath', 'numpydoc',
'sphinx.ext.autosummary', 'plot2rst',
'sphinx.ext.intersphinx']
# Determine if the matplotlib has a recent enough version of the
# plot_directive, otherwise use the local fork.
try:
from matplotlib.sphinxext import plot_directive
except ImportError:
use_matplotlib_plot_directive = False
else:
try:
use_matplotlib_plot_directive = (plot_directive.__version__ >= 2)
except AttributeError:
use_matplotlib_plot_directive = False
if use_matplotlib_plot_directive:
extensions.append('matplotlib.sphinxext.plot_directive')
else:
extensions.append('plot_directive')
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.txt'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'skimage'
copyright = '2013, the scikit-image team'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
setup_lines = open('../../setup.py').readlines()
version = 'vUndefined'
for l in setup_lines:
if l.startswith('VERSION'):
version = l.split("'")[1]
break
# The full version, including alpha/beta/rc tags.
release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
html_theme = 'scikit-image'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ['themes']
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
html_title = 'skimage v%s docs' % version
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
html_sidebars = {
'**': ['navigation.html',
'localtoc.html',
'versions.html'],
}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
#html_use_modindex = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'scikitimagedoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('contents', 'scikit-image.tex', u'The scikit-image Documentation',
u'scikit-image development team', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
latex_preamble = r'''
\usepackage{enumitem}
\setlistdepth{100}
\usepackage{amsmath}
\DeclareUnicodeCharacter{00A0}{\nobreakspace}
% In the parameters section, place a newline after the Parameters header
\usepackage{expdlist}
\let\latexdescription=\description
\def\description{\latexdescription{}{} \breaklabel}
% Make Examples/etc section headers smaller and more compact
\makeatletter
\titleformat{\paragraph}{\normalsize\py@HeaderFamily}%
{\py@TitleColor}{0em}{\py@TitleColor}{\py@NormalColor}
\titlespacing*{\paragraph}{0pt}{1ex}{0pt}
\makeatother
'''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
latex_use_modindex = False
# -----------------------------------------------------------------------------
# Numpy extensions
# -----------------------------------------------------------------------------
numpydoc_show_class_members = False
# -----------------------------------------------------------------------------
# Plots
# -----------------------------------------------------------------------------
plot_basedir = os.path.join(curpath, "plots")
plot_pre_code = """
import numpy as np
import matplotlib.pyplot as plt
np.random.seed(0)
import matplotlib
matplotlib.rcParams.update({
'font.size': 14,
'axes.titlesize': 12,
'axes.labelsize': 10,
'xtick.labelsize': 8,
'ytick.labelsize': 8,
'legend.fontsize': 10,
'figure.subplot.bottom': 0.2,
'figure.subplot.left': 0.2,
'figure.subplot.right': 0.9,
'figure.subplot.top': 0.85,
'figure.subplot.wspace': 0.4,
'text.usetex': False,
})
"""
plot_include_source = True
plot_formats = [('png', 100), ('pdf', 100)]
plot2rst_index_name = 'README'
plot2rst_rcparams = {'image.cmap' : 'gray',
'image.interpolation' : 'none'}
# -----------------------------------------------------------------------------
# intersphinx
# -----------------------------------------------------------------------------
_python_doc_base = 'http://docs.python.org/2.7'
intersphinx_mapping = {
_python_doc_base: None,
'http://docs.scipy.org/doc/numpy': None,
'http://docs.scipy.org/doc/scipy/reference': None,
'http://scikit-learn.org/stable': None
}
| bsd-3-clause |
ltiao/scikit-learn | examples/datasets/plot_iris_dataset.py | 283 | 1928 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
The Iris Dataset
=========================================================
This data sets consists of 3 different types of irises'
(Setosa, Versicolour, and Virginica) petal and sepal
length, stored in a 150x4 numpy.ndarray
The rows being the samples and the columns being:
Sepal Length, Sepal Width, Petal Length and Petal Width.
The below plot uses the first two features.
See `here <http://en.wikipedia.org/wiki/Iris_flower_data_set>`_ for more
information on this dataset.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from sklearn import datasets
from sklearn.decomposition import PCA
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features.
Y = iris.target
x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
plt.figure(2, figsize=(8, 6))
plt.clf()
# Plot the training points
plt.scatter(X[:, 0], X[:, 1], c=Y, cmap=plt.cm.Paired)
plt.xlabel('Sepal length')
plt.ylabel('Sepal width')
plt.xlim(x_min, x_max)
plt.ylim(y_min, y_max)
plt.xticks(())
plt.yticks(())
# To get a better understanding of the interaction of the dimensions
# plot the first three PCA dimensions
fig = plt.figure(1, figsize=(8, 6))
ax = Axes3D(fig, elev=-150, azim=110)
X_reduced = PCA(n_components=3).fit_transform(iris.data)
ax.scatter(X_reduced[:, 0], X_reduced[:, 1], X_reduced[:, 2], c=Y,
cmap=plt.cm.Paired)
ax.set_title("First three PCA directions")
ax.set_xlabel("1st eigenvector")
ax.w_xaxis.set_ticklabels([])
ax.set_ylabel("2nd eigenvector")
ax.w_yaxis.set_ticklabels([])
ax.set_zlabel("3rd eigenvector")
ax.w_zaxis.set_ticklabels([])
plt.show()
| bsd-3-clause |
bugobliterator/mavlink | pymavlink/tools/mavgpslag.py | 43 | 3446 | #!/usr/bin/env python
'''
calculate GPS lag from DF log
'''
import sys, time, os
from argparse import ArgumentParser
parser = ArgumentParser(description=__doc__)
parser.add_argument("--plot", action='store_true', default=False, help="plot errors")
parser.add_argument("--minspeed", type=float, default=6, help="minimum speed")
parser.add_argument("logs", metavar="LOG", nargs="+")
args = parser.parse_args()
from pymavlink import mavutil
from pymavlink.mavextra import *
from pymavlink.rotmat import Vector3, Matrix3
'''
Support having a $HOME/.pymavlink/mavextra.py for extra graphing functions
'''
home = os.getenv('HOME')
if home is not None:
extra = os.path.join(home, '.pymavlink', 'mavextra.py')
if os.path.exists(extra):
import imp
mavuser = imp.load_source('pymavlink.mavuser', extra)
from pymavlink.mavuser import *
def velocity_error(timestamps, vel, gaccel, accel_indexes, imu_dt, shift=0):
'''return summed velocity error'''
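    # Compare each GPS velocity change (dv) with the integral of earth-frame
    # acceleration over the same interval (da), taken from IMU samples shifted
    # back by `shift`; the mean horizontal mismatch is returned, so a smaller
    # value indicates better time alignment between the GPS and IMU streams.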
sum = 0
count = 0
for i in range(0, len(vel)-1):
dv = vel[i+1] - vel[i]
da = Vector3()
for idx in range(1+accel_indexes[i]-shift, 1+accel_indexes[i+1]-shift):
da += gaccel[idx]
dt1 = timestamps[i+1] - timestamps[i]
dt2 = (accel_indexes[i+1] - accel_indexes[i]) * imu_dt
da *= imu_dt
da *= dt1/dt2
#print(accel_indexes[i+1] - accel_indexes[i])
ex = abs(dv.x - da.x)
ey = abs(dv.y - da.y)
sum += 0.5*(ex+ey)
count += 1
if count == 0:
return None
return sum/count
def gps_lag(logfile):
'''work out gps velocity lag times for a log file'''
print("Processing log %s" % filename)
mlog = mavutil.mavlink_connection(filename)
timestamps = []
vel = []
gaccel = []
accel_indexes = []
ATT = None
IMU = None
dtsum = 0
dtcount = 0
while True:
m = mlog.recv_match(type=['GPS','IMU','ATT'])
if m is None:
break
t = m.get_type()
if t == 'GPS' and m.Status==3 and m.Spd>args.minspeed:
v = Vector3(m.Spd*cos(radians(m.GCrs)), m.Spd*sin(radians(m.GCrs)), m.VZ)
vel.append(v)
timestamps.append(m._timestamp)
accel_indexes.append(max(len(gaccel)-1,0))
elif t == 'ATT':
ATT = m
elif t == 'IMU':
if ATT is not None:
gaccel.append(earth_accel_df(m, ATT))
if IMU is not None:
dt = m._timestamp - IMU._timestamp
dtsum += dt
dtcount += 1
IMU = m
imu_dt = dtsum / dtcount
print("Loaded %u samples imu_dt=%.3f" % (len(vel), imu_dt))
besti = -1
besterr = 0
delays = []
errors = []
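    # try lags of 0..99 IMU samples; the shift that minimises the velocity
    # error is reported as the estimated GPS velocity lag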
for i in range(0,100):
err = velocity_error(timestamps, vel, gaccel, accel_indexes, imu_dt, shift=i)
if err is None:
break
errors.append(err)
delays.append(i*imu_dt)
if besti == -1 or err < besterr:
besti = i
besterr = err
print("Best %u (%.3fs) %f" % (besti, besti*imu_dt, besterr))
if args.plot:
import matplotlib.pyplot as plt
plt.plot(delays, errors, 'bo-')
x1,x2,y1,y2 = plt.axis()
plt.axis((x1,x2,0,y2))
plt.ylabel('Error')
plt.xlabel('Delay(s)')
plt.show()
for filename in args.logs:
gps_lag(filename)
| lgpl-3.0 |
legacysurvey/legacypipe | py/obiwan/decals_sim_mpiwrapper.py | 2 | 5026 | from __future__ import division, print_function
if __name__ == '__main__':
import matplotlib
matplotlib.use('Agg')
import os
import numpy as np
from glob import glob
import datetime
import sys
from astrometry.util.ttime import Time
from obiwan.decals_sim import get_parser,ptime
from obiwan.decals_sim import main as decals_sim_main
########
# stdouterr_redirected() is from Ted Kisner
# Every mpi task (zeropoint file) gets its own stdout file
import time
from contextlib import contextmanager
@contextmanager
def stdouterr_redirected(to=os.devnull, comm=None):
'''
Based on http://stackoverflow.com/questions/5081657
import os
with stdouterr_redirected(to=filename):
print("from Python")
os.system("echo non-Python applications are also supported")
'''
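    # os.dup2() repoints the process-level file descriptors (1 and 2) at the
    # target file, so output written by C extensions or os.system() calls is
    # captured as well as Python-level prints.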
sys.stdout.flush()
sys.stderr.flush()
fd = sys.stdout.fileno()
fde = sys.stderr.fileno()
##### assert that Python and C stdio write using the same file descriptor
####assert libc.fileno(ctypes.c_void_p.in_dll(libc, "stdout")) == fd == 1
def _redirect_stdout(to):
sys.stdout.close() # + implicit flush()
os.dup2(to.fileno(), fd) # fd writes to 'to' file
sys.stdout = os.fdopen(fd, 'w') # Python writes to fd
sys.stderr.close() # + implicit flush()
os.dup2(to.fileno(), fde) # fd writes to 'to' file
sys.stderr = os.fdopen(fde, 'w') # Python writes to fd
with os.fdopen(os.dup(fd), 'w') as old_stdout:
if (comm is None) or (comm.rank == 0):
print("Begin log redirection to {} at {}".format(to, time.asctime()))
sys.stdout.flush()
sys.stderr.flush()
pto = to
if comm is None:
if not os.path.exists(os.path.dirname(pto)):
os.makedirs(os.path.dirname(pto))
with open(pto, 'w') as file:
_redirect_stdout(to=file)
else:
pto = "{}_{}".format(to, comm.rank)
with open(pto, 'w') as file:
_redirect_stdout(to=file)
try:
yield # allow code to be run with the redirected stdout
finally:
sys.stdout.flush()
sys.stderr.flush()
_redirect_stdout(to=old_stdout) # restore stdout.
# buffering and flags such as
# CLOEXEC may be different
if comm is not None:
# concatenate per-process files
comm.barrier()
if comm.rank == 0:
with open(to, 'w') as outfile:
for p in range(comm.size):
outfile.write("================= Process {} =================\n".format(p))
fname = "{}_{}".format(to, p)
with open(fname) as infile:
outfile.write(infile.read())
os.remove(fname)
comm.barrier()
if (comm is None) or (comm.rank == 0):
print("End log redirection to {} at {}".format(to, time.asctime()))
sys.stdout.flush()
sys.stderr.flush()
if __name__ == "__main__":
# Inputs from decals_sim
    t0= Time()
    tbegin= Time()
parser= get_parser()
args = parser.parse_args()
bricks= np.loadtxt(os.path.join(os.getenv('LEGACY_SURVEY_DIR'),args.bricklist),dtype=str)
if args.nproc > 1:
from mpi4py.MPI import COMM_WORLD as comm
t0=ptime('parse-args',t0)
if args.nproc > 1:
bricks_split= np.array_split(bricks, comm.size)[comm.rank]
else:
bricks_split= np.array_split(bricks, 1)[0]
for brick in bricks_split:
# Check if already ran
#--> lrg/122/1222p257/rowstart0/
#args.update(dict(brick=brick))
d=vars(args)
d['brick']= brick
outdir= '%s/%s/%s/rowstart%d/' % \
(args.objtype,brick[:3],brick,args.rowstart)
outdir= os.path.join(os.getenv('DECALS_SIM_DIR'),outdir)
hdf5_fn= os.path.join(outdir,'%s_%s.hdf5' % (args.objtype,brick))
if not os.path.exists(hdf5_fn):
if args.nproc > 1:
# Log to unique file
outfn=os.path.join(outdir,"log.%s" % \
datetime.datetime.now().strftime("%Y-%m-%d-hr%H-min%M"))
with stdouterr_redirected(to=outfn, comm=None):
t0=ptime('before-%s' % brick,t0)
decals_sim_main(args=args)
t0=ptime('after-%s' % brick,t0)
else:
decals_sim_main(args=args)
if args.nproc > 1:
# Wait for all mpi tasks to finish
        confirm_files = comm.gather( bricks_split, root=0 )
if comm.rank == 0:
tnow= Time()
print("Done, total time = %s" % (tnow-tbegin,))
else:
tnow= Time()
print("Done, total time = %s" % (tnow-tbegin,))
| bsd-3-clause |
boknilev/dsl-char-cnn | src/cnn_multifilter_large_task1.py | 1 | 6355 | '''Character CNN code for DSL 2016 task 2
Partly based on:
https://github.com/fchollet/keras/blob/master/examples/imdb_cnn.py
'''
from __future__ import print_function
import numpy as np
np.random.seed(1337) # for reproducibility
import tensorflow as tf
tf.set_random_seed(1337) # probably not needed
from keras.preprocessing import sequence
from keras.models import Model, load_model
from keras.layers import Dense, Dropout, Activation, Flatten, Input
from keras.layers import Embedding, merge
from keras.layers import Convolution1D, MaxPooling1D
#from keras import backend as K
from keras.callbacks import EarlyStopping, ModelCheckpoint, TensorBoard
from keras.utils import np_utils
#from keras.regularizers import l1, l2, l1l2, activity_l1, activity_l2, activity_l1l2
#from keras.layers.normalization import BatchNormalization
from sklearn.metrics import confusion_matrix, accuracy_score, classification_report
from data import load_data, load_labels, get_task1_alphabet, task1_train_file, task1_test_file, task1_labels_file
alphabet = get_task1_alphabet()
# limit tensorflow memory usage
from keras.backend.tensorflow_backend import set_session
config = tf.ConfigProto()
config.gpu_options.per_process_gpu_memory_fraction = 0.4
set_session(tf.Session(config=config))
# set tensorflow random seed for reproducibility
# model file
model_file = "cnn_model_gpu_multifilter_large_morehidden_moredrop_task1.hdf5"
# set parameters:
print('Hyperparameters:')
alphabet_size = len(alphabet) + 2 # add 2, one padding and unknown chars
print('Alphabet size:', alphabet_size)
maxlen = 400
print('Max text len:', maxlen)
batch_size = 64
print('Batch size:', batch_size)
embedding_dims = 50
print('Embedding dim:', embedding_dims)
nb_filters = [50,100,150,200,200,200,200]
print('Number of filters:', nb_filters)
filter_lengths = [1,2,3,4,5,6,7]
print('Filter lengths:', filter_lengths)
hidden_dims = 500
print('Hidden dims:', hidden_dims)
nb_epoch = 30
embedding_droupout = 0.2
print('Embedding dropout:', embedding_droupout)
fc_dropout = 0.7
print('Fully-connected dropout:', fc_dropout)
print('Loading data...')
(X_train, y_train), (X_test, y_test), num_classes = load_data(task1_train_file, task1_test_file, alphabet)
print(len(X_train), 'train sequences')
print(len(X_test), 'test sequences')
print('Pad sequences (samples x time)')
X_train = sequence.pad_sequences(X_train, maxlen=maxlen)
X_test = sequence.pad_sequences(X_test, maxlen=maxlen)
print('X_train shape:', X_train.shape)
print('X_test shape:', X_test.shape)
y_train = np.array(y_train)
y_test = np.array(y_test)
# convert class vectors to binary class matrices
Y_train = np_utils.to_categorical(y_train, num_classes)
Y_test = np_utils.to_categorical(y_test, num_classes)
print('Build model...')
main_input = Input(shape=(maxlen,))
# we start off with an efficient embedding layer which maps
# our vocab indices into embedding_dims dimensions
embedding_layer = Embedding(alphabet_size,
embedding_dims,
input_length=maxlen,
dropout=embedding_droupout)
embedded = embedding_layer(main_input)
# we add a Convolution1D for each filter length, which will learn nb_filters[i]
# word group filters of size filter_lengths[i]:
convs = []
for i in xrange(len(nb_filters)):
conv_layer = Convolution1D(nb_filter=nb_filters[i],
filter_length=filter_lengths[i],
border_mode='valid',
activation='relu',
subsample_length=1)
conv_out = conv_layer(embedded)
# we use max pooling:
conv_out = MaxPooling1D(pool_length=conv_layer.output_shape[1])(conv_out)
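    # pooling over the full convolution output length reduces each filter map
    # to a single feature (max-over-time pooling)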
# We flatten the output of the conv layer,
# so that we can concat all conv outpus and add a vanilla dense layer:
conv_out = Flatten()(conv_out)
convs.append(conv_out)
# concat all conv outputs
x = merge(convs, mode='concat') if len(convs) > 1 else convs[0]
#concat = BatchNormalization()(concat)
# We add a vanilla hidden layer:
x = Dense(hidden_dims)(x)
x = Dropout(fc_dropout)(x)
x = Activation('relu')(x)
# We project onto number of classes output layer, and squash it with a softmax:
main_output = Dense(num_classes, activation='softmax')(x)
# finally, define the model
model = Model(input=main_input, output=main_output)
model.compile(loss='categorical_crossentropy',
optimizer='adam',
metrics=['accuracy'])
print('Train...')
# define callbacks
stopping = EarlyStopping(monitor='val_loss', patience=10)
checkpointer = ModelCheckpoint(filepath=model_file, verbose=1, save_best_only=True)
tensorboard = TensorBoard(log_dir="./logs-multifilter-large-morehidden-moredrop-task1", write_graph=False)
model.fit(X_train, Y_train,
batch_size=batch_size,
nb_epoch=nb_epoch,
validation_data=(X_test, Y_test),
callbacks=[stopping, checkpointer, tensorboard])
probabilities = model.predict(X_test, batch_size=batch_size)
predictions = probabilities.argmax(axis=-1)
idx2label = load_labels(task1_labels_file)
#with open('cnn_predictions.txt', 'w') as g:
# for i in xrange(len(y_test)):
# g.write(' '.join([str(v) for v in X_test[i]]) + '\t' + idx2label.get(y_test[i], 'ERROR') + '\t' + idx2label.get(predictions[i], 'ERROR') + '\n')
print('Performance of final model (not necessarily best model):')
print('========================================================')
cm = confusion_matrix(y_test, predictions)
print('Confusion matrix:')
print(cm)
acc = accuracy_score(y_test, predictions)
print('Accuracy score:')
print(acc)
labels = [label for (idx, label) in sorted(idx2label.items())]
score_report = classification_report(y_test, predictions, target_names=labels)
print('Score report:')
print(score_report)
best_model = load_model(model_file)
probabilities = best_model.predict(X_test, batch_size=batch_size)
predictions = probabilities.argmax(axis=-1)
print('Performance of best model:')
print('==========================')
cm = confusion_matrix(y_test, predictions)
print('Confusion matrix:')
print(cm)
acc = accuracy_score(y_test, predictions)
print('Accuracy score:')
print(acc)
labels = [label for (idx, label) in sorted(idx2label.items())]
score_report = classification_report(y_test, predictions, target_names=labels)
print('Score report:')
print(score_report)
| mit |
uglyboxer/linear_neuron | net-p3/lib/python3.5/site-packages/matplotlib/testing/jpl_units/UnitDblFormatter.py | 23 | 1485 | #===========================================================================
#
# UnitDblFormatter
#
#===========================================================================
"""UnitDblFormatter module containing class UnitDblFormatter."""
#===========================================================================
# Place all imports after here.
#
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import matplotlib.ticker as ticker
#
# Place all imports before here.
#===========================================================================
__all__ = [ 'UnitDblFormatter' ]
#===========================================================================
class UnitDblFormatter( ticker.ScalarFormatter ):
"""The formatter for UnitDbl data types. This allows for formatting
with the unit string.
"""
def __init__( self, *args, **kwargs ):
'The arguments are identical to matplotlib.ticker.ScalarFormatter.'
ticker.ScalarFormatter.__init__( self, *args, **kwargs )
def __call__( self, x, pos = None ):
'Return the format for tick val x at position pos'
if len(self.locs) == 0:
return ''
else:
return str(x)
def format_data_short( self, value ):
"Return the value formatted in 'short' format."
return str(value)
def format_data( self, value ):
"Return the value formatted into a string."
return str(value)
| mit |
oscarbranson/cbsyst | cbsyst/helpers.py | 1 | 8655 | import uncertainties.unumpy as unp
import numpy as np
import pandas as pd
# Helpers useful to the user
# --------------------------
def data_out(cbdat, path=None, include_constants=False):
"""
Save output from cbsyst.
Parameters
----------
cbdat : dict / Bunch
The output from Csys, Bsys, ABsys or CBsys.
path : str
The file name (and path) where you want to
save the data. If not provided, data are not
saved to a file.
The extension of the file determines the output
format. Can be 'csv', 'xls', 'html, 'tex', or 'pkl'.
include_constants : bool
If True, include pK and alpha constants in output.
Returns
-------
* pandas.DataFrame of output
* Saves file (if specified)
"""
cols = [
"pH",
"DIC",
"fCO2",
"pCO2",
"CO2",
"HCO3",
"CO3",
"TA",
"BT",
"BO3",
"BO4",
"dBT",
"dBO3",
"dBO4",
"ABT",
"ABO3",
"ABO4",
"T",
"S",
"P",
"Ca",
"Mg",
]
consts = ["K0", "K1", "K2", "KB", "KW", "KSO4", "KspA", "KspC"]
size = cbdat.pH.size
out = pd.DataFrame(index=range(size))
for c in cols:
if c in cbdat and cbdat[c] is not None:
if (np.ndim(cbdat[c]) == 1) & (cbdat[c].size == 1):
cbdat[c] = cbdat[c][0]
if c in cbdat:
out.loc[:, c] = cbdat[c]
if include_constants:
for c in consts:
if c in cbdat.Ks and cbdat.Ks[c] is not None:
if (np.ndim(cbdat.Ks[c]) == 1) & (cbdat.Ks[c].size == 1):
cbdat.Ks[c] = cbdat.Ks[c][0]
out.loc[:, "p" + c] = -np.log10(cbdat.Ks[c])
if "alphaB" in cbdat and cbdat.alphaB is not None:
if (np.ndim(cbdat.alphaB) == 1) & (cbdat.alphaB.size == 1):
cbdat.alphaB = cbdat.alphaB[0]
out.loc[:, "alphaB"] = cbdat.alphaB
if path is not None:
fmt = path.split(".")[-1]
fdict = {
"csv": "to_csv",
"html": "to_html",
"xls": "to_excel",
"pkl": "to_pickle",
"tex": "to_latex",
}
if fmt not in fdict:
raise ValueError(
(
"File extension does not match available output\n"
+ "options. Should be one of 'csv', 'html', 'xls',\n"
+ "'pkl' (pickle) or 'tex' (LaTeX)."
)
)
try:
_ = getattr(out, fdict[fmt])(path, index=None)
except TypeError:
_ = getattr(out, fdict[fmt])(path)
return out
# Programmatic helpers for code elsewhere
# ---------------------------------------
# Bunch modifies dict to allow item access using dot (.) operator
class Bunch(dict):
def __init__(self, *args, **kwds):
super(Bunch, self).__init__(*args, **kwds)
self.__dict__ = self
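# Illustrative example (added for exposition, not part of the original module):
# Bunch exposes ordinary dict keys as attributes; the values here are arbitrary.
_demo_bunch = Bunch(pH=8.1, DIC=2000.0)
assert _demo_bunch.pH == _demo_bunch["pH"]
del _demo_bunch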
def noms(*it):
"""
Return nominal_values for provided objects.
Parameters
----------
*it : n objects
"""
return [unp.nominal_values(i) for i in it]
def maxL(*it):
"""
Calculate maximum length of provided items.
Parameters
----------
*it : objects
        Items of various lengths. Only iterables
        contribute to the returned maximum.
Returns
-------
Length of longest object (int).
"""
m = set()
for i in it:
try:
m.add(len(i))
except TypeError:
pass
if len(m) > 0:
return max(m)
else:
return 1
def cast_array(*it):
"""
Recasts inputs into array of shape (len(it), maxL(*it))
"""
new = np.zeros((len(it), maxL(*it)))
for i, t in enumerate(it):
new[i, :] = t
return new
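# Illustrative example (added for exposition, not part of the original module):
# scalars are broadcast across the length of the longest iterable input.
_demo_cast = cast_array(1.0, [1.0, 2.0, 3.0])
assert _demo_cast.shape == (2, 3)
del _demo_cast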
def NnotNone(*it):
"""
    Returns the number of elements of it that are not None.
Parameters
----------
it : iterable
iterable of elements that are either None, or not None
Returns
-------
int
"""
return sum([i is not None for i in it])
# pK <--> K converters
def ch(pK):
"""
Convert pK to K
"""
return np.power(10.0, np.multiply(pK, -1.0))
def cp(K):
"""
Convert K to pK
"""
return -np.log10(K)
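# Illustrative example (added for exposition, not part of the original module):
# ch() and cp() are inverses, so a pK value survives the round trip.
_demo_pK = 8.1
assert abs(cp(ch(_demo_pK)) - _demo_pK) < 1e-9
del _demo_pK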
# Helpers for aspects of seawater chemistry
# -----------------------------------------
def prescorr(P, Tc, a0, a1, a2, b0, b1):
"""
    Calculate the pressure correction factor for thermodynamic Ks.
From Millero et al (2007, doi:10.1021/cr0503557)
Eqns 38-40
Usage:
K_corr / K_orig = [output]
Kcorr = [output] * K_orig
"""
dV = a0 + a1 * Tc + a2 * Tc ** 2
dk = (b0 + b1 * Tc) / 1000
# factor of 1000 not mentioned in Millero,
# but present in Zeebe book, and used in CO2SYS
RT = 83.1451 * (Tc + 273.15)
return np.exp((-dV + 0.5 * dk * P) * P / RT)
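# Illustrative example (added for exposition, not part of the original module):
# the a0..b1 coefficients below are arbitrary made-up numbers, used only to
# show that the correction factor reduces to 1 at the surface (P = 0).
assert np.isclose(prescorr(0.0, 25.0, 1.0, 0.1, 0.0, 0.5, 0.01), 1.0)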
def swdens(TempC, Sal):
"""
Seawater Density (kg / L) from Temp (C) and Sal (PSU)
Chapter 5, Section 4.2 of Dickson, Sabine and Christian
(2007, http://cdiac.ornl.gov/oceans/Handbook_2007.html)
Parameters
----------
TempC : array-like
        Temperature in celsius.
Sal : array-like
Salinity in PSU
Returns
-------
Density in kg / L
"""
# convert temperature to IPTS-68
T68 = (TempC + 0.0002) / 0.99975
pSMOW = (
999.842594
+ 6.793952e-2 * T68
+ -9.095290e-3 * T68 ** 2
+ 1.001685e-4 * T68 ** 3
+ -1.120083e-6 * T68 ** 4
+ 6.536332e-9 * T68 ** 5
)
A = (
8.24493e-1
+ -4.0899e-3 * T68
+ 7.6438e-5 * T68 ** 2
+ -8.2467e-7 * T68 ** 3
+ 5.3875e-9 * T68 ** 4
)
B = -5.72466e-3 + 1.0227e-4 * T68 + -1.6546e-6 * T68 ** 2
C = 4.8314e-4
return (pSMOW + A * Sal + B * Sal ** 1.5 + C * Sal ** 2) / 1000
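# Illustrative example (added for exposition, not part of the original module):
# surface seawater at 25 C and a salinity of 35 PSU comes out around 1.023 kg/L.
_demo_rho = swdens(25.0, 35.0)
assert 1.02 < _demo_rho < 1.03
del _demo_rho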
def calc_TS(Sal):
"""
Calculate total Sulphur
Morris, A. W., and Riley, J. P., Deep-Sea Research 13:699-705, 1966:
this is .02824.*Sali./35. = .0008067.*Sali
"""
a, b, c = (0.14, 96.062, 1.80655)
return (a / b) * (Sal / c) # mol/kg-SW
def calc_TF(Sal):
"""
Calculate total Fluorine
Riley, J. P., Deep-Sea Research 12:219-220, 1965:
this is .000068.*Sali./35. = .00000195.*Sali
"""
a, b, c = (0.000067, 18.998, 1.80655)
return (a / b) * (Sal / c) # mol/kg-SW
# def calc_TB(Sal):
# """
# Calculate total Boron
# Lee, Kim, Byrne, Millero, Feely, Yong-Ming Liu. 2010.
# Geochimica Et Cosmochimica Acta 74 (6): 1801-1811
# """
# a, b = (0.0004326, 35.)
# return a * Sal / b
def calc_TB(Sal):
"""
Calculate total Boron
Directly from CO2SYS:
Uppstrom, L., Deep-Sea Research 21:161-162, 1974:
this is 0.000416 * Sal/35. = 0.0000119 * Sal
TB(FF) = (0.000232 / 10.811) * (Sal / 1.80655) in mol/kg-SW
"""
a, b = (0.0004157, 35.0)
return a * Sal / b
def calc_fH(TempK, Sal):
# Same as CO2SYS
# Takahashi et al, Chapter 3 in GEOSECS Pacific Expedition,
# v. 3, 1982 (p. 80)
a, b, c, d = (1.2948, -2.036e-3, 4.607e-4, -1.475e-6)
return a + b * TempK + (c + d * TempK) * Sal ** 2
# Convert between pH scales
def calc_pH_scales(pHtot, pHfree, pHsws, pHNBS, TS, TF, TempK, Sal, Ks):
"""
Calculate pH on all scales, given one.
"""
# check if any pH scale is given.
npH = NnotNone(pHfree, pHsws, pHtot, pHNBS)
if npH == 1:
# pH scale conversions
FREEtoTOT = -np.log10((1 + TS / Ks.KSO4))
SWStoTOT = -np.log10((1 + TS / Ks.KSO4) / (1 + TS / Ks.KSO4 + TF / Ks.KF))
fH = calc_fH(TempK, Sal)
if pHtot is not None:
return {
"pHfree": pHtot - FREEtoTOT,
"pHsws": pHtot - SWStoTOT,
"pHNBS": pHtot - SWStoTOT - np.log10(fH),
}
elif pHsws is not None:
return {
"pHfree": pHsws + SWStoTOT - FREEtoTOT,
"pHtot": pHsws + SWStoTOT,
"pHNBS": pHsws - np.log10(fH),
}
elif pHfree is not None:
return {
"pHsws": pHfree + FREEtoTOT - SWStoTOT,
"pHtot": pHfree + FREEtoTOT,
"pHNBS": pHfree + FREEtoTOT - SWStoTOT - np.log10(fH),
}
elif pHNBS is not None:
return {
"pHsws": pHNBS + np.log10(fH),
"pHtot": pHNBS + np.log10(fH) + SWStoTOT,
"pHfree": pHNBS + np.log10(fH) + SWStoTOT - FREEtoTOT,
}
else:
return {}
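# Illustrative example (added for exposition, not part of the original module):
# the KSO4/KF values below are placeholders chosen only to exercise the
# conversion logic; they are not real equilibrium constants.
_demo_Ks = Bunch(KSO4=1e-1, KF=1e-3)
_demo_scales = calc_pH_scales(8.1, None, None, None, calc_TS(35.0), calc_TF(35.0),
                              298.15, 35.0, _demo_Ks)
assert set(_demo_scales) == {"pHfree", "pHsws", "pHNBS"}
del _demo_Ks, _demo_scales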
| mit |
mrshu/scikit-learn | benchmarks/bench_plot_neighbors.py | 6 | 6409 | """
Plot the scaling of the nearest neighbors algorithms with k, D, and N
"""
from time import time
import numpy as np
import pylab as pl
from matplotlib import ticker
from sklearn import neighbors, datasets
def get_data(N, D, dataset='dense'):
if dataset == 'dense':
np.random.seed(0)
return np.random.random((N, D))
elif dataset == 'digits':
X = datasets.load_digits().data
i = np.argsort(X[0])[::-1]
X = X[:, i]
return X[:N, :D]
else:
raise ValueError("invalid dataset: %s" % dataset)
def barplot_neighbors(Nrange=2 ** np.arange(1, 11),
Drange=2 ** np.arange(7),
krange=2 ** np.arange(10),
N=1000,
D=64,
k=5,
leaf_size=30,
dataset='digits'):
algorithms = ('kd_tree', 'brute', 'ball_tree')
fiducial_values = {'N': N,
'D': D,
'k': k}
#------------------------------------------------------------
# varying N
N_results_build = dict([(alg, np.zeros(len(Nrange)))
for alg in algorithms])
N_results_query = dict([(alg, np.zeros(len(Nrange)))
for alg in algorithms])
for i, NN in enumerate(Nrange):
print "N = %i (%i out of %i)" % (NN, i + 1, len(Nrange))
X = get_data(NN, D, dataset)
for algorithm in algorithms:
nbrs = neighbors.NearestNeighbors(n_neighbors=min(NN, k),
algorithm=algorithm,
leaf_size=leaf_size)
t0 = time()
nbrs.fit(X)
t1 = time()
nbrs.kneighbors(X)
t2 = time()
N_results_build[algorithm][i] = (t1 - t0)
N_results_query[algorithm][i] = (t2 - t1)
#------------------------------------------------------------
# varying D
D_results_build = dict([(alg, np.zeros(len(Drange)))
for alg in algorithms])
D_results_query = dict([(alg, np.zeros(len(Drange)))
for alg in algorithms])
for i, DD in enumerate(Drange):
print "D = %i (%i out of %i)" % (DD, i + 1, len(Drange))
X = get_data(N, DD, dataset)
for algorithm in algorithms:
nbrs = neighbors.NearestNeighbors(n_neighbors=k,
algorithm=algorithm,
leaf_size=leaf_size)
t0 = time()
nbrs.fit(X)
t1 = time()
nbrs.kneighbors(X)
t2 = time()
D_results_build[algorithm][i] = (t1 - t0)
D_results_query[algorithm][i] = (t2 - t1)
#------------------------------------------------------------
# varying k
k_results_build = dict([(alg, np.zeros(len(krange)))
for alg in algorithms])
k_results_query = dict([(alg, np.zeros(len(krange)))
for alg in algorithms])
    X = get_data(N, D, dataset)
for i, kk in enumerate(krange):
print "k = %i (%i out of %i)" % (kk, i + 1, len(krange))
for algorithm in algorithms:
nbrs = neighbors.NearestNeighbors(n_neighbors=kk,
algorithm=algorithm,
leaf_size=leaf_size)
t0 = time()
nbrs.fit(X)
t1 = time()
nbrs.kneighbors(X)
t2 = time()
k_results_build[algorithm][i] = (t1 - t0)
k_results_query[algorithm][i] = (t2 - t1)
pl.figure(figsize=(8, 11))
for (sbplt, vals, quantity,
build_time, query_time) in [(311, Nrange, 'N',
N_results_build,
N_results_query),
(312, Drange, 'D',
D_results_build,
D_results_query),
(313, krange, 'k',
k_results_build,
k_results_query)]:
ax = pl.subplot(sbplt, yscale='log')
pl.grid(True)
tick_vals = []
tick_labels = []
bottom = 10 ** np.min([min(np.floor(np.log10(build_time[alg])))
for alg in algorithms])
for i, alg in enumerate(algorithms):
xvals = 0.1 + i * (1 + len(vals)) + np.arange(len(vals))
width = 0.8
pl.bar(xvals, build_time[alg] - bottom,
width, bottom, color='r')
pl.bar(xvals, query_time[alg],
width, build_time[alg], color='b')
tick_vals += list(xvals + 0.5 * width)
tick_labels += ['%i' % val for val in vals]
pl.text((i + 0.02) / len(algorithms), 0.98, alg,
transform=ax.transAxes,
ha='left',
va='top',
bbox=dict(facecolor='w', edgecolor='w', alpha=0.5))
pl.ylabel('time (seconds)')
ax.xaxis.set_major_locator(ticker.FixedLocator(tick_vals))
ax.xaxis.set_major_formatter(ticker.FixedFormatter(tick_labels))
for label in ax.get_xticklabels():
label.set_rotation(-90)
label.set_fontsize(10)
title_string = 'Varying %s' % quantity
descr_string = ''
for s in 'NDk':
if s == quantity:
pass
else:
descr_string += '%s = %i, ' % (s, fiducial_values[s])
descr_string = descr_string[:-2]
pl.text(1.01, 0.5, title_string,
transform=ax.transAxes, rotation=-90,
ha='left', va='center', fontsize=20)
pl.text(0.99, 0.5, descr_string,
transform=ax.transAxes, rotation=-90,
ha='right', va='center')
pl.gcf().suptitle("%s data\nred = construction; blue = N-point query"
% (dataset[0].upper() + dataset[1:]),
fontsize=16)
if __name__ == '__main__':
barplot_neighbors(dataset='digits')
barplot_neighbors(dataset='dense')
pl.show()
| bsd-3-clause |
rishikksh20/scikit-learn | examples/neighbors/plot_kde_1d.py | 60 | 5120 | """
===================================
Simple 1D Kernel Density Estimation
===================================
This example uses the :class:`sklearn.neighbors.KernelDensity` class to
demonstrate the principles of Kernel Density Estimation in one dimension.
The first plot shows one of the problems with using histograms to visualize
the density of points in 1D. Intuitively, a histogram can be thought of as a
scheme in which a unit "block" is stacked above each point on a regular grid.
As the top two panels show, however, the choice of gridding for these blocks
can lead to wildly divergent ideas about the underlying shape of the density
distribution. If we instead center each block on the point it represents, we
get the estimate shown in the bottom left panel. This is a kernel density
estimation with a "top hat" kernel. This idea can be generalized to other
kernel shapes: the bottom-right panel of the first figure shows a Gaussian
kernel density estimate over the same distribution.
Scikit-learn implements efficient kernel density estimation using either
a Ball Tree or KD Tree structure, through the
:class:`sklearn.neighbors.KernelDensity` estimator. The available kernels
are shown in the second figure of this example.
The third figure compares kernel density estimates for a distribution of 100
samples in 1 dimension. Though this example uses 1D distributions, kernel
density estimation is easily and efficiently extensible to higher dimensions
as well.
"""
# Author: Jake Vanderplas <[email protected]>
#
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import norm
from sklearn.neighbors import KernelDensity
#----------------------------------------------------------------------
# Plot the progression of histograms to kernels
np.random.seed(1)
N = 20
X = np.concatenate((np.random.normal(0, 1, int(0.3 * N)),
np.random.normal(5, 1, int(0.7 * N))))[:, np.newaxis]
X_plot = np.linspace(-5, 10, 1000)[:, np.newaxis]
bins = np.linspace(-5, 10, 10)
fig, ax = plt.subplots(2, 2, sharex=True, sharey=True)
fig.subplots_adjust(hspace=0.05, wspace=0.05)
# histogram 1
ax[0, 0].hist(X[:, 0], bins=bins, fc='#AAAAFF', normed=True)
ax[0, 0].text(-3.5, 0.31, "Histogram")
# histogram 2
ax[0, 1].hist(X[:, 0], bins=bins + 0.75, fc='#AAAAFF', normed=True)
ax[0, 1].text(-3.5, 0.31, "Histogram, bins shifted")
# tophat KDE
kde = KernelDensity(kernel='tophat', bandwidth=0.75).fit(X)
log_dens = kde.score_samples(X_plot)
ax[1, 0].fill(X_plot[:, 0], np.exp(log_dens), fc='#AAAAFF')
ax[1, 0].text(-3.5, 0.31, "Tophat Kernel Density")
# Gaussian KDE
kde = KernelDensity(kernel='gaussian', bandwidth=0.75).fit(X)
log_dens = kde.score_samples(X_plot)
ax[1, 1].fill(X_plot[:, 0], np.exp(log_dens), fc='#AAAAFF')
ax[1, 1].text(-3.5, 0.31, "Gaussian Kernel Density")
for axi in ax.ravel():
axi.plot(X[:, 0], np.zeros(X.shape[0]) - 0.01, '+k')
axi.set_xlim(-4, 9)
axi.set_ylim(-0.02, 0.34)
for axi in ax[:, 0]:
axi.set_ylabel('Normalized Density')
for axi in ax[1, :]:
axi.set_xlabel('x')
#----------------------------------------------------------------------
# Plot all available kernels
X_plot = np.linspace(-6, 6, 1000)[:, None]
X_src = np.zeros((1, 1))
fig, ax = plt.subplots(2, 3, sharex=True, sharey=True)
fig.subplots_adjust(left=0.05, right=0.95, hspace=0.05, wspace=0.05)
def format_func(x, loc):
if x == 0:
return '0'
elif x == 1:
return 'h'
elif x == -1:
return '-h'
else:
return '%ih' % x
for i, kernel in enumerate(['gaussian', 'tophat', 'epanechnikov',
'exponential', 'linear', 'cosine']):
axi = ax.ravel()[i]
log_dens = KernelDensity(kernel=kernel).fit(X_src).score_samples(X_plot)
axi.fill(X_plot[:, 0], np.exp(log_dens), '-k', fc='#AAAAFF')
axi.text(-2.6, 0.95, kernel)
axi.xaxis.set_major_formatter(plt.FuncFormatter(format_func))
axi.xaxis.set_major_locator(plt.MultipleLocator(1))
axi.yaxis.set_major_locator(plt.NullLocator())
axi.set_ylim(0, 1.05)
axi.set_xlim(-2.9, 2.9)
ax[0, 1].set_title('Available Kernels')
#----------------------------------------------------------------------
# Plot a 1D density example
N = 100
np.random.seed(1)
X = np.concatenate((np.random.normal(0, 1, int(0.3 * N)),
np.random.normal(5, 1, int(0.7 * N))))[:, np.newaxis]
X_plot = np.linspace(-5, 10, 1000)[:, np.newaxis]
true_dens = (0.3 * norm(0, 1).pdf(X_plot[:, 0])
+ 0.7 * norm(5, 1).pdf(X_plot[:, 0]))
fig, ax = plt.subplots()
ax.fill(X_plot[:, 0], true_dens, fc='black', alpha=0.2,
label='input distribution')
for kernel in ['gaussian', 'tophat', 'epanechnikov']:
kde = KernelDensity(kernel=kernel, bandwidth=0.5).fit(X)
log_dens = kde.score_samples(X_plot)
ax.plot(X_plot[:, 0], np.exp(log_dens), '-',
label="kernel = '{0}'".format(kernel))
ax.text(6, 0.38, "N={0} points".format(N))
ax.legend(loc='upper left')
ax.plot(X[:, 0], -0.005 - 0.01 * np.random.random(X.shape[0]), '+k')
ax.set_xlim(-4, 9)
ax.set_ylim(-0.02, 0.4)
plt.show()
| bsd-3-clause |
socrata/arcs | arcs/launch_job.py | 1 | 16169 | """
Create a CrowdFlower job to collect relevance judments for domain, query pairs.
This script will read a file of domain, query pairs from the command-line, and collect results from
Cetera for each pair. You may optionally specify different experimental groups (eg. baseline
vs. experiment1) via the `-g` option. These should be specified as JSON strings.
Example:
python arcs/launch_job.py \
-i ~/Data/arcs/20160126.experiment_1/queries.tsv \
-g '{"name": "adjusted boost clause", "description": "Moved field boosts to 'should' clause", "params": {}}' \
-r 10 \
-D 'postgresql://username:password@hostname/dbname'
The group definition should have a name, description, and params field. The params field should be
a nested object specifying any relevant parameters of the experiment.
A full CSV is created, which contains all of the job data. Additionally, a CrowdFlower CSV is
created which corresponds precisely with the data uploaded to create the job in CrowdFlower.
All data is persisted in a Postgres database, the parameters of which are specified via the -D
option.
"""
import argparse
import pandas as pd
import logging
import psycopg2
from functools import partial
from datetime import datetime
from cetera import get_cetera_results
from crowdflower import create_job_from_copy, add_data_to_job
from crowdsourcing_utils import cleanup_description, make_dataset_sample
from db import (
find_judged_qrps, insert_incomplete_job, add_raw_group_results,
insert_unjudged_data_for_group, insert_empty_group
)
from experiment import GroupDefinition
CORE_COLUMNS = ['domain', 'query', 'result_fxf', 'result_position', 'group_id']
DISPLAY_DATA = ['name', 'link', 'description', 'sample']
CSV_COLUMNS = CORE_COLUMNS + DISPLAY_DATA
RAW_COLUMNS = ['domain', 'query', 'results', 'group_id']
logging.basicConfig(format='%(message)s', level=logging.INFO)
LOGGER = logging.getLogger(__name__)
logging.getLogger("requests").setLevel(logging.WARNING)
def _transform_cetera_result(result, result_position, num_rows, num_columns):
"""
Utility function for transforming Cetera result dictionary into something
more suitable for the crowdsourcing task. Presently, we're grabbing name,
link (ie. URL), and the first sentence of description.
Args:
result (dict): A single search result from Cetera
result_position (int): The position of the result in the result set
num_rows (int): The number of rows to show in the dataset sample
num_columns (int): The number of columns to show in the dataset sample
Returns:
A dictionary of data for each result
"""
desc = cleanup_description(result["resource"].get("description"))
domain = result["metadata"]["domain"]
fxf = result["resource"].get("id")
data_sample = make_dataset_sample(domain, fxf, num_rows, num_columns)
return {
"result_domain": domain,
"result_position": result_position,
"result_fxf": fxf,
"name": result["resource"].get("name"),
"link": result["link"],
"description": desc,
"sample": data_sample
}
def raw_results_to_dataframe(group_results, group_id, num_rows, num_columns):
"""
Add group ID to raw results tuple.
Notes:
1. We keep raw results around for posterity.
2. When domain is specified as "www.opendatanetwork.com" in the input, we replace it with
the source domain of the corresponding result
Args:
group_results (iterable): An iterable of results tuples as returned by get_cetera_results
group_id (int): An identifier for the group of results
num_rows (int): The number of rows to show in the dataset sample
num_columns (int): The number of columns to show in the dataset sample
Returns:
An iterable of result dictionaries with the required and relevant metadata
"""
LOGGER.info("Transforming raw results")
results = pd.DataFrame.from_records(
[(results + (group_id,)) for results in group_results],
columns=RAW_COLUMNS)
transform = partial(_transform_cetera_result, num_rows=num_rows, num_columns=num_columns)
results["results"] = results["results"].apply(lambda rs: [transform(r[1], r[0]) for r in rs])
results["query"] = results["query"].apply(str)
return results
def filter_previously_judged(db_conn, qrps_df):
"""
Filter a Pandas DataFrame of query-result pairs to only those that have not
previously been judged.
Args:
db_conn (psycopg2.extensions.connection): Connection to a database
qrps_df (pandas.DataFrame): A DataFrame of query, result data
Returns:
A copy of the input DataFrame filtered down to unjudged QRPs
"""
previously_judged = find_judged_qrps(db_conn)
return qrps_df[qrps_df.apply(
lambda row: (row["query"], row["result_fxf"]) not in previously_judged, axis=1)]
def expanded_results_dataframe(raw_results):
"""
Stack raw results column and join with `raw_results` dataframe such that we have one
query-result pair per row.
Args:
raw_results (pandas.DataFrame): A DataFrame with queries and results
Returns:
        An expanded DataFrame with one query-result pair per row
"""
# create new series by stacking/expanding results list
results_s = raw_results["results"].apply(lambda rs: pd.Series(rs))
# drop unnecessary index, reset index to jibe w/ raw_results_df, and create new dataframe
expanded_results_df = pd.DataFrame(
{"result": results_s.unstack().reset_index(level=0, drop=True)})
# join w/ original dataframe
expanded_results_df = raw_results.join(expanded_results_df)
# filter all rows for which there are zero results
expanded_results_df = expanded_results_df[expanded_results_df["result"].notnull()]\
.reset_index()
# add columns from fields in dict
results_dict_df = pd.DataFrame.from_records(list(expanded_results_df["result"]))
results_dict_df.set_index(expanded_results_df.index, inplace=True)
expanded_results_df = expanded_results_df.join(results_dict_df)
# drop original domain, and replace with result domain
expanded_results_df = expanded_results_df.drop("domain", 1)
expanded_results_df = expanded_results_df.rename(columns={"result_domain": "domain"})
return expanded_results_df
def collect_search_results(groups, query_domain_file, num_results, num_rows, num_columns,
output_file=None, cetera_host=None, cetera_port=None):
"""
Send queries included in `query_domain_file` to Cetera, collecting n=num_results results
for each query. Bundle everything up into a Pandas DataFrame. Write out full expanded results
to a CSV.
Args:
groups (Iterable[GroupDefinition]): An iterable of GroupDefinitions
query_domain_file (str): A 2-column tab-delimited file containing query-domain pairs
num_results (int): The number of search results to fetch for each query
num_rows (int): The number of rows to show in the dataset sample
num_columns (int): The number of columns to show in the dataset sample
output_file (str): An optional file path to which the job CSV is to be written
cetera_host (str): An optional Cetera hostname
cetera_port (int): An optional Cetera port number
Returns:
A pair containing the raw results dataframe (one row per query-domain pair) and an expanded
results dataframe where each row corresponds to a query-result pair.
"""
assert(num_results > 0)
LOGGER.info("Reading query domain pairs from {}".format(query_domain_file))
with open(query_domain_file, "r") as f:
next(f) # skip header
domain_queries = [tuple(x.strip().split('\t')[:2]) for x in f if x.strip()]
raw_results_df = pd.DataFrame(columns=RAW_COLUMNS)
# get search results for queries in each group and combine
for group in groups:
results = get_cetera_results(domain_queries, cetera_host, cetera_port,
num_results=num_results, cetera_params=group.params)
raw_results_df = pd.concat(
[raw_results_df, raw_results_to_dataframe(results, group.id, num_rows, num_columns)])
output_file = output_file or \
"{}-full.csv".format(datetime.now().strftime("%Y%m%d"))
expanded_results_df = expanded_results_dataframe(raw_results_df)[CSV_COLUMNS]
expanded_results_df.to_csv(output_file, encoding="utf-8")
return raw_results_df, expanded_results_df
def submit_job(db_conn, groups, data_df, job_to_copy, output_file=None):
"""
Create CrowdFlower job for catalog search result data in `data_df`.
    An external CrowdFlower ID is created by launching an initial empty job, using a previous job
    (including its settings and test data) as the initial state. After creating a CrowdFlower job and
    getting an external ID, we persist the job itself to the DB.
Args:
db_conn (psycopg2.extensions.connection): Connection to a database
groups (iterable): An iterable of GroupDefinitions
data_df (pandas.DataFrame): A DataFrame of query, result data
job_to_copy (int): External identifier for existing job to copy for its test data
output_file (str): Optional path to a CSV file to be created and submitted to CrowdFlower
Returns:
An Arcs Job with its external ID populated
"""
LOGGER.info("Creating CrowdFlower job")
# create empty CrowdFlower job by copying test units from existing job
job = create_job_from_copy(job_to_copy)
# filter previously judged QRPs, so that we don't pay to have them rejudged
num_rows_pre_filter = len(data_df)
data_df = filter_previously_judged(db_conn, data_df)
num_rows_post_filter = len(data_df)
LOGGER.info("Eliminated {} rows that had been previously judged".format(
num_rows_pre_filter - num_rows_post_filter))
# multiple groups may in fact produce the same results, for any given query,
# so let's ensure we're having each (query, result) pair judged only once
grouped = data_df.groupby(["query", "result_fxf"])
data_df = grouped.first().reset_index()
LOGGER.info("Eliminated {} redundant query-result rows".format(
num_rows_post_filter - len(data_df)))
output_file = output_file or \
"{}-crowdflower.csv".format(datetime.now().strftime("%Y%m%d"))
LOGGER.info("Writing out {} rows as CSV to {}".format(len(data_df), output_file))
data_df.to_csv(output_file, encoding="utf-8",
index=False, escapechar="\\", na_rep=None)
LOGGER.info("Adding data to job from CSV")
try:
add_data_to_job(job.external_id, output_file)
except Exception as e:
if hasattr(e, "message"):
msg = "Unable to send CSV to CrowdFlower: {}".format(e.message)
else:
msg = "Unable to send CSV to CrowdFlower"
LOGGER.warn(msg)
LOGGER.warn("Try uploading the data manually using the web UI.")
LOGGER.info("Job submitted.")
LOGGER.info("Job consists of {} group(s): {}".format(
len(groups), '\n'.join([str(g) for g in groups])))
LOGGER.info("https://make.crowdflower.com/jobs/{}".format(job.external_id))
return job
def _df_data_to_records(df):
return (dict(zip(df.columns, record)) for record in df.to_records(index=False))
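# Illustrative example (added for exposition, not part of the original module);
# the query and fxf values are made up.
_demo_df = pd.DataFrame({"query": ["crime"], "result_fxf": ["abcd-1234"]})
assert next(_df_data_to_records(_demo_df)) == {"query": "crime", "result_fxf": "abcd-1234"}
del _demo_df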
def persist_job_data(db_conn, job, groups, raw_data_df):
"""
Write all job data to the DB.
We write an initial incomplete job, using the external ID populated upon job submission. We
store the job unit data in a JSON blob in the DB. And we write group-specific data to the DB
without any judgments that will be updated upon job completion. The input data should be the
full DataFrame, as opposed to the deduplicated data we send to CrowdFlower.
Args:
db_conn (psycopg2.extensions.connection): Connection to a database
job (Job): An Arcs Job object with, at a minimum, its external_id set
groups (iterable): An iterable of GroupDefinitions
        raw_data_df (pandas.DataFrame): A DataFrame of raw results where each row corresponds to a
            query, and results are left in a collection
Returns:
None
"""
LOGGER.info("Writing incomplete job to DB")
job = insert_incomplete_job(db_conn, job)
# we store display data as JSON in the DB, so let's add a new column of just that
raw_data_df["payload"] = pd.Series(_df_data_to_records(raw_data_df[RAW_COLUMNS]))
# write all QRPs to DB
LOGGER.info("Writing query-result pairs to DB")
for group in groups:
# filter to just this group
raw_group_data = raw_data_df[raw_data_df["group_id"] == group.id].drop(
"group_id", axis=1, inplace=False)
# persist the raw group data for posterity
add_raw_group_results(db_conn, group.id, list(raw_group_data["payload"]))
# insert all query/result data
insert_unjudged_data_for_group(db_conn, job.id, group.id,
_df_data_to_records(raw_group_data))
# drop the payload column we added above
raw_data_df.drop("payload", axis=1, inplace=True)
def parse_args():
parser = argparse.ArgumentParser(
description='Gather domains and queries from parsed nginx logs, '
'gather the top n results from cetera')
parser.add_argument('-i', '--input_file', required=True,
                        help='Tab-delimited file of queries and domains to use as the basis for \
                        the crowdsourcing task')
parser.add_argument('-D', '--db_conn_str', required=True,
help='Database connection string')
parser.add_argument('-F', '--full_csv_file',
help='Path for full CSV file with full set of search result data')
parser.add_argument('-C', '--crowdflower_csv_file',
help='Path for filtered CSV file restricted to set query-result pairs \
requiring judgment')
parser.add_argument('-r', '--num_results', dest='num_results', type=int,
default=40,
help='Number of results per (domain, query) pair to fetch from cetera, \
default %(default)s')
parser.add_argument('--num_rows',
help='Number of rows to limit the sample to, default %(default)s',
type=int,
default=5)
parser.add_argument('--num_columns',
help='Number of columns to limit the sample to, default %(default)s',
type=int,
default=5)
parser.add_argument('-c', '--cetera_host',
help='Cetera hostname (eg. localhost) default %(default)s')
parser.add_argument('-p', '--cetera_port',
help='Cetera port, default %(default)s')
parser.add_argument('-g', '--group', dest='groups', type=GroupDefinition.from_json,
action="append")
parser.add_argument('-j', '--job_to_copy', type=int, required=True,
help='CrowdFlower job ID to copy for test data units')
return parser.parse_args()
def main():
args = parse_args()
db_conn = psycopg2.connect(args.db_conn_str)
groups = args.groups or [GroupDefinition(name="baseline", description="", params={})]
groups = [insert_empty_group(db_conn, group) for group in groups]
raw_results_df, expanded_results_df = collect_search_results(
groups, args.input_file, args.num_results, args.num_rows, args.num_columns,
args.full_csv_file, args.cetera_host, args.cetera_port)
job = submit_job(
db_conn, groups, expanded_results_df, args.job_to_copy, args.crowdflower_csv_file)
persist_job_data(db_conn, job, groups, raw_results_df)
db_conn.commit()
if __name__ == "__main__":
main()
| mit |
BorisJeremic/Real-ESSI-Examples | education_examples/_Chapter_Material_Behaviour_Examples/Interface_Models/Axial_Models/HardContact/ForceBasedHardContact/plot.py | 1 | 1440 | #!/usr/bin/python
import h5py
import matplotlib.pylab as plt
import matplotlib as mpl
import sys
import numpy as np;
plt.rcParams.update({'font.size': 24})
# set tick width
mpl.rcParams['xtick.major.size'] = 10
mpl.rcParams['xtick.major.width'] = 5
mpl.rcParams['xtick.minor.size'] = 10
mpl.rcParams['xtick.minor.width'] = 5
plt.rcParams['xtick.labelsize']=20
mpl.rcParams['ytick.major.size'] = 10
mpl.rcParams['ytick.major.width'] = 5
mpl.rcParams['ytick.minor.size'] = 10
mpl.rcParams['ytick.minor.width'] = 5
plt.rcParams['ytick.labelsize']=20
# Go over each feioutput and plot each one.
thefile = "Monotonic_Contact_Behaviour_Adding_Normal_Load.h5.feioutput";
finput = h5py.File(thefile)
plt.style.use('grayscale')
# Read the time and displacement
times = finput["time"][:]
normal_displacement = finput["/Model/Elements/Element_Outputs"][6,:]
normal_stress = -finput["/Model/Elements/Element_Outputs"][9,:];
# Plot the figure. Add labels and titles.
fig = plt.figure(figsize=(10,10))
# plt.xtitle('axes title', size = 50)
plt.plot(normal_displacement*1e3, normal_stress/1000, linewidth=4)
plt.xlabel(r"Penetration $\Delta_n$ $[mm]$")
plt.ylabel(r"Normal Stress $\sigma_n$ $[kPa]$")
plt.hold(True)
outfigname = "Axial_Response.pdf";
# Make space for and rotate the x-axis tick labels
fig.autofmt_xdate()
plt.grid(linestyle='--', linewidth='0.5', color='k')
plt.savefig(outfigname, bbox_inches='tight')
# plt.show()
| cc0-1.0 |
sanpi0205/MNIST | svm.scikit/svm_poly.scikit_random_gridsearch.py | 3 | 6760 | # ======================================
#
# script run on AWS c4.4xlarge
#
# ======================================
from __future__ import division
import os, time, math, csv
import cPickle as pickle
import matplotlib.pyplot as plt
import numpy as np
from sklearn.preprocessing import StandardScaler
from sklearn.utils import shuffle
from sklearn.svm import SVC
from sklearn.cross_validation import StratifiedKFold
from sklearn.cross_validation import train_test_split
from sklearn.grid_search import RandomizedSearchCV
from sklearn.metrics import classification_report, confusion_matrix
np.random.seed(seed=1009)
file_path = '../data/'
DESKEWED = True
if DESKEWED:
train_img_filename = 'train-images_deskewed.csv'
test_img_filename = 't10k-images_deskewed.csv'
else:
train_img_filename = 'train-images.csv'
test_img_filename = 't10k-images.csv'
train_label_filename = 'train-labels.csv'
test_label_filename = 't10k-labels.csv'
# ##Read the training images and labels
# In[113]:
# read trainX
with open(file_path + train_img_filename,'r') as f:
data_iter = csv.reader(f, delimiter = ',')
data = [data for data in data_iter]
trainX = np.ascontiguousarray(data, dtype = np.float64)
# scale trainX
scaler = StandardScaler()
scaler.fit(trainX) # find mean/std for trainX
trainX = scaler.transform(trainX) # scale trainX with trainX mean/std
# read trainY
with open(file_path + train_label_filename,'r') as f:
data_iter = csv.reader(f, delimiter = ',')
data = [data for data in data_iter]
trainY = np.ascontiguousarray(data, dtype = np.int8).ravel()
# shuffle trainX & trainY
trainX, trainY = shuffle(trainX, trainY, random_state=0)
# ##Read the test images and labels
# In[114]:
# read testX
with open(file_path + test_img_filename,'r') as f:
data_iter = csv.reader(f, delimiter = ',')
data = [data for data in data_iter]
testX = np.ascontiguousarray(data, dtype = np.float64)
# scale testX
testX = scaler.transform(testX) # scale testX with trainX mean/std
# read testY
with open(file_path + test_label_filename,'r') as f:
data_iter = csv.reader(f, delimiter = ',')
data = [data for data in data_iter]
testY = np.ascontiguousarray(data, dtype = np.int8).ravel()
# shuffle testX, testY
testX, testY = shuffle(testX, testY, random_state=0)
# #SVC Default Parameter Settings
# In[ ]:
# default parameters for SVC
# ==========================
default_svc_params = {}
default_svc_params['C'] = 1.0 # penalty
default_svc_params['class_weight'] = None # Set the parameter C of class i to class_weight[i]*C
# set to 'auto' for unbalanced classes
default_svc_params['gamma'] = 0.0 # Kernel coefficient for 'rbf', 'poly' and 'sigmoid'
default_svc_params['kernel'] = 'rbf' # 'linear', 'poly', 'rbf', 'sigmoid', 'precomputed' or a callable
default_svc_params['shrinking'] = True # Whether to use the shrinking heuristic.
default_svc_params['probability'] = False # Whether to enable probability estimates.
default_svc_params['tol'] = 0.001 # Tolerance for stopping criterion.
default_svc_params['cache_size'] = 200 # size of the kernel cache (in MB).
default_svc_params['max_iter'] = -1 # limit on iterations within solver, or -1 for no limit.
#default_svc_params['random_state'] = 1009
default_svc_params['verbose'] = False
default_svc_params['degree'] = 3 # 'poly' only
default_svc_params['coef0'] = 0.0 # 'poly' and 'sigmoid' only
# set parameters for the classifier
# =================================
svc_params = dict(default_svc_params)
svc_params['cache_size'] = 2000
#svc_params['probability'] = True
svc_params['kernel'] = 'poly'
svc_params['C'] = 1.0
svc_params['gamma'] = 0.0
svc_params['degree'] = 3
svc_params['coef0'] = 1
# the classifier
# ==============
svc_clf = SVC(**svc_params)
# ##RANDOMIZED grid search
# In[ ]:
t0 = time.time()
# search grid
# ===========
search_grid = dict(C = np.logspace( 0, 5, 50),
gamma = np.logspace(-5, -1, 50),
degree = [2, 3, 4, 5, 6, 7, 8, 9])
# for coef0, see http://stackoverflow.com/questions/21390570/scikit-learn-svc-coef0-parameter-range
# but also see http://www.eric-kim.net/eric-kim-net/posts/1/kernel_trick.html
# stratified K-Fold indices
# =========================
SKFolds = StratifiedKFold(y = trainY,
n_folds = 3,
indices = None)
# default parameters for RandomizedSearchCV
# =========================================
default_random_params = {}
default_random_params['scoring'] = None
default_random_params['fit_params'] = None # dict of parameters to pass to the fit method
default_random_params['n_jobs'] = 1 # Number of jobs to run in parallel (-1 => all cores)
default_random_params['pre_dispatch'] = '2*n_jobs' # memory is copied this many times
# reduce if you're running into memory problems
default_random_params['iid'] = True # assume the folds are iid
default_random_params['refit'] = True # Refit the best estimator with the entire dataset
default_random_params['cv'] = None
default_random_params['verbose'] = 0
#default_random_params['random_state'] = None
default_random_params['n_iter'] = 10
# set parameters for the randomized grid search
# =============================================
random_params = dict(default_random_params)
random_params['verbose'] = 1
#random_params['random_state'] = 1009
random_params['cv'] = SKFolds
random_params['n_jobs'] = -1 # -1 => use all available cores
# one core per fold
# for each point in the grid
random_params['n_iter'] = 200 # choose this many random combinations of parameters
# from 'search_grid'
# perform the randomized parameter grid search
# ============================================
random_search = RandomizedSearchCV(estimator = svc_clf,
param_distributions = search_grid,
**random_params)
random_search.fit(trainX, trainY)
pickle.dump( random_search, open( 'SVC_POLY.pkl', 'wb' ) )
print("\ntime in minutes {0:.2f}".format((time.time()-t0)/60))
| mit |
mrcslws/htmresearch | projects/capybara/sandbox/sklearn/base_example.py | 9 | 1327 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2016, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
from sklearn.neural_network import MLPClassifier
X = [[0., 0.], [1., 1.], [1.5, 1.5], [2., 2.]]
y = [0, 0, 1, 1]
clf = MLPClassifier(solver='lbfgs', alpha=1e-5,
hidden_layer_sizes=(5, 2), random_state=1)
clf.fit(X, y)
predictions = clf.predict([[0.5, 0.5], [1.7, 1.7]])
print "Predictions: %s. Expected: [0, 1]" % predictions
| agpl-3.0 |
florentchandelier/zipline | zipline/utils/calendars/exchange_calendar_tsx.py | 5 | 2687 | from datetime import time
from pandas.tseries.holiday import (
Holiday,
DateOffset,
MO,
weekend_to_monday,
GoodFriday
)
from pytz import timezone
from zipline.utils.calendars.trading_calendar import TradingCalendar, \
HolidayCalendar
from zipline.utils.calendars.us_holidays import Christmas
from zipline.utils.calendars.exchange_calendar_lse import (
WeekendChristmas,
BoxingDay,
WeekendBoxingDay
)
# New Year's Day
TSXNewYearsDay = Holiday(
"New Year's Day",
month=1,
day=1,
observance=weekend_to_monday,
)
# Ontario Family Day
FamilyDay = Holiday(
"Family Day",
month=2,
day=1,
offset=DateOffset(weekday=MO(3)),
start_date='2008-01-01',
)
# Victoria Day
VictoriaDay = Holiday(
'Victoria Day',
month=5,
day=25,
offset=DateOffset(weekday=MO(-1)),
)
# Canada Day
CanadaDay = Holiday(
'Canada Day',
month=7,
day=1,
observance=weekend_to_monday,
)
# Civic Holiday
CivicHoliday = Holiday(
'Civic Holiday',
month=8,
day=1,
offset=DateOffset(weekday=MO(1)),
)
# Labor Day
LaborDay = Holiday(
'Labor Day',
month=9,
day=1,
offset=DateOffset(weekday=MO(1)),
)
# Thanksgiving
Thanksgiving = Holiday(
'Thanksgiving',
month=10,
day=1,
offset=DateOffset(weekday=MO(2)),
)
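# Illustrative check (added for exposition, not part of the original module),
# relying on pandas' Holiday.dates() to resolve the rule-based definitions to
# concrete dates; e.g. Thanksgiving (second Monday of October) in 2016:
_demo_thanksgiving = Thanksgiving.dates('2016-01-01', '2016-12-31')
assert str(_demo_thanksgiving[0].date()) == '2016-10-10'
del _demo_thanksgiving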
class TSXExchangeCalendar(TradingCalendar):
"""
Exchange calendar for the Toronto Stock Exchange
Open Time: 9:30 AM, EST
Close Time: 4:00 PM, EST
Regularly-Observed Holidays:
- New Years Day (observed on first business day on/after)
- Family Day (Third Monday in February after 2008)
- Good Friday
- Victoria Day (Monday before May 25th)
- Canada Day (July 1st, observed first business day after)
- Civic Holiday (First Monday in August)
- Labor Day (First Monday in September)
- Thanksgiving (Second Monday in October)
- Christmas Day
- Dec. 27th (if Christmas is on a weekend)
- Boxing Day
- Dec. 28th (if Boxing Day is on a weekend)
"""
@property
def name(self):
return "TSX"
@property
def tz(self):
return timezone('Canada/Atlantic')
@property
def open_time(self):
return time(9, 31)
@property
def close_time(self):
return time(16)
@property
def regular_holidays(self):
return HolidayCalendar([
TSXNewYearsDay,
FamilyDay,
GoodFriday,
VictoriaDay,
CanadaDay,
CivicHoliday,
LaborDay,
Thanksgiving,
Christmas,
WeekendChristmas,
BoxingDay,
WeekendBoxingDay
])
| apache-2.0 |
silky/sms-tools | lectures/04-STFT/plots-code/sine-spectrum.py | 24 | 1563 | import matplotlib.pyplot as plt
import numpy as np
from scipy.fftpack import fft, ifft
N = 256
M = 63
f0 = 1000
fs = 10000
A0 = .8
hN = N/2
hM = (M+1)/2
fftbuffer = np.zeros(N)
X1 = np.zeros(N, dtype='complex')
X2 = np.zeros(N, dtype='complex')
x = A0 * np.cos(2*np.pi*f0/fs*np.arange(-hM+1,hM))
plt.figure(1, figsize=(9.5, 7))
w = np.hanning(M)
plt.subplot(2,3,1)
plt.title('w (hanning window)')
plt.plot(np.arange(-hM+1, hM), w, 'b', lw=1.5)
plt.axis([-hM+1, hM, 0, 1])
fftbuffer[:hM] = w[hM-1:]
fftbuffer[N-hM+1:] = w[:hM-1]
X = fft(fftbuffer)
X1[:hN] = X[hN:]
X1[N-hN:] = X[:hN]
mX = 20*np.log10(abs(X1))
plt.subplot(2,3,2)
plt.title('mW')
plt.plot(np.arange(-hN, hN), mX, 'r', lw=1.5)
plt.axis([-hN,hN,-40,max(mX)])
pX = np.angle(X1)
plt.subplot(2,3,3)
plt.title('pW')
plt.plot(np.arange(-hN, hN), np.unwrap(pX), 'c', lw=1.5)
plt.axis([-hN,hN,min(np.unwrap(pX)),max(np.unwrap(pX))])
plt.subplot(2,3,4)
plt.title('xw (windowed sinewave)')
xw = x*w
plt.plot(np.arange(-hM+1, hM), xw, 'b', lw=1.5)
plt.axis([-hM+1, hM, -1, 1])
fftbuffer = np.zeros(N)
fftbuffer[0:hM] = xw[hM-1:]
fftbuffer[N-hM+1:] = xw[:hM-1]
X = fft(fftbuffer)
X2[:hN] = X[hN:]
X2[N-hN:] = X[:hN]
mX2 = 20*np.log10(abs(X2))
plt.subplot(2,3,5)
plt.title('mXW')
plt.plot(np.arange(-hN, hN), mX2, 'r', lw=1.5)
plt.axis([-hN,hN,-40,max(mX)])
pX = np.angle(X2)
plt.subplot(2,3,6)
plt.title('pXW')
plt.plot(np.arange(-hN, hN), np.unwrap(pX), 'c', lw=1.5)
plt.axis([-hN,hN,min(np.unwrap(pX)),max(np.unwrap(pX))])
plt.tight_layout()
plt.savefig('sine-spectrum.png')
plt.show()
| agpl-3.0 |
austin-taylor/flare | flare/tools/tld.py | 1 | 1607 | import os
import pandas as pd
import warnings
import sys
if (sys.version_info > (3, 0)):
import pickle as pickle
else:
import cPickle as pickle
import tldextract
warnings.filterwarnings("ignore", 'This pattern has match groups')
LOCAL_DIR = os.path.dirname(os.path.realpath(__file__))
class TLDCheck(object):
"""
Parses out TLD from domains and checks it against IANA.
"""
TLD_SOURCE = 'http://data.iana.org/TLD/tlds-alpha-by-domain.txt'
def __init__(self, update=False):
"""
tld = TLDCheck()
tld.tld_lookup('google.com')
tld.tld_lookup('google.asdf')
:param update: True will update the file with the most recent source
"""
self.tld_list = os.path.join(LOCAL_DIR, '..', 'data', 'tld', 'tld_list.pkl')
self.update = update
self.tld_set = self.create_list()
def parse_tld_suffix(self, domain):
return tldextract.extract(domain).suffix
def tld_lookup(self, tld):
return self.parse_tld_suffix(tld).upper() in self.tld_set
def create_list(self):
if self.update:
tld_file = pd.read_table(self.TLD_SOURCE,
names=['tld_domain'], skiprows=1)
tld_set = frozenset(tld_file.tld_domain.dropna().tolist())
with open(self.tld_list, 'wb') as handle:
pickle.dump(tld_set, handle, protocol=pickle.HIGHEST_PROTOCOL)
return tld_set
else:
with open(self.tld_list, 'rb') as handle:
tld_set = pickle.load(handle)
return tld_set | mit |
Dwii/Master-Thesis | implementation/Cuda/lbm_simple_lbmcuda/plot_benchmark.py | 1 | 1780 | # Display a list of *.dat files in a bar chart.
# Based on an example from https://chrisalbon.com/python/matplotlib_grouped_bar_plot.html
import sys
import os
import matplotlib.pyplot as plt
import numpy as np
if len(sys.argv) < 5 or (len(sys.argv)-3) % 2:
    print("usage: python3 {0} <benchmark> <image path> (<dat1> <legend1> [<dat2> <legend2>] .. [<datN> <legendN>] ) ".format(os.path.basename(sys.argv[0])))
    exit(1)
benchmark = sys.argv[1]
image_path = sys.argv[2]
groups = (len(sys.argv)-3)/2
# Load benchark
domains = ()
nb_setups = 0
for line in open(benchmark,'r'):
n, snx, sny, snz = line.split()
domains += ( "{0}x{1}x{2}".format(snx, sny, snz), )
nb_setups += 1
# Setting the positions and width for the bars
pos = list(range(nb_setups))
width = 1 / (groups+2)
# Plotting the bars
fig, ax = plt.subplots(figsize=(10,5))
prop_iter = iter(plt.rcParams['axes.prop_cycle'])
legends = ()
maxLups = 0
for i, argi in enumerate(range(3, len(sys.argv), 2)):
mlups = np.array(list(map(float, open(sys.argv[argi])))) / 1E6
legends += ( sys.argv[argi+1], )
maxLups = max(maxLups, max(mlups))
plt.bar([p + width*i for p in pos],
mlups,
width,
alpha=0.5,
color=next(prop_iter)['color'])
# Set the y axis label
ax.set_ylabel('MLUPS')
# Set the chart's title
#ax.set_title(title)
# Set the position of the x ticks
ax.set_xticks([p + 1.5 * width for p in pos])
# Set the labels for the x ticks
ax.set_xticklabels(domains)
# Setting the x-axis and y-axis limits
plt.xlim(min(pos)-width, max(pos)+width*4)
#plt.ylim([0, maxLups] )
# Adding the legend and showing the plot
plt.legend(legends, loc='upper left')
ax.yaxis.grid()
plt.savefig(image_path)
plt.tight_layout()
plt.show() | mit |
surenkum/feature_selection | learn_forest.py | 1 | 8232 | import utils as ut
import pdb
from sklearn.ensemble import ExtraTreesRegressor
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import Imputer
from sklearn.cross_validation import cross_val_score
from sklearn.metrics import mean_squared_error
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
from sklearn.tree import export_graphviz
'''
Main function that learns a random forest based on toxicity data
Inputs: filepath: path to .xls file
toxicity: Type of toxicity we are analyzing, defaults to CNT
'''
def learn_forest(filepath,toxicity="CNT"):
# Learning a forest for predicting PMN
# Please choose PMN, MAC, LDH, TP, TCC (Total Cell Count -- only for AgNP)
features = ['PMN','MAC','LDH','TP','TCC']
for feature in features:
print "Processing the data to estimate ",feature
        # For cross-validation, we can exclude certain studies from training; for
        # example, to exclude Pauluhun, 2010, list it in author_exclude (a
        # commented-out example list appears on the next line).
        # To use the entire training data, pass author_exclude as None
author_exclude = None#[['Seiffert J',2015],['Silva R',2015]]#None
particle_exclude = None#[{'Particle Type (1=basic, 2 = citratecapped, 3 = PVPcapped)':1}]
# Getting training input and output
(train_inp,train_out,test_inp,test_out,feature_names) = ut.prepare_data_rf(filepath,\
feature,author_exclude,toxicity = toxicity,
other_excludes = particle_exclude)
# Get median values for plotting dose response curves
(median_vals, min_vals, max_vals) = get_median_min_max(train_inp)
# Training
# Imputing all the NaN values
estimator = Pipeline([("imputer",Imputer(strategy="mean")),
("forest",ExtraTreesRegressor(random_state=0))])
estimator.fit(train_inp,train_out)
# Plotting risk-contour curves
print feature_names
feature_indexes = [1,7]
plot_risk_contour(estimator,median_vals,min_vals,max_vals,\
feature_indexes,feature_names,feature)
# Plotting dose-response curves
# Testing for nano-particle size
feature_index = 0
feature_vals = [1,3]
plot_dose_response(estimator,median_vals,min_vals, max_vals, \
feature_index,feature_vals,feature_names,feature)
# Testing the model against validation if it exists or else calculating
# error on the training input itself
# See if have some test samples
if test_out.shape[0]>0:
predict_test = estimator.predict(test_inp)
# Estimating MSE score
score = mean_squared_error(test_out,predict_test)
print "MSE error for ",feature," after excluding ",author_exclude, "is : ",score
else:
predict_test = estimator.predict(train_inp)
# Estimating MSE score
score = mean_squared_error(train_out,predict_test)
print "MSE error for ",feature," with all points in the model is : ",score
# Exporting the learned graph
feature_string = np.array(['Particle Type','Mean Diameter, nm','Exposure Mode',
'Rat Species','Mean Animal Mass, g','Sex','Surface Area (m^2/g)',
'Mass Conc. (ug/m^3)','Exp. Hours','Total Dose (ug/kg)',
'Post Exp. (days)'])
print "original feature names ",feature_names
print "replaced feature names ",feature_string
# Increase font size for plots
matplotlib.rcParams.update({'font.size': 12})
# Print all the estimators
for ind,em in enumerate(estimator._final_estimator.estimators_):
export_graphviz(em,out_file="tree"+str(ind)+".dot",feature_names = feature_string)
# Plotting feature importance
feature_importance = estimator._final_estimator.feature_importances_
# make importances relative to max importance
feature_importance = 100.0 * (feature_importance / feature_importance.max())
sorted_idx = np.argsort(feature_importance)
pos = np.arange(sorted_idx.shape[0]) + .5
plt.barh(pos, feature_importance[sorted_idx], align='center')
plt.yticks(pos, feature_string[sorted_idx])
plt.xlabel('Relative Importance')
plt.title('Variable Importance for feature '+feature)
plt.show()
# Get the median, minimum and maximum values of all the input dimensions
def get_median_min_max(train_inp):
# Getting the median values of the entire data
median_vals = np.zeros((train_inp.shape[1],))
min_vals = np.zeros((train_inp.shape[1],))
max_vals = np.zeros((train_inp.shape[1],))
for i in range(train_inp.shape[1]):
median_vals[i] = np.median(train_inp[~np.isnan(train_inp[:,i]),i])
min_vals[i] = np.nanmin(train_inp[:,i])
max_vals[i] = np.nanmax(train_inp[:,i])
return (median_vals,min_vals,max_vals)
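# Illustrative example (added for exposition, not part of the original module):
# column-wise median/min/max are computed while ignoring NaN entries.
_demo_inp = np.array([[1.0, np.nan], [3.0, 2.0], [5.0, 4.0]])
_demo_med, _demo_min, _demo_max = get_median_min_max(_demo_inp)
assert np.allclose(_demo_med, [3.0, 3.0])
assert np.allclose(_demo_min, [1.0, 2.0]) and np.allclose(_demo_max, [5.0, 4.0])
del _demo_inp, _demo_med, _demo_min, _demo_max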
'''
Requires the estimator, median values, two different feature indexes
to plot risk contours curves
'''
def plot_risk_contour(estimator,median_vals,min_vals,max_vals,\
feature_indexes,feature_names,target_feature):
assert (len(feature_indexes)==2), "Need 2 feature indexes to plot risk contour"
# Plotting all the output values from the curve
# Divide the minimum and maximum values in 20 points range
origin = 'lower'
cmap = plt.cm.get_cmap("rainbow")
cmap.set_under("magenta")
cmap.set_over("yellow")
num_points = 20
x = np.arange(min_vals[feature_indexes[0]],max_vals[feature_indexes[0]],\
(max_vals[feature_indexes[0]]-min_vals[feature_indexes[0]])/(num_points*1.0))
y = np.arange(min_vals[feature_indexes[1]],max_vals[feature_indexes[1]],\
(max_vals[feature_indexes[1]]-min_vals[feature_indexes[1]])/(num_points*1.0))
X,Y = np.meshgrid(x,y)
Z = np.zeros((X.shape))
for i in range(X.shape[0]):
for j in range(X.shape[1]):
# Get current input values
inp_feature = median_vals
inp_feature[feature_indexes[0]] = X[i,j] # fill-in first feature
inp_feature[feature_indexes[1]] = Y[i,j] # fill-in second feature
Z[i,j] = estimator.predict(inp_feature.reshape(1,-1))[0]
# Plotting the contour
plt.figure()
CS = plt.contourf(X, Y, Z, 10,
#[-1, -0.1, 0, 0.1],
#alpha=0.5,
cmap=cmap,
origin=origin)
plt.xlabel(feature_names[feature_indexes[0]])
plt.ylabel(feature_names[feature_indexes[1]])
cbar = plt.colorbar(CS)
cbar.ax.set_ylabel(target_feature)
plt.show()
'''
Requires the estimator, median values, two different feature indexes
to plot dose response curves
'''
def plot_dose_response(estimator,median_vals,min_vals, max_vals, \
feature_index,feature_vals,feature_names,target_feature):
# Plotting all the output values from the curve
# Divide the minimum and maximum values in 20 points range
# Total dose is 9th index
num_points = 20
if (abs(min_vals[9]-max_vals[9])<2):
x = np.arange(100,1000,900.0/20)
else:
x = np.arange(min_vals[9],max_vals[9],\
(max_vals[9]-min_vals[9])/(num_points*1.0))
plot_response = np.zeros((x.shape[0],len(feature_vals)))
for i in range(x.shape[0]):
for j in range(len(feature_vals)):
# Get current input values
inp_feature = median_vals
inp_feature[9] = x[i] # 9th index is total dose
inp_feature[feature_index] = feature_vals[j] # fill-in second feature
plot_response[i,j] = estimator.predict(inp_feature.reshape(1,-1))[0]
# Plotting the contour
plt.figure()
colors = ['r','g','b','y','k']
for j in range(len(feature_vals)):
plt.plot(x,plot_response[:,j],linewidth=3.0,color=colors[j])
plt.xlabel('Total Dose')
plt.ylabel(target_feature)
plt.show()
if __name__=="__main__":
# filepath = './data/Carbon_Nanotube_Pulmonary_Toxicity_Data_Set_20120313.xls'
filepath = './data/Toxicity Measurements -- Meta Analysis.xlsx'
toxicity = "AgNP" # Use "CNT" for analyzing Carbon Nano toxicity
data = learn_forest(filepath,toxicity)
| gpl-3.0 |
yavalvas/yav_com | build/matplotlib/doc/mpl_examples/statistics/bxp_demo.py | 3 | 2753 | """
Demo of the new boxplot drawer function
"""
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cbook as cbook
# fake data
np.random.seed(937)
data = np.random.lognormal(size=(37, 4), mean=1.5, sigma=1.75)
labels = list('ABCD')
# compute the boxplot stats
stats = cbook.boxplot_stats(data, labels=labels, bootstrap=10000)
# After we've computed the stats, we can go through and change anything.
# Just to prove it, I'll set the median of each set to the median of all
# the data, and double the means
for n in range(len(stats)):
stats[n]['med'] = np.median(data)
stats[n]['mean'] *= 2
print(stats[0].keys())
fs = 10 # fontsize
# demonstrate how to toggle the display of different elements:
fig, axes = plt.subplots(nrows=2, ncols=3, figsize=(6,6))
axes[0, 0].bxp(stats)
axes[0, 0].set_title('Default', fontsize=fs)
axes[0, 1].bxp(stats, showmeans=True)
axes[0, 1].set_title('showmeans=True', fontsize=fs)
axes[0, 2].bxp(stats, showmeans=True, meanline=True)
axes[0, 2].set_title('showmeans=True,\nmeanline=True', fontsize=fs)
axes[1, 0].bxp(stats, showbox=False, showcaps=False)
axes[1, 0].set_title('Tufte Style\n(showbox=False,\nshowcaps=False)', fontsize=fs)
axes[1, 1].bxp(stats, shownotches=True)
axes[1, 1].set_title('notch=True', fontsize=fs)
axes[1, 2].bxp(stats, showfliers=False)
axes[1, 2].set_title('showfliers=False', fontsize=fs)
for ax in axes.flatten():
ax.set_yscale('log')
ax.set_yticklabels([])
fig.subplots_adjust(hspace=0.4)
plt.show()
# demonstrate how to customize the display different elements:
boxprops = dict(linestyle='--', linewidth=3, color='darkgoldenrod')
flierprops = dict(marker='o', markerfacecolor='green', markersize=12,
linestyle='none')
medianprops = dict(linestyle='-.', linewidth=2.5, color='firebrick')
meanpointprops = dict(marker='D', markeredgecolor='black',
markerfacecolor='firebrick')
meanlineprops = dict(linestyle='--', linewidth=2.5, color='purple')
fig, axes = plt.subplots(nrows=2, ncols=2, figsize=(6,6))
axes[0, 0].bxp(stats, boxprops=boxprops)
axes[0, 0].set_title('Custom boxprops', fontsize=fs)
axes[0, 1].bxp(stats, flierprops=flierprops, medianprops=medianprops)
axes[0, 1].set_title('Custom medianprops\nand flierprops', fontsize=fs)
axes[1, 0].bxp(stats, meanprops=meanpointprops, meanline=False,
showmeans=True)
axes[1, 0].set_title('Custom mean\nas point', fontsize=fs)
axes[1, 1].bxp(stats, meanprops=meanlineprops, meanline=True, showmeans=True)
axes[1, 1].set_title('Custom mean\nas line', fontsize=fs)
for ax in axes.flatten():
ax.set_yscale('log')
ax.set_yticklabels([])
fig.suptitle("I never said they'd be pretty")
fig.subplots_adjust(hspace=0.4)
plt.show()
| mit |
massmutual/scikit-learn | sklearn/cluster/bicluster.py | 211 | 19443 | """Spectral biclustering algorithms.
Authors : Kemal Eren
License: BSD 3 clause
"""
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy.sparse import dia_matrix
from scipy.sparse import issparse
from . import KMeans, MiniBatchKMeans
from ..base import BaseEstimator, BiclusterMixin
from ..externals import six
from ..utils.arpack import eigsh, svds
from ..utils.extmath import (make_nonnegative, norm, randomized_svd,
safe_sparse_dot)
from ..utils.validation import assert_all_finite, check_array
__all__ = ['SpectralCoclustering',
'SpectralBiclustering']
def _scale_normalize(X):
"""Normalize ``X`` by scaling rows and columns independently.
Returns the normalized matrix and the row and column scaling
factors.
"""
X = make_nonnegative(X)
row_diag = np.asarray(1.0 / np.sqrt(X.sum(axis=1))).squeeze()
col_diag = np.asarray(1.0 / np.sqrt(X.sum(axis=0))).squeeze()
row_diag = np.where(np.isnan(row_diag), 0, row_diag)
col_diag = np.where(np.isnan(col_diag), 0, col_diag)
if issparse(X):
n_rows, n_cols = X.shape
r = dia_matrix((row_diag, [0]), shape=(n_rows, n_rows))
c = dia_matrix((col_diag, [0]), shape=(n_cols, n_cols))
an = r * X * c
else:
an = row_diag[:, np.newaxis] * X * col_diag
return an, row_diag, col_diag
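# Illustrative example (added for exposition, not part of the original module):
# the returned row factors are 1/sqrt of the row sums of the nonnegative matrix.
_demo_X = np.array([[1.0, 3.0], [2.0, 2.0]])
_demo_an, _demo_r, _demo_c = _scale_normalize(_demo_X)
assert np.allclose(_demo_r, 1.0 / np.sqrt(_demo_X.sum(axis=1)))
del _demo_X, _demo_an, _demo_r, _demo_c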
def _bistochastic_normalize(X, max_iter=1000, tol=1e-5):
"""Normalize rows and columns of ``X`` simultaneously so that all
rows sum to one constant and all columns sum to a different
constant.
"""
# According to paper, this can also be done more efficiently with
# deviation reduction and balancing algorithms.
X = make_nonnegative(X)
X_scaled = X
dist = None
for _ in range(max_iter):
X_new, _, _ = _scale_normalize(X_scaled)
if issparse(X):
            dist = norm(X_scaled.data - X_new.data)
else:
dist = norm(X_scaled - X_new)
X_scaled = X_new
if dist is not None and dist < tol:
break
return X_scaled
def _log_normalize(X):
"""Normalize ``X`` according to Kluger's log-interactions scheme."""
X = make_nonnegative(X, min_value=1)
if issparse(X):
raise ValueError("Cannot compute log of a sparse matrix,"
" because log(x) diverges to -infinity as x"
" goes to 0.")
L = np.log(X)
row_avg = L.mean(axis=1)[:, np.newaxis]
col_avg = L.mean(axis=0)
avg = L.mean()
return L - row_avg - col_avg + avg
class BaseSpectral(six.with_metaclass(ABCMeta, BaseEstimator,
BiclusterMixin)):
"""Base class for spectral biclustering."""
@abstractmethod
def __init__(self, n_clusters=3, svd_method="randomized",
n_svd_vecs=None, mini_batch=False, init="k-means++",
n_init=10, n_jobs=1, random_state=None):
self.n_clusters = n_clusters
self.svd_method = svd_method
self.n_svd_vecs = n_svd_vecs
self.mini_batch = mini_batch
self.init = init
self.n_init = n_init
self.n_jobs = n_jobs
self.random_state = random_state
def _check_parameters(self):
legal_svd_methods = ('randomized', 'arpack')
if self.svd_method not in legal_svd_methods:
raise ValueError("Unknown SVD method: '{0}'. svd_method must be"
" one of {1}.".format(self.svd_method,
legal_svd_methods))
def fit(self, X):
"""Creates a biclustering for X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
"""
X = check_array(X, accept_sparse='csr', dtype=np.float64)
self._check_parameters()
self._fit(X)
def _svd(self, array, n_components, n_discard):
"""Returns first `n_components` left and right singular
vectors u and v, discarding the first `n_discard`.
"""
if self.svd_method == 'randomized':
kwargs = {}
if self.n_svd_vecs is not None:
kwargs['n_oversamples'] = self.n_svd_vecs
u, _, vt = randomized_svd(array, n_components,
random_state=self.random_state,
**kwargs)
elif self.svd_method == 'arpack':
u, _, vt = svds(array, k=n_components, ncv=self.n_svd_vecs)
if np.any(np.isnan(vt)):
# some eigenvalues of A * A.T are negative, causing
# sqrt() to be np.nan. This causes some vectors in vt
# to be np.nan.
_, v = eigsh(safe_sparse_dot(array.T, array),
ncv=self.n_svd_vecs)
vt = v.T
if np.any(np.isnan(u)):
_, u = eigsh(safe_sparse_dot(array, array.T),
ncv=self.n_svd_vecs)
assert_all_finite(u)
assert_all_finite(vt)
u = u[:, n_discard:]
vt = vt[n_discard:]
return u, vt.T
def _k_means(self, data, n_clusters):
if self.mini_batch:
model = MiniBatchKMeans(n_clusters,
init=self.init,
n_init=self.n_init,
random_state=self.random_state)
else:
model = KMeans(n_clusters, init=self.init,
n_init=self.n_init, n_jobs=self.n_jobs,
random_state=self.random_state)
model.fit(data)
centroid = model.cluster_centers_
labels = model.labels_
return centroid, labels
class SpectralCoclustering(BaseSpectral):
"""Spectral Co-Clustering algorithm (Dhillon, 2001).
Clusters rows and columns of an array `X` to solve the relaxed
normalized cut of the bipartite graph created from `X` as follows:
the edge between row vertex `i` and column vertex `j` has weight
`X[i, j]`.
The resulting bicluster structure is block-diagonal, since each
row and each column belongs to exactly one bicluster.
Supports sparse matrices, as long as they are nonnegative.
Read more in the :ref:`User Guide <spectral_coclustering>`.
Parameters
----------
n_clusters : integer, optional, default: 3
The number of biclusters to find.
svd_method : string, optional, default: 'randomized'
Selects the algorithm for finding singular vectors. May be
'randomized' or 'arpack'. If 'randomized', use
:func:`sklearn.utils.extmath.randomized_svd`, which may be faster
for large matrices. If 'arpack', use
:func:`sklearn.utils.arpack.svds`, which is more accurate, but
possibly slower in some cases.
n_svd_vecs : int, optional, default: None
Number of vectors to use in calculating the SVD. Corresponds
to `ncv` when `svd_method=arpack` and `n_oversamples` when
        `svd_method` is 'randomized'.
mini_batch : bool, optional, default: False
Whether to use mini-batch k-means, which is faster but may get
different results.
init : {'k-means++', 'random' or an ndarray}
Method for initialization of k-means algorithm; defaults to
'k-means++'.
n_init : int, optional, default: 10
Number of random initializations that are tried with the
k-means algorithm.
If mini-batch k-means is used, the best initialization is
chosen and the algorithm runs once. Otherwise, the algorithm
is run for each initialization and the best solution chosen.
n_jobs : int, optional, default: 1
The number of jobs to use for the computation. This works by breaking
down the pairwise matrix into n_jobs even slices and computing them in
parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
random_state : int seed, RandomState instance, or None (default)
A pseudo random number generator used by the K-Means
initialization.
Attributes
----------
rows_ : array-like, shape (n_row_clusters, n_rows)
Results of the clustering. `rows[i, r]` is True if
cluster `i` contains row `r`. Available only after calling ``fit``.
columns_ : array-like, shape (n_column_clusters, n_columns)
Results of the clustering, like `rows`.
row_labels_ : array-like, shape (n_rows,)
The bicluster label of each row.
column_labels_ : array-like, shape (n_cols,)
The bicluster label of each column.
References
----------
* Dhillon, Inderjit S, 2001. `Co-clustering documents and words using
bipartite spectral graph partitioning
<http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.140.3011>`__.
"""
def __init__(self, n_clusters=3, svd_method='randomized',
n_svd_vecs=None, mini_batch=False, init='k-means++',
n_init=10, n_jobs=1, random_state=None):
super(SpectralCoclustering, self).__init__(n_clusters,
svd_method,
n_svd_vecs,
mini_batch,
init,
n_init,
n_jobs,
random_state)
def _fit(self, X):
normalized_data, row_diag, col_diag = _scale_normalize(X)
n_sv = 1 + int(np.ceil(np.log2(self.n_clusters)))
u, v = self._svd(normalized_data, n_sv, n_discard=1)
z = np.vstack((row_diag[:, np.newaxis] * u,
col_diag[:, np.newaxis] * v))
_, labels = self._k_means(z, self.n_clusters)
n_rows = X.shape[0]
self.row_labels_ = labels[:n_rows]
self.column_labels_ = labels[n_rows:]
self.rows_ = np.vstack(self.row_labels_ == c
for c in range(self.n_clusters))
self.columns_ = np.vstack(self.column_labels_ == c
for c in range(self.n_clusters))
class SpectralBiclustering(BaseSpectral):
"""Spectral biclustering (Kluger, 2003).
Partitions rows and columns under the assumption that the data has
an underlying checkerboard structure. For instance, if there are
two row partitions and three column partitions, each row will
belong to three biclusters, and each column will belong to two
biclusters. The outer product of the corresponding row and column
label vectors gives this checkerboard structure.
Read more in the :ref:`User Guide <spectral_biclustering>`.
Parameters
----------
n_clusters : integer or tuple (n_row_clusters, n_column_clusters)
The number of row and column clusters in the checkerboard
structure.
method : string, optional, default: 'bistochastic'
Method of normalizing and converting singular vectors into
biclusters. May be one of 'scale', 'bistochastic', or 'log'.
The authors recommend using 'log'. If the data is sparse,
however, log normalization will not work, which is why the
default is 'bistochastic'. CAUTION: if `method='log'`, the
data must not be sparse.
n_components : integer, optional, default: 6
Number of singular vectors to check.
n_best : integer, optional, default: 3
Number of best singular vectors to which to project the data
for clustering.
svd_method : string, optional, default: 'randomized'
Selects the algorithm for finding singular vectors. May be
'randomized' or 'arpack'. If 'randomized', uses
`sklearn.utils.extmath.randomized_svd`, which may be faster
for large matrices. If 'arpack', uses
`sklearn.utils.arpack.svds`, which is more accurate, but
possibly slower in some cases.
n_svd_vecs : int, optional, default: None
Number of vectors to use in calculating the SVD. Corresponds
to `ncv` when `svd_method=arpack` and `n_oversamples` when
        `svd_method` is 'randomized'.
mini_batch : bool, optional, default: False
Whether to use mini-batch k-means, which is faster but may get
different results.
init : {'k-means++', 'random' or an ndarray}
Method for initialization of k-means algorithm; defaults to
'k-means++'.
n_init : int, optional, default: 10
Number of random initializations that are tried with the
k-means algorithm.
If mini-batch k-means is used, the best initialization is
chosen and the algorithm runs once. Otherwise, the algorithm
is run for each initialization and the best solution chosen.
n_jobs : int, optional, default: 1
The number of jobs to use for the computation. This works by breaking
down the pairwise matrix into n_jobs even slices and computing them in
parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
random_state : int seed, RandomState instance, or None (default)
A pseudo random number generator used by the K-Means
initialization.
Attributes
----------
rows_ : array-like, shape (n_row_clusters, n_rows)
Results of the clustering. `rows[i, r]` is True if
cluster `i` contains row `r`. Available only after calling ``fit``.
columns_ : array-like, shape (n_column_clusters, n_columns)
Results of the clustering, like `rows`.
row_labels_ : array-like, shape (n_rows,)
Row partition labels.
column_labels_ : array-like, shape (n_cols,)
Column partition labels.
References
----------
* Kluger, Yuval, et. al., 2003. `Spectral biclustering of microarray
data: coclustering genes and conditions
<http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.135.1608>`__.
"""
def __init__(self, n_clusters=3, method='bistochastic',
n_components=6, n_best=3, svd_method='randomized',
n_svd_vecs=None, mini_batch=False, init='k-means++',
n_init=10, n_jobs=1, random_state=None):
super(SpectralBiclustering, self).__init__(n_clusters,
svd_method,
n_svd_vecs,
mini_batch,
init,
n_init,
n_jobs,
random_state)
self.method = method
self.n_components = n_components
self.n_best = n_best
def _check_parameters(self):
super(SpectralBiclustering, self)._check_parameters()
legal_methods = ('bistochastic', 'scale', 'log')
if self.method not in legal_methods:
raise ValueError("Unknown method: '{0}'. method must be"
" one of {1}.".format(self.method, legal_methods))
try:
int(self.n_clusters)
except TypeError:
try:
r, c = self.n_clusters
int(r)
int(c)
except (ValueError, TypeError):
raise ValueError("Incorrect parameter n_clusters has value:"
" {}. It should either be a single integer"
" or an iterable with two integers:"
" (n_row_clusters, n_column_clusters)")
if self.n_components < 1:
raise ValueError("Parameter n_components must be greater than 0,"
" but its value is {}".format(self.n_components))
if self.n_best < 1:
raise ValueError("Parameter n_best must be greater than 0,"
" but its value is {}".format(self.n_best))
if self.n_best > self.n_components:
raise ValueError("n_best cannot be larger than"
" n_components, but {} > {}"
"".format(self.n_best, self.n_components))
def _fit(self, X):
n_sv = self.n_components
if self.method == 'bistochastic':
normalized_data = _bistochastic_normalize(X)
n_sv += 1
elif self.method == 'scale':
normalized_data, _, _ = _scale_normalize(X)
n_sv += 1
elif self.method == 'log':
normalized_data = _log_normalize(X)
n_discard = 0 if self.method == 'log' else 1
u, v = self._svd(normalized_data, n_sv, n_discard)
ut = u.T
vt = v.T
try:
n_row_clusters, n_col_clusters = self.n_clusters
except TypeError:
n_row_clusters = n_col_clusters = self.n_clusters
best_ut = self._fit_best_piecewise(ut, self.n_best,
n_row_clusters)
best_vt = self._fit_best_piecewise(vt, self.n_best,
n_col_clusters)
self.row_labels_ = self._project_and_cluster(X, best_vt.T,
n_row_clusters)
self.column_labels_ = self._project_and_cluster(X.T, best_ut.T,
n_col_clusters)
self.rows_ = np.vstack(self.row_labels_ == label
for label in range(n_row_clusters)
for _ in range(n_col_clusters))
self.columns_ = np.vstack(self.column_labels_ == label
for _ in range(n_row_clusters)
for label in range(n_col_clusters))
def _fit_best_piecewise(self, vectors, n_best, n_clusters):
"""Find the ``n_best`` vectors that are best approximated by piecewise
constant vectors.
The piecewise vectors are found by k-means; the best is chosen
according to Euclidean distance.
"""
def make_piecewise(v):
centroid, labels = self._k_means(v.reshape(-1, 1), n_clusters)
return centroid[labels].ravel()
piecewise_vectors = np.apply_along_axis(make_piecewise,
axis=1, arr=vectors)
dists = np.apply_along_axis(norm, axis=1,
arr=(vectors - piecewise_vectors))
result = vectors[np.argsort(dists)[:n_best]]
return result
def _project_and_cluster(self, data, vectors, n_clusters):
"""Project ``data`` to ``vectors`` and cluster the result."""
projected = safe_sparse_dot(data, vectors)
_, labels = self._k_means(projected, n_clusters)
return labels
| bsd-3-clause |
wmunters/py4sp | auxiliary/gust.py | 1 | 1999 | import matplotlib.pyplot as plt
import numpy as np
from scipy.special import erf, erfinv
plt.close('all')
z0 = 10**(-4)
zhub = 0.1
D = 0.1
H = 1
Hh = 1
Nz = 144
# Generate the base profile
uhub = 8.5
kappa = 0.4
utau = uhub*kappa/(np.log(10**3))
z = np.linspace(0, 1, num=Nz*2+1)
zcc = z[1::2]
ubase = utau/kappa*np.log(zcc/z0)
# Generate the gust profile
eps = 2*erfinv(.99)
alpha = zhub - D/2
beta = zhub + D/2
deltag = 2*D
Ug = 5
# Model spatial behavior
fz = (zcc/zhub*np.exp(1-zcc/zhub))**2
#fz = Ug*erf(eps*zcc/2/alpha)
# Model temporal behavior with a cosine
Tend = 1
time = np.linspace(0, Tend,num=100)
Tstart = 0.1
Tstop = 0.5
gt = np.zeros(time.size)
for i, ti in enumerate(time):
if ti>=Tstart and ti<=Tstop:
        gt[i] = 0.5*(1 - np.cos(2*np.pi*(ti-Tstart)/(Tstop-Tstart)))  # 0.5 avoids integer division under Python 2
# Put space and time together
ugust = np.zeros((zcc.size, time.size))
for i, gi in enumerate(gt):
ugust[:,i] = Ug*fz*gt[i]
ucorr = np.zeros(ugust.shape)
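# Descriptive note (added): the correction profile below appears intended to
# cancel the extra mass flux introduced by the gust. At every time step Uc is
# chosen so that sum(ucorr[:, i]) == -sum(ugust[:, i]), with the deficit
# distributed over the exponential shape function hz.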
hz = np.exp(-zcc/Hh)-1
for i, ti in enumerate(time):
sumh = np.sum(hz)
sumgust = np.sum(ugust[:,i])
Uc = sumgust/sumh
ucorr[:,i] = -Uc*hz
plt.figure()
for i in range(time.size):
plt.clf()
plt.subplot(131)
u = ubase + ugust[:,i] + ucorr[:,i]
plt.plot(u, zcc,'b')
plt.plot(ubase, zcc,'k')
plt.xlim((4, 16))
plt.xlabel('U [m/s]')
plt.ylabel('z/H')
plt.axhline(y=0.05, linestyle=':', color='k')
plt.axhline(y=0.15, linestyle=':', color='k')
plt.subplot(132)
plt.plot(ugust[:,i],zcc,'r')
plt.xlim((-Ug,Ug))
plt.xlabel('U_gust [m/s]')
plt.ylabel('z/H')
plt.axhline(y=0.05, linestyle=':', color='k')
plt.axhline(y=0.15, linestyle=':', color='k')
plt.subplot(133)
plt.plot(ucorr[:,i],zcc,'g')
plt.xlim((-Ug,Ug))
plt.xlabel('U_corr [m/s]')
plt.ylabel('z/H')
plt.suptitle('T = '+ str(time[i]))
plt.axhline(y=0.05, linestyle=':', color='k')
plt.axhline(y=0.15, linestyle=':', color='k')
plt.pause(0.05)
print(np.sum(u))
| gpl-2.0 |
ikki407/stacking | test/test_binary_class/scripts/binary.py | 1 | 7498 | # -*- coding: utf-8 -*-
# ----- for creating dataset -----
from sklearn.datasets import load_digits
from sklearn.cross_validation import train_test_split
# ----- general import -----
import pandas as pd
import numpy as np
# ----- stacking library -----
from stacking.base import FOLDER_NAME, PATH, INPUT_PATH, TEMP_PATH,\
FEATURES_PATH, OUTPUT_PATH, SUBMIT_FORMAT
# ----- utils -----
from stacking.base import load_data, save_pred_as_submit_format, create_cv_id, \
eval_pred
# ----- classifiers -----
from stacking.base import BaseModel, XGBClassifier, KerasClassifier
# ----- keras -----
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation
from keras.optimizers import SGD
from keras.utils import np_utils
from keras.layers.advanced_activations import LeakyReLU, PReLU
from keras.layers.normalization import BatchNormalization
from keras.regularizers import l1, l2, l1l2, activity_l2
# ----- Set problem type!! -----
problem_type = 'classification'
classification_type = 'binary'
eval_type = 'auc'
BaseModel.set_prob_type(problem_type, classification_type, eval_type)
# ----- create dataset -----
# load data for binary
digits = load_digits(2)
# split data for train and test
data_train, data_test, label_train, label_test = train_test_split(digits.data, digits.target)
# concat data as pandas' dataframe format
data_train = pd.DataFrame(data_train)
label_train = pd.DataFrame(label_train, columns=['target'])
train = pd.concat([data_train, label_train], axis=1)
data_test = pd.DataFrame(data_test)
label_test = pd.DataFrame(label_test, columns=['target'])
test = data_test # not include target
# save data under /data/input.
train.to_csv(INPUT_PATH + 'train.csv', index=False)
test.to_csv(INPUT_PATH + 'test.csv', index=False)
label_test.to_csv(INPUT_PATH + 'label_test.csv', index=False)
# ----- END create dataset -----
# -----create features -----
train_log = train.iloc[:, :64].applymap(lambda x: np.log(x+1))
test_log = test.iloc[:, :64].applymap(lambda x: np.log(x+1))
train_log.columns = map(str, train_log.columns)
test_log.columns = map(str, test_log.columns)
train_log.columns += '_log'
test_log.columns += '_log'
# save data under /data/output/features/.
train_log.to_csv(FEATURES_PATH + 'train_log.csv', index=False)
test_log.to_csv(FEATURES_PATH + 'test_log.csv', index=False)
# ----- END create features -----
# ----- First stage stacking model-----
# FEATURE LISTS in Stage 1.
FEATURE_LIST_stage1 = {
'train':(INPUT_PATH + 'train.csv',
FEATURES_PATH + 'train_log.csv',
),#target is in 'train'
'test':(INPUT_PATH + 'test.csv',
FEATURES_PATH + 'test_log.csv',
),
}
# need to get input shape for NN now
X,y,test = load_data(flist=FEATURE_LIST_stage1, drop_duplicates=True)
assert((False in X.columns == test.columns) == False)
nn_input_dim_NN = X.shape[1:]
del X, y, test
# Models in Stage 1
PARAMS_V1 = {
'colsample_bytree':0.80,
'learning_rate':0.1,"eval_metric":"auc",
'max_depth':5, 'min_child_weight':1,
'nthread':4,
'objective':'binary:logistic','seed':407,
'silent':1, 'subsample':0.60,
}
class ModelV1(BaseModel):
def build_model(self):
return XGBClassifier(params=self.params, num_round=10)
PARAMS_V2 = {
'batch_size':8,
'nb_epoch':5,
'verbose':1,
'callbacks':[],
'validation_split':0.,
'validation_data':None,
'shuffle':True,
#'show_accuracy':True,
'class_weight':None,
'sample_weight':None,
'normalize':True,
'categorize_y':True
}
class ModelV2(BaseModel):
def build_model(self):
model = Sequential()
model.add(Dense(64, input_shape=nn_input_dim_NN, init='he_normal'))
model.add(LeakyReLU(alpha=.00001))
model.add(Dropout(0.5))
model.add(Dense(2, init='he_normal'))
model.add(Activation('softmax'))
sgd = SGD(lr=0.1, decay=1e-5, momentum=0.9, nesterov=True)
model.compile(optimizer=sgd, loss='binary_crossentropy', metrics=["accuracy"])
return KerasClassifier(nn=model,**self.params)
# ----- END first stage stacking model -----
# ----- Second stage stacking model -----
PARAMS_V1_stage2 = {
'colsample_bytree':0.6,'colsample_bylevel':0.80,
'learning_rate':0.05,"eval_metric":"auc",
'max_depth':6, 'seed':1234,
'nthread':8,'reg_lambda':3,'reg_alpha':0.01,
'objective':'binary:logistic',
'silent':1, 'subsample':0.60,
}
class ModelV1_stage2(BaseModel):
def build_model(self):
return XGBClassifier(params=self.params, num_round=5)
# ----- END first stage stacking model -----
if __name__ == "__main__":
# Create cv-fold index
train = pd.read_csv(INPUT_PATH + 'train.csv')
create_cv_id(train, n_folds_ = 5, cv_id_name='cv_id', seed=407)
######## stage1 Models #########
print 'Start stage 1 training'
m = ModelV1(name="v1_stage1",
flist=FEATURE_LIST_stage1,
params = PARAMS_V1,
kind = 'st'
)
m.run()
m = ModelV2(name="v2_stage1",
flist=FEATURE_LIST_stage1,
params = PARAMS_V2,
kind = 'st'
)
m.run()
print 'Done stage 1'
print
######## stage2 Models #########
print 'Start stage 2 training'
# FEATURE LISTS in Stage 2.
# Need to define here because the outputs for NN dim. haven't been created yet.
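    # Note (added): relative to stage 1, these feature lists append the stage-1
    # out-of-fold predictions (v*_stage1_all_fold.csv) for training and the
    # corresponding test-set predictions (v*_stage1_test.csv) for scoring,
    # which is what makes this a stacked (second-stage) model.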
FEATURE_LIST_stage2 = {
'train':(INPUT_PATH + 'train.csv',
FEATURES_PATH + 'train_log.csv',
TEMP_PATH + 'v1_stage1_all_fold.csv',
TEMP_PATH + 'v2_stage1_all_fold.csv',
        ),#target is in 'train'
'test':(INPUT_PATH + 'test.csv',
FEATURES_PATH + 'test_log.csv',
TEMP_PATH + 'v1_stage1_test.csv',
TEMP_PATH + 'v2_stage1_test.csv',
),
}
X,y,test = load_data(flist=FEATURE_LIST_stage2, drop_duplicates=True)
assert((False in X.columns == test.columns) == False)
nn_input_dim_NN2 = X.shape[1]
del X, y, test
# Models
m = ModelV1_stage2(name="v1_stage2",
flist=FEATURE_LIST_stage2,
params = PARAMS_V1_stage2,
kind = 'st',
)
m.run()
print 'Done stage 2'
print
# averaging
print 'Saving as submission format'
#sample_sub = pd.read_csv('data/input/sample_submission.csv')
label = pd.read_csv(INPUT_PATH + 'label_test.csv')
testID = range(len(label))
testID = pd.DataFrame(testID, columns=['ID'])
pred = pd.read_csv(TEMP_PATH + 'v1_stage2_TestInAllTrainingData.csv')
print 'Evaluation'
auc = eval_pred(label.target, pred.iloc[:,0], eval_type=eval_type)
pred = pd.concat([testID, pred], axis=1)
pred.to_csv(TEMP_PATH + 'final_submission.csv', index=False)
| mit |
yodebu/currentcostgui | tracer.py | 9 | 3501 | #
# CurrentCost GUI
#
# Copyright (C) 2008 Dale Lane
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# The author of this code can be contacted at [email protected]
# Any contact about this application is warmly welcomed.
#
import logging
import numpy
import scipy
import matplotlib
import platform
import wx
from pysqlite2 import dbapi2 as sqlite
from time import strftime
#
# A class to collect debug and trace information.
#
#
# Dale Lane (http://dalelane.co.uk/blog)
#
enableTrace = False
# used for indenting trace
stackDepth = 0
indentStr = ""
class CurrentCostTracer():
def EnableTrace(self, val):
global enableTrace
enableTrace = val
def IsTraceEnabled(self):
global enableTrace
return enableTrace
def InitialiseTraceFile(self):
global enableTrace, stackDepth, indentStr
logging.basicConfig(level=logging.DEBUG,
format='%(asctime)s %(message)s',
filename='currentcostdiagnostics.log',
filemode='w+')
if enableTrace == True:
stackDepth = 0
indentStr = ""
logging.info("CurrentCost software - v 0.9.30")
logging.info("-------------------------------")
logging.info("python : version " + repr(platform.python_version()))
logging.info("numpy : version " + repr(numpy.version.version))
logging.info("scipy : version " + repr(scipy.version.version))
logging.info("matplotlib : version " + repr(matplotlib.__version__))
logging.info("wxpython : version " + repr(wx.version()))
logging.info("sqlite : version " + repr(sqlite.version))
def Trace(self, debuginfo):
global enableTrace, indentStr
if enableTrace == True:
logging.debug("DEBUG " + indentStr + " " + debuginfo)
def Error(self, errorinfo):
global enableTrace, indentStr
logging.error("ERROR " + indentStr + " " + errorinfo)
def FunctionEntry(self, functionname):
global enableTrace, indentStr, stackDepth
if enableTrace == True:
stackDepth += 1
logging.info("ENTRY " + indentStr + " " + functionname)
self.prepareIndentString()
def FunctionExit(self, functionname):
global enableTrace, indentStr, stackDepth
if enableTrace == True:
stackDepth -= 1
self.prepareIndentString()
logging.info("EXIT " + indentStr + " " + functionname)
def prepareIndentString(self):
global indentStr, stackDepth
indentStr = ""
for i in range(0, stackDepth):
indentStr += " "
| gpl-3.0 |
hunse/kitti | kitti/tests/test_data.py | 1 | 1903 | import os
import numpy as np
import matplotlib.pyplot as plt
from kitti.data import get_drives, image_shape, data_dir, Calib
from kitti.raw import load_stereo_frame
from kitti.velodyne import load_velodyne_points
def test_get_drives():
drives = get_drives()
print drives
def test_disp2rect():
drive = 11
frame = 0
color = True
img0, img1 = load_stereo_frame(drive, frame, color=color)
calib = Calib(color=color) # get calibration
# get points
vpts = load_velodyne_points(drive, frame)
# remove invalid points
# m = (vpts[:, 0] >= 5)
# m = (vpts[:, 0] >= 5) & (np.abs(vpts[:, 1]) < 5)
m = (vpts[:, 0] >= 5) & (vpts[:, 2] >= -3)
vpts = vpts[m, :]
rpts = calib.velo2rect(vpts)
# get disparities
xyd = calib.rect2disp(rpts)
xyd, valid_rpts = calib.filter_disps(xyd, return_mask=True)
if 1:
# plot disparities
disp = np.zeros(image_shape, dtype=np.uint8)
for x, y, d in np.round(xyd):
disp[y, x] = d
plt.figure(101)
plt.clf()
plt.subplot(211)
plt.imshow(img0, cmap='gray')
plt.subplot(212)
plt.imshow(disp)
plt.show()
# assert False
# convert back to rect
rpts2 = calib.disp2rect(xyd)
assert np.allclose(rpts[valid_rpts], rpts2)
# plotting
if 0:
plt.figure(101)
plt.clf()
img0, img1 = load_stereo_frame(drive, frame, color=color)
plt.imshow(img0, cmap='gray')
from mpl_toolkits.mplot3d import Axes3D
fig = plt.figure(1)
fig.clf()
ax = fig.add_subplot(111, projection='3d')
ax.plot3D(rpts[:, 0], rpts[:, 1], rpts[:, 2], '.')
ax.set_xlabel('x')
ax.set_ylabel('y')
ax.set_zlabel('z')
plt.show()
if __name__ == '__main__':
# print data_dir
test_get_drives()
# test_disp2rect()
| mit |
Chandramani/kaggle-competitions | african-soil-property-prediction/python_soild.py | 1 | 1618 | import pandas as pd
import numpy as np
from sklearn import svm, cross_validation
from sklearn.metrics import mean_squared_error
from math import sqrt
from sklearn import preprocessing
train = pd.read_csv('training.csv')
test = pd.read_csv('sorted_test.csv')
labels = train[['Ca','P','pH','SOC','Sand']].values
train_labels = train[['Ca','P','pH','SOC','Sand']]
train.drop(['Ca', 'P', 'pH', 'SOC', 'Sand', 'PIDN'], axis=1, inplace=True)
test.drop('PIDN', axis=1, inplace=True)
xtrain, xtest = np.array(train)[:,:2655], np.array(test)[:,:2655]
xtrain1, xtest1 = np.array(train)[:,:3578], np.array(test)[:,:3578]
sup_vec = svm.SVR(C=10000.0, verbose = 0)
preds = np.zeros((xtest.shape[0], 5))
train_pred = np.zeros((xtrain.shape[0], 5))
for i in range(5):
sup_vec.fit(xtrain, labels[:,i])
preds[:,i] = sup_vec.predict(xtest).astype(float)
train_pred[:,i] = sup_vec.predict(xtrain).astype(float)
sample = pd.read_csv('sample_submission.csv')
sample['Ca'] = preds[:,0]
sample['P'] = preds[:,1]
sample['pH'] = preds[:,2]
sample['SOC'] = preds[:,3]
sample['Sand'] = preds[:,4]
sample.to_csv('beating_benchmark.csv', index = False)
rms_ca = sqrt(mean_squared_error(train_labels['Ca'], train_pred[:,0]))
rms_p = sqrt(mean_squared_error(train_labels['P'], train_pred[:,1]))
rms_ph = sqrt(mean_squared_error(train_labels['pH'], train_pred[:,2]))
rms_soc = sqrt(mean_squared_error(train_labels['SOC'], train_pred[:,3]))
rms_sand = sqrt(mean_squared_error(train_labels['Sand'], train_pred[:,4]))
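# Note (added): the average below mirrors the competition's column-wise mean
# RMSE (MCRMSE) score, although here it is computed on training-set
# predictions rather than on a held-out set.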
rmse_avg=(rms_ca+rms_p+rms_sand+rms_soc+rms_ph)/5
print rms_ca,rms_p,rms_ph,rms_soc,rms_sand
print rmse_avg
| apache-2.0 |
Achuth17/scikit-learn | sklearn/datasets/tests/test_svmlight_format.py | 12 | 10796 | from bz2 import BZ2File
import gzip
from io import BytesIO
import numpy as np
import os
import shutil
from tempfile import NamedTemporaryFile
from sklearn.externals.six import b
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import raises
from sklearn.utils.testing import assert_in
import sklearn
from sklearn.datasets import (load_svmlight_file, load_svmlight_files,
dump_svmlight_file)
currdir = os.path.dirname(os.path.abspath(__file__))
datafile = os.path.join(currdir, "data", "svmlight_classification.txt")
multifile = os.path.join(currdir, "data", "svmlight_multilabel.txt")
invalidfile = os.path.join(currdir, "data", "svmlight_invalid.txt")
invalidfile2 = os.path.join(currdir, "data", "svmlight_invalid_order.txt")
def test_load_svmlight_file():
X, y = load_svmlight_file(datafile)
# test X's shape
assert_equal(X.indptr.shape[0], 7)
assert_equal(X.shape[0], 6)
assert_equal(X.shape[1], 21)
assert_equal(y.shape[0], 6)
# test X's non-zero values
for i, j, val in ((0, 2, 2.5), (0, 10, -5.2), (0, 15, 1.5),
(1, 5, 1.0), (1, 12, -3),
(2, 20, 27)):
assert_equal(X[i, j], val)
# tests X's zero values
assert_equal(X[0, 3], 0)
assert_equal(X[0, 5], 0)
assert_equal(X[1, 8], 0)
assert_equal(X[1, 16], 0)
assert_equal(X[2, 18], 0)
# test can change X's values
X[0, 2] *= 2
assert_equal(X[0, 2], 5)
# test y
assert_array_equal(y, [1, 2, 3, 4, 1, 2])
def test_load_svmlight_file_fd():
# test loading from file descriptor
X1, y1 = load_svmlight_file(datafile)
fd = os.open(datafile, os.O_RDONLY)
try:
X2, y2 = load_svmlight_file(fd)
assert_array_equal(X1.data, X2.data)
assert_array_equal(y1, y2)
finally:
os.close(fd)
def test_load_svmlight_file_multilabel():
X, y = load_svmlight_file(multifile, multilabel=True)
assert_equal(y, [(0, 1), (2,), (), (1, 2)])
def test_load_svmlight_files():
X_train, y_train, X_test, y_test = load_svmlight_files([datafile] * 2,
dtype=np.float32)
assert_array_equal(X_train.toarray(), X_test.toarray())
assert_array_equal(y_train, y_test)
assert_equal(X_train.dtype, np.float32)
assert_equal(X_test.dtype, np.float32)
X1, y1, X2, y2, X3, y3 = load_svmlight_files([datafile] * 3,
dtype=np.float64)
assert_equal(X1.dtype, X2.dtype)
assert_equal(X2.dtype, X3.dtype)
assert_equal(X3.dtype, np.float64)
def test_load_svmlight_file_n_features():
X, y = load_svmlight_file(datafile, n_features=22)
# test X'shape
assert_equal(X.indptr.shape[0], 7)
assert_equal(X.shape[0], 6)
assert_equal(X.shape[1], 22)
# test X's non-zero values
for i, j, val in ((0, 2, 2.5), (0, 10, -5.2),
(1, 5, 1.0), (1, 12, -3)):
assert_equal(X[i, j], val)
# 21 features in file
assert_raises(ValueError, load_svmlight_file, datafile, n_features=20)
def test_load_compressed():
X, y = load_svmlight_file(datafile)
with NamedTemporaryFile(prefix="sklearn-test", suffix=".gz") as tmp:
tmp.close() # necessary under windows
with open(datafile, "rb") as f:
shutil.copyfileobj(f, gzip.open(tmp.name, "wb"))
Xgz, ygz = load_svmlight_file(tmp.name)
# because we "close" it manually and write to it,
# we need to remove it manually.
os.remove(tmp.name)
assert_array_equal(X.toarray(), Xgz.toarray())
assert_array_equal(y, ygz)
with NamedTemporaryFile(prefix="sklearn-test", suffix=".bz2") as tmp:
tmp.close() # necessary under windows
with open(datafile, "rb") as f:
shutil.copyfileobj(f, BZ2File(tmp.name, "wb"))
Xbz, ybz = load_svmlight_file(tmp.name)
# because we "close" it manually and write to it,
# we need to remove it manually.
os.remove(tmp.name)
assert_array_equal(X.toarray(), Xbz.toarray())
assert_array_equal(y, ybz)
@raises(ValueError)
def test_load_invalid_file():
load_svmlight_file(invalidfile)
@raises(ValueError)
def test_load_invalid_order_file():
load_svmlight_file(invalidfile2)
@raises(ValueError)
def test_load_zero_based():
f = BytesIO(b("-1 4:1.\n1 0:1\n"))
load_svmlight_file(f, zero_based=False)
def test_load_zero_based_auto():
data1 = b("-1 1:1 2:2 3:3\n")
data2 = b("-1 0:0 1:1\n")
f1 = BytesIO(data1)
X, y = load_svmlight_file(f1, zero_based="auto")
assert_equal(X.shape, (1, 3))
f1 = BytesIO(data1)
f2 = BytesIO(data2)
X1, y1, X2, y2 = load_svmlight_files([f1, f2], zero_based="auto")
assert_equal(X1.shape, (1, 4))
assert_equal(X2.shape, (1, 4))
def test_load_with_qid():
# load svmfile with qid attribute
data = b("""
3 qid:1 1:0.53 2:0.12
2 qid:1 1:0.13 2:0.1
7 qid:2 1:0.87 2:0.12""")
X, y = load_svmlight_file(BytesIO(data), query_id=False)
assert_array_equal(y, [3, 2, 7])
assert_array_equal(X.toarray(), [[.53, .12], [.13, .1], [.87, .12]])
res1 = load_svmlight_files([BytesIO(data)], query_id=True)
res2 = load_svmlight_file(BytesIO(data), query_id=True)
for X, y, qid in (res1, res2):
assert_array_equal(y, [3, 2, 7])
assert_array_equal(qid, [1, 1, 2])
assert_array_equal(X.toarray(), [[.53, .12], [.13, .1], [.87, .12]])
@raises(ValueError)
def test_load_invalid_file2():
load_svmlight_files([datafile, invalidfile, datafile])
@raises(TypeError)
def test_not_a_filename():
# in python 3 integers are valid file opening arguments (taken as unix
# file descriptors)
load_svmlight_file(.42)
@raises(IOError)
def test_invalid_filename():
load_svmlight_file("trou pic nic douille")
def test_dump():
Xs, y = load_svmlight_file(datafile)
Xd = Xs.toarray()
# slicing a csr_matrix can unsort its .indices, so test that we sort
# those correctly
Xsliced = Xs[np.arange(Xs.shape[0])]
for X in (Xs, Xd, Xsliced):
for zero_based in (True, False):
for dtype in [np.float32, np.float64, np.int32]:
f = BytesIO()
# we need to pass a comment to get the version info in;
# LibSVM doesn't grok comments so they're not put in by
# default anymore.
dump_svmlight_file(X.astype(dtype), y, f, comment="test",
zero_based=zero_based)
f.seek(0)
comment = f.readline()
try:
comment = str(comment, "utf-8")
except TypeError: # fails in Python 2.x
pass
assert_in("scikit-learn %s" % sklearn.__version__, comment)
comment = f.readline()
try:
comment = str(comment, "utf-8")
except TypeError: # fails in Python 2.x
pass
assert_in(["one", "zero"][zero_based] + "-based", comment)
X2, y2 = load_svmlight_file(f, dtype=dtype,
zero_based=zero_based)
assert_equal(X2.dtype, dtype)
assert_array_equal(X2.sorted_indices().indices, X2.indices)
if dtype == np.float32:
assert_array_almost_equal(
# allow a rounding error at the last decimal place
Xd.astype(dtype), X2.toarray(), 4)
else:
assert_array_almost_equal(
# allow a rounding error at the last decimal place
Xd.astype(dtype), X2.toarray(), 15)
assert_array_equal(y, y2)
def test_dump_concise():
one = 1
two = 2.1
three = 3.01
exact = 1.000000000000001
# loses the last decimal place
almost = 1.0000000000000001
X = [[one, two, three, exact, almost],
[1e9, 2e18, 3e27, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0]]
y = [one, two, three, exact, almost]
f = BytesIO()
dump_svmlight_file(X, y, f)
f.seek(0)
# make sure it's using the most concise format possible
assert_equal(f.readline(),
b("1 0:1 1:2.1 2:3.01 3:1.000000000000001 4:1\n"))
assert_equal(f.readline(), b("2.1 0:1000000000 1:2e+18 2:3e+27\n"))
assert_equal(f.readline(), b("3.01 \n"))
assert_equal(f.readline(), b("1.000000000000001 \n"))
assert_equal(f.readline(), b("1 \n"))
f.seek(0)
# make sure it's correct too :)
X2, y2 = load_svmlight_file(f)
assert_array_almost_equal(X, X2.toarray())
assert_array_equal(y, y2)
def test_dump_comment():
X, y = load_svmlight_file(datafile)
X = X.toarray()
f = BytesIO()
ascii_comment = "This is a comment\nspanning multiple lines."
dump_svmlight_file(X, y, f, comment=ascii_comment, zero_based=False)
f.seek(0)
X2, y2 = load_svmlight_file(f, zero_based=False)
assert_array_almost_equal(X, X2.toarray())
assert_array_equal(y, y2)
# XXX we have to update this to support Python 3.x
utf8_comment = b("It is true that\n\xc2\xbd\xc2\xb2 = \xc2\xbc")
f = BytesIO()
assert_raises(UnicodeDecodeError,
dump_svmlight_file, X, y, f, comment=utf8_comment)
unicode_comment = utf8_comment.decode("utf-8")
f = BytesIO()
dump_svmlight_file(X, y, f, comment=unicode_comment, zero_based=False)
f.seek(0)
X2, y2 = load_svmlight_file(f, zero_based=False)
assert_array_almost_equal(X, X2.toarray())
assert_array_equal(y, y2)
f = BytesIO()
assert_raises(ValueError,
dump_svmlight_file, X, y, f, comment="I've got a \0.")
def test_dump_invalid():
X, y = load_svmlight_file(datafile)
f = BytesIO()
y2d = [y]
assert_raises(ValueError, dump_svmlight_file, X, y2d, f)
f = BytesIO()
assert_raises(ValueError, dump_svmlight_file, X, y[:-1], f)
def test_dump_query_id():
# test dumping a file with query_id
X, y = load_svmlight_file(datafile)
X = X.toarray()
query_id = np.arange(X.shape[0]) // 2
f = BytesIO()
dump_svmlight_file(X, y, f, query_id=query_id, zero_based=True)
f.seek(0)
X1, y1, query_id1 = load_svmlight_file(f, query_id=True, zero_based=True)
assert_array_almost_equal(X, X1.toarray())
assert_array_almost_equal(y, y1)
assert_array_almost_equal(query_id, query_id1)
| bsd-3-clause |
rlowrance/re-avm | chart06_make_chart_a.py | 1 | 3743 |
from __future__ import division
import matplotlib.pyplot as plt
import pdb
from columns_contain import columns_contain
from Month import Month
cc = columns_contain
def make_chart_a(reduction, median_prices, control):
'graph range of errors by month by method'
print 'make_chart_a'
def make_subplot(validation_month, reduction, relevant_median_prices):
'mutate the default axes'
# draw one line for each model family
for model in ('en', 'gb', 'rf'):
y = [v.mae
for k, v in reduction[validation_month].iteritems()
if k.model == model
]
plt.plot(y, label=model) # the reduction is sorted by increasing mae
plt.yticks(size='xx-small')
if Month(validation_month) not in relevant_median_prices:
print validation_month
print relevant_median_prices
print 'should not happen'
pdb.set_trace()
plt.title('yr mnth %s med price %6.0f' % (
validation_month,
relevant_median_prices[Month(validation_month)]),
loc='right',
fontdict={'fontsize': 'xx-small',
'style': 'italic',
},
)
plt.xticks([]) # no ticks on x axis
return
def make_figure(reduction, path_out, city, relevant_median_prices):
# make and save figure
# debug: sometimes relevant_median_prices is empty
if len(relevant_median_prices) == 0:
print 'no median prices', city
pdb.set_trace()
plt.figure() # new figure
# plt.suptitle('Loss by Test Period, Tree Max Depth, N Trees') # overlays the subplots
axes_number = 0
validation_months = ('200612', '200701', '200702', '200703', '200704', '200705',
'200706', '200707', '200708', '200709', '200710', '200711',
)
row_seq = (1, 2, 3, 4)
col_seq = (1, 2, 3)
cities = city is not None
for row in row_seq:
for col in col_seq:
validation_month = validation_months[axes_number]
if cities:
print 'city %s validation_month %s num transactions %d' % (
city,
validation_month,
len(reduction[validation_month]))
axes_number += 1 # count across rows
plt.subplot(len(row_seq), len(col_seq), axes_number) # could be empty, if no transactions in month
make_subplot(validation_month, reduction, relevant_median_prices)
# annotate the bottom row only
if row == 4:
if col == 1:
plt.xlabel('hp set')
plt.ylabel('mae x $1000')
if col == 3:
plt.legend(loc='best', fontsize=5)
plt.tight_layout(pad=0.4, w_pad=0.5, h_pad=1.0)
plt.savefig(path_out)
plt.close()
if control.arg.locality == 'global':
make_figure(reduction, control.path_out_a, None, median_prices)
elif control.arg.locality == 'city':
def make_city(city):
print 'make_city', city
assert len(reduction[city]) > 0, city # detect bug found in earlier version
return make_figure(reduction[city], control.path_out_a % city, city, median_prices[city])
for city in reduction.keys():
make_city(city)
else:
print 'bad control.arg.locality', control.arg
pdb.set_trace()
return
| bsd-3-clause |
larsmans/scipy | doc/source/tutorial/examples/normdiscr_plot2.py | 84 | 1642 | import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
npoints = 20 # number of integer support points of the distribution minus 1
npointsh = npoints / 2
npointsf = float(npoints)
nbound = 4 #bounds for the truncated normal
normbound = (1 + 1 / npointsf) * nbound #actual bounds of truncated normal
grid = np.arange(-npointsh, npointsh+2,1) #integer grid
gridlimitsnorm = (grid - 0.5) / npointsh * nbound #bin limits for the truncnorm
gridlimits = grid - 0.5
grid = grid[:-1]
probs = np.diff(stats.truncnorm.cdf(gridlimitsnorm, -normbound, normbound))
gridint = grid
normdiscrete = stats.rv_discrete(
values=(gridint, np.round(probs, decimals=7)),
name='normdiscrete')
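# Note (added): rv_discrete builds a discrete distribution from the integer
# support points and their (rounded) truncated-normal probabilities; its CDF is
# compared below with the empirical cumulative frequencies of a random sample.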
n_sample = 500
np.random.seed(87655678) #fix the seed for replicability
rvs = normdiscrete.rvs(size=n_sample)
rvsnd = rvs
f,l = np.histogram(rvs,bins=gridlimits)
sfreq = np.vstack([gridint,f,probs*n_sample]).T
fs = sfreq[:,1] / float(n_sample)
ft = sfreq[:,2] / float(n_sample)
fs = sfreq[:,1].cumsum() / float(n_sample)
ft = sfreq[:,2].cumsum() / float(n_sample)
nd_std = np.sqrt(normdiscrete.stats(moments='v'))
ind = gridint # the x locations for the groups
width = 0.35 # the width of the bars
plt.figure()
plt.subplot(111)
rects1 = plt.bar(ind, ft, width, color='b')
rects2 = plt.bar(ind+width, fs, width, color='r')
normline = plt.plot(ind+width/2.0, stats.norm.cdf(ind+0.5,scale=nd_std),
color='b')
plt.ylabel('cdf')
plt.title('Cumulative Frequency and CDF of normdiscrete')
plt.xticks(ind+width, ind)
plt.legend((rects1[0], rects2[0]), ('true', 'sample'))
plt.show()
| bsd-3-clause |
harisbal/pandas | pandas/tests/generic/test_panel.py | 4 | 1949 | # -*- coding: utf-8 -*-
# pylint: disable-msg=E1101,W0612
from warnings import catch_warnings, simplefilter
from pandas import Panel
from pandas.util.testing import (assert_panel_equal,
assert_almost_equal)
import pandas.util.testing as tm
import pandas.util._test_decorators as td
from .test_generic import Generic
class TestPanel(Generic):
_typ = Panel
_comparator = lambda self, x, y: assert_panel_equal(x, y, by_blocks=True)
@td.skip_if_no('xarray', min_version='0.7.0')
def test_to_xarray(self):
from xarray import DataArray
with catch_warnings(record=True):
simplefilter("ignore", FutureWarning)
p = tm.makePanel()
result = p.to_xarray()
assert isinstance(result, DataArray)
assert len(result.coords) == 3
assert_almost_equal(list(result.coords.keys()),
['items', 'major_axis', 'minor_axis'])
assert len(result.dims) == 3
# idempotency
assert_panel_equal(result.to_pandas(), p)
# run all the tests, but wrap each in a warning catcher
for t in ['test_rename', 'test_get_numeric_data',
'test_get_default', 'test_nonzero',
'test_downcast', 'test_constructor_compound_dtypes',
'test_head_tail',
'test_size_compat', 'test_split_compat',
'test_unexpected_keyword',
'test_stat_unexpected_keyword', 'test_api_compat',
'test_stat_non_defaults_args',
'test_truncate_out_of_bounds',
'test_metadata_propagation', 'test_copy_and_deepcopy',
'test_pct_change', 'test_sample']:
def f():
def tester(self):
f = getattr(super(TestPanel, self), t)
with catch_warnings(record=True):
simplefilter("ignore", FutureWarning)
f()
return tester
setattr(TestPanel, t, f())
| bsd-3-clause |
anirudhnair/KernelBasedCharcterization | TAUTraceAnalysis/EBSTraceStat/EBSTraceStat.py | 1 | 35614 | '''
Created on Feb 11, 2014
@author: anirudhj
'''
import os,shutil
from sax import saxpy as SaX
import fileinput, shlex
from Settings import Common
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
class EBSTraceStat(object):
'''
get stats from EBS trace
'''
def __init__(self, filePath):
self.m_filePath = ""
self.traceTable = None
self.m_Metrics = None
self.m_pid = '0'
self.m_sax = None
self.m_ObjDumpReader = None
if os.path.isfile(filePath + ".bak"):
os.remove(filePath + ".bak")
shutil.copyfile(filePath, filePath + ".bak")
self.m_filePath = filePath + ".bak"
self.m_pid = os.path.basename(self.m_filePath).split(".")[2]
self.m_sax = SaX.SAX(Common.SIGLEN, Common.SAX_ALPHABETS, 1e-6)
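    # Rough usage sketch (assumed, not part of the original class; the trace
    # file name format is an assumption based on how m_pid is parsed above):
    #   stat = EBSTraceStat('/path/to/ebstrace.processed.<pid>.trc')
    #   stat.RegisterObjDumpReader(objdump_reader)
    #   stat.CleanTrace()
    #   stat.LoadTrace()
    #   stat.Analyze(0, 2)   # plot metric index 2 against TIME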
def RegisterObjDumpReader(self,reader):
self.m_ObjDumpReader = reader
def CleanTrace(self):
'''
remove lines with %
        remove $ from line beginning
        note PAPI events and remove all lines before this line
        split on | as delimiter, keep the 4th token (metric values) and the
        instruction pointer (6th token) in the file
'''
for line in fileinput.FileInput(self.m_filePath,inplace=1):
if "Metrics" in line:
self.ExtractMetrics(line)
continue
elif ( line[0] == '#'):
continue
if ('$' in line or '%' in line) :
line = line.replace('$','\n')
line = line.replace('%','\n%')
print line,
fileinput.close()
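        # Second pass: drop the '%'-prefixed lines and keep, for each sample,
        # only the metric counter values (pipe-delimited field 4) plus the
        # instruction pointer (field 6), written out as comma-separated values.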
for line in fileinput.FileInput(self.m_filePath,inplace=1):
if( line[0] == "%"):
continue;
cleanedLine = line.strip()
if (cleanedLine):
metricLine = (line.split("|")[4]).strip()
instPointer = (line.split("|")[6]).strip()
metricVals = ','.join(['"{0}"'.format(fragment) if ' ' in fragment else fragment \
for fragment in shlex.split(metricLine)])
metricVals = metricVals + ',' + instPointer
print metricVals
#print ','.join(shlex.split(cleanedLine))
fileinput.close()
def ExtractMetrics(self, line):
listMetrics = line.split(' ')
del listMetrics[0]
del listMetrics[0]
self.m_Metrics = listMetrics
def GetMetricsList(self):
return self.m_Metrics
def LoadTrace(self):
'''
The first metric is always TIME. Second the EBS_SOURCE
'''
listHeader = []
listHeader.extend(self.m_Metrics)
listHeader.append(Common.IP)
self.traceTable = pd.read_table(self.m_filePath,sep=',',header=None,names= listHeader)
self.traceTable = self.traceTable.sort_index(by=self.m_Metrics[0], ascending=[True])
sizeList = len(self.m_Metrics)
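        # The counters in the trace are cumulative, so convert every metric
        # except TIME and the EBS source (indices 0 and 1) into per-sample
        # deltas by differencing consecutive rows.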
for index_ in range(2,sizeList):
self.traceTable[self.m_Metrics[index_]] = self.traceTable[self.m_Metrics[index_]]. \
sub(self.traceTable[self.m_Metrics[index_]].shift(), fill_value = 0)
def AnalyzeIPFDBetweenTimeStamps(self,startTimes, endTimes):
iterIPFDMap = {}
iter_ = 0
for index_ in range(len(startTimes)):
startTime_ = startTimes[index_]
endTime_ = endTimes[index_]
reducedTrace_ = self.traceTable[(self.traceTable['TIME'] >= startTime_) & (self.traceTable['TIME'] <= endTime_)]
listIPs = reducedTrace_[Common.IP].values
#print listIPs
listInsts = []
for ip in listIPs:
instType = self.m_ObjDumpReader.GetInstructionType(ip[2:])
if( instType == 0):
continue
listInsts.append(instType)
if( len(listInsts) == 0):
print "---------------------------------------------------"
print "WARNING: Empty instruction list"
print "---------------------------------------------------"
instCtr = Common.GetFrequencyList(listInsts)
iterIPFDMap[iter_] = instCtr
            #plotting
x = []
y = []
for key in instCtr:
x.append(key)
x.sort()
for key in x:
y.append(instCtr[key])
plt.bar(np.arange(len(x)) ,y,align='center')
plt.xticks(np.arange(len(x)), x,rotation=30, size='small')
plt.savefig(os.path.dirname(self.m_filePath) + "/IPFD_" + str(iter_),format="pdf",dpi=500)
plt.clf()
iter_+=1
return iterIPFDMap
def AnalyzeBetweenTimeStamps(self,x_index,y_index,startTimes,endTimes):
startTime = startTimes[0]
endTime = endTimes[len(endTimes) - 1]
reducedTrace = self.traceTable[(self.traceTable['TIME'] >= startTime) & (self.traceTable['TIME'] <= endTime)]
y_vals = reducedTrace[self.m_Metrics[y_index]]
x_vals = reducedTrace[self.m_Metrics[x_index]].values
plt.plot(x_vals, y_vals , 'g-') #first value is useless as it is not sub'ed
# sax string rep for each iter
saxStr = ''
iterSAXMap = {}
iter_ = 0
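        # For each iteration window, reduce the metric series to a fixed-length
        # SAX word (Common.SIGLEN symbols drawn from Common.SAX_ALPHABETS
        # letters) and record it per iteration; the words are also concatenated
        # for the whole-run plot below.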
for index_ in range(len(startTimes)):
startTime_ = startTimes[index_]
endTime_ = endTimes[index_]
reducedTrace_ = self.traceTable[(self.traceTable['TIME'] >= startTime_) & (self.traceTable['TIME'] <= endTime_)]
y_vals_ = reducedTrace_[self.m_Metrics[y_index]]
saxStr_,indices = self.m_sax.to_letter_rep(y_vals_)
saxStr+=saxStr_
iterSAXMap[iter_] = saxStr_
iter_+=1
saxNum = Common.GetNumArrayFromString(saxStr)
        #plotting
vlinePoints = endTimes
plt.vlines(vlinePoints, [y_vals.min()],[y_vals.max()],'r','dashed')
plt.xlabel(self.m_Metrics[x_index])
plt.ylabel(self.m_Metrics[y_index])
plt.xlim(x_vals.min(), x_vals.max())
fileDumpPath = (self.m_Metrics[x_index] + "_" + self.m_Metrics[y_index] + "_"+ self.m_pid +".pdf").strip()
fileDumpPath = os.path.dirname(self.m_filePath) + "/" + fileDumpPath
figure = plt.gcf()
figure.set_size_inches(24, 16)
plt.savefig(fileDumpPath,format="pdf",dpi=500)
plt.clf()
plt.plot(saxNum, 'g-')
plt.xlabel('SAX string Length')
plt.ylabel('SAX alphabets')
plt.title(self.m_Metrics[y_index])
xticks = range(0,Common.SIGLEN*len(startTimes),Common.SIGLEN)
plt.xticks(xticks)
plt.yticks(range(Common.SAX_ALPHABETS))
fileDumpPath = (self.m_Metrics[x_index] + "_" + self.m_Metrics[y_index] + "_"+ self.m_pid +"_SAX.pdf").strip()
fileDumpPath = os.path.dirname(self.m_filePath) + "/" + fileDumpPath
figure = plt.gcf()
figure.set_size_inches(18, 12)
plt.savefig(fileDumpPath,format="pdf",dpi=500)
plt.clf()
return iterSAXMap
def Analyze(self,x_index,y_index):
y_vals = self.traceTable[self.m_Metrics[y_index]].sub(self.traceTable[self.m_Metrics[y_index]].shift(), fill_value = 0).values
y_vals = y_vals[1:]
plt.plot(self.traceTable[self.m_Metrics[x_index]].values[1:], y_vals, 'g-')
plt.xlabel(self.m_Metrics[x_index])
plt.ylabel(self.m_Metrics[y_index])
fileDumpPath = (self.m_Metrics[x_index] + "_" + self.m_Metrics[y_index] + "_" + self.m_pid + ".pdf").strip()
fileDumpPath = os.path.dirname(self.m_filePath) + "/" + fileDumpPath
figure = plt.gcf()
figure.set_size_inches(24, 16)
plt.savefig(fileDumpPath,format="pdf",dpi=500)
plt.clf()
def SAXString(self,index,startTime,endTime):
reducedTrace = self.traceTable[(self.traceTable['TIME'] >= startTime) & (self.traceTable['TIME'] <= endTime)]
vals = reducedTrace[self.m_Metrics[index]].sub(reducedTrace[self.m_Metrics[index]].shift(), fill_value = 0).values
vals = vals[1:]
return self.m_sax.to_letter_rep(vals)
def IPCAnalyzeBetweenTimeStamps(self,startTimes,endTimes):
startTime = startTimes[0]
endTime = endTimes[len(endTimes) - 1]
tmptrace = self.traceTable
tmptrace[self.m_Metrics[Common.INS_INDEX]] = tmptrace[self.m_Metrics[Common.INS_INDEX]].sub(tmptrace[self.m_Metrics[Common.INS_INDEX]].shift(), fill_value = 0)
reducedTrace = tmptrace[(tmptrace['TIME'] >= startTime) & (tmptrace['TIME'] <= endTime)]
insDiff = reducedTrace[self.m_Metrics[Common.INS_INDEX]].values
cycDiff = reducedTrace[self.m_Metrics[Common.CYC_INDEX]].values
IPC = np.divide(insDiff.astype(float),cycDiff.astype(float))
x_vals = reducedTrace[self.m_Metrics[Common.TIME_INDEX]].values
plt.plot(x_vals, IPC , 'g-') #first value is useless as it is not sub'ed
# sax string rep for each iter
saxStr = ''
iterSAXMap = {}
iter_ = 0
for index_ in range(len(startTimes)):
startTime_ = startTimes[index_]
            endTime_ = endTimes[index_]
reducedTrace_ = tmptrace[(tmptrace['TIME'] >= startTime_) & (tmptrace['TIME'] <= endTime_)]
insDiff_ = reducedTrace_[self.m_Metrics[Common.INS_INDEX]].values
cycDiff_ = reducedTrace_[self.m_Metrics[Common.CYC_INDEX]].values
IPC_ = np.divide(insDiff_.astype(float),cycDiff_.astype(float))
saxStr_,indices = self.m_sax.to_letter_rep(IPC_)
            saxStr += saxStr_
            iterSAXMap[iter_] = saxStr_
            iter_ += 1
saxNum = Common.GetNumArrayFromString(saxStr)
        #plotting
vlinePoints = endTimes
plt.vlines(vlinePoints, [IPC.min()],[IPC.max()],'r','dashed')
plt.xlabel(self.m_Metrics[Common.TIME_INDEX])
plt.ylabel('IPC')
plt.xlim(x_vals.min(), x_vals.max())
fileDumpPath = (self.m_Metrics[Common.TIME_INDEX] + "_" + 'IPC' + "_"+ self.m_pid +".pdf").strip()
fileDumpPath = os.path.dirname(self.m_filePath) + "/" + fileDumpPath
figure = plt.gcf()
figure.set_size_inches(24, 16)
plt.savefig(fileDumpPath,format="pdf",dpi=500)
plt.clf()
plt.plot(saxNum, 'g-')
plt.xlabel('SAX string Length')
plt.ylabel('SAX alphabets')
plt.title('IPC')
xticks = range(0,Common.SIGLEN*len(startTimes),Common.SIGLEN)
plt.xticks(xticks)
plt.yticks(range(Common.SAX_ALPHABETS))
fileDumpPath = (self.m_Metrics[Common.TIME_INDEX] + "_" + 'IPC' + "_"+ self.m_pid +"_SAX.pdf").strip()
fileDumpPath = os.path.dirname(self.m_filePath) + "/" + fileDumpPath
figure = plt.gcf()
figure.set_size_inches(18, 12)
plt.savefig(fileDumpPath,format="pdf",dpi=500)
plt.clf()
return iterSAXMap
def GetPID(self):
return int(self.m_pid)
| apache-2.0 |
plotly/plotly.py | packages/python/plotly/plotly/graph_objs/parcoords/line/_colorbar.py | 1 | 73457 | from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class ColorBar(_BaseTraceHierarchyType):
# class properties
# --------------------
_parent_path_str = "parcoords.line"
_path_str = "parcoords.line.colorbar"
_valid_props = {
"bgcolor",
"bordercolor",
"borderwidth",
"dtick",
"exponentformat",
"len",
"lenmode",
"minexponent",
"nticks",
"outlinecolor",
"outlinewidth",
"separatethousands",
"showexponent",
"showticklabels",
"showtickprefix",
"showticksuffix",
"thickness",
"thicknessmode",
"tick0",
"tickangle",
"tickcolor",
"tickfont",
"tickformat",
"tickformatstopdefaults",
"tickformatstops",
"ticklabeloverflow",
"ticklabelposition",
"ticklen",
"tickmode",
"tickprefix",
"ticks",
"ticksuffix",
"ticktext",
"ticktextsrc",
"tickvals",
"tickvalssrc",
"tickwidth",
"title",
"titlefont",
"titleside",
"x",
"xanchor",
"xpad",
"y",
"yanchor",
"ypad",
}
# bgcolor
# -------
@property
def bgcolor(self):
"""
Sets the color of padded area.
The 'bgcolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
Returns
-------
str
"""
return self["bgcolor"]
@bgcolor.setter
def bgcolor(self, val):
self["bgcolor"] = val
# bordercolor
# -----------
@property
def bordercolor(self):
"""
Sets the axis line color.
The 'bordercolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
Returns
-------
str
"""
return self["bordercolor"]
@bordercolor.setter
def bordercolor(self, val):
self["bordercolor"] = val
# borderwidth
# -----------
@property
def borderwidth(self):
"""
Sets the width (in px) or the border enclosing this color bar.
The 'borderwidth' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["borderwidth"]
@borderwidth.setter
def borderwidth(self, val):
self["borderwidth"] = val
# dtick
# -----
@property
def dtick(self):
"""
Sets the step in-between ticks on this axis. Use with `tick0`.
Must be a positive number, or special strings available to
"log" and "date" axes. If the axis `type` is "log", then ticks
are set every 10^(n*dtick) where n is the tick number. For
example, to set a tick mark at 1, 10, 100, 1000, ... set dtick
to 1. To set tick marks at 1, 100, 10000, ... set dtick to 2.
To set tick marks at 1, 5, 25, 125, 625, 3125, ... set dtick to
log_10(5), or 0.69897000433. "log" has several special values;
"L<f>", where `f` is a positive number, gives ticks linearly
spaced in value (but not position). For example `tick0` = 0.1,
`dtick` = "L0.5" will put ticks at 0.1, 0.6, 1.1, 1.6 etc. To
show powers of 10 plus small digits between, use "D1" (all
digits) or "D2" (only 2 and 5). `tick0` is ignored for "D1" and
"D2". If the axis `type` is "date", then you must convert the
time to milliseconds. For example, to set the interval between
ticks to one day, set `dtick` to 86400000.0. "date" also has
special values "M<n>" gives ticks spaced by a number of months.
`n` must be a positive integer. To set ticks on the 15th of
every third month, set `tick0` to "2000-01-15" and `dtick` to
"M3". To set ticks every 4 years, set `dtick` to "M48"
The 'dtick' property accepts values of any type
Returns
-------
Any
"""
return self["dtick"]
@dtick.setter
def dtick(self, val):
self["dtick"] = val
# exponentformat
# --------------
@property
def exponentformat(self):
"""
Determines a formatting rule for the tick exponents. For
example, consider the number 1,000,000,000. If "none", it
appears as 1,000,000,000. If "e", 1e+9. If "E", 1E+9. If
"power", 1x10^9 (with 9 in a super script). If "SI", 1G. If
"B", 1B.
The 'exponentformat' property is an enumeration that may be specified as:
- One of the following enumeration values:
['none', 'e', 'E', 'power', 'SI', 'B']
Returns
-------
Any
"""
return self["exponentformat"]
@exponentformat.setter
def exponentformat(self, val):
self["exponentformat"] = val
# len
# ---
@property
def len(self):
"""
        Sets the length of the color bar. This measure excludes the
padding of both ends. That is, the color bar length is this
length minus the padding on both ends.
The 'len' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["len"]
@len.setter
def len(self, val):
self["len"] = val
# lenmode
# -------
@property
def lenmode(self):
"""
Determines whether this color bar's length (i.e. the measure in
the color variation direction) is set in units of plot
"fraction" or in *pixels. Use `len` to set the value.
The 'lenmode' property is an enumeration that may be specified as:
- One of the following enumeration values:
['fraction', 'pixels']
Returns
-------
Any
"""
return self["lenmode"]
@lenmode.setter
def lenmode(self, val):
self["lenmode"] = val
# minexponent
# -----------
@property
def minexponent(self):
"""
Hide SI prefix for 10^n if |n| is below this number. This only
has an effect when `tickformat` is "SI" or "B".
The 'minexponent' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["minexponent"]
@minexponent.setter
def minexponent(self, val):
self["minexponent"] = val
# nticks
# ------
@property
def nticks(self):
"""
Specifies the maximum number of ticks for the particular axis.
The actual number of ticks will be chosen automatically to be
less than or equal to `nticks`. Has an effect only if
`tickmode` is set to "auto".
        The 'nticks' property is an integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [0, 9223372036854775807]
Returns
-------
int
"""
return self["nticks"]
@nticks.setter
def nticks(self, val):
self["nticks"] = val
# outlinecolor
# ------------
@property
def outlinecolor(self):
"""
Sets the axis line color.
The 'outlinecolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
Returns
-------
str
"""
return self["outlinecolor"]
@outlinecolor.setter
def outlinecolor(self, val):
self["outlinecolor"] = val
# outlinewidth
# ------------
@property
def outlinewidth(self):
"""
Sets the width (in px) of the axis line.
The 'outlinewidth' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["outlinewidth"]
@outlinewidth.setter
def outlinewidth(self, val):
self["outlinewidth"] = val
# separatethousands
# -----------------
@property
def separatethousands(self):
"""
If "true", even 4-digit integers are separated
The 'separatethousands' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["separatethousands"]
@separatethousands.setter
def separatethousands(self, val):
self["separatethousands"] = val
# showexponent
# ------------
@property
def showexponent(self):
"""
If "all", all exponents are shown besides their significands.
If "first", only the exponent of the first tick is shown. If
"last", only the exponent of the last tick is shown. If "none",
no exponents appear.
The 'showexponent' property is an enumeration that may be specified as:
- One of the following enumeration values:
['all', 'first', 'last', 'none']
Returns
-------
Any
"""
return self["showexponent"]
@showexponent.setter
def showexponent(self, val):
self["showexponent"] = val
# showticklabels
# --------------
@property
def showticklabels(self):
"""
Determines whether or not the tick labels are drawn.
The 'showticklabels' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["showticklabels"]
@showticklabels.setter
def showticklabels(self, val):
self["showticklabels"] = val
# showtickprefix
# --------------
@property
def showtickprefix(self):
"""
If "all", all tick labels are displayed with a prefix. If
"first", only the first tick is displayed with a prefix. If
"last", only the last tick is displayed with a suffix. If
"none", tick prefixes are hidden.
The 'showtickprefix' property is an enumeration that may be specified as:
- One of the following enumeration values:
['all', 'first', 'last', 'none']
Returns
-------
Any
"""
return self["showtickprefix"]
@showtickprefix.setter
def showtickprefix(self, val):
self["showtickprefix"] = val
# showticksuffix
# --------------
@property
def showticksuffix(self):
"""
Same as `showtickprefix` but for tick suffixes.
The 'showticksuffix' property is an enumeration that may be specified as:
- One of the following enumeration values:
['all', 'first', 'last', 'none']
Returns
-------
Any
"""
return self["showticksuffix"]
@showticksuffix.setter
def showticksuffix(self, val):
self["showticksuffix"] = val
# thickness
# ---------
@property
def thickness(self):
"""
        Sets the thickness of the color bar. This measure excludes the
size of the padding, ticks and labels.
The 'thickness' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["thickness"]
@thickness.setter
def thickness(self, val):
self["thickness"] = val
# thicknessmode
# -------------
@property
def thicknessmode(self):
"""
Determines whether this color bar's thickness (i.e. the measure
in the constant color direction) is set in units of plot
"fraction" or in "pixels". Use `thickness` to set the value.
The 'thicknessmode' property is an enumeration that may be specified as:
- One of the following enumeration values:
['fraction', 'pixels']
Returns
-------
Any
"""
return self["thicknessmode"]
@thicknessmode.setter
def thicknessmode(self, val):
self["thicknessmode"] = val
# tick0
# -----
@property
def tick0(self):
"""
Sets the placement of the first tick on this axis. Use with
`dtick`. If the axis `type` is "log", then you must take the
log of your starting tick (e.g. to set the starting tick to
100, set the `tick0` to 2) except when `dtick`=*L<f>* (see
`dtick` for more info). If the axis `type` is "date", it should
be a date string, like date data. If the axis `type` is
"category", it should be a number, using the scale where each
category is assigned a serial number from zero in the order it
appears.
The 'tick0' property accepts values of any type
Returns
-------
Any
"""
return self["tick0"]
@tick0.setter
def tick0(self, val):
self["tick0"] = val
# tickangle
# ---------
@property
def tickangle(self):
"""
Sets the angle of the tick labels with respect to the
horizontal. For example, a `tickangle` of -90 draws the tick
labels vertically.
        The 'tickangle' property is an angle (in degrees) that may be
specified as a number between -180 and 180. Numeric values outside this
range are converted to the equivalent value
(e.g. 270 is converted to -90).
Returns
-------
int|float
"""
return self["tickangle"]
@tickangle.setter
def tickangle(self, val):
self["tickangle"] = val
# tickcolor
# ---------
@property
def tickcolor(self):
"""
Sets the tick color.
The 'tickcolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
Returns
-------
str
"""
return self["tickcolor"]
@tickcolor.setter
def tickcolor(self, val):
self["tickcolor"] = val
# tickfont
# --------
@property
def tickfont(self):
"""
Sets the color bar's tick label font
The 'tickfont' property is an instance of Tickfont
that may be specified as:
- An instance of :class:`plotly.graph_objs.parcoords.line.colorbar.Tickfont`
- A dict of string/value properties that will be passed
to the Tickfont constructor
Supported dict properties:
color
family
HTML font family - the typeface that will be
applied by the web browser. The web browser
will only be able to apply a font if it is
available on the system which it operates.
Provide multiple font families, separated by
commas, to indicate the preference in which to
apply fonts if they aren't available on the
system. The Chart Studio Cloud (at
https://chart-studio.plotly.com or on-premise)
generates images on a server, where only a
select number of fonts are installed and
supported. These include "Arial", "Balto",
"Courier New", "Droid Sans",, "Droid Serif",
"Droid Sans Mono", "Gravitas One", "Old
Standard TT", "Open Sans", "Overpass", "PT Sans
Narrow", "Raleway", "Times New Roman".
size
Returns
-------
plotly.graph_objs.parcoords.line.colorbar.Tickfont
"""
return self["tickfont"]
@tickfont.setter
def tickfont(self, val):
self["tickfont"] = val
# tickformat
# ----------
@property
def tickformat(self):
"""
Sets the tick label formatting rule using d3 formatting mini-
languages which are very similar to those in Python. For
numbers, see: https://github.com/d3/d3-3.x-api-
reference/blob/master/Formatting.md#d3_format. And for dates
see: https://github.com/d3/d3-time-format#locale_format. We add
two items to d3's date formatter: "%h" for half of the year as
a decimal number as well as "%{n}f" for fractional seconds with
n digits. For example, *2016-10-13 09:15:23.456* with
tickformat "%H~%M~%S.%2f" would display "09~15~23.46"
The 'tickformat' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["tickformat"]
@tickformat.setter
def tickformat(self, val):
self["tickformat"] = val
# tickformatstops
# ---------------
@property
def tickformatstops(self):
"""
The 'tickformatstops' property is a tuple of instances of
Tickformatstop that may be specified as:
- A list or tuple of instances of plotly.graph_objs.parcoords.line.colorbar.Tickformatstop
- A list or tuple of dicts of string/value properties that
will be passed to the Tickformatstop constructor
Supported dict properties:
dtickrange
range [*min*, *max*], where "min", "max" -
dtick values which describe some zoom level, it
is possible to omit "min" or "max" value by
passing "null"
enabled
Determines whether or not this stop is used. If
`false`, this stop is ignored even within its
`dtickrange`.
name
When used in a template, named items are
created in the output figure in addition to any
items the figure already has in this array. You
can modify these items in the output figure by
making your own item with `templateitemname`
matching this `name` alongside your
modifications (including `visible: false` or
`enabled: false` to hide it). Has no effect
outside of a template.
templateitemname
Used to refer to a named item in this array in
the template. Named items from the template
will be created even without a matching item in
the input figure, but you can modify one by
making an item with `templateitemname` matching
its `name`, alongside your modifications
(including `visible: false` or `enabled: false`
to hide it). If there is no template or no
matching item, this item will be hidden unless
you explicitly show it with `visible: true`.
value
string - dtickformat for described zoom level,
the same as "tickformat"
Returns
-------
tuple[plotly.graph_objs.parcoords.line.colorbar.Tickformatstop]
"""
return self["tickformatstops"]
@tickformatstops.setter
def tickformatstops(self, val):
self["tickformatstops"] = val
# tickformatstopdefaults
# ----------------------
@property
def tickformatstopdefaults(self):
"""
When used in a template (as layout.template.data.parcoords.line
.colorbar.tickformatstopdefaults), sets the default property
values to use for elements of
parcoords.line.colorbar.tickformatstops
The 'tickformatstopdefaults' property is an instance of Tickformatstop
that may be specified as:
- An instance of :class:`plotly.graph_objs.parcoords.line.colorbar.Tickformatstop`
- A dict of string/value properties that will be passed
to the Tickformatstop constructor
Supported dict properties:
Returns
-------
plotly.graph_objs.parcoords.line.colorbar.Tickformatstop
"""
return self["tickformatstopdefaults"]
@tickformatstopdefaults.setter
def tickformatstopdefaults(self, val):
self["tickformatstopdefaults"] = val
# ticklabeloverflow
# -----------------
@property
def ticklabeloverflow(self):
"""
Determines how we handle tick labels that would overflow either
the graph div or the domain of the axis. The default value for
inside tick labels is *hide past domain*. In other cases the
default is *hide past div*.
The 'ticklabeloverflow' property is an enumeration that may be specified as:
- One of the following enumeration values:
['allow', 'hide past div', 'hide past domain']
Returns
-------
Any
"""
return self["ticklabeloverflow"]
@ticklabeloverflow.setter
def ticklabeloverflow(self, val):
self["ticklabeloverflow"] = val
# ticklabelposition
# -----------------
@property
def ticklabelposition(self):
"""
Determines where tick labels are drawn.
The 'ticklabelposition' property is an enumeration that may be specified as:
- One of the following enumeration values:
['outside', 'inside', 'outside top', 'inside top',
'outside bottom', 'inside bottom']
Returns
-------
Any
"""
return self["ticklabelposition"]
@ticklabelposition.setter
def ticklabelposition(self, val):
self["ticklabelposition"] = val
# ticklen
# -------
@property
def ticklen(self):
"""
Sets the tick length (in px).
The 'ticklen' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["ticklen"]
@ticklen.setter
def ticklen(self, val):
self["ticklen"] = val
# tickmode
# --------
@property
def tickmode(self):
"""
Sets the tick mode for this axis. If "auto", the number of
ticks is set via `nticks`. If "linear", the placement of the
ticks is determined by a starting position `tick0` and a tick
step `dtick` ("linear" is the default value if `tick0` and
`dtick` are provided). If "array", the placement of the ticks
is set via `tickvals` and the tick text is `ticktext`. ("array"
is the default value if `tickvals` is provided).
The 'tickmode' property is an enumeration that may be specified as:
- One of the following enumeration values:
['auto', 'linear', 'array']
Returns
-------
Any
"""
return self["tickmode"]
@tickmode.setter
def tickmode(self, val):
self["tickmode"] = val
# tickprefix
# ----------
@property
def tickprefix(self):
"""
Sets a tick label prefix.
The 'tickprefix' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["tickprefix"]
@tickprefix.setter
def tickprefix(self, val):
self["tickprefix"] = val
# ticks
# -----
@property
def ticks(self):
"""
Determines whether ticks are drawn or not. If "", this axis'
ticks are not drawn. If "outside" ("inside"), this axis' are
drawn outside (inside) the axis lines.
The 'ticks' property is an enumeration that may be specified as:
- One of the following enumeration values:
['outside', 'inside', '']
Returns
-------
Any
"""
return self["ticks"]
@ticks.setter
def ticks(self, val):
self["ticks"] = val
# ticksuffix
# ----------
@property
def ticksuffix(self):
"""
Sets a tick label suffix.
The 'ticksuffix' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["ticksuffix"]
@ticksuffix.setter
def ticksuffix(self, val):
self["ticksuffix"] = val
# ticktext
# --------
@property
def ticktext(self):
"""
Sets the text displayed at the ticks position via `tickvals`.
Only has an effect if `tickmode` is set to "array". Used with
`tickvals`.
The 'ticktext' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["ticktext"]
@ticktext.setter
def ticktext(self, val):
self["ticktext"] = val
# ticktextsrc
# -----------
@property
def ticktextsrc(self):
"""
Sets the source reference on Chart Studio Cloud for ticktext .
The 'ticktextsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["ticktextsrc"]
@ticktextsrc.setter
def ticktextsrc(self, val):
self["ticktextsrc"] = val
# tickvals
# --------
@property
def tickvals(self):
"""
Sets the values at which ticks on this axis appear. Only has an
effect if `tickmode` is set to "array". Used with `ticktext`.
The 'tickvals' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["tickvals"]
@tickvals.setter
def tickvals(self, val):
self["tickvals"] = val
# tickvalssrc
# -----------
@property
def tickvalssrc(self):
"""
Sets the source reference on Chart Studio Cloud for tickvals .
The 'tickvalssrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["tickvalssrc"]
@tickvalssrc.setter
def tickvalssrc(self, val):
self["tickvalssrc"] = val
# tickwidth
# ---------
@property
def tickwidth(self):
"""
Sets the tick width (in px).
The 'tickwidth' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["tickwidth"]
@tickwidth.setter
def tickwidth(self, val):
self["tickwidth"] = val
# title
# -----
@property
def title(self):
"""
The 'title' property is an instance of Title
that may be specified as:
- An instance of :class:`plotly.graph_objs.parcoords.line.colorbar.Title`
- A dict of string/value properties that will be passed
to the Title constructor
Supported dict properties:
font
Sets this color bar's title font. Note that the
title's font used to be set by the now
deprecated `titlefont` attribute.
side
Determines the location of color bar's title
with respect to the color bar. Note that the
title's location used to be set by the now
deprecated `titleside` attribute.
text
Sets the title of the color bar. Note that
before the existence of `title.text`, the
title's contents used to be defined as the
`title` attribute itself. This behavior has
been deprecated.
Returns
-------
plotly.graph_objs.parcoords.line.colorbar.Title
"""
return self["title"]
@title.setter
def title(self, val):
self["title"] = val
# titlefont
# ---------
@property
def titlefont(self):
"""
Deprecated: Please use parcoords.line.colorbar.title.font
instead. Sets this color bar's title font. Note that the
title's font used to be set by the now deprecated `titlefont`
attribute.
The 'font' property is an instance of Font
that may be specified as:
- An instance of :class:`plotly.graph_objs.parcoords.line.colorbar.title.Font`
- A dict of string/value properties that will be passed
to the Font constructor
Supported dict properties:
color
family
HTML font family - the typeface that will be
applied by the web browser. The web browser
will only be able to apply a font if it is
available on the system which it operates.
Provide multiple font families, separated by
commas, to indicate the preference in which to
apply fonts if they aren't available on the
system. The Chart Studio Cloud (at
https://chart-studio.plotly.com or on-premise)
generates images on a server, where only a
select number of fonts are installed and
supported. These include "Arial", "Balto",
"Courier New", "Droid Sans",, "Droid Serif",
"Droid Sans Mono", "Gravitas One", "Old
Standard TT", "Open Sans", "Overpass", "PT Sans
Narrow", "Raleway", "Times New Roman".
size
Returns
-------
"""
return self["titlefont"]
@titlefont.setter
def titlefont(self, val):
self["titlefont"] = val
# titleside
# ---------
@property
def titleside(self):
"""
Deprecated: Please use parcoords.line.colorbar.title.side
instead. Determines the location of color bar's title with
respect to the color bar. Note that the title's location used
to be set by the now deprecated `titleside` attribute.
The 'side' property is an enumeration that may be specified as:
- One of the following enumeration values:
['right', 'top', 'bottom']
Returns
-------
"""
return self["titleside"]
@titleside.setter
def titleside(self, val):
self["titleside"] = val
# x
# -
@property
def x(self):
"""
Sets the x position of the color bar (in plot fraction).
The 'x' property is a number and may be specified as:
- An int or float in the interval [-2, 3]
Returns
-------
int|float
"""
return self["x"]
@x.setter
def x(self, val):
self["x"] = val
# xanchor
# -------
@property
def xanchor(self):
"""
Sets this color bar's horizontal position anchor. This anchor
binds the `x` position to the "left", "center" or "right" of
the color bar.
The 'xanchor' property is an enumeration that may be specified as:
- One of the following enumeration values:
['left', 'center', 'right']
Returns
-------
Any
"""
return self["xanchor"]
@xanchor.setter
def xanchor(self, val):
self["xanchor"] = val
# xpad
# ----
@property
def xpad(self):
"""
Sets the amount of padding (in px) along the x direction.
The 'xpad' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["xpad"]
@xpad.setter
def xpad(self, val):
self["xpad"] = val
# y
# -
@property
def y(self):
"""
Sets the y position of the color bar (in plot fraction).
The 'y' property is a number and may be specified as:
- An int or float in the interval [-2, 3]
Returns
-------
int|float
"""
return self["y"]
@y.setter
def y(self, val):
self["y"] = val
# yanchor
# -------
@property
def yanchor(self):
"""
        Sets this color bar's vertical position anchor. This anchor
binds the `y` position to the "top", "middle" or "bottom" of
the color bar.
The 'yanchor' property is an enumeration that may be specified as:
- One of the following enumeration values:
['top', 'middle', 'bottom']
Returns
-------
Any
"""
return self["yanchor"]
@yanchor.setter
def yanchor(self, val):
self["yanchor"] = val
# ypad
# ----
@property
def ypad(self):
"""
Sets the amount of padding (in px) along the y direction.
The 'ypad' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["ypad"]
@ypad.setter
def ypad(self, val):
self["ypad"] = val
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
bgcolor
Sets the color of padded area.
bordercolor
Sets the axis line color.
borderwidth
            Sets the width (in px) of the border enclosing this
color bar.
dtick
Sets the step in-between ticks on this axis. Use with
`tick0`. Must be a positive number, or special strings
available to "log" and "date" axes. If the axis `type`
is "log", then ticks are set every 10^(n*dtick) where n
is the tick number. For example, to set a tick mark at
1, 10, 100, 1000, ... set dtick to 1. To set tick marks
at 1, 100, 10000, ... set dtick to 2. To set tick marks
at 1, 5, 25, 125, 625, 3125, ... set dtick to
log_10(5), or 0.69897000433. "log" has several special
values; "L<f>", where `f` is a positive number, gives
ticks linearly spaced in value (but not position). For
example `tick0` = 0.1, `dtick` = "L0.5" will put ticks
at 0.1, 0.6, 1.1, 1.6 etc. To show powers of 10 plus
small digits between, use "D1" (all digits) or "D2"
(only 2 and 5). `tick0` is ignored for "D1" and "D2".
If the axis `type` is "date", then you must convert the
time to milliseconds. For example, to set the interval
between ticks to one day, set `dtick` to 86400000.0.
"date" also has special values "M<n>" gives ticks
spaced by a number of months. `n` must be a positive
integer. To set ticks on the 15th of every third month,
set `tick0` to "2000-01-15" and `dtick` to "M3". To set
ticks every 4 years, set `dtick` to "M48"
exponentformat
Determines a formatting rule for the tick exponents.
For example, consider the number 1,000,000,000. If
"none", it appears as 1,000,000,000. If "e", 1e+9. If
"E", 1E+9. If "power", 1x10^9 (with 9 in a super
script). If "SI", 1G. If "B", 1B.
len
            Sets the length of the color bar. This measure excludes
the padding of both ends. That is, the color bar length
is this length minus the padding on both ends.
lenmode
Determines whether this color bar's length (i.e. the
measure in the color variation direction) is set in
units of plot "fraction" or in *pixels. Use `len` to
set the value.
minexponent
Hide SI prefix for 10^n if |n| is below this number.
This only has an effect when `tickformat` is "SI" or
"B".
nticks
Specifies the maximum number of ticks for the
particular axis. The actual number of ticks will be
chosen automatically to be less than or equal to
`nticks`. Has an effect only if `tickmode` is set to
"auto".
outlinecolor
Sets the axis line color.
outlinewidth
Sets the width (in px) of the axis line.
separatethousands
If "true", even 4-digit integers are separated
showexponent
If "all", all exponents are shown besides their
significands. If "first", only the exponent of the
first tick is shown. If "last", only the exponent of
the last tick is shown. If "none", no exponents appear.
showticklabels
Determines whether or not the tick labels are drawn.
showtickprefix
If "all", all tick labels are displayed with a prefix.
If "first", only the first tick is displayed with a
prefix. If "last", only the last tick is displayed with
a suffix. If "none", tick prefixes are hidden.
showticksuffix
Same as `showtickprefix` but for tick suffixes.
thickness
            Sets the thickness of the color bar. This measure
excludes the size of the padding, ticks and labels.
thicknessmode
Determines whether this color bar's thickness (i.e. the
measure in the constant color direction) is set in
units of plot "fraction" or in "pixels". Use
`thickness` to set the value.
tick0
Sets the placement of the first tick on this axis. Use
with `dtick`. If the axis `type` is "log", then you
must take the log of your starting tick (e.g. to set
the starting tick to 100, set the `tick0` to 2) except
when `dtick`=*L<f>* (see `dtick` for more info). If the
axis `type` is "date", it should be a date string, like
date data. If the axis `type` is "category", it should
be a number, using the scale where each category is
assigned a serial number from zero in the order it
appears.
tickangle
Sets the angle of the tick labels with respect to the
horizontal. For example, a `tickangle` of -90 draws the
tick labels vertically.
tickcolor
Sets the tick color.
tickfont
Sets the color bar's tick label font
tickformat
Sets the tick label formatting rule using d3 formatting
mini-languages which are very similar to those in
Python. For numbers, see:
https://github.com/d3/d3-3.x-api-
reference/blob/master/Formatting.md#d3_format. And for
dates see: https://github.com/d3/d3-time-
format#locale_format. We add two items to d3's date
formatter: "%h" for half of the year as a decimal
number as well as "%{n}f" for fractional seconds with n
digits. For example, *2016-10-13 09:15:23.456* with
tickformat "%H~%M~%S.%2f" would display "09~15~23.46"
tickformatstops
A tuple of :class:`plotly.graph_objects.parcoords.line.
colorbar.Tickformatstop` instances or dicts with
compatible properties
tickformatstopdefaults
When used in a template (as layout.template.data.parcoo
rds.line.colorbar.tickformatstopdefaults), sets the
default property values to use for elements of
parcoords.line.colorbar.tickformatstops
ticklabeloverflow
Determines how we handle tick labels that would
overflow either the graph div or the domain of the
axis. The default value for inside tick labels is *hide
past domain*. In other cases the default is *hide past
div*.
ticklabelposition
Determines where tick labels are drawn.
ticklen
Sets the tick length (in px).
tickmode
Sets the tick mode for this axis. If "auto", the number
of ticks is set via `nticks`. If "linear", the
placement of the ticks is determined by a starting
position `tick0` and a tick step `dtick` ("linear" is
the default value if `tick0` and `dtick` are provided).
If "array", the placement of the ticks is set via
`tickvals` and the tick text is `ticktext`. ("array" is
the default value if `tickvals` is provided).
tickprefix
Sets a tick label prefix.
ticks
Determines whether ticks are drawn or not. If "", this
axis' ticks are not drawn. If "outside" ("inside"),
            this axis' ticks are drawn outside (inside) the axis lines.
ticksuffix
Sets a tick label suffix.
ticktext
Sets the text displayed at the ticks position via
`tickvals`. Only has an effect if `tickmode` is set to
"array". Used with `tickvals`.
ticktextsrc
Sets the source reference on Chart Studio Cloud for
ticktext .
tickvals
Sets the values at which ticks on this axis appear.
Only has an effect if `tickmode` is set to "array".
Used with `ticktext`.
tickvalssrc
Sets the source reference on Chart Studio Cloud for
tickvals .
tickwidth
Sets the tick width (in px).
title
:class:`plotly.graph_objects.parcoords.line.colorbar.Ti
tle` instance or dict with compatible properties
titlefont
Deprecated: Please use
parcoords.line.colorbar.title.font instead. Sets this
color bar's title font. Note that the title's font used
to be set by the now deprecated `titlefont` attribute.
titleside
Deprecated: Please use
parcoords.line.colorbar.title.side instead. Determines
the location of color bar's title with respect to the
color bar. Note that the title's location used to be
set by the now deprecated `titleside` attribute.
x
Sets the x position of the color bar (in plot
fraction).
xanchor
Sets this color bar's horizontal position anchor. This
anchor binds the `x` position to the "left", "center"
or "right" of the color bar.
xpad
Sets the amount of padding (in px) along the x
direction.
y
Sets the y position of the color bar (in plot
fraction).
yanchor
            Sets this color bar's vertical position anchor. This
anchor binds the `y` position to the "top", "middle" or
"bottom" of the color bar.
ypad
Sets the amount of padding (in px) along the y
direction.
"""
_mapped_properties = {
"titlefont": ("title", "font"),
"titleside": ("title", "side"),
}
def __init__(
self,
arg=None,
bgcolor=None,
bordercolor=None,
borderwidth=None,
dtick=None,
exponentformat=None,
len=None,
lenmode=None,
minexponent=None,
nticks=None,
outlinecolor=None,
outlinewidth=None,
separatethousands=None,
showexponent=None,
showticklabels=None,
showtickprefix=None,
showticksuffix=None,
thickness=None,
thicknessmode=None,
tick0=None,
tickangle=None,
tickcolor=None,
tickfont=None,
tickformat=None,
tickformatstops=None,
tickformatstopdefaults=None,
ticklabeloverflow=None,
ticklabelposition=None,
ticklen=None,
tickmode=None,
tickprefix=None,
ticks=None,
ticksuffix=None,
ticktext=None,
ticktextsrc=None,
tickvals=None,
tickvalssrc=None,
tickwidth=None,
title=None,
titlefont=None,
titleside=None,
x=None,
xanchor=None,
xpad=None,
y=None,
yanchor=None,
ypad=None,
**kwargs
):
"""
Construct a new ColorBar object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.parcoords.line.ColorBar`
bgcolor
Sets the color of padded area.
bordercolor
Sets the axis line color.
borderwidth
            Sets the width (in px) of the border enclosing this
color bar.
dtick
Sets the step in-between ticks on this axis. Use with
`tick0`. Must be a positive number, or special strings
available to "log" and "date" axes. If the axis `type`
is "log", then ticks are set every 10^(n*dtick) where n
is the tick number. For example, to set a tick mark at
1, 10, 100, 1000, ... set dtick to 1. To set tick marks
at 1, 100, 10000, ... set dtick to 2. To set tick marks
at 1, 5, 25, 125, 625, 3125, ... set dtick to
log_10(5), or 0.69897000433. "log" has several special
values; "L<f>", where `f` is a positive number, gives
ticks linearly spaced in value (but not position). For
example `tick0` = 0.1, `dtick` = "L0.5" will put ticks
at 0.1, 0.6, 1.1, 1.6 etc. To show powers of 10 plus
small digits between, use "D1" (all digits) or "D2"
(only 2 and 5). `tick0` is ignored for "D1" and "D2".
If the axis `type` is "date", then you must convert the
time to milliseconds. For example, to set the interval
between ticks to one day, set `dtick` to 86400000.0.
"date" also has special values "M<n>" gives ticks
spaced by a number of months. `n` must be a positive
integer. To set ticks on the 15th of every third month,
set `tick0` to "2000-01-15" and `dtick` to "M3". To set
ticks every 4 years, set `dtick` to "M48"
exponentformat
Determines a formatting rule for the tick exponents.
For example, consider the number 1,000,000,000. If
"none", it appears as 1,000,000,000. If "e", 1e+9. If
"E", 1E+9. If "power", 1x10^9 (with 9 in a super
script). If "SI", 1G. If "B", 1B.
len
            Sets the length of the color bar. This measure excludes
the padding of both ends. That is, the color bar length
is this length minus the padding on both ends.
lenmode
Determines whether this color bar's length (i.e. the
measure in the color variation direction) is set in
units of plot "fraction" or in *pixels. Use `len` to
set the value.
minexponent
Hide SI prefix for 10^n if |n| is below this number.
This only has an effect when `tickformat` is "SI" or
"B".
nticks
Specifies the maximum number of ticks for the
particular axis. The actual number of ticks will be
chosen automatically to be less than or equal to
`nticks`. Has an effect only if `tickmode` is set to
"auto".
outlinecolor
Sets the axis line color.
outlinewidth
Sets the width (in px) of the axis line.
separatethousands
If "true", even 4-digit integers are separated
showexponent
If "all", all exponents are shown besides their
significands. If "first", only the exponent of the
first tick is shown. If "last", only the exponent of
the last tick is shown. If "none", no exponents appear.
showticklabels
Determines whether or not the tick labels are drawn.
showtickprefix
If "all", all tick labels are displayed with a prefix.
If "first", only the first tick is displayed with a
prefix. If "last", only the last tick is displayed with
a suffix. If "none", tick prefixes are hidden.
showticksuffix
Same as `showtickprefix` but for tick suffixes.
thickness
            Sets the thickness of the color bar. This measure
excludes the size of the padding, ticks and labels.
thicknessmode
Determines whether this color bar's thickness (i.e. the
measure in the constant color direction) is set in
units of plot "fraction" or in "pixels". Use
`thickness` to set the value.
tick0
Sets the placement of the first tick on this axis. Use
with `dtick`. If the axis `type` is "log", then you
must take the log of your starting tick (e.g. to set
the starting tick to 100, set the `tick0` to 2) except
when `dtick`=*L<f>* (see `dtick` for more info). If the
axis `type` is "date", it should be a date string, like
date data. If the axis `type` is "category", it should
be a number, using the scale where each category is
assigned a serial number from zero in the order it
appears.
tickangle
Sets the angle of the tick labels with respect to the
horizontal. For example, a `tickangle` of -90 draws the
tick labels vertically.
tickcolor
Sets the tick color.
tickfont
Sets the color bar's tick label font
tickformat
Sets the tick label formatting rule using d3 formatting
mini-languages which are very similar to those in
Python. For numbers, see:
https://github.com/d3/d3-3.x-api-
reference/blob/master/Formatting.md#d3_format. And for
dates see: https://github.com/d3/d3-time-
format#locale_format. We add two items to d3's date
formatter: "%h" for half of the year as a decimal
number as well as "%{n}f" for fractional seconds with n
digits. For example, *2016-10-13 09:15:23.456* with
tickformat "%H~%M~%S.%2f" would display "09~15~23.46"
tickformatstops
A tuple of :class:`plotly.graph_objects.parcoords.line.
colorbar.Tickformatstop` instances or dicts with
compatible properties
tickformatstopdefaults
When used in a template (as layout.template.data.parcoo
rds.line.colorbar.tickformatstopdefaults), sets the
default property values to use for elements of
parcoords.line.colorbar.tickformatstops
ticklabeloverflow
Determines how we handle tick labels that would
overflow either the graph div or the domain of the
axis. The default value for inside tick labels is *hide
past domain*. In other cases the default is *hide past
div*.
ticklabelposition
Determines where tick labels are drawn.
ticklen
Sets the tick length (in px).
tickmode
Sets the tick mode for this axis. If "auto", the number
of ticks is set via `nticks`. If "linear", the
placement of the ticks is determined by a starting
position `tick0` and a tick step `dtick` ("linear" is
the default value if `tick0` and `dtick` are provided).
If "array", the placement of the ticks is set via
`tickvals` and the tick text is `ticktext`. ("array" is
the default value if `tickvals` is provided).
tickprefix
Sets a tick label prefix.
ticks
Determines whether ticks are drawn or not. If "", this
axis' ticks are not drawn. If "outside" ("inside"),
            this axis' ticks are drawn outside (inside) the axis lines.
ticksuffix
Sets a tick label suffix.
ticktext
Sets the text displayed at the ticks position via
`tickvals`. Only has an effect if `tickmode` is set to
"array". Used with `tickvals`.
ticktextsrc
Sets the source reference on Chart Studio Cloud for
ticktext .
tickvals
Sets the values at which ticks on this axis appear.
Only has an effect if `tickmode` is set to "array".
Used with `ticktext`.
tickvalssrc
Sets the source reference on Chart Studio Cloud for
tickvals .
tickwidth
Sets the tick width (in px).
title
:class:`plotly.graph_objects.parcoords.line.colorbar.Ti
tle` instance or dict with compatible properties
titlefont
Deprecated: Please use
parcoords.line.colorbar.title.font instead. Sets this
color bar's title font. Note that the title's font used
to be set by the now deprecated `titlefont` attribute.
titleside
Deprecated: Please use
parcoords.line.colorbar.title.side instead. Determines
the location of color bar's title with respect to the
color bar. Note that the title's location used to be
set by the now deprecated `titleside` attribute.
x
Sets the x position of the color bar (in plot
fraction).
xanchor
Sets this color bar's horizontal position anchor. This
anchor binds the `x` position to the "left", "center"
or "right" of the color bar.
xpad
Sets the amount of padding (in px) along the x
direction.
y
Sets the y position of the color bar (in plot
fraction).
yanchor
            Sets this color bar's vertical position anchor. This
anchor binds the `y` position to the "top", "middle" or
"bottom" of the color bar.
ypad
Sets the amount of padding (in px) along the y
direction.
Returns
-------
ColorBar
"""
super(ColorBar, self).__init__("colorbar")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.parcoords.line.ColorBar
constructor must be a dict or
an instance of :class:`plotly.graph_objs.parcoords.line.ColorBar`"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("bgcolor", None)
_v = bgcolor if bgcolor is not None else _v
if _v is not None:
self["bgcolor"] = _v
_v = arg.pop("bordercolor", None)
_v = bordercolor if bordercolor is not None else _v
if _v is not None:
self["bordercolor"] = _v
_v = arg.pop("borderwidth", None)
_v = borderwidth if borderwidth is not None else _v
if _v is not None:
self["borderwidth"] = _v
_v = arg.pop("dtick", None)
_v = dtick if dtick is not None else _v
if _v is not None:
self["dtick"] = _v
_v = arg.pop("exponentformat", None)
_v = exponentformat if exponentformat is not None else _v
if _v is not None:
self["exponentformat"] = _v
_v = arg.pop("len", None)
_v = len if len is not None else _v
if _v is not None:
self["len"] = _v
_v = arg.pop("lenmode", None)
_v = lenmode if lenmode is not None else _v
if _v is not None:
self["lenmode"] = _v
_v = arg.pop("minexponent", None)
_v = minexponent if minexponent is not None else _v
if _v is not None:
self["minexponent"] = _v
_v = arg.pop("nticks", None)
_v = nticks if nticks is not None else _v
if _v is not None:
self["nticks"] = _v
_v = arg.pop("outlinecolor", None)
_v = outlinecolor if outlinecolor is not None else _v
if _v is not None:
self["outlinecolor"] = _v
_v = arg.pop("outlinewidth", None)
_v = outlinewidth if outlinewidth is not None else _v
if _v is not None:
self["outlinewidth"] = _v
_v = arg.pop("separatethousands", None)
_v = separatethousands if separatethousands is not None else _v
if _v is not None:
self["separatethousands"] = _v
_v = arg.pop("showexponent", None)
_v = showexponent if showexponent is not None else _v
if _v is not None:
self["showexponent"] = _v
_v = arg.pop("showticklabels", None)
_v = showticklabels if showticklabels is not None else _v
if _v is not None:
self["showticklabels"] = _v
_v = arg.pop("showtickprefix", None)
_v = showtickprefix if showtickprefix is not None else _v
if _v is not None:
self["showtickprefix"] = _v
_v = arg.pop("showticksuffix", None)
_v = showticksuffix if showticksuffix is not None else _v
if _v is not None:
self["showticksuffix"] = _v
_v = arg.pop("thickness", None)
_v = thickness if thickness is not None else _v
if _v is not None:
self["thickness"] = _v
_v = arg.pop("thicknessmode", None)
_v = thicknessmode if thicknessmode is not None else _v
if _v is not None:
self["thicknessmode"] = _v
_v = arg.pop("tick0", None)
_v = tick0 if tick0 is not None else _v
if _v is not None:
self["tick0"] = _v
_v = arg.pop("tickangle", None)
_v = tickangle if tickangle is not None else _v
if _v is not None:
self["tickangle"] = _v
_v = arg.pop("tickcolor", None)
_v = tickcolor if tickcolor is not None else _v
if _v is not None:
self["tickcolor"] = _v
_v = arg.pop("tickfont", None)
_v = tickfont if tickfont is not None else _v
if _v is not None:
self["tickfont"] = _v
_v = arg.pop("tickformat", None)
_v = tickformat if tickformat is not None else _v
if _v is not None:
self["tickformat"] = _v
_v = arg.pop("tickformatstops", None)
_v = tickformatstops if tickformatstops is not None else _v
if _v is not None:
self["tickformatstops"] = _v
_v = arg.pop("tickformatstopdefaults", None)
_v = tickformatstopdefaults if tickformatstopdefaults is not None else _v
if _v is not None:
self["tickformatstopdefaults"] = _v
_v = arg.pop("ticklabeloverflow", None)
_v = ticklabeloverflow if ticklabeloverflow is not None else _v
if _v is not None:
self["ticklabeloverflow"] = _v
_v = arg.pop("ticklabelposition", None)
_v = ticklabelposition if ticklabelposition is not None else _v
if _v is not None:
self["ticklabelposition"] = _v
_v = arg.pop("ticklen", None)
_v = ticklen if ticklen is not None else _v
if _v is not None:
self["ticklen"] = _v
_v = arg.pop("tickmode", None)
_v = tickmode if tickmode is not None else _v
if _v is not None:
self["tickmode"] = _v
_v = arg.pop("tickprefix", None)
_v = tickprefix if tickprefix is not None else _v
if _v is not None:
self["tickprefix"] = _v
_v = arg.pop("ticks", None)
_v = ticks if ticks is not None else _v
if _v is not None:
self["ticks"] = _v
_v = arg.pop("ticksuffix", None)
_v = ticksuffix if ticksuffix is not None else _v
if _v is not None:
self["ticksuffix"] = _v
_v = arg.pop("ticktext", None)
_v = ticktext if ticktext is not None else _v
if _v is not None:
self["ticktext"] = _v
_v = arg.pop("ticktextsrc", None)
_v = ticktextsrc if ticktextsrc is not None else _v
if _v is not None:
self["ticktextsrc"] = _v
_v = arg.pop("tickvals", None)
_v = tickvals if tickvals is not None else _v
if _v is not None:
self["tickvals"] = _v
_v = arg.pop("tickvalssrc", None)
_v = tickvalssrc if tickvalssrc is not None else _v
if _v is not None:
self["tickvalssrc"] = _v
_v = arg.pop("tickwidth", None)
_v = tickwidth if tickwidth is not None else _v
if _v is not None:
self["tickwidth"] = _v
_v = arg.pop("title", None)
_v = title if title is not None else _v
if _v is not None:
self["title"] = _v
_v = arg.pop("titlefont", None)
_v = titlefont if titlefont is not None else _v
if _v is not None:
self["titlefont"] = _v
_v = arg.pop("titleside", None)
_v = titleside if titleside is not None else _v
if _v is not None:
self["titleside"] = _v
_v = arg.pop("x", None)
_v = x if x is not None else _v
if _v is not None:
self["x"] = _v
_v = arg.pop("xanchor", None)
_v = xanchor if xanchor is not None else _v
if _v is not None:
self["xanchor"] = _v
_v = arg.pop("xpad", None)
_v = xpad if xpad is not None else _v
if _v is not None:
self["xpad"] = _v
_v = arg.pop("y", None)
_v = y if y is not None else _v
if _v is not None:
self["y"] = _v
_v = arg.pop("yanchor", None)
_v = yanchor if yanchor is not None else _v
if _v is not None:
self["yanchor"] = _v
_v = arg.pop("ypad", None)
_v = ypad if ypad is not None else _v
if _v is not None:
self["ypad"] = _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
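# --- Illustrative usage sketch (not part of the generated module) ---
# A minimal, hedged example of how a few of the colorbar properties documented
# above are typically combined on a parcoords trace. The figure, the data
# values and the parcoords `line`/`dimensions` settings outside this module
# are assumptions made for illustration only.
if __name__ == "__main__":
    import plotly.graph_objects as go
    fig = go.Figure(
        go.Parcoords(
            line=dict(
                color=[0.2, 0.5, 0.9],  # per-line color values (assumed data)
                colorscale="Viridis",
                showscale=True,
                colorbar=dict(  # maps onto the ColorBar properties above
                    title=dict(text="score", side="right"),
                    thickness=15,  # px; see `thicknessmode`
                    len=0.75,  # plot fraction; see `lenmode`
                    tickmode="linear",
                    tick0=0,
                    dtick=0.25,
                ),
            ),
            dimensions=[
                dict(label="a", values=[1, 2, 3]),
                dict(label="b", values=[3, 1, 2]),
            ],
        )
    )
    fig.show()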
| mit |
jni/networkx | examples/multigraph/chess_masters.py | 54 | 5146 | #!/usr/bin/env python
"""
An example of the MultiDiGraph class.
The function chess_pgn_graph reads a collection of chess
matches stored in the specified PGN file
(PGN ="Portable Game Notation")
Here the (compressed) default file ---
chess_masters_WCC.pgn.bz2 ---
contains all 685 World Chess Championship matches
from 1886 - 1985.
(data from http://chessproblem.my-free-games.com/chess/games/Download-PGN.php)
The chess_pgn_graph() function returns a MultiDiGraph
with multiple edges. Each node is
the last name of a chess master. Each edge is directed
from white to black and contains selected game info.
The key statement in chess_pgn_graph below is
G.add_edge(white, black, **game_info)
where game_info is a dict describing each game.
"""
# Copyright (C) 2006-2010 by
# Aric Hagberg <[email protected]>
# Dan Schult <[email protected]>
# Pieter Swart <[email protected]>
# All rights reserved.
# BSD license.
import networkx as nx
# tag names specifying what game info should be
# stored in the dict on each digraph edge
game_details=["Event",
"Date",
"Result",
"ECO",
"Site"]
def chess_pgn_graph(pgn_file="chess_masters_WCC.pgn.bz2"):
"""Read chess games in pgn format in pgn_file.
Filenames ending in .gz or .bz2 will be uncompressed.
Return the MultiDiGraph of players connected by a chess game.
Edges contain game data in a dict.
"""
import bz2
G=nx.MultiDiGraph()
game={}
datafile = bz2.BZ2File(pgn_file)
lines = (line.decode().rstrip('\r\n') for line in datafile)
for line in lines:
if line.startswith('['):
tag,value=line[1:-1].split(' ',1)
game[str(tag)]=value.strip('"')
else:
# empty line after tag set indicates
# we finished reading game info
if game:
white=game.pop('White')
black=game.pop('Black')
G.add_edge(white, black, **game)
game={}
return G
if __name__ == '__main__':
import networkx as nx
G=chess_pgn_graph()
ngames=G.number_of_edges()
nplayers=G.number_of_nodes()
print("Loaded %d chess games between %d players\n"\
% (ngames,nplayers))
# identify connected components
# of the undirected version
Gcc=list(nx.connected_component_subgraphs(G.to_undirected()))
if len(Gcc)>1:
print("Note the disconnected component consisting of:")
print(Gcc[1].nodes())
# find all games with B97 opening (as described in ECO)
openings=set([game_info['ECO']
for (white,black,game_info) in G.edges(data=True)])
print("\nFrom a total of %d different openings,"%len(openings))
print('the following games used the Sicilian opening')
print('with the Najdorff 7...Qb6 "Poisoned Pawn" variation.\n')
for (white,black,game_info) in G.edges(data=True):
if game_info['ECO']=='B97':
print(white,"vs",black)
for k,v in game_info.items():
print(" ",k,": ",v)
print("\n")
try:
import matplotlib.pyplot as plt
except ImportError:
import sys
print("Matplotlib needed for drawing. Skipping")
sys.exit(0)
# make new undirected graph H without multi-edges
H=nx.Graph(G)
# edge width is proportional number of games played
edgewidth=[]
for (u,v,d) in H.edges(data=True):
edgewidth.append(len(G.get_edge_data(u,v)))
# node size is proportional to number of games won
wins=dict.fromkeys(G.nodes(),0.0)
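    # PGN "Result" tags read "1-0", "0-1" or "1/2-1/2"; splitting on "-" lets
    # us credit a full point to the winner, or half a point each for a draw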
for (u,v,d) in G.edges(data=True):
r=d['Result'].split('-')
if r[0]=='1':
wins[u]+=1.0
elif r[0]=='1/2':
wins[u]+=0.5
wins[v]+=0.5
else:
wins[v]+=1.0
try:
pos=nx.graphviz_layout(H)
except:
pos=nx.spring_layout(H,iterations=20)
plt.rcParams['text.usetex'] = False
plt.figure(figsize=(8,8))
nx.draw_networkx_edges(H,pos,alpha=0.3,width=edgewidth, edge_color='m')
nodesize=[wins[v]*50 for v in H]
nx.draw_networkx_nodes(H,pos,node_size=nodesize,node_color='w',alpha=0.4)
nx.draw_networkx_edges(H,pos,alpha=0.4,node_size=0,width=1,edge_color='k')
    nx.draw_networkx_labels(H,pos,font_size=14)
font = {'fontname' : 'Helvetica',
'color' : 'k',
'fontweight' : 'bold',
'fontsize' : 14}
plt.title("World Chess Championship Games: 1886 - 1985", font)
# change font and write text (using data coordinates)
font = {'fontname' : 'Helvetica',
'color' : 'r',
'fontweight' : 'bold',
'fontsize' : 14}
plt.text(0.5, 0.97, "edge width = # games played",
horizontalalignment='center',
transform=plt.gca().transAxes)
plt.text(0.5, 0.94, "node size = # games won",
horizontalalignment='center',
transform=plt.gca().transAxes)
plt.axis('off')
plt.savefig("chess_masters.png",dpi=75)
print("Wrote chess_masters.png")
plt.show() # display
| bsd-3-clause |
shyamalschandra/scikit-learn | examples/datasets/plot_random_dataset.py | 348 | 2254 | """
==============================================
Plot randomly generated classification dataset
==============================================
Plot several randomly generated 2D classification datasets.
This example illustrates the :func:`datasets.make_classification`,
:func:`datasets.make_blobs` and :func:`datasets.make_gaussian_quantiles`
functions.
For ``make_classification``, three binary and two multi-class classification
datasets are generated, with different numbers of informative features and
clusters per class. """
print(__doc__)
import matplotlib.pyplot as plt
from sklearn.datasets import make_classification
from sklearn.datasets import make_blobs
from sklearn.datasets import make_gaussian_quantiles
plt.figure(figsize=(8, 8))
plt.subplots_adjust(bottom=.05, top=.9, left=.05, right=.95)
plt.subplot(321)
plt.title("One informative feature, one cluster per class", fontsize='small')
X1, Y1 = make_classification(n_features=2, n_redundant=0, n_informative=1,
n_clusters_per_class=1)
plt.scatter(X1[:, 0], X1[:, 1], marker='o', c=Y1)
plt.subplot(322)
plt.title("Two informative features, one cluster per class", fontsize='small')
X1, Y1 = make_classification(n_features=2, n_redundant=0, n_informative=2,
n_clusters_per_class=1)
plt.scatter(X1[:, 0], X1[:, 1], marker='o', c=Y1)
plt.subplot(323)
plt.title("Two informative features, two clusters per class", fontsize='small')
X2, Y2 = make_classification(n_features=2, n_redundant=0, n_informative=2)
plt.scatter(X2[:, 0], X2[:, 1], marker='o', c=Y2)
plt.subplot(324)
plt.title("Multi-class, two informative features, one cluster",
fontsize='small')
X1, Y1 = make_classification(n_features=2, n_redundant=0, n_informative=2,
n_clusters_per_class=1, n_classes=3)
plt.scatter(X1[:, 0], X1[:, 1], marker='o', c=Y1)
plt.subplot(325)
plt.title("Three blobs", fontsize='small')
X1, Y1 = make_blobs(n_features=2, centers=3)
plt.scatter(X1[:, 0], X1[:, 1], marker='o', c=Y1)
plt.subplot(326)
plt.title("Gaussian divided into three quantiles", fontsize='small')
X1, Y1 = make_gaussian_quantiles(n_features=2, n_classes=3)
plt.scatter(X1[:, 0], X1[:, 1], marker='o', c=Y1)
plt.show()
| bsd-3-clause |
aminert/scikit-learn | examples/svm/plot_separating_hyperplane.py | 294 | 1273 | """
=========================================
SVM: Maximum margin separating hyperplane
=========================================
Plot the maximum margin separating hyperplane within a two-class
separable dataset using a Support Vector Machine classifier with
linear kernel.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm
# we create 40 separable points
np.random.seed(0)
X = np.r_[np.random.randn(20, 2) - [2, 2], np.random.randn(20, 2) + [2, 2]]
Y = [0] * 20 + [1] * 20
# fit the model
clf = svm.SVC(kernel='linear')
clf.fit(X, Y)
# get the separating hyperplane
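# the boundary satisfies w[0]*x + w[1]*y + intercept = 0, so solving for y
# gives y = -(w[0]/w[1])*x - intercept/w[1]; `a` below is that slope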
w = clf.coef_[0]
a = -w[0] / w[1]
xx = np.linspace(-5, 5)
yy = a * xx - (clf.intercept_[0]) / w[1]
# plot the parallels to the separating hyperplane that pass through the
# support vectors
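# the margin boundaries share the slope `a` and are shifted so that each one
# passes through a support vector on its side of the hyperplane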
b = clf.support_vectors_[0]
yy_down = a * xx + (b[1] - a * b[0])
b = clf.support_vectors_[-1]
yy_up = a * xx + (b[1] - a * b[0])
# plot the line, the points, and the nearest vectors to the plane
plt.plot(xx, yy, 'k-')
plt.plot(xx, yy_down, 'k--')
plt.plot(xx, yy_up, 'k--')
plt.scatter(clf.support_vectors_[:, 0], clf.support_vectors_[:, 1],
s=80, facecolors='none')
plt.scatter(X[:, 0], X[:, 1], c=Y, cmap=plt.cm.Paired)
plt.axis('tight')
plt.show()
| bsd-3-clause |
jungla/ICOM-fluidity-toolbox | 2D/U/plot_KE_mli.py | 1 | 1420 | import os, sys
import vtktools
import fluidity_tools
import numpy as np
import matplotlib as mpl
mpl.use('ps')
import matplotlib.pyplot as plt
label0 = 'm_25_3b'
label1 = 'm_25_3b'
basename1 = 'mli_checkpoint'
basename0 = 'mli'
path0 = '/tamay2/mensa/fluidity/'+label0+'/'
path1 = '/tamay2/mensa/fluidity/'+label1+'/'
try: os.stat('./plot/'+label1)
except OSError: os.mkdir('./plot/'+label1)
#
file0 = basename0+'.stat'
filepath0 = path0+file0
stat0 = fluidity_tools.stat_parser(filepath0)
file1 = basename1+'.stat'
filepath1 = path1+file1
stat1 = fluidity_tools.stat_parser(filepath1)
time0 = stat0["ElapsedTime"]["value"]/86400.0
time1 = stat1["ElapsedTime"]["value"]/86400.0
KE0 = (stat0["BoussinesqFluid"]["Velocity%magnitude"]["l2norm"])/np.sqrt((10000*10000*50))
KE1 = (stat1["BoussinesqFluid"]["Velocity%magnitude"]["l2norm"])/np.sqrt((10000*10000*50))
#time1[time1 > 1] = np.nan
#KE1[time1 > 1] = np.nan
# plot KE
fig = plt.figure()
#plt.plot(time0[:], KE0[:], '--k',linewidth=3)
plt.plot(time1[:], KE1[:], '-k',linewidth=3)
plt.xlabel("Time $[days]$", fontsize=22)
plt.ylabel("rms Magnitude Velocity $[m/s]$", fontsize=22)
plt.xticks(fontsize=18)
plt.yticks(fontsize=18)
#plt.xlim([-0.000017, 0.00004])
#plt.xticks([-0.00001, 0.00002])
plt.tight_layout()
plt.savefig('./plot/'+label1+'/KE_t_'+label1+'.eps',bbox_inches='tight')
plt.close()
print 'saved '+'./plot/'+label1+'/KE_t_'+label1+'.eps\n'
#
| gpl-2.0 |
WangWenjun559/Weiss | summary/sumy/sklearn/utils/tests/test_class_weight.py | 140 | 11909 | import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.datasets import make_blobs
from sklearn.utils.class_weight import compute_class_weight
from sklearn.utils.class_weight import compute_sample_weight
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_warns
def test_compute_class_weight():
# Test (and demo) compute_class_weight.
y = np.asarray([2, 2, 2, 3, 3, 4])
classes = np.unique(y)
cw = assert_warns(DeprecationWarning,
compute_class_weight, "auto", classes, y)
assert_almost_equal(cw.sum(), classes.shape)
assert_true(cw[0] < cw[1] < cw[2])
cw = compute_class_weight("balanced", classes, y)
# total effect of samples is preserved
class_counts = np.bincount(y)[2:]
assert_almost_equal(np.dot(cw, class_counts), y.shape[0])
assert_true(cw[0] < cw[1] < cw[2])
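# Illustrative sketch (not part of the original test suite): the "balanced"
# heuristic weights each class by n_samples / (n_classes * bincount(y)).
# The toy labels below are an arbitrary choice for demonstration.
def _balanced_weight_formula_example():
    y = np.asarray([2, 2, 2, 3, 3, 4])
    counts = np.bincount(y)[2:]            # class counts: [3, 2, 1]
    weights = len(y) / (3.0 * counts)      # -> [0.667, 1.0, 2.0]
    return weights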
def test_compute_class_weight_not_present():
# Raise error when y does not contain all class labels
classes = np.arange(4)
y = np.asarray([0, 0, 0, 1, 1, 2])
assert_raises(ValueError, compute_class_weight, "auto", classes, y)
assert_raises(ValueError, compute_class_weight, "balanced", classes, y)
def test_compute_class_weight_invariance():
# Test that results with class_weight="balanced" is invariant wrt
# class imbalance if the number of samples is identical.
# The test uses a balanced two class dataset with 100 datapoints.
# It creates three versions, one where class 1 is duplicated
# resulting in 150 points of class 1 and 50 of class 0,
# one where there are 50 points in class 1 and 150 in class 0,
# and one where there are 100 points of each class (this one is balanced
# again).
# With balancing class weights, all three should give the same model.
X, y = make_blobs(centers=2, random_state=0)
# create dataset where class 1 is duplicated twice
X_1 = np.vstack([X] + [X[y == 1]] * 2)
y_1 = np.hstack([y] + [y[y == 1]] * 2)
# create dataset where class 0 is duplicated twice
X_0 = np.vstack([X] + [X[y == 0]] * 2)
y_0 = np.hstack([y] + [y[y == 0]] * 2)
    # duplicate everything
X_ = np.vstack([X] * 2)
y_ = np.hstack([y] * 2)
# results should be identical
logreg1 = LogisticRegression(class_weight="balanced").fit(X_1, y_1)
logreg0 = LogisticRegression(class_weight="balanced").fit(X_0, y_0)
logreg = LogisticRegression(class_weight="balanced").fit(X_, y_)
assert_array_almost_equal(logreg1.coef_, logreg0.coef_)
assert_array_almost_equal(logreg.coef_, logreg0.coef_)
def test_compute_class_weight_auto_negative():
# Test compute_class_weight when labels are negative
# Test with balanced class labels.
classes = np.array([-2, -1, 0])
y = np.asarray([-1, -1, 0, 0, -2, -2])
cw = assert_warns(DeprecationWarning, compute_class_weight, "auto",
classes, y)
assert_almost_equal(cw.sum(), classes.shape)
assert_equal(len(cw), len(classes))
assert_array_almost_equal(cw, np.array([1., 1., 1.]))
cw = compute_class_weight("balanced", classes, y)
assert_equal(len(cw), len(classes))
assert_array_almost_equal(cw, np.array([1., 1., 1.]))
# Test with unbalanced class labels.
y = np.asarray([-1, 0, 0, -2, -2, -2])
cw = assert_warns(DeprecationWarning, compute_class_weight, "auto",
classes, y)
assert_almost_equal(cw.sum(), classes.shape)
assert_equal(len(cw), len(classes))
assert_array_almost_equal(cw, np.array([0.545, 1.636, 0.818]), decimal=3)
cw = compute_class_weight("balanced", classes, y)
assert_equal(len(cw), len(classes))
class_counts = np.bincount(y + 2)
assert_almost_equal(np.dot(cw, class_counts), y.shape[0])
assert_array_almost_equal(cw, [2. / 3, 2., 1.])
def test_compute_class_weight_auto_unordered():
# Test compute_class_weight when classes are unordered
classes = np.array([1, 0, 3])
y = np.asarray([1, 0, 0, 3, 3, 3])
cw = assert_warns(DeprecationWarning, compute_class_weight, "auto",
classes, y)
assert_almost_equal(cw.sum(), classes.shape)
assert_equal(len(cw), len(classes))
assert_array_almost_equal(cw, np.array([1.636, 0.818, 0.545]), decimal=3)
cw = compute_class_weight("balanced", classes, y)
class_counts = np.bincount(y)[classes]
assert_almost_equal(np.dot(cw, class_counts), y.shape[0])
assert_array_almost_equal(cw, [2., 1., 2. / 3])
def test_compute_sample_weight():
# Test (and demo) compute_sample_weight.
# Test with balanced classes
y = np.asarray([1, 1, 1, 2, 2, 2])
sample_weight = assert_warns(DeprecationWarning,
compute_sample_weight, "auto", y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
sample_weight = compute_sample_weight("balanced", y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
# Test with user-defined weights
sample_weight = compute_sample_weight({1: 2, 2: 1}, y)
assert_array_almost_equal(sample_weight, [2., 2., 2., 1., 1., 1.])
# Test with column vector of balanced classes
y = np.asarray([[1], [1], [1], [2], [2], [2]])
sample_weight = assert_warns(DeprecationWarning,
compute_sample_weight, "auto", y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
sample_weight = compute_sample_weight("balanced", y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
# Test with unbalanced classes
y = np.asarray([1, 1, 1, 2, 2, 2, 3])
sample_weight = assert_warns(DeprecationWarning,
compute_sample_weight, "auto", y)
expected_auto = np.asarray([.6, .6, .6, .6, .6, .6, 1.8])
assert_array_almost_equal(sample_weight, expected_auto)
sample_weight = compute_sample_weight("balanced", y)
expected_balanced = np.array([0.7777, 0.7777, 0.7777, 0.7777, 0.7777, 0.7777, 2.3333])
assert_array_almost_equal(sample_weight, expected_balanced, decimal=4)
# Test with `None` weights
sample_weight = compute_sample_weight(None, y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1., 1.])
# Test with multi-output of balanced classes
y = np.asarray([[1, 0], [1, 0], [1, 0], [2, 1], [2, 1], [2, 1]])
sample_weight = assert_warns(DeprecationWarning,
compute_sample_weight, "auto", y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
sample_weight = compute_sample_weight("balanced", y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
# Test with multi-output with user-defined weights
y = np.asarray([[1, 0], [1, 0], [1, 0], [2, 1], [2, 1], [2, 1]])
sample_weight = compute_sample_weight([{1: 2, 2: 1}, {0: 1, 1: 2}], y)
assert_array_almost_equal(sample_weight, [2., 2., 2., 2., 2., 2.])
# Test with multi-output of unbalanced classes
y = np.asarray([[1, 0], [1, 0], [1, 0], [2, 1], [2, 1], [2, 1], [3, -1]])
sample_weight = assert_warns(DeprecationWarning,
compute_sample_weight, "auto", y)
assert_array_almost_equal(sample_weight, expected_auto ** 2)
sample_weight = compute_sample_weight("balanced", y)
assert_array_almost_equal(sample_weight, expected_balanced ** 2, decimal=3)
def test_compute_sample_weight_with_subsample():
# Test compute_sample_weight with subsamples specified.
# Test with balanced classes and all samples present
y = np.asarray([1, 1, 1, 2, 2, 2])
sample_weight = assert_warns(DeprecationWarning,
compute_sample_weight, "auto", y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
sample_weight = compute_sample_weight("balanced", y, range(6))
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
# Test with column vector of balanced classes and all samples present
y = np.asarray([[1], [1], [1], [2], [2], [2]])
sample_weight = assert_warns(DeprecationWarning,
compute_sample_weight, "auto", y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
sample_weight = compute_sample_weight("balanced", y, range(6))
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
# Test with a subsample
y = np.asarray([1, 1, 1, 2, 2, 2])
sample_weight = assert_warns(DeprecationWarning,
compute_sample_weight, "auto", y, range(4))
assert_array_almost_equal(sample_weight, [.5, .5, .5, 1.5, 1.5, 1.5])
sample_weight = compute_sample_weight("balanced", y, range(4))
assert_array_almost_equal(sample_weight, [2. / 3, 2. / 3,
2. / 3, 2., 2., 2.])
# Test with a bootstrap subsample
y = np.asarray([1, 1, 1, 2, 2, 2])
sample_weight = assert_warns(DeprecationWarning, compute_sample_weight,
"auto", y, [0, 1, 1, 2, 2, 3])
expected_auto = np.asarray([1 / 3., 1 / 3., 1 / 3., 5 / 3., 5 / 3., 5 / 3.])
assert_array_almost_equal(sample_weight, expected_auto)
sample_weight = compute_sample_weight("balanced", y, [0, 1, 1, 2, 2, 3])
expected_balanced = np.asarray([0.6, 0.6, 0.6, 3., 3., 3.])
assert_array_almost_equal(sample_weight, expected_balanced)
# Test with a bootstrap subsample for multi-output
y = np.asarray([[1, 0], [1, 0], [1, 0], [2, 1], [2, 1], [2, 1]])
sample_weight = assert_warns(DeprecationWarning, compute_sample_weight,
"auto", y, [0, 1, 1, 2, 2, 3])
assert_array_almost_equal(sample_weight, expected_auto ** 2)
sample_weight = compute_sample_weight("balanced", y, [0, 1, 1, 2, 2, 3])
assert_array_almost_equal(sample_weight, expected_balanced ** 2)
# Test with a missing class
y = np.asarray([1, 1, 1, 2, 2, 2, 3])
sample_weight = assert_warns(DeprecationWarning, compute_sample_weight,
"auto", y, range(6))
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1., 0.])
sample_weight = compute_sample_weight("balanced", y, range(6))
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1., 0.])
# Test with a missing class for multi-output
y = np.asarray([[1, 0], [1, 0], [1, 0], [2, 1], [2, 1], [2, 1], [2, 2]])
sample_weight = assert_warns(DeprecationWarning, compute_sample_weight,
"auto", y, range(6))
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1., 0.])
sample_weight = compute_sample_weight("balanced", y, range(6))
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1., 0.])
def test_compute_sample_weight_errors():
# Test compute_sample_weight raises errors expected.
# Invalid preset string
y = np.asarray([1, 1, 1, 2, 2, 2])
y_ = np.asarray([[1, 0], [1, 0], [1, 0], [2, 1], [2, 1], [2, 1]])
assert_raises(ValueError, compute_sample_weight, "ni", y)
assert_raises(ValueError, compute_sample_weight, "ni", y, range(4))
assert_raises(ValueError, compute_sample_weight, "ni", y_)
assert_raises(ValueError, compute_sample_weight, "ni", y_, range(4))
# Not "auto" for subsample
assert_raises(ValueError,
compute_sample_weight, {1: 2, 2: 1}, y, range(4))
# Not a list or preset for multi-output
assert_raises(ValueError, compute_sample_weight, {1: 2, 2: 1}, y_)
# Incorrect length list for multi-output
assert_raises(ValueError, compute_sample_weight, [{1: 2, 2: 1}], y_)
| apache-2.0 |
halflings/receval | test_receval.py | 1 | 3895 | import pandas as pd
import receval
def test_data_loading():
df = receval.download.load_movielens()
df = receval.preprocessing.aggregate_ratings(df, method='min')
receval.preprocessing.validate_dataframe(df)
def test_random_splitter():
df = receval.download.load_movielens()
df = receval.preprocessing.aggregate_ratings(df, method='max')
for test_size in [0.1, 0.4, 0.9]:
splitter = receval.splitter.RandomSplitter(
test_size=test_size, per_user=True)
train, test = splitter.split(df)
test_count = test.groupby('user').count()['rating']
train_count = train.groupby('user').count()['rating']
assert (test_count / (test_count + train_count)).apply(lambda v: abs(v - test_size) < 0.05).all(), "test/train ratio is off"
def test_temporal_splitter():
df = receval.download.load_movielens()
for test_size in [0.1, 0.9]:
splitter = receval.splitter.TemporalSplitter(
test_size=test_size, per_user=True)
train, test = splitter.split(df)
test_count = test.groupby('user').count()['rating']
train_count = train.groupby('user').count()['rating']
assert (test_count / (test_count + train_count)).apply(lambda v: abs(v - test_size) < 0.05).all(), "test/train ratio is off"
# TODO : do a more specific test (that tests the split is in fact on time,
# not just random)
def test_recommender():
df = receval.download.load_movielens()
dummy_cmd = receval.recommender.DummyCommandRecommender()
dummy_obj = receval.recommender.DummyRecommender()
splitter = receval.splitter.RandomSplitter(0.5, per_user=True)
print("* Splitting...")
train, test = splitter.split(df)
test_users = test.user.unique()
print(dummy_cmd.recommend(train, test_users))
print("* Dummy object...")
print(dummy_obj.recommend(train, test_users))
rec = receval.recommender.BaselineRecommender()
recommendations = rec.recommend(train, test_users)
print(recommendations)
def test_simple_preprocessing_recommender():
df = pd.DataFrame(dict(user=[0, 0, 0, 1, 1, 2, 2, 2],
item=[0, 1, 3, 0, 0, 0, 1, 1],
rating=[1, 0.01, 0.9, 0.05, 0.85, 0.95, 1., 0.9]))
df = df[['user', 'item', 'rating']]
def threshold_and_dedup_func(ratings):
ratings = ratings.copy()
ratings['rating'] = ratings['rating'].apply(
lambda v: 1 if v > 0.8 else 0)
ratings = ratings.drop_duplicates(subset=['user', 'item'])
return ratings
splitter = receval.splitter.RandomSplitter(0.5)
train, test = splitter.split(df)
class ModifiedTestRecommender(receval.recommender.BaselineRecommender):
def _recommend(self, train_ratings, test_users):
assert train_ratings.rating.isin(
[0, 1]).all(), "The ratings weren't thresholded like expected"
return super(ModifiedTestRecommender, self)._recommend(train_ratings, test_users)
recommender = ModifiedTestRecommender(
preprocessing_func=threshold_and_dedup_func)
recommender.recommend(train, test.user.unique())
def test_evaluation_instance():
ratings = receval.download.load_movielens()
splitter = receval.splitter.TemporalSplitter(test_size=0.3, per_user=True)
train, test = splitter.split(ratings)
rec = receval.recommender.BaselineRecommender(
num_recommendations=20)
recommendations = rec.recommend(train, test.user.unique())
receval.evaluation.Evaluation(recommendations, test)
def test_word2vec_class():
ratings = receval.download.load_movielens()
splitter = receval.splitter.TemporalSplitter(test_size=0.3, per_user=True)
train, test = splitter.split(ratings)
rec = receval.recommender.Word2VecRecommender(num_recommendations=50)
rec.recommend(train, test.user.unique())
| apache-2.0 |
rahul-c1/scikit-learn | sklearn/covariance/robust_covariance.py | 17 | 28933 | """
Robust location and covariance estimators.
Here are implemented estimators that are resistant to outliers.
"""
# Author: Virgile Fritsch <[email protected]>
#
# License: BSD 3 clause
import warnings
import numbers
import numpy as np
from scipy import linalg
from scipy.stats import chi2
from . import empirical_covariance, EmpiricalCovariance
from ..utils.extmath import fast_logdet, pinvh
from ..utils import check_random_state
# Minimum Covariance Determinant
# Implementation of an algorithm by Rousseeuw & Van Driessen described in
# (A Fast Algorithm for the Minimum Covariance Determinant Estimator,
# 1999, American Statistical Association and the American Society
# for Quality, TECHNOMETRICS)
# XXX Is this really a public function? It's not listed in the docs or
# exported by sklearn.covariance. Deprecate?
def c_step(X, n_support, remaining_iterations=30, initial_estimates=None,
verbose=False, cov_computation_method=empirical_covariance,
random_state=None):
"""C_step procedure described in [Rouseeuw1984]_ aiming at computing MCD.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Data set in which we look for the n_support observations whose
scatter matrix has minimum determinant.
n_support : int, > n_samples / 2
Number of observations to compute the robust estimates of location
and covariance from.
remaining_iterations : int, optional
Number of iterations to perform.
According to [Rouseeuw1999]_, two iterations are sufficient to get
close to the minimum, and we never need more than 30 to reach
convergence.
initial_estimates : 2-tuple, optional
Initial estimates of location and shape from which to run the c_step
procedure:
- initial_estimates[0]: an initial location estimate
- initial_estimates[1]: an initial covariance estimate
verbose : boolean, optional
Verbose mode.
random_state : integer or numpy.RandomState, optional
The random generator used. If an integer is given, it fixes the
seed. Defaults to the global numpy random number generator.
Returns
-------
location : array-like, shape (n_features,)
Robust location estimates.
covariance : array-like, shape (n_features, n_features)
Robust covariance estimates.
support : array-like, shape (n_samples,)
A mask for the `n_support` observations whose scatter matrix has
minimum determinant.
References
----------
.. [Rouseeuw1999] A Fast Algorithm for the Minimum Covariance Determinant
Estimator, 1999, American Statistical Association and the American
Society for Quality, TECHNOMETRICS
"""
X = np.asarray(X)
random_state = check_random_state(random_state)
return _c_step(X, n_support, remaining_iterations=remaining_iterations,
initial_estimates=initial_estimates, verbose=verbose,
cov_computation_method=cov_computation_method,
random_state=random_state)
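# Illustrative sketch (not part of the original module): a single C-step chain
# on a toy Gaussian sample. The sample size and support size are arbitrary
# choices for demonstration; the function returns the robust location,
# covariance, log-determinant, support mask and squared Mahalanobis distances.
def _c_step_example():
    rng = np.random.RandomState(0)
    X = rng.randn(100, 3)
    location, covariance, det, support, dist = c_step(X, n_support=60,
                                                      random_state=rng)
    return location, covariance, support.sum()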
def _c_step(X, n_support, random_state, remaining_iterations=30,
initial_estimates=None, verbose=False,
cov_computation_method=empirical_covariance):
n_samples, n_features = X.shape
# Initialisation
support = np.zeros(n_samples, dtype=bool)
if initial_estimates is None:
# compute initial robust estimates from a random subset
support[random_state.permutation(n_samples)[:n_support]] = True
else:
# get initial robust estimates from the function parameters
location = initial_estimates[0]
covariance = initial_estimates[1]
# run a special iteration for that case (to get an initial support)
precision = pinvh(covariance)
X_centered = X - location
dist = (np.dot(X_centered, precision) * X_centered).sum(1)
# compute new estimates
support[np.argsort(dist)[:n_support]] = True
X_support = X[support]
location = X_support.mean(0)
covariance = cov_computation_method(X_support)
# Iterative procedure for Minimum Covariance Determinant computation
det = fast_logdet(covariance)
previous_det = np.inf
while (det < previous_det) and (remaining_iterations > 0):
# save old estimates values
previous_location = location
previous_covariance = covariance
previous_det = det
previous_support = support
# compute a new support from the full data set mahalanobis distances
precision = pinvh(covariance)
X_centered = X - location
dist = (np.dot(X_centered, precision) * X_centered).sum(axis=1)
# compute new estimates
support = np.zeros(n_samples, dtype=bool)
support[np.argsort(dist)[:n_support]] = True
X_support = X[support]
location = X_support.mean(axis=0)
covariance = cov_computation_method(X_support)
det = fast_logdet(covariance)
# update remaining iterations for early stopping
remaining_iterations -= 1
previous_dist = dist
dist = (np.dot(X - location, precision) * (X - location)).sum(axis=1)
# Catch computation errors
if np.isinf(det):
raise ValueError(
"Singular covariance matrix. "
"Please check that the covariance matrix corresponding "
"to the dataset is full rank and that MinCovDet is used with "
"Gaussian-distributed data (or at least data drawn from a "
"unimodal, symmetric distribution.")
# Check convergence
if np.allclose(det, previous_det):
# c_step procedure converged
if verbose:
print("Optimal couple (location, covariance) found before"
" ending iterations (%d left)" % (remaining_iterations))
results = location, covariance, det, support, dist
elif det > previous_det:
# determinant has increased (should not happen)
warnings.warn("Warning! det > previous_det (%.15f > %.15f)"
% (det, previous_det), RuntimeWarning)
results = previous_location, previous_covariance, \
previous_det, previous_support, previous_dist
# Check early stopping
if remaining_iterations == 0:
if verbose:
print('Maximum number of iterations reached')
results = location, covariance, det, support, dist
return results
def select_candidates(X, n_support, n_trials, select=1, n_iter=30,
verbose=False,
cov_computation_method=empirical_covariance,
random_state=None):
"""Finds the best pure subset of observations to compute MCD from it.
The purpose of this function is to find the best sets of n_support
observations with respect to a minimization of their covariance
matrix determinant. Equivalently, it removes n_samples-n_support
observations to construct what we call a pure data set (i.e. not
containing outliers). The list of the observations of the pure
data set is referred to as the `support`.
Starting from a random support, the pure data set is found by the
c_step procedure introduced by Rousseeuw and Van Driessen in
[Rouseeuw1999]_.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Data (sub)set in which we look for the n_support purest observations.
n_support : int, [(n + p + 1)/2] < n_support < n
The number of samples the pure data set must contain.
select : int, int > 0
Number of best candidates results to return.
n_trials : int, nb_trials > 0 or 2-tuple
Number of different initial sets of observations from which to
run the algorithm.
Instead of giving a number of trials to perform, one can provide a
list of initial estimates that will be used to iteratively run
c_step procedures. In this case:
- n_trials[0]: array-like, shape (n_trials, n_features)
is the list of `n_trials` initial location estimates
- n_trials[1]: array-like, shape (n_trials, n_features, n_features)
is the list of `n_trials` initial covariances estimates
n_iter : int, nb_iter > 0
Maximum number of iterations for the c_step procedure.
(2 is enough to be close to the final solution. "Never" exceeds 20).
random_state : integer or numpy.RandomState, optional
The random generator used. If an integer is given, it fixes the
seed. Defaults to the global numpy random number generator.
See Also
---------
c_step
Returns
-------
best_locations : array-like, shape (select, n_features)
The `select` location estimates computed from the `select` best
supports found in the data set (`X`).
best_covariances : array-like, shape (select, n_features, n_features)
The `select` covariance estimates computed from the `select`
best supports found in the data set (`X`).
best_supports : array-like, shape (select, n_samples)
The `select` best supports found in the data set (`X`).
References
----------
.. [Rouseeuw1999] A Fast Algorithm for the Minimum Covariance Determinant
Estimator, 1999, American Statistical Association and the American
Society for Quality, TECHNOMETRICS
"""
random_state = check_random_state(random_state)
n_samples, n_features = X.shape
if isinstance(n_trials, numbers.Integral):
run_from_estimates = False
elif isinstance(n_trials, tuple):
run_from_estimates = True
estimates_list = n_trials
n_trials = estimates_list[0].shape[0]
else:
raise TypeError("Invalid 'n_trials' parameter, expected tuple or "
" integer, got %s (%s)" % (n_trials, type(n_trials)))
# compute `n_trials` location and shape estimates candidates in the subset
all_estimates = []
if not run_from_estimates:
# perform `n_trials` computations from random initial supports
for j in range(n_trials):
all_estimates.append(
_c_step(
X, n_support, remaining_iterations=n_iter, verbose=verbose,
cov_computation_method=cov_computation_method,
random_state=random_state))
else:
# perform computations from every given initial estimates
for j in range(n_trials):
initial_estimates = (estimates_list[0][j], estimates_list[1][j])
all_estimates.append(_c_step(
X, n_support, remaining_iterations=n_iter,
initial_estimates=initial_estimates, verbose=verbose,
cov_computation_method=cov_computation_method,
random_state=random_state))
all_locs_sub, all_covs_sub, all_dets_sub, all_supports_sub, all_ds_sub = \
zip(*all_estimates)
# find the `n_best` best results among the `n_trials` ones
index_best = np.argsort(all_dets_sub)[:select]
best_locations = np.asarray(all_locs_sub)[index_best]
best_covariances = np.asarray(all_covs_sub)[index_best]
best_supports = np.asarray(all_supports_sub)[index_best]
best_ds = np.asarray(all_ds_sub)[index_best]
return best_locations, best_covariances, best_supports, best_ds
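# Illustrative sketch (not part of the original module): drawing a few
# candidate (location, covariance) estimates on toy data. The number of
# trials, the support size and the `select` count are arbitrary choices.
def _select_candidates_example():
    rng = np.random.RandomState(1)
    X = rng.randn(80, 2)
    locs, covs, supports, dists = select_candidates(X, n_support=50,
                                                    n_trials=5, select=2,
                                                    random_state=rng)
    return locs.shape, covs.shape   # (2, 2) and (2, 2, 2)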
def fast_mcd(X, support_fraction=None,
cov_computation_method=empirical_covariance,
random_state=None):
"""Estimates the Minimum Covariance Determinant matrix.
Parameters
----------
X : array-like, shape (n_samples, n_features)
The data matrix, with p features and n samples.
support_fraction : float, 0 < support_fraction < 1
The proportion of points to be included in the support of the raw
MCD estimate. Default is None, which implies that the minimum
value of support_fraction will be used within the algorithm:
`[n_sample + n_features + 1] / 2`.
random_state : integer or numpy.RandomState, optional
The generator used to randomly subsample. If an integer is
given, it fixes the seed. Defaults to the global numpy random
number generator.
Notes
-----
    The FastMCD algorithm has been introduced by Rousseeuw and Van Driessen
in "A Fast Algorithm for the Minimum Covariance Determinant Estimator,
1999, American Statistical Association and the American Society
for Quality, TECHNOMETRICS".
    The principle is to compute robust estimates on random subsets before
    pooling them into larger subsets, and finally into the full data set.
Depending on the size of the initial sample, we have one, two or three
such computation levels.
Note that only raw estimates are returned. If one is interested in
the correction and reweighting steps described in [Rouseeuw1999]_,
see the MinCovDet object.
References
----------
.. [Rouseeuw1999] A Fast Algorithm for the Minimum Covariance
Determinant Estimator, 1999, American Statistical Association
and the American Society for Quality, TECHNOMETRICS
.. [Butler1993] R. W. Butler, P. L. Davies and M. Jhun,
Asymptotics For The Minimum Covariance Determinant Estimator,
The Annals of Statistics, 1993, Vol. 21, No. 3, 1385-1400
Returns
-------
location : array-like, shape (n_features,)
Robust location of the data.
covariance : array-like, shape (n_features, n_features)
Robust covariance of the features.
support : array-like, type boolean, shape (n_samples,)
A mask of the observations that have been used to compute
the robust location and covariance estimates of the data set.
"""
random_state = check_random_state(random_state)
X = np.asarray(X)
if X.ndim == 1:
X = np.reshape(X, (1, -1))
warnings.warn("Only one sample available. "
"You may want to reshape your data array")
n_samples, n_features = X.shape
# minimum breakdown value
if support_fraction is None:
n_support = int(np.ceil(0.5 * (n_samples + n_features + 1)))
else:
n_support = int(support_fraction * n_samples)
# 1-dimensional case quick computation
# (Rousseeuw, P. J. and Leroy, A. M. (2005) References, in Robust
# Regression and Outlier Detection, John Wiley & Sons, chapter 4)
if n_features == 1:
if n_support < n_samples:
# find the sample shortest halves
X_sorted = np.sort(np.ravel(X))
diff = X_sorted[n_support:] - X_sorted[:(n_samples - n_support)]
halves_start = np.where(diff == np.min(diff))[0]
# take the middle points' mean to get the robust location estimate
location = 0.5 * (X_sorted[n_support + halves_start]
+ X_sorted[halves_start]).mean()
support = np.zeros(n_samples, dtype=bool)
X_centered = X - location
support[np.argsort(np.abs(X_centered), 0)[:n_support]] = True
covariance = np.asarray([[np.var(X[support])]])
location = np.array([location])
# get precision matrix in an optimized way
precision = pinvh(covariance)
dist = (np.dot(X_centered, precision) * (X_centered)).sum(axis=1)
else:
support = np.ones(n_samples, dtype=bool)
covariance = np.asarray([[np.var(X)]])
location = np.asarray([np.mean(X)])
X_centered = X - location
# get precision matrix in an optimized way
precision = pinvh(covariance)
dist = (np.dot(X_centered, precision) * (X_centered)).sum(axis=1)
# Starting FastMCD algorithm for p-dimensional case
if (n_samples > 500) and (n_features > 1):
# 1. Find candidate supports on subsets
# a. split the set in subsets of size ~ 300
n_subsets = n_samples // 300
n_samples_subsets = n_samples // n_subsets
samples_shuffle = random_state.permutation(n_samples)
h_subset = int(np.ceil(n_samples_subsets *
(n_support / float(n_samples))))
# b. perform a total of 500 trials
n_trials_tot = 500
# c. select 10 best (location, covariance) for each subset
n_best_sub = 10
n_trials = max(10, n_trials_tot // n_subsets)
n_best_tot = n_subsets * n_best_sub
all_best_locations = np.zeros((n_best_tot, n_features))
try:
all_best_covariances = np.zeros((n_best_tot, n_features,
n_features))
except MemoryError:
            # The above is too big. Let's try with something much smaller
# (and less optimal)
all_best_covariances = np.zeros((n_best_tot, n_features,
n_features))
n_best_tot = 10
n_best_sub = 2
for i in range(n_subsets):
low_bound = i * n_samples_subsets
high_bound = low_bound + n_samples_subsets
current_subset = X[samples_shuffle[low_bound:high_bound]]
best_locations_sub, best_covariances_sub, _, _ = select_candidates(
current_subset, h_subset, n_trials,
select=n_best_sub, n_iter=2,
cov_computation_method=cov_computation_method,
random_state=random_state)
subset_slice = np.arange(i * n_best_sub, (i + 1) * n_best_sub)
all_best_locations[subset_slice] = best_locations_sub
all_best_covariances[subset_slice] = best_covariances_sub
# 2. Pool the candidate supports into a merged set
# (possibly the full dataset)
n_samples_merged = min(1500, n_samples)
h_merged = int(np.ceil(n_samples_merged *
(n_support / float(n_samples))))
if n_samples > 1500:
n_best_merged = 10
else:
n_best_merged = 1
# find the best couples (location, covariance) on the merged set
selection = random_state.permutation(n_samples)[:n_samples_merged]
locations_merged, covariances_merged, supports_merged, d = \
select_candidates(
X[selection], h_merged,
n_trials=(all_best_locations, all_best_covariances),
select=n_best_merged,
cov_computation_method=cov_computation_method,
random_state=random_state)
# 3. Finally get the overall best (locations, covariance) couple
if n_samples < 1500:
# directly get the best couple (location, covariance)
location = locations_merged[0]
covariance = covariances_merged[0]
support = np.zeros(n_samples, dtype=bool)
dist = np.zeros(n_samples)
support[selection] = supports_merged[0]
dist[selection] = d[0]
else:
# select the best couple on the full dataset
locations_full, covariances_full, supports_full, d = \
select_candidates(
X, n_support,
n_trials=(locations_merged, covariances_merged),
select=1,
cov_computation_method=cov_computation_method,
random_state=random_state)
location = locations_full[0]
covariance = covariances_full[0]
support = supports_full[0]
dist = d[0]
elif n_features > 1:
# 1. Find the 10 best couples (location, covariance)
# considering two iterations
n_trials = 30
n_best = 10
locations_best, covariances_best, _, _ = select_candidates(
X, n_support, n_trials=n_trials, select=n_best, n_iter=2,
cov_computation_method=cov_computation_method,
random_state=random_state)
# 2. Select the best couple on the full dataset amongst the 10
locations_full, covariances_full, supports_full, d = select_candidates(
X, n_support, n_trials=(locations_best, covariances_best),
select=1, cov_computation_method=cov_computation_method,
random_state=random_state)
location = locations_full[0]
covariance = covariances_full[0]
support = supports_full[0]
dist = d[0]
return location, covariance, support, dist
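# Illustrative sketch (not part of the original module): raw FastMCD estimates
# on a toy contaminated sample. The sample sizes and the shifted outlier block
# are arbitrary choices for demonstration.
def _fast_mcd_example():
    rng = np.random.RandomState(42)
    X = np.vstack([rng.randn(95, 2), rng.randn(5, 2) + 10.0])   # 5 outliers
    location, covariance, support, dist = fast_mcd(X, random_state=rng)
    # `support` flags the observations used for the raw robust estimates
    return location, covariance, support.sum()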
class MinCovDet(EmpiricalCovariance):
"""Minimum Covariance Determinant (MCD): robust estimator of covariance.
The Minimum Covariance Determinant covariance estimator is to be applied
on Gaussian-distributed data, but could still be relevant on data
drawn from a unimodal, symmetric distribution. It is not meant to be used
with multi-modal data (the algorithm used to fit a MinCovDet object is
likely to fail in such a case).
One should consider projection pursuit methods to deal with multi-modal
datasets.
Parameters
----------
store_precision : bool
Specify if the estimated precision is stored.
assume_centered : Boolean
If True, the support of the robust location and the covariance
estimates is computed, and a covariance estimate is recomputed from
it, without centering the data.
Useful to work with data whose mean is significantly equal to
zero but is not exactly zero.
If False, the robust location and covariance are directly computed
with the FastMCD algorithm without additional treatment.
support_fraction : float, 0 < support_fraction < 1
The proportion of points to be included in the support of the raw
MCD estimate. Default is None, which implies that the minimum
value of support_fraction will be used within the algorithm:
[n_sample + n_features + 1] / 2
random_state : integer or numpy.RandomState, optional
The random generator used. If an integer is given, it fixes the
seed. Defaults to the global numpy random number generator.
Attributes
----------
raw_location_ : array-like, shape (n_features,)
The raw robust estimated location before correction and re-weighting.
raw_covariance_ : array-like, shape (n_features, n_features)
The raw robust estimated covariance before correction and re-weighting.
raw_support_ : array-like, shape (n_samples,)
A mask of the observations that have been used to compute
the raw robust estimates of location and shape, before correction
and re-weighting.
location_ : array-like, shape (n_features,)
Estimated robust location
covariance_ : array-like, shape (n_features, n_features)
Estimated robust covariance matrix
precision_ : array-like, shape (n_features, n_features)
Estimated pseudo inverse matrix.
(stored only if store_precision is True)
support_ : array-like, shape (n_samples,)
A mask of the observations that have been used to compute
the robust estimates of location and shape.
dist_ : array-like, shape (n_samples,)
Mahalanobis distances of the training set (on which `fit` is called)
observations.
References
----------
.. [Rouseeuw1984] `P. J. Rousseeuw. Least median of squares regression.
J. Am Stat Ass, 79:871, 1984.`
.. [Rouseeuw1999] `A Fast Algorithm for the Minimum Covariance Determinant
Estimator, 1999, American Statistical Association and the American
Society for Quality, TECHNOMETRICS`
.. [Butler1993] `R. W. Butler, P. L. Davies and M. Jhun,
Asymptotics For The Minimum Covariance Determinant Estimator,
The Annals of Statistics, 1993, Vol. 21, No. 3, 1385-1400`
"""
_nonrobust_covariance = staticmethod(empirical_covariance)
def __init__(self, store_precision=True, assume_centered=False,
support_fraction=None, random_state=None):
self.store_precision = store_precision
self.assume_centered = assume_centered
self.support_fraction = support_fraction
self.random_state = random_state
def fit(self, X, y=None):
"""Fits a Minimum Covariance Determinant with the FastMCD algorithm.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training data, where n_samples is the number of samples
and n_features is the number of features.
        y : not used, present for API consistency purposes.
Returns
-------
self : object
Returns self.
"""
random_state = check_random_state(self.random_state)
n_samples, n_features = X.shape
# check that the empirical covariance is full rank
if (linalg.svdvals(np.dot(X.T, X)) > 1e-8).sum() != n_features:
warnings.warn("The covariance matrix associated to your dataset "
"is not full rank")
# compute and store raw estimates
raw_location, raw_covariance, raw_support, raw_dist = fast_mcd(
X, support_fraction=self.support_fraction,
cov_computation_method=self._nonrobust_covariance,
random_state=random_state)
if self.assume_centered:
raw_location = np.zeros(n_features)
raw_covariance = self._nonrobust_covariance(X[raw_support],
assume_centered=True)
# get precision matrix in an optimized way
precision = pinvh(raw_covariance)
raw_dist = np.sum(np.dot(X, precision) * X, 1)
self.raw_location_ = raw_location
self.raw_covariance_ = raw_covariance
self.raw_support_ = raw_support
self.location_ = raw_location
self.support_ = raw_support
self.dist_ = raw_dist
# obtain consistency at normal models
self.correct_covariance(X)
# re-weight estimator
self.reweight_covariance(X)
return self
def correct_covariance(self, data):
"""Apply a correction to raw Minimum Covariance Determinant estimates.
Correction using the empirical correction factor suggested
by Rousseeuw and Van Driessen in [Rouseeuw1984]_.
Parameters
----------
data : array-like, shape (n_samples, n_features)
The data matrix, with p features and n samples.
The data set must be the one which was used to compute
the raw estimates.
Returns
-------
covariance_corrected : array-like, shape (n_features, n_features)
Corrected robust covariance estimate.
"""
correction = np.median(self.dist_) / chi2(data.shape[1]).isf(0.5)
covariance_corrected = self.raw_covariance_ * correction
self.dist_ /= correction
return covariance_corrected
def reweight_covariance(self, data):
"""Re-weight raw Minimum Covariance Determinant estimates.
Re-weight observations using Rousseeuw's method (equivalent to
deleting outlying observations from the data set before
computing location and covariance estimates). [Rouseeuw1984]_
Parameters
----------
data : array-like, shape (n_samples, n_features)
The data matrix, with p features and n samples.
The data set must be the one which was used to compute
the raw estimates.
Returns
-------
location_reweighted : array-like, shape (n_features, )
Re-weighted robust location estimate.
covariance_reweighted : array-like, shape (n_features, n_features)
Re-weighted robust covariance estimate.
support_reweighted : array-like, type boolean, shape (n_samples,)
A mask of the observations that have been used to compute
the re-weighted robust location and covariance estimates.
"""
n_samples, n_features = data.shape
mask = self.dist_ < chi2(n_features).isf(0.025)
if self.assume_centered:
location_reweighted = np.zeros(n_features)
else:
location_reweighted = data[mask].mean(0)
covariance_reweighted = self._nonrobust_covariance(
data[mask], assume_centered=self.assume_centered)
support_reweighted = np.zeros(n_samples, dtype=bool)
support_reweighted[mask] = True
self._set_covariance(covariance_reweighted)
self.location_ = location_reweighted
self.support_ = support_reweighted
X_centered = data - self.location_
self.dist_ = np.sum(
np.dot(X_centered, self.get_precision()) * X_centered, 1)
return location_reweighted, covariance_reweighted, support_reweighted
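# Illustrative usage sketch (not part of the original module): fitting
# MinCovDet on toy contaminated data and comparing it with the empirical
# covariance. The data shapes and the outlier shift are arbitrary.
def _min_cov_det_example():
    rng = np.random.RandomState(0)
    X = np.vstack([rng.randn(95, 2), rng.randn(5, 2) + 8.0])
    robust = MinCovDet(random_state=0).fit(X)
    empirical = EmpiricalCovariance().fit(X)
    # the robust estimate is far less influenced by the 5 shifted points
    return robust.covariance_, empirical.covariance_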
| bsd-3-clause |
ryandougherty/mwa-capstone | MWA_Tools/build/matplotlib/lib/matplotlib/textpath.py | 3 | 15516 | # -*- coding: utf-8 -*-
import urllib
from matplotlib.path import Path
import matplotlib.font_manager as font_manager
from matplotlib.ft2font import FT2Font, KERNING_DEFAULT, LOAD_NO_HINTING, LOAD_TARGET_LIGHT
from matplotlib.mathtext import MathTextParser
import matplotlib.dviread as dviread
import numpy as np
import warnings
class TextToPath(object):
"""
A class that convert a given text to a path using ttf fonts.
"""
FONT_SCALE = 100.
DPI = 72
def __init__(self):
"""
Initialization
"""
self.mathtext_parser = MathTextParser('path')
self.tex_font_map = None
from matplotlib.cbook import maxdict
self._ps_fontd = maxdict(50)
self._texmanager = None
self._adobe_standard_encoding = None
def _get_adobe_standard_encoding(self):
enc_name = dviread.find_tex_file('8a.enc')
enc = dviread.Encoding(enc_name)
return dict([(c, i) for i, c in enumerate(enc.encoding)])
def _get_font(self, prop):
"""
find a ttf font.
"""
fname = font_manager.findfont(prop)
font = FT2Font(str(fname))
font.set_size(self.FONT_SCALE, self.DPI)
return font
def _get_hinting_flag(self):
return LOAD_NO_HINTING
def _get_char_id(self, font, ccode):
"""
Return a unique id for the given font and character-code set.
"""
ps_name = font.get_sfnt()[(1,0,0,6)]
char_id = urllib.quote('%s-%x' % (ps_name, ccode))
return char_id
def _get_char_id_ps(self, font, ccode):
"""
Return a unique id for the given font and character-code set (for tex).
"""
ps_name = font.get_ps_font_info()[2]
char_id = urllib.quote('%s-%d' % (ps_name, ccode))
return char_id
def glyph_to_path(self, font, currx=0.):
"""
convert the ft2font glyph to vertices and codes.
"""
verts, codes = font.get_path()
if currx != 0.0:
verts[:,0] += currx
return verts, codes
def get_text_width_height_descent(self, s, prop, ismath):
if rcParams['text.usetex']:
texmanager = self.get_texmanager()
fontsize = prop.get_size_in_points()
w, h, d = texmanager.get_text_width_height_descent(s, fontsize,
renderer=None)
return w, h, d
fontsize = prop.get_size_in_points()
scale = float(fontsize) / self.FONT_SCALE
if ismath:
prop = prop.copy()
prop.set_size(self.FONT_SCALE)
width, height, descent, trash, used_characters = \
self.mathtext_parser.parse(s, 72, prop)
return width * scale, height * scale, descent * scale
font = self._get_font(prop)
font.set_text(s, 0.0, flags=LOAD_NO_HINTING)
w, h = font.get_width_height()
w /= 64.0 # convert from subpixels
h /= 64.0
d = font.get_descent()
d /= 64.0
return w * scale, h * scale, d * scale
def get_text_path(self, prop, s, ismath=False, usetex=False):
"""
        convert text *s* to path (a tuple of vertices and codes for matplotlib.path.Path).
*prop*
font property
*s*
text to be converted
*usetex*
If True, use matplotlib usetex mode.
*ismath*
If True, use mathtext parser. Effective only if usetex == False.
"""
if usetex==False:
if ismath == False:
font = self._get_font(prop)
glyph_info, glyph_map, rects = self.get_glyphs_with_font(font, s)
else:
glyph_info, glyph_map, rects = self.get_glyphs_mathtext(prop, s)
else:
glyph_info, glyph_map, rects = self.get_glyphs_tex(prop, s)
verts, codes = [], []
for glyph_id, xposition, yposition, scale in glyph_info:
verts1, codes1 = glyph_map[glyph_id]
if len(verts1):
verts1 = np.array(verts1)*scale + [xposition, yposition]
verts.extend(verts1)
codes.extend(codes1)
for verts1, codes1 in rects:
verts.extend(verts1)
codes.extend(codes1)
return verts, codes
def get_glyphs_with_font(self, font, s, glyph_map=None,
return_new_glyphs_only=False):
"""
convert the string *s* to vertices and codes using the
provided ttf font.
"""
# Mostly copied from backend_svg.py.
cmap = font.get_charmap()
lastgind = None
currx = 0
xpositions = []
glyph_ids = []
if glyph_map is None:
glyph_map = dict()
if return_new_glyphs_only:
glyph_map_new = dict()
else:
glyph_map_new = glyph_map
# I'm not sure if I get kernings right. Needs to be verified. -JJL
for c in s:
ccode = ord(c)
gind = cmap.get(ccode)
if gind is None:
ccode = ord('?')
gind = 0
if lastgind is not None:
kern = font.get_kerning(lastgind, gind, KERNING_DEFAULT)
else:
kern = 0
glyph = font.load_char(ccode, flags=LOAD_NO_HINTING)
horiz_advance = (glyph.linearHoriAdvance / 65536.0)
char_id = self._get_char_id(font, ccode)
if not char_id in glyph_map:
glyph_map_new[char_id] = self.glyph_to_path(font)
currx += (kern / 64.0)
xpositions.append(currx)
glyph_ids.append(char_id)
currx += horiz_advance
lastgind = gind
ypositions = [0] * len(xpositions)
sizes = [1.] * len(xpositions)
rects = []
return zip(glyph_ids, xpositions, ypositions, sizes), glyph_map_new, rects
def get_glyphs_mathtext(self, prop, s, glyph_map=None,
return_new_glyphs_only=False):
"""
convert the string *s* to vertices and codes by parsing it with mathtext.
"""
prop = prop.copy()
prop.set_size(self.FONT_SCALE)
width, height, descent, glyphs, rects = self.mathtext_parser.parse(
s, self.DPI, prop)
if glyph_map is None:
glyph_map = dict()
if return_new_glyphs_only:
glyph_map_new = dict()
else:
glyph_map_new = glyph_map
xpositions = []
ypositions = []
glyph_ids = []
sizes = []
currx, curry = 0, 0
for font, fontsize, ccode, ox, oy in glyphs:
char_id = self._get_char_id(font, ccode)
if not char_id in glyph_map:
font.clear()
font.set_size(self.FONT_SCALE, self.DPI)
glyph = font.load_char(ccode, flags=LOAD_NO_HINTING)
glyph_map_new[char_id] = self.glyph_to_path(font)
xpositions.append(ox)
ypositions.append(oy)
glyph_ids.append(char_id)
size = fontsize / self.FONT_SCALE
sizes.append(size)
myrects = []
for ox, oy, w, h in rects:
vert1=[(ox, oy), (ox, oy+h), (ox+w, oy+h), (ox+w, oy), (ox, oy), (0,0)]
code1 = [Path.MOVETO,
Path.LINETO, Path.LINETO, Path.LINETO, Path.LINETO,
Path.CLOSEPOLY]
myrects.append((vert1, code1))
return zip(glyph_ids, xpositions, ypositions, sizes), glyph_map_new, myrects
def get_texmanager(self):
"""
return the :class:`matplotlib.texmanager.TexManager` instance
"""
if self._texmanager is None:
from matplotlib.texmanager import TexManager
self._texmanager = TexManager()
return self._texmanager
def get_glyphs_tex(self, prop, s, glyph_map=None,
return_new_glyphs_only=False):
"""
convert the string *s* to vertices and codes using matplotlib's usetex mode.
"""
        # codes are mostly borrowed from the pdf backend.
texmanager = self.get_texmanager()
if self.tex_font_map is None:
self.tex_font_map = dviread.PsfontsMap(dviread.find_tex_file('pdftex.map'))
if self._adobe_standard_encoding is None:
self._adobe_standard_encoding = self._get_adobe_standard_encoding()
fontsize = prop.get_size_in_points()
if hasattr(texmanager, "get_dvi"): #
dvifilelike = texmanager.get_dvi(s, self.FONT_SCALE)
dvi = dviread.DviFromFileLike(dvifilelike, self.DPI)
else:
dvifile = texmanager.make_dvi(s, self.FONT_SCALE)
dvi = dviread.Dvi(dvifile, self.DPI)
page = iter(dvi).next()
dvi.close()
if glyph_map is None:
glyph_map = dict()
if return_new_glyphs_only:
glyph_map_new = dict()
else:
glyph_map_new = glyph_map
glyph_ids, xpositions, ypositions, sizes = [], [], [], []
# Gather font information and do some setup for combining
# characters into strings.
#oldfont, seq = None, []
for x1, y1, dvifont, glyph, width in page.text:
font_and_encoding = self._ps_fontd.get(dvifont.texname)
font_bunch = self.tex_font_map[dvifont.texname]
if font_and_encoding is None:
font = FT2Font(str(font_bunch.filename))
for charmap_name, charmap_code in [("ADOBE_CUSTOM", 1094992451),
("ADOBE_STANDARD", 1094995778)]:
try:
font.select_charmap(charmap_code)
except ValueError:
pass
else:
break
else:
charmap_name = ""
warnings.warn("No supported encoding in font (%s)." % font_bunch.filename)
if charmap_name == "ADOBE_STANDARD" and font_bunch.encoding:
enc0 = dviread.Encoding(font_bunch.encoding)
enc = dict([(i, self._adobe_standard_encoding.get(c, None)) \
for i, c in enumerate(enc0.encoding)])
else:
enc = dict()
self._ps_fontd[dvifont.texname] = font, enc
else:
font, enc = font_and_encoding
ft2font_flag = LOAD_TARGET_LIGHT
char_id = self._get_char_id_ps(font, glyph)
if not char_id in glyph_map:
font.clear()
font.set_size(self.FONT_SCALE, self.DPI)
if enc: charcode = enc.get(glyph, None)
else: charcode = glyph
if charcode:
glyph0 = font.load_char(charcode, flags=ft2font_flag)
else:
warnings.warn("The glyph (%d) of font (%s) cannot be converted with the encoding. Glyph may be wrong" % (glyph, font_bunch.filename))
glyph0 = font.load_char(glyph, flags=ft2font_flag)
glyph_map_new[char_id] = self.glyph_to_path(font)
glyph_ids.append(char_id)
xpositions.append(x1)
ypositions.append(y1)
sizes.append(dvifont.size/self.FONT_SCALE)
myrects = []
for ox, oy, h, w in page.boxes:
vert1=[(ox, oy), (ox+w, oy), (ox+w, oy+h), (ox, oy+h), (ox, oy), (0,0)]
code1 = [Path.MOVETO,
Path.LINETO, Path.LINETO, Path.LINETO, Path.LINETO,
Path.CLOSEPOLY]
myrects.append((vert1, code1))
return zip(glyph_ids, xpositions, ypositions, sizes), \
glyph_map_new, myrects
from matplotlib.font_manager import FontProperties
from matplotlib import rcParams
from matplotlib.transforms import Affine2D
text_to_path = TextToPath()
class TextPath(Path):
"""
Create a path from the text.
"""
def __init__(self, xy, s, size=None, prop=None,
_interpolation_steps=1, usetex=False,
*kl, **kwargs):
"""
        Create a path from the text. Note that
it simply is a path, not an artist. You need to use the
PathPatch (or other artists) to draw this path onto the
canvas.
xy : position of the text.
s : text
size : font size
prop : font property
"""
if prop is None:
prop = FontProperties()
if size is None:
size = prop.get_size_in_points()
self._xy = xy
self.set_size(size)
self._cached_vertices = None
self._vertices, self._codes = self.text_get_vertices_codes(prop, s, usetex=usetex)
self.should_simplify = False
self.simplify_threshold = rcParams['path.simplify_threshold']
self.has_nonfinite = False
self._interpolation_steps = _interpolation_steps
def set_size(self, size):
"""
set the size of the text
"""
self._size = size
self._invalid = True
def get_size(self):
"""
get the size of the text
"""
return self._size
def _get_vertices(self):
"""
Return the cached path after updating it if necessary.
"""
self._revalidate_path()
return self._cached_vertices
def _get_codes(self):
"""
Return the codes
"""
return self._codes
vertices = property(_get_vertices)
codes = property(_get_codes)
def _revalidate_path(self):
"""
update the path if necessary.
The path for the text is initially create with the font size
of FONT_SCALE, and this path is rescaled to other size when
necessary.
"""
if self._invalid or \
(self._cached_vertices is None):
tr = Affine2D().scale(self._size/text_to_path.FONT_SCALE,
self._size/text_to_path.FONT_SCALE).translate(*self._xy)
self._cached_vertices = tr.transform(self._vertices)
self._invalid = False
def is_math_text(self, s):
"""
Returns True if the given string *s* contains any mathtext.
"""
# copied from Text.is_math_text -JJL
# Did we find an even number of non-escaped dollar signs?
# If so, treat is as math text.
dollar_count = s.count(r'$') - s.count(r'\$')
even_dollars = (dollar_count > 0 and dollar_count % 2 == 0)
if rcParams['text.usetex']:
return s, 'TeX'
if even_dollars:
return s, True
else:
return s.replace(r'\$', '$'), False
def text_get_vertices_codes(self, prop, s, usetex):
"""
convert the string *s* to vertices and codes using the
provided font property *prop*. Mostly copied from
backend_svg.py.
"""
if usetex:
verts, codes = text_to_path.get_text_path(prop, s, usetex=True)
else:
clean_line, ismath = self.is_math_text(s)
verts, codes = text_to_path.get_text_path(prop, clean_line, ismath=ismath)
return verts, codes
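# Illustrative usage sketch (not part of the original module): a TextPath is
# just a Path, so it is drawn by wrapping it in a PathPatch. The position,
# size and axis limits below are arbitrary.
def _text_path_example():
    import matplotlib.pyplot as plt
    from matplotlib.patches import PathPatch
    fig, ax = plt.subplots()
    tp = TextPath((0.1, 0.4), "TextPath", size=0.2)
    ax.add_patch(PathPatch(tp, facecolor="lightsteelblue", edgecolor="black"))
    ax.set_xlim(0, 1.5)
    ax.set_ylim(0, 1)
    return fig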
| gpl-2.0 |
michaelgat/Udacity_DL | image-classification/helper.py | 155 | 5631 | import pickle
import numpy as np
import matplotlib.pyplot as plt
from sklearn.preprocessing import LabelBinarizer
def _load_label_names():
"""
Load the label names from file
"""
return ['airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']
def load_cfar10_batch(cifar10_dataset_folder_path, batch_id):
"""
Load a batch of the dataset
"""
with open(cifar10_dataset_folder_path + '/data_batch_' + str(batch_id), mode='rb') as file:
batch = pickle.load(file, encoding='latin1')
features = batch['data'].reshape((len(batch['data']), 3, 32, 32)).transpose(0, 2, 3, 1)
labels = batch['labels']
return features, labels
def display_stats(cifar10_dataset_folder_path, batch_id, sample_id):
"""
    Display Stats of the dataset
"""
batch_ids = list(range(1, 6))
if batch_id not in batch_ids:
print('Batch Id out of Range. Possible Batch Ids: {}'.format(batch_ids))
return None
features, labels = load_cfar10_batch(cifar10_dataset_folder_path, batch_id)
if not (0 <= sample_id < len(features)):
print('{} samples in batch {}. {} is out of range.'.format(len(features), batch_id, sample_id))
return None
print('\nStats of batch {}:'.format(batch_id))
print('Samples: {}'.format(len(features)))
print('Label Counts: {}'.format(dict(zip(*np.unique(labels, return_counts=True)))))
print('First 20 Labels: {}'.format(labels[:20]))
sample_image = features[sample_id]
sample_label = labels[sample_id]
label_names = _load_label_names()
print('\nExample of Image {}:'.format(sample_id))
print('Image - Min Value: {} Max Value: {}'.format(sample_image.min(), sample_image.max()))
print('Image - Shape: {}'.format(sample_image.shape))
print('Label - Label Id: {} Name: {}'.format(sample_label, label_names[sample_label]))
plt.axis('off')
plt.imshow(sample_image)
def _preprocess_and_save(normalize, one_hot_encode, features, labels, filename):
"""
Preprocess data and save it to file
"""
features = normalize(features)
labels = one_hot_encode(labels)
pickle.dump((features, labels), open(filename, 'wb'))
def preprocess_and_save_data(cifar10_dataset_folder_path, normalize, one_hot_encode):
"""
Preprocess Training and Validation Data
"""
n_batches = 5
valid_features = []
valid_labels = []
for batch_i in range(1, n_batches + 1):
features, labels = load_cfar10_batch(cifar10_dataset_folder_path, batch_i)
validation_count = int(len(features) * 0.1)
        # Preprocess and save a batch of training data
_preprocess_and_save(
normalize,
one_hot_encode,
features[:-validation_count],
labels[:-validation_count],
'preprocess_batch_' + str(batch_i) + '.p')
# Use a portion of training batch for validation
valid_features.extend(features[-validation_count:])
valid_labels.extend(labels[-validation_count:])
# Preprocess and Save all validation data
_preprocess_and_save(
normalize,
one_hot_encode,
np.array(valid_features),
np.array(valid_labels),
'preprocess_validation.p')
with open(cifar10_dataset_folder_path + '/test_batch', mode='rb') as file:
batch = pickle.load(file, encoding='latin1')
# load the test data
test_features = batch['data'].reshape((len(batch['data']), 3, 32, 32)).transpose(0, 2, 3, 1)
test_labels = batch['labels']
# Preprocess and Save all test data
_preprocess_and_save(
normalize,
one_hot_encode,
np.array(test_features),
np.array(test_labels),
'preprocess_test.p')
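# Illustrative sketch (not part of the original helper module): the `normalize`
# and `one_hot_encode` callables passed to preprocess_and_save_data are expected
# to behave roughly like the hypothetical examples below.
def _example_normalize(images):
    # scale pixel values from [0, 255] to [0, 1]
    return images / 255.0
def _example_one_hot_encode(labels, n_classes=10):
    encoded = np.zeros((len(labels), n_classes))
    encoded[np.arange(len(labels)), labels] = 1
    return encoded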
def batch_features_labels(features, labels, batch_size):
"""
Split features and labels into batches
"""
for start in range(0, len(features), batch_size):
end = min(start + batch_size, len(features))
yield features[start:end], labels[start:end]
def load_preprocess_training_batch(batch_id, batch_size):
"""
Load the Preprocessed Training data and return them in batches of <batch_size> or less
"""
filename = 'preprocess_batch_' + str(batch_id) + '.p'
features, labels = pickle.load(open(filename, mode='rb'))
# Return the training data in batches of size <batch_size> or less
return batch_features_labels(features, labels, batch_size)
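# Illustrative sketch (not part of the original helper module): iterating over
# mini-batches with batch_features_labels on toy arrays. The shapes and batch
# size are arbitrary.
def _batching_example():
    features = np.arange(10).reshape(10, 1)
    labels = np.arange(10)
    batch_sizes = [len(f) for f, _ in batch_features_labels(features, labels, 4)]
    return batch_sizes   # [4, 4, 2]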
def display_image_predictions(features, labels, predictions):
n_classes = 10
label_names = _load_label_names()
label_binarizer = LabelBinarizer()
label_binarizer.fit(range(n_classes))
label_ids = label_binarizer.inverse_transform(np.array(labels))
fig, axies = plt.subplots(nrows=4, ncols=2)
fig.tight_layout()
fig.suptitle('Softmax Predictions', fontsize=20, y=1.1)
n_predictions = 3
margin = 0.05
ind = np.arange(n_predictions)
width = (1. - 2. * margin) / n_predictions
for image_i, (feature, label_id, pred_indicies, pred_values) in enumerate(zip(features, label_ids, predictions.indices, predictions.values)):
pred_names = [label_names[pred_i] for pred_i in pred_indicies]
correct_name = label_names[label_id]
axies[image_i][0].imshow(feature)
axies[image_i][0].set_title(correct_name)
axies[image_i][0].set_axis_off()
axies[image_i][1].barh(ind + margin, pred_values[::-1], width)
axies[image_i][1].set_yticks(ind + margin)
axies[image_i][1].set_yticklabels(pred_names[::-1])
axies[image_i][1].set_xticks([0, 0.5, 1.0])
| mit |
jensv/fluxtubestability | lambda_k_plotting.py | 1 | 24541 | # -*- coding: utf-8 -*-
"""
Created on Mon Feb 16 00:30:53 2015
@author: jensv
Module for examining stability spaces.
"""
from __future__ import print_function, unicode_literals, division
from __future__ import absolute_import
from future import standard_library, utils
from future.builtins import (ascii, bytes, chr, dict, filter, hex, input,
int, map, next, oct, open, pow, range, round,
str, super, zip)
import numpy as np
from scipy.special import kv, kvp
import analytic_condition as ac
from scipy.interpolate import griddata
import matplotlib.pyplot as plt
from matplotlib.colors import SymLogNorm, BoundaryNorm
from matplotlib.ticker import FormatStrFormatter, FixedFormatter
import matplotlib.patches as patches
import matplotlib.ticker as ticker
import seaborn as sns
sns.set_style('white')
sns.set_context('poster')
def plot_lambda_k_space_dw(filename, epsilon, name, mode_to_plot='m_neg_1',
show_points=False, lim=None, levels=None, log=True,
linthresh=1E-7, bounds=(1.5, 3.0), norm=True,
analytic_compare=False,
label_pos=((0.5, 0.4), (2.1, 0.4), (2.8, 0.2)),
delta_values=[-1,0,1],
interpolate=False,
cmap=None, hatch=False,
figsize=None,
save_as=None,
return_ax=False,
axes=None):
r"""
Plot the delta_w of external instabilities in the lambda-k space.
"""
if figsize:
fig = plt.figure(figsize=figsize)
epsilon_case = np.load(filename)
lambda_a_mesh = epsilon_case['lambda_a_mesh']
k_a_mesh = epsilon_case['k_a_mesh']
external_m_neg_1 = epsilon_case['d_w_m_neg_1']
external_sausage = epsilon_case['d_w_m_0']
epsilon_case.close()
instability_map = {'m_0': external_sausage,
'm_neg_1': external_m_neg_1}
kink_pal = sns.blend_palette([sns.xkcd_rgb["dandelion"],
sns.xkcd_rgb["white"]], 7, as_cmap=True)
kink_pal = sns.diverging_palette(73, 182, s=72, l=85, sep=1, n=9, as_cmap=True)
sausage_pal = sns.blend_palette(['orange', 'white'], 7, as_cmap=True)
sausage_pal = sns.diverging_palette(49, 181, s=99, l=78, sep=1, n=9, as_cmap=True)
if cmap:
instability_palette = {'m_0': cmap,
'm_neg_1': cmap}
else:
instability_palette = {'m_0': sausage_pal,
'm_neg_1': kink_pal}
if interpolate:
instability_map['m_neg_1'] = interpolate_nans(lambda_a_mesh,
k_a_mesh,
instability_map['m_neg_1']
)
values = instability_map[mode_to_plot]
if norm:
values = values / np.nanmax(np.abs(values))
else:
values = values
if levels:
if log:
plot = plt.contourf(lambda_a_mesh, k_a_mesh, values,
cmap=instability_palette[mode_to_plot],
levels=levels, norm=SymLogNorm(linthresh))
cbar = plt.colorbar(label=r'$\delta W$')
cbar.set_label(label=r'$\delta W$', size=45, rotation=0, labelpad=30)
contourlines = plt.contour(lambda_a_mesh, k_a_mesh,
values, levels=levels,
colors='grey',
norm=SymLogNorm(linthresh))
else:
norm = BoundaryNorm(levels, 256)
plot = plt.contourf(lambda_a_mesh, k_a_mesh, values,
cmap=instability_palette[mode_to_plot],
levels=levels, norm=norm)
cbar = plt.colorbar(label=r'$\delta W$')
cbar.set_label(label=r'$\delta W$', size=45, rotation=0, labelpad=30)
contourlines = plt.contour(lambda_a_mesh, k_a_mesh,
values, levels=levels,
colors='grey')
else:
if log:
plot = plt.contourf(lambda_a_mesh, k_a_mesh, values,
cmap=instability_palette[mode_to_plot],
norm=SymLogNorm(linthresh))
cbar = plt.colorbar(label=r'$\delta W$')
cbar.set_label(label=r'$\delta W$', size=45, rotation=0, labelpad=30)
contourlines = plt.contour(lambda_a_mesh, k_a_mesh,
values, colors='grey',
norm=SymLogNorm(linthresh))
else:
plot = plt.contourf(lambda_a_mesh, k_a_mesh, values,
cmap=instability_palette[mode_to_plot])
cbar = plt.colorbar(label=r'$\delta W$')
cbar.set_label(label=r'$\delta W$', size=45, rotation=0, labelpad=30)
contourlines = plt.contour(lambda_a_mesh, k_a_mesh,
values, colors='grey')
if lim:
plot.set_clim(lim)
cbar.add_lines(contourlines)
plt.plot([0.01, 0.1, 1.0, 2.0, 3.0],
[0.005, 0.05, 0.5, 1.0, 1.5], color='black')
axes = plt.gca()
axes.set_axis_bgcolor(sns.xkcd_rgb['white'])
lambda_bar_analytic = np.linspace(0.01, 3., 750)
k_bar_analytic = np.linspace(0.01, 1.5, 750)
(lambda_bar_mesh_analytic,
k_bar_mesh_analytic) = np.meshgrid(lambda_bar_analytic, k_bar_analytic)
if analytic_compare:
analytic_comparison(mode_to_plot, k_bar_mesh_analytic,
lambda_bar_mesh_analytic, epsilon, label_pos)
if show_points:
plt.scatter(lambda_a_mesh, k_a_mesh, marker='o', c='b', s=5)
plt.ylim(0.01, bounds[0])
plt.xlim(0.01, bounds[1])
axes = plt.gca()
axes.set_xticks(np.arange(0., 4.5, 1.))
axes.set_yticks(np.arange(0., 2.0, 0.5))
plt.setp(axes.get_xticklabels(), fontsize=30)
plt.setp(axes.get_yticklabels(), fontsize=30)
plt.ylabel(r'$\bar{k}$', fontsize=40, rotation='horizontal', labelpad=30)
plt.xlabel(r'$\bar{\lambda}$', fontsize=40)
cbar.ax.tick_params(labelsize=30)
def my_formatter_fun(x):
if x == 0:
return r'$0$'
if np.sign(x) > 0:
return r'$10^{%i}$' % np.int(np.log10(x))
else:
return r'$-10^{%i}$' % np.int(np.log10(np.abs(x)))
labels = [my_formatter_fun(level) for level in levels]
cbar.ax.set_yticklabels(labels)
sns.despine(ax=axes)
if hatch:
xmin, xmax = axes.get_xlim()
ymin, ymax = axes.get_ylim()
xy = (xmin,ymin)
width = xmax - xmin
height = ymax - ymin
p = patches.Rectangle(xy, width, height, hatch='+', fill=None, zorder=-10)
axes.add_patch(p)
plt.tight_layout()
if return_ax:
return axes, cbar
else:
plt.savefig('../../output/plots/' + name + '.png')
if save_as:
plt.savefig(save_as)
plt.show()
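# Hedged note (illustrative, not from the original author): the .npz files loaded by
# plot_lambda_k_space_dw are expected to carry the meshes 'lambda_a_mesh', 'k_a_mesh',
# 'd_w_m_neg_1' and 'd_w_m_0'; a synthetic file with exactly these keys is built in
# the __main__ sketch at the end of this module.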
def interpolate_nans(lambda_a, k_a, quantity):
r"""
Return mesh with nans interpolated from neighboring values.
"""
index_to_keep = np.isnan(quantity.ravel())
interp_values = quantity.ravel()[~index_to_keep]
interp_k = k_a.ravel()[~index_to_keep]
interp_lambda = lambda_a.ravel()[~index_to_keep]
return griddata((interp_lambda, interp_k),
interp_values,
(lambda_a, k_a),
method='linear')
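# Hedged illustration (not part of the original module; the _demo name is mine):
# interpolate_nans on a small synthetic mesh. The test surface is planar, so the
# missing value is recovered exactly by the linear interpolation.
def _demo_interpolate_nans():
    demo_lambda, demo_k = np.meshgrid(np.linspace(0., 1., 5), np.linspace(0., 1., 5))
    demo_quantity = demo_lambda + demo_k
    demo_quantity[2, 2] = np.nan
    filled = interpolate_nans(demo_lambda, demo_k, demo_quantity)
    return filled[2, 2]  # ~1.0, rebuilt from the neighbouring grid points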
def plot_dW_given_delta(filename, epsilon, name, mode_to_plot='m_neg_1',
show_points=False, lim=None, levels=None, log=False,
linthresh=1E-7, bounds=(1.5, 3.0), floor_norm=False,
analytic_compare=False,
label_pos=((0.5, 0.4), (2.1, 0.4), (2.8, 0.2)),
delta_values=[-1,0,1],
interpolate=False, with_interface=False):
r"""
Plot the delta_w of external instabilities in the lambda-k space, evaluated from the analytic condition at the given delta meshes.
"""
epsilon_case = np.load(filename)
lambda_a_mesh = epsilon_case['lambda_a_mesh']
k_a_mesh = epsilon_case['k_a_mesh']
delta_mesh_sausage = epsilon_case['delta_m_0']
delta_mesh_kink = epsilon_case['delta_m_neg_1']
epsilon_case.close()
if with_interface:
external_sausage_norm = ac.conditions(k_a_mesh, lambda_a_mesh, epsilon,
0, delta_mesh_sausage)
external_m_neg_1_norm = ac.conditions(k_a_mesh, lambda_a_mesh, epsilon,
1, delta_mesh_kink)
else:
external_sausage_norm = ac.conditions_without_interface(k_a_mesh,
lambda_a_mesh,
epsilon,
0,
delta_mesh_sausage)
external_m_neg_1_norm = ac.conditions_without_interface(k_a_mesh,
lambda_a_mesh,
epsilon,
1,
delta_mesh_kink)
instability_map = {'m_0': external_sausage_norm,
'm_neg_1': external_m_neg_1_norm}
kink_pal = sns.blend_palette([sns.xkcd_rgb["dandelion"],
sns.xkcd_rgb["white"]], 7, as_cmap=True)
kink_pal = sns.diverging_palette(73, 182, s=72, l=85, sep=1, n=9, as_cmap=True)
sausage_pal = sns.blend_palette(['orange', 'white'], 7, as_cmap=True)
sausage_pal = sns.diverging_palette(49, 181, s=99, l=78, sep=1, n=9, as_cmap=True)
instability_palette = {'m_0': sausage_pal,
'm_neg_1': kink_pal}
if interpolate:
instability_map['m_neg_1'] = interpolate_nans(lambda_a_mesh,
k_a_mesh,
instability_map['m_neg_1']
)
values = instability_map[mode_to_plot]
if floor_norm:
values = np.clip(values, -100., 100.)
values = values / -np.nanmin(values)
values = np.clip(values, -1., 1.)
else:
values = values / -np.nanmin(values)
if levels:
if log:
plot = plt.contourf(lambda_a_mesh, k_a_mesh, values,
cmap=instability_palette[mode_to_plot],
levels=levels, norm=SymLogNorm(linthresh))
else:
norm = BoundaryNorm(levels, 256)
plot = plt.contourf(lambda_a_mesh, k_a_mesh, values,
cmap=instability_palette[mode_to_plot],
levels=levels, norm=norm)
cbar = plt.colorbar(label=r'$\delta W$',
format=FormatStrFormatter('%.0e'))
cbar.set_label(label=r'$\delta W$', size=45, rotation=0, labelpad=30)
contourlines = plt.contour(lambda_a_mesh, k_a_mesh, values,
levels=levels[:-1], colors='grey')
cbar.add_lines(contourlines)
else:
if log:
plot = plt.contourf(lambda_a_mesh, k_a_mesh, values,
cmap=instability_palette[mode_to_plot],
norm=SymLogNorm(linthresh))
else:
plot = plt.contourf(lambda_a_mesh, k_a_mesh, values,
cmap=instability_palette[mode_to_plot])
if lim:
plot.set_clim(lim)
plt.plot([0.01, 0.1, 1.0, 2.0, 3.0],
[0.005, 0.05, 0.5, 1.0, 1.5], color='black')
axes = plt.gca()
axes.set_axis_bgcolor(sns.xkcd_rgb['grey'])
lambda_bar = np.linspace(0.01, 3., 750)
k_bar = np.linspace(0.01, 1.5, 750)
lambda_bar_mesh, k_bar_mesh = np.meshgrid(lambda_bar, k_bar)
if analytic_compare:
analytic_comparison(mode_to_plot, k_bar_mesh, lambda_bar_mesh, epsilon,
label_pos)
if show_points:
plt.scatter(lambda_a_mesh, k_a_mesh, marker='o', c='b', s=5)
plt.ylim(0.01, bounds[0])
plt.xlim(0.01, bounds[1])
axes = plt.gca()
plt.setp(axes.get_xticklabels(), fontsize=40)
plt.setp(axes.get_yticklabels(), fontsize=40)
plt.ylabel(r'$\bar{k}$', fontsize=45, rotation='horizontal', labelpad=30)
plt.xlabel(r'$\bar{\lambda}$', fontsize=45)
cbar.ax.tick_params(labelsize=40)
sns.despine(ax=axes)
plt.tight_layout()
plt.savefig('../../output/plots/' + name + '.png')
plt.show()
def analytic_comparison_flex(mode_to_plot, k_bar_mesh, lambda_bar_mesh, epsilon,
delta_values, label_pos):
r"""
Add red lines indicating stability boundaries from analytical model.
"""
line_labels = FixedFormatter(delta_values)
assert (mode_to_plot == 'm_neg_1' or
mode_to_plot == 'm_0'), ("Please specify mode_to_plot as either" +
"m_neg_1 or m_0")
if mode_to_plot == 'm_neg_1':
m = 1
color = 'red'
if mode_to_plot == 'm_0':
m = 0
color = 'red'
stability_kink_given_delta = []
for delta in delta_values:
stability_kink_given_delta.append(ac.conditions(k_bar_mesh,
lambda_bar_mesh,
epsilon,
m,
delta))
stability_kink = stability_kink_given_delta[0] < 0
stability_kink = stability_kink.astype(float)
stability_kink[stability_kink_given_delta[0] >= 0] = -1.5
stability_kink[stability_kink_given_delta[0] < 0] = -0.5
value = 0.5
for i in range(len(delta_values[1:])):
stability_kink[stability_kink_given_delta[i] < 0] = value
value += 1.
levels = np.array(range(len(delta_values))) - 1
cs = plt.contour(lambda_bar_mesh, k_bar_mesh, stability_kink,
levels=levels, colors=color, linewidths=5,
linestyles='dotted')
line_labels = {}
for i, level in enumerate(levels):
line_labels.update({level: r'$\delta = $ %2.1f' % (delta_values[i])})
print(levels, value, line_labels)
plt.clabel(cs, fmt=line_labels, fontsize=40, manual=label_pos)
return cs
def single_analytic_comparison(mode_to_plot,
k_bar_mesh,
lambda_bar_mesh,
epsilon,
delta_value,
label_pos):
"""
Add contour of analytic stability condition.
"""
line_labels = FixedFormatter(delta_value)
assert (mode_to_plot == 'm_neg_1' or
mode_to_plot == 'm_0'), ("Please specify mode_to_plot as either" +
"m_neg_1 or m_0")
if mode_to_plot == 'm_neg_1':
m = 1
color = 'red'
if mode_to_plot == 'm_0':
m = 0
color = 'red'
stability_kink_given_delta = []
stability_kink_given_delta.append(ac.conditions(k_bar_mesh,
lambda_bar_mesh,
epsilon,
m,
delta_value))
stability_kink = stability_kink_given_delta[0] < 0
stability_kink = stability_kink.astype(float)
stability_kink[stability_kink_given_delta[0] >= 0] = -1.5
stability_kink[stability_kink_given_delta[0] < 0] = -0.5
levels = [-1]
cs = plt.contour(lambda_bar_mesh, k_bar_mesh, stability_kink,
levels=levels, colors=color, linewidths=5,
linestyles='dotted')
line_labels = {}
#plt.clabel(cs, fmt={-1: '%2.1f' % delta_value}, fontsize=40, manual=label_pos)
return cs
def analytic_comparison(mode_to_plot, k_bar_mesh, lambda_bar_mesh, epsilon,
label_pos, lines=None, colors=None):
r"""
Add red lines indicating stability boundaries from analytical model.
"""
if not lines:
line_labels = FixedFormatter(['-1', '0', '1'])
else:
line_labels = FixedFormatter([str(line) for line in lines])
assert (mode_to_plot == 'm_neg_1' or
mode_to_plot == 'm_0'), ("Please specify mode_to_plot as either" +
"m_neg_1 or m_0")
if mode_to_plot == 'm_neg_1':
m = 1
if not colors:
color = 'red'
else:
color = colors
if mode_to_plot == 'm_0':
m = 0
if not colors:
color = 'red'
else:
color = colors
if not lines:
stability_kink_m_neg_1 = ac.conditions(k_bar_mesh, lambda_bar_mesh,
epsilon, m, -1.)
stability_kink_m_0 = ac.conditions(k_bar_mesh, lambda_bar_mesh,
epsilon, m, 0.)
stability_kink_m_1 = ac.conditions(k_bar_mesh, lambda_bar_mesh,
epsilon, m, 1)
else:
stability_kink_m_neg_1 = ac.conditions(k_bar_mesh, lambda_bar_mesh,
epsilon, m, lines[0])
stability_kink_m_0 = ac.conditions(k_bar_mesh, lambda_bar_mesh,
epsilon, m, lines[1])
stability_kink_m_1 = ac.conditions(k_bar_mesh, lambda_bar_mesh,
epsilon, m, lines[2])
stability_kink = stability_kink_m_neg_1 < 0
stability_kink = stability_kink.astype(float)
stability_kink[stability_kink_m_neg_1 >= 0] = -1.5
stability_kink[stability_kink_m_neg_1 < 0] = -0.5
stability_kink[stability_kink_m_0 < 0] = 0.5
stability_kink[stability_kink_m_1 < 0] = 1.5
cs = plt.contour(lambda_bar_mesh, k_bar_mesh, stability_kink,
levels=[-1, 0, 1], colors=color, linewidths=10,
linestyles='dotted')
if not lines:
plt.clabel(cs, fmt={-1: r'$\delta = -1$', 0: r'$\delta = 0$',
1: r'$\delta = 1$'}, fontsize=40, manual=label_pos)
else:
plt.clabel(cs, fmt={-1: r'$\delta =$ %s' % lines[0], 0: r'$\delta =$ %s' % lines[1],
1: r'$\delta =$ %s' % lines[2]}, fontsize=40, manual=label_pos)
return cs
def plot_lambda_k_space_delta(filename, mode_to_plot,
clip=False, delta_min=-1.5,
delta_max=1., levels=None,
interpolate=True, compare_analytic=False,
epsilon=None, analytic_label_pos=None, lines=None,
plot_numeric_boundary=False, cmap=None, analytic_color=None):
r"""
Plot values of delta in lambda k space.
"""
data_meshes = np.load(filename)
lambda_mesh = data_meshes['lambda_a_mesh']
k_mesh = data_meshes['k_a_mesh']
if mode_to_plot == 0:
color = 'green'
delta_mesh = data_meshes['delta_m_0']
external_sausage = data_meshes['d_w_m_0']
else:
#color = sns.xkcd_rgb["dandelion"]
color = 'green'
delta_mesh = data_meshes['delta_m_neg_1']
external_kink = data_meshes['d_w_m_neg_1']
if interpolate:
delta_mesh = interpolate_nans(lambda_mesh,
k_mesh,
delta_mesh)
if clip:
delta_mesh = np.clip(delta_mesh, delta_min, delta_max)
if cmap:
colors = cmap
else:
colors = sns.light_palette(color, n_colors=6, reverse=True,
as_cmap=True)
if levels:
plt.contourf(lambda_mesh, k_mesh, delta_mesh, cmap=colors,
levels=levels)
else:
plt.contourf(lambda_mesh, k_mesh, delta_mesh, cmap=colors)
cbar = plt.colorbar(label=r'$\delta$')
cbar.set_label(label=r'$\delta(\bar{\lambda},\bar{k})$', size=45, rotation=0, labelpad=30)
if levels:
contourlines = plt.contour(lambda_mesh, k_mesh, delta_mesh,
colors='grey', levels=levels)
else:
contourlines = plt.contour(lambda_mesh, k_mesh, delta_mesh,
colors='grey')
cbar.add_lines(contourlines)
if mode_to_plot == 0:
mode_to_plot = 'm_0'
else:
mode_to_plot = 'm_neg_1'
if compare_analytic:
if analytic_color:
analytic_comparison(mode_to_plot, k_mesh, lambda_mesh, epsilon,
analytic_label_pos, lines=lines, colors=analytic_color)
else:
analytic_comparison(mode_to_plot, k_mesh, lambda_mesh, epsilon,
analytic_label_pos, lines=lines)
if plot_numeric_boundary:
contour = plt.contour(lambda_mesh,
k_mesh,
external_sausage,
levels=[0],
colors='grey',
linestyles='-.')
axes = plt.gca()
axes.set_axis_bgcolor(sns.xkcd_rgb['grey'])
plt.setp(axes.get_xticklabels(), fontsize=40)
plt.setp(axes.get_yticklabels(), fontsize=40)
plt.ylabel(r'$\bar{k}$', fontsize=45, rotation='horizontal', labelpad=30)
plt.xlabel(r'$\bar{\lambda}$', fontsize=45)
cbar.ax.tick_params(labelsize=40)
axes.set_xticks(np.arange(0., 5, 1.))
axes.set_yticks(np.arange(0., 2.0, 0.5))
plt.ylim(0.01, 1.5)
plt.xlim(0.01, 3.0)
sns.despine(ax=axes)
plt.tight_layout()
def sausage_kink_ratio(filename, xy_limits=None, cmap=None, save_as=None,
levels=None, return_ax=False):
r"""
Plot ratio of sausage and kink potential energies.
"""
meshes = np.load(filename)
lambda_bar_mesh = meshes['lambda_a_mesh']
k_bar_mesh = meshes['k_a_mesh']
external_m_neg_1 = meshes['d_w_m_neg_1']
external_sausage = meshes['d_w_m_0']
meshes.close()
sausage_stable_region = np.invert((external_sausage < 0))
ratio = np.abs(external_sausage / external_m_neg_1)
ratio[sausage_stable_region] = np.nan
ratio_log = np.log10(ratio)
if not cmap:
cmap = sns.light_palette(sns.xkcd_rgb['red orange'],
as_cmap=True)
if levels:
contours = plt.contourf(lambda_bar_mesh, k_bar_mesh,
ratio_log, cmap=cmap, levels=levels)
else:
contours = plt.contourf(lambda_bar_mesh, k_bar_mesh,
ratio_log, cmap=cmap)
colorbar = plt.colorbar(format=FormatStrFormatter(r'$10^{%i}$'))
colorbar.set_label(r'$\frac{\delta W_{m=0}}{\delta W_{m=-1}}$',
size=35, rotation=0, labelpad=50)
if levels:
lines = plt.contour(lambda_bar_mesh, k_bar_mesh,
ratio_log, colors='grey', levels=levels)
else:
lines = plt.contour(lambda_bar_mesh, k_bar_mesh,
ratio_log, colors='grey')
colorbar.add_lines(lines)
axes = plt.gca()
axes.plot([0, 3.], [0., 1.5], '--', c='black', lw=5)
axes.set_xlabel(r'$\bar{\lambda}$', fontsize=40)
plt.setp(axes.get_xticklabels(), fontsize=30)
axes.set_xticks(np.arange(0., 4.5, 0.5))
axes.set_ylabel(r'$\bar{k}$', fontsize=40)
plt.setp(axes.get_yticklabels(), fontsize=30)
axes.set_yticks(np.arange(0., 2.0, 0.5))
if xy_limits:
axes.set_ylim((xy_limits[0], xy_limits[1]))
axes.set_xlim((xy_limits[2], xy_limits[3]))
sns.despine()
colorbar.ax.yaxis.set_ticks_position('right')
colorbar.ax.tick_params(labelsize=30)
plt.tight_layout()
if return_ax:
return axes, colorbar
else:
if save_as:
plt.savefig(save_as)
plt.show()
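# --- Hedged usage sketch (assumption: this __main__ block is added for illustration
# and is not part of the original module; 'demo_meshes.npz' is a throwaway file name).
# It builds a tiny synthetic data set with the mesh keys the plotting helpers read
# ('lambda_a_mesh', 'k_a_mesh', 'd_w_m_neg_1', 'd_w_m_0') and renders the
# sausage/kink energy ratio without touching ../../output/.
if __name__ == '__main__':
    demo_lambda, demo_k = np.meshgrid(np.linspace(0.01, 3., 50), np.linspace(0.01, 1.5, 50))
    demo_kink = -np.ones_like(demo_lambda)                 # kink taken as uniformly unstable
    demo_sausage = demo_k - 0.5 * demo_lambda              # sausage unstable below k = lambda/2
    np.savez('demo_meshes.npz', lambda_a_mesh=demo_lambda, k_a_mesh=demo_k,
             d_w_m_neg_1=demo_kink, d_w_m_0=demo_sausage)
    demo_axes, demo_cbar = sausage_kink_ratio('demo_meshes.npz', return_ax=True)
    plt.show()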
| mit |
jonasjberg/autonameow | autonameow/vendor/dateutil/parser/_parser.py | 8 | 57607 | # -*- coding: utf-8 -*-
"""
This module offers a generic date/time string parser which is able to parse
most known formats to represent a date and/or time.
This module attempts to be forgiving with regards to unlikely input formats,
returning a datetime object even for dates which are ambiguous. If an element
of a date/time stamp is omitted, the following rules are applied:
- If AM or PM is left unspecified, a 24-hour clock is assumed, however, an hour
on a 12-hour clock (``0 <= hour <= 12``) *must* be specified if AM or PM is
specified.
- If a time zone is omitted, a timezone-naive datetime is returned.
If any other elements are missing, they are taken from the
:class:`datetime.datetime` object passed to the parameter ``default``. If this
results in a day number exceeding the valid number of days per month, the
value falls back to the end of the month.
Additional resources about date/time string formats can be found below:
- `A summary of the international standard date and time notation
<http://www.cl.cam.ac.uk/~mgk25/iso-time.html>`_
- `W3C Date and Time Formats <http://www.w3.org/TR/NOTE-datetime>`_
- `Time Formats (Planetary Rings Node) <https://pds-rings.seti.org:443/tools/time_formats.html>`_
- `CPAN ParseDate module
<http://search.cpan.org/~muir/Time-modules-2013.0912/lib/Time/ParseDate.pm>`_
- `Java SimpleDateFormat Class
<https://docs.oracle.com/javase/6/docs/api/java/text/SimpleDateFormat.html>`_
"""
from __future__ import unicode_literals
import datetime
import re
import string
import time
import warnings
from calendar import monthrange
from io import StringIO
import six
from six import binary_type, integer_types, text_type
from decimal import Decimal
from warnings import warn
from .. import relativedelta
from .. import tz
__all__ = ["parse", "parserinfo"]
# TODO: pandas.core.tools.datetimes imports this explicitly. Might be worth
# making public and/or figuring out if there is something we can
# take off their plate.
class _timelex(object):
# Fractional seconds are sometimes split by a comma
_split_decimal = re.compile("([.,])")
def __init__(self, instream):
if six.PY2:
# In Python 2, we can't duck type properly because unicode has
# a 'decode' function, and we'd be double-decoding
if isinstance(instream, (binary_type, bytearray)):
instream = instream.decode()
else:
if getattr(instream, 'decode', None) is not None:
instream = instream.decode()
if isinstance(instream, text_type):
instream = StringIO(instream)
elif getattr(instream, 'read', None) is None:
raise TypeError('Parser must be a string or character stream, not '
'{itype}'.format(itype=instream.__class__.__name__))
self.instream = instream
self.charstack = []
self.tokenstack = []
self.eof = False
def get_token(self):
"""
This function breaks the time string into lexical units (tokens), which
can be parsed by the parser. Lexical units are demarcated by changes in
the character set, so any continuous string of letters is considered
one unit, any continuous string of numbers is considered one unit.
The main complication arises from the fact that dots ('.') can be used
both as separators (e.g. "Sep.20.2009") or decimal points (e.g.
"4:30:21.447"). As such, it is necessary to read the full context of
any dot-separated strings before breaking it into tokens; as such, this
function maintains a "token stack", for when the ambiguous context
demands that multiple tokens be parsed at once.
"""
if self.tokenstack:
return self.tokenstack.pop(0)
seenletters = False
token = None
state = None
while not self.eof:
# We only realize that we've reached the end of a token when we
# find a character that's not part of the current token - since
# that character may be part of the next token, it's stored in the
# charstack.
if self.charstack:
nextchar = self.charstack.pop(0)
else:
nextchar = self.instream.read(1)
while nextchar == '\x00':
nextchar = self.instream.read(1)
if not nextchar:
self.eof = True
break
elif not state:
# First character of the token - determines if we're starting
# to parse a word, a number or something else.
token = nextchar
if self.isword(nextchar):
state = 'a'
elif self.isnum(nextchar):
state = '0'
elif self.isspace(nextchar):
token = ' '
break # emit token
else:
break # emit token
elif state == 'a':
# If we've already started reading a word, we keep reading
# letters until we find something that's not part of a word.
seenletters = True
if self.isword(nextchar):
token += nextchar
elif nextchar == '.':
token += nextchar
state = 'a.'
else:
self.charstack.append(nextchar)
break # emit token
elif state == '0':
# If we've already started reading a number, we keep reading
# numbers until we find something that doesn't fit.
if self.isnum(nextchar):
token += nextchar
elif nextchar == '.' or (nextchar == ',' and len(token) >= 2):
token += nextchar
state = '0.'
else:
self.charstack.append(nextchar)
break # emit token
elif state == 'a.':
# If we've seen some letters and a dot separator, continue
# parsing, and the tokens will be broken up later.
seenletters = True
if nextchar == '.' or self.isword(nextchar):
token += nextchar
elif self.isnum(nextchar) and token[-1] == '.':
token += nextchar
state = '0.'
else:
self.charstack.append(nextchar)
break # emit token
elif state == '0.':
# If we've seen at least one dot separator, keep going, we'll
# break up the tokens later.
if nextchar == '.' or self.isnum(nextchar):
token += nextchar
elif self.isword(nextchar) and token[-1] == '.':
token += nextchar
state = 'a.'
else:
self.charstack.append(nextchar)
break # emit token
if (state in ('a.', '0.') and (seenletters or token.count('.') > 1 or
token[-1] in '.,')):
l = self._split_decimal.split(token)
token = l[0]
for tok in l[1:]:
if tok:
self.tokenstack.append(tok)
if state == '0.' and token.count('.') == 0:
token = token.replace(',', '.')
return token
def __iter__(self):
return self
def __next__(self):
token = self.get_token()
if token is None:
raise StopIteration
return token
def next(self):
return self.__next__() # Python 2.x support
@classmethod
def split(cls, s):
return list(cls(s))
@classmethod
def isword(cls, nextchar):
""" Whether or not the next character is part of a word """
return nextchar.isalpha()
@classmethod
def isnum(cls, nextchar):
""" Whether the next character is part of a number """
return nextchar.isdigit()
@classmethod
def isspace(cls, nextchar):
""" Whether the next character is whitespace """
return nextchar.isspace()
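# Hedged illustration (not part of upstream dateutil; the _demo name is mine): how
# _timelex tokenizes a timestamp. Dots in "Sep.20.2009" act as separators and are
# split out, while the dot in "21.447" stays attached to the number as a decimal point.
def _demo_timelex_split():
    return _timelex.split("Sep.20.2009 4:30:21.447")
    # -> ['Sep', '.', '20', '.', '2009', ' ', '4', ':', '30', ':', '21.447']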
class _resultbase(object):
def __init__(self):
for attr in self.__slots__:
setattr(self, attr, None)
def _repr(self, classname):
l = []
for attr in self.__slots__:
value = getattr(self, attr)
if value is not None:
l.append("%s=%s" % (attr, repr(value)))
return "%s(%s)" % (classname, ", ".join(l))
def __len__(self):
return (sum(getattr(self, attr) is not None
for attr in self.__slots__))
def __repr__(self):
return self._repr(self.__class__.__name__)
class parserinfo(object):
"""
Class which handles what inputs are accepted. Subclass this to customize
the language and acceptable values for each parameter.
:param dayfirst:
Whether to interpret the first value in an ambiguous 3-integer date
(e.g. 01/05/09) as the day (``True``) or month (``False``). If
``yearfirst`` is set to ``True``, this distinguishes between YDM
and YMD. Default is ``False``.
:param yearfirst:
Whether to interpret the first value in an ambiguous 3-integer date
(e.g. 01/05/09) as the year. If ``True``, the first number is taken
to be the year, otherwise the last number is taken to be the year.
Default is ``False``.
"""
# m from a.m/p.m, t from ISO T separator
JUMP = [" ", ".", ",", ";", "-", "/", "'",
"at", "on", "and", "ad", "m", "t", "of",
"st", "nd", "rd", "th"]
WEEKDAYS = [("Mon", "Monday"),
("Tue", "Tuesday"), # TODO: "Tues"
("Wed", "Wednesday"),
("Thu", "Thursday"), # TODO: "Thurs"
("Fri", "Friday"),
("Sat", "Saturday"),
("Sun", "Sunday")]
MONTHS = [("Jan", "January"),
("Feb", "February"), # TODO: "Febr"
("Mar", "March"),
("Apr", "April"),
("May", "May"),
("Jun", "June"),
("Jul", "July"),
("Aug", "August"),
("Sep", "Sept", "September"),
("Oct", "October"),
("Nov", "November"),
("Dec", "December")]
HMS = [("h", "hour", "hours"),
("m", "minute", "minutes"),
("s", "second", "seconds")]
AMPM = [("am", "a"),
("pm", "p")]
UTCZONE = ["UTC", "GMT", "Z"]
PERTAIN = ["of"]
TZOFFSET = {}
# TODO: ERA = ["AD", "BC", "CE", "BCE", "Stardate",
# "Anno Domini", "Year of Our Lord"]
def __init__(self, dayfirst=False, yearfirst=False):
self._jump = self._convert(self.JUMP)
self._weekdays = self._convert(self.WEEKDAYS)
self._months = self._convert(self.MONTHS)
self._hms = self._convert(self.HMS)
self._ampm = self._convert(self.AMPM)
self._utczone = self._convert(self.UTCZONE)
self._pertain = self._convert(self.PERTAIN)
self.dayfirst = dayfirst
self.yearfirst = yearfirst
self._year = time.localtime().tm_year
self._century = self._year // 100 * 100
def _convert(self, lst):
dct = {}
for i, v in enumerate(lst):
if isinstance(v, tuple):
for v in v:
dct[v.lower()] = i
else:
dct[v.lower()] = i
return dct
def jump(self, name):
return name.lower() in self._jump
def weekday(self, name):
try:
return self._weekdays[name.lower()]
except KeyError:
pass
return None
def month(self, name):
try:
return self._months[name.lower()] + 1
except KeyError:
pass
return None
def hms(self, name):
try:
return self._hms[name.lower()]
except KeyError:
return None
def ampm(self, name):
try:
return self._ampm[name.lower()]
except KeyError:
return None
def pertain(self, name):
return name.lower() in self._pertain
def utczone(self, name):
return name.lower() in self._utczone
def tzoffset(self, name):
if name in self._utczone:
return 0
return self.TZOFFSET.get(name)
def convertyear(self, year, century_specified=False):
"""
Converts two-digit years to year within [-50, 49]
range of self._year (current local time)
"""
# Function contract is that the year is always positive
assert year >= 0
if year < 100 and not century_specified:
# assume current century to start
year += self._century
if year >= self._year + 50: # if too far in future
year -= 100
elif year < self._year - 50: # if too far in past
year += 100
return year
def validate(self, res):
# move to info
if res.year is not None:
res.year = self.convertyear(res.year, res.century_specified)
if res.tzoffset == 0 and not res.tzname or res.tzname == 'Z':
res.tzname = "UTC"
res.tzoffset = 0
elif res.tzoffset != 0 and res.tzname and self.utczone(res.tzname):
res.tzoffset = 0
return True
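# Hedged example (not in upstream dateutil): with the current year somewhere in the
# 2000s-2020s, parserinfo().convertyear(99) -> 1999 and parserinfo().convertyear(7) -> 2007,
# because a bare two-digit year is pinned to within +/-50 years of the current year.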
class _ymd(list):
def __init__(self, *args, **kwargs):
super(self.__class__, self).__init__(*args, **kwargs)
self.century_specified = False
self.dstridx = None
self.mstridx = None
self.ystridx = None
@property
def has_year(self):
return self.ystridx is not None
@property
def has_month(self):
return self.mstridx is not None
@property
def has_day(self):
return self.dstridx is not None
def could_be_day(self, value):
if self.has_day:
return False
elif not self.has_month:
return 1 <= value <= 31
elif not self.has_year:
# Be permissive, assume leapyear
month = self[self.mstridx]
return 1 <= value <= monthrange(2000, month)[1]
else:
month = self[self.mstridx]
year = self[self.ystridx]
return 1 <= value <= monthrange(year, month)[1]
def append(self, val, label=None):
if hasattr(val, '__len__'):
if val.isdigit() and len(val) > 2:
self.century_specified = True
if label not in [None, 'Y']: # pragma: no cover
raise ValueError(label)
label = 'Y'
elif val > 100:
self.century_specified = True
if label not in [None, 'Y']: # pragma: no cover
raise ValueError(label)
label = 'Y'
super(self.__class__, self).append(int(val))
if label == 'M':
if self.has_month:
raise ValueError('Month is already set')
self.mstridx = len(self) - 1
elif label == 'D':
if self.has_day:
raise ValueError('Day is already set')
self.dstridx = len(self) - 1
elif label == 'Y':
if self.has_year:
raise ValueError('Year is already set')
self.ystridx = len(self) - 1
def _resolve_from_stridxs(self, strids):
"""
Try to resolve the identities of year/month/day elements using
ystridx, mstridx, and dstridx, if enough of these are specified.
"""
if len(self) == 3 and len(strids) == 2:
# we can back out the remaining stridx value
missing = [x for x in range(3) if x not in strids.values()]
key = [x for x in ['y', 'm', 'd'] if x not in strids]
assert len(missing) == len(key) == 1
key = key[0]
val = missing[0]
strids[key] = val
assert len(self) == len(strids) # otherwise this should not be called
out = {key: self[strids[key]] for key in strids}
return (out.get('y'), out.get('m'), out.get('d'))
def resolve_ymd(self, yearfirst, dayfirst):
len_ymd = len(self)
year, month, day = (None, None, None)
strids = (('y', self.ystridx),
('m', self.mstridx),
('d', self.dstridx))
strids = {key: val for key, val in strids if val is not None}
if (len(self) == len(strids) > 0 or
(len(self) == 3 and len(strids) == 2)):
return self._resolve_from_stridxs(strids)
mstridx = self.mstridx
if len_ymd > 3:
raise ValueError("More than three YMD values")
elif len_ymd == 1 or (mstridx is not None and len_ymd == 2):
# One member, or two members with a month string
if mstridx is not None:
month = self[mstridx]
# since mstridx is 0 or 1, self[mstridx-1] always
# looks up the other element
other = self[mstridx - 1]
else:
other = self[0]
if len_ymd > 1 or mstridx is None:
if other > 31:
year = other
else:
day = other
elif len_ymd == 2:
# Two members with numbers
if self[0] > 31:
# 99-01
year, month = self
elif self[1] > 31:
# 01-99
month, year = self
elif dayfirst and self[1] <= 12:
# 13-01
day, month = self
else:
# 01-13
month, day = self
elif len_ymd == 3:
# Three members
if mstridx == 0:
if self[1] > 31:
# Apr-2003-25
month, year, day = self
else:
month, day, year = self
elif mstridx == 1:
if self[0] > 31 or (yearfirst and self[2] <= 31):
# 99-Jan-01
year, month, day = self
else:
# 01-Jan-01
# Give precedence to day-first, since
# two-digit years are usually hand-written.
day, month, year = self
elif mstridx == 2:
# WTF!?
if self[1] > 31:
# 01-99-Jan
day, year, month = self
else:
# 99-01-Jan
year, day, month = self
else:
if (self[0] > 31 or
self.ystridx == 0 or
(yearfirst and self[1] <= 12 and self[2] <= 31)):
# 99-01-01
if dayfirst and self[2] <= 12:
year, day, month = self
else:
year, month, day = self
elif self[0] > 12 or (dayfirst and self[1] <= 12):
# 13-01-01
day, month, year = self
else:
# 01-13-01
month, day, year = self
return year, month, day
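# Hedged illustration (not part of upstream dateutil): for an ambiguous all-numeric
# date such as "01/05/09", the three values reach resolve_ymd unlabeled, so the
# defaults give month/day/year = 2009-01-05; dayfirst=True gives 2009-05-01 and
# yearfirst=True gives 2001-05-09.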
class parser(object):
def __init__(self, info=None):
self.info = info or parserinfo()
def parse(self, timestr, default=None,
ignoretz=False, tzinfos=None, **kwargs):
"""
Parse the date/time string into a :class:`datetime.datetime` object.
:param timestr:
Any date/time string using the supported formats.
:param default:
The default datetime object, if this is a datetime object and not
``None``, elements specified in ``timestr`` replace elements in the
default object.
:param ignoretz:
If set ``True``, time zones in parsed strings are ignored and a
naive :class:`datetime.datetime` object is returned.
:param tzinfos:
Additional time zone names / aliases which may be present in the
string. This argument maps time zone names (and optionally offsets
from those time zones) to time zones. This parameter can be a
dictionary with timezone aliases mapping time zone names to time
zones or a function taking two parameters (``tzname`` and
``tzoffset``) and returning a time zone.
The timezones to which the names are mapped can be an integer
offset from UTC in seconds or a :class:`tzinfo` object.
.. doctest::
:options: +NORMALIZE_WHITESPACE
>>> from dateutil.parser import parse
>>> from dateutil.tz import gettz
>>> tzinfos = {"BRST": -7200, "CST": gettz("America/Chicago")}
>>> parse("2012-01-19 17:21:00 BRST", tzinfos=tzinfos)
datetime.datetime(2012, 1, 19, 17, 21, tzinfo=tzoffset(u'BRST', -7200))
>>> parse("2012-01-19 17:21:00 CST", tzinfos=tzinfos)
datetime.datetime(2012, 1, 19, 17, 21,
tzinfo=tzfile('/usr/share/zoneinfo/America/Chicago'))
This parameter is ignored if ``ignoretz`` is set.
:param \\*\\*kwargs:
Keyword arguments as passed to ``_parse()``.
:return:
Returns a :class:`datetime.datetime` object or, if the
``fuzzy_with_tokens`` option is ``True``, returns a tuple, the
first element being a :class:`datetime.datetime` object, the second
a tuple containing the fuzzy tokens.
:raises ValueError:
Raised for invalid or unknown string format, if the provided
:class:`tzinfo` is not in a valid format, or if an invalid date
would be created.
:raises TypeError:
Raised for non-string or character stream input.
:raises OverflowError:
Raised if the parsed date exceeds the largest valid C integer on
your system.
"""
if default is None:
default = datetime.datetime.now().replace(hour=0, minute=0,
second=0, microsecond=0)
res, skipped_tokens = self._parse(timestr, **kwargs)
if res is None:
raise ValueError("Unknown string format:", timestr)
if len(res) == 0:
raise ValueError("String does not contain a date:", timestr)
ret = self._build_naive(res, default)
if not ignoretz:
ret = self._build_tzaware(ret, res, tzinfos)
if kwargs.get('fuzzy_with_tokens', False):
return ret, skipped_tokens
else:
return ret
class _result(_resultbase):
__slots__ = ["year", "month", "day", "weekday",
"hour", "minute", "second", "microsecond",
"tzname", "tzoffset", "ampm","any_unused_tokens"]
def _parse(self, timestr, dayfirst=None, yearfirst=None, fuzzy=False,
fuzzy_with_tokens=False):
"""
Private method which performs the heavy lifting of parsing, called from
``parse()``, which passes on its ``kwargs`` to this function.
:param timestr:
The string to parse.
:param dayfirst:
Whether to interpret the first value in an ambiguous 3-integer date
(e.g. 01/05/09) as the day (``True``) or month (``False``). If
``yearfirst`` is set to ``True``, this distinguishes between YDM
and YMD. If set to ``None``, this value is retrieved from the
current :class:`parserinfo` object (which itself defaults to
``False``).
:param yearfirst:
Whether to interpret the first value in an ambiguous 3-integer date
(e.g. 01/05/09) as the year. If ``True``, the first number is taken
to be the year, otherwise the last number is taken to be the year.
If this is set to ``None``, the value is retrieved from the current
:class:`parserinfo` object (which itself defaults to ``False``).
:param fuzzy:
Whether to allow fuzzy parsing, allowing for string like "Today is
January 1, 2047 at 8:21:00AM".
:param fuzzy_with_tokens:
If ``True``, ``fuzzy`` is automatically set to True, and the parser
will return a tuple where the first element is the parsed
:class:`datetime.datetime` datetimestamp and the second element is
a tuple containing the portions of the string which were ignored:
.. doctest::
>>> from dateutil.parser import parse
>>> parse("Today is January 1, 2047 at 8:21:00AM", fuzzy_with_tokens=True)
(datetime.datetime(2047, 1, 1, 8, 21), (u'Today is ', u' ', u'at '))
"""
if fuzzy_with_tokens:
fuzzy = True
info = self.info
if dayfirst is None:
dayfirst = info.dayfirst
if yearfirst is None:
yearfirst = info.yearfirst
res = self._result()
l = _timelex.split(timestr) # Splits the timestr into tokens
skipped_idxs = []
# year/month/day list
ymd = _ymd()
len_l = len(l)
i = 0
try:
while i < len_l:
# Check if it's a number
value_repr = l[i]
try:
value = float(value_repr)
except ValueError:
value = None
if value is not None:
# Numeric token
i = self._parse_numeric_token(l, i, info, ymd, res, fuzzy)
# Check weekday
elif info.weekday(l[i]) is not None:
value = info.weekday(l[i])
res.weekday = value
# Check month name
elif info.month(l[i]) is not None:
value = info.month(l[i])
ymd.append(value, 'M')
if i + 1 < len_l:
if l[i + 1] in ('-', '/'):
# Jan-01[-99]
sep = l[i + 1]
ymd.append(l[i + 2])
if i + 3 < len_l and l[i + 3] == sep:
# Jan-01-99
ymd.append(l[i + 4])
i += 2
i += 2
elif (i + 4 < len_l and l[i + 1] == l[i + 3] == ' ' and
info.pertain(l[i + 2])):
# Jan of 01
# In this case, 01 is clearly year
if l[i + 4].isdigit():
# Convert it here to become unambiguous
value = int(l[i + 4])
year = str(info.convertyear(value))
ymd.append(year, 'Y')
else:
# Wrong guess
pass
# TODO: not hit in tests
i += 4
# Check am/pm
elif info.ampm(l[i]) is not None:
value = info.ampm(l[i])
val_is_ampm = self._ampm_valid(res.hour, res.ampm, fuzzy)
if val_is_ampm:
res.hour = self._adjust_ampm(res.hour, value)
res.ampm = value
elif fuzzy:
skipped_idxs.append(i)
# Check for a timezone name
elif self._could_be_tzname(res.hour, res.tzname, res.tzoffset, l[i]):
res.tzname = l[i]
res.tzoffset = info.tzoffset(res.tzname)
# Check for something like GMT+3, or BRST+3. Notice
# that it doesn't mean "I am 3 hours after GMT", but
# "my time +3 is GMT". If found, we reverse the
# logic so that timezone parsing code will get it
# right.
if i + 1 < len_l and l[i + 1] in ('+', '-'):
l[i + 1] = ('+', '-')[l[i + 1] == '+']
res.tzoffset = None
if info.utczone(res.tzname):
# With something like GMT+3, the timezone
# is *not* GMT.
res.tzname = None
# Check for a numbered timezone
elif res.hour is not None and l[i] in ('+', '-'):
signal = (-1, 1)[l[i] == '+']
len_li = len(l[i + 1])
# TODO: check that l[i + 1] is integer?
if len_li == 4:
# -0300
hour_offset = int(l[i + 1][:2])
min_offset = int(l[i + 1][2:])
elif i + 2 < len_l and l[i + 2] == ':':
# -03:00
hour_offset = int(l[i + 1])
min_offset = int(l[i + 3]) # TODO: Check that l[i+3] is minute-like?
i += 2
elif len_li <= 2:
# -[0]3
hour_offset = int(l[i + 1][:2])
min_offset = 0
else:
raise ValueError(timestr)
res.tzoffset = signal * (hour_offset * 3600 + min_offset * 60)
# Look for a timezone name between parenthesis
if (i + 5 < len_l and
info.jump(l[i + 2]) and l[i + 3] == '(' and
l[i + 5] == ')' and
3 <= len(l[i + 4]) and
self._could_be_tzname(res.hour, res.tzname,
None, l[i + 4])):
# -0300 (BRST)
res.tzname = l[i + 4]
i += 4
i += 1
# Check jumps
elif not (info.jump(l[i]) or fuzzy):
raise ValueError(timestr)
else:
skipped_idxs.append(i)
i += 1
# Process year/month/day
year, month, day = ymd.resolve_ymd(yearfirst, dayfirst)
res.century_specified = ymd.century_specified
res.year = year
res.month = month
res.day = day
except (IndexError, ValueError):
return None, None
if not info.validate(res):
return None, None
if fuzzy_with_tokens:
skipped_tokens = self._recombine_skipped(l, skipped_idxs)
return res, tuple(skipped_tokens)
else:
return res, None
def _parse_numeric_token(self, tokens, idx, info, ymd, res, fuzzy):
# Token is a number
value_repr = tokens[idx]
try:
value = self._to_decimal(value_repr)
except Exception as e:
six.raise_from(ValueError('Unknown numeric token'), e)
len_li = len(value_repr)
len_l = len(tokens)
if (len(ymd) == 3 and len_li in (2, 4) and
res.hour is None and
(idx + 1 >= len_l or
(tokens[idx + 1] != ':' and
info.hms(tokens[idx + 1]) is None))):
# 19990101T23[59]
s = tokens[idx]
res.hour = int(s[:2])
if len_li == 4:
res.minute = int(s[2:])
elif len_li == 6 or (len_li > 6 and tokens[idx].find('.') == 6):
# YYMMDD or HHMMSS[.ss]
s = tokens[idx]
if not ymd and '.' not in tokens[idx]:
ymd.append(s[:2])
ymd.append(s[2:4])
ymd.append(s[4:])
else:
# 19990101T235959[.59]
# TODO: Check if res attributes already set.
res.hour = int(s[:2])
res.minute = int(s[2:4])
res.second, res.microsecond = self._parsems(s[4:])
elif len_li in (8, 12, 14):
# YYYYMMDD
s = tokens[idx]
ymd.append(s[:4], 'Y')
ymd.append(s[4:6])
ymd.append(s[6:8])
if len_li > 8:
res.hour = int(s[8:10])
res.minute = int(s[10:12])
if len_li > 12:
res.second = int(s[12:])
elif self._find_hms_idx(idx, tokens, info, allow_jump=True) is not None:
# HH[ ]h or MM[ ]m or SS[.ss][ ]s
hms_idx = self._find_hms_idx(idx, tokens, info, allow_jump=True)
(idx, hms) = self._parse_hms(idx, tokens, info, hms_idx)
if hms is not None:
# TODO: checking that hour/minute/second are not
# already set?
self._assign_hms(res, value_repr, hms)
elif idx + 2 < len_l and tokens[idx + 1] == ':':
# HH:MM[:SS[.ss]]
res.hour = int(value)
value = self._to_decimal(tokens[idx + 2]) # TODO: try/except for this?
(res.minute, res.second) = self._parse_min_sec(value)
if idx + 4 < len_l and tokens[idx + 3] == ':':
res.second, res.microsecond = self._parsems(tokens[idx + 4])
idx += 2
idx += 2
elif idx + 1 < len_l and tokens[idx + 1] in ('-', '/', '.'):
sep = tokens[idx + 1]
ymd.append(value_repr)
if idx + 2 < len_l and not info.jump(tokens[idx + 2]):
if tokens[idx + 2].isdigit():
# 01-01[-01]
ymd.append(tokens[idx + 2])
else:
# 01-Jan[-01]
value = info.month(tokens[idx + 2])
if value is not None:
ymd.append(value, 'M')
else:
raise ValueError()
if idx + 3 < len_l and tokens[idx + 3] == sep:
# We have three members
value = info.month(tokens[idx + 4])
if value is not None:
ymd.append(value, 'M')
else:
ymd.append(tokens[idx + 4])
idx += 2
idx += 1
idx += 1
elif idx + 1 >= len_l or info.jump(tokens[idx + 1]):
if idx + 2 < len_l and info.ampm(tokens[idx + 2]) is not None:
# 12 am
hour = int(value)
res.hour = self._adjust_ampm(hour, info.ampm(tokens[idx + 2]))
idx += 1
else:
# Year, month or day
ymd.append(value)
idx += 1
elif info.ampm(tokens[idx + 1]) is not None and (0 <= value < 24):
# 12am
hour = int(value)
res.hour = self._adjust_ampm(hour, info.ampm(tokens[idx + 1]))
idx += 1
elif ymd.could_be_day(value):
ymd.append(value)
elif not fuzzy:
raise ValueError()
return idx
def _find_hms_idx(self, idx, tokens, info, allow_jump):
len_l = len(tokens)
if idx+1 < len_l and info.hms(tokens[idx+1]) is not None:
# There is an "h", "m", or "s" label following this token. We take
# assign the upcoming label to the current token.
# e.g. the "12" in 12h"
hms_idx = idx + 1
elif (allow_jump and idx+2 < len_l and tokens[idx+1] == ' ' and
info.hms(tokens[idx+2]) is not None):
# There is a space and then an "h", "m", or "s" label.
# e.g. the "12" in "12 h"
hms_idx = idx + 2
elif idx > 0 and info.hms(tokens[idx-1]) is not None:
# There is a "h", "m", or "s" preceeding this token. Since neither
# of the previous cases was hit, there is no label following this
# token, so we use the previous label.
# e.g. the "04" in "12h04"
hms_idx = idx-1
elif (1 < idx == len_l-1 and tokens[idx-1] == ' ' and
info.hms(tokens[idx-2]) is not None):
# If we are looking at the final token, we allow for a
# backward-looking check to skip over a space.
# TODO: Are we sure this is the right condition here?
hms_idx = idx - 2
else:
hms_idx = None
return hms_idx
def _assign_hms(self, res, value_repr, hms):
# See GH issue #427, fixing float rounding
value = self._to_decimal(value_repr)
if hms == 0:
# Hour
res.hour = int(value)
if value % 1:
res.minute = int(60*(value % 1))
elif hms == 1:
(res.minute, res.second) = self._parse_min_sec(value)
elif hms == 2:
(res.second, res.microsecond) = self._parsems(value_repr)
def _could_be_tzname(self, hour, tzname, tzoffset, token):
return (hour is not None and
tzname is None and
tzoffset is None and
len(token) <= 5 and
all(x in string.ascii_uppercase for x in token))
def _ampm_valid(self, hour, ampm, fuzzy):
"""
For fuzzy parsing, 'a' or 'am' (both valid English words)
may erroneously trigger the AM/PM flag. Deal with that
here.
"""
val_is_ampm = True
# If there's already an AM/PM flag, this one isn't one.
if fuzzy and ampm is not None:
val_is_ampm = False
# If AM/PM is found and hour is not, raise a ValueError
if hour is None:
if fuzzy:
val_is_ampm = False
else:
raise ValueError('No hour specified with AM or PM flag.')
elif not 0 <= hour <= 12:
# If AM/PM is found, it's a 12 hour clock, so raise
# an error for invalid range
if fuzzy:
val_is_ampm = False
else:
raise ValueError('Invalid hour specified for 12-hour clock.')
return val_is_ampm
def _adjust_ampm(self, hour, ampm):
if hour < 12 and ampm == 1:
hour += 12
elif hour == 12 and ampm == 0:
hour = 0
return hour
def _parse_min_sec(self, value):
# TODO: Every usage of this function sets res.second to the return
# value. Are there any cases where second will be returned as None and
# we *don't* want to set res.second = None?
minute = int(value)
second = None
sec_remainder = value % 1
if sec_remainder:
second = int(60 * sec_remainder)
return (minute, second)
def _parsems(self, value):
"""Parse a I[.F] seconds value into (seconds, microseconds)."""
if "." not in value:
return int(value), 0
else:
i, f = value.split(".")
return int(i), int(f.ljust(6, "0")[:6])
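# Hedged example (illustrative): _parsems("21.447") -> (21, 447000); the fractional
# part is padded or truncated to six digits of microseconds.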
def _parse_hms(self, idx, tokens, info, hms_idx):
# TODO: Is this going to admit a lot of false-positives for when we
# just happen to have digits and "h", "m" or "s" characters in non-date
# text? I guess hex hashes won't have that problem, but there's plenty
# of random junk out there.
if hms_idx is None:
hms = None
new_idx = idx
elif hms_idx > idx:
hms = info.hms(tokens[hms_idx])
new_idx = hms_idx
else:
# Looking backwards, increment one.
hms = info.hms(tokens[hms_idx]) + 1
new_idx = idx
return (new_idx, hms)
def _recombine_skipped(self, tokens, skipped_idxs):
"""
>>> tokens = ["foo", " ", "bar", " ", "19June2000", "baz"]
>>> skipped_idxs = [0, 1, 2, 5]
>>> _recombine_skipped(tokens, skipped_idxs)
["foo bar", "baz"]
"""
skipped_tokens = []
for i, idx in enumerate(sorted(skipped_idxs)):
if i > 0 and idx - 1 == skipped_idxs[i - 1]:
skipped_tokens[-1] = skipped_tokens[-1] + tokens[idx]
else:
skipped_tokens.append(tokens[idx])
return skipped_tokens
def _build_tzinfo(self, tzinfos, tzname, tzoffset):
if callable(tzinfos):
tzdata = tzinfos(tzname, tzoffset)
else:
tzdata = tzinfos.get(tzname)
# handle the case where tzinfos is passed an option that returns None
# e.g. tzinfos = {'BRST': None}
if isinstance(tzdata, datetime.tzinfo) or tzdata is None:
tzinfo = tzdata
elif isinstance(tzdata, text_type):
tzinfo = tz.tzstr(tzdata)
elif isinstance(tzdata, integer_types):
tzinfo = tz.tzoffset(tzname, tzdata)
return tzinfo
def _build_tzaware(self, naive, res, tzinfos):
if (callable(tzinfos) or (tzinfos and res.tzname in tzinfos)):
tzinfo = self._build_tzinfo(tzinfos, res.tzname, res.tzoffset)
aware = naive.replace(tzinfo=tzinfo)
aware = self._assign_tzname(aware, res.tzname)
elif res.tzname and res.tzname in time.tzname:
aware = naive.replace(tzinfo=tz.tzlocal())
# Handle ambiguous local datetime
aware = self._assign_tzname(aware, res.tzname)
# This is mostly relevant for winter GMT zones parsed in the UK
if (aware.tzname() != res.tzname and
res.tzname in self.info.UTCZONE):
aware = aware.replace(tzinfo=tz.tzutc())
elif res.tzoffset == 0:
aware = naive.replace(tzinfo=tz.tzutc())
elif res.tzoffset:
aware = naive.replace(tzinfo=tz.tzoffset(res.tzname, res.tzoffset))
elif not res.tzname and not res.tzoffset:
# i.e. no timezone information was found.
aware = naive
elif res.tzname:
# tz-like string was parsed but we don't know what to do
# with it
warnings.warn("tzname {tzname} identified but not understood. "
"Pass `tzinfos` argument in order to correctly "
"return a timezone-aware datetime. In a future "
"version, this will raise an "
"exception.".format(tzname=res.tzname),
category=UnknownTimezoneWarning)
aware = naive
return aware
def _build_naive(self, res, default):
repl = {}
for attr in ("year", "month", "day", "hour",
"minute", "second", "microsecond"):
value = getattr(res, attr)
if value is not None:
repl[attr] = value
if 'day' not in repl:
# If the default day exceeds the last day of the month, fall back
# to the end of the month.
cyear = default.year if res.year is None else res.year
cmonth = default.month if res.month is None else res.month
cday = default.day if res.day is None else res.day
if cday > monthrange(cyear, cmonth)[1]:
repl['day'] = monthrange(cyear, cmonth)[1]
naive = default.replace(**repl)
if res.weekday is not None and not res.day:
naive = naive + relativedelta.relativedelta(weekday=res.weekday)
return naive
def _assign_tzname(self, dt, tzname):
if dt.tzname() != tzname:
new_dt = tz.enfold(dt, fold=1)
if new_dt.tzname() == tzname:
return new_dt
return dt
def _to_decimal(self, val):
try:
decimal_value = Decimal(val)
# See GH 662, edge case, infinite value should not be converted via `_to_decimal`
if not decimal_value.is_finite():
raise ValueError("Converted decimal value is infinite or NaN")
except Exception as e:
msg = "Could not convert %s to decimal" % val
six.raise_from(ValueError(msg), e)
else:
return decimal_value
DEFAULTPARSER = parser()
def parse(timestr, parserinfo=None, **kwargs):
"""
Parse a string in one of the supported formats, using the
``parserinfo`` parameters.
:param timestr:
A string containing a date/time stamp.
:param parserinfo:
A :class:`parserinfo` object containing parameters for the parser.
If ``None``, the default arguments to the :class:`parserinfo`
constructor are used.
The ``**kwargs`` parameter takes the following keyword arguments:
:param default:
The default datetime object, if this is a datetime object and not
``None``, elements specified in ``timestr`` replace elements in the
default object.
:param ignoretz:
If set ``True``, time zones in parsed strings are ignored and a naive
:class:`datetime` object is returned.
:param tzinfos:
Additional time zone names / aliases which may be present in the
string. This argument maps time zone names (and optionally offsets
from those time zones) to time zones. This parameter can be a
dictionary with timezone aliases mapping time zone names to time
zones or a function taking two parameters (``tzname`` and
``tzoffset``) and returning a time zone.
The timezones to which the names are mapped can be an integer
offset from UTC in seconds or a :class:`tzinfo` object.
.. doctest::
:options: +NORMALIZE_WHITESPACE
>>> from dateutil.parser import parse
>>> from dateutil.tz import gettz
>>> tzinfos = {"BRST": -7200, "CST": gettz("America/Chicago")}
>>> parse("2012-01-19 17:21:00 BRST", tzinfos=tzinfos)
datetime.datetime(2012, 1, 19, 17, 21, tzinfo=tzoffset(u'BRST', -7200))
>>> parse("2012-01-19 17:21:00 CST", tzinfos=tzinfos)
datetime.datetime(2012, 1, 19, 17, 21,
tzinfo=tzfile('/usr/share/zoneinfo/America/Chicago'))
This parameter is ignored if ``ignoretz`` is set.
:param dayfirst:
Whether to interpret the first value in an ambiguous 3-integer date
(e.g. 01/05/09) as the day (``True``) or month (``False``). If
``yearfirst`` is set to ``True``, this distinguishes between YDM and
YMD. If set to ``None``, this value is retrieved from the current
:class:`parserinfo` object (which itself defaults to ``False``).
:param yearfirst:
Whether to interpret the first value in an ambiguous 3-integer date
(e.g. 01/05/09) as the year. If ``True``, the first number is taken to
be the year, otherwise the last number is taken to be the year. If
this is set to ``None``, the value is retrieved from the current
:class:`parserinfo` object (which itself defaults to ``False``).
:param fuzzy:
Whether to allow fuzzy parsing, allowing for string like "Today is
January 1, 2047 at 8:21:00AM".
:param fuzzy_with_tokens:
If ``True``, ``fuzzy`` is automatically set to True, and the parser
will return a tuple where the first element is the parsed
:class:`datetime.datetime` datetimestamp and the second element is
a tuple containing the portions of the string which were ignored:
.. doctest::
>>> from dateutil.parser import parse
>>> parse("Today is January 1, 2047 at 8:21:00AM", fuzzy_with_tokens=True)
(datetime.datetime(2047, 1, 1, 8, 21), (u'Today is ', u' ', u'at '))
:return:
Returns a :class:`datetime.datetime` object or, if the
``fuzzy_with_tokens`` option is ``True``, returns a tuple, the
first element being a :class:`datetime.datetime` object, the second
a tuple containing the fuzzy tokens.
:raises ValueError:
Raised for invalid or unknown string format, if the provided
:class:`tzinfo` is not in a valid format, or if an invalid date
would be created.
:raises OverflowError:
Raised if the parsed date exceeds the largest valid C integer on
your system.
"""
if parserinfo:
return parser(parserinfo).parse(timestr, **kwargs)
else:
return DEFAULTPARSER.parse(timestr, **kwargs)
class _tzparser(object):
class _result(_resultbase):
__slots__ = ["stdabbr", "stdoffset", "dstabbr", "dstoffset",
"start", "end"]
class _attr(_resultbase):
__slots__ = ["month", "week", "weekday",
"yday", "jyday", "day", "time"]
def __repr__(self):
return self._repr("")
def __init__(self):
_resultbase.__init__(self)
self.start = self._attr()
self.end = self._attr()
def parse(self, tzstr):
res = self._result()
l = [x for x in re.split(r'([,:.]|[a-zA-Z]+|[0-9]+)',tzstr) if x]
used_idxs = list()
try:
len_l = len(l)
i = 0
while i < len_l:
# BRST+3[BRDT[+2]]
j = i
while j < len_l and not [x for x in l[j]
if x in "0123456789:,-+"]:
j += 1
if j != i:
if not res.stdabbr:
offattr = "stdoffset"
res.stdabbr = "".join(l[i:j])
else:
offattr = "dstoffset"
res.dstabbr = "".join(l[i:j])
for ii in range(j):
used_idxs.append(ii)
i = j
if (i < len_l and (l[i] in ('+', '-') or l[i][0] in
"0123456789")):
if l[i] in ('+', '-'):
# Yes, that's right. See the TZ variable
# documentation.
signal = (1, -1)[l[i] == '+']
used_idxs.append(i)
i += 1
else:
signal = -1
len_li = len(l[i])
if len_li == 4:
# -0300
setattr(res, offattr, (int(l[i][:2]) * 3600 +
int(l[i][2:]) * 60) * signal)
elif i + 1 < len_l and l[i + 1] == ':':
# -03:00
setattr(res, offattr,
(int(l[i]) * 3600 +
int(l[i + 2]) * 60) * signal)
used_idxs.append(i)
i += 2
elif len_li <= 2:
# -[0]3
setattr(res, offattr,
int(l[i][:2]) * 3600 * signal)
else:
return None
used_idxs.append(i)
i += 1
if res.dstabbr:
break
else:
break
if i < len_l:
for j in range(i, len_l):
if l[j] == ';':
l[j] = ','
assert l[i] == ','
i += 1
if i >= len_l:
pass
elif (8 <= l.count(',') <= 9 and
not [y for x in l[i:] if x != ','
for y in x if y not in "0123456789+-"]):
# GMT0BST,3,0,30,3600,10,0,26,7200[,3600]
for x in (res.start, res.end):
x.month = int(l[i])
used_idxs.append(i)
i += 2
if l[i] == '-':
value = int(l[i + 1]) * -1
used_idxs.append(i)
i += 1
else:
value = int(l[i])
used_idxs.append(i)
i += 2
if value:
x.week = value
x.weekday = (int(l[i]) - 1) % 7
else:
x.day = int(l[i])
used_idxs.append(i)
i += 2
x.time = int(l[i])
used_idxs.append(i)
i += 2
if i < len_l:
if l[i] in ('-', '+'):
signal = (-1, 1)[l[i] == "+"]
used_idxs.append(i)
i += 1
else:
signal = 1
used_idxs.append(i)
res.dstoffset = (res.stdoffset + int(l[i]) * signal)
# This was a made-up format that is not in normal use
warn(('Parsed time zone "%s"' % tzstr) +
' is in a non-standard dateutil-specific format, which ' +
'is now deprecated; support for parsing this format ' +
'will be removed in future versions. It is recommended ' +
'that you switch to a standard format like the GNU ' +
'TZ variable format.', tz.DeprecatedTzFormatWarning)
elif (l.count(',') == 2 and l[i:].count('/') <= 2 and
not [y for x in l[i:] if x not in (',', '/', 'J', 'M',
'.', '-', ':')
for y in x if y not in "0123456789"]):
for x in (res.start, res.end):
if l[i] == 'J':
# non-leap year day (1 based)
used_idxs.append(i)
i += 1
x.jyday = int(l[i])
elif l[i] == 'M':
# month[-.]week[-.]weekday
used_idxs.append(i)
i += 1
x.month = int(l[i])
used_idxs.append(i)
i += 1
assert l[i] in ('-', '.')
used_idxs.append(i)
i += 1
x.week = int(l[i])
if x.week == 5:
x.week = -1
used_idxs.append(i)
i += 1
assert l[i] in ('-', '.')
used_idxs.append(i)
i += 1
x.weekday = (int(l[i]) - 1) % 7
else:
# year day (zero based)
x.yday = int(l[i]) + 1
used_idxs.append(i)
i += 1
if i < len_l and l[i] == '/':
used_idxs.append(i)
i += 1
# start time
len_li = len(l[i])
if len_li == 4:
# -0300
x.time = (int(l[i][:2]) * 3600 +
int(l[i][2:]) * 60)
elif i + 1 < len_l and l[i + 1] == ':':
# -03:00
x.time = int(l[i]) * 3600 + int(l[i + 2]) * 60
used_idxs.append(i)
i += 2
if i + 1 < len_l and l[i + 1] == ':':
used_idxs.append(i)
i += 2
x.time += int(l[i])
elif len_li <= 2:
# -[0]3
x.time = (int(l[i][:2]) * 3600)
else:
return None
used_idxs.append(i)
i += 1
assert i == len_l or l[i] == ','
i += 1
assert i >= len_l
except (IndexError, ValueError, AssertionError):
return None
unused_idxs = set(range(len_l)).difference(used_idxs)
res.any_unused_tokens = not {l[n] for n in unused_idxs}.issubset({",",":"})
return res
DEFAULTTZPARSER = _tzparser()
def _parsetz(tzstr):
return DEFAULTTZPARSER.parse(tzstr)
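# A minimal usage sketch, not part of dateutil: _parsetz() understands GNU
# TZ-style strings; the example string below is purely illustrative.
def _parsetz_example_sketch():
    res = _parsetz("EST5EDT,M3.2.0/2,M11.1.0/2")
    # res.stdabbr == "EST", res.dstabbr == "EDT", res.stdoffset == -5 * 3600,
    # and res.start / res.end hold the March / November transition rules.
    return res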
class UnknownTimezoneWarning(RuntimeWarning):
"""Raised when the parser finds a timezone it cannot parse into a tzinfo"""
# vim:ts=4:sw=4:et
| gpl-2.0 |
NicovincX2/Python-3.5 | Statistiques/Estimation (statistique)/Régression/Arbre de décision/feature_importance_forests_of_trees.py | 1 | 1381 | # -*- coding: utf-8 -*-
import os
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_classification
from sklearn.ensemble import ExtraTreesClassifier
# Build a classification task using 3 informative features
X, y = make_classification(n_samples=1000,
n_features=10,
n_informative=3,
n_redundant=0,
n_repeated=0,
n_classes=2,
random_state=0,
shuffle=False)
# Build a forest and compute the feature importances
forest = ExtraTreesClassifier(n_estimators=250,
random_state=0)
forest.fit(X, y)
importances = forest.feature_importances_
std = np.std([tree.feature_importances_ for tree in forest.estimators_],
axis=0)
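# The per-tree spread computed above is used as the error bar (yerr) on the
# bar chart below.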
indices = np.argsort(importances)[::-1]
# Print the feature ranking
print("Feature ranking:")
for f in range(X.shape[1]):
print("%d. feature %d (%f)" % (f + 1, indices[f], importances[indices[f]]))
# Plot the feature importances of the forest
plt.figure()
plt.title("Feature importances")
plt.bar(range(X.shape[1]), importances[indices],
color="r", yerr=std[indices], align="center")
plt.xticks(range(X.shape[1]), indices)
plt.xlim([-1, X.shape[1]])
plt.show()
os.system("pause")
| gpl-3.0 |
zedoul/AnomalyDetection | test_discretization/gmm_on_data.py | 1 | 1160 | import numpy as np
import matplotlib.pyplot as plt
from sklearn import mixture
import matplotlib.pyplot
import matplotlib.mlab
samples = 100000
data = np.zeros(samples)
mu, sigma = 0.05, 0.015
data[0:samples/2] = np.random.normal(mu, sigma, (samples/2))
mu, sigma = 0.18, 0.01
data[(samples/2):samples] = np.random.normal(mu, sigma, (samples/2))
clf = mixture.GMM(n_components=2, covariance_type='full', min_covar=0.00001)
clf.fit(data)
m1, m2 = clf.means_
w1, w2 = clf.weights_
c1, c2 = clf.covars_
# show labels
plt.ylabel("Frequency")
plt.xlabel("x")
# show hist
histdist = plt.hist(data, 100, normed=True, alpha=0.2)
# show gaussians
plotgauss1 = lambda x: plt.plot(x,w1*matplotlib.mlab.normpdf(x,m1,np.sqrt(c1))[0], linewidth=2, color='k')
plotgauss2 = lambda x: plt.plot(x,w2*matplotlib.mlab.normpdf(x,m2,np.sqrt(c2))[0], linewidth=2, color='r')
plotgauss1(histdist[1])
plotgauss2(histdist[1])
# predict
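# predict() returns the index of the most likely mixture component for each
# point; predict_proba() returns the posterior responsibility of each component.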
print str(clf.predict([0.05])) + " with " + str(clf.predict_proba([0.05]))
print str(clf.predict([0.20])) + " with " + str(clf.predict_proba([0.20]))
print str(clf.predict([0.13])) + " with " + str(clf.predict_proba([0.13]))
plt.show()
| mit |
xiaoxiamii/scikit-learn | examples/covariance/plot_mahalanobis_distances.py | 348 | 6232 | r"""
================================================================
Robust covariance estimation and Mahalanobis distances relevance
================================================================
An example to show covariance estimation with the Mahalanobis
distances on Gaussian distributed data.
For Gaussian distributed data, the distance of an observation
:math:`x_i` to the mode of the distribution can be computed using its
Mahalanobis distance: :math:`d_{(\mu,\Sigma)}(x_i)^2 = (x_i -
\mu)'\Sigma^{-1}(x_i - \mu)` where :math:`\mu` and :math:`\Sigma` are
the location and the covariance of the underlying Gaussian
distribution.
In practice, :math:`\mu` and :math:`\Sigma` are replaced by some
estimates. The usual covariance maximum likelihood estimate is very
sensitive to the presence of outliers in the data set, and therefore
so are the corresponding Mahalanobis distances. It is better to
use a robust estimator of covariance to guarantee that the estimation is
resistant to "erroneous" observations in the data set and that the
associated Mahalanobis distances accurately reflect the true
organisation of the observations.
The Minimum Covariance Determinant estimator is a robust,
high-breakdown point (i.e. it can be used to estimate the covariance
matrix of highly contaminated datasets, up to
:math:`\frac{n_\text{samples}-n_\text{features}-1}{2}` outliers)
estimator of covariance. The idea is to find
:math:`\frac{n_\text{samples}+n_\text{features}+1}{2}`
observations whose empirical covariance has the smallest determinant,
yielding a "pure" subset of observations from which to compute
standard estimates of location and covariance.
The Minimum Covariance Determinant estimator (MCD) has been introduced
by P. J. Rousseeuw in [1].
This example illustrates how the Mahalanobis distances are affected by
outlying data: observations drawn from a contaminating distribution
are not distinguishable from the observations coming from the real,
Gaussian distribution that one may want to work with. Using MCD-based
Mahalanobis distances, the two populations become
distinguishable. Associated applications include outlier detection,
observation ranking, clustering, ...
For visualization purposes, the cube root of the Mahalanobis distances
is represented in the boxplot, as Wilson and Hilferty suggest [2]
[1] P. J. Rousseeuw. Least median of squares regression. J. Am
Stat Ass, 79:871, 1984.
[2] Wilson, E. B., & Hilferty, M. M. (1931). The distribution of chi-square.
Proceedings of the National Academy of Sciences of the United States
of America, 17, 684-688.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.covariance import EmpiricalCovariance, MinCovDet
n_samples = 125
n_outliers = 25
n_features = 2
# generate data
gen_cov = np.eye(n_features)
gen_cov[0, 0] = 2.
X = np.dot(np.random.randn(n_samples, n_features), gen_cov)
# add some outliers
outliers_cov = np.eye(n_features)
outliers_cov[np.arange(1, n_features), np.arange(1, n_features)] = 7.
X[-n_outliers:] = np.dot(np.random.randn(n_outliers, n_features), outliers_cov)
# fit a Minimum Covariance Determinant (MCD) robust estimator to data
robust_cov = MinCovDet().fit(X)
# compare estimators learnt from the full data set with true parameters
emp_cov = EmpiricalCovariance().fit(X)
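# A minimal outlier-flagging sketch, not part of the original example: squared
# Mahalanobis distances of Gaussian data follow a chi-square law, so points can
# be flagged against a chi-square quantile.  The 97.5% cutoff is an assumed,
# purely illustrative convention, e.g. _flag_outliers_sketch(robust_cov, X).
def _flag_outliers_sketch(fitted_cov_estimator, data, quantile=0.975):
    from scipy.stats import chi2
    cutoff = chi2.ppf(quantile, df=data.shape[1])
    # .mahalanobis() returns squared distances for EmpiricalCovariance and MinCovDet
    return fitted_cov_estimator.mahalanobis(data) > cutoff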
###############################################################################
# Display results
fig = plt.figure()
plt.subplots_adjust(hspace=-.1, wspace=.4, top=.95, bottom=.05)
# Show data set
subfig1 = plt.subplot(3, 1, 1)
inlier_plot = subfig1.scatter(X[:, 0], X[:, 1],
color='black', label='inliers')
outlier_plot = subfig1.scatter(X[:, 0][-n_outliers:], X[:, 1][-n_outliers:],
color='red', label='outliers')
subfig1.set_xlim(subfig1.get_xlim()[0], 11.)
subfig1.set_title("Mahalanobis distances of a contaminated data set:")
# Show contours of the distance functions
xx, yy = np.meshgrid(np.linspace(plt.xlim()[0], plt.xlim()[1], 100),
np.linspace(plt.ylim()[0], plt.ylim()[1], 100))
zz = np.c_[xx.ravel(), yy.ravel()]
mahal_emp_cov = emp_cov.mahalanobis(zz)
mahal_emp_cov = mahal_emp_cov.reshape(xx.shape)
emp_cov_contour = subfig1.contour(xx, yy, np.sqrt(mahal_emp_cov),
cmap=plt.cm.PuBu_r,
linestyles='dashed')
mahal_robust_cov = robust_cov.mahalanobis(zz)
mahal_robust_cov = mahal_robust_cov.reshape(xx.shape)
robust_contour = subfig1.contour(xx, yy, np.sqrt(mahal_robust_cov),
cmap=plt.cm.YlOrBr_r, linestyles='dotted')
subfig1.legend([emp_cov_contour.collections[1], robust_contour.collections[1],
inlier_plot, outlier_plot],
['MLE dist', 'robust dist', 'inliers', 'outliers'],
loc="upper right", borderaxespad=0)
plt.xticks(())
plt.yticks(())
# Plot the scores for each point
emp_mahal = emp_cov.mahalanobis(X - np.mean(X, 0)) ** (0.33)
subfig2 = plt.subplot(2, 2, 3)
subfig2.boxplot([emp_mahal[:-n_outliers], emp_mahal[-n_outliers:]], widths=.25)
subfig2.plot(1.26 * np.ones(n_samples - n_outliers),
emp_mahal[:-n_outliers], '+k', markeredgewidth=1)
subfig2.plot(2.26 * np.ones(n_outliers),
emp_mahal[-n_outliers:], '+k', markeredgewidth=1)
subfig2.axes.set_xticklabels(('inliers', 'outliers'), size=15)
subfig2.set_ylabel(r"$\sqrt[3]{\rm{(Mahal. dist.)}}$", size=16)
subfig2.set_title("1. from non-robust estimates\n(Maximum Likelihood)")
plt.yticks(())
robust_mahal = robust_cov.mahalanobis(X - robust_cov.location_) ** (0.33)
subfig3 = plt.subplot(2, 2, 4)
subfig3.boxplot([robust_mahal[:-n_outliers], robust_mahal[-n_outliers:]],
widths=.25)
subfig3.plot(1.26 * np.ones(n_samples - n_outliers),
robust_mahal[:-n_outliers], '+k', markeredgewidth=1)
subfig3.plot(2.26 * np.ones(n_outliers),
robust_mahal[-n_outliers:], '+k', markeredgewidth=1)
subfig3.axes.set_xticklabels(('inliers', 'outliers'), size=15)
subfig3.set_ylabel(r"$\sqrt[3]{\rm{(Mahal. dist.)}}$", size=16)
subfig3.set_title("2. from robust estimates\n(Minimum Covariance Determinant)")
plt.yticks(())
plt.show()
| bsd-3-clause |
Lawrence-Liu/scikit-learn | sklearn/cluster/spectral.py | 233 | 18153 | # -*- coding: utf-8 -*-
"""Algorithms for spectral clustering"""
# Author: Gael Varoquaux [email protected]
# Brian Cheung
# Wei LI <[email protected]>
# License: BSD 3 clause
import warnings
import numpy as np
from ..base import BaseEstimator, ClusterMixin
from ..utils import check_random_state, as_float_array
from ..utils.validation import check_array
from ..utils.extmath import norm
from ..metrics.pairwise import pairwise_kernels
from ..neighbors import kneighbors_graph
from ..manifold import spectral_embedding
from .k_means_ import k_means
def discretize(vectors, copy=True, max_svd_restarts=30, n_iter_max=20,
random_state=None):
"""Search for a partition matrix (clustering) which is closest to the
eigenvector embedding.
Parameters
----------
vectors : array-like, shape: (n_samples, n_clusters)
The embedding space of the samples.
copy : boolean, optional, default: True
Whether to copy vectors, or perform in-place normalization.
max_svd_restarts : int, optional, default: 30
Maximum number of attempts to restart SVD if convergence fails
n_iter_max : int, optional, default: 20
Maximum number of iterations to attempt in rotation and partition
matrix search if machine precision convergence is not reached
random_state: int seed, RandomState instance, or None (default)
A pseudo random number generator used for the initialization
of the rotation matrix
Returns
-------
labels : array of integers, shape: n_samples
The labels of the clusters.
References
----------
- Multiclass spectral clustering, 2003
Stella X. Yu, Jianbo Shi
http://www1.icsi.berkeley.edu/~stellayu/publication/doc/2003kwayICCV.pdf
Notes
-----
The eigenvector embedding is used to iteratively search for the
closest discrete partition. First, the eigenvector embedding is
normalized to the space of partition matrices. An optimal discrete
partition matrix closest to this normalized embedding multiplied by
an initial rotation is calculated. Fixing this discrete partition
matrix, an optimal rotation matrix is calculated. These two
calculations are performed until convergence. The discrete partition
matrix is returned as the clustering solution. Used in spectral
clustering, this method tends to be faster and more robust to random
initialization than k-means.
"""
from scipy.sparse import csc_matrix
from scipy.linalg import LinAlgError
random_state = check_random_state(random_state)
vectors = as_float_array(vectors, copy=copy)
eps = np.finfo(float).eps
n_samples, n_components = vectors.shape
# Normalize the eigenvectors to an equal length of a vector of ones.
# Reorient the eigenvectors to point in the negative direction with respect
# to the first element. This may have to do with constraining the
# eigenvectors to lie in a specific quadrant to make the discretization
# search easier.
norm_ones = np.sqrt(n_samples)
for i in range(vectors.shape[1]):
vectors[:, i] = (vectors[:, i] / norm(vectors[:, i])) \
* norm_ones
if vectors[0, i] != 0:
vectors[:, i] = -1 * vectors[:, i] * np.sign(vectors[0, i])
# Normalize the rows of the eigenvectors. Samples should lie on the unit
# hypersphere centered at the origin. This transforms the samples in the
# embedding space to the space of partition matrices.
vectors = vectors / np.sqrt((vectors ** 2).sum(axis=1))[:, np.newaxis]
svd_restarts = 0
has_converged = False
# If there is an exception we try to randomize and rerun SVD again
# do this max_svd_restarts times.
while (svd_restarts < max_svd_restarts) and not has_converged:
# Initialize first column of rotation matrix with a row of the
# eigenvectors
rotation = np.zeros((n_components, n_components))
rotation[:, 0] = vectors[random_state.randint(n_samples), :].T
# To initialize the rest of the rotation matrix, find the rows
# of the eigenvectors that are as orthogonal to each other as
# possible
c = np.zeros(n_samples)
for j in range(1, n_components):
# Accumulate c to ensure row is as orthogonal as possible to
# previous picks as well as current one
c += np.abs(np.dot(vectors, rotation[:, j - 1]))
rotation[:, j] = vectors[c.argmin(), :].T
last_objective_value = 0.0
n_iter = 0
while not has_converged:
n_iter += 1
t_discrete = np.dot(vectors, rotation)
labels = t_discrete.argmax(axis=1)
vectors_discrete = csc_matrix(
(np.ones(len(labels)), (np.arange(0, n_samples), labels)),
shape=(n_samples, n_components))
t_svd = vectors_discrete.T * vectors
try:
U, S, Vh = np.linalg.svd(t_svd)
svd_restarts += 1
except LinAlgError:
print("SVD did not converge, randomizing and trying again")
break
ncut_value = 2.0 * (n_samples - S.sum())
if ((abs(ncut_value - last_objective_value) < eps) or
(n_iter > n_iter_max)):
has_converged = True
else:
# otherwise calculate rotation and continue
last_objective_value = ncut_value
rotation = np.dot(Vh.T, U.T)
if not has_converged:
raise LinAlgError('SVD did not converge')
return labels
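# A minimal usage sketch, not part of scikit-learn: discretize() maps a spectral
# embedding to hard cluster labels.  The toy two-blob embedding below is made up
# purely for illustration.
def _discretize_usage_sketch():
    rng = np.random.RandomState(0)
    embedding = np.vstack([rng.normal(0, 0.05, size=(10, 2)) + [1.0, 0.0],
                           rng.normal(0, 0.05, size=(10, 2)) + [0.0, 1.0]])
    return discretize(embedding, random_state=rng)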
def spectral_clustering(affinity, n_clusters=8, n_components=None,
eigen_solver=None, random_state=None, n_init=10,
eigen_tol=0.0, assign_labels='kmeans'):
"""Apply clustering to a projection to the normalized laplacian.
In practice Spectral Clustering is very useful when the structure of
the individual clusters is highly non-convex or more generally when
a measure of the center and spread of the cluster is not a suitable
description of the complete cluster. For instance when clusters are
nested circles on the 2D plane.
If affinity is the adjacency matrix of a graph, this method can be
used to find normalized graph cuts.
Read more in the :ref:`User Guide <spectral_clustering>`.
Parameters
-----------
affinity : array-like or sparse matrix, shape: (n_samples, n_samples)
The affinity matrix describing the relationship of the samples to
embed. **Must be symmetric**.
Possible examples:
- adjacency matrix of a graph,
- heat kernel of the pairwise distance matrix of the samples,
- symmetric k-nearest neighbours connectivity matrix of the samples.
n_clusters : integer, optional
Number of clusters to extract.
n_components : integer, optional, default is n_clusters
Number of eigen vectors to use for the spectral embedding
eigen_solver : {None, 'arpack', 'lobpcg', or 'amg'}
The eigenvalue decomposition strategy to use. AMG requires pyamg
to be installed. It can be faster on very large, sparse problems,
but may also lead to instabilities
random_state : int seed, RandomState instance, or None (default)
A pseudo random number generator used for the initialization
of the lobpcg eigen vectors decomposition when eigen_solver == 'amg'
and by the K-Means initialization.
n_init : int, optional, default: 10
Number of times the k-means algorithm will be run with different
centroid seeds. The final results will be the best output of
n_init consecutive runs in terms of inertia.
eigen_tol : float, optional, default: 0.0
Stopping criterion for eigendecomposition of the Laplacian matrix
when using arpack eigen_solver.
assign_labels : {'kmeans', 'discretize'}, default: 'kmeans'
The strategy to use to assign labels in the embedding
space. There are two ways to assign labels after the laplacian
embedding. k-means can be applied and is a popular choice. But it can
also be sensitive to initialization. Discretization is another
approach which is less sensitive to random initialization. See
the 'Multiclass spectral clustering' paper referenced below for
more details on the discretization approach.
Returns
-------
labels : array of integers, shape: n_samples
The labels of the clusters.
References
----------
- Normalized cuts and image segmentation, 2000
Jianbo Shi, Jitendra Malik
http://citeseer.ist.psu.edu/viewdoc/summary?doi=10.1.1.160.2324
- A Tutorial on Spectral Clustering, 2007
Ulrike von Luxburg
http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.165.9323
- Multiclass spectral clustering, 2003
Stella X. Yu, Jianbo Shi
http://www1.icsi.berkeley.edu/~stellayu/publication/doc/2003kwayICCV.pdf
Notes
------
The graph should contain only one connected component; otherwise
the results make little sense.
This algorithm solves the normalized cut for k=2: it is a
normalized spectral clustering.
"""
if assign_labels not in ('kmeans', 'discretize'):
raise ValueError("The 'assign_labels' parameter should be "
"'kmeans' or 'discretize', but '%s' was given"
% assign_labels)
random_state = check_random_state(random_state)
n_components = n_clusters if n_components is None else n_components
maps = spectral_embedding(affinity, n_components=n_components,
eigen_solver=eigen_solver,
random_state=random_state,
eigen_tol=eigen_tol, drop_first=False)
if assign_labels == 'kmeans':
_, labels, _ = k_means(maps, n_clusters, random_state=random_state,
n_init=n_init)
else:
labels = discretize(maps, random_state=random_state)
return labels
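# A minimal usage sketch, not part of scikit-learn: build a symmetric RBF
# affinity matrix for two toy blobs and cut it with spectral_clustering().
# The blob layout and gamma value are illustrative assumptions only.
def _spectral_clustering_usage_sketch():
    rng = np.random.RandomState(0)
    X = np.vstack([rng.normal(0.0, 0.1, size=(15, 2)),
                   rng.normal(3.0, 0.1, size=(15, 2))])
    affinity = pairwise_kernels(X, metric='rbf', gamma=1.0)
    return spectral_clustering(affinity, n_clusters=2, random_state=0)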
class SpectralClustering(BaseEstimator, ClusterMixin):
"""Apply clustering to a projection to the normalized laplacian.
In practice Spectral Clustering is very useful when the structure of
the individual clusters is highly non-convex or more generally when
a measure of the center and spread of the cluster is not a suitable
description of the complete cluster. For instance when clusters are
nested circles on the 2D plane.
If affinity is the adjacency matrix of a graph, this method can be
used to find normalized graph cuts.
When calling ``fit``, an affinity matrix is constructed using either
a kernel function such as the Gaussian (aka RBF) kernel of the euclidean
distance ``d(X, X)``::
np.exp(-gamma * d(X,X) ** 2)
or a k-nearest neighbors connectivity matrix.
Alternatively, using ``precomputed``, a user-provided affinity
matrix can be used.
Read more in the :ref:`User Guide <spectral_clustering>`.
Parameters
-----------
n_clusters : integer, optional
The dimension of the projection subspace.
affinity : string, array-like or callable, default 'rbf'
If a string, this may be one of 'nearest_neighbors', 'precomputed',
'rbf' or one of the kernels supported by
`sklearn.metrics.pairwise_kernels`.
Only kernels that produce similarity scores (non-negative values that
increase with similarity) should be used. This property is not checked
by the clustering algorithm.
gamma : float
Scaling factor of RBF, polynomial, exponential chi^2 and
sigmoid affinity kernel. Ignored for
``affinity='nearest_neighbors'``.
degree : float, default=3
Degree of the polynomial kernel. Ignored by other kernels.
coef0 : float, default=1
Zero coefficient for polynomial and sigmoid kernels.
Ignored by other kernels.
n_neighbors : integer
Number of neighbors to use when constructing the affinity matrix using
the nearest neighbors method. Ignored for ``affinity='rbf'``.
eigen_solver : {None, 'arpack', 'lobpcg', or 'amg'}
The eigenvalue decomposition strategy to use. AMG requires pyamg
to be installed. It can be faster on very large, sparse problems,
but may also lead to instabilities
random_state : int seed, RandomState instance, or None (default)
A pseudo random number generator used for the initialization
of the lobpcg eigen vectors decomposition when eigen_solver == 'amg'
and by the K-Means initialization.
n_init : int, optional, default: 10
Number of times the k-means algorithm will be run with different
centroid seeds. The final results will be the best output of
n_init consecutive runs in terms of inertia.
eigen_tol : float, optional, default: 0.0
Stopping criterion for eigendecomposition of the Laplacian matrix
when using arpack eigen_solver.
assign_labels : {'kmeans', 'discretize'}, default: 'kmeans'
The strategy to use to assign labels in the embedding
space. There are two ways to assign labels after the laplacian
embedding. k-means can be applied and is a popular choice. But it can
also be sensitive to initialization. Discretization is another approach
which is less sensitive to random initialization.
kernel_params : dictionary of string to any, optional
Parameters (keyword arguments) and values for kernel passed as
callable object. Ignored by other kernels.
Attributes
----------
affinity_matrix_ : array-like, shape (n_samples, n_samples)
Affinity matrix used for clustering. Available only after calling
``fit``.
labels_ :
Labels of each point
Notes
-----
If you have an affinity matrix, such as a distance matrix,
for which 0 means identical elements, and high values means
very dissimilar elements, it can be transformed in a
similarity matrix that is well suited for the algorithm by
applying the Gaussian (RBF, heat) kernel::
np.exp(- X ** 2 / (2. * delta ** 2))
Another alternative is to take a symmetric version of the k
nearest neighbors connectivity matrix of the points.
If the pyamg package is installed, it is used: this greatly
speeds up computation.
References
----------
- Normalized cuts and image segmentation, 2000
Jianbo Shi, Jitendra Malik
http://citeseer.ist.psu.edu/viewdoc/summary?doi=10.1.1.160.2324
- A Tutorial on Spectral Clustering, 2007
Ulrike von Luxburg
http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.165.9323
- Multiclass spectral clustering, 2003
Stella X. Yu, Jianbo Shi
http://www1.icsi.berkeley.edu/~stellayu/publication/doc/2003kwayICCV.pdf
"""
def __init__(self, n_clusters=8, eigen_solver=None, random_state=None,
n_init=10, gamma=1., affinity='rbf', n_neighbors=10,
eigen_tol=0.0, assign_labels='kmeans', degree=3, coef0=1,
kernel_params=None):
self.n_clusters = n_clusters
self.eigen_solver = eigen_solver
self.random_state = random_state
self.n_init = n_init
self.gamma = gamma
self.affinity = affinity
self.n_neighbors = n_neighbors
self.eigen_tol = eigen_tol
self.assign_labels = assign_labels
self.degree = degree
self.coef0 = coef0
self.kernel_params = kernel_params
def fit(self, X, y=None):
"""Creates an affinity matrix for X using the selected affinity,
then applies spectral clustering to this affinity matrix.
Parameters
----------
X : array-like or sparse matrix, shape (n_samples, n_features)
OR, if affinity==`precomputed`, a precomputed affinity
matrix of shape (n_samples, n_samples)
"""
X = check_array(X, accept_sparse=['csr', 'csc', 'coo'],
dtype=np.float64)
if X.shape[0] == X.shape[1] and self.affinity != "precomputed":
warnings.warn("The spectral clustering API has changed. ``fit``"
"now constructs an affinity matrix from data. To use"
" a custom affinity matrix, "
"set ``affinity=precomputed``.")
if self.affinity == 'nearest_neighbors':
connectivity = kneighbors_graph(X, n_neighbors=self.n_neighbors, include_self=True)
self.affinity_matrix_ = 0.5 * (connectivity + connectivity.T)
elif self.affinity == 'precomputed':
self.affinity_matrix_ = X
else:
params = self.kernel_params
if params is None:
params = {}
if not callable(self.affinity):
params['gamma'] = self.gamma
params['degree'] = self.degree
params['coef0'] = self.coef0
self.affinity_matrix_ = pairwise_kernels(X, metric=self.affinity,
filter_params=True,
**params)
random_state = check_random_state(self.random_state)
self.labels_ = spectral_clustering(self.affinity_matrix_,
n_clusters=self.n_clusters,
eigen_solver=self.eigen_solver,
random_state=random_state,
n_init=self.n_init,
eigen_tol=self.eigen_tol,
assign_labels=self.assign_labels)
return self
@property
def _pairwise(self):
return self.affinity == "precomputed"
| bsd-3-clause |
adamgreenhall/scikit-learn | sklearn/tree/export.py | 78 | 15814 | """
This module defines export functions for decision trees.
"""
# Authors: Gilles Louppe <[email protected]>
# Peter Prettenhofer <[email protected]>
# Brian Holt <[email protected]>
# Noel Dawe <[email protected]>
# Satrajit Gosh <[email protected]>
# Trevor Stephens <[email protected]>
# Licence: BSD 3 clause
import numpy as np
from ..externals import six
from . import _criterion
from . import _tree
def _color_brew(n):
"""Generate n colors with equally spaced hues.
Parameters
----------
n : int
The number of colors required.
Returns
-------
color_list : list, length n
List of n tuples of form (R, G, B) being the components of each color.
"""
color_list = []
# Initialize saturation & value; calculate chroma & value shift
s, v = 0.75, 0.9
c = s * v
m = v - c
for h in np.arange(25, 385, 360. / n).astype(int):
# Calculate some intermediate values
h_bar = h / 60.
x = c * (1 - abs((h_bar % 2) - 1))
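# (standard HSV -> RGB conversion: c is the chroma and x the second-largest
# RGB component for this hue sextant)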
# Initialize RGB with same hue & chroma as our color
rgb = [(c, x, 0),
(x, c, 0),
(0, c, x),
(0, x, c),
(x, 0, c),
(c, 0, x),
(c, x, 0)]
r, g, b = rgb[int(h_bar)]
# Shift the initial RGB values to match value and store
rgb = [(int(255 * (r + m))),
(int(255 * (g + m))),
(int(255 * (b + m)))]
color_list.append(rgb)
return color_list
def export_graphviz(decision_tree, out_file="tree.dot", max_depth=None,
feature_names=None, class_names=None, label='all',
filled=False, leaves_parallel=False, impurity=True,
node_ids=False, proportion=False, rotate=False,
rounded=False, special_characters=False):
"""Export a decision tree in DOT format.
This function generates a GraphViz representation of the decision tree,
which is then written into `out_file`. Once exported, graphical renderings
can be generated using, for example::
$ dot -Tps tree.dot -o tree.ps (PostScript format)
$ dot -Tpng tree.dot -o tree.png (PNG format)
The sample counts that are shown are weighted with any sample_weights that
might be present.
Read more in the :ref:`User Guide <tree>`.
Parameters
----------
decision_tree : decision tree classifier
The decision tree to be exported to GraphViz.
out_file : file object or string, optional (default="tree.dot")
Handle or name of the output file.
max_depth : int, optional (default=None)
The maximum depth of the representation. If None, the tree is fully
generated.
feature_names : list of strings, optional (default=None)
Names of each of the features.
class_names : list of strings, bool or None, optional (default=None)
Names of each of the target classes in ascending numerical order.
Only relevant for classification and not supported for multi-output.
If ``True``, shows a symbolic representation of the class name.
label : {'all', 'root', 'none'}, optional (default='all')
Whether to show informative labels for impurity, etc.
Options include 'all' to show at every node, 'root' to show only at
the top root node, or 'none' to not show at any node.
filled : bool, optional (default=False)
When set to ``True``, paint nodes to indicate majority class for
classification, extremity of values for regression, or purity of node
for multi-output.
leaves_parallel : bool, optional (default=False)
When set to ``True``, draw all leaf nodes at the bottom of the tree.
impurity : bool, optional (default=True)
When set to ``True``, show the impurity at each node.
node_ids : bool, optional (default=False)
When set to ``True``, show the ID number on each node.
proportion : bool, optional (default=False)
When set to ``True``, change the display of 'values' and/or 'samples'
to be proportions and percentages respectively.
rotate : bool, optional (default=False)
When set to ``True``, orient tree left to right rather than top-down.
rounded : bool, optional (default=False)
When set to ``True``, draw node boxes with rounded corners and use
Helvetica fonts instead of Times-Roman.
special_characters : bool, optional (default=False)
When set to ``False``, ignore special characters for PostScript
compatibility.
Examples
--------
>>> from sklearn.datasets import load_iris
>>> from sklearn import tree
>>> clf = tree.DecisionTreeClassifier()
>>> iris = load_iris()
>>> clf = clf.fit(iris.data, iris.target)
>>> tree.export_graphviz(clf,
... out_file='tree.dot') # doctest: +SKIP
"""
def get_color(value):
# Find the appropriate color & intensity for a node
if colors['bounds'] is None:
# Classification tree
color = list(colors['rgb'][np.argmax(value)])
sorted_values = sorted(value, reverse=True)
alpha = int(255 * (sorted_values[0] - sorted_values[1]) /
(1 - sorted_values[1]))
else:
# Regression tree or multi-output
color = list(colors['rgb'][0])
alpha = int(255 * ((value - colors['bounds'][0]) /
(colors['bounds'][1] - colors['bounds'][0])))
# Return html color code in #RRGGBBAA format
color.append(alpha)
hex_codes = [str(i) for i in range(10)]
hex_codes.extend(['a', 'b', 'c', 'd', 'e', 'f'])
color = [hex_codes[c // 16] + hex_codes[c % 16] for c in color]
return '#' + ''.join(color)
def node_to_str(tree, node_id, criterion):
# Generate the node content string
if tree.n_outputs == 1:
value = tree.value[node_id][0, :]
else:
value = tree.value[node_id]
# Should labels be shown?
labels = (label == 'root' and node_id == 0) or label == 'all'
# PostScript compatibility for special characters
if special_characters:
characters = ['&#35;', '<SUB>', '</SUB>', '&le;', '<br/>', '>']
node_string = '<'
else:
characters = ['#', '[', ']', '<=', '\\n', '"']
node_string = '"'
# Write node ID
if node_ids:
if labels:
node_string += 'node '
node_string += characters[0] + str(node_id) + characters[4]
# Write decision criteria
if tree.children_left[node_id] != _tree.TREE_LEAF:
# Always write node decision criteria, except for leaves
if feature_names is not None:
feature = feature_names[tree.feature[node_id]]
else:
feature = "X%s%s%s" % (characters[1],
tree.feature[node_id],
characters[2])
node_string += '%s %s %s%s' % (feature,
characters[3],
round(tree.threshold[node_id], 4),
characters[4])
# Write impurity
if impurity:
if isinstance(criterion, _criterion.FriedmanMSE):
criterion = "friedman_mse"
elif not isinstance(criterion, six.string_types):
criterion = "impurity"
if labels:
node_string += '%s = ' % criterion
node_string += (str(round(tree.impurity[node_id], 4)) +
characters[4])
# Write node sample count
if labels:
node_string += 'samples = '
if proportion:
percent = (100. * tree.n_node_samples[node_id] /
float(tree.n_node_samples[0]))
node_string += (str(round(percent, 1)) + '%' +
characters[4])
else:
node_string += (str(tree.n_node_samples[node_id]) +
characters[4])
# Write node class distribution / regression value
if proportion and tree.n_classes[0] != 1:
# For classification this will show the proportion of samples
value = value / tree.weighted_n_node_samples[node_id]
if labels:
node_string += 'value = '
if tree.n_classes[0] == 1:
# Regression
value_text = np.around(value, 4)
elif proportion:
# Classification
value_text = np.around(value, 2)
elif np.all(np.equal(np.mod(value, 1), 0)):
# Classification without floating-point weights
value_text = value.astype(int)
else:
# Classification with floating-point weights
value_text = np.around(value, 4)
# Strip whitespace
value_text = str(value_text.astype('S32')).replace("b'", "'")
value_text = value_text.replace("' '", ", ").replace("'", "")
if tree.n_classes[0] == 1 and tree.n_outputs == 1:
value_text = value_text.replace("[", "").replace("]", "")
value_text = value_text.replace("\n ", characters[4])
node_string += value_text + characters[4]
# Write node majority class
if (class_names is not None and
tree.n_classes[0] != 1 and
tree.n_outputs == 1):
# Only done for single-output classification trees
if labels:
node_string += 'class = '
if class_names is not True:
class_name = class_names[np.argmax(value)]
else:
class_name = "y%s%s%s" % (characters[1],
np.argmax(value),
characters[2])
node_string += class_name
# Clean up any trailing newlines
if node_string[-2:] == '\\n':
node_string = node_string[:-2]
if node_string[-5:] == '<br/>':
node_string = node_string[:-5]
return node_string + characters[5]
def recurse(tree, node_id, criterion, parent=None, depth=0):
if node_id == _tree.TREE_LEAF:
raise ValueError("Invalid node_id %s" % _tree.TREE_LEAF)
left_child = tree.children_left[node_id]
right_child = tree.children_right[node_id]
# Add node with description
if max_depth is None or depth <= max_depth:
# Collect ranks for 'leaf' option in plot_options
if left_child == _tree.TREE_LEAF:
ranks['leaves'].append(str(node_id))
elif str(depth) not in ranks:
ranks[str(depth)] = [str(node_id)]
else:
ranks[str(depth)].append(str(node_id))
out_file.write('%d [label=%s'
% (node_id,
node_to_str(tree, node_id, criterion)))
if filled:
# Fetch appropriate color for node
if 'rgb' not in colors:
# Initialize colors and bounds if required
colors['rgb'] = _color_brew(tree.n_classes[0])
if tree.n_outputs != 1:
# Find max and min impurities for multi-output
colors['bounds'] = (np.min(-tree.impurity),
np.max(-tree.impurity))
elif tree.n_classes[0] == 1:
# Find max and min values in leaf nodes for regression
colors['bounds'] = (np.min(tree.value),
np.max(tree.value))
if tree.n_outputs == 1:
node_val = (tree.value[node_id][0, :] /
tree.weighted_n_node_samples[node_id])
if tree.n_classes[0] == 1:
# Regression
node_val = tree.value[node_id][0, :]
else:
# If multi-output color node by impurity
node_val = -tree.impurity[node_id]
out_file.write(', fillcolor="%s"' % get_color(node_val))
out_file.write('] ;\n')
if parent is not None:
# Add edge to parent
out_file.write('%d -> %d' % (parent, node_id))
if parent == 0:
# Draw True/False labels if parent is root node
angles = np.array([45, -45]) * ((rotate - .5) * -2)
out_file.write(' [labeldistance=2.5, labelangle=')
if node_id == 1:
out_file.write('%d, headlabel="True"]' % angles[0])
else:
out_file.write('%d, headlabel="False"]' % angles[1])
out_file.write(' ;\n')
if left_child != _tree.TREE_LEAF:
recurse(tree, left_child, criterion=criterion, parent=node_id,
depth=depth + 1)
recurse(tree, right_child, criterion=criterion, parent=node_id,
depth=depth + 1)
else:
ranks['leaves'].append(str(node_id))
out_file.write('%d [label="(...)"' % node_id)
if filled:
# color cropped nodes grey
out_file.write(', fillcolor="#C0C0C0"')
out_file.write('] ;\n')
if parent is not None:
# Add edge to parent
out_file.write('%d -> %d ;\n' % (parent, node_id))
own_file = False
try:
if isinstance(out_file, six.string_types):
if six.PY3:
out_file = open(out_file, "w", encoding="utf-8")
else:
out_file = open(out_file, "wb")
own_file = True
# The depth of each node for plotting with 'leaf' option
ranks = {'leaves': []}
# The colors to render each node with
colors = {'bounds': None}
out_file.write('digraph Tree {\n')
# Specify node aesthetics
out_file.write('node [shape=box')
rounded_filled = []
if filled:
rounded_filled.append('filled')
if rounded:
rounded_filled.append('rounded')
if len(rounded_filled) > 0:
out_file.write(', style="%s", color="black"'
% ", ".join(rounded_filled))
if rounded:
out_file.write(', fontname=helvetica')
out_file.write('] ;\n')
# Specify graph & edge aesthetics
if leaves_parallel:
out_file.write('graph [ranksep=equally, splines=polyline] ;\n')
if rounded:
out_file.write('edge [fontname=helvetica] ;\n')
if rotate:
out_file.write('rankdir=LR ;\n')
# Now recurse the tree and add node & edge attributes
if isinstance(decision_tree, _tree.Tree):
recurse(decision_tree, 0, criterion="impurity")
else:
recurse(decision_tree.tree_, 0, criterion=decision_tree.criterion)
# If required, draw leaf nodes at same depth as each other
if leaves_parallel:
for rank in sorted(ranks):
out_file.write("{rank=same ; " +
"; ".join(r for r in ranks[rank]) + "} ;\n")
out_file.write("}")
finally:
if own_file:
out_file.close()
| bsd-3-clause |
lmjohns3/speech-experiment | models/train-rica.py | 1 | 2008 | import climate
import lmj.plot as plt
import numpy as np
import seaborn as sns
import theanets
logging = climate.get_logger('rica')
import models
climate.add_arg('--codebook', metavar='FILE', help='save codebook to FILE')
climate.add_arg('--frames', type=int, metavar='T', help='train on sequences of T frames')
climate.add_arg('--overcomplete', type=float, default=2, metavar='K',
help='learn a Kx overcomplete codebook')
def main(args):
data = np.load(args.dataset, mmap_mode='r')
N = data.shape[1]
T = args.frames
K = int(N * T * args.overcomplete)
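# K is the number of codebook filters: an overcompleteness factor > 1 yields
# more filters than the N * T input dimensions.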
def batches():
batch = np.zeros((args.batch_size, N * T), 'f')
for b in range(args.batch_size):
o = np.random.randint(len(data) - T - 1)
batch[b] = data[o:o+T].ravel()
return [batch]
net = theanets.Autoencoder([N * T, (K, 'linear'), (N * T, 'tied')])
net.train(batches,
monitors={'hid1:out': (-0.1, -0.01, 0.01, 0.1)},
**models.kwargs_from_args(args))
D = net.find('hid1', 'w').get_value().T
R = 6
C = 18
dimg = np.zeros((R * (N + 1) - 1, C * (T + 1) - 1), float)
bimg = np.zeros((R * (N + 1) - 1, C * (T + 1) - 1), float)
idx = abs(D).max(axis=1).argsort()[::-1]
for r in range(R):
for c in range(C):
o = np.random.randint(len(data) - T - 1)
dimg[r*(N+1):r*(N+1)+N, c*(T+1):c*(T+1)+T] = data[o:o+T].T
bimg[r*(N+1):r*(N+1)+N, c*(T+1):c*(T+1)+T] = D[idx[r * C + c]].reshape((T, N)).T
_, (dax, bax) = plt.subplots(1, 2)
dax.imshow(dimg, cmap='coolwarm')
sns.despine(ax=dax, left=True, bottom=True)
dax.set_xticks([])
dax.set_yticks([])
bax.imshow(bimg, cmap='coolwarm')
sns.despine(ax=bax, left=True, bottom=True)
bax.set_xticks([])
bax.set_yticks([])
plt.show()
logging.info('%s: saving %s %s', args.codebook, D.shape, D.dtype)
np.save(args.codebook, D)
if __name__ == '__main__':
climate.call(main)
| mit |
tongwang01/tensorflow | tensorflow/contrib/learn/python/learn/estimators/_sklearn.py | 153 | 6723 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""sklearn cross-support."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import os
import numpy as np
import six
def _pprint(d):
return ', '.join(['%s=%s' % (key, str(value)) for key, value in d.items()])
class _BaseEstimator(object):
"""This is a cross-import when sklearn is not available.
Adapted from sklearn.BaseEstimator implementation.
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/base.py
"""
def get_params(self, deep=True):
"""Get parameters for this estimator.
Args:
deep: boolean, optional
If `True`, will return the parameters for this estimator and
contained subobjects that are estimators.
Returns:
params : mapping of string to any
Parameter names mapped to their values.
"""
out = dict()
param_names = [name for name in self.__dict__ if not name.startswith('_')]
for key in param_names:
value = getattr(self, key, None)
if isinstance(value, collections.Callable):
continue
# XXX: should we rather test if instance of estimator?
if deep and hasattr(value, 'get_params'):
deep_items = value.get_params().items()
out.update((key + '__' + k, val) for k, val in deep_items)
out[key] = value
return out
def set_params(self, **params):
"""Set the parameters of this estimator.
The method works on simple estimators as well as on nested objects
(such as pipelines). The former have parameters of the form
``<component>__<parameter>`` so that it's possible to update each
component of a nested object.
Args:
**params: Parameters.
Returns:
self
Raises:
ValueError: If params contain invalid names.
"""
if not params:
# Simple optimisation to gain speed (inspect is slow)
return self
valid_params = self.get_params(deep=True)
for key, value in six.iteritems(params):
split = key.split('__', 1)
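# e.g. 'clf__C' splits into ('clf', 'C'): the value is routed to the nested
# estimator stored under the 'clf' parameter of this estimator.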
if len(split) > 1:
# nested objects case
name, sub_name = split
if name not in valid_params:
raise ValueError('Invalid parameter %s for estimator %s. '
'Check the list of available parameters '
'with `estimator.get_params().keys()`.' %
(name, self))
sub_object = valid_params[name]
sub_object.set_params(**{sub_name: value})
else:
# simple objects case
if key not in valid_params:
raise ValueError('Invalid parameter %s for estimator %s. '
'Check the list of available parameters '
'with `estimator.get_params().keys()`.' %
(key, self.__class__.__name__))
setattr(self, key, value)
return self
def __repr__(self):
class_name = self.__class__.__name__
return '%s(%s)' % (class_name,
_pprint(self.get_params(deep=False)),)
# pylint: disable=old-style-class
class _ClassifierMixin():
"""Mixin class for all classifiers."""
pass
class _RegressorMixin():
"""Mixin class for all regression estimators."""
pass
class _TransformerMixin():
"""Mixin class for all transformer estimators."""
class NotFittedError(ValueError, AttributeError):
"""Exception class to raise if estimator is used before fitting.
This class inherits from both ValueError and AttributeError to help with
exception handling and backward compatibility.
Examples:
>>> from sklearn.svm import LinearSVC
>>> from sklearn.exceptions import NotFittedError
>>> try:
... LinearSVC().predict([[1, 2], [2, 3], [3, 4]])
... except NotFittedError as e:
... print(repr(e))
... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
NotFittedError('This LinearSVC instance is not fitted yet',)
Copied from
https://github.com/scikit-learn/scikit-learn/master/sklearn/exceptions.py
"""
# pylint: enable=old-style-class
def _accuracy_score(y_true, y_pred):
score = y_true == y_pred
return np.average(score)
def _mean_squared_error(y_true, y_pred):
if len(y_true.shape) > 1:
y_true = np.squeeze(y_true)
if len(y_pred.shape) > 1:
y_pred = np.squeeze(y_pred)
return np.average((y_true - y_pred)**2)
def _train_test_split(*args, **options):
# pylint: disable=missing-docstring
test_size = options.pop('test_size', None)
train_size = options.pop('train_size', None)
random_state = options.pop('random_state', None)
if test_size is None and train_size is None:
train_size = 0.75
elif train_size is None:
train_size = 1 - test_size
train_size = int(train_size * args[0].shape[0])
np.random.seed(random_state)
indices = np.random.permutation(args[0].shape[0])
train_idx, test_idx = indices[:train_size], indices[train_size:]
result = []
for x in args:
result += [x.take(train_idx, axis=0), x.take(test_idx, axis=0)]
return tuple(result)
# If "TENSORFLOW_SKLEARN" flag is defined then try to import from sklearn.
TRY_IMPORT_SKLEARN = os.environ.get('TENSORFLOW_SKLEARN', False)
if TRY_IMPORT_SKLEARN:
# pylint: disable=g-import-not-at-top,g-multiple-import,unused-import
from sklearn.base import BaseEstimator, ClassifierMixin, RegressorMixin, TransformerMixin
from sklearn.metrics import accuracy_score, log_loss, mean_squared_error
from sklearn.cross_validation import train_test_split
try:
from sklearn.exceptions import NotFittedError
except ImportError:
try:
from sklearn.utils.validation import NotFittedError
except ImportError:
pass
else:
# Naive implementations of sklearn classes and functions.
BaseEstimator = _BaseEstimator
ClassifierMixin = _ClassifierMixin
RegressorMixin = _RegressorMixin
TransformerMixin = _TransformerMixin
accuracy_score = _accuracy_score
log_loss = None
mean_squared_error = _mean_squared_error
train_test_split = _train_test_split
| apache-2.0 |
hitszxp/scikit-learn | benchmarks/bench_lasso.py | 297 | 3305 | """
Benchmarks of Lasso vs LassoLars
First, we fix a training set and increase the number of
samples. Then we plot the computation time as a function of
the number of samples.
In the second benchmark, we increase the number of dimensions of the
training set. Then we plot the computation time as a function of
the number of dimensions.
In both cases, only 10% of the features are informative.
"""
import gc
from time import time
import numpy as np
from sklearn.datasets.samples_generator import make_regression
def compute_bench(alpha, n_samples, n_features, precompute):
lasso_results = []
lars_lasso_results = []
it = 0
for ns in n_samples:
for nf in n_features:
it += 1
print('==================')
print('Iteration %s of %s' % (it, max(len(n_samples),
len(n_features))))
print('==================')
n_informative = nf // 10
X, Y, coef_ = make_regression(n_samples=ns, n_features=nf,
n_informative=n_informative,
noise=0.1, coef=True)
X /= np.sqrt(np.sum(X ** 2, axis=0)) # Normalize data
gc.collect()
print("- benchmarking Lasso")
clf = Lasso(alpha=alpha, fit_intercept=False,
precompute=precompute)
tstart = time()
clf.fit(X, Y)
lasso_results.append(time() - tstart)
gc.collect()
print("- benchmarking LassoLars")
clf = LassoLars(alpha=alpha, fit_intercept=False,
normalize=False, precompute=precompute)
tstart = time()
clf.fit(X, Y)
lars_lasso_results.append(time() - tstart)
return lasso_results, lars_lasso_results
if __name__ == '__main__':
from sklearn.linear_model import Lasso, LassoLars
import pylab as pl
alpha = 0.01 # regularization parameter
n_features = 10
list_n_samples = np.linspace(100, 1000000, 5).astype(np.int)
lasso_results, lars_lasso_results = compute_bench(alpha, list_n_samples,
[n_features], precompute=True)
pl.figure('scikit-learn LASSO benchmark results')
pl.subplot(211)
pl.plot(list_n_samples, lasso_results, 'b-',
label='Lasso')
pl.plot(list_n_samples, lars_lasso_results, 'r-',
label='LassoLars')
pl.title('precomputed Gram matrix, %d features, alpha=%s' % (n_features, alpha))
pl.legend(loc='upper left')
pl.xlabel('number of samples')
pl.ylabel('Time (s)')
pl.axis('tight')
n_samples = 2000
list_n_features = np.linspace(500, 3000, 5).astype(np.int)
lasso_results, lars_lasso_results = compute_bench(alpha, [n_samples],
list_n_features, precompute=False)
pl.subplot(212)
pl.plot(list_n_features, lasso_results, 'b-', label='Lasso')
pl.plot(list_n_features, lars_lasso_results, 'r-', label='LassoLars')
pl.title('%d samples, alpha=%s' % (n_samples, alpha))
pl.legend(loc='upper left')
pl.xlabel('number of features')
pl.ylabel('Time (s)')
pl.axis('tight')
pl.show()
| bsd-3-clause |
hjanime/VisTrails | vistrails/packages/matplotlib/mixins.py | 1 | 3926 | ###############################################################################
##
## Copyright (C) 2014-2015, New York University.
## Copyright (C) 2011-2014, NYU-Poly.
## Copyright (C) 2006-2011, University of Utah.
## All rights reserved.
## Contact: [email protected]
##
## This file is part of VisTrails.
##
## "Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are met:
##
## - Redistributions of source code must retain the above copyright notice,
## this list of conditions and the following disclaimer.
## - Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in the
## documentation and/or other materials provided with the distribution.
## - Neither the name of the New York University nor the names of its
## contributors may be used to endorse or promote products derived from
## this software without specific prior written permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
## AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
## THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
## PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
## CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
## EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
## PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
## OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
## WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
## OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
## ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
##
###############################################################################
from __future__ import division
class MplCorrBaseMixin(object):
def compute_after():
if 'usevlines' in kwargs and kwargs['usevlines']:
output = output + (output[2],)
else:
output = output + (None, None)
class MplAcorrMixin(MplCorrBaseMixin):
pass
class MplXcorrMixin(MplCorrBaseMixin):
pass
class MplBoxplotMixin(object):
def compute_after():
if 'patch_artist' in kwargs and kwargs['patch_artist']:
output['boxPatches'] = output['boxes']
output['boxes'] = []
else:
output['boxPatches'] = []
class MplContourBaseMixin(object):
def compute_before():
if self.has_input("N") and self.has_input("V"):
del args[-1]
class MplContourMixin(MplContourBaseMixin):
def compute_inner():
contour_set = matplotlib.pyplot.contour(*args, **kwargs)
output = (contour_set, contour_set.collections)
class MplContourfMixin(MplContourBaseMixin):
def compute_inner():
contour_set = matplotlib.pyplot.contourf(*args, **kwargs)
output = (contour_set, contour_set.collections)
class MplPieMixin(object):
def compute_after():
if len(output) < 3:
output = output + ([],)
class MplAnnotateMixin(object):
def compute_before():
if self.has_input("fancyArrowProperties"):
kwargs['arrowprops'] = \
self.get_input("fancyArrowProperties").props
elif self.has_input("arrowProperties"):
kwargs['arrowprops'] = \
self.get_input("arrowProperties").props
class MplSpyMixin(object):
def compute_after():
if "marker" not in kwargs and "markersize" not in kwargs and \
not hasattr(kwargs["Z"], 'tocoo'):
output = (output, None)
else:
output = (None, output)
class MplBarMixin(object):
def compute_before(self):
if not kwargs.has_key('left'):
kwargs['left'] = range(len(kwargs['height']))
| bsd-3-clause |
wangtuanjie/airflow | airflow/hooks/dbapi_hook.py | 2 | 5136 | from builtins import str
from past.builtins import basestring
from datetime import datetime
import numpy
import logging
from airflow.hooks.base_hook import BaseHook
from airflow.utils import AirflowException
class DbApiHook(BaseHook):
"""
Abstract base class for sql hooks.
"""
# Override to provide the connection name.
conn_name_attr = None
# Override to have a default connection id for a particular dbHook
default_conn_name = 'default_conn_id'
# Override if this db supports autocommit.
supports_autocommit = False
# Override with the object that exposes the connect method
connector = None
# Whether the db supports a special type of autocommit
supports_autocommit = False
def __init__(self, *args, **kwargs):
if not self.conn_name_attr:
raise AirflowException("conn_name_attr is not defined")
elif len(args) == 1:
setattr(self, self.conn_name_attr, args[0])
elif self.conn_name_attr not in kwargs:
setattr(self, self.conn_name_attr, self.default_conn_name)
else:
setattr(self, self.conn_name_attr, kwargs[self.conn_name_attr])
def get_conn(self):
"""Returns a connection object"""
db = self.get_connection(getattr(self, self.conn_name_attr))
return self.connector.connect(
host=db.host,
port=db.port,
username=db.login,
schema=db.schema)
def get_pandas_df(self, sql, parameters=None):
'''
Executes the sql and returns a pandas dataframe
'''
import pandas.io.sql as psql
conn = self.get_conn()
df = psql.read_sql(sql, con=conn, params=parameters)
conn.close()
return df
def get_records(self, sql, parameters=None):
'''
Executes the sql and returns a set of records.
'''
conn = self.get_conn()
cur = self.get_cursor()
cur.execute(sql, parameters)
rows = cur.fetchall()
cur.close()
conn.close()
return rows
def get_first(self, sql, parameters=None):
'''
Executes the sql and returns the first resulting row.
'''
conn = self.get_conn()
cur = conn.cursor()
cur.execute(sql, parameters)
rows = cur.fetchone()
cur.close()
conn.close()
return rows
def run(self, sql, autocommit=False, parameters=None):
"""
Runs a command or a list of commands. Pass a list of sql
statements to the sql parameter to get them to execute
sequentially
:param sql: the sql statement to be executed (str) or a list of
sql statements to execute
:type sql: str or list
"""
conn = self.get_conn()
if isinstance(sql, basestring):
sql = [sql]
if self.supports_autocommit:
self.set_autocommit(conn, autocommit)
cur = conn.cursor()
for s in sql:
cur.execute(s, parameters)
conn.commit()
cur.close()
conn.close()
def set_autocommit(self, conn, autocommit):
conn.autocommit = autocommit
def get_cursor(self):
"""Returns a cursor"""
return self.get_conn().cursor()
def insert_rows(self, table, rows, target_fields=None, commit_every=1000):
"""
A generic way to insert a set of tuples into a table;
the whole set of inserts is treated as one transaction.
"""
if target_fields:
target_fields = ", ".join(target_fields)
target_fields = "({})".format(target_fields)
else:
target_fields = ''
conn = self.get_conn()
cur = conn.cursor()
if self.supports_autocommit:
cur.execute('SET autocommit = 0')
conn.commit()
i = 0
for row in rows:
i += 1
l = []
for cell in row:
if isinstance(cell, basestring):
l.append("'" + str(cell).replace("'", "''") + "'")
elif cell is None:
l.append('NULL')
elif isinstance(cell, numpy.datetime64):
l.append("'" + str(cell) + "'")
elif isinstance(cell, datetime):
l.append("'" + cell.isoformat() + "'")
else:
l.append(str(cell))
values = tuple(l)
sql = "INSERT INTO {0} {1} VALUES ({2});".format(
table,
target_fields,
",".join(values))
cur.execute(sql)
if i % commit_every == 0:
conn.commit()
logging.info(
"Loaded {i} into {table} rows so far".format(**locals()))
conn.commit()
cur.close()
conn.close()
logging.info(
"Done loading. Loaded a total of {i} rows".format(**locals()))
def get_conn(self):
"""
Returns a sql connection that can be used to retrieve a cursor.
"""
raise NotImplementedError()
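# A minimal sketch of a concrete hook, not part of Airflow: subclasses are
# expected to set conn_name_attr / default_conn_name and provide get_conn().
# The class name, connection id and use of sqlite3 are illustrative assumptions.
class _ExampleSqliteHook(DbApiHook):
    conn_name_attr = 'example_sqlite_conn_id'
    default_conn_name = 'example_sqlite_default'
    supports_autocommit = False
    def get_conn(self):
        import sqlite3
        db = self.get_connection(getattr(self, self.conn_name_attr))
        # sqlite3 only needs a path; real drivers would also use db.port,
        # db.login and db.schema here.
        return sqlite3.connect(db.host or ':memory:')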
| apache-2.0 |
kcmartin/ThinkStats2 | code/chap12soln.py | 68 | 4459 | """This file contains code for use with "Think Stats",
by Allen B. Downey, available from greenteapress.com
Copyright 2014 Allen B. Downey
License: GNU GPLv3 http://www.gnu.org/licenses/gpl.html
"""
from __future__ import print_function
import pandas
import numpy as np
import statsmodels.formula.api as smf
import thinkplot
import thinkstats2
import regression
import timeseries
def RunQuadraticModel(daily):
"""Runs a linear model of prices versus years.
daily: DataFrame of daily prices
returns: model, results
"""
daily['years2'] = daily.years**2
model = smf.ols('ppg ~ years + years2', data=daily)
results = model.fit()
return model, results
def PlotQuadraticModel(daily, name):
"""
"""
model, results = RunQuadraticModel(daily)
regression.SummarizeResults(results)
timeseries.PlotFittedValues(model, results, label=name)
thinkplot.Save(root='timeseries11',
title='fitted values',
xlabel='years',
xlim=[-0.1, 3.8],
ylabel='price per gram ($)')
timeseries.PlotResidualPercentiles(model, results)
thinkplot.Save(root='timeseries12',
title='residuals',
xlabel='years',
ylabel='price per gram ($)')
years = np.linspace(0, 5, 101)
thinkplot.Scatter(daily.years, daily.ppg, alpha=0.1, label=name)
timeseries.PlotPredictions(daily, years, func=RunQuadraticModel)
thinkplot.Save(root='timeseries13',
title='predictions',
xlabel='years',
xlim=[years[0]-0.1, years[-1]+0.1],
ylabel='price per gram ($)')
def PlotEwmaPredictions(daily, name):
"""
"""
# use EWMA to estimate slopes
filled = timeseries.FillMissing(daily)
filled['slope'] = pandas.ewma(filled.ppg.diff(), span=180)
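# The exponentially weighted moving average of the day-to-day price differences
# serves as a smoothed estimate of the local slope.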
filled[-1:]
# extract the last inter and slope
start = filled.index[-1]
inter = filled.ewma[-1]
slope = filled.slope[-1]
# reindex the DataFrame, adding a year to the end
dates = pandas.date_range(filled.index.min(),
filled.index.max() + np.timedelta64(365, 'D'))
predicted = filled.reindex(dates)
# generate predicted values and add them to the end
predicted['date'] = predicted.index
one_day = np.timedelta64(1, 'D')
predicted['days'] = (predicted.date - start) / one_day
predict = inter + slope * predicted.days
predicted.ewma.fillna(predict, inplace=True)
# plot the actual values and predictions
thinkplot.Scatter(daily.ppg, alpha=0.1, label=name)
thinkplot.Plot(predicted.ewma)
thinkplot.Save()
class SerialCorrelationTest(thinkstats2.HypothesisTest):
"""Tests serial correlations by permutation."""
def TestStatistic(self, data):
"""Computes the test statistic.
data: tuple of xs and ys
"""
series, lag = data
test_stat = abs(thinkstats2.SerialCorr(series, lag))
return test_stat
def RunModel(self):
"""Run the model of the null hypothesis.
returns: simulated data
"""
series, lag = self.data
permutation = series.reindex(np.random.permutation(series.index))
return permutation, lag
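# Illustrative sketch (not part of the original solution): running the
# permutation test on synthetic white noise, which has no real serial
# correlation, should typically yield a small observed statistic and a
# large p-value.
def _example_serial_corr_test():
    series = pandas.Series(np.random.normal(size=365))
    test = SerialCorrelationTest((series, 1))
    return test.actual, test.PValue()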
def TestSerialCorr(daily):
"""Tests serial correlations in daily prices and their residuals.
daily: DataFrame of daily prices
"""
# test the correlation between consecutive prices
series = daily.ppg
test = SerialCorrelationTest((series, 1))
pvalue = test.PValue()
print(test.actual, pvalue)
# test for serial correlation in residuals of the linear model
_, results = timeseries.RunLinearModel(daily)
series = results.resid
test = SerialCorrelationTest((series, 1))
pvalue = test.PValue()
print(test.actual, pvalue)
# test for serial correlation in residuals of the quadratic model
_, results = RunQuadraticModel(daily)
series = results.resid
test = SerialCorrelationTest((series, 1))
pvalue = test.PValue()
print(test.actual, pvalue)
def main(name):
transactions = timeseries.ReadData()
dailies = timeseries.GroupByQualityAndDay(transactions)
name = 'high'
daily = dailies[name]
PlotQuadraticModel(daily, name)
TestSerialCorr(daily)
PlotEwmaPredictions(daily, name)
if __name__ == '__main__':
import sys
main(*sys.argv)
| gpl-3.0 |
bioShaun/OMrnaseq | setup.py | 1 | 1031 | #!/usr/bin/env python
from setuptools import setup, find_packages
version = '0.1dev'
print '''------------------------------
Installing RNAseq version {}
------------------------------
'''.format(version)
setup(
name='rnaseq',
version=version,
author='lx Gui',
author_email='[email protected]',
keywords=['bioinformatics', 'NGS', 'RNAseq'],
license='GPLv3',
packages=find_packages(),
include_package_data=True,
scripts=['scripts/mrna',
'scripts/simple_qc',
'scripts/_qc_wrapper',
'scripts/get_fq_cfg',
'scripts/merge_files',
'scripts/fake_qc'],
install_requires=[
'luigi',
'pyyaml',
'envoy',
'xlsxwriter',
'pandas',
'rpy2<=2.8.6',
'packaging',
'docopt',
'HTSeq',
'click',
'Pillow',
'biopython',
'pathlib'],
)
print '''------------------------------
RNAseq installation complete!
------------------------------
'''
| gpl-3.0 |
maartenbreddels/vaex | packages/vaex-ml/vaex/ml/spec.py | 1 | 1299 | import json
import os
import sys
import traitlets
from . import sklearn
from . import generate
from . import catboost
from . import lightgbm
from . import xgboost
def lmap(f, values):
return list(map(f, values))
def lmapstar(f, values):
return [f(*k) for k in values]
def to_trait(name, trait):
return dict(
name=name,
has_default=trait.default_value is traitlets.Undefined,
default=None
if trait.default_value is traitlets.Undefined
else trait.default_value,
type=str(type(trait).__name__),
help=trait.help,
)
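# Illustrative sketch (not part of the original module): to_trait flattens a
# single traitlets trait into a plain dict entry of the spec; the _Demo class
# below is a made-up example, not part of vaex.
def _example_to_trait():
    class _Demo(traitlets.HasTraits):
        alpha = traitlets.Float(0.5, help='demo parameter')
    return to_trait('alpha', _Demo.class_traits()['alpha'])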
def to_cls(cls):
return dict(
classname=cls.__name__,
snake_name=cls.__dict__.get(
"snake_name", generate.camel_to_underscore(cls.__name__)
),
version=cls.__dict__.get("_version", "1.0.0"),
module=cls.__module__,
traits=lmapstar(to_trait, cls.class_traits().items()),
doc=cls.__doc__
)
def main(args=sys.argv):
spec = lmap(to_cls, generate.registry)
json_data = json.dumps(spec, indent=4, sort_keys=True)
path = os.path.join(os.path.dirname(__file__), "spec.json")
if len(sys.argv) > 1:
path = sys.argv[1]
with open(path, "w") as f:
f.write(json_data)
if __name__ == "__main__":
main()
| mit |
neuroelectro/elephant | elephant/test/make_spike_extraction_test_data.py | 6 | 1572 | def main():
from brian2 import start_scope,mvolt,ms,NeuronGroup,StateMonitor,run
import matplotlib.pyplot as plt
import neo
import quantities as pq
start_scope()
# Izhikevich neuron parameters.
a = 0.02/ms
b = 0.2/ms
c = -65*mvolt
d = 6*mvolt/ms
I = 4*mvolt/ms
# Standard Izhikevich neuron equations.
eqs = '''
dv/dt = 0.04*v**2/(ms*mvolt) + (5/ms)*v + 140*mvolt/ms - u + I : volt
du/dt = a*((b*v) - u) : volt/second
'''
reset = '''
v = c
u += d
'''
# Setup and run simulation.
G = NeuronGroup(1, eqs, threshold='v>30*mvolt', reset='v = -70*mvolt')
G.v = -65*mvolt
G.u = b*G.v
M = StateMonitor(G, 'v', record=True)
run(300*ms)
# Store results in neo format.
vm = neo.core.AnalogSignal(M.v[0],units=pq.V,sampling_period=0.1*pq.ms)
# Plot results.
plt.figure()
plt.plot(vm.times*1000,vm*1000) # Plot mV and ms instead of V and s.
plt.xlabel('Time (ms)')
plt.ylabel('mv')
# Save results.
iom = neo.io.PyNNNumpyIO('spike_extraction_test_data')
block = neo.core.Block()
segment = neo.core.Segment()
segment.analogsignals.append(vm)
block.segments.append(segment)
iom.write(block)
# Load results.
iom2 = neo.io.PyNNNumpyIO('spike_extraction_test_data.npz')
data = iom2.read()
vm = data[0].segments[0].analogsignals[0]
# Plot results.
# The two figures should match.
plt.figure()
plt.plot(vm.times*1000,vm*1000) # Plot mV and ms instead of V and s.
plt.xlabel('Time (ms)')
plt.ylabel('mv')
if __name__ == '__main__':
main()
| bsd-3-clause |
lscheinkman/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/cm.py | 70 | 5385 | """
This module contains the instantiations of color mapping classes
"""
import numpy as np
from numpy import ma
import matplotlib as mpl
import matplotlib.colors as colors
import matplotlib.cbook as cbook
from matplotlib._cm import *
def get_cmap(name=None, lut=None):
"""
Get a colormap instance, defaulting to rc values if *name* is None
"""
if name is None: name = mpl.rcParams['image.cmap']
if lut is None: lut = mpl.rcParams['image.lut']
assert(name in datad.keys())
return colors.LinearSegmentedColormap(name, datad[name], lut)
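# Illustrative sketch (not part of the original module): the returned
# colormap maps floats in [0, 1] to RGBA tuples, here through a 16-entry
# 'jet' lookup table.
def _example_get_cmap_usage():
    cmap = get_cmap('jet', 16)
    return cmap(0.5)  # an (r, g, b, a) tuple of floats in [0, 1]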
class ScalarMappable:
"""
This is a mixin class to support scalar -> RGBA mapping. Handles
normalization and colormapping
"""
def __init__(self, norm=None, cmap=None):
"""
*norm* is an instance of :class:`colors.Normalize` or one of
its subclasses, used to map luminance to 0-1. *cmap* is a
:mod:`cm` colormap instance, for example :data:`cm.jet`
"""
self.callbacksSM = cbook.CallbackRegistry((
'changed',))
if cmap is None: cmap = get_cmap()
if norm is None: norm = colors.Normalize()
self._A = None
self.norm = norm
self.cmap = cmap
self.colorbar = None
self.update_dict = {'array':False}
def set_colorbar(self, im, ax):
'set the colorbar image and axes associated with mappable'
self.colorbar = im, ax
def to_rgba(self, x, alpha=1.0, bytes=False):
'''Return a normalized rgba array corresponding to *x*. If *x*
is already an rgb array, insert *alpha*; if it is already
rgba, return it unchanged. If *bytes* is True, return rgba as
4 uint8s instead of 4 floats.
'''
try:
if x.ndim == 3:
if x.shape[2] == 3:
if x.dtype == np.uint8:
alpha = np.array(alpha*255, np.uint8)
m, n = x.shape[:2]
xx = np.empty(shape=(m,n,4), dtype = x.dtype)
xx[:,:,:3] = x
xx[:,:,3] = alpha
elif x.shape[2] == 4:
xx = x
else:
raise ValueError("third dimension must be 3 or 4")
if bytes and xx.dtype != np.uint8:
xx = (xx * 255).astype(np.uint8)
return xx
except AttributeError:
pass
x = ma.asarray(x)
x = self.norm(x)
x = self.cmap(x, alpha=alpha, bytes=bytes)
return x
def set_array(self, A):
'Set the image array from numpy array *A*'
self._A = A
self.update_dict['array'] = True
def get_array(self):
'Return the array'
return self._A
def get_cmap(self):
'return the colormap'
return self.cmap
def get_clim(self):
'return the min, max of the color limits for image scaling'
return self.norm.vmin, self.norm.vmax
def set_clim(self, vmin=None, vmax=None):
"""
set the norm limits for image scaling; if *vmin* is a length2
sequence, interpret it as ``(vmin, vmax)`` which is used to
support setp
ACCEPTS: a length 2 sequence of floats
"""
if (vmin is not None and vmax is None and
cbook.iterable(vmin) and len(vmin)==2):
vmin, vmax = vmin
if vmin is not None: self.norm.vmin = vmin
if vmax is not None: self.norm.vmax = vmax
self.changed()
def set_cmap(self, cmap):
"""
set the colormap for luminance data
ACCEPTS: a colormap
"""
if cmap is None: cmap = get_cmap()
self.cmap = cmap
self.changed()
def set_norm(self, norm):
'set the normalization instance'
if norm is None: norm = colors.Normalize()
self.norm = norm
self.changed()
def autoscale(self):
"""
Autoscale the scalar limits on the norm instance using the
current array
"""
if self._A is None:
raise TypeError('You must first set_array for mappable')
self.norm.autoscale(self._A)
self.changed()
def autoscale_None(self):
"""
Autoscale the scalar limits on the norm instance using the
current array, changing only limits that are None
"""
if self._A is None:
raise TypeError('You must first set_array for mappable')
self.norm.autoscale_None(self._A)
self.changed()
def add_checker(self, checker):
"""
Add an entry to a dictionary of boolean flags
that are set to True when the mappable is changed.
"""
self.update_dict[checker] = False
def check_update(self, checker):
"""
If mappable has changed since the last check,
return True; else return False
"""
if self.update_dict[checker]:
self.update_dict[checker] = False
return True
return False
def changed(self):
"""
Call this whenever the mappable is changed to notify all the
callbackSM listeners to the 'changed' signal
"""
self.callbacksSM.process('changed', self)
for key in self.update_dict:
self.update_dict[key] = True
| agpl-3.0 |
costypetrisor/scikit-learn | examples/decomposition/plot_ica_blind_source_separation.py | 349 | 2228 | """
=====================================
Blind source separation using FastICA
=====================================
An example of estimating sources from noisy data.
:ref:`ICA` is used to estimate sources given noisy measurements.
Imagine 3 instruments playing simultaneously and 3 microphones
recording the mixed signals. ICA is used to recover the sources
ie. what is played by each instrument. Importantly, PCA fails
at recovering our `instruments` since the related signals reflect
non-Gaussian processes.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from scipy import signal
from sklearn.decomposition import FastICA, PCA
###############################################################################
# Generate sample data
np.random.seed(0)
n_samples = 2000
time = np.linspace(0, 8, n_samples)
s1 = np.sin(2 * time) # Signal 1 : sinusoidal signal
s2 = np.sign(np.sin(3 * time)) # Signal 2 : square signal
s3 = signal.sawtooth(2 * np.pi * time) # Signal 3: saw tooth signal
S = np.c_[s1, s2, s3]
S += 0.2 * np.random.normal(size=S.shape) # Add noise
S /= S.std(axis=0) # Standardize data
# Mix data
A = np.array([[1, 1, 1], [0.5, 2, 1.0], [1.5, 1.0, 2.0]]) # Mixing matrix
X = np.dot(S, A.T) # Generate observations
# Compute ICA
ica = FastICA(n_components=3)
S_ = ica.fit_transform(X) # Reconstruct signals
A_ = ica.mixing_ # Get estimated mixing matrix
# We can `prove` that the ICA model applies by reverting the unmixing.
assert np.allclose(X, np.dot(S_, A_.T) + ica.mean_)
# For comparison, compute PCA
pca = PCA(n_components=3)
H = pca.fit_transform(X) # Reconstruct signals based on orthogonal components
###############################################################################
# Plot results
plt.figure()
models = [X, S, S_, H]
names = ['Observations (mixed signal)',
'True Sources',
'ICA recovered signals',
'PCA recovered signals']
colors = ['red', 'steelblue', 'orange']
for ii, (model, name) in enumerate(zip(models, names), 1):
plt.subplot(4, 1, ii)
plt.title(name)
for sig, color in zip(model.T, colors):
plt.plot(sig, color=color)
plt.subplots_adjust(0.09, 0.04, 0.94, 0.94, 0.26, 0.46)
plt.show()
| bsd-3-clause |
linebp/pandas | pandas/plotting/_tools.py | 7 | 12779 | # being a bit too dynamic
# pylint: disable=E1101
from __future__ import division
import warnings
from math import ceil
import numpy as np
from pandas.core.dtypes.common import is_list_like
from pandas.core.index import Index
from pandas.core.series import Series
from pandas.compat import range
def format_date_labels(ax, rot):
# mini version of autofmt_xdate
try:
for label in ax.get_xticklabels():
label.set_ha('right')
label.set_rotation(rot)
fig = ax.get_figure()
fig.subplots_adjust(bottom=0.2)
except Exception: # pragma: no cover
pass
def table(ax, data, rowLabels=None, colLabels=None,
**kwargs):
"""
Helper function to convert DataFrame and Series to matplotlib.table
Parameters
----------
`ax`: Matplotlib axes object
`data`: DataFrame or Series
data for table contents
`kwargs`: keywords, optional
keyword arguments which passed to matplotlib.table.table.
If `rowLabels` or `colLabels` is not specified, data index or column
name will be used.
Returns
-------
matplotlib table object
"""
from pandas import DataFrame
if isinstance(data, Series):
data = DataFrame(data, columns=[data.name])
elif isinstance(data, DataFrame):
pass
else:
raise ValueError('Input data must be DataFrame or Series')
if rowLabels is None:
rowLabels = data.index
if colLabels is None:
colLabels = data.columns
cellText = data.values
import matplotlib.table
table = matplotlib.table.table(ax, cellText=cellText,
rowLabels=rowLabels,
colLabels=colLabels, **kwargs)
return table
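# Illustrative sketch (not part of the original module): attach a small toy
# DataFrame to an existing axes as a matplotlib table; `loc` is one of the
# keyword arguments forwarded to matplotlib.table.table.
def _example_table_usage():
    import matplotlib.pyplot as plt
    from pandas import DataFrame
    df = DataFrame({'a': [1, 2], 'b': [3, 4]})
    fig, ax = plt.subplots()
    ax.axis('off')  # hide the axes so only the table is visible
    return table(ax, df, loc='center')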
def _get_layout(nplots, layout=None, layout_type='box'):
if layout is not None:
if not isinstance(layout, (tuple, list)) or len(layout) != 2:
raise ValueError('Layout must be a tuple of (rows, columns)')
nrows, ncols = layout
# Python 2 compat
ceil_ = lambda x: int(ceil(x))
if nrows == -1 and ncols > 0:
layout = nrows, ncols = (ceil_(float(nplots) / ncols), ncols)
elif ncols == -1 and nrows > 0:
layout = nrows, ncols = (nrows, ceil_(float(nplots) / nrows))
elif ncols <= 0 and nrows <= 0:
msg = "At least one dimension of layout must be positive"
raise ValueError(msg)
if nrows * ncols < nplots:
raise ValueError('Layout of %sx%s must be larger than '
'required size %s' % (nrows, ncols, nplots))
return layout
if layout_type == 'single':
return (1, 1)
elif layout_type == 'horizontal':
return (1, nplots)
elif layout_type == 'vertical':
return (nplots, 1)
layouts = {1: (1, 1), 2: (1, 2), 3: (2, 2), 4: (2, 2)}
try:
return layouts[nplots]
except KeyError:
k = 1
while k ** 2 < nplots:
k += 1
if (k - 1) * k >= nplots:
return k, (k - 1)
else:
return k, k
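# Illustrative sketch (not part of the original module): the 'box' layout
# picks a near-square grid, the other layout types produce degenerate grids,
# and a -1 in an explicit layout is filled in from the number of plots.
def _example_get_layout():
    assert _get_layout(5) == (3, 2)
    assert _get_layout(3, layout_type='horizontal') == (1, 3)
    assert _get_layout(10, layout=(-1, 4)) == (3, 4)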
# copied from matplotlib/pyplot.py and modified for pandas.plotting
def _subplots(naxes=None, sharex=False, sharey=False, squeeze=True,
subplot_kw=None, ax=None, layout=None, layout_type='box',
**fig_kw):
"""Create a figure with a set of subplots already made.
This utility wrapper makes it convenient to create common layouts of
subplots, including the enclosing figure object, in a single call.
Keyword arguments:
naxes : int
Number of required axes. Exceeded axes are set invisible. Default is
nrows * ncols.
sharex : bool
If True, the X axis will be shared amongst all subplots.
sharey : bool
If True, the Y axis will be shared amongst all subplots.
squeeze : bool
If True, extra dimensions are squeezed out from the returned axis object:
- if only one subplot is constructed (nrows=ncols=1), the resulting
single Axis object is returned as a scalar.
- for Nx1 or 1xN subplots, the returned object is a 1-d numpy object
array of Axis objects.
- for NxM subplots with N>1 and M>1 are returned as a 2d array.
If False, no squeezing at all is done: the returned axis object is always
a 2-d array containing Axis instances, even if it ends up being 1x1.
subplot_kw : dict
Dict with keywords passed to the add_subplot() call used to create each
subplots.
ax : Matplotlib axis object, optional
layout : tuple
Number of rows and columns of the subplot grid.
If not specified, calculated from naxes and layout_type
layout_type : {'box', 'horizontal', 'vertical'}, default 'box'
Specify how to layout the subplot grid.
fig_kw : Other keyword arguments to be passed to the figure() call.
Note that all keywords not recognized above will be
automatically included here.
Returns:
fig, ax : tuple
- fig is the Matplotlib Figure object
- ax can be either a single axis object or an array of axis objects if
more than one subplot was created. The dimensions of the resulting array
can be controlled with the squeeze keyword, see above.
**Examples:**
x = np.linspace(0, 2*np.pi, 400)
y = np.sin(x**2)
# Just a figure and one subplot
f, ax = plt.subplots()
ax.plot(x, y)
ax.set_title('Simple plot')
# Two subplots, unpack the output array immediately
f, (ax1, ax2) = plt.subplots(1, 2, sharey=True)
ax1.plot(x, y)
ax1.set_title('Sharing Y axis')
ax2.scatter(x, y)
# Four polar axes
plt.subplots(2, 2, subplot_kw=dict(polar=True))
"""
import matplotlib.pyplot as plt
if subplot_kw is None:
subplot_kw = {}
if ax is None:
fig = plt.figure(**fig_kw)
else:
if is_list_like(ax):
ax = _flatten(ax)
if layout is not None:
warnings.warn("When passing multiple axes, layout keyword is "
"ignored", UserWarning)
if sharex or sharey:
warnings.warn("When passing multiple axes, sharex and sharey "
"are ignored. These settings must be specified "
"when creating axes", UserWarning,
stacklevel=4)
if len(ax) == naxes:
fig = ax[0].get_figure()
return fig, ax
else:
raise ValueError("The number of passed axes must be {0}, the "
"same as the output plot".format(naxes))
fig = ax.get_figure()
# if ax is passed and a number of subplots is 1, return ax as it is
if naxes == 1:
if squeeze:
return fig, ax
else:
return fig, _flatten(ax)
else:
warnings.warn("To output multiple subplots, the figure containing "
"the passed axes is being cleared", UserWarning,
stacklevel=4)
fig.clear()
nrows, ncols = _get_layout(naxes, layout=layout, layout_type=layout_type)
nplots = nrows * ncols
# Create empty object array to hold all axes. It's easiest to make it 1-d
# so we can just append subplots upon creation, and then
axarr = np.empty(nplots, dtype=object)
# Create first subplot separately, so we can share it if requested
ax0 = fig.add_subplot(nrows, ncols, 1, **subplot_kw)
if sharex:
subplot_kw['sharex'] = ax0
if sharey:
subplot_kw['sharey'] = ax0
axarr[0] = ax0
# Note off-by-one counting because add_subplot uses the MATLAB 1-based
# convention.
for i in range(1, nplots):
kwds = subplot_kw.copy()
# Set sharex and sharey to None for blank/dummy axes, these can
# interfere with proper axis limits on the visible axes if
# they share axes e.g. issue #7528
if i >= naxes:
kwds['sharex'] = None
kwds['sharey'] = None
ax = fig.add_subplot(nrows, ncols, i + 1, **kwds)
axarr[i] = ax
if naxes != nplots:
for ax in axarr[naxes:]:
ax.set_visible(False)
_handle_shared_axes(axarr, nplots, naxes, nrows, ncols, sharex, sharey)
if squeeze:
# Reshape the array to have the final desired dimension (nrow,ncol),
# though discarding unneeded dimensions that equal 1. If we only have
# one subplot, just return it instead of a 1-element array.
if nplots == 1:
axes = axarr[0]
else:
axes = axarr.reshape(nrows, ncols).squeeze()
else:
# returned axis array will be always 2-d, even if nrows=ncols=1
axes = axarr.reshape(nrows, ncols)
return fig, axes
def _remove_labels_from_axis(axis):
for t in axis.get_majorticklabels():
t.set_visible(False)
try:
# set_visible will not be effective if
# minor axis has NullLocator and NullFormatter (default)
import matplotlib.ticker as ticker
if isinstance(axis.get_minor_locator(), ticker.NullLocator):
axis.set_minor_locator(ticker.AutoLocator())
if isinstance(axis.get_minor_formatter(), ticker.NullFormatter):
axis.set_minor_formatter(ticker.FormatStrFormatter(''))
for t in axis.get_minorticklabels():
t.set_visible(False)
except Exception: # pragma no cover
raise
axis.get_label().set_visible(False)
def _handle_shared_axes(axarr, nplots, naxes, nrows, ncols, sharex, sharey):
if nplots > 1:
if nrows > 1:
try:
# first find out the ax layout,
# so that we can correctly handle 'gaps"
layout = np.zeros((nrows + 1, ncols + 1), dtype=np.bool)
for ax in axarr:
layout[ax.rowNum, ax.colNum] = ax.get_visible()
for ax in axarr:
# only the last row of subplots should get x labels -> all
# others off. `layout` handles the case that the subplot is
# the last in its column, because there is no subplot/gap
# below it.
if not layout[ax.rowNum + 1, ax.colNum]:
continue
if sharex or len(ax.get_shared_x_axes()
.get_siblings(ax)) > 1:
_remove_labels_from_axis(ax.xaxis)
except IndexError:
# if gridspec is used, ax.rowNum and ax.colNum may different
# from layout shape. in this case, use last_row logic
for ax in axarr:
if ax.is_last_row():
continue
if sharex or len(ax.get_shared_x_axes()
.get_siblings(ax)) > 1:
_remove_labels_from_axis(ax.xaxis)
if ncols > 1:
for ax in axarr:
# only the first column should get y labels -> set all others to
# off. As we only have labels in the first column and we always
# have a subplot there, we can skip the layout test
if ax.is_first_col():
continue
if sharey or len(ax.get_shared_y_axes().get_siblings(ax)) > 1:
_remove_labels_from_axis(ax.yaxis)
def _flatten(axes):
if not is_list_like(axes):
return np.array([axes])
elif isinstance(axes, (np.ndarray, Index)):
return axes.ravel()
return np.array(axes)
def _get_all_lines(ax):
lines = ax.get_lines()
if hasattr(ax, 'right_ax'):
lines += ax.right_ax.get_lines()
if hasattr(ax, 'left_ax'):
lines += ax.left_ax.get_lines()
return lines
def _get_xlim(lines):
left, right = np.inf, -np.inf
for l in lines:
x = l.get_xdata(orig=False)
left = min(x[0], left)
right = max(x[-1], right)
return left, right
def _set_ticks_props(axes, xlabelsize=None, xrot=None,
ylabelsize=None, yrot=None):
import matplotlib.pyplot as plt
for ax in _flatten(axes):
if xlabelsize is not None:
plt.setp(ax.get_xticklabels(), fontsize=xlabelsize)
if xrot is not None:
plt.setp(ax.get_xticklabels(), rotation=xrot)
if ylabelsize is not None:
plt.setp(ax.get_yticklabels(), fontsize=ylabelsize)
if yrot is not None:
plt.setp(ax.get_yticklabels(), rotation=yrot)
return axes
| bsd-3-clause |
brclark-usgs/flopy | autotest/t020_test.py | 2 | 4667 | # Test modflow write and run
import os
import numpy as np
try:
import matplotlib
if os.getenv('TRAVIS'): # are we running https://travis-ci.org/ automated tests ?
matplotlib.use('Agg') # Force matplotlib not to use any Xwindows backend
import matplotlib.pyplot as plt
except:
plt = None
def analyticalWaterTableSolution(h1, h2, z, R, K, L, x):
h = np.zeros((x.shape[0]), np.float)
b1 = h1 - z
b2 = h2 - z
h = np.sqrt(b1 ** 2 - (x / L) * (b1 ** 2 - b2 ** 2) + (R * x / K) * (L - x)) + z
return h
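# Illustrative sketch (not part of the original test): evaluate the
# analytical water-table solution at the domain midpoint using the same
# parameters that test_mfnwt_run() sets up below.
def _example_analytical_solution():
    x = np.array([2500.])
    return analyticalWaterTableSolution(20., 11., 0., 0.001, 50., 5000., x)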
def test_mfnwt_run():
import os
import platform
import flopy
exe_name = 'mfnwt'
if platform.system() == 'Windows':
exe_name = '{}.exe'.format(exe_name)
exe = flopy.which(exe_name)
if exe is None:
print('Specified executable {} does not exist in path'.format(exe_name))
return
modelname = 'watertable'
model_ws = os.path.join('temp', 't020')
if not os.path.exists(model_ws):
os.makedirs(model_ws)
# model dimensions
nlay, nrow, ncol = 1, 1, 100
# cell spacing
delr = 50.
delc = 1.
# domain length
L = 5000.
# boundary heads
h1 = 20.
h2 = 11.
# ibound
ibound = np.ones((nlay, nrow, ncol), dtype=np.int)
# starting heads
strt = np.zeros((nlay, nrow, ncol), dtype=np.float)
strt[0, 0, 0] = h1
strt[0, 0, -1] = h2
# top of the aquifer
top = 25.
# bottom of the aquifer
botm = 0.
# hydraulic conductivity
hk = 50.
# location of cell centroids
x = np.arange(0.0, L, delr) + (delr / 2.)
# location of cell edges
xa = np.arange(0, L + delr, delr)
# recharge rate
rchrate = 0.001
# calculate the head at the cell centroids using the analytical solution function
hac = analyticalWaterTableSolution(h1, h2, botm, rchrate, hk, L, x)
# calculate the head at the cell edges using the analytical solution function
ha = analyticalWaterTableSolution(h1, h2, botm, rchrate, hk, L, xa)
# ghbs
# ghb conductance
b1, b2 = 0.5 * (h1 + hac[0]), 0.5 * (h2 + hac[-1])
c1, c2 = hk * b1 * delc / (0.5 * delr), hk * b2 * delc / (0.5 * delr)
# dtype
ghb_dtype = flopy.modflow.ModflowGhb.get_default_dtype()
# build ghb recarray
stress_period_data = np.zeros((2), dtype=ghb_dtype)
stress_period_data = stress_period_data.view(np.recarray)
# fill ghb recarray
stress_period_data[0] = (0, 0, 0, h1, c1)
stress_period_data[1] = (0, 0, ncol - 1, h2, c2)
mf = flopy.modflow.Modflow(modelname=modelname, exe_name=exe, model_ws=model_ws, version='mfnwt')
dis = flopy.modflow.ModflowDis(mf, nlay, nrow, ncol,
delr=delr, delc=delc,
top=top, botm=botm,
perlen=1, nstp=1, steady=True)
bas = flopy.modflow.ModflowBas(mf, ibound=ibound, strt=strt)
lpf = flopy.modflow.ModflowUpw(mf, hk=hk, laytyp=1)
ghb = flopy.modflow.ModflowGhb(mf, stress_period_data=stress_period_data)
rch = flopy.modflow.ModflowRch(mf, rech=rchrate, nrchop=1)
oc = flopy.modflow.ModflowOc(mf)
nwt = flopy.modflow.ModflowNwt(mf)
mf.write_input()
# remove existing heads results, if necessary
try:
os.remove(os.path.join(model_ws, '{0}.hds'.format(modelname)))
except:
pass
# run existing model
mf.run_model()
# Read the simulated MODFLOW-2005 model results
# Create the headfile object
headfile = os.path.join(model_ws, '{0}.hds'.format(modelname))
headobj = flopy.utils.HeadFile(headfile, precision='single')
times = headobj.get_times()
head = headobj.get_data(totim=times[-1])
# Plot the results
if plt is not None:
fig = plt.figure(figsize=(16, 6))
ax = fig.add_subplot(1, 3, 1)
ax.plot(xa, ha, linewidth=8, color='0.5', label='analytical solution')
ax.plot(x, head[0, 0, :], color='red', label='MODFLOW-NWT')
leg = ax.legend(loc='lower left')
leg.draw_frame(False)
ax.set_xlabel('Horizontal distance, in m')
ax.set_ylabel('Head, in m')
ax = fig.add_subplot(1, 3, 2)
ax.plot(x, head[0, 0, :] - hac, linewidth=1, color='blue')
ax.set_xlabel('Horizontal distance, in m')
ax.set_ylabel('Error, in m')
ax = fig.add_subplot(1, 3, 3)
ax.plot(x, 100. * (head[0, 0, :] - hac) / hac, linewidth=1, color='blue')
ax.set_xlabel('Horizontal distance, in m')
ax.set_ylabel('Percent Error')
fig.savefig(os.path.join(model_ws, '{}.png'.format(modelname)))
return
if __name__ == '__main__':
test_mfnwt_run()
| bsd-3-clause |
jorik041/scikit-learn | sklearn/metrics/cluster/bicluster.py | 359 | 2797 | from __future__ import division
import numpy as np
from sklearn.utils.linear_assignment_ import linear_assignment
from sklearn.utils.validation import check_consistent_length, check_array
__all__ = ["consensus_score"]
def _check_rows_and_columns(a, b):
"""Unpacks the row and column arrays and checks their shape."""
check_consistent_length(*a)
check_consistent_length(*b)
checks = lambda x: check_array(x, ensure_2d=False)
a_rows, a_cols = map(checks, a)
b_rows, b_cols = map(checks, b)
return a_rows, a_cols, b_rows, b_cols
def _jaccard(a_rows, a_cols, b_rows, b_cols):
"""Jaccard coefficient on the elements of the two biclusters."""
intersection = ((a_rows * b_rows).sum() *
(a_cols * b_cols).sum())
a_size = a_rows.sum() * a_cols.sum()
b_size = b_rows.sum() * b_cols.sum()
return intersection / (a_size + b_size - intersection)
def _pairwise_similarity(a, b, similarity):
"""Computes pairwise similarity matrix.
result[i, j] is the Jaccard coefficient of a's bicluster i and b's
bicluster j.
"""
a_rows, a_cols, b_rows, b_cols = _check_rows_and_columns(a, b)
n_a = a_rows.shape[0]
n_b = b_rows.shape[0]
result = np.array(list(list(similarity(a_rows[i], a_cols[i],
b_rows[j], b_cols[j])
for j in range(n_b))
for i in range(n_a)))
return result
def consensus_score(a, b, similarity="jaccard"):
"""The similarity of two sets of biclusters.
Similarity between individual biclusters is computed. Then the
best matching between sets is found using the Hungarian algorithm.
The final score is the sum of similarities divided by the size of
the larger set.
Read more in the :ref:`User Guide <biclustering>`.
Parameters
----------
a : (rows, columns)
Tuple of row and column indicators for a set of biclusters.
b : (rows, columns)
Another set of biclusters like ``a``.
similarity : string or function, optional, default: "jaccard"
May be the string "jaccard" to use the Jaccard coefficient, or
any function that takes four arguments, each of which is a 1d
indicator vector: (a_rows, a_columns, b_rows, b_columns).
References
----------
* Hochreiter, Bodenhofer, et. al., 2010. `FABIA: factor analysis
for bicluster acquisition
<https://www.ncbi.nlm.nih.gov/pmc/articles/PMC2881408/>`__.
"""
if similarity == "jaccard":
similarity = _jaccard
matrix = _pairwise_similarity(a, b, similarity)
indices = linear_assignment(1. - matrix)
n_a = len(a[0])
n_b = len(b[0])
return matrix[indices[:, 0], indices[:, 1]].sum() / max(n_a, n_b)
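# Illustrative sketch (not part of the original module): two identical sets
# of biclusters score 1.0. Rows and columns are toy boolean indicator
# matrices of shape (n_biclusters, n_rows) and (n_biclusters, n_columns).
def _example_consensus_score():
    rows = np.array([[True, True, False, False],
                     [False, False, True, True]])
    cols = np.array([[True, False],
                     [False, True]])
    return consensus_score((rows, cols), (rows, cols))  # 1.0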
| bsd-3-clause |
schets/scikit-learn | sklearn/tests/test_kernel_approximation.py | 244 | 7588 | import numpy as np
from scipy.sparse import csr_matrix
from sklearn.utils.testing import assert_array_equal, assert_equal, assert_true
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_array_almost_equal, assert_raises
from sklearn.utils.testing import assert_less_equal
from sklearn.metrics.pairwise import kernel_metrics
from sklearn.kernel_approximation import RBFSampler
from sklearn.kernel_approximation import AdditiveChi2Sampler
from sklearn.kernel_approximation import SkewedChi2Sampler
from sklearn.kernel_approximation import Nystroem
from sklearn.metrics.pairwise import polynomial_kernel, rbf_kernel
# generate data
rng = np.random.RandomState(0)
X = rng.random_sample(size=(300, 50))
Y = rng.random_sample(size=(300, 50))
X /= X.sum(axis=1)[:, np.newaxis]
Y /= Y.sum(axis=1)[:, np.newaxis]
def test_additive_chi2_sampler():
# test that AdditiveChi2Sampler approximates kernel on random data
# compute exact kernel
# abbreviations for an easier formula
X_ = X[:, np.newaxis, :]
Y_ = Y[np.newaxis, :, :]
large_kernel = 2 * X_ * Y_ / (X_ + Y_)
# reduce to n_samples_x x n_samples_y by summing over features
kernel = (large_kernel.sum(axis=2))
# approximate kernel mapping
transform = AdditiveChi2Sampler(sample_steps=3)
X_trans = transform.fit_transform(X)
Y_trans = transform.transform(Y)
kernel_approx = np.dot(X_trans, Y_trans.T)
assert_array_almost_equal(kernel, kernel_approx, 1)
X_sp_trans = transform.fit_transform(csr_matrix(X))
Y_sp_trans = transform.transform(csr_matrix(Y))
assert_array_equal(X_trans, X_sp_trans.A)
assert_array_equal(Y_trans, Y_sp_trans.A)
# test error is raised on negative input
Y_neg = Y.copy()
Y_neg[0, 0] = -1
assert_raises(ValueError, transform.transform, Y_neg)
# test error on invalid sample_steps
transform = AdditiveChi2Sampler(sample_steps=4)
assert_raises(ValueError, transform.fit, X)
# test that the sample interval is set correctly
sample_steps_available = [1, 2, 3]
for sample_steps in sample_steps_available:
# test that the sample_interval is initialized correctly
transform = AdditiveChi2Sampler(sample_steps=sample_steps)
assert_equal(transform.sample_interval, None)
# test that the sample_interval is changed in the fit method
transform.fit(X)
assert_not_equal(transform.sample_interval_, None)
# test that the sample_interval is set correctly
sample_interval = 0.3
transform = AdditiveChi2Sampler(sample_steps=4,
sample_interval=sample_interval)
assert_equal(transform.sample_interval, sample_interval)
transform.fit(X)
assert_equal(transform.sample_interval_, sample_interval)
def test_skewed_chi2_sampler():
# test that RBFSampler approximates kernel on random data
# compute exact kernel
c = 0.03
# abbreviations for an easier formula
X_c = (X + c)[:, np.newaxis, :]
Y_c = (Y + c)[np.newaxis, :, :]
# we do it in log-space in the hope that it's more stable
# this array is n_samples_x x n_samples_y big x n_features
log_kernel = ((np.log(X_c) / 2.) + (np.log(Y_c) / 2.) + np.log(2.) -
np.log(X_c + Y_c))
# reduce to n_samples_x x n_samples_y by summing over features in log-space
kernel = np.exp(log_kernel.sum(axis=2))
# approximate kernel mapping
transform = SkewedChi2Sampler(skewedness=c, n_components=1000,
random_state=42)
X_trans = transform.fit_transform(X)
Y_trans = transform.transform(Y)
kernel_approx = np.dot(X_trans, Y_trans.T)
assert_array_almost_equal(kernel, kernel_approx, 1)
# test error is raised on negative input
Y_neg = Y.copy()
Y_neg[0, 0] = -1
assert_raises(ValueError, transform.transform, Y_neg)
def test_rbf_sampler():
# test that RBFSampler approximates kernel on random data
# compute exact kernel
gamma = 10.
kernel = rbf_kernel(X, Y, gamma=gamma)
# approximate kernel mapping
rbf_transform = RBFSampler(gamma=gamma, n_components=1000, random_state=42)
X_trans = rbf_transform.fit_transform(X)
Y_trans = rbf_transform.transform(Y)
kernel_approx = np.dot(X_trans, Y_trans.T)
error = kernel - kernel_approx
assert_less_equal(np.abs(np.mean(error)), 0.01) # close to unbiased
np.abs(error, out=error)
assert_less_equal(np.max(error), 0.1) # nothing too far off
assert_less_equal(np.mean(error), 0.05) # mean is fairly close
def test_input_validation():
# Regression test: kernel approx. transformers should work on lists
# No assertions; the old versions would simply crash
X = [[1, 2], [3, 4], [5, 6]]
AdditiveChi2Sampler().fit(X).transform(X)
SkewedChi2Sampler().fit(X).transform(X)
RBFSampler().fit(X).transform(X)
X = csr_matrix(X)
RBFSampler().fit(X).transform(X)
def test_nystroem_approximation():
# some basic tests
rnd = np.random.RandomState(0)
X = rnd.uniform(size=(10, 4))
# With n_components = n_samples this is exact
X_transformed = Nystroem(n_components=X.shape[0]).fit_transform(X)
K = rbf_kernel(X)
assert_array_almost_equal(np.dot(X_transformed, X_transformed.T), K)
trans = Nystroem(n_components=2, random_state=rnd)
X_transformed = trans.fit(X).transform(X)
assert_equal(X_transformed.shape, (X.shape[0], 2))
# test callable kernel
linear_kernel = lambda X, Y: np.dot(X, Y.T)
trans = Nystroem(n_components=2, kernel=linear_kernel, random_state=rnd)
X_transformed = trans.fit(X).transform(X)
assert_equal(X_transformed.shape, (X.shape[0], 2))
# test that available kernels fit and transform
kernels_available = kernel_metrics()
for kern in kernels_available:
trans = Nystroem(n_components=2, kernel=kern, random_state=rnd)
X_transformed = trans.fit(X).transform(X)
assert_equal(X_transformed.shape, (X.shape[0], 2))
def test_nystroem_singular_kernel():
# test that nystroem works with singular kernel matrix
rng = np.random.RandomState(0)
X = rng.rand(10, 20)
X = np.vstack([X] * 2) # duplicate samples
gamma = 100
N = Nystroem(gamma=gamma, n_components=X.shape[0]).fit(X)
X_transformed = N.transform(X)
K = rbf_kernel(X, gamma=gamma)
assert_array_almost_equal(K, np.dot(X_transformed, X_transformed.T))
assert_true(np.all(np.isfinite(X_transformed)))
def test_nystroem_poly_kernel_params():
# Non-regression: Nystroem should pass other parameters beside gamma.
rnd = np.random.RandomState(37)
X = rnd.uniform(size=(10, 4))
K = polynomial_kernel(X, degree=3.1, coef0=.1)
nystroem = Nystroem(kernel="polynomial", n_components=X.shape[0],
degree=3.1, coef0=.1)
X_transformed = nystroem.fit_transform(X)
assert_array_almost_equal(np.dot(X_transformed, X_transformed.T), K)
def test_nystroem_callable():
# Test Nystroem on a callable.
rnd = np.random.RandomState(42)
n_samples = 10
X = rnd.uniform(size=(n_samples, 4))
def logging_histogram_kernel(x, y, log):
"""Histogram kernel that writes to a log."""
log.append(1)
return np.minimum(x, y).sum()
kernel_log = []
X = list(X) # test input validation
Nystroem(kernel=logging_histogram_kernel,
n_components=(n_samples - 1),
kernel_params={'log': kernel_log}).fit(X)
assert_equal(len(kernel_log), n_samples * (n_samples - 1) / 2)
| bsd-3-clause |
SimeonFritz/aima-python | submissions/Conklin/myNN.py | 13 | 3059 | from sklearn import datasets
from sklearn.neural_network import MLPClassifier
import traceback
from submissions.Conklin import music
class DataFrame:
data = []
feature_names = []
target = []
target_names = []
musicECHP = DataFrame()
musicECHP.data = []
targetInfo = []
list_of_songs = music.get_songs()
for song in list_of_songs:
try:
tempo = float(song['song']["tempo"])
loudness = float(song['song']["loudness"])
fadeOut = float(song['song']["start_of_fade_out"])
fadeIn = float(song['song']["end_of_fade_in"])
duration = float(song['song']["duration"])
releaseYear = float(song['song']["year"])
# append the target and the features together so the two lists stay aligned
# even when a song with a missing field is skipped by the except clause
targetInfo.append(tempo)
musicECHP.data.append([loudness, fadeOut, fadeIn, duration, releaseYear])
except:
traceback.print_exc()
musicECHP.feature_names = [
'Loudness',
'Fade Out',
'Fade In',
'Duration',
'Release Year'
]
musicECHP.target = []
def musicTarget(speed):
if speed > 100:
return 1
return 0
for pre in targetInfo:
# choose the target
tt = musicTarget(pre)
musicECHP.target.append(tt)
musicECHP.target_names = [
'Tempo <= 100 bpm',
'Tempo > 100 bpm',
]
Examples = {
'Music': musicECHP,
}
'''
Make a custom classifier,
'''
mlpc = MLPClassifier(
hidden_layer_sizes = (1000,),
activation = 'relu',
solver='sgd',#'adam',
# alpha = 0.0001,
# batch_size='auto',
learning_rate = 'adaptive', # 'constant',
# power_t = 0.5,
max_iter = 1000, # 200,
shuffle = False,
# random_state = None,
# tol = 1e-4,
# verbose = False,
# warm_start = False,
momentum = 0.5,
# nesterovs_momentum = True,
# early_stopping = False,
# validation_fraction = 0.1,
beta_1 = 0.9,
beta_2 = 0.999,
# epsilon = 1e-8,
)
'''
Try scaling the data.
'''
musicScaled = DataFrame()
def setupScales(grid):
global min, max
min = list(grid[0])
max = list(grid[0])
for row in range(1, len(grid)):
for col in range(len(grid[row])):
cell = grid[row][col]
if cell < min[col]:
min[col] = cell
if cell > max[col]:
max[col] = cell
def scaleGrid(grid):
newGrid = []
for row in range(len(grid)):
newRow = []
for col in range(len(grid[row])):
try:
cell = grid[row][col]
scaled = (cell - min[col]) \
/ (max[col] - min[col])
newRow.append(scaled)
except:
pass
newGrid.append(newRow)
return newGrid
setupScales(musicECHP.data)
musicScaled.data = scaleGrid(musicECHP.data)
musicScaled.feature_names = musicECHP.feature_names
musicScaled.target = musicECHP.target
musicScaled.target_names = musicECHP.target_names
Examples = {
'MusicDefault': {
'frame': musicECHP,
},
'MusicSGD': {
'frame': musicECHP,
'mlpc': mlpc
},
'MusicScaled': {
'frame': musicScaled,
},
}
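# Illustrative sketch (not part of the original submission): fit the custom
# classifier on the scaled music data and report its training accuracy.
def _example_fit_music_classifier():
    mlpc.fit(musicScaled.data, musicScaled.target)
    return mlpc.score(musicScaled.data, musicScaled.target)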
| mit |
Sentient07/scikit-learn | sklearn/decomposition/tests/test_dict_learning.py | 46 | 9267 | import numpy as np
from sklearn.exceptions import ConvergenceWarning
from sklearn.utils import check_array
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import TempMemmap
from sklearn.decomposition import DictionaryLearning
from sklearn.decomposition import MiniBatchDictionaryLearning
from sklearn.decomposition import SparseCoder
from sklearn.decomposition import dict_learning_online
from sklearn.decomposition import sparse_encode
rng_global = np.random.RandomState(0)
n_samples, n_features = 10, 8
X = rng_global.randn(n_samples, n_features)
def test_dict_learning_shapes():
n_components = 5
dico = DictionaryLearning(n_components, random_state=0).fit(X)
assert_true(dico.components_.shape == (n_components, n_features))
def test_dict_learning_overcomplete():
n_components = 12
dico = DictionaryLearning(n_components, random_state=0).fit(X)
assert_true(dico.components_.shape == (n_components, n_features))
def test_dict_learning_reconstruction():
n_components = 12
dico = DictionaryLearning(n_components, transform_algorithm='omp',
transform_alpha=0.001, random_state=0)
code = dico.fit(X).transform(X)
assert_array_almost_equal(np.dot(code, dico.components_), X)
dico.set_params(transform_algorithm='lasso_lars')
code = dico.transform(X)
assert_array_almost_equal(np.dot(code, dico.components_), X, decimal=2)
# used to test lars here too, but there's no guarantee the number of
# nonzero atoms is right.
def test_dict_learning_reconstruction_parallel():
# regression test that parallel reconstruction works with n_jobs=-1
n_components = 12
dico = DictionaryLearning(n_components, transform_algorithm='omp',
transform_alpha=0.001, random_state=0, n_jobs=-1)
code = dico.fit(X).transform(X)
assert_array_almost_equal(np.dot(code, dico.components_), X)
dico.set_params(transform_algorithm='lasso_lars')
code = dico.transform(X)
assert_array_almost_equal(np.dot(code, dico.components_), X, decimal=2)
def test_dict_learning_lassocd_readonly_data():
n_components = 12
with TempMemmap(X) as X_read_only:
dico = DictionaryLearning(n_components, transform_algorithm='lasso_cd',
transform_alpha=0.001, random_state=0,
n_jobs=-1)
with ignore_warnings(category=ConvergenceWarning):
code = dico.fit(X_read_only).transform(X_read_only)
assert_array_almost_equal(np.dot(code, dico.components_), X_read_only,
decimal=2)
def test_dict_learning_nonzero_coefs():
n_components = 4
dico = DictionaryLearning(n_components, transform_algorithm='lars',
transform_n_nonzero_coefs=3, random_state=0)
code = dico.fit(X).transform(X[np.newaxis, 1])
assert_true(len(np.flatnonzero(code)) == 3)
dico.set_params(transform_algorithm='omp')
code = dico.transform(X[np.newaxis, 1])
assert_equal(len(np.flatnonzero(code)), 3)
def test_dict_learning_unknown_fit_algorithm():
n_components = 5
dico = DictionaryLearning(n_components, fit_algorithm='<unknown>')
assert_raises(ValueError, dico.fit, X)
def test_dict_learning_split():
n_components = 5
dico = DictionaryLearning(n_components, transform_algorithm='threshold',
random_state=0)
code = dico.fit(X).transform(X)
dico.split_sign = True
split_code = dico.transform(X)
assert_array_equal(split_code[:, :n_components] -
split_code[:, n_components:], code)
def test_dict_learning_online_shapes():
rng = np.random.RandomState(0)
n_components = 8
code, dictionary = dict_learning_online(X, n_components=n_components,
alpha=1, random_state=rng)
assert_equal(code.shape, (n_samples, n_components))
assert_equal(dictionary.shape, (n_components, n_features))
assert_equal(np.dot(code, dictionary).shape, X.shape)
def test_dict_learning_online_verbosity():
n_components = 5
# test verbosity
from sklearn.externals.six.moves import cStringIO as StringIO
import sys
old_stdout = sys.stdout
try:
sys.stdout = StringIO()
dico = MiniBatchDictionaryLearning(n_components, n_iter=20, verbose=1,
random_state=0)
dico.fit(X)
dico = MiniBatchDictionaryLearning(n_components, n_iter=20, verbose=2,
random_state=0)
dico.fit(X)
dict_learning_online(X, n_components=n_components, alpha=1, verbose=1,
random_state=0)
dict_learning_online(X, n_components=n_components, alpha=1, verbose=2,
random_state=0)
finally:
sys.stdout = old_stdout
assert_true(dico.components_.shape == (n_components, n_features))
def test_dict_learning_online_estimator_shapes():
n_components = 5
dico = MiniBatchDictionaryLearning(n_components, n_iter=20, random_state=0)
dico.fit(X)
assert_true(dico.components_.shape == (n_components, n_features))
def test_dict_learning_online_overcomplete():
n_components = 12
dico = MiniBatchDictionaryLearning(n_components, n_iter=20,
random_state=0).fit(X)
assert_true(dico.components_.shape == (n_components, n_features))
def test_dict_learning_online_initialization():
n_components = 12
rng = np.random.RandomState(0)
V = rng.randn(n_components, n_features)
dico = MiniBatchDictionaryLearning(n_components, n_iter=0,
dict_init=V, random_state=0).fit(X)
assert_array_equal(dico.components_, V)
def test_dict_learning_online_partial_fit():
n_components = 12
rng = np.random.RandomState(0)
V = rng.randn(n_components, n_features) # random init
V /= np.sum(V ** 2, axis=1)[:, np.newaxis]
dict1 = MiniBatchDictionaryLearning(n_components, n_iter=10 * len(X),
batch_size=1,
alpha=1, shuffle=False, dict_init=V,
random_state=0).fit(X)
dict2 = MiniBatchDictionaryLearning(n_components, alpha=1,
n_iter=1, dict_init=V,
random_state=0)
for i in range(10):
for sample in X:
dict2.partial_fit(sample[np.newaxis, :])
assert_true(not np.all(sparse_encode(X, dict1.components_, alpha=1) ==
0))
assert_array_almost_equal(dict1.components_, dict2.components_,
decimal=2)
def test_sparse_encode_shapes():
n_components = 12
rng = np.random.RandomState(0)
V = rng.randn(n_components, n_features) # random init
V /= np.sum(V ** 2, axis=1)[:, np.newaxis]
for algo in ('lasso_lars', 'lasso_cd', 'lars', 'omp', 'threshold'):
code = sparse_encode(X, V, algorithm=algo)
assert_equal(code.shape, (n_samples, n_components))
def test_sparse_encode_input():
n_components = 100
rng = np.random.RandomState(0)
V = rng.randn(n_components, n_features) # random init
V /= np.sum(V ** 2, axis=1)[:, np.newaxis]
Xf = check_array(X, order='F')
for algo in ('lasso_lars', 'lasso_cd', 'lars', 'omp', 'threshold'):
a = sparse_encode(X, V, algorithm=algo)
b = sparse_encode(Xf, V, algorithm=algo)
assert_array_almost_equal(a, b)
def test_sparse_encode_error():
n_components = 12
rng = np.random.RandomState(0)
V = rng.randn(n_components, n_features) # random init
V /= np.sum(V ** 2, axis=1)[:, np.newaxis]
code = sparse_encode(X, V, alpha=0.001)
assert_true(not np.all(code == 0))
assert_less(np.sqrt(np.sum((np.dot(code, V) - X) ** 2)), 0.1)
def test_sparse_encode_error_default_sparsity():
rng = np.random.RandomState(0)
X = rng.randn(100, 64)
D = rng.randn(2, 64)
code = ignore_warnings(sparse_encode)(X, D, algorithm='omp',
n_nonzero_coefs=None)
assert_equal(code.shape, (100, 2))
def test_unknown_method():
n_components = 12
rng = np.random.RandomState(0)
V = rng.randn(n_components, n_features) # random init
assert_raises(ValueError, sparse_encode, X, V, algorithm="<unknown>")
def test_sparse_coder_estimator():
n_components = 12
rng = np.random.RandomState(0)
V = rng.randn(n_components, n_features) # random init
V /= np.sum(V ** 2, axis=1)[:, np.newaxis]
code = SparseCoder(dictionary=V, transform_algorithm='lasso_lars',
transform_alpha=0.001).transform(X)
assert_true(not np.all(code == 0))
assert_less(np.sqrt(np.sum((np.dot(code, V) - X) ** 2)), 0.1)
| bsd-3-clause |
yyjiang/scikit-learn | sklearn/__check_build/__init__.py | 345 | 1671 | """ Module to give helpful messages to the user that did not
compile the scikit properly.
"""
import os
INPLACE_MSG = """
It appears that you are importing a local scikit-learn source tree. For
this, you need to have an inplace install. Maybe you are in the source
directory and you need to try from another location."""
STANDARD_MSG = """
If you have used an installer, please check that it is suited for your
Python version, your operating system and your platform."""
def raise_build_error(e):
# Raise a comprehensible error and list the contents of the
# directory to help debugging on the mailing list.
local_dir = os.path.split(__file__)[0]
msg = STANDARD_MSG
if local_dir == "sklearn/__check_build":
# Picking up the local install: this will work only if the
# install is an 'inplace build'
msg = INPLACE_MSG
dir_content = list()
for i, filename in enumerate(os.listdir(local_dir)):
if ((i + 1) % 3):
dir_content.append(filename.ljust(26))
else:
dir_content.append(filename + '\n')
raise ImportError("""%s
___________________________________________________________________________
Contents of %s:
%s
___________________________________________________________________________
It seems that scikit-learn has not been built correctly.
If you have installed scikit-learn from source, please do not forget
to build the package before using it: run `python setup.py install` or
`make` in the source directory.
%s""" % (e, local_dir, ''.join(dir_content).strip(), msg))
try:
from ._check_build import check_build
except ImportError as e:
raise_build_error(e)
| bsd-3-clause |
sohaibiftikhar/zeppelin | spark/src/main/resources/python/zeppelin_pyspark.py | 16 | 12106 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os, sys, getopt, traceback, json, re
from py4j.java_gateway import java_import, JavaGateway, GatewayClient
from py4j.protocol import Py4JJavaError
from pyspark.conf import SparkConf
from pyspark.context import SparkContext
import ast
import warnings
# for back compatibility
from pyspark.sql import SQLContext, HiveContext, Row
class Logger(object):
def __init__(self):
pass
def write(self, message):
intp.appendOutput(message)
def reset(self):
pass
def flush(self):
pass
class PyZeppelinContext(dict):
def __init__(self, zc):
self.z = zc
self._displayhook = lambda *args: None
def show(self, obj):
from pyspark.sql import DataFrame
if isinstance(obj, DataFrame):
print(self.z.showData(obj._jdf))
else:
print(str(obj))
# By implementing special methods it makes operating on it more Pythonic
def __setitem__(self, key, item):
self.z.put(key, item)
def __getitem__(self, key):
return self.z.get(key)
def __delitem__(self, key):
self.z.remove(key)
def __contains__(self, item):
return self.z.containsKey(item)
def add(self, key, value):
self.__setitem__(key, value)
def put(self, key, value):
self.__setitem__(key, value)
def get(self, key):
return self.__getitem__(key)
def getInterpreterContext(self):
return self.z.getInterpreterContext()
def input(self, name, defaultValue=""):
return self.z.input(name, defaultValue)
def select(self, name, options, defaultValue=""):
# auto_convert to ArrayList doesn't match the method signature on JVM side
tuples = list(map(lambda items: self.__tupleToScalaTuple2(items), options))
iterables = gateway.jvm.scala.collection.JavaConversions.collectionAsScalaIterable(tuples)
return self.z.select(name, defaultValue, iterables)
def checkbox(self, name, options, defaultChecked=None):
if defaultChecked is None:
defaultChecked = []
optionTuples = list(map(lambda items: self.__tupleToScalaTuple2(items), options))
optionIterables = gateway.jvm.scala.collection.JavaConversions.collectionAsScalaIterable(optionTuples)
defaultCheckedIterables = gateway.jvm.scala.collection.JavaConversions.collectionAsScalaIterable(defaultChecked)
checkedItems = gateway.jvm.scala.collection.JavaConversions.seqAsJavaList(self.z.checkbox(name, defaultCheckedIterables, optionIterables))
result = []
for checkedItem in checkedItems:
result.append(checkedItem)
return result;
def registerHook(self, event, cmd, replName=None):
if replName is None:
self.z.registerHook(event, cmd)
else:
self.z.registerHook(event, cmd, replName)
def unregisterHook(self, event, replName=None):
if replName is None:
self.z.unregisterHook(event)
else:
self.z.unregisterHook(event, replName)
def getHook(self, event, replName=None):
if replName is None:
return self.z.getHook(event)
return self.z.getHook(event, replName)
def _setup_matplotlib(self):
# If we don't have matplotlib installed don't bother continuing
try:
import matplotlib
except ImportError:
return
# Make sure custom backends are available in the PYTHONPATH
rootdir = os.environ.get('ZEPPELIN_HOME', os.getcwd())
mpl_path = os.path.join(rootdir, 'interpreter', 'lib', 'python')
if mpl_path not in sys.path:
sys.path.append(mpl_path)
# Finally check if backend exists, and if so configure as appropriate
try:
matplotlib.use('module://backend_zinline')
import backend_zinline
# Everything looks good so make config assuming that we are using
# an inline backend
self._displayhook = backend_zinline.displayhook
self.configure_mpl(width=600, height=400, dpi=72, fontsize=10,
interactive=True, format='png', context=self.z)
except ImportError:
# Fall back to Agg if no custom backend installed
matplotlib.use('Agg')
warnings.warn("Unable to load inline matplotlib backend, "
"falling back to Agg")
def configure_mpl(self, **kwargs):
import mpl_config
mpl_config.configure(**kwargs)
def __tupleToScalaTuple2(self, tuple):
if (len(tuple) == 2):
return gateway.jvm.scala.Tuple2(tuple[0], tuple[1])
else:
raise IndexError("options must be a list of tuple of 2")
class SparkVersion(object):
SPARK_1_4_0 = 10400
SPARK_1_3_0 = 10300
SPARK_2_0_0 = 20000
def __init__(self, versionNumber):
self.version = versionNumber
def isAutoConvertEnabled(self):
return self.version >= self.SPARK_1_4_0
def isImportAllPackageUnderSparkSql(self):
return self.version >= self.SPARK_1_3_0
def isSpark2(self):
return self.version >= self.SPARK_2_0_0
class PySparkCompletion:
def __init__(self, interpreterObject):
self.interpreterObject = interpreterObject
def getGlobalCompletion(self):
objectDefList = []
try:
for completionItem in list(globals().keys()):
objectDefList.append(completionItem)
except:
return None
else:
return objectDefList
def getMethodCompletion(self, text_value):
execResult = locals()
if text_value == None:
return None
completion_target = text_value
try:
if len(completion_target) <= 0:
return None
if text_value[-1] == ".":
completion_target = text_value[:-1]
exec("{} = dir({})".format("objectDefList", completion_target), globals(), execResult)
except:
return None
else:
return list(execResult['objectDefList'])
def getCompletion(self, text_value):
completionList = set()
globalCompletionList = self.getGlobalCompletion()
if globalCompletionList != None:
for completionItem in list(globalCompletionList):
completionList.add(completionItem)
if text_value != None:
objectCompletionList = self.getMethodCompletion(text_value)
if objectCompletionList != None:
for completionItem in list(objectCompletionList):
completionList.add(completionItem)
if len(completionList) <= 0:
self.interpreterObject.setStatementsFinished("", False)
else:
result = json.dumps(list(filter(lambda x : not re.match("^__.*", x), list(completionList))))
self.interpreterObject.setStatementsFinished(result, False)
client = GatewayClient(port=int(sys.argv[1]))
sparkVersion = SparkVersion(int(sys.argv[2]))
if sparkVersion.isSpark2():
from pyspark.sql import SparkSession
else:
from pyspark.sql import SchemaRDD
if sparkVersion.isAutoConvertEnabled():
gateway = JavaGateway(client, auto_convert = True)
else:
gateway = JavaGateway(client)
java_import(gateway.jvm, "org.apache.spark.SparkEnv")
java_import(gateway.jvm, "org.apache.spark.SparkConf")
java_import(gateway.jvm, "org.apache.spark.api.java.*")
java_import(gateway.jvm, "org.apache.spark.api.python.*")
java_import(gateway.jvm, "org.apache.spark.mllib.api.python.*")
intp = gateway.entry_point
output = Logger()
sys.stdout = output
sys.stderr = output
intp.onPythonScriptInitialized(os.getpid())
jsc = intp.getJavaSparkContext()
if sparkVersion.isImportAllPackageUnderSparkSql():
java_import(gateway.jvm, "org.apache.spark.sql.*")
java_import(gateway.jvm, "org.apache.spark.sql.hive.*")
else:
java_import(gateway.jvm, "org.apache.spark.sql.SQLContext")
java_import(gateway.jvm, "org.apache.spark.sql.hive.HiveContext")
java_import(gateway.jvm, "org.apache.spark.sql.hive.LocalHiveContext")
java_import(gateway.jvm, "org.apache.spark.sql.hive.TestHiveContext")
java_import(gateway.jvm, "scala.Tuple2")
_zcUserQueryNameSpace = {}
jconf = intp.getSparkConf()
conf = SparkConf(_jvm = gateway.jvm, _jconf = jconf)
sc = _zsc_ = SparkContext(jsc=jsc, gateway=gateway, conf=conf)
_zcUserQueryNameSpace["_zsc_"] = _zsc_
_zcUserQueryNameSpace["sc"] = sc
if sparkVersion.isSpark2():
spark = __zSpark__ = SparkSession(sc, intp.getSparkSession())
sqlc = __zSqlc__ = __zSpark__._wrapped
_zcUserQueryNameSpace["sqlc"] = sqlc
_zcUserQueryNameSpace["__zSqlc__"] = __zSqlc__
_zcUserQueryNameSpace["spark"] = spark
_zcUserQueryNameSpace["__zSpark__"] = __zSpark__
else:
sqlc = __zSqlc__ = SQLContext(sparkContext=sc, sqlContext=intp.getSQLContext())
_zcUserQueryNameSpace["sqlc"] = sqlc
_zcUserQueryNameSpace["__zSqlc__"] = sqlc
sqlContext = __zSqlc__
_zcUserQueryNameSpace["sqlContext"] = sqlContext
completion = __zeppelin_completion__ = PySparkCompletion(intp)
_zcUserQueryNameSpace["completion"] = completion
_zcUserQueryNameSpace["__zeppelin_completion__"] = __zeppelin_completion__
z = __zeppelin__ = PyZeppelinContext(intp.getZeppelinContext())
__zeppelin__._setup_matplotlib()
_zcUserQueryNameSpace["z"] = z
_zcUserQueryNameSpace["__zeppelin__"] = __zeppelin__
while True :
req = intp.getStatements()
try:
stmts = req.statements().split("\n")
jobGroup = req.jobGroup()
jobDesc = req.jobDescription()
# Get post-execute hooks
try:
global_hook = intp.getHook('post_exec_dev')
except:
global_hook = None
try:
user_hook = __zeppelin__.getHook('post_exec')
except:
user_hook = None
nhooks = 0
for hook in (global_hook, user_hook):
if hook:
nhooks += 1
if stmts:
# use exec mode to compile the statements except the last statement,
# so that the last statement's evaluation will be printed to stdout
sc.setJobGroup(jobGroup, jobDesc)
code = compile('\n'.join(stmts), '<stdin>', 'exec', ast.PyCF_ONLY_AST, 1)
to_run_hooks = []
if (nhooks > 0):
to_run_hooks = code.body[-nhooks:]
to_run_exec, to_run_single = (code.body[:-(nhooks + 1)],
[code.body[-(nhooks + 1)]])
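# Illustrative note (added comment, not part of the original interpreter): with a
# paragraph such as "a = 1", "b = 2", "b + 1" and no hooks, code.body[:-1] holds
# the two assignments (compiled in 'exec' mode below) and code.body[-1:] holds the
# final expression (compiled in 'single' mode), so its value is echoed back to the
# notebook the way an interactive prompt would print it.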
try:
for node in to_run_exec:
mod = ast.Module([node])
code = compile(mod, '<stdin>', 'exec')
exec(code, _zcUserQueryNameSpace)
for node in to_run_single:
mod = ast.Interactive([node])
code = compile(mod, '<stdin>', 'single')
exec(code, _zcUserQueryNameSpace)
for node in to_run_hooks:
mod = ast.Module([node])
code = compile(mod, '<stdin>', 'exec')
exec(code, _zcUserQueryNameSpace)
intp.setStatementsFinished("", False)
except Py4JJavaError:
# raise it to outside try except
raise
except:
exception = traceback.format_exc()
m = re.search("File \"<stdin>\", line (\d+).*", exception)
if m:
line_no = int(m.group(1))
intp.setStatementsFinished(
"Fail to execute line {}: {}\n".format(line_no, stmts[line_no - 1]) + exception, True)
else:
intp.setStatementsFinished(exception, True)
else:
intp.setStatementsFinished("", False)
except Py4JJavaError:
excInnerError = traceback.format_exc() # format_tb() does not return the inner exception
innerErrorStart = excInnerError.find("Py4JJavaError:")
if innerErrorStart > -1:
excInnerError = excInnerError[innerErrorStart:]
intp.setStatementsFinished(excInnerError + str(sys.exc_info()), True)
except:
intp.setStatementsFinished(traceback.format_exc(), True)
output.reset()
| apache-2.0 |
tengerye/orthogonal-denoising-autoencoder | tensorflow/demo.py | 2 | 2335 | import matplotlib
import numpy as np
import scipy as sp
import matplotlib.pyplot as plt
import scipy.io
from sklearn.cross_decomposition import CCA
from orthAE import OrthdAE
# generate toy data for multi-view learning from paper "Factorized Latent Spaces with Structured Sparsity"
t = np.arange(-1, 1, 0.02)
x = np.sin(2*np.pi*t) # share latent space
x_noise = 0.02*np.sin(3.6*np.pi*t) # correlated noise
# private latent spaces
z1 = np.cos(np.pi*np.pi*t)
z2 = np.cos(5*np.pi*t)
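# Summary of the toy generative model (added comment): x is the latent signal
# shared by both views, z1 and z2 are the view-private latent signals, and
# x_noise is a small correlated noise component appended to both views below,
# so a good factorisation should separate shared, private and noise parts.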
##########################################################
# Fig.2.(a)
f, axarr = plt.subplots(2, sharex=True)
axarr[0].plot(t, x, color='blue')
axarr[0].plot(t, z1, color='green')
axarr[0].plot(t, x_noise, color='red')
axarr[0].set_title('Fig.2.(a)')
axarr[1].plot(t, x, color='blue')
axarr[1].plot(t, z2, color='green')
axarr[1].plot(t, x_noise, color='red')
plt.show()
##########################################################
# shared private spaces
m1 = np.vstack((x, z1));
m2 = np.vstack((x, z2));
m1 = np.random.rand(20, 2).dot(m1)
m2 = np.random.rand(20, 2).dot(m2)
# m1 = np.matmul(np.random.rand(20, 2), m1)
# m2 = np.matmul(np.random.rand(20, 2), m2)
# m1 = np.dot(np.random.rand(20, 2), m1)
# m2 = np.dot(np.random.rand(20, 2), m2)
# add gaussian noise with mean=0, standard deviation=0.01
m1 = m1 + np.random.randn(*m1.shape)*0.01;
m2 = m2 + np.random.randn(*m2.shape)*0.01;
# add correlated noise
m1 = np.vstack((m1, x_noise))
m2 = np.vstack((m2, x_noise))
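# Shape check (added comment): t has 100 samples, so m1 and m2 are each
# 21 x 100 (20 mixed observation channels plus the appended correlated-noise
# row), which is why the autoencoder below is built as OrthdAE([21, 21], ...).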
##########################################################
# Fig.2.(b)
f, axarr = plt.subplots(2, sharex=True)
axarr[0].plot(t, m1.transpose())
axarr[0].set_title('Fig.2.(b)')
axarr[1].plot(t, m2.transpose())
plt.show()
##########################################################
# Fig.3 CCA
cca = CCA(n_components=3)
cca.fit(m1.T, m2.T)
X_c = cca.transform(m1.T)
fig, ax = plt.subplots()
ax.set_title('Fig.2.(c)')
# ax.set_color_cycle(['blue', 'green', 'red'])
ax.set_prop_cycle('color', ['blue', 'red', 'green'])
ax.plot(X_c)
# ax.plot(Y_c)
plt.show()
##########################################################
# Use TensorFlow.
x2 = np.concatenate((m1, m2,))
# y2 = trivial_denoising(x2)
# print('shape of y2=', np.shape(y2))
odae = OrthdAE([21, 21], [1, 1, 1])
odae.train(x2.T, max_iter=1000000)
result = odae.transform(x2.T)
plt.plot(result)
plt.show()
| apache-2.0 |
kevin-intel/scikit-learn | sklearn/_min_dependencies.py | 1 | 2235 | """All minimum dependencies for scikit-learn."""
import platform
import argparse
# numpy, scipy and cython should be in sync with pyproject.toml
if platform.python_implementation() == 'PyPy':
NUMPY_MIN_VERSION = '1.19.0'
else:
NUMPY_MIN_VERSION = '1.14.6'
SCIPY_MIN_VERSION = '1.1.0'
JOBLIB_MIN_VERSION = '0.11'
THREADPOOLCTL_MIN_VERSION = '2.0.0'
PYTEST_MIN_VERSION = '5.0.1'
CYTHON_MIN_VERSION = '0.28.5'
# 'build' and 'install' are included to have structured metadata for CI.
# They will NOT be included in setup's extras_require.
# The values are (version_spec, comma separated tags).
dependent_packages = {
'numpy': (NUMPY_MIN_VERSION, 'build, install'),
'scipy': (SCIPY_MIN_VERSION, 'build, install'),
'joblib': (JOBLIB_MIN_VERSION, 'install'),
'threadpoolctl': (THREADPOOLCTL_MIN_VERSION, 'install'),
'cython': (CYTHON_MIN_VERSION, 'build'),
'matplotlib': ('2.2.2', 'benchmark, docs, examples, tests'),
'scikit-image': ('0.14.5', 'docs, examples, tests'),
'pandas': ('0.25.0', 'benchmark, docs, examples, tests'),
'seaborn': ('0.9.0', 'docs, examples'),
'memory_profiler': ('0.57.0', 'benchmark, docs'),
'pytest': (PYTEST_MIN_VERSION, 'tests'),
'pytest-cov': ('2.9.0', 'tests'),
'flake8': ('3.8.2', 'tests'),
'mypy': ('0.770', 'tests'),
'pyamg': ('4.0.0', 'tests'),
'sphinx': ('3.2.0', 'docs'),
'sphinx-gallery': ('0.7.0', 'docs'),
'numpydoc': ('1.0.0', 'docs'),
'Pillow': ('7.1.2', 'docs'),
'sphinx-prompt': ('1.3.0', 'docs'),
}
# create inverse mapping for setuptools
tag_to_packages: dict = {
extra: [] for extra in ['build', 'install', 'docs', 'examples',
'tests', 'benchmark']
}
for package, (min_version, extras) in dependent_packages.items():
for extra in extras.split(', '):
tag_to_packages[extra].append("{}>={}".format(package, min_version))
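# Illustrative example (added comment, not in the original file): after this loop,
# tag_to_packages['build'] contains pinned specifiers such as
# 'numpy>=<NUMPY_MIN_VERSION>', 'scipy>=1.1.0' and 'cython>=0.28.5', while
# tag_to_packages['docs'] collects every package tagged with 'docs' above.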
# Used by CI to get the min dependencies
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description='Get min dependencies for a package')
parser.add_argument('package', choices=dependent_packages)
args = parser.parse_args()
min_version = dependent_packages[args.package][0]
print(min_version)
| bsd-3-clause |
lorenzo-desantis/mne-python | mne/decoding/tests/test_csp.py | 5 | 3859 | # Author: Alexandre Gramfort <[email protected]>
# Romain Trachel <[email protected]>
#
# License: BSD (3-clause)
import os.path as op
from nose.tools import assert_true, assert_raises
import numpy as np
from numpy.testing import assert_array_almost_equal
from mne import io, Epochs, read_events, pick_types
from mne.decoding.csp import CSP
from mne.utils import requires_sklearn, slow_test
data_dir = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', 'data')
raw_fname = op.join(data_dir, 'test_raw.fif')
event_name = op.join(data_dir, 'test-eve.fif')
tmin, tmax = -0.2, 0.5
event_id = dict(aud_l=1, vis_l=3)
# if stop is too small pca may fail in some cases, but we're okay on this file
start, stop = 0, 8
@slow_test
def test_csp():
"""Test Common Spatial Patterns algorithm on epochs
"""
raw = io.Raw(raw_fname, preload=False)
events = read_events(event_name)
picks = pick_types(raw.info, meg=True, stim=False, ecg=False,
eog=False, exclude='bads')
picks = picks[2:9:3]
epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
baseline=(None, 0), preload=True)
epochs_data = epochs.get_data()
n_channels = epochs_data.shape[1]
n_components = 3
csp = CSP(n_components=n_components)
csp.fit(epochs_data, epochs.events[:, -1])
y = epochs.events[:, -1]
X = csp.fit_transform(epochs_data, y)
assert_true(csp.filters_.shape == (n_channels, n_channels))
assert_true(csp.patterns_.shape == (n_channels, n_channels))
assert_array_almost_equal(csp.fit(epochs_data, y).transform(epochs_data),
X)
# test init exception
assert_raises(ValueError, csp.fit, epochs_data,
np.zeros_like(epochs.events))
assert_raises(ValueError, csp.fit, epochs, y)
assert_raises(ValueError, csp.transform, epochs, y)
csp.n_components = n_components
sources = csp.transform(epochs_data)
assert_true(sources.shape[1] == n_components)
epochs.pick_types(meg='mag', copy=False)
# test plot patterns
components = np.arange(n_components)
csp.plot_patterns(epochs.info, components=components, res=12,
show=False)
# test plot filters
csp.plot_filters(epochs.info, components=components, res=12,
show=False)
@requires_sklearn
def test_regularized_csp():
"""Test Common Spatial Patterns algorithm using regularized covariance
"""
raw = io.Raw(raw_fname, preload=False)
events = read_events(event_name)
picks = pick_types(raw.info, meg=True, stim=False, ecg=False,
eog=False, exclude='bads')
picks = picks[1:13:3]
epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
baseline=(None, 0), preload=True)
epochs_data = epochs.get_data()
n_channels = epochs_data.shape[1]
n_components = 3
reg_cov = [None, 0.05, 'ledoit_wolf', 'oas']
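# Note (added comment): these appear to cover the supported regularization modes --
# no shrinkage, a fixed shrinkage coefficient, and the Ledoit-Wolf and OAS
# automatic shrinkage estimators (hence the @requires_sklearn decorator above).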
for reg in reg_cov:
csp = CSP(n_components=n_components, reg=reg)
csp.fit(epochs_data, epochs.events[:, -1])
y = epochs.events[:, -1]
X = csp.fit_transform(epochs_data, y)
assert_true(csp.filters_.shape == (n_channels, n_channels))
assert_true(csp.patterns_.shape == (n_channels, n_channels))
assert_array_almost_equal(csp.fit(epochs_data, y).
transform(epochs_data), X)
# test init exception
assert_raises(ValueError, csp.fit, epochs_data,
np.zeros_like(epochs.events))
assert_raises(ValueError, csp.fit, epochs, y)
assert_raises(ValueError, csp.transform, epochs, y)
csp.n_components = n_components
sources = csp.transform(epochs_data)
assert_true(sources.shape[1] == n_components)
| bsd-3-clause |
hawwestin/MSR.APO | gui/tabpicture.py | 1 | 3864 | import tkinter as tk
import matplotlib
import matplotlib.pyplot as plt
from gui.histogram import Histogram
from gui.operations.computer_vision import Vision
from gui.image_frame import ImageFrame
import app_config
matplotlib.use("TkAgg")
class TabPicture:
"""
Composition of Vision objects that store the images used for processing.
In the future, this will be the main controller of the tkinter tab displaying images and other data.
"""
gallery = {}
def __init__(self, tab_frame: tk.Frame, main_window: tk.Tk, name: tk.StringVar):
self.tab_frame = tab_frame
self.main_window = main_window
"""
The master key must match the menu_command _current tab policy.
"""
###############
# vars
###############
self.id = tab_frame._w
TabPicture.gallery[self.id] = self
self.name = name
self.tkImage = None
res = int(app_config.main_window_resolution[:int(app_config.main_window_resolution.index('x'))])/2
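# Example (added comment): assuming app_config.main_window_resolution is a string
# like "1280x720", the expression above takes the width before the 'x' and halves
# it, so each pane below starts at half the window width (640 px in that case).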
###############
# Panels
###############
self.pann = tk.PanedWindow(self.tab_frame, handlesize=10, showhandle=True, handlepad=12, sashwidth=3)
self.pann.pack(side=tk.TOP, fill=tk.BOTH, expand=True)
self.image_frame = tk.Frame(self.pann)
self.image_frame.pack(expand=True, fill=tk.BOTH)
self.pann.add(self.image_frame, width=res, minsize=100)
self.panel_hist = tk.Frame(self.pann)
self.panel_hist.pack(expand=True, fill=tk.BOTH)
self.pann.add(self.panel_hist, width=res, minsize=100)
###############
# Class
###############
self.vision = Vision()
self.histogram = Histogram(self.panel_hist)
self.image_canvas = ImageFrame(self.image_frame)
def __len__(self):
return TabPicture.gallery.__len__()
def __del__(self):
TabPicture.gallery.pop(self.id, None)
def persist_tmp(self):
self.vision.cvImage.image = self.vision.cvImage_tmp.image
self.refresh()
def match(self, what):
'''
Determine if this tab matches the filter text.
Return True if it matches, False otherwise.
The search is case sensitive and matches both the name and the id.
:param what:
:return:
'''
return what == self.id or what in self.name.get()
@staticmethod
def search(finder):
'''
Find matching TabPicture objects in the gallery.
:param finder:
:return:
'''
return [TabPicture.gallery[tab] for tab in TabPicture.gallery.keys()
if TabPicture.gallery[tab].match(finder)]
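# Usage sketch (added comment): TabPicture.search("sonata") returns every open
# tab whose name contains "sonata" (or whose widget id matches exactly), since
# match() compares against both the tab id and the StringVar name.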
def __contains__(self, item):
"""
Implement Container abstract method to check if object is in our list.
:param item:
:return:
"""
return len(self.search(item)) > 0
def open_image(self, path):
'''
Save copy of opened image for further usage.
:param path: image path
:return:
'''
if len(path) > 0:
self.vision.open_image(path)
else:
self.main_window.status_message.set("no file specified")
def show_hist(self):
"""
Display the histogram for the given window. Keep the matplotlib behaviour
and use it to display additional windows.
:return:
"""
# clear the plot before loading the next one; how can several instances be loaded for several images?
plt.hist(self.vision.cvImage.image.ravel(), 256, [0, 255])
plt.show()
def refresh(self):
self.histogram(image=self.vision.cvImage.image)
self.set_panel_img()
def set_panel_img(self):
self.image_canvas(self.vision.cvImage)
def popup_image(self):
plt.imshow(self.tkImage, cmap='Greys', interpolation='bicubic')
plt.show()
| apache-2.0 |
karstenw/nodebox-pyobjc | examples/Extended Application/matplotlib/examples/user_interfaces/gtk_spreadsheet_sgskip.py | 1 | 2493 | """
===============
GTK Spreadsheet
===============
Example of embedding matplotlib in an application and interacting with
a treeview to store data. Double click on an entry to update plot
data
"""
import pygtk
pygtk.require('2.0')
import gtk
from gtk import gdk
import matplotlib
matplotlib.use('GTKAgg') # or 'GTK'
from matplotlib.backends.backend_gtk import FigureCanvasGTK as FigureCanvas
from numpy.random import random
from matplotlib.figure import Figure
class DataManager(gtk.Window):
numRows, numCols = 20, 10
data = random((numRows, numCols))
def __init__(self):
gtk.Window.__init__(self)
self.set_default_size(600, 600)
self.connect('destroy', lambda win: gtk.main_quit())
self.set_title('GtkListStore demo')
self.set_border_width(8)
vbox = gtk.VBox(False, 8)
self.add(vbox)
label = gtk.Label('Double click a row to plot the data')
vbox.pack_start(label, False, False)
sw = gtk.ScrolledWindow()
sw.set_shadow_type(gtk.SHADOW_ETCHED_IN)
sw.set_policy(gtk.POLICY_NEVER,
gtk.POLICY_AUTOMATIC)
vbox.pack_start(sw, True, True)
model = self.create_model()
self.treeview = gtk.TreeView(model)
self.treeview.set_rules_hint(True)
# matplotlib stuff
fig = Figure(figsize=(6, 4))
self.canvas = FigureCanvas(fig) # a gtk.DrawingArea
vbox.pack_start(self.canvas, True, True)
ax = fig.add_subplot(111)
self.line, = ax.plot(self.data[0, :], 'go') # plot the first row
self.treeview.connect('row-activated', self.plot_row)
sw.add(self.treeview)
self.add_columns()
self.add_events(gdk.BUTTON_PRESS_MASK |
gdk.KEY_PRESS_MASK |
gdk.KEY_RELEASE_MASK)
def plot_row(self, treeview, path, view_column):
ind, = path # get the index into data
points = self.data[ind, :]
self.line.set_ydata(points)
self.canvas.draw()
def add_columns(self):
for i in range(self.numCols):
column = gtk.TreeViewColumn('%d' % i, gtk.CellRendererText(), text=i)
self.treeview.append_column(column)
def create_model(self):
types = [float]*self.numCols
store = gtk.ListStore(*types)
for row in self.data:
store.append(row)
return store
manager = DataManager()
manager.show_all()
gtk.main()
| mit |
karthikvadla16/spark-tk | regression-tests/sparktkregtests/testcases/dicom/dicom_filter_keyword_test.py | 13 | 9391 | # vim: set encoding=utf-8
# Copyright (c) 2016 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""tests dicom.filter functionality"""
import unittest
from sparktkregtests.lib import sparktk_test
import os
import dicom
import numpy
import random
from lxml import etree
class DicomFilterKeywordsTest(sparktk_test.SparkTKTestCase):
def setUp(self):
"""import dicom data for testing"""
super(DicomFilterKeywordsTest, self).setUp()
self.dataset = self.get_file("dicom_uncompressed")
self.dicom = self.context.dicom.import_dcm(self.dataset)
self.xml_directory = "../../../datasets/dicom/dicom_uncompressed/xml/"
self.image_directory = "../../../datasets/dicom/dicom_uncompressed/imagedata/"
self.query = ".//DicomAttribute[@keyword='KEYWORD']/Value/text()"
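# Worked example (added comment): self.query.replace("KEYWORD", "SOPInstanceUID")
# yields ".//DicomAttribute[@keyword='SOPInstanceUID']/Value/text()", the xpath
# used throughout these tests to pull a single tag value out of the metadata xml.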
def test_filter_one_column_one_result_basic(self):
"""test filter with one unique key"""
# get the pandas frame for ease of access
metadata = self.dicom.metadata.to_pandas()
# grab a random row and extract the SOPInstanceUID from that record
random_row_index = random.randint(0, self.dicom.metadata.count() - 1)
random_row = metadata["metadata"][random_row_index]
xml_data = etree.fromstring(random_row.encode("ascii", "ignore"))
random_row_sopi_id = xml_data.xpath(self.query.replace("KEYWORD", "SOPInstanceUID"))[0]
# get all of the records with our randomly selected sopinstanceuid
# since sopinstanceuid is supposed to be unique for each record
# we should only get back the record which we randomly selected above
self.dicom.filter_by_keywords({"SOPInstanceUID" : random_row_sopi_id })
# check that our result is correct
# we should have gotten back from filter the row
# which we randomly selected
self.assertEqual(self.dicom.metadata.count(), 1)
pandas = self.dicom.metadata.to_pandas()["metadata"]
record = pandas[0]
self.assertEqual(str(random_row), str(record))
def test_filter_one_col_multi_result_basic(self):
"""test filter by keyword with one keyword mult record result"""
# get pandas frame for ease of access
metadata = self.dicom.metadata.to_pandas()
# grab a random row and extract the patient id
first_row = metadata["metadata"][0]
xml_data = etree.fromstring(first_row.encode("ascii", "ignore"))
first_row_patient_id = xml_data.xpath(self.query.replace("KEYWORD", "PatientID"))[0]
# filter the records ourselves to get the expected result
expected_result = self._filter({"PatientID" : first_row_patient_id })
# get all of the records with that patient id
self.dicom.filter_by_keywords({"PatientID" : first_row_patient_id })
# get the pandas frame for ease of access
pandas_result = self.dicom.metadata.to_pandas()["metadata"]
# ensure that our expected result matches what dicom returned
self.assertEqual(len(expected_result), self.dicom.metadata.count())
for record, filtered_record in zip(expected_result, pandas_result):
self.assertEqual(record, filtered_record.encode("ascii", "ignore"))
def test_filter_multiple_columns_basic(self):
"""test filter with multiple key vals"""
# first we will generate a filter randomly by
# randomly selecting a row and extracting values that we want to use
keyword_filter = {}
metadata = self.dicom.metadata.to_pandas()["metadata"]
first_row = metadata[0]
xml_data = etree.fromstring(first_row.encode("ascii", "ignore"))
first_row_patient_id = xml_data.xpath(self.query.replace("KEYWORD", "PatientID"))[0]
first_row_body_part = xml_data.xpath(self.query.replace("KEYWORD", "BodyPartExamined"))[0]
keyword_filter["PatientID"] = first_row_patient_id
keyword_filter["BodyPartExamined"] = first_row_body_part
# now we generate our expected result by filtering ourselves
matching_records = self._filter(keyword_filter)
# get the records which match our filter
self.dicom.filter_by_keywords(keyword_filter)
pandas_result = self.dicom.metadata.to_pandas()["metadata"]
# finally we check to ensure that dicom's result matches our expected result
self.assertEqual(len(matching_records), self.dicom.metadata.count())
for expected_record, actual_record in zip(matching_records, pandas_result):
ascii_actual_result = actual_record.encode("ascii", "ignore")
self.assertEqual(ascii_actual_result, expected_record)
def test_filter_invalid_column(self):
"""test filter invalid key"""
self.dicom.filter_by_keywords({ "invalid keyword" : "value" })
self.assertEqual(0, self.dicom.metadata.count())
def test_filter_multiple_invalid_columns(self):
"""test filter mult invalid keys"""
self.dicom.filter_by_keywords({ "invalid" : "bla", "another_invalid_col" : "bla" })
self.assertEqual(0, self.dicom.metadata.count())
def test_valid_keyword_zero_results(self):
"""test filter with key-value pair, key exists but no matches"""
self.dicom.filter_by_keywords({ "SOPInstanceUID" : "2" })
self.assertEqual(0, self.dicom.metadata.count())
def test_invalid_value_type(self):
"""test filter with key-value pair, key exists but value is not type of str"""
with self.assertRaisesRegexp(TypeError, "both keyword and value should be of <type 'str'>"):
self.dicom.filter_by_keywords({"SOPInstanceUID" : 2})
def test_filter_invalid_valid_col_mix(self):
"""test filter with mix of valid and invalid keys"""
# first we get a valid patient id by selecting the first row
# and extracting its patient id
first_row = self.dicom.metadata.to_pandas()["metadata"][0]
xml_data = etree.fromstring(first_row.encode("ascii", "ignore"))
patient_id = xml_data.xpath(self.query.replace("KEYWORD", "PatientID"))[0]
# now we ask dicom to filter using a filter which is a mix of a valid key-value
# pair and an invalid key-value pair
self.dicom.filter_by_keywords({ "PatientID" : patient_id, "Invalid" : "bla" })
# since there are no records which meet BOTH key value criterias
# we assert that 0 records were returned
self.assertEqual(0, self.dicom.metadata.count())
def test_filter_invalid_type(self):
"""test filter invalid param type"""
with self.assertRaisesRegexp(Exception, "incomplete format"):
self.dicom.filter_by_keywords(1)
self.dicom.metadata.count()
def test_filter_unicode_columns(self):
"""test filter by keyword with unicode keys"""
# the logic is the same as test_filter_one_column above
# the only difference is here we are giving the keys as unicode
# strings instead of standard python strings
metadata = self.dicom.metadata.to_pandas()
first_row = metadata["metadata"][0]
xml_data = etree.fromstring(first_row.encode("ascii", "ignore"))
first_row_patient_id = xml_data.xpath(self.query.replace("KEYWORD", "PatientID"))[0]
expected_result = self._filter({ "PatientID" : first_row_patient_id })
self.dicom.filter_by_keywords({ u'PatientID' : first_row_patient_id })
pandas_result = self.dicom.metadata.to_pandas()["metadata"]
self.assertEqual(len(expected_result), self.dicom.metadata.count())
for record, filtered_record in zip(expected_result, pandas_result):
self.assertEqual(record, filtered_record.encode("ascii", "ignore"))
def _filter(self, keywords):
"""generate our expected result by filtering the records"""
# here we are generating the expected result from the key-value
# filter so that we can compare it to what dicom returns
# we will iterate through the dicom metadata to get all of the
# records which match our key-value criteria
matching_records = []
pandas_metadata = self.dicom.metadata.to_pandas()["metadata"]
for row in pandas_metadata:
ascii_xml = row.encode("ascii", "ignore")
xml = etree.fromstring(row.encode("ascii", "ignore"))
for keyword in keywords:
this_row_keyword_value = xml.xpath(self.query.replace("KEYWORD", keyword))[0]
if this_row_keyword_value == keywords[keyword]:
if ascii_xml not in matching_records:
matching_records.append(ascii_xml)
return matching_records
if __name__ == "__main__":
unittest.main()
| apache-2.0 |
YihaoLu/statsmodels | statsmodels/examples/ex_kernel_test_functional.py | 34 | 2246 | # -*- coding: utf-8 -*-
"""
Created on Tue Jan 08 19:03:20 2013
Author: Josef Perktold
"""
from __future__ import print_function
if __name__ == '__main__':
import numpy as np
from statsmodels.regression.linear_model import OLS
#from statsmodels.nonparametric.api import KernelReg
import statsmodels.sandbox.nonparametric.kernel_extras as smke
seed = np.random.randint(999999)
#seed = 661176
print(seed)
np.random.seed(seed)
sig_e = 0.5 #0.1
nobs, k_vars = 200, 1
x = np.random.uniform(-2, 2, size=(nobs, k_vars))
x.sort()
order = 3
exog = x**np.arange(order + 1)
beta = np.array([1, 1, 0.1, 0.0])[:order+1] # 1. / np.arange(1, order + 2)
y_true = np.dot(exog, beta)
y = y_true + sig_e * np.random.normal(size=nobs)
endog = y
print('DGP')
print('nobs=%d, beta=%r, sig_e=%3.1f' % (nobs, beta, sig_e))
mod_ols = OLS(endog, exog[:,:2])
res_ols = mod_ols.fit()
#'cv_ls'[1000, 0.5][0.01, 0.45]
tst = smke.TestFForm(endog, exog[:,:2], bw=[0.01, 0.45], var_type='cc',
fform=lambda x,p: mod_ols.predict(p,x),
estimator=lambda y,x: OLS(y,x).fit().params,
nboot=1000)
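# What this checks (added comment): TestFForm tests the parametric functional
# form -- here the linear OLS fit -- against a nonparametric kernel regression
# alternative; the bootstrap replications above provide the reference
# distribution for the test statistic.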
print('bw', tst.bw)
print('tst.test_stat', tst.test_stat)
print(tst.sig)
print('tst.boots_results mean, min, max', (tst.boots_results.mean(),
tst.boots_results.min(),
tst.boots_results.max()))
print('lower tail bootstrap p-value', (tst.boots_results < tst.test_stat).mean())
print('upper tail bootstrap p-value', (tst.boots_results >= tst.test_stat).mean())
from scipy import stats
print('asymp. normal p-value (2-sided)', stats.norm.sf(np.abs(tst.test_stat))*2)
print('asymp. normal p-value (upper)', stats.norm.sf(tst.test_stat))
do_plot=True
if do_plot:
import matplotlib.pyplot as plt
plt.figure()
plt.plot(x, y, '.')
plt.plot(x, res_ols.fittedvalues)
plt.title('OLS fit')
plt.figure()
plt.hist(tst.boots_results.ravel(), bins=20)
plt.title('bootstrap histogram or test statistic')
plt.show()
| bsd-3-clause |
pcournut/deep-learning-for-combinatorial-optimization | Solvers/TSP/Concorde/dataset.py | 1 | 3425 | import numpy as np
import matplotlib.pyplot as plt
import math
from scipy.spatial.distance import pdist, squareform
from concorde import Solver
# Data Generator
class DataGenerator(object):
def __init__(self,solver):
"""Construct a DataGenerator."""
self.solver=solver
def solve_batch(self, coord_batch):
trip_batch = []
for sequence in coord_batch:
# Calculate optimal_tour_length
trip=[self.solver.run(sequence)]
trip_batch.append(trip)
return trip_batch
def next_batch(self, batch_size, max_length, dimension, seed=0):
"""Return the next batch of the data"""
if seed!=0:
np.random.seed(seed)
coord_batch = []
for _ in range(batch_size):
# Randomly generate data
sequence = np.random.rand(max_length, dimension).astype(np.float32) # (max_length) random points with (dimension) coordinates each, in [0,1)
# Store batch
coord_batch.append(sequence)
return coord_batch
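# Output structure (added comment): next_batch returns a plain Python list of
# batch_size arrays, each of shape (max_length, dimension) with coordinates drawn
# uniformly from [0, 1); solve_batch() then wraps Concorde's optimal tour for
# each of those arrays.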
def single_shuffled_batch(self, batch_size, max_length, dimension, seed=42):
if seed!=0:
np.random.seed(seed)
# Single sequence of coordinates
seq = np.random.rand(max_length, dimension).astype(np.float32) # (max_length) random points with (dimension) coordinates each, in [0,1)
coord_batch = []
# Randomly shuffle seq
for _ in range(batch_size):
# Shuffle sequence
sequence = np.copy(seq) ##########################
np.random.shuffle(sequence) ##########################
# Store batch
coord_batch.append(sequence )
return coord_batch
def shuffle_batch(self, coord_batch):
coord_batch_ = []
# Randomly shuffle seq
for seq in coord_batch:
# Shuffle sequence
sequence = np.copy(seq) ##########################
np.random.shuffle(sequence) ##########################
# Store batch
coord_batch_.append(sequence)
return coord_batch_
def visualize_2D_trip(self,trip):
# plot 2D graph
plt.scatter(trip[:,0], trip[:,1])
labels=np.array(list(range(len(trip))) + [0])
X = trip[labels, 0]
Y = trip[labels, 1]
plt.plot(X, Y)
for i, (x, y) in zip(labels,(zip(X,Y))):
plt.annotate(i,xy=(x, y))
plt.xlim(0,1)
plt.ylim(0,1)
plt.show()
def indexes_to_coordinates(self,coord_batch,trip_indexes):
trip = [coord_batch[i] for (i,j,k) in trip_indexes]
return np.asarray(trip)
if __name__ == "__main__":
# Config
batch_size=16
max_length=10
dimension=2
# Create Solver and Data Generator
solver = Solver(max_length)
dataset = DataGenerator(solver)
# Next batch
#coord_batch = dataset.next_batch(batch_size, max_length, dimension,seed=0)
coord_batch = dataset.single_shuffled_batch(batch_size, max_length, dimension,seed=75)
# Some nice print
print 'Coordinates: \n',coord_batch[0]
# Solve to optimality
trip_batch = dataset.solve_batch(coord_batch)
trip_coord = np.asarray(trip_batch[0][0])
print 'Optimal tour : \n',trip_coord
# 2D plot for coord batch
dataset.visualize_2D_trip(coord_batch[0])
dataset.visualize_2D_trip(trip_coord) | mit |
stevetjoa/musicsearch | mirgui.py | 1 | 40317 |
import os
import pprint
import random
import wx
import numpy
# The recommended way to use wx with mpl is with the WXAgg
# backend.
#
import matplotlib
matplotlib.use('WXAgg')
from matplotlib.figure import Figure
from matplotlib.backends.backend_wxagg import \
FigureCanvasWxAgg as FigCanvas, \
NavigationToolbar2WxAgg as NavigationToolbar
import mir2
import mir
from matplotlib.colors import LogNorm
import main
class mirgui(wx.Frame):
""" The main frame of the application
"""
title = 'Music Search Application'
def __init__(self):
wx.Frame.__init__(self, None, -1, self.title)
self.data = [5, 6, 9, 14]
self.create_menu()
self.create_status_bar()
self.create_main_panel()
#self.textbox.SetValue(' '.join(map(str, self.data)))
self.draw_figure()
print 'Training.'
self.musicsearch = main.Search(8, 32)
for f in os.listdir('train'):
print f
x, fs, enc = mir.wavread('train/'+f)
self.musicsearch.add(x, fs, f)
print 'Done training.'
def create_menu(self):
self.menubar = wx.MenuBar()
menu_file = wx.Menu()
m_browse = menu_file.Append(-1, "&Import *.wav file...", "Shows a File Dialog")
self.Bind(wx.EVT_MENU, self.openfile, m_browse)
m_key = menu_file.Append(-1, "&Estimate Key...", "Estimates Key of the Entire wav file")
self.Bind(wx.EVT_MENU, self.est_key, m_key)
m_expt = menu_file.Append(-1, "&Save plot\tCtrl-S", "Save plot to file")
self.Bind(wx.EVT_MENU, self.on_save_plot, m_expt)
menu_file.AppendSeparator()
m_exit = menu_file.Append(-1, "E&xit\tCtrl-X", "Exit")
self.Bind(wx.EVT_MENU, self.on_exit, m_exit)
menu_edit = wx.Menu()
m_reset = menu_edit.Append(-1, "&Reset Parameters...", "Resets plot parameters to Default Values")
self.Bind(wx.EVT_MENU, self.on_reset, m_reset)
m_lognorm = menu_edit.AppendCheckItem(-1, "Log-Norm", "Plot gram values using Log Normalized spectrum")
self.Bind(wx.EVT_MENU, self.on_log_norm, m_lognorm)
m_WC1 = menu_edit.Append(-1, 'Adjust Input Plot', kind=wx.ITEM_RADIO)
self.Bind(wx.EVT_MENU,self.which_canvas1, m_WC1)
m_WC2 = menu_edit.Append(-1, 'Adjust Output Plot', kind=wx.ITEM_RADIO)
self.Bind(wx.EVT_MENU,self.which_canvas2, m_WC2)
menu_help = wx.Menu()
m_about = menu_help.Append(-1, "&About\tF1", "About the demo")
self.Bind(wx.EVT_MENU, self.on_about, m_about)
self.menubar.Append(menu_file, "&File")
self.menubar.Append(menu_edit, "&Edit")
self.menubar.Append(menu_help, "&Help")
self.SetMenuBar(self.menubar)
def create_main_panel(self):
""" Creates the main panel with all the controls on it:
* mpl canvas
* mpl navigation toolbar
* Control panel for interaction
"""
self.panel = wx.Panel(self)
# Create the mpl Figure and FigCanvas objects.
# 5x4 inches, 100 dots-per-inch
#
self.dpi = 100
self.fig = Figure((3.0, 3.0), dpi=self.dpi)
self.canvas = FigCanvas(self.panel, -1, self.fig)
self.canvas2= FigCanvas(self.panel, -1, self.fig)
# Since we have only one plot, we can use add_axes
# instead of add_subplot, but then the subplot
# configuration tool in the navigation toolbar wouldn't
# work.
#
self.axes = self.fig.add_subplot(111)
# Bind the 'pick' event for clicking on one of the bars
#
self.canvas.mpl_connect('pick_event', self.on_pick)
self.drawbutton = wx.Button(self.panel, -1, "Plot Gram")
self.Bind(wx.EVT_BUTTON, self.on_draw_button, self.drawbutton)
self.plot_select = ['Time Domain Signal', 'Spectrogram','Constant Q Spectrogram', 'Chromagram']
self.combo = wx.ComboBox(self.panel, -1, pos = (0,400), choices = self.plot_select, style=wx.ALIGN_LEFT | wx.CB_READONLY)
self.combo.SetSelection(2)
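# Mapping note (added comment): the combo index doubles as self.plot_type in
# draw_figure() -- 0 = time-domain signal, 1 = spectrogram, 2 = constant-Q
# spectrogram (the default selected above), 3 = chromagram.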
self.setbutton = wx.Button(self.panel, -1, "Set Parameters")
self.Bind(wx.EVT_BUTTON, self.on_set_button, self.setbutton)
self.record = wx.BitmapButton(self.panel, -1, wx.Bitmap('record.png'))
self.Bind(wx.EVT_BUTTON, self.on_rec, self.record)
self.play = wx.BitmapButton(self.panel, -1, wx.Bitmap('play.png'))
self.Bind(wx.EVT_BUTTON, self.on_play, self.play)
self.stop = wx.BitmapButton(self.panel, -1, wx.Bitmap('stop.png'))
self.searchbutton = wx.Button(self.panel, -1, "Search Database")
self.Bind(wx.EVT_BUTTON, self.search, self.searchbutton)
self.searchbutton1 = wx.Button(self.panel, -1, style=wx.BU_LEFT, name="1) Sonata in A Maj., Beethoven")
self.searchbutton2 = wx.Button(self.panel, -1, style=wx.BU_LEFT, name= "2) Polonaise in G Min., Chopin")
self.searchbutton3 = wx.Button(self.panel, -1, style=wx.BU_LEFT, name= "3) Rondo No. 5 in C# Min., Bartok")
self.searchbutton4 = wx.Button(self.panel, -1, style=wx.BU_LEFT, name= "1) Sonata in A Maj., Beethoven")
self.searchbutton5 = wx.Button(self.panel, -1, style=wx.BU_LEFT, name= "2) Polonaise in G Min., Chopin")
self.searchbutton6 = wx.Button(self.panel, -1, style=wx.BU_LEFT, name= "3) Rondo No. 5 in C# Min., Bartok")
self.searchbutton7 = wx.Button(self.panel, -1, style=wx.BU_LEFT, name= "1) Sonata in A Maj., Beethoven")
self.searchbutton8 = wx.Button(self.panel, -1, style=wx.BU_LEFT, name= "2) Polonaise in G Min., Chopin")
self.searchbutton9 = wx.Button(self.panel, -1, style=wx.BU_LEFT, name= "3) Rondo No. 5 in C# Min., Bartok")
self.searchbutton10 = wx.Button(self.panel, -1, style=wx.BU_LEFT, name= "1) Sonata in A Maj., Beethoven")
self.Sbuttonlist = [self.searchbutton1,self.searchbutton2,
self.searchbutton3,self.searchbutton4,
self.searchbutton5,self.searchbutton6,
self.searchbutton7,self.searchbutton8,
self.searchbutton9,self.searchbutton10]
self.Bind(wx.EVT_BUTTON, self.getmeta1, self.searchbutton1)
self.Bind(wx.EVT_BUTTON, self.getmeta2, self.searchbutton2)
self.Bind(wx.EVT_BUTTON, self.getmeta3, self.searchbutton3)
self.Bind(wx.EVT_BUTTON, self.getmeta4, self.searchbutton4)
self.Bind(wx.EVT_BUTTON, self.getmeta5, self.searchbutton5)
self.Bind(wx.EVT_BUTTON, self.getmeta6, self.searchbutton6)
self.Bind(wx.EVT_BUTTON, self.getmeta7, self.searchbutton7)
self.Bind(wx.EVT_BUTTON, self.getmeta8, self.searchbutton8)
self.Bind(wx.EVT_BUTTON, self.getmeta9, self.searchbutton9)
self.Bind(wx.EVT_BUTTON, self.getmeta10, self.searchbutton10)
#self.plt_titlestr = ''
#self.plot_title = wx.StaticText(self.panel, -1, 'text1',(30,15), style=wx.ALIGN_CENTRE)
# Create the navigation toolbar, tied to the canvas
#
self.toolbar = NavigationToolbar(self.canvas)
#
# Layout with box sizers
#
flags = wx.ALIGN_LEFT | wx.ALL | wx.GROW
self.vbox = wx.BoxSizer(wx.VERTICAL)
self.hbox2 = wx.BoxSizer(wx.HORIZONTAL)
self.vbox2 = wx.BoxSizer(wx.VERTICAL)
self.vbox3 = wx.BoxSizer(wx.VERTICAL)
self.vbox2.AddStretchSpacer(1)
self.vbox2.Add(self.searchbutton1, 0, border=3, flag=flags)
self.vbox2.Add(self.searchbutton2, 0, border=3, flag=flags)
self.vbox2.Add(self.searchbutton3, 0, border=3, flag=flags)
self.vbox2.Add(self.searchbutton4, 0, border=3, flag=flags)
self.vbox2.Add(self.searchbutton5, 0, border=3, flag=flags)
self.vbox2.Add(self.searchbutton6, 0, border=3, flag=flags)
self.vbox2.Add(self.searchbutton7, 0, border=3, flag=flags)
self.vbox2.Add(self.searchbutton8, 0, border=3, flag=flags)
self.vbox2.Add(self.searchbutton9, 0, border=3, flag=flags)
self.vbox2.Add(self.searchbutton10, 0, border=3, flag=flags)
self.vbox2.AddStretchSpacer(1)
self.vbox3.Add(self.canvas, 10, wx.RIGHT | wx.TOP | wx.ALIGN_RIGHT | wx.GROW)
self.vbox3.Add(self.canvas2, 10, wx.RIGHT | wx.TOP | wx.ALIGN_RIGHT | wx.GROW)
self.hbox2.Add(self.vbox2, 0, wx.LEFT | wx.TOP | wx.ALIGN_LEFT| wx.GROW)
#self.panel.SetSizer(self.vbox)
#self.vbox.Fit(self)
self.hbox2.Add(self.vbox3, 10, wx.RIGHT | wx.TOP | wx.ALIGN_RIGHT | wx.GROW)
self.vbox.Add(self.hbox2, 0, wx.LEFT | wx.TOP | wx.GROW)
self.vbox.Add(self.toolbar, 0, wx.EXPAND)
self.vbox.AddSpacer(7)
self.hbox = wx.BoxSizer(wx.HORIZONTAL)
self.hbox.AddSpacer(15)
self.hbox.Add(self.combo, 0, border=3, flag=flags)
self.hbox.AddSpacer(30)
self.hbox.Add(self.setbutton, 0, border = 3, flag=flags)
self.hbox.AddSpacer(30)
self.hbox.Add(self.drawbutton, 0, border=3, flag=flags)
self.hbox.AddSpacer(30)
self.hbox.Add(self.play, 0, flag = flags)
self.hbox.Add(self.stop, 0, flag = flags)
self.hbox.Add(self.record, 0, flag = flags)
self.hbox.AddSpacer(30)
self.hbox.Add(self.searchbutton, 0, border=3, flag=flags)
self.hbox.AddSpacer(30)
self.vbox.Add(self.hbox, 0, flag = wx.ALIGN_LEFT | wx.BOTTOM | wx.EXPAND |wx.GROW)
self.panel.SetSizer(self.vbox)
self.vbox.Fit(self)
self.mypath = None
self.fsz = 0.040
self.hop = 0.020
self.fmax = 44100
self.x, self.fs, self.nbits = mir2.wavread('default.wav')
#self.tmax = round(float(len(self.x))/self.fs,2)
self.rectime = 20
self.tmax = self.rectime
self.tmin = 0
self.LG_flag = 0
self.LG_str = None
self.LG_vmin = 25
self.LG_vmax = 50
self.tmin_samp = None
self.tmax_samp = None
self.WC = 1
#self.rec_input = mir2.wavread('default.wav')#None
self.rec_input = None
self.rankresults = [('Beethoven_vln_sonata5_Francescatti_01.wav',1),('adksfjghl',3)]
self.dict = {'Beethoven_vln_sonata5_Zukerman_01.wav':
('Sonata No. 5, Mvt. 1', 'L. V. Beethoven','F Major','Violin and Piano', 'Pinchas Zukerman'),
'Beethoven_vln_sonata5_Zukerman_02.wav':
('Sonata No. 5, Mvt. 2', 'L. V. Beethoven','F Major','Violin and Piano', 'Pinchas Zukerman'),
'Beethoven_vln_sonata5_Zukerman_03.wav':
('Sonata No. 5, Mvt. 3', 'L. V. Beethoven','F Major','Violin and Piano', 'Pinchas Zukerman'),
'Beethoven_vln_sonata5_Zukerman_04.wav':
('Sonata No. 5, Mvt. 4', 'L. V. Beethoven','F Major','Violin and Piano', 'Pinchas Zukerman'),
'Beethoven_vln_sonata5_Zukerman_05.wav':
('Sonata No. 5, Mvt. 5', 'L. V. Beethoven','F Major','Violin and Piano', 'Pinchas Zukerman'),
'Beethoven_vln_sonata5_Oistrakh_01.wav':
('Sonata No. 5, Mvt. 1', 'L. V. Beethoven','F Major','Violin and Piano', 'David Oistrakh'),
'Beethoven_vln_sonata5_Oistrakh_02.wav':
('Sonata No. 5, Mvt. 2', 'L. V. Beethoven','F Major','Violin and Piano', 'David Oistrakh'),
'Beethoven_vln_sonata5_Oistrakh_03.wav':
('Sonata No. 5, Mvt. 3', 'L. V. Beethoven','F Major','Violin and Piano', 'David Oistrakh'),
'Beethoven_vln_sonata5_Oistrakh_04.wav':
('Sonata No. 5, Mvt. 4', 'L. V. Beethoven','F Major','Violin and Piano', 'David Oistrakh'),
'Beethoven_vln_sonata5_Oistrakh_05.wav':
('Sonata No. 5, Mvt. 5', 'L. V. Beethoven','F Major','Violin and Piano', 'David Oistrakh'),
'Beethoven_vln_sonata5_Francescatti_01.wav':
('Sonata No. 5, Mvt. 1', 'L. V. Beethoven','F Major','Violin and Piano', 'Zino Francescatti'),
'Beethoven_vln_sonata5_Francescatti_02.wav':
('Sonata No. 5, Mvt. 2', 'L. V. Beethoven','F Major','Violin and Piano', 'Zino Francescatti'),
'Beethoven_vln_sonata5_Francescatti_03.wav':
('Sonata No. 5, Mvt. 3', 'L. V. Beethoven','F Major','Violin and Piano', 'Zino Francescatti'),
'Beethoven_vln_sonata5_Francescatti_04.wav':
('Sonata No. 5, Mvt. 4', 'L. V. Beethoven','F Major','Violin and Piano', 'Zino Francescatti'),
'Beethoven_vln_sonata5_Francescatti_05.wav':
('Sonata No. 5, Mvt. 5', 'L. V. Beethoven','F Major','Violin and Piano', 'Zino Francescatti'),
'Bach Vln Partita3 - Fischbach 2004 - 01.wav':
('Partita No. 3 - Preludio', 'J. S. Bach', 'E Major', 'Violin', 'Garrett Fischbach'),
'Bach Vln Partita3 - Fischbach 2004 - 03.wav':
('Partita No. 3 - Gavotte en Rondeau', 'J. S. Bach', 'E Major', 'Violin', 'Garrett Fischbach'),
'Bach Vln Sonata1 - Fischbach 2004 - 02.wav':
('Sonata No. 1 - Fuga', 'J. S. Bach', 'G minor', 'Violin', 'Garrett Fischbach'),
'Bach Vln Partita3 - Milstein 1955 - 01.wav':
('Partita No. 3 - Preludio', 'J. S. Bach', 'E Major', 'Violin', 'Nathan Milstein'),
'Bach Vln Partita3 - Milstein 1955 - 03.wav':
('Partita No. 3 - Gavotte en Rondeau', 'J. S. Bach', 'E Major', 'Violin', 'Nathan Milstein'),
'Bach Vln Sonata1 - Milstein 1954 - 02.wav':
('Sonata No. 1 - Fuga', 'J. S. Bach', 'G minor', 'Violin', 'Nathan Milstein'),
'brahms_rhapsody_01.wav':
('Brahms Rhapsody Op.79, No.2', 'J. Brahms','G minor','Piano','Lili Kraus'),
'brahms_rhapsody_02.wav':
('Brahms Rhapsody Op.79, No.2', 'J. Brahms','G minor','Piano','Martha Argerich'),
'debussy_toccata.wav':
('Debussy Toccata from Pour le Piano', 'C. Debussy','N/A','Piano','Boris Feiner'),
'dont_stop_believin.wav':
('Don\'t Stop Believin\'', 'Journey','E major','Vocal, Guitar, Bass, Piano, Drums','Journey'),
'lady_madonna.wav':
('Lady Madonna', 'The Beatles','E major','Vocal, Guitar, Bass, Piano, Saxophone, Drums','The Beatles'),
'let_it_be.wav':
('Let it Be', 'The Beatles','C major','Vocal, Guitar, Bass, Piano, Drums','The Beatles'),
'moonlight.wav':
('Beethoven Piano Sonata No.14', 'L. Beethoven','C# minor','Piano','Unknown'),
'office_theme.wav':
('Theme from \'The Office\'', 'Unknown','G Major','Piano','Unknown'),
'konstantine.wav':
('Konstantine', 'Andrew McMahon','D minor','Vocal, Piano','Something Corporate'),
}
def create_status_bar(self):
self.statusbar = self.CreateStatusBar()
def draw_figure(self, i=0):
""" Redraws the figure
"""
if self.rec_input is None:
return
if self.mypath is None:
self.mypath = 'default.wav'
#self.x, self.fs, self.nbits = mir2.wavread(self.mypath)
if self.WC == 2:
path = 'train/'
filename = self.rankresults[i][0]
fullpath = path + filename
self.x, self.fs, self.nbits = mir2.wavread(fullpath)
if self.WC == 1:
self.x = self.rec_input
#self.x, self.fs, self.nbits = mir2.wavread(self.mypath)
print 'storing rec_input'
self.get_plot_type()
G = 0
self.tmax = float(len(self.x))/self.fs
self.tmin_samp = int(self.tmin*self.fs)
self.tmax_samp = int(self.tmax*self.fs)
if self.tmax_samp > len(self.x):
self.tmax_samp = len(self.x) - 1
print self.x.shape, self.fs, self.fsz, self.hop
if self.plot_type == 0:
P = self.x[self.tmin_samp:self.tmax_samp]
elif self.plot_type == 1:
G = mir2.spectrogram(self.x,self.fs, framesz = self.fsz, hop=self.hop, tmin=self.tmin, tmax=self.tmax)
elif self.plot_type == 2:
G = mir2.qspectrogram(self.x,self.fs, framesz = self.fsz, hop=self.hop, tmin=self.tmin, tmax=self.tmax)
elif self.plot_type == 3:
G = mir2.chromagram(self.x,self.fs, framesz = self.fsz, hop=self.hop, tmin=self.tmin, tmax=self.tmax)
#self.plot_titlestr = self.mypath + gramtype
self.axes.clear()
if self.plot_type == 0:
self.axes.plot(P)
elif self.plot_type == 1 or 2 or 3:
if self.LG_flag == 0:
self.LG_str = None
self.axes.imshow(G.X, aspect='auto', interpolation ='nearest',origin='lower')
elif self.LG_flag == 1:
self.LG_str = 'LogNorm(vmin = 25, vmax = 50)'
self.axes.imshow(G.X, aspect='auto', interpolation ='nearest',origin='lower', norm = LogNorm()) #vmin = self.LG_vmin, vmax = self.LG_vmax))
#self.WC = 1
if self.WC == 1:
self.canvas.draw()
if self.WC == 2:
self.canvas2.draw()
def which_canvas1(self, event):
self.WC = 1
def which_canvas2(self, event):
self.WC = 2
def on_draw_button(self, event):
self.get_plot_type()
self.draw_figure()
def search(self, event):
self.ranklist = ['1) ','2) ','3) ','4) ','5) ','6) ','7) ','8) ','9) ','10) ']
self.titlelist = ['Sonata', 'Polonaise in G Min., Chopin',
'Rondo No. 5 in C# Min., Bartok', 'Sonata in A Maj., Beethoven',
'Polonaise in G Min., Chopin', 'Rondo No. 5 in C# Min., Bartok',
'Sonata in A Maj., Beethoven', 'Polonaise in G Min., Chopin',
'Rondo No. 5 in C# Min., Bartok','Rondo No. 5 in C# Min., Bartok']
self.rankresults = [('Beethoven_vln_sonata5_Francescatti_01.wav',1),('adksfjghl',3)]
print self.rec_input.shape, self.fs
for i in range(10):
self.Sbuttonlist[i].SetLabel('')
self.rankresults = self.musicsearch.query(self.rec_input, self.fs)
print self.rankresults
self.metalist = range(len(self.rankresults))
for i in range(len(self.rankresults)):
self.metalist[i] = self.dict[self.rankresults[i][0]]
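# Result format (added comment): musicsearch.query() returns a ranked list of
# (wav filename, score) pairs; each filename is looked up in self.dict to get a
# (title, composer, key, instruments, artist) tuple that fills the buttons below.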
for i in range(min(10, len(self.metalist))):
self.Sbuttonlist[i].SetLabel(self.ranklist[i] + self.metalist[i][0])
#self.create_main_panel()
self.WC = 2
#self.getmeta1(None)
def on_set_button(self, event):
self.get_plot_type()
params_box = ParamsDialog(self, -1, '', self.fsz, self.hop, self.tmin, self.tmax, self.plot_type)
val = params_box.ShowModal()
self.fsz, self.hop, self.tmin, self.tmax = params_box.return_params()
self.draw_figure()
params_box.Destroy()
def on_pick(self, event):
# The event received here is of the type
# matplotlib.backend_bases.PickEvent
#
# It carries lots of information, of which we're using
# only a small amount here.
#
box_points = event.artist.get_bbox().get_points()
msg = "You've clicked on a bar with coords:\n %s" % box_points
dlg = wx.MessageDialog(
self,
msg,
"Click!",
wx.OK | wx.ICON_INFORMATION)
dlg.ShowModal()
dlg.Destroy()
def on_text_enter(self, event):
self.draw_figure()
def openfile(self, event):
dlg = wx.FileDialog(self, "Choose a file", os.getcwd(), "", "*.wav", wx.OPEN)
if dlg.ShowModal() == wx.ID_OK:
path = dlg.GetPath()
basename = os.path.basename(path)
self.SetStatusText("You selected: %s" % basename)
self.mypath = path
self.x, self.fs, self.nbits = mir2.wavread(self.mypath)
self.rec_input = self.x
self.WC = 1
self.on_reset(self)
self.draw_figure()
dlg.Destroy()
def on_save_plot(self, event):
file_choices = "PNG (*.png)|*.png"
dlg = wx.FileDialog(
self,
message="Save plot as...",
defaultDir=os.getcwd(),
defaultFile="plot.png",
wildcard=file_choices,
style=wx.SAVE)
if dlg.ShowModal() == wx.ID_OK:
path = dlg.GetPath()
self.canvas.print_figure(path, dpi=self.dpi)
self.flash_status_message("Saved to %s" % path)
def on_play(self,event):
if self.WC == 2:
mir2.play(self.x, self.fs)
elif self.WC == 1:
mir2.play(self.rec_input, self.fs)
def on_rec(self,event):
print 'Recording.'
self.rec_input = mir.micread(self.rectime)
self.WC = 1
self.draw_figure()
mir.play(self.rec_input, 44100)
def est_key(self, event):
self.statusbar.SetStatusText('Estimating Key...')
keynum = mir2.Key(self.x, self.fs)
keylist = ['C', 'C#','D','D#','E','F','F#','G','G#','A','A#','B']
self.keystr = keylist[keynum]
self.statusbar.SetStatusText('The Key is: ' + self.keystr)
def on_exit(self, event):
self.Destroy()
def on_reset(self, event):
self.fsz = 0.040
self.hop = 0.020
self.fmax = self.fs
self.tmax = round(float(len(self.x))/self.fs,2)
self.tmin = 0
self.draw_figure()
def on_log_norm(self, event):
if self.LG_flag == 0:
self.LG_flag = 1
elif self.LG_flag == 1:
self.LG_flag = 0
self.draw_figure()
def on_about(self, event):
msg = """ Content-based musical search.\n Brennan Keegan, Steve Tjoa\n Signals and Information Group\n University of Maryland\n April 30, 2011 """
dlg = wx.MessageDialog(self, msg, "About", wx.OK)
dlg.ShowModal()
dlg.Destroy()
def flash_status_message(self, msg, flash_len_ms=1500):
self.statusbar.SetStatusText(msg)
self.timeroff = wx.Timer(self)
self.Bind(
wx.EVT_TIMER,
self.on_flash_status_off,
self.timeroff)
self.timeroff.Start(flash_len_ms, oneShot=True)
def on_flash_status_off(self, event):
self.statusbar.SetStatusText('')
def get_plot_type(self):
plotstr = self.combo.GetStringSelection()
for x in range(len(self.plot_select)):
if plotstr == self.plot_select[x]:
self.plot_type = x
def getmeta1(self, event):
if self.searchbutton1.GetLabel() == '':
return
self.draw_figure(0)
meta = self.metalist[0]
print meta
metastr = 'Title: '+meta[0]+'\n\nComposer: '+meta[1]+'\n\nKey: '+meta[2]+'\n\nInstruments: '+meta[3]+'\n\nArtist: '+meta[4]
dial = wx.MessageDialog(None, metastr, 'Piece Information', wx.OK | wx.ICON_INFORMATION)
dial.ShowModal()
def getmeta2(self, event):
if self.searchbutton2.GetLabel() == '':
return
self.draw_figure(1)
meta = self.metalist[1]
print meta
metastr = 'Title: '+meta[0]+'\n\nComposer: '+meta[1]+'\n\nKey: '+meta[2]+'\n\nInstruments: '+meta[3]+'\n\nArtist: '+meta[4]
dial = wx.MessageDialog(None, metastr, 'Piece Information', wx.OK | wx.ICON_INFORMATION)
dial.ShowModal()
def getmeta3(self, event):
if self.searchbutton3.GetLabel() == '':
return
self.draw_figure(2)
meta = self.metalist[2]
print meta
metastr = 'Title: '+meta[0]+'\n\nComposer: '+meta[1]+'\n\nKey: '+meta[2]+'\n\nInstruments: '+meta[3]+'\n\nArtist: '+meta[4]
dial = wx.MessageDialog(None, metastr, 'Piece Information', wx.OK | wx.ICON_INFORMATION)
dial.ShowModal()
def getmeta4(self, event):
if self.searchbutton4.GetLabel() == '':
return
self.draw_figure(3)
meta = self.metalist[3]
print meta
metastr = 'Title: '+meta[0]+'\n\nComposer: '+meta[1]+'\n\nKey: '+meta[2]+'\n\nInstruments: '+meta[3]+'\n\nArtist: '+meta[4]
dial = wx.MessageDialog(None, metastr, 'Piece Information', wx.OK | wx.ICON_INFORMATION)
dial.ShowModal()
def getmeta5(self, event):
if self.searchbutton5.GetLabel() == '':
return
self.draw_figure(4)
meta = self.metalist[4]
print meta
metastr = 'Title: '+meta[0]+'\n\nComposer: '+meta[1]+'\n\nKey: '+meta[2]+'\n\nInstruments: '+meta[3]+'\n\nArtist: '+meta[4]
dial = wx.MessageDialog(None, metastr, 'Piece Information', wx.OK | wx.ICON_INFORMATION)
dial.ShowModal()
def getmeta6(self, event):
if self.searchbutton6.GetLabel() == '':
return
self.draw_figure(5)
meta = self.metalist[5]
print meta
metastr = 'Title: '+meta[0]+'\n\nComposer: '+meta[1]+'\n\nKey: '+meta[2]+'\n\nInstruments: '+meta[3]+'\n\nArtist: '+meta[4]
dial = wx.MessageDialog(None, metastr, 'Piece Information', wx.OK | wx.ICON_INFORMATION)
dial.ShowModal()
def getmeta7(self, event):
if self.searchbutton7.GetLabel() == '':
return
self.draw_figure(6)
meta = self.metalist[6]
print meta
metastr = 'Title: '+meta[0]+'\n\nComposer: '+meta[1]+'\n\nKey: '+meta[2]+'\n\nInstruments: '+meta[3]+'\n\nArtist: '+meta[4]
dial = wx.MessageDialog(None, metastr, 'Piece Information', wx.OK | wx.ICON_INFORMATION)
dial.ShowModal()
def getmeta8(self, event):
if self.searchbutton8.GetLabel() == '':
return
self.draw_figure(7)
meta = self.metalist[7]
print meta
metastr = 'Title: '+meta[0]+'\n\nComposer: '+meta[1]+'\n\nKey: '+meta[2]+'\n\nInstruments: '+meta[3]+'\n\nArtist: '+meta[4]
dial = wx.MessageDialog(None, metastr, 'Piece Information', wx.OK | wx.ICON_INFORMATION)
dial.ShowModal()
def getmeta9(self, event):
if self.searchbutton9.GetLabel() == '':
return
self.draw_figure(8)
meta = self.metalist[8]
print meta
metastr = 'Title: '+meta[0]+'\n\nComposer: '+meta[1]+'\n\nKey: '+meta[2]+'\n\nInstruments: '+meta[3]+'\n\nArtist: '+meta[4]
dial = wx.MessageDialog(None, metastr, 'Piece Information', wx.OK | wx.ICON_INFORMATION)
dial.ShowModal()
def getmeta10(self, event):
if self.searchbutton10.GetLabel() == '':
return
self.draw_figure(9)
meta = self.metalist[9]
print meta
metastr = 'Title: '+meta[0]+'\n\nComposer: '+meta[1]+'\n\nKey: '+meta[2]+'\n\nInstruments: '+meta[3]+'\n\nArtist: '+meta[4]
dial = wx.MessageDialog(None, metastr, 'Piece Information', wx.OK | wx.ICON_INFORMATION)
dial.ShowModal()
class ParamsDialog(wx.Dialog):
def __init__(self, parent, id, title, fsz, hop, tmin, tmax, plot_type):
wx.Dialog.__init__(self, parent, id, title)#, size = (400,500))
self.fsz, self.hop, self.tmin, self.tmax, self.plot_type = str(fsz), str(hop), str(tmin), str(tmax), plot_type
if self.plot_type == 0:
vbox = wx.BoxSizer(wx.VERTICAL)
hbox1 = wx.BoxSizer(wx.HORIZONTAL)
hbox2 = wx.BoxSizer(wx.HORIZONTAL)
self.tmin_label = wx.StaticText(self, -1, "Start Time (sec): ")
self.tmin_box = wx.TextCtrl(self,-1, self.tmin, style=wx.TE_PROCESS_ENTER)
self.tmax_label = wx.StaticText(self, -1, "End Time (sec): ")
self.tmax_box = wx.TextCtrl(self,-1, self.tmax, style=wx.TE_PROCESS_ENTER)
#self.Bind(wx.EVT_TEXT_ENTER, self.on_text_enter, self.fsz)
hbox1.AddSpacer(80)
hbox1.Add(self.tmin_label, 1, wx.ALIGN_CENTER | wx.TOP)
hbox1.AddSpacer(3)
hbox1.Add(self.tmin_box, 1, wx.ALIGN_CENTER|wx.TOP)
hbox2.AddSpacer(80)
hbox2.Add(self.tmax_label, 1, wx.ALIGN_CENTER | wx.TOP)
hbox2.AddSpacer(9)
hbox2.Add(self.tmax_box, 1, wx.ALIGN_CENTER|wx.TOP)
sizer = self.CreateButtonSizer(wx.CANCEL|wx.OK)
vbox.AddSpacer(10)
vbox.Add(hbox1, 1)
vbox.Add(hbox2, 1)
vbox.AddSpacer(15)
vbox.Add(sizer, 0, wx.ALIGN_CENTER)
vbox.AddSpacer(20)
self.SetSizer(vbox)
self.Bind(wx.EVT_BUTTON, self.OnOK, id=wx.ID_OK)
elif self.plot_type == 1:
self.fmin, self.fmax = '0.00', '44100'
vbox = wx.BoxSizer(wx.VERTICAL)
hbox1 = wx.BoxSizer(wx.HORIZONTAL)
hbox2 = wx.BoxSizer(wx.HORIZONTAL)
hbox3 = wx.BoxSizer(wx.HORIZONTAL)
hbox4 = wx.BoxSizer(wx.HORIZONTAL)
hbox5 = wx.BoxSizer(wx.HORIZONTAL)
hbox6 = wx.BoxSizer(wx.HORIZONTAL)
self.fsz_label = wx.StaticText(self, -1, "Frame Size (sec): ")
self.fsz_box = wx.TextCtrl(self,-1, self.fsz, style=wx.TE_PROCESS_ENTER)
self.hop_label = wx.StaticText(self, -1, "Hop Size (sec): ")
self.hop_box = wx.TextCtrl(self,-1, self.hop, style=wx.TE_PROCESS_ENTER)
self.tmin_label = wx.StaticText(self, -1, "Start Time (sec): ")
self.tmin_box = wx.TextCtrl(self,-1, self.tmin, style=wx.TE_PROCESS_ENTER)
self.tmax_label = wx.StaticText(self, -1, "End Time (sec): ")
self.tmax_box = wx.TextCtrl(self,-1, self.tmax, style=wx.TE_PROCESS_ENTER)
self.fmin_label = wx.StaticText(self, -1, "Min Freq. (Hz): ")
self.fmin_box = wx.TextCtrl(self,-1, self.fmin, style=wx.TE_PROCESS_ENTER)
self.fmax_label = wx.StaticText(self, -1, "Max Freq. (Hz): ")
self.fmax_box = wx.TextCtrl(self,-1, self.fmax, style=wx.TE_PROCESS_ENTER)
#self.Bind(wx.EVT_TEXT_ENTER, self.on_text_enter, self.fsz)
hbox1.AddSpacer(80)
hbox1.Add(self.fsz_label, 1, wx.ALIGN_CENTER | wx.TOP)
hbox1.Add(self.fsz_box, 1, wx.ALIGN_CENTER|wx.TOP)
hbox2.AddSpacer(80)
hbox2.Add(self.hop_label, 1, wx.ALIGN_CENTER | wx.TOP)
hbox2.AddSpacer(13)
hbox2.Add(self.hop_box, 1, wx.ALIGN_CENTER|wx.TOP)
hbox3.AddSpacer(80)
hbox3.Add(self.tmin_label, 1, wx.ALIGN_CENTER | wx.TOP)
hbox3.AddSpacer(3)
hbox3.Add(self.tmin_box, 1, wx.ALIGN_CENTER|wx.TOP)
hbox4.AddSpacer(80)
hbox4.Add(self.tmax_label, 1, wx.ALIGN_CENTER | wx.TOP)
hbox4.AddSpacer(9)
hbox4.Add(self.tmax_box, 1, wx.ALIGN_CENTER|wx.TOP)
hbox5.AddSpacer(80)
hbox5.Add(self.fmin_label, 1, wx.ALIGN_CENTER | wx.TOP)
hbox5.AddSpacer(13)
hbox5.Add(self.fmin_box, 1, wx.ALIGN_CENTER|wx.TOP)
hbox6.AddSpacer(80)
hbox6.Add(self.fmax_label, 1, wx.ALIGN_CENTER | wx.TOP)
hbox6.AddSpacer(9)
hbox6.Add(self.fmax_box, 1, wx.ALIGN_CENTER|wx.TOP)
sizer = self.CreateButtonSizer(wx.CANCEL|wx.OK)
space = 10
vbox.AddSpacer(10)
vbox.Add(hbox1, 1)
vbox.AddSpacer(space)
vbox.Add(hbox2, 1)
vbox.AddSpacer(space)
vbox.Add(hbox3, 1)
vbox.AddSpacer(space)
vbox.Add(hbox4, 1)
vbox.AddSpacer(space)
vbox.Add(hbox5, 1)
vbox.AddSpacer(space)
vbox.Add(hbox6, 1)
vbox.AddSpacer(15)
vbox.Add(sizer, 0, wx.ALIGN_CENTER)
vbox.AddSpacer(20)
self.SetSizer(vbox)
self.Bind(wx.EVT_BUTTON, self.OnOK, id=wx.ID_OK)
elif self.plot_type == 2:
self.fmin, self.fmax = '0', '136'
vbox = wx.BoxSizer(wx.VERTICAL)
hbox1 = wx.BoxSizer(wx.HORIZONTAL)
hbox2 = wx.BoxSizer(wx.HORIZONTAL)
hbox3 = wx.BoxSizer(wx.HORIZONTAL)
hbox4 = wx.BoxSizer(wx.HORIZONTAL)
hbox5 = wx.BoxSizer(wx.HORIZONTAL)
hbox6 = wx.BoxSizer(wx.HORIZONTAL)
self.fsz_label = wx.StaticText(self, -1, "Frame Size (sec): ")
self.fsz_box = wx.TextCtrl(self,-1, self.fsz, style=wx.TE_PROCESS_ENTER)
self.hop_label = wx.StaticText(self, -1, "Hop Size (sec): ")
self.hop_box = wx.TextCtrl(self,-1, self.hop, style=wx.TE_PROCESS_ENTER)
self.tmin_label = wx.StaticText(self, -1, "Start Time (sec): ")
self.tmin_box = wx.TextCtrl(self,-1, self.tmin, style=wx.TE_PROCESS_ENTER)
self.tmax_label = wx.StaticText(self, -1, "End Time (sec): ")
self.tmax_box = wx.TextCtrl(self,-1, self.tmax, style=wx.TE_PROCESS_ENTER)
self.fmin_label = wx.StaticText(self, -1, "Min Pitch (MIDI): ")
self.fmin_box = wx.TextCtrl(self,-1, self.fmin, style=wx.TE_PROCESS_ENTER)
self.fmax_label = wx.StaticText(self, -1, "Max Pitch (MIDI): ")
self.fmax_box = wx.TextCtrl(self,-1, self.fmax, style=wx.TE_PROCESS_ENTER)
#self.Bind(wx.EVT_TEXT_ENTER, self.on_text_enter, self.fsz)
hbox1.AddSpacer(80)
hbox1.Add(self.fsz_label, 1, wx.ALIGN_CENTER | wx.TOP)
hbox1.Add(self.fsz_box, 1, wx.ALIGN_CENTER|wx.TOP)
hbox2.AddSpacer(80)
hbox2.Add(self.hop_label, 1, wx.ALIGN_CENTER | wx.TOP)
hbox2.AddSpacer(13)
hbox2.Add(self.hop_box, 1, wx.ALIGN_CENTER|wx.TOP)
hbox3.AddSpacer(80)
hbox3.Add(self.tmin_label, 1, wx.ALIGN_CENTER | wx.TOP)
hbox3.AddSpacer(3)
hbox3.Add(self.tmin_box, 1, wx.ALIGN_CENTER|wx.TOP)
hbox4.AddSpacer(80)
hbox4.Add(self.tmax_label, 1, wx.ALIGN_CENTER | wx.TOP)
hbox4.AddSpacer(9)
hbox4.Add(self.tmax_box, 1, wx.ALIGN_CENTER|wx.TOP)
hbox5.AddSpacer(80)
hbox5.Add(self.fmin_label, 1, wx.ALIGN_CENTER | wx.TOP)
hbox5.AddSpacer(13)
hbox5.Add(self.fmin_box, 1, wx.ALIGN_CENTER|wx.TOP)
hbox6.AddSpacer(80)
hbox6.Add(self.fmax_label, 1, wx.ALIGN_CENTER | wx.TOP)
hbox6.AddSpacer(9)
hbox6.Add(self.fmax_box, 1, wx.ALIGN_CENTER|wx.TOP)
sizer = self.CreateButtonSizer(wx.CANCEL|wx.OK)
space = 10
vbox.AddSpacer(10)
vbox.Add(hbox1, 1)
vbox.AddSpacer(space)
vbox.Add(hbox2, 1)
vbox.AddSpacer(space)
vbox.Add(hbox3, 1)
vbox.AddSpacer(space)
vbox.Add(hbox4, 1)
vbox.AddSpacer(space)
vbox.Add(hbox5, 1)
vbox.AddSpacer(space)
vbox.Add(hbox6, 1)
vbox.AddSpacer(15)
vbox.Add(sizer, 0, wx.ALIGN_CENTER)
vbox.AddSpacer(20)
self.SetSizer(vbox)
self.Bind(wx.EVT_BUTTON, self.OnOK, id=wx.ID_OK)
elif self.plot_type == 3:
self.fmin, self.fmax = 'C', 'B'
vbox = wx.BoxSizer(wx.VERTICAL)
hbox1 = wx.BoxSizer(wx.HORIZONTAL)
hbox2 = wx.BoxSizer(wx.HORIZONTAL)
hbox3 = wx.BoxSizer(wx.HORIZONTAL)
hbox4 = wx.BoxSizer(wx.HORIZONTAL)
hbox5 = wx.BoxSizer(wx.HORIZONTAL)
hbox6 = wx.BoxSizer(wx.HORIZONTAL)
self.fsz_label = wx.StaticText(self, -1, "Frame Size (sec): ")
self.fsz_box = wx.TextCtrl(self,-1, self.fsz, style=wx.TE_PROCESS_ENTER)
self.hop_label = wx.StaticText(self, -1, "Hop Size (sec): ")
self.hop_box = wx.TextCtrl(self,-1, self.hop, style=wx.TE_PROCESS_ENTER)
self.tmin_label = wx.StaticText(self, -1, "Start Time (sec): ")
self.tmin_box = wx.TextCtrl(self,-1, self.tmin, style=wx.TE_PROCESS_ENTER)
self.tmax_label = wx.StaticText(self, -1, "End Time (sec): ")
self.tmax_box = wx.TextCtrl(self,-1, self.tmax, style=wx.TE_PROCESS_ENTER)
self.fmin_label = wx.StaticText(self, -1, "Min Pitch (Note): ")
self.fmin_box = wx.TextCtrl(self,-1, self.fmin, style=wx.TE_PROCESS_ENTER)
self.fmax_label = wx.StaticText(self, -1, "Max Pitch (Note): ")
self.fmax_box = wx.TextCtrl(self,-1, self.fmax, style=wx.TE_PROCESS_ENTER)
#self.Bind(wx.EVT_TEXT_ENTER, self.on_text_enter, self.fsz)
hbox1.AddSpacer(80)
hbox1.Add(self.fsz_label, 1, wx.ALIGN_CENTER | wx.TOP)
hbox1.Add(self.fsz_box, 1, wx.ALIGN_CENTER|wx.TOP)
hbox2.AddSpacer(80)
hbox2.Add(self.hop_label, 1, wx.ALIGN_CENTER | wx.TOP)
hbox2.AddSpacer(13)
hbox2.Add(self.hop_box, 1, wx.ALIGN_CENTER|wx.TOP)
hbox3.AddSpacer(80)
hbox3.Add(self.tmin_label, 1, wx.ALIGN_CENTER | wx.TOP)
hbox3.AddSpacer(3)
hbox3.Add(self.tmin_box, 1, wx.ALIGN_CENTER|wx.TOP)
hbox4.AddSpacer(80)
hbox4.Add(self.tmax_label, 1, wx.ALIGN_CENTER | wx.TOP)
hbox4.AddSpacer(9)
hbox4.Add(self.tmax_box, 1, wx.ALIGN_CENTER|wx.TOP)
hbox5.AddSpacer(80)
hbox5.Add(self.fmin_label, 1, wx.ALIGN_CENTER | wx.TOP)
hbox5.AddSpacer(13)
hbox5.Add(self.fmin_box, 1, wx.ALIGN_CENTER|wx.TOP)
hbox6.AddSpacer(80)
hbox6.Add(self.fmax_label, 1, wx.ALIGN_CENTER | wx.TOP)
hbox6.AddSpacer(9)
hbox6.Add(self.fmax_box, 1, wx.ALIGN_CENTER|wx.TOP)
sizer = self.CreateButtonSizer(wx.CANCEL|wx.OK)
space = 10
vbox.AddSpacer(10)
vbox.Add(hbox1, 1)
vbox.AddSpacer(space)
vbox.Add(hbox2, 1)
vbox.AddSpacer(space)
vbox.Add(hbox3, 1)
vbox.AddSpacer(space)
vbox.Add(hbox4, 1)
vbox.AddSpacer(space)
vbox.Add(hbox5, 1)
vbox.AddSpacer(space)
vbox.Add(hbox6, 1)
vbox.AddSpacer(15)
vbox.Add(sizer, 0, wx.ALIGN_CENTER)
vbox.AddSpacer(20)
self.SetSizer(vbox)
self.Bind(wx.EVT_BUTTON, self.OnOK, id=wx.ID_OK)
def OnOK(self, event):
if self.plot_type != 0:
self.fsz = float(self.fsz_box.GetValue())
self.hop = float(self.hop_box.GetValue())
self.tmin =float(self.tmin_box.GetValue())
self.tmax =float(self.tmax_box.GetValue())
self.Close()
def return_params(self):
return self.fsz, self.hop, self.tmin, self.tmax
if __name__ == '__main__':
app = wx.PySimpleApp()
app.frame = mirgui()
app.frame.Show()
app.frame.Maximize()
app.MainLoop()
| mit |
lanselin/pysal | pysal/esda/tests/test_getisord.py | 6 | 2987 | import unittest
import numpy as np
from .. import getisord
from ...weights.Distance import DistanceBand
from ...common import pandas
POINTS = [(10, 10), (20, 10), (40, 10), (15, 20), (30, 20), (30, 30)]
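# binary spatial weights: points within a distance of 15 of one another are treated as neighbours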
W = DistanceBand(POINTS, threshold=15)
Y = np.array([2, 3, 3.2, 5, 8, 7])
PANDAS_EXTINCT = pandas is None
class G_Tester(unittest.TestCase):
def setUp(self):
self.w = W
self.y = Y
np.random.seed(10)
def test_G(self):
g = getisord.G(self.y, self.w)
self.assertAlmostEquals(g.G, 0.55709779, places=8)
self.assertAlmostEquals(g.p_norm, 0.1729, places=4)
@unittest.skipIf(PANDAS_EXTINCT, 'missing pandas')
def test_by_col(self):
import pandas as pd
df = pd.DataFrame(self.y, columns=['y'])
np.random.seed(12345)
r1 = getisord.G.by_col(df, ['y'], w=self.w)
this_getisord = np.unique(r1.y_g.values)
this_pval = np.unique(r1.y_p_sim.values)
np.random.seed(12345)
stat = getisord.G(self.y, self.w)
self.assertAlmostEquals(this_getisord, stat._statistic)
self.assertAlmostEquals(this_pval, stat.p_sim)
class G_Local_Tester(unittest.TestCase):
def setUp(self):
self.w = W
self.y = Y
np.random.seed(10)
def test_G_Local_Binary(self):
lg = getisord.G_Local(self.y, self.w, transform='B')
self.assertAlmostEquals(lg.Zs[0], -1.0136729, places=7)
self.assertAlmostEquals(lg.p_sim[0], 0.10100000000000001, places=7)
def test_G_Local_Row_Standardized(self):
lg = getisord.G_Local(self.y, self.w, transform='R')
self.assertAlmostEquals(lg.Zs[0], -0.62074534, places=7)
self.assertAlmostEquals(lg.p_sim[0], 0.10100000000000001, places=7)
def test_G_star_Local_Binary(self):
lg = getisord.G_Local(self.y, self.w, transform='B', star=True)
self.assertAlmostEquals(lg.Zs[0], -1.39727626, places=8)
self.assertAlmostEquals(lg.p_sim[0], 0.10100000000000001, places=7)
def test_G_star_Row_Standardized(self):
lg = getisord.G_Local(self.y, self.w, transform='R', star=True)
self.assertAlmostEquals(lg.Zs[0], -0.62488094, places=8)
self.assertAlmostEquals(lg.p_sim[0], 0.10100000000000001, places=7)
@unittest.skipIf(PANDAS_EXTINCT, 'missing pandas')
def test_by_col(self):
import pandas as pd
df = pd.DataFrame(self.y, columns=['y'])
np.random.seed(12345)
r1 = getisord.G_Local.by_col(df, ['y'], w=self.w)
np.random.seed(12345)
stat = getisord.G_Local(self.y, self.w)
np.testing.assert_allclose(r1.y_g_local.values, stat.Gs)
np.testing.assert_allclose(r1.y_p_sim, stat.p_sim)
suite = unittest.TestSuite()
test_classes = [G_Tester, G_Local_Tester]
for i in test_classes:
a = unittest.TestLoader().loadTestsFromTestCase(i)
suite.addTest(a)
if __name__ == '__main__':
runner = unittest.TextTestRunner()
runner.run(suite)
| bsd-3-clause |
moreati/pandashells | pandashells/test/p_facet_grid_test.py | 10 | 1623 | #! /usr/bin/env python
from mock import patch
from unittest import TestCase
import pandas as pd
from pandashells.bin.p_facet_grid import main
class MainTests(TestCase):
@patch(
'pandashells.bin.p_facet_grid.sys.argv',
'p.facet_grid --row c --map pl.plot --args a b'.split())
@patch('pandashells.bin.p_facet_grid.io_lib.df_from_input')
@patch('pandashells.bin.p_facet_grid.plot_lib.show')
def test_no_kwargs(self, show_mock, input_mock):
import pylab as pl
df_in = pd.DataFrame([
{'a': 1, 'b': 10, 'c': 'alpha'},
{'a': 2, 'b': 20, 'c': 'alpha'},
{'a': 3, 'b': 30, 'c': 'beta'},
{'a': 4, 'b': 40, 'c': 'beta'},
])
input_mock.return_value = df_in
main()
self.assertEqual(len(pl.gcf().axes), 2)
self.assertTrue(show_mock.called)
@patch(
'pandashells.bin.p_facet_grid.sys.argv',
(
'p.facet_grid --row c --map pl.scatter '
'--args a b --kwargs s=100'.split()
)
)
@patch('pandashells.bin.p_facet_grid.io_lib.df_from_input')
@patch('pandashells.bin.p_facet_grid.plot_lib.show')
def test_with_kwargs(self, show_mock, input_mock):
import pylab as pl
df_in = pd.DataFrame([
{'a': 1, 'b': 10, 'c': 'alpha'},
{'a': 2, 'b': 20, 'c': 'alpha'},
{'a': 3, 'b': 30, 'c': 'beta'},
{'a': 4, 'b': 40, 'c': 'beta'},
])
input_mock.return_value = df_in
main()
self.assertEqual(len(pl.gcf().axes), 2)
self.assertTrue(show_mock.called)
| bsd-2-clause |
foreversand/QSTK | Examples/Validation.py | 1 | 5524 | '''
(c) 2011, 2012 Georgia Tech Research Corporation
This source code is released under the New BSD license. Please see
http://wiki.quantsoftware.org/index.php?title=QSTK_License
for license details.
Created on February, 9, 2013
@author: Sourabh Bajaj
@contact: [email protected]
@summary: Python Validation Script
'''
# Printing what Python Version is installed : QSTK uses 2.7
import sys
import platform
print "Python Details : "
print sys.version
print "Your Python Version is : ", platform.python_version()
print "QSTK uses Python 2.7.X (2.7.3 recommended and supported)"
print "Please make sure you're using the correct python version."
print
# Printing the directory you are in
import os
print "Current Directory : ", os.path.abspath('.')
print
# Printing files in the current directory.
print "Files in the current directory"
ls_files = os.listdir('.')
for s_file in ls_files:
print s_file
print
# Testing the dependencies
# Testing numpy
try:
import numpy
print "Numpy is installed and the version used is : ", numpy.__version__
print "Please make sure you're using version >= 1.6.1"
except:
sys.exit("Error : Numpy can not be imported or not installed.")
print
# Testing matplotlib
try:
import matplotlib
print "Matplotlib is installed and version is : ", matplotlib.__version__
print "Please make sure you're using version >= 1.1.0"
except:
sys.exit("Error : Matplotlib can not be imported or not installed.")
print
# Testing Pandas
try:
import pandas
print "Pandas is installed and the version used is : ", pandas.__version__
print "Please make sure you're using version == 0.7.3"
print "IMPORTANT: No other pandas version is supported except 0.7.3"
s_pd_version = pandas.__version__
if s_pd_version[:5] != '0.7.3':
sys.exit("Error : Pandas version should be 0.7.3")
except:
print "Error : Please install Pandas 0.7.3"
sys.exit("Error : Pandas can not be imported or not installed.")
print
# Testing Scipy
try:
import scipy
print "Scipy is installed and the version used is : ", scipy.__version__
print "Please make sure you're using version >= 0.9.0"
except:
sys.exit("Error : Scipy can not be imported or not installed.")
print
# Testing Dateutil
try:
import dateutil
print "Dateutil is installed and the version used is : ", dateutil.__version__
print "Please make sure you're using version == 1.5"
except:
sys.exit("Error : Dateutil can not be imported or not installed.")
print
# Testing Setuptools
try:
import setuptools
print "Setuptools is installed and the version used is : ", setuptools.__version__
print "Please make sure you're using version >= 0.6"
except:
sys.exit("Error : Setuptools can not be imported or not installed.")
print
# # Testing CVXOPT
# try:
# import cvxopt
# print "CVXOPT is installed and can be imported"
# except:
# sys.exit("Error : CVXOPT can not be imported or not installed.")
# print
# Testing datetime
try:
import datetime as dt
print "datetime is installed and can be imported"
except:
sys.exit("Error : datetime can not be imported or not installed.")
print
# All dependencies are installed and working
print "All dependencies are installed and working\n"
# Testing import of QSTK
# Testing QSTK
try:
import QSTK
print "QSTK is installed and can be imported"
except:
sys.exit("Error : QSTK can not be imported or not installed.")
print
# Testing QSTK.qstkutil
try:
import QSTK.qstkutil.tsutil as tsu
import QSTK.qstkutil.qsdateutil as du
import QSTK.qstkutil.DataAccess as da
print "QSTK.qstkutil is installed and can be imported"
except:
exit("Error : QSTK.qstkutil can not be imported.")
print
# Testing QSTK.qstkstudy
try:
import QSTK.qstkstudy.EventProfiler
print "QSTK.qstkstudy is installed and can be imported"
except:
exit("Error : QSTK.qstkstudy can not be imported.")
print
# Checking that the data installed is correct.
# Start and End date of the charts
dt_start = dt.datetime(2012, 2, 10)
dt_end = dt.datetime(2012, 2, 24)
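# 16:00 corresponds to the NYSE close, the time of day QSTK attaches to daily bars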
dt_timeofday = dt.timedelta(hours=16)
# Get a list of trading days between the start and the end.
ldt_timestamps = du.getNYSEdays(dt_start, dt_end, dt_timeofday)
ls_symbols = ['MSFT', 'GOOG']
# Creating an object of the dataaccess class with Yahoo as the source.
c_dataobj = da.DataAccess('Yahoo')
# Reading adjusted_close prices
df_close = c_dataobj.get_data(ldt_timestamps, ls_symbols, "close")
print df_close
print
print "\nCorrect Output using the Default Data should be : "
print "Assignments use this data for grading"
print " MSFT GOOG"
print "2012-02-10 16:00:00 29.90 605.91"
print "2012-02-13 16:00:00 29.98 612.20"
print "2012-02-14 16:00:00 29.86 609.76"
print "2012-02-15 16:00:00 29.66 605.56"
print "2012-02-16 16:00:00 30.88 606.52"
print "2012-02-17 16:00:00 30.84 604.64"
print "2012-02-21 16:00:00 31.03 614.00"
print "2012-02-22 16:00:00 30.86 607.94"
print "2012-02-23 16:00:00 30.96 606.11"
print
dt_test = dt.datetime(2012, 2, 15, 16)
print "Close price of MSFT on 2012/2/15 is : ", df_close['MSFT'].ix[dt_test]
if df_close['MSFT'].ix[dt_test] == 29.66:
print "Data looks correct as the close price in default data is 29.66"
else:
print "Default data used in the assisgnments has close price as 29.66"
sys.exit("Error : Data has changed so does not match data used in Assignments")
print
print "Everything works fine: You're all set."
| bsd-3-clause |
hvillanua/deep-learning | weight-initialization/helper.py | 153 | 3649 | import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
def hist_dist(title, distribution_tensor, hist_range=(-4, 4)):
"""
Display histogram of a TF distribution
"""
with tf.Session() as sess:
values = sess.run(distribution_tensor)
plt.title(title)
    plt.hist(values, np.linspace(*hist_range, num=len(values)//2))
plt.show()
def _get_loss_acc(dataset, weights):
"""
Get losses and validation accuracy of example neural network
"""
batch_size = 128
epochs = 2
learning_rate = 0.001
features = tf.placeholder(tf.float32)
labels = tf.placeholder(tf.float32)
learn_rate = tf.placeholder(tf.float32)
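    # The caller supplies the weight matrices; the bias shapes below imply two hidden
    # layers of 256 and 128 units feeding an output layer sized to the label count.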
biases = [
tf.Variable(tf.zeros([256])),
tf.Variable(tf.zeros([128])),
tf.Variable(tf.zeros([dataset.train.labels.shape[1]]))
]
# Layers
layer_1 = tf.nn.relu(tf.matmul(features, weights[0]) + biases[0])
layer_2 = tf.nn.relu(tf.matmul(layer_1, weights[1]) + biases[1])
logits = tf.matmul(layer_2, weights[2]) + biases[2]
# Training loss
loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=labels))
# Optimizer
optimizer = tf.train.AdamOptimizer(learn_rate).minimize(loss)
# Accuracy
correct_prediction = tf.equal(tf.argmax(logits, 1), tf.argmax(labels, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
# Measurements use for graphing loss
loss_batch = []
with tf.Session() as session:
session.run(tf.global_variables_initializer())
batch_count = int((dataset.train.num_examples / batch_size))
# The training cycle
for epoch_i in range(epochs):
for batch_i in range(batch_count):
batch_features, batch_labels = dataset.train.next_batch(batch_size)
# Run optimizer and get loss
session.run(
optimizer,
feed_dict={features: batch_features, labels: batch_labels, learn_rate: learning_rate})
l = session.run(
loss,
feed_dict={features: batch_features, labels: batch_labels, learn_rate: learning_rate})
loss_batch.append(l)
valid_acc = session.run(
accuracy,
feed_dict={features: dataset.validation.images, labels: dataset.validation.labels, learn_rate: 1.0})
# Hack to Reset batches
dataset.train._index_in_epoch = 0
dataset.train._epochs_completed = 0
return loss_batch, valid_acc
def compare_init_weights(
dataset,
title,
weight_init_list,
plot_n_batches=100):
"""
Plot loss and print stats of weights using an example neural network
"""
colors = ['r', 'b', 'g', 'c', 'y', 'k']
label_accs = []
label_loss = []
    assert len(weight_init_list) <= len(colors), 'Too many initial weights to plot'
for i, (weights, label) in enumerate(weight_init_list):
loss, val_acc = _get_loss_acc(dataset, weights)
plt.plot(loss[:plot_n_batches], colors[i], label=label)
label_accs.append((label, val_acc))
label_loss.append((label, loss[-1]))
plt.title(title)
plt.xlabel('Batches')
plt.ylabel('Loss')
plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
plt.show()
print('After 858 Batches (2 Epochs):')
print('Validation Accuracy')
for label, val_acc in label_accs:
print(' {:7.3f}% -- {}'.format(val_acc*100, label))
print('Loss')
for label, loss in label_loss:
print(' {:7.3f} -- {}'.format(loss, label))
| mit |
acshu/transit-gui | lib/Layout.py | 1 | 51186 | # -*- coding: utf-8 -*-
from ConfigParser import ConfigParser
from ast import literal_eval
from copy import copy
from genericpath import exists
from math import atan, degrees, sin, sqrt, log10
import operator
import os
import sys
import csv
from PyQt4.QtCore import Qt, pyqtSignal, QString, QAbstractTableModel, QVariant, QEvent
from PyQt4.QtGui import QWidget, QHBoxLayout, QVBoxLayout, QPushButton, QProgressBar, QGridLayout, QLabel, QCheckBox, QFileDialog, QMessageBox, QTabWidget, QLineEdit, QPalette, QSizePolicy, QColor, QTableWidget, QAbstractItemView, QMenu, QTableWidgetItem, QTableView, QAction
import math
import gc
from matplotlib import rcParams
from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.figure import Figure
from lib.Structures import Global, TaskRange
from lib.Utils import Constants, TaskImporter, flip_phase_list, uc_variable_name
from lib.FormParams import *
class Layout(QWidget):
def __init__(self):
super(Layout, self).__init__()
self.setLayout(QHBoxLayout())
self.form = InputForm()
self.result = ResultView()
self.setObjectName('Layout')
self.layout().addWidget(self.form)
self.layout().addWidget(self.result)
class InputForm(QWidget):
__instance = None
@staticmethod
def instance():
return InputForm.__instance
def __init__(self):
if not InputForm.__instance:
InputForm.__instance = self
super(InputForm, self).__init__()
Global.event.task_started.connect(self._on_task_started)
Global.event.task_completed.connect(self._on_task_completed)
Global.event.task_progressed.connect(self._on_task_progressed)
Global.event.task_range_progressed.connect(self._on_task_range_progressed)
Global.event.interface_load_task_params.connect(self._on_interface_load_task_params)
self.vl = QVBoxLayout()
self.vl.setContentsMargins(0,0,0,0)
self.setLayout(self.vl)
self.setFixedWidth(290)
self.tab = QTabWidget()
self.vl.addWidget(self.tab)
self.input_parameters = InputParameters()
self.input_parameters.ranges_state_changed.connect(self._on_ranges_state_changed)
self.tab.addTab(self.input_parameters, 'Parameters')
self.import_parameters = ImportParameters()
self.tab.addTab(self.import_parameters, 'Observation')
control_buttons = QWidget()
control_buttons.setLayout(QVBoxLayout())
control_buttons.layout().setContentsMargins(0, 0, 0, 0)
control_buttons.layout().setAlignment(Qt.AlignBottom)
self._progress = QProgressBar()
self._progress.setValue(0)
self._progress.setTextVisible(True)
self._progress.setAlignment(Qt.AlignCenter)
self._progress.hide()
control_buttons.layout().addWidget(self._progress)
self._range_progress = QProgressBar()
self._range_progress.setValue(0)
self._range_progress.setTextVisible(True)
self._range_progress.setAlignment(Qt.AlignCenter)
self._range_progress.hide()
control_buttons.layout().addWidget(self._range_progress)
self._calculate = QPushButton('Calculate')
self._calculate.clicked.connect(self._on_calculate)
control_buttons.layout().addWidget(self._calculate)
self._cancel = QPushButton('Cancel')
self._cancel.hide()
self._cancel.clicked.connect(self._on_cancel)
control_buttons.layout().addWidget(self._cancel)
self.vl.addWidget(control_buttons)
if exists("./config/last-session.ini") :
self.load_params("./config/last-session.ini")
def _on_ranges_state_changed(self, parameters):
Global.task_range().reset()
if len(parameters):
keys = parameters.keys()
for key in parameters:
Global.task_range().set_range(key, copy(parameters[key].range.values))
self._calculate.setText('Calculate ' + str(Global.task_range()._total_count) + ' variations')
else:
self._calculate.setText('Calculate')
def _on_calculate(self):
self.import_parameters.import_observation()
combination = Global.task_range().get_next_combination()
Global.task().input.semi_major_axis = self.input_parameters.semi_major_axis.getValue()
Global.task().input.star_radius = self.input_parameters.star_radius.getValue()
Global.task().input.planet_radius = self.input_parameters.planet_radius.getValue()
Global.task().input.star_temperature = self.input_parameters.star_temperature.getValue()
Global.task().input.planet_temperature = self.input_parameters.planet_temperature.getValue()
Global.task().input.darkening_law = self.input_parameters.darkening_law.value.itemData(self.input_parameters.darkening_law.value.currentIndex()).toString()
Global.task().input.darkening_coefficient_1 = self.input_parameters.darkening_coefficient_1.getValue()
Global.task().input.darkening_coefficient_2 = self.input_parameters.darkening_coefficient_2.getValue()
Global.task().input.inclination = self.input_parameters.inclination.getValue()
Global.task().input.phase_start = 0
Global.task().input.phase_end = self.input_parameters.phase_end.getValue()
Global.task().input.phase_step = self.input_parameters.phase_step.getValue()
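        # the precision spin box stores log10 of the integration precision, so convert back here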
Global.task().input.precision = 10**self.input_parameters.integration_precision.getValue()
if combination:
for param in combination:
setattr(Global.task().input, param[0], param[1])
Global.task().start()
def _on_task_range_progressed(self, progress):
self._range_progress.setFormat( str(Global.task_range().completed_count()) + ' of ' + str(Global.task_range().total_count()))
self._range_progress.setValue(math.ceil(((float(Global.task_range().completed_count()))/Global.task_range().total_count())*100))
self._on_calculate()
def _on_task_started(self, task):
self._calculate.hide()
self._progress.show()
self._progress.setValue(0)
self._cancel.show()
self.tab.setDisabled(True)
if Global.task_range().total_count():
self._range_progress.show()
if Global.task_range().completed_count() == 0:
self._range_progress.setFormat('0 of ' + str(Global.task_range().total_count()))
self._range_progress.setValue(0)
def _on_task_progressed(self, task, progress):
self._progress.setValue(progress)
def _on_task_completed(self, task):
if Global.task_range().total_count() and Global.task_range().completed_count():
return
self._calculate.show()
self._progress.hide()
self._progress.setValue(0)
self._cancel.hide()
self.tab.setDisabled(False)
self._range_progress.hide()
self._range_progress.setValue(0)
def _on_cancel(self):
Global.task().stop()
self._calculate.show()
self._progress.hide()
self._progress.setValue(0)
self._range_progress.hide()
self._range_progress.setValue(0)
self._cancel.hide()
self.tab.setDisabled(False)
def _on_interface_load_task_params(self, task):
self.input_parameters.semi_major_axis.value.setValue(task.input.semi_major_axis)
self.input_parameters.star_radius.value.setValue(task.input.star_radius)
self.input_parameters.planet_radius.value.setValue(task.input.planet_radius)
self.input_parameters.star_temperature.value.setValue(task.input.star_temperature)
self.input_parameters.planet_temperature.value.setValue(task.input.planet_temperature)
self.input_parameters.inclination.value.setValue(task.input.inclination)
darkening_law_index = 0
for item in DarkeningLaw.items:
if item[1] == task.input.darkening_law:
break
darkening_law_index += 1
self.input_parameters.darkening_law.value.setCurrentIndex(darkening_law_index)
self.input_parameters.darkening_coefficient_1.value.setValue(task.input.darkening_coefficient_1)
self.input_parameters.darkening_coefficient_2.value.setValue(task.input.darkening_coefficient_2)
self.input_parameters.phase_end.value.setValue(task.input.phase_end)
self.input_parameters.phase_step.value.setValue(task.input.phase_step)
self.input_parameters.integration_precision.value.setValue(log10(task.input.precision))
for parameter_name in copy(self.input_parameters.range_parameters):
parameter = getattr(self.input_parameters, parameter_name)
if parameter.range:
parameter.range.set_active(False)
self.repaint()
def load_params(self, filename):
config = ConfigParser()
config.read(filename)
self._normalize_config(config)
# Input Parameters
self._load_config_param(config, 'input', 'semi_major_axis')
self._load_config_param(config, 'input', 'star_radius')
self._load_config_param(config, 'input', 'planet_radius')
self._load_config_param(config, 'input', 'star_temperature')
self._load_config_param(config, 'input', 'planet_temperature')
self._load_config_param(config, 'input', 'inclination')
self._load_config_param(config, 'input', 'darkening_law')
self._load_config_param(config, 'input', 'darkening_coefficient_1')
self._load_config_param(config, 'input', 'darkening_coefficient_2')
self._load_config_param(config, 'input', 'phase_end')
self._load_config_param(config, 'input', 'phase_step')
self._load_config_param(config, 'input', 'integration_precision')
# Import Parameters
if config.has_option('import', 'filename') and config.get('import', 'filename'):
if '/data/' in config.get('import', 'filename') and config.get('import', 'filename').index('/data/') == 0:
self.import_parameters.filename = os.getcwd().replace('\\', '/') + config.get('import', 'filename')
else:
self.import_parameters.filename = config.get('import', 'filename')
self.import_parameters.update_file_label()
if config.has_option('import', 'jd2phase') and config.getboolean('import', 'jd2phase') == True :
self.import_parameters.hjd_to_phases.setCheckState(Qt.Checked)
if config.has_option('import', 'jd2phase_tzero') :
self.import_parameters.time_zero.setValue(config.getfloat('import', 'jd2phase_tzero'))
if config.has_option('import', 'jd2phase_period') :
self.import_parameters.period.setValue(config.getfloat('import', 'jd2phase_period'))
if config.has_option('import', 'mag2flux') and config.getboolean('import', 'mag2flux') == True :
self.import_parameters.magnitude_to_flux.setCheckState(Qt.Checked)
if config.has_option('import', 'mag2flux_mag') :
self.import_parameters.magnitude_max.setValue(config.getfloat('import', 'mag2flux_mag'))
        # Fixes a painting bug with the range buttons when loading a new file:
        # active ranges stayed highlighted even though they were inactive
self.repaint()
def _normalize_config(self, config):
if config.has_option('input', 'darkening_1'):
config.set('input', 'darkening_coefficient_1', config.get('input', 'darkening_1'))
config.remove_option('input', 'darkening_1')
if config.has_option('input', 'darkening_2'):
config.set('input', 'darkening_coefficient_2', config.get('input', 'darkening_2'))
config.remove_option('input', 'darkening_2')
if config.has_option('input', 'precision'):
config.set('input', 'integration_precision', config.get('input', 'precision'))
config.remove_option('input', 'precision')
def _load_config_param(self, config, section, name):
param = getattr(self.input_parameters, name)
if config.has_option(section, name):
if type(param.value) is QComboBox:
param.value.setCurrentIndex(config.getint(section, name))
else:
param.value.setValue(literal_eval(config.get(section, name)))
if param.range:
_from = _to = _step = _values = None
_active = False
if config.has_option(section, name + '_range_from'):
_from = literal_eval(config.get(section, name + '_range_from'))
if config.has_option(section, name + '_range_to'):
_to = literal_eval(config.get(section, name + '_range_to'))
if config.has_option(section, name + '_range_step'):
_step = literal_eval(config.get(section, name + '_range_step'))
if config.has_option(section, name + '_range_values'):
_values = literal_eval(config.get(section, name + '_range_values'))
if config.has_option(section, name + '_range_active'):
_active = config.getboolean(section, name + '_range_active')
if _values:
param.range.set_range(_values)
elif _from and _to and _step:
param.range.set_range(_from, _to, _step)
param.range.set_active(_active)
def _save_config_param(self, config, section, name):
param = getattr(self.input_parameters, name)
if type(param.value) is QComboBox:
config.set(section, name, param.value.currentIndex())
else:
config.set(section, name, param.getValue())
if param.range:
if param.range.range_type == RangeButton.TYPE_STEP and \
param.range.range_from and param.range.range_to and param.range.range_step:
config.set(section, name + '_range_from', param.range.range_from)
config.set(section, name + '_range_to', param.range.range_to)
config.set(section, name + '_range_step', param.range.range_step)
elif param.range.range_type == RangeButton.TYPE_VALUES and param.range.values:
config.set(section, name + '_range_values', param.range.values)
if param.range.is_active():
config.set(section, name + '_range_active', param.range.is_active())
def save_params(self, filename):
config = ConfigParser()
config.add_section('input')
# Input Parameters
self._save_config_param(config, 'input', 'semi_major_axis')
self._save_config_param(config, 'input', 'star_radius')
self._save_config_param(config, 'input', 'planet_radius')
self._save_config_param(config, 'input', 'star_temperature')
self._save_config_param(config, 'input', 'planet_temperature')
self._save_config_param(config, 'input', 'inclination')
self._save_config_param(config, 'input', 'darkening_law')
self._save_config_param(config, 'input', 'darkening_coefficient_1')
self._save_config_param(config, 'input', 'darkening_coefficient_2')
self._save_config_param(config, 'input', 'phase_end')
self._save_config_param(config, 'input', 'phase_step')
self._save_config_param(config, 'input', 'integration_precision')
config.add_section('import')
if os.getcwd().replace('\\', '/') in str(self.import_parameters.filename) and str(self.import_parameters.filename).index(os.getcwd().replace('\\', '/')) == 0 :
save_file_path = str(self.import_parameters.filename).replace(os.getcwd().replace('\\', '/'), '')
else:
save_file_path = str(self.import_parameters.filename)
config.set('import', 'filename', save_file_path)
config.set('import', 'jd2phase', self.import_parameters.hjd_to_phases.checkState() == Qt.Checked)
config.set('import', 'jd2phase_tzero', self.import_parameters.time_zero.value())
config.set('import', 'jd2phase_period', self.import_parameters.period.value())
config.set('import', 'mag2flux', self.import_parameters.magnitude_to_flux.checkState() == Qt.Checked)
config.set('import', 'mag2flux_mag', self.import_parameters.magnitude_max.value())
with open(filename, 'wb') as configfile:
config.write(configfile)
pass
class InputParameters(QWidget):
ranges_state_changed = pyqtSignal(dict)
def __init__(self):
QWidget.__init__(self)
self.range_parameters = dict()
self.grid = QGridLayout()
self.grid.setAlignment(Qt.AlignTop)
self.grid.setColumnStretch(2, 2)
self.setLayout(self.grid)
# Semi-major axis
self.semi_major_axis = self.add_triplet(SemiMajorAxis(), 1)
self.semi_major_axis.range.clicked.connect(lambda: self._on_range_clicked('semi_major_axis'))
self.semi_major_axis.range.state_changed.connect(self.semi_major_axis.value.setDisabled)
self.semi_major_axis.range.state_changed.connect(lambda: self._on_range_changed('semi_major_axis'))
# Star radius
self.star_radius = self.add_triplet(StarRadiusAU(), 2)
self.star_radius_rs = self.add_triplet(StarRadiusRS(), 3)
self.star_radius.range.clicked.connect(lambda: self._on_range_clicked('star_radius'))
self.star_radius.range.state_changed.connect(self.star_radius.value.setDisabled)
self.star_radius.range.state_changed.connect(self.star_radius_rs.value.setDisabled)
self.star_radius.range.state_changed.connect(lambda: self._on_range_changed('star_radius'))
self.star_radius.value.valueChanged.connect(self._on_star_radius_change)
self.star_radius_rs.value.valueChanged.connect(self._on_star_radius_rs_change)
# Planet radius
self.planet_radius = self.add_triplet(PlanetRadiusAU(), 4)
self.planet_radius_rj = self.add_triplet(PlanetRadiusRJ(), 5)
self.planet_radius.range.clicked.connect(lambda: self._on_range_clicked('planet_radius'))
self.planet_radius.range.state_changed.connect(self.planet_radius.value.setDisabled)
self.planet_radius.range.state_changed.connect(self.planet_radius_rj.value.setDisabled)
self.planet_radius.range.state_changed.connect(lambda: self._on_range_changed('planet_radius'))
self.planet_radius.value.valueChanged.connect(self._on_planet_radius_change)
self.planet_radius_rj.value.valueChanged.connect(self._on_planet_radius_rj_change)
# Star temperature
self.star_temperature = self.add_triplet(StarTemperature(), 6)
self.star_temperature.range.clicked.connect(lambda: self._on_range_clicked('star_temperature'))
self.star_temperature.range.state_changed.connect(self.star_temperature.value.setDisabled)
self.star_temperature.range.state_changed.connect(lambda: self._on_range_changed('star_temperature'))
# Planet temperature
self.planet_temperature = self.add_triplet(PlanetTemperature(), 7)
self.planet_temperature.range.clicked.connect(lambda: self._on_range_clicked('planet_temperature'))
self.planet_temperature.range.state_changed.connect(self.planet_temperature.value.setDisabled)
self.planet_temperature.range.state_changed.connect(lambda: self._on_range_changed('planet_temperature'))
# Inclination
self.inclination = self.add_triplet(Inclination(), 8)
self.inclination.range.clicked.connect(lambda: self._on_range_clicked('inclination'))
self.inclination.range.state_changed.connect(self.inclination.value.setDisabled)
self.inclination.range.state_changed.connect(lambda: self._on_range_changed('inclination'))
# Darkening law
self.darkening_law = self.add_triplet(DarkeningLaw(), 9)
self.darkening_law.range.clicked.connect(lambda: self._on_range_clicked('darkening_law'))
self.darkening_law.range.state_changed.connect(self.darkening_law.value.setDisabled)
self.darkening_law.range.state_changed.connect(lambda: self._on_range_changed('darkening_law'))
# Darkening coefficients
self.darkening_coefficient_1 = self.add_triplet(DarkeningCoefficient('Dark. coefficient 1:', ''), 10)
self.darkening_coefficient_1.range.clicked.connect(lambda: self._on_range_clicked('darkening_coefficient_1'))
self.darkening_coefficient_1.range.state_changed.connect(self.darkening_coefficient_1.value.setDisabled)
self.darkening_coefficient_1.range.state_changed.connect(lambda: self._on_range_changed('darkening_coefficient_1'))
self.darkening_coefficient_2 = self.add_triplet(DarkeningCoefficient('Dark. coefficient 2:', ''), 11)
self.darkening_coefficient_2.range.clicked.connect(lambda: self._on_range_clicked('darkening_coefficient_2'))
self.darkening_coefficient_2.range.state_changed.connect(self.darkening_coefficient_2.value.setDisabled)
self.darkening_coefficient_2.range.state_changed.connect(lambda: self._on_range_changed('darkening_coefficient_2'))
# Phase end
self.phase_end = self.add_triplet(PhaseEnd(), 12)
# Phase step
self.phase_step = self.add_triplet(PhaseStep(), 13)
# integration precision
self.integration_precision = self.add_triplet(IntegrationPrecision(), 14)
def _on_star_radius_change(self, value):
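        # keep the AU and solar-radius boxes in sync; signals are blocked so the
        # mirrored setValue() call does not trigger an endless update loop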
self.star_radius_rs.value.blockSignals(True)
self.star_radius_rs.value.setValue(Constants.au_to_rs(value))
self.star_radius_rs.value.blockSignals(False)
def _on_star_radius_rs_change(self, value):
self.star_radius.value.blockSignals(True)
self.star_radius.value.setValue(Constants.rs_to_au(value))
self.star_radius.value.blockSignals(False)
def _on_planet_radius_change(self, value):
self.planet_radius_rj.value.blockSignals(True)
self.planet_radius_rj.value.setValue(Constants.au_to_rj(value))
self.planet_radius_rj.value.blockSignals(False)
def _on_planet_radius_rj_change(self, value):
self.planet_radius.value.blockSignals(True)
self.planet_radius.value.setValue(Constants.rj_to_au(value))
self.planet_radius.value.blockSignals(False)
def _on_range_clicked(self, name):
if not getattr(self, name).range.is_active():
if getattr(self, name) == self.darkening_law:
dialog = getattr(sys.modules[__name__], uc_variable_name(name) + 'RangeDialog')(getattr(self, name).range.values)
else:
dialog = getattr(sys.modules[__name__], uc_variable_name(name) + 'RangeDialog')(getattr(self, name).range.range_from, getattr(self, name).range.range_to, getattr(self, name).range.range_step)
dialog.accepted.connect(lambda: self._on_range_accepted(name))
dialog.rejected.connect(lambda: self._on_range_rejected(name))
dialog.display()
else:
self._on_range_rejected(name)
pass
def _on_range_accepted(self, name):
if getattr(self, name) == self.darkening_law:
getattr(self, name).range.set_range(self.sender().values())
else:
getattr(self, name).range.set_range(getattr(self.sender(), name + '_from').getValue(),
getattr(self.sender(), name + '_to').getValue(),
getattr(self.sender(), name + '_step').getValue())
getattr(self, name).range.set_active(True)
def _on_range_rejected(self, name):
getattr(self, name).range.set_active(False)
if name == 'planet_radius':
self.planet_radius_rj.value.setDisabled(False)
def _on_range_changed(self, name):
if getattr(self, name).range.is_active():
self.range_parameters[name] = getattr(self, name)
elif self.range_parameters.has_key(name):
del self.range_parameters[name]
self.ranges_state_changed.emit(self.range_parameters)
def add_triplet(self, triplet, position):
self.grid.addWidget(triplet.label, position, 0)
self.grid.addWidget(triplet.range, position, 1)
self.grid.addWidget(triplet.value, position, 2)
self.grid.addWidget(triplet.unit, position, 3)
return triplet
class ImportParameters(QWidget):
def __init__(self):
QWidget.__init__(self)
self.filename = ''
self.import_phases = []
self.import_values = []
grid = QGridLayout()
grid.setAlignment(Qt.AlignTop)
grid.setColumnStretch(1,1)
self.setLayout(grid)
self.filename_label = QLabel('No file selected')
self.file_browse = QPushButton('Browse...')
self.file_browse.setFixedWidth(85)
self.file_browse.clicked.connect(self._on_file_browse)
self.file_clear = QPushButton('Clear')
self.file_clear.setFixedWidth(85)
self.file_clear.clicked.connect(self._on_file_clear)
self.file_clear.setHidden(True)
grid.addWidget(self.filename_label, 1, 0, 1, 0)
grid.addWidget(self.file_browse, 1, 3)
grid.addWidget(self.file_clear, 1, 3)
self.hjd_to_phases = QCheckBox('Convert HJD to phases')
self.hjd_to_phases.stateChanged.connect(self._on_hjd_state_changed)
grid.addWidget(self.hjd_to_phases, 2, 0, 1, 0)
self.time_zero_label = QLabel('T<sub>0</sub>')
self.time_zero_label.setFixedWidth(20)
self.time_zero = CustomDoubleSpinBox()
self.time_zero.setSingleStep(0.01)
self.time_zero.setDecimals(10)
self.time_zero.setAccelerated(True)
self.time_zero.setDisabled(True)
self.time_zero.setMinimum(0)
self.time_zero.setFixedWidth(200)
self.time_zero.setRange(0, sys.float_info.max)
grid.addWidget(self.time_zero_label, 3, 0)
grid.addWidget(self.time_zero, 3, 1)
self.period_label = QLabel('P')
self.period_label.setFixedWidth(20)
self.period = CustomDoubleSpinBox()
self.period.setFixedWidth(200)
self.period.setDisabled(True)
self.period.setRange(0, sys.float_info.max)
self.period.setDecimals(10)
grid.addWidget(self.period_label, 4, 0)
grid.addWidget(self.period, 4, 1)
self.magnitude_to_flux = QCheckBox('Convert magnitude to flux')
self.magnitude_to_flux.stateChanged.connect(self._on_magnitude_state_changed)
grid.addWidget(self.magnitude_to_flux, 5, 0, 1, 0)
self.magnitude_max_label = QLabel('Mag')
self.magnitude_max = CustomDoubleSpinBox()
self.magnitude_max.setSingleStep(0.01)
self.magnitude_max.setDecimals(10)
self.magnitude_max.setAccelerated(True)
self.magnitude_max.setDisabled(True)
self.magnitude_max.setMinimum(0)
self.magnitude_max.setFixedWidth(105)
grid.addWidget(self.magnitude_max_label, 6, 0)
grid.addWidget(self.magnitude_max, 6, 1)
self.redraw = QPushButton("Redraw")
self.redraw.clicked.connect(self._on_redraw)
grid.addWidget(self.redraw, 6,3)
def _on_file_browse(self):
directory = "" if self.filename is None else QString(str("/").join(str(self.filename).split("/")[:-1]))
types = TaskImporter.get_formats()
filters = []
for value in types :
filters.append(value.upper() + " (*." + value + ")")
filters.append("All files (*.*)")
self.filename = QFileDialog.getOpenFileName(self, 'Open file', directory=directory, filter=";;".join(filters))
self.update_file_label()
def _on_file_clear(self):
self.filename = ''
self.update_file_label()
def update_file_label(self):
if self.filename :
self.filename_label.setText(self.filename.split("/")[-1])
self.file_clear.setHidden(False)
self.redraw.setDisabled(False)
else:
self.filename_label.setText('No file selected')
self.file_clear.setHidden(True)
self.redraw.setDisabled(True)
pass
def import_observation(self):
if not self.filename :
return
try:
phases, values = TaskImporter.load_file(self.filename)
# convert JD time to phases
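            # phase = ((HJD - T0) / P) mod 1, the fractional number of periods elapsed since epoch T0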
if self.hjd_to_phases.checkState() == Qt.Checked:
if self.time_zero.value() <= 0 :
QMessageBox.warning(self, "Error", 'Invalid parameter "T<sub>0</sub>"!')
return
if self.period.value() <= 0 :
QMessageBox.warning(self, "Error", 'Invalid parameter "P"!')
return
for (index, phase) in enumerate(phases):
phases[index] = (phase - self.time_zero.value()) / self.period.value() % 1
# convert magnitude to flux
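            # relative flux from magnitude: flux = 10**(-(m - m_max) / 2.5), with "Mag" as the reference magnitude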
if self.magnitude_to_flux.checkState() == Qt.Checked:
for (index, value) in enumerate(values):
values[index] = 10**(-(value - self.magnitude_max.value())/2.5)
phases = flip_phase_list(phases)
# TODO Detrending
#slope = (values[8] - values[-8])/(phases[8] - phases[-8])
#angle = atan(slope)
#
#for index, value in enumerate(values):
# hyp = sqrt(abs((phases[-8] - phases[index])**2 - (values[-8] - values[index])**2))
# print hyp
# values[index] += hyp * sin(angle)
self.import_phases = phases
self.import_values = values
Global.event.data_imported.emit(self.import_phases, self.import_values)
except:
QMessageBox.critical(self, "Import error", "Error importing data!\nError: " + str(sys.exc_info()[1]))
raise
def _on_redraw(self):
if not self.filename :
QMessageBox.warning(self, "Import file", "Please import file first")
return
self.import_observation()
Global.event.interface_redraw_clicked.emit()
pass
def _on_hjd_state_changed(self, state):
if state == Qt.Checked:
self.time_zero.setDisabled(False)
self.period.setDisabled(False)
else:
self.time_zero.setDisabled(True)
self.period.setDisabled(True)
pass
def _on_magnitude_state_changed(self, state):
if state == Qt.Checked:
self.magnitude_max.setDisabled(False)
else:
self.magnitude_max.setDisabled(True)
pass
class ResultView(QTabWidget):
def __init__(self):
super(ResultView, self).__init__()
self.results = ResultsTab()
self.addTab(self.results, 'Results')
self.plot = ResultPlot()
self.addTab(self.plot, 'Plot')
self.data = ResultTab()
self.addTab(self.data, 'Data')
self.setCurrentIndex(1)
Global.event.task_selected.connect(self._on_task_selected)
def _on_task_selected(self, task):
self.setCurrentIndex(1)
class ResultPlot(QWidget):
def __init__(self):
super(ResultPlot, self).__init__()
vl = QVBoxLayout()
self.plot = Plot()
vl.setAlignment(Qt.AlignTop)
vl.addWidget(self.plot)
self.residual_plot = ResidualPlot()
self.residual_plot.setFixedHeight(150)
vl.addWidget(self.residual_plot)
hl = QHBoxLayout()
#hl.setAlignment(Qt.AlignHCenter)
self.chi2_label = QLabel('chi^2')
self.chi2_label.setFixedWidth(30)
self.chi2_label.hide();
self.chi2_value = QLineEdit()
self.chi2_value.setAlignment(Qt.AlignRight)
self.chi2_value.setFixedWidth(120)
self.chi2_value.hide()
auto_plot = QCheckBox('Auto plot finished result')
auto_plot.stateChanged.connect(self._on_auto_plot_state_changed)
hl.addWidget(auto_plot, Qt.AlignLeft)
hl.addWidget(self.chi2_label)
hl.addWidget(self.chi2_value)
hl.setStretch(1, 0)
vl.addLayout(hl)
self.setLayout(vl)
def _on_auto_plot_state_changed(self, checked_state):
checked_state = True if checked_state else False
Global.event.interface_auto_plot_state_changed.emit(checked_state)
class Plot(FigureCanvas):
__instance = None
def __init__(self):
Global.event.task_selected.connect(self._on_task_selected)
Global.event.task_deleted.connect(self._on_task_deleted)
Global.event.tasks_list_updated.connect(self._on_tasks_list_updated)
self.task = None
self.last_x_limit = []
self.axes = None
bg_color = str(QPalette().color(QPalette.Active, QPalette.Window).name())
rcParams.update({'font.size': 10})
self.figure = Figure(facecolor=bg_color, edgecolor=bg_color)
self.figure.hold(False)
super(Plot, self).__init__(self.figure)
self.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
self.updateGeometry()
def _on_task_selected(self, task):
self.set_task(task)
self.redraw()
def _on_task_deleted(self, task):
if self.task == task:
self.set_task(None)
self.clear()
ResultTab.instance().set_data([], [], [], [])
def _on_tasks_list_updated(self):
if not len(Global.tasks()):
self.set_task(None)
self.clear()
ResultTab.instance().set_data([], [], [], [])
@staticmethod
def instance():
return Plot.__instance
def set_task(self, task):
self.task = task
def clear(self):
self.figure.clf()
self.figure.clear()
gc.collect()
def redraw(self):
self.clear()
self.axes = self.figure.add_subplot(1, 1, 1)
self.axes.grid(True)
self.axes.set_xlabel('Phase')
self.axes.set_ylabel('Flux')
result_phases = []
result_values = []
import_phases = []
import_values = []
keys = sorted(self.task.result.data().keys())
for key in keys:
if self.task.result.data()[key]['result_value'] is not None:
result_phases.append(key)
result_values.append(self.task.result.data()[key]['result_value'])
if self.task.result.data()[key]['import_value'] is not None:
import_phases.append(key)
import_values.append(self.task.result.data()[key]['import_value'])
ResultTab.instance().set_data(result_phases, result_values, import_phases, import_values)
if not result_phases and not import_phases :
return
y_r_min = 1
y_r_max = 0
x_r_max = 0
y_i_min = 1
y_i_max = 0
x_i_max = 0
if result_values :
y_r_min = min(result_values)
y_r_max = max(result_values)
x_r_max = max(abs(min(result_phases)), abs(max(result_phases)))
if import_values :
y_i_min = min(import_values)
y_i_max = max(import_values)
x_i_max = max(abs(min(import_phases)), abs(max(import_phases)))
y_max = max(y_r_max, y_i_max)
y_min = min(y_r_min, y_i_min)
x_max = max(x_r_max, x_i_max)
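        # pad both axes by 10% of the data range so the curves do not touch the plot edges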
y_pad = ((y_max - y_min) / 100) * 10
x_pad = (x_max / 100) * 10
if y_min == y_max:
            y_min -= 1
            y_max += 1
self.axes.set_autoscaley_on(False)
self.axes.set_ylim([y_min - y_pad, y_max + y_pad])
self.last_x_limit = [-(x_max + x_pad), x_max + x_pad]
Global.event.plot_x_limit_changed.emit(self.last_x_limit)
self.axes.set_autoscalex_on(False)
self.axes.set_xlim(self.last_x_limit)
if len(result_phases):
self.axes.plot(result_phases, result_values, color='b', label="Prediction")
if len(import_phases):
self.axes.scatter(import_phases, import_values, s=1, color='r', label='Observation')
self.draw()
class ResidualPlot(FigureCanvas):
__instance = None
def __init__(self):
Global.event.task_selected.connect(self._on_task_selected)
Global.event.plot_x_limit_changed.connect(self._on_x_limit_changed)
Global.event.task_deleted.connect(self._on_task_deleted)
Global.event.tasks_list_updated.connect(self._on_tasks_list_updated)
self.task = None
self.axes = None
self.last_x_limit = []
self.chi2s = []
bg_color = str(QPalette().color(QPalette.Active, QPalette.Window).name())
rcParams.update({'font.size': 10})
self.figure = Figure(facecolor=bg_color, edgecolor=bg_color)
self.figure.hold(False)
super(ResidualPlot, self).__init__(self.figure)
self.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
self.updateGeometry()
self.hide()
def _on_task_selected(self, task):
self.set_task(task)
self.redraw()
def _on_task_deleted(self, task):
if self.task == task:
self.set_task(None)
self.clear()
def _on_tasks_list_updated(self):
if not len(Global.tasks()):
self.set_task(None)
self.clear()
def set_task(self, task):
self.task = task
def clear(self):
self.figure.clf()
self.figure.clear()
self.draw()
self.parent().chi2_label.hide()
self.parent().chi2_value.hide()
self.hide()
gc.collect()
def redraw(self):
self.clear()
if self.task.result.chi2 is None:
self.parent().chi2_label.hide()
self.parent().chi2_value.hide()
self.hide()
return
self.chi2s.append(self.task.result.chi2)
self.show()
self.parent().chi2_label.show()
self.parent().chi2_value.show()
self.axes = self.figure.add_subplot(1, 1, 1)
self.axes.grid(False)
self.figure.set_alpha(0)
self.axes.set_xlabel('Phase')
self.axes.set_ylabel('Residual')
phases = []
delta_values = []
keys = sorted(self.task.result.data().keys())
for key in keys:
if self.task.result.data()[key]['delta_value'] is not None:
phases.append(key)
delta_values.append(self.task.result.data()[key]['delta_value'])
y_max = max(abs(min(delta_values)), abs(max(delta_values)))
y_pad = (y_max / 100) * 10
self.axes.set_autoscaley_on(False)
self.axes.set_ylim([-(y_max + y_pad), y_max + y_pad])
self.axes.set_autoscalex_on(False)
self.axes.set_xlim(self.last_x_limit)
color = QColor(0,0,0)
min_chi2 = min(self.chi2s)
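        # colour of the chi^2 readout: black for the first result, green when this
        # run's chi^2 is the lowest seen so far, red otherwise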
if len(self.chi2s) == 1 :
color = QColor(0,0,0)
elif self.task.result.chi2 <= min_chi2 :
color = QColor(0,139,0)
else:
color = QColor(255,0,0)
self.axes.axhline(y=0, ls='--', linewidth=0.5, color='black')
self.axes.scatter(phases, delta_values, s=0.5, color='r')
palette = self.parent().chi2_value.palette()
palette.setColor(QPalette.Active, QPalette.Text, color)
self.parent().chi2_value.setPalette(palette)
self.parent().chi2_value.setText(str(self.task.result.chi2))
self.draw()
def _on_x_limit_changed(self, limit):
self.last_x_limit = limit
class ResultTab(QWidget):
__instance = None
def __init__(self):
super(QWidget, self).__init__()
if ResultTab.__instance is None :
ResultTab.__instance = self
self.phases = []
self.values = []
self.import_phases = []
self.import_values = []
self.export = None
self.vl = QVBoxLayout()
header = ['Phase', 'Synthetic', 'Observation', 'Delta']
self.table = QTableWidget()
self.table.verticalHeader().setVisible(False)
self.table.setEditTriggers(QAbstractItemView.NoEditTriggers)
self.table.setColumnCount(len(header))
self.table.setHorizontalHeaderLabels(header)
self.vl.addWidget(self.table)
hl = QHBoxLayout()
hl.setAlignment(Qt.AlignRight)
export_button = QPushButton('Export...')
export_menu = QMenu()
export_menu.addAction('\\t separated').triggered.connect(lambda : self._on_export('\t'))
export_menu.addAction(', separated').triggered.connect(lambda : self._on_export(','))
export_menu.addAction('; separated').triggered.connect(lambda : self._on_export(';'))
export_button.setMenu(export_menu)
hl.addWidget(export_button)
self.vl.addLayout(hl)
self.setLayout(self.vl)
def set_data(self, phases, values, import_phases, import_values):
self.phases = phases
self.values = values
self.import_phases = import_phases
self.import_values = import_values
self.table.setRowCount(len(self.phases))
for (index, phase) in enumerate(self.phases):
phase_item = QTableWidgetItem('%.12f' % phase)
phase_item.setTextAlignment(Qt.AlignRight)
value_item = QTableWidgetItem('%.12f' % self.values[index])
value_item.setTextAlignment(Qt.AlignRight)
self.table.setItem(index, 0, phase_item)
self.table.setItem(index, 1, value_item)
if phase in import_phases :
import_index = import_phases.index(phase)
value_item = QTableWidgetItem('%.12f' % import_values[import_index])
value_item.setTextAlignment(Qt.AlignRight)
delta_flux_item = QTableWidgetItem('%.12f' % (import_values[import_index] - self.values[index]))
delta_flux_item.setTextAlignment(Qt.AlignRight)
self.table.setItem(index, 2, value_item)
self.table.setItem(index, 3, delta_flux_item)
def _on_export(self, separator):
self.export = ExportDatDialog(self.phases, self.values, self.import_phases, self.import_values, separator)
pass
@staticmethod
def instance():
return ResultTab.__instance
class ExportDatDialog(QFileDialog):
def __init__(self, phases, values, import_phases, import_values, separator):
super(ExportDatDialog, self).__init__()
self.setWindowTitle('Export DAT')
#self.setWindowIcon(QIcon('assets/export.png'))
self.resize(500, 400)
self.setFileMode(QFileDialog.AnyFile)
filename = self.getSaveFileName(directory='result.dat', filter='DAT (*.dat);;')
try:
with open(filename, 'wb') as csv_file:
csv_writer = csv.writer(csv_file, delimiter=separator)
for index, phase in enumerate(phases):
row = []
row.append('%.12f' % phase)
row.append('%.12f' % values[index])
if phase in import_phases :
import_index = import_phases.index(phase)
row.append('%.12f' % import_values[import_index])
row.append('%.12f' % (import_values[import_index] - values[index]))
csv_writer.writerow(row)
except:
QMessageBox.warning(self, "Error", "Error exporting!\nError: " + str(sys.exc_info()[1]))
raise
class ResultsTab(QWidget):
def __init__(self):
QWidget.__init__(self)
vl = QVBoxLayout()
self.setLayout(vl)
table = ResultsTable()
vl.addWidget(table)
hl = QHBoxLayout()
hl.setAlignment(Qt.AlignRight)
vl.addLayout(hl)
delete_all_button = QPushButton('Delete all')
delete_all_button.clicked.connect(self._on_delete_all_clicked)
hl.addWidget(delete_all_button)
def _on_delete_all_clicked(self):
Global.event.interface_delete_all_results_clicked.emit()
class ResultsTable(QTableView):
def __init__(self):
QTableView.__init__(self)
self.last_sort_column = 0
self.last_sort_order = Qt.AscendingOrder
self.last_selected_row = 0
self.last_scroll_position = 0
self.setSelectionBehavior(QAbstractItemView.SelectRows)
self.setSelectionMode(QAbstractItemView.SingleSelection)
self.setSortingEnabled(True)
self.verticalHeader().setVisible(False)
self.horizontalHeader().setHighlightSections(False)
self.horizontalHeader().setMovable(True)
self.horizontalHeader().setContextMenuPolicy(Qt.CustomContextMenu)
self.horizontalHeader().customContextMenuRequested.connect(self._on_header_menu)
self.doubleClicked.connect(self._on_row_double_clicked)
self.setContextMenuPolicy(Qt.CustomContextMenu)
self.customContextMenuRequested.connect(self._on_row_menu)
Global.event.tasks_list_updated.connect(self._on_tasks_list_updated)
def keyPressEvent(self, event):
if event.type() == QEvent.KeyPress and event.key() == Qt.Key_Delete:
row = self.currentIndex().row()
task = self.get_task_by_row(row)
if task:
self.delete_task_by_id(task.id)
elif event.type() == QEvent.KeyPress and (event.key() == Qt.Key_Enter or event.key() == Qt.Key_Return):
Global.event.task_selected.emit(self.get_task_by_row(self.currentIndex().row()))
else:
return QTableView.keyPressEvent(self, event)
def _on_header_menu(self, point):
menu = QMenu()
for index, title in enumerate(self.model().header):
action = QAction(self)
action.setData(index)
action.setText(title)
action.setCheckable(True)
action.setChecked(False if self.isColumnHidden(index) else True)
action.triggered.connect(self._on_header_menu_action)
menu.addAction(action)
menu.popup(self.mapToGlobal(point))
menu.exec_()
def _on_header_menu_action(self, checked):
index = self.sender().data().toInt()[0]
if checked:
self.showColumn(index)
else:
self.hideColumn(index)
def _on_row_menu(self, point):
row = self.rowAt(point.y())
task = self.get_task_by_row(row)
if row < 0 or task is None:
return
menu = QMenu()
load_action = QAction(self)
load_action.setData(task.id)
load_action.setText("Load parameters")
load_action.triggered.connect(self._on_load_params_action)
menu.addAction(load_action)
delete_action = QAction(self)
delete_action.setData(task.id)
delete_action.setText('Delete')
delete_action.triggered.connect(self._on_row_delete_action)
menu.addAction(delete_action)
menu.popup(self.mapToGlobal(point))
menu.exec_()
def _on_load_params_action(self):
id = self.sender().data().toInt()[0]
Global.event.interface_load_task_params.emit(self.get_task_by_id(id))
def _on_row_delete_action(self):
id = self.sender().data().toInt()[0]
self.delete_task_by_id(id)
def delete_task_by_id(self, id):
task = self.get_task_by_id(id)
if task:
Global.delete_task(task)
def get_task_by_id(self, id):
for task in self.model().tasks:
if task.id == id:
return task
return None
def get_task_by_row(self, row):
if self.model() and -1 < row < len(self.model().tasks_data):
return self.get_task_by_id(self.model().tasks_data[row][0])
return None
def _on_tasks_list_updated(self):
if self.model():
self.last_sort_column = self.model().last_sort_column
self.last_sort_order = self.model().last_sort_order
self.last_selected_row = self.currentIndex().row()
self.last_scroll_position = self.verticalScrollBar().sliderPosition()
self.setModel(ResultsTableModel(Global.tasks()))
self.sortByColumn(self.last_sort_column, self.last_sort_order)
self.resizeColumnsToContents()
self.horizontalHeader().setStretchLastSection(True)
self.selectRow(self.last_selected_row)
self.verticalScrollBar().setSliderPosition(self.last_scroll_position)
def _on_row_double_clicked(self, index):
target_id = self.model().tasks_data[index.row()][0]
for task in self.model().tasks:
if task.id == target_id:
Global.event.task_selected.emit(task)
break
class ResultsTableModel(QAbstractTableModel):
def __init__(self, tasks):
QAbstractTableModel.__init__(self)
self.tasks = tasks
self.tasks_data = []
self.last_sort_column = 0
self.last_sort_order = Qt.AscendingOrder
self.header = ['#',
'Sma',
'Rs',
'Rp',
'Ts',
'Tp',
'Inc.',
'Darkening law',
'chi^2']
for task in tasks:
self.tasks_data.append([task.id,
task.input.semi_major_axis,
task.input.star_radius,
task.input.planet_radius,
task.input.star_temperature,
task.input.planet_temperature,
task.input.inclination,
task.input.darkening_law + '(' + str(task.input.darkening_coefficient_1) + ', ' + str(task.input.darkening_coefficient_2) + ')',
task.result.chi2])
def rowCount(self, parent):
return len(self.tasks_data)
def columnCount(self, parent):
return len(self.tasks_data[0]) if len(self.tasks_data) else 0
def data(self, index, role):
if not index.isValid():
return QVariant()
elif role == Qt.TextAlignmentRole:
return QVariant(Qt.AlignRight | Qt.AlignVCenter)
elif role != Qt.DisplayRole:
return QVariant()
return QVariant(self.tasks_data[index.row()][index.column()])
def headerData(self, col, orientation, role):
if orientation == Qt.Horizontal and role == Qt.DisplayRole:
return QVariant(self.header[col])
return QVariant()
def sort(self, col, order):
self.last_sort_column = col
self.last_sort_order = order
self.layoutAboutToBeChanged.emit()
self.tasks_data = sorted(self.tasks_data, key=operator.itemgetter(col))
if order == Qt.DescendingOrder:
self.tasks_data.reverse()
self.layoutChanged.emit() | mit |
amozie/amozie | quantzie/template.py | 1 | 1412 | import six
import timeit
from quantdigger import *
from quantdigger.digger import finance, plotting
from quantdigger.digger.analyze import AnalyzeFrame
from quantzie.strategy.common import *
import matplotlib.pyplot as plt
class Stg1(Strategy):
def on_init(self, ctx):
ctx.test = TEST(ctx.close, 'test')
def on_bar(self, ctx):
# ctx.switch_to_pcontract('600056.SH-1.DAY')
pass
def on_exit(self, ctx):
pass
if __name__ == '__main__':
start = timeit.default_timer()
# ConfigUtil.set(source='tushare')
ConfigUtil.set(source='cached-tushare',
cache_path='E:/_cache_tushare')
set_symbols(['600056.SH-1.Day'], '2016-09-01')
profile = add_strategy([Stg1('Stg1')], {'capital': 100000.0})
run()
stop = timeit.default_timer()
six.print_('using time: %d seconds' % (stop - start))
curve0 = finance.create_equity_curve(profile.all_holdings(0))
curve = finance.create_equity_curve(profile.all_holdings())
# AnalyzeFrame(profile)
plotting.plot_strategy(profile.data(0),
{
1: [profile.technicals(0)]
},
profile.deals(0), curve.equity.values,
profile.marks(0))
# plotting.plot_curves([curve.networth])
six.print_(finance.summary_stats(curve, 252))
plt.show()
| apache-2.0 |
eickenberg/scikit-learn | examples/svm/plot_separating_hyperplane.py | 62 | 1274 | """
=========================================
SVM: Maximum margin separating hyperplane
=========================================
Plot the maximum margin separating hyperplane within a two-class
separable dataset using a Support Vector Machines classifier with
linear kernel.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm
# we create 40 separable points
np.random.seed(0)
X = np.r_[np.random.randn(20, 2) - [2, 2], np.random.randn(20, 2) + [2, 2]]
Y = [0] * 20 + [1] * 20
# fit the model
clf = svm.SVC(kernel='linear')
clf.fit(X, Y)
# get the separating hyperplane
w = clf.coef_[0]
a = -w[0] / w[1]
xx = np.linspace(-5, 5)
yy = a * xx - (clf.intercept_[0]) / w[1]
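# Illustrative sanity check (added here for clarity, not part of the original
# example): the line above is the decision boundary w[0]*x + w[1]*y + b = 0
# solved for y, so every plotted point satisfies it up to floating-point error.
assert np.allclose(w[0] * xx + w[1] * yy + clf.intercept_[0], 0.0)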
# plot the parallels to the separating hyperplane that pass through the
# support vectors
b = clf.support_vectors_[0]
yy_down = a * xx + (b[1] - a * b[0])
b = clf.support_vectors_[-1]
yy_up = a * xx + (b[1] - a * b[0])
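# The dashed margin lines computed above are parallel to the decision boundary
# and pass through a support vector from each class; for this separable toy
# dataset the support vectors lie on the margin, so the distance between the
# dashed lines is 2 / ||w||.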
# plot the line, the points, and the nearest vectors to the plane
plt.plot(xx, yy, 'k-')
plt.plot(xx, yy_down, 'k--')
plt.plot(xx, yy_up, 'k--')
plt.scatter(clf.support_vectors_[:, 0], clf.support_vectors_[:, 1],
s=80, facecolors='none')
plt.scatter(X[:, 0], X[:, 1], c=Y, cmap=plt.cm.Paired)
plt.axis('tight')
plt.show()
| bsd-3-clause |
dmargala/qusp | qusp/wavelength.py | 1 | 4707 | """
Provides support for working with BOSS wavelengths
Examples
--------
Add sky lines to quasar spectrum plot:
>>> qusp.wavelength.draw_lines(qusp.wavelength.load_wavelengths('sky'))
Get a combined spectrum's fiducial pixel offset:
>>> offset = qusp.wavelength.get_fiducial_pixel_index_offset(np.log10(combined.wave[0]))
Construct a fiducial pixel wavelength array:
>>> wave = qusp.wavelength.get_fiducial_wavelength(np.arange(4800))
"""
import math
import os
import numpy as np
class Wavelength(float):
"""
A Wavelength is a float.
Args:
value (float): wavelength value
"""
def __init__(self, value):
float.__init__(value)
def __new__(cls, value, *args, **kwargs):
return float.__new__(cls, value)
def observed(self, redshift):
"""
Args:
redshift (float): source redshift
Returns:
the shifted observed wavelength.
"""
return self*(1+redshift)
def rest(self, redshift):
"""
Args:
redshift (float): source redshift
Returns:
the shifted rest wavelength.
"""
return self/(1+redshift)
class LabeledWavelength(Wavelength):
"""
A LabeledWavelength is a Wavelength with a label attribute
"""
def __init__(self, value, label):
Wavelength.__init__(self, value)
self.label = label
def __str__(self):
return str((self, self.label))
def load_wavelengths(filename, ignore_labels=False):
"""
Loads a list of wavelengths from the specified file
    Args:
        filename (str): wavelength data filename, without the '.dat' extension
        ignore_labels (bool, optional): if True, discard the label column and return plain Wavelength objects
    Returns:
        wavelengths (list): Wavelength or LabeledWavelength objects
"""
# Get the path that this module was loaded from.
my_path = os.path.dirname(os.path.abspath(__file__))
    # Build the path where the wavelength data files should be.
    wavelengths_path = os.path.join(
        os.path.dirname(my_path), 'data', 'wavelengths')
    full_path = os.path.join(wavelengths_path, '%s.dat' % filename)
if ignore_labels:
wavelengths = np.genfromtxt(full_path, usecols=0)
return [Wavelength(wave) for wave in wavelengths]
else:
wavelengths = np.genfromtxt(
full_path,
dtype={'names':('wavelengths', 'labels'), 'formats':(float, 'S100')},
usecols=(0, 1))
return [LabeledWavelength(*wave) for wave in wavelengths]
def get_fiducial_wavelength(pixel_index, coeff1=1e-4, log10lam0=np.log10(3500.26)):
"""
Returns the wavelength at the center of the specified index
of the BOSS co-add fiducial wavelength grid.
    Args:
        pixel_index (int): index of the BOSS co-add fiducial wavelength grid.
        coeff1 (float, optional): log10 dispersion per pixel.
        log10lam0 (float, optional): log10 of the wavelength at pixel index 0.
    Returns:
        wavelength (float): central wavelength of the specified index on the fiducial wavelength grid.
"""
return np.power(10.0, log10lam0 + coeff1*pixel_index)
def get_fiducial_pixel_index_offset(loglam, coeff1=1e-4, log10lam0=np.log10(3500.26)):
"""
Returns the pixel index offset from the start of the
BOSS co-add fiducial wavelength grid.
    Args:
        loglam (float): log10 of the wavelength to locate on the fiducial grid
        coeff1 (float, optional): log10 dispersion per pixel
        log10lam0 (float, optional): log10 of the wavelength at pixel index 0
Returns:
pixel index offset from the start of the fiducial wavelength grid.
"""
return np.round((loglam-log10lam0)/coeff1).astype(int)
def draw_lines(waves, offset=0, delta=.1, **kwargs):
"""
Draws vertical lines on the current plot.
"""
import matplotlib.pyplot as plt
import matplotlib.transforms as transforms
wavemin, wavemax = plt.gca().get_xlim()
transform = transforms.blended_transform_factory(
plt.gca().transData, plt.gca().transAxes)
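    # The blended transform interprets x in data coordinates and y in axes
    # coordinates, so labels keep a fixed height while tracking wavelength.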
for index, wave in enumerate(waves):
if wave < wavemin or wave > wavemax:
continue
plt.axvline(wave, **kwargs)
try:
plt.text(wave, offset+(index%2)*delta, wave.label,
transform=transform, horizontalalignment='left')
except AttributeError:
pass
if __name__ == '__main__':
# Tests for wavelengths module
wave1216 = Wavelength(1216)
assert wave1216.observed(2.5) == 4256, 'observed wavelength error'
wave5472 = Wavelength(5472)
assert wave5472.rest(3.5) == 1216, 'rest wavelength error'
lambda0 = get_fiducial_wavelength(0)
assert lambda0 == 3500.26, 'fiducial wavelength origin error'
lambda101 = get_fiducial_wavelength(101)
offset101 = get_fiducial_pixel_index_offset(math.log10(lambda101))
assert offset101 == 101, 'fiducial pixel index offset error'
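    # Additional illustrative check (not part of the original tests): the
    # fiducial grid is uniform in log10(wavelength), so neighbouring pixel
    # centres differ by the constant factor 10**1e-4.
    ratio = get_fiducial_wavelength(1) / get_fiducial_wavelength(0)
    assert abs(ratio - 10 ** 1e-4) < 1e-12, 'fiducial grid spacing error'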
waves = load_wavelengths('balmer')
for wave in waves:
        print wave.label
| mit |