repo_name
stringlengths 7
79
| path
stringlengths 4
179
| copies
stringlengths 1
3
| size
stringlengths 4
6
| content
stringlengths 959
798k
| license
stringclasses 15
values |
---|---|---|---|---|---|
saltastro/pysalt
|
saltfirst/saltfirst.py
|
1
|
27572
|
############################### LICENSE ##################################
# Copyright (c) 2009, South African Astronomical Observatory (SAAO) #
# All rights reserved. #
# #
############################################################################
#!/usr/bin/env python
#
#
# SALTFIRST--SALTFIRST provides first look capability and quick reductions for
# SALT data. The task initiates a GUI which then monitors the data directories
# for SCAM and RSS. Whenever, a new data file is created, the program will
# identify the file, process the file, display the file in ds9, and compute any
# important statistics for the file. The GUI should be able to display basic
# information about the data as well as print an observing log.
#
# Author Version Date
# -----------------------------------------------
# S M Crawford (SAAO) 0.1 16 Mar 2010
# Ensure python 2.5 compatibility
from __future__ import with_statement
import os, shutil, time, ftplib, glob
from astropy.io import fits as pyfits
import pickle
import numpy as np
import scipy as sp
import warnings
# Gui library imports
from PyQt4 import QtGui, QtCore
from matplotlib.backends.backend_qt4agg import NavigationToolbar2QTAgg
from pyraf import iraf
from pyraf.iraf import pysalt
import saltsafekey as saltkey
import saltsafeio as saltio
import saltsafemysql as saltmysql
import saltstat
#import plugins
from quickclean import quickclean
from quickphot import quickphot
from quickspec import quickspec, quickap
from display import display, regions
from seeing import seeing_stats
from sdbloadobslog import sdbloadobslog
from fpcal import fpcal
from findcal import findcal
from fastmode import runfast
from sdbloadfits import sdbloadfits
from saltgui import ImageDisplay, MplCanvas
from saltsafelog import logging
from salterror import SaltError, SaltIOError
from OrderedDict import OrderedDict
from ImageWidget import ImageWidget
from SpectraViewWidget import SpectraViewWidget
from InfoWidget import InfoWidget
from DQWidget import DQWidget
from ObsLogWidget import ObsLogWidget
from SpectraViewWidget import SpectraViewWidget
from ObsLogWidget import headerList, printList
debug=True
# -----------------------------------------------------------
# core routine
def saltfirst(obsdate, imdir, prodir, server='smtp.saao.ac.za', readme='readme.fast.template', sdbhost='sdb.salt', sdbname='sdb', sdbuser='', password='',imreduce=True, sexfile='/home/ccd/tools/qred.sex', update=True, clobber=False,logfile='salt.log',verbose=True):
    """Entry point for the SALTFIRST quick-look tool.

    Prepares the product directory, moves into it, and launches the
    FirstWindow GUI which monitors the instrument data directories for the
    night given by obsdate.  Raises SaltError when the GUI event loop exits
    with a non-zero status.
    """
    # (Re)create the product directory: clobber wipes an existing one.
    if os.path.isdir(prodir):
        if clobber:
            shutil.rmtree(prodir)
            os.mkdir(prodir)
    else:
        os.mkdir(prodir)
    # All reduction products are written relative to the product directory.
    os.chdir(prodir)

    with logging(logfile, debug) as log:
        # Build the Qt application and the main monitoring window.
        app = QtGui.QApplication([])
        window = FirstWindow(obsdate, imdir, prodir, server=server,
                             readme=readme, sdbhost=sdbhost, sdbname=sdbname,
                             sdbuser=sdbuser, password=password,
                             imreduce=imreduce, sexfile=sexfile, update=update,
                             clobber=clobber, log=log, verbose=verbose)
        window.setMinimumHeight(800)
        window.setMinimumWidth(500)
        window.show()

        # Run the Qt event loop until the GUI is closed.
        status = app.exec_()
        if status != 0:
            raise SaltError('SALTFIRST GUI has unexpected exit status ' + str(status))
class FirstWindow(QtGui.QMainWindow):
    """Main SALTFIRST window.

    Watches the SCAM/RSS/HRS raw-data directories for the given night,
    reduces new frames as they appear, and presents the results in a set
    of tabs (Info, DQ, Spectra, Log).
    """

    def __init__(self, obsdate, imdir, prodir, server='smtp.saao.ac.za', readme='readme.fast.template', \
                 sdbhost='sdb.salt', sdbname='sdb', sdbuser='', \
                 password='', hmin=350, wmin=400, cmap='gray', \
                 sexfile='/home/ccd/tools/qred.sex', update=True,
                 scale='zscale', contrast=0.1, imreduce=True, clobber=False, log=None, verbose=True):
        # --- store configuration on the instance -------------------------
        self.obsdate=obsdate
        self.imdir=imdir
        self.prodir=prodir
        self.imreduce=imreduce
        self.clobber=clobber
        # Per-instrument watch flags; set False by checkfordata() when the
        # corresponding raw directory does not (yet) exist.
        self.scamwatch=True
        self.rsswatch=True
        self.hrswatch=True
        self.hrbwatch=True
        # Default object extraction section for quickspec ([y1:y2] or None).
        self.objsection=None
        self.sdbhost=sdbhost
        self.sdbname=sdbname
        self.sdbuser=sdbuser
        self.password=password
        self.server=server
        self.readme=readme
        self.sexfile=sexfile
        self.update=update
        self.headfiles=[]
        # Pickle file used to persist the observing log between sessions.
        self.pickle_file='%s_obslog.p' % self.obsdate
        # --- set up the Qt main window -----------------------------------
        QtGui.QMainWindow.__init__(self)
        # Set main widget
        self.main = QtGui.QWidget(self)
        # Set window title
        self.setWindowTitle("SALTFIRST")
        # Observing log dictionary (name -> header value list).
        self.create_obslog()
        # Scan the raw directories for any data already on disk.
        self.checkfordata(self.obsdate, self.imdir)
        #example data
        #image='../salt/scam/data/2006/1016/raw/S200610160009.fits'
        #self.hdu=saltio.openfits(image)
        #name=getbasename(self.hdu)
        #imlist=getimagedetails(self.hdu)
        #obsdict={}
        #obsdict[name]=imlist
        # Seed the tabs with the most recent image, if any exist yet.
        if len(self.obsdict)>0:
            name=self.obsdict.order()[-1]
            imlist=self.obsdict[name]
        else:
            name=''
            imlist=[]
        self.hdu=None
        self.infoTab=InfoWidget(name, imlist)
        self.dqTab=DQWidget(name, imlist)
        #self.imageTab=ImageWidget(self.hdu, hmin=hmin, wmin=wmin, cmap=cmap, scale=scale, contrast=contrast)
        self.specTab=SpectraViewWidget(None, None, None, hmin=hmin, wmin=wmin)
        self.obsTab=ObsLogWidget(self.obsdict, obsdate=self.obsdate)
        # Assemble the tab widget.
        self.tabWidget=QtGui.QTabWidget()
        self.tabWidget.addTab(self.infoTab, 'Info')
        self.tabWidget.addTab(self.dqTab, 'DQ')
        #self.tabWidget.addTab(self.imageTab, 'Image')
        self.tabWidget.addTab(self.specTab, 'Spectra')
        self.tabWidget.addTab(self.obsTab, 'Log')
        # Button to manually rescan the data directories.
        self.checkButton = QtGui.QPushButton("Check for Data")
        self.checkButton.clicked.connect(self.clickfordata)
        # Lay out tabs above the rescan button.
        mainLayout = QtGui.QVBoxLayout(self.main)
        mainLayout.addWidget(self.tabWidget)
        mainLayout.addWidget(self.checkButton)
        # Worker thread used to reduce new frames off the GUI thread.
        self.threadlist=[]
        self.thread=QtCore.QThread()
        self.nothread=False
        # Watch the raw directories for newly created files.
        self.addwatcher()
        # Timer: every 5 minutes re-check for data and push log updates.
        self.ctimer=QtCore.QTimer()
        ctime=5*60*1000
        self.ctimer.start(ctime)
        self.connect(self.ctimer, QtCore.SIGNAL("timeout()"), self.updatetime)
        # Wire up inter-widget signals.
        self.connect(self, QtCore.SIGNAL('updatespec(QString)'), self.updatespecview)
        self.connect(self.thread, QtCore.SIGNAL('finishedthread(QString)'), self.updatetabs)
        self.connect(self.obsTab, QtCore.SIGNAL('cellclicked(QString)'), self.updatetabs)
        self.connect(self.obsTab, QtCore.SIGNAL('updateobslogdb(QString)'), self.updateobslogdb)
        self.connect(self.obsTab, QtCore.SIGNAL('updatecals(QString)'), self.updatecals)
        self.connect(self.specTab, QtCore.SIGNAL('updateextract(int,int)'), self.updateextract)
        # Set the main widget as the central widget
        self.setCentralWidget(self.main)
        # Destroy widget on close
        self.setAttribute(QtCore.Qt.WA_DeleteOnClose)
def create_obslog(self):
    """Check to see if there are any files in the database, and if so, create the observing log"""
    # NOTE(review): the 'and 0' deliberately disables restoring the log from
    # the pickle file, so a fresh OrderedDict is always created -- confirm
    # whether the pickle-restore path should be re-enabled.
    if os.path.isfile(self.pickle_file) and 0:
        self.obsdict = pickle.load( open( self.pickle_file, "rb" ) )
    else:
        self.obsdict = OrderedDict()
def updateextract(self, y1, y2):
    """Re-extract the 1-D spectrum of the current image over rows y1:y2.

    Slot for the Spectra tab's 'updateextract(int,int)' signal: re-runs
    quickap over the requested row section and refreshes the spectrum view.
    """
    print y1, y2
    name=self.specTab.name
    iminfo=self.obsdict[name]
    # NOTE(review): lampid is computed but not used in this method.
    lampid=iminfo[headerList.index('LAMPID')].strip().upper()
    objsection='[%i:%i]' % (y1, y2)
    # Only remember the section as the default for future reductions when
    # the 'default' checkbox on the Spectra tab is ticked.
    if self.specTab.defaultBox.checkState():
        print "Updating Object Section"
        self.objsection=objsection
    else:
        self.objsection=None
    outpath='./'
    outfile=outpath+'smbxp'+name
    logfile='saltclean.log'
    verbose=True
    # quickap returns the extraction rows actually used.
    y1, y2=quickap(outfile, objsection=objsection, clobber=True, logfile=logfile, verbose=verbose)
    #quickspec(outfile, lampid, findobj=False, objsection=objsection, clobber=True, logfile=logfile, verbose=verbose)
    self.specTab.updaterange(y1,y2)
    self.updatespecview(name)
def updatetime(self):
    """Check to see if the data or logs need updating.

    Slot for the 5-minute QTimer: rescans the data directories, pushes the
    night-log entries, and refreshes the calibration data.
    """
    print "Checking for updates at %s" % time.asctime()
    #check for any new data
    self.clickfordata('')
    #update the obstab to the sdb
    self.obsTab.printfornightlog()
    self.updatecals()
def updateobslogdb(self, logstr):
    """Upload the observing-log text to the science database and persist
    the current obsdict to the session pickle file."""
    #print logstr
    print "Updating Obslog for ", self.obsdate
    sdbloadobslog(logstr, self.obsdate, self.sdbhost, self.sdbname, self.sdbuser, self.password)
    # Snapshot the in-memory log so a restart can (in principle) recover it.
    pickle.dump(self.obsdict, open(self.pickle_file, 'wb'))
def updatecals(self):
    """Refresh the calibration data for the night from the science database."""
    print "Loading Calibration Data"
    findcal(self.obsdate, self.sdbhost, self.sdbname, self.sdbuser, self.password)
def updateimlist(self, name, key, value):
    """Debug hook for image-list updates; currently only prints the change."""
    print "UPDATE:", name, key, value
def updatespecview(self, name):
    """Load the extracted spectrum for *name* and redraw the Spectra tab.

    Reads the './smbxp<name>.txt' product written by quickspec/quickap
    (columns: wavelength, flux, S/N).
    """
    # name may arrive as a QString from a signal; normalise to str.
    name = str(name)
    print "UPDATING SPECVIEW with %s" % name
    specfile='./smbxp'+name.split('.fits')[0]+'.txt'
    warr, farr, snarr=np.loadtxt(specfile, usecols=(0,1,2), unpack=True)
    self.specTab.loaddata(warr, farr, snarr, name)
    self.specTab.redraw_canvas()
def converttoname(self, infile):
    """Given a file name, find the raw salt file name"""
    # NOTE(review): unimplemented stub -- the body was never written, so
    # this method currently returns None for every input.
def updatetabs(self, infile):
    """Refresh the Info/DQ tabs, the ds9 display, and (for spectroscopy)
    the spectrum plot for the image *infile*.

    Slot for both the worker thread's 'finishedthread' signal and the
    observing-log table's 'cellclicked' signal.
    """
    name=str(infile)
    imlist=self.obsdict[name]
    detmode=imlist[headerList.index('DETMODE')].strip().upper()
    obsmode=imlist[headerList.index('OBSMODE')].strip().upper()
    #update the information panel
    try:
        self.infoTab.update(name, self.obsdict[name])
        print "UPDATING tabs with %s" % name
    except Exception, e:
        print e
        return
    # Clear the worker-thread busy flag if the reduction thread is active.
    if self.thread.isRunning() and self.nothread:
        self.nothread=False
    #update the DQ tab
    try:
        self.dqTab.updatetab(name, self.obsdict[name])
        #self.dqTab=DQWidget(name, self.obsdict[name])
        #self.tabWidget.removeTab(1)
        #self.tabWidget.insertTab(1, self.dqTab, 'DQ')
    except Exception, e:
        print e
        return
    #display the image
    try:
        # SCAM ('S') and RSS ('P') frames have mosaicked 'mbxp' products
        # plus a sextractor catalog; HRS frames are shown via the symlink.
        if name.startswith('S') or name.startswith('P'):
            rfile='mbxp'+name
            cfile=rfile.replace('.fits', '.cat')
        else:
            rfile = name + 's'
            cfile = None
        display(rfile, cfile)
    except Exception, e:
        print e
    #update the spectra plot
    if obsmode=='SPECTROSCOPY':
        self.updatespecview(name)
def clickfordata(self, event):
    """Rescan the data directories and rebuild the observing-log table.

    Slot for the 'Check for Data' button (and called by the update timer);
    also tears down and re-creates the file-system watcher.
    """
    #print self.watcher, dir(self.watcher)
    #look for new data
    self.checkfordata(self.obsdate, self.imdir)
    #reset the obslog
    self.obsTab.set_obsdict(self.obsdict)
    #print self.obsTab.obsdict.keys()
    self.obsTab.obstable.setRowCount(self.obsTab.nrow)
    for i in range(self.obsTab.nrow):
        self.obsTab.setrow(i)
    # Re-create the watcher so newly appeared directories are picked up.
    self.disconnect(self.watcher, QtCore.SIGNAL('directoryChanged (const QString&)'), self.newfileEvent)
    self.addwatcher()
def addwatcher(self):
    """Create a QFileSystemWatcher over the instrument data directories.

    For each instrument, watch the night's raw directory when it exists;
    otherwise watch the parent year directory so the raw directory's
    creation is noticed.
    """
    self.watcher=QtCore.QFileSystemWatcher(self)
    watchpath=[]
    if self.scamwatch:
        watchpath.append(self.scamdir)
    else:
        watchpath.append('%sscam/data/%s/' % (self.imdir, self.obsdate[0:4]))
    if self.rsswatch:
        watchpath.append(self.rssdir)
    else:
        watchpath.append('%srss/data/%s/' % (self.imdir, self.obsdate[0:4]))
    if self.hrswatch:
        watchpath.append(self.hrsdir)
    else:
        watchpath.append('%shrdet/data/%s/' % (self.imdir, self.obsdate[0:4]))
    if self.hrbwatch:
        watchpath.append(self.hrbdir)
    else:
        watchpath.append('%shbdet/data/%s/' % (self.imdir, self.obsdate[0:4]))
    print watchpath
    self.watcher.addPaths(watchpath)
    self.connect(self.watcher, QtCore.SIGNAL('directoryChanged (const QString&)'), self.newfileEvent)
    #watcher.directoryChanged.connect(self.newfileEvent)
    #self.connect(watcher, QtCore.SIGNAL("fileChanged(const QString&)"), self.newfileEvent)
def newfileEvent(self, event):
    """Handles the event when a new file is created.

    Slot for QFileSystemWatcher.directoryChanged: identifies the newest
    file in the changed directory, registers it in the observing log, and
    kicks off its reduction on the worker thread.
    """
    #look for new files
    edir='%s' % event
    # If an instrument's raw directory was being awaited (watch flag off)
    # and has now appeared, start watching it and scan it instead.
    if not self.scamwatch and edir.count('scam'):
        self.watcher.addPath(self.scamdir)
        edir=self.scamdir
    if not self.rsswatch and edir.count('rss'):
        self.watcher.addPath(self.rssdir)
        edir=self.rssdir
    #Perhaps edit to turn off?
    if self.hrswatch and edir.count('hrdet'):
        self.watcher.addPath(self.hrsdir)
        edir=self.hrsdir
    if self.hrbwatch and edir.count('hbdet'):
        self.watcher.addPath(self.hrbdir)
        edir=self.hrbdir
    #check the directory for new files
    files=glob.glob(edir+'*')
    files.sort()
    newfile=self.findnewfile(files)
    if not newfile: return
    #skip over an files that are .bin files
    if newfile.count('.bin'):
        msg="Sorry I can't handle slotmode files like %s, yet" % files[-1]
        print msg
        return
    print newfile
    # Only FITS products ('.fit'/'.fits') are handled beyond this point.
    if not newfile.count('.fit'): return
    #see if the new file can be opened and added to obsdict
    name=self.addtoobsdict(newfile)
    print 'Added to obs:', name
    #if it fails, return
    if name is None: return
    print edir
    #check to see if it is a new file and if so, add it to the files
    #if not return
    if edir==self.scamdir:
        if len(self.scamfiles)==len(files): return
        self.scamfiles.append(newfile)
    if edir==self.rssdir:
        if len(self.rssfiles)==len(files): return
        self.rssfiles.append(newfile)
    if edir==self.hrsdir:
        if len(self.hrsfiles)==len(files): return
        self.hrsfiles.append(newfile)
    if edir==self.hrbdir:
        if len(self.hrbfiles)==len(files): return
        self.hrbfiles.append(newfile)
    self.allfiles=self.scamfiles+self.rssfiles+self.hrsfiles+self.hrbfiles
    #update tables
    self.updatetables(name)
    #add head files to the list
    if newfile.count('.head'):
        self.headfiles.append(newfile)
    # Reduce the frame on the worker thread so the GUI stays responsive.
    if self.imreduce and newfile.count('.fit') and newfile.count(self.obsdate):
        self.newname=name
        self.newfile=newfile
        self.thread.run=self.runcleandata
        print 'Setting up thread'
        self.thread.start()
        print 'Thread Started'
def runcleandata(self):
    """Worker-thread entry point: reduce self.newfile and signal completion.

    Installed as QThread.run by newfileEvent.  The nothread flag guards
    against emitting the finished signal when updatetabs has already
    consumed the result.
    """
    self.nothread=True
    self.obsdict[self.newname]=self.cleandata(self.newfile, iminfo=self.obsdict[self.newname],
                                              clobber=self.clobber, display_image=True)
    if self.nothread:
        self.nothread=False
        print "emitting signal"
        self.thread.emit(QtCore.SIGNAL("finishedthread(QString)"), self.newname)
def updatetables(self, name):
    """Add the image *name* and its header details to the observing-log tab."""
    self.obsTab.addobsdict(name, self.obsdict[name])
def findnewfile(self, files):
    """Return the newest file in *files* that is not already known.

    'Newest' is the last entry of the (sorted) list that is absent from
    self.allfiles; entries containing 'disk.file' or 'log' are bookkeeping
    files and are ignored.  Returns None when there is nothing new.
    """
    candidates = [entry for entry in files
                  if entry not in self.allfiles
                  and not entry.count('disk.file')
                  and not entry.count('log')]
    if candidates:
        return candidates[-1]
    return None
def checkfordata(self, obsdate, imdir):
    """If we are starting up, look for any existing data and load that data.

    Resolves the per-instrument raw directories for the night, lists any
    files already present, registers them in obsdict, and (when imreduce
    is set) reduces them.  Instruments whose raw directory does not exist
    yet get their watch flag switched off so addwatcher() falls back to
    the parent year directory.
    """
    #set up some of the data
    self.obsdate=obsdate
    self.imdir=imdir
    if imdir[-1]!='/': imdir += '/'
    #set up the directories scam data
    self.scamdir='%sscam/data/%s/%s/raw/' % (imdir, obsdate[0:4], obsdate[4:])
    #self.scamdir='%sscam/data/%s/' % (imdir, obsdate[0:4])
    if os.path.isdir(self.scamdir):
        self.scamfiles=glob.glob(self.scamdir+'S*')
        self.scamfiles.sort()
    else:
        #createdirectories(self.scamdir)
        self.scamwatch=False
        self.scamfiles=[]
    #set up the RSS files
    self.rssdir ='%srss/data/%s/%s/raw/' % (imdir, obsdate[0:4], obsdate[4:])
    if os.path.isdir(self.rssdir):
        self.rssfiles=glob.glob(self.rssdir+'P*')
        self.rssfiles.sort()
    else:
        #createdirectories(self.rssdir)
        self.rsswatch=False
        self.rssfiles=[]
    #set up the HRS files
    self.hrsdir ='%shrdet/data/%s/%s/raw/' % (imdir, obsdate[0:4], obsdate[4:])
    if os.path.isdir(self.hrsdir) and self.hrswatch:
        self.hrsfiles=glob.glob(self.hrsdir+'R*')
        self.hrsfiles.sort()
    else:
        #createdirectories(self.rssdir)
        self.hrswatch=False
        self.hrsfiles=[]
    # Blue-arm HRS files
    self.hrbdir ='%shbdet/data/%s/%s/raw/' % (imdir, obsdate[0:4], obsdate[4:])
    if os.path.isdir(self.hrbdir) and self.hrbwatch:
        self.hrbfiles=glob.glob(self.hrbdir+'H*')
        self.hrbfiles.sort()
    else:
        #createdirectories(self.rssdir)
        self.hrbwatch=False
        self.hrbfiles=[]
    self.allfiles=self.scamfiles+self.rssfiles+self.hrsfiles+self.hrbfiles
    # Register (and optionally reduce) every file found on disk.
    for i in range(len(self.allfiles)):
        if self.allfiles[i][-5:]==".fits" and self.allfiles[i].count(self.obsdate):
            name=os.path.basename(self.allfiles[i])
            if name not in self.obsdict.keys(): # or not os.path.isfile('mbxp'+name):
                name=self.addtoobsdict(self.allfiles[i])
                if self.imreduce:
                    self.obsdict[name]=self.cleandata(self.allfiles[i], iminfo=self.obsdict[name],
                                                      clobber=self.clobber)
        elif self.allfiles[i].count(".head"):
            name=self.addtoobsdict(self.allfiles[i])
            self.headfiles.append(self.allfiles[i])
        elif self.allfiles[i][-4:]==".fit": #for hrs
            name=os.path.basename(self.allfiles[i])
            if name not in self.obsdict.keys(): # or not os.path.isfile('mbxp'+name):
                name=self.addtoobsdict(self.allfiles[i])
                if self.imreduce:
                    # HRS frames are registered but not mosaicked/cleaned.
                    self.obsdict[name]=self.cleandata(self.allfiles[i], iminfo=self.obsdict[name],
                                                      reduce_image=False, clobber=self.clobber)
        elif self.allfiles[i].count(".bin"):
            msg="Sorry I can't handle slotmode files like %s, yet" % self.allfiles[i]
            print msg
def addtoobsdict(self, infile):
try:
warnings.warn('error')
self.hdu=saltio.openfits(infile)
self.hdu.verify('exception')
warnings.warn('default')
name=getbasename(self.hdu)
imlist=getimagedetails(self.hdu)
self.hdu.close()
except IndexError:
time.sleep(10)
name=self.addtoobsdict(infile)
return name
except Exception, e:
print 'Returning none due to: %s' % (str(e))
return None
self.obsdict[name]=imlist
return name
def cleandata(self, filename, iminfo=None, prodir='.', interp='linear', cleanup=True,
              clobber=False,
              logfile='saltclean.log', reduce_image=True,
              display_image=False, verbose=True):
    """Start the process to reduce the data and produce a single mosaicked image.

    Parameters
    ----------
    filename : str
        Full path to the raw frame.
    iminfo : list or None
        Header-value list for the frame (as built by getimagedetails);
        updated in place with photometry/seeing results and returned.
    prodir, interp, cleanup, clobber, logfile, verbose :
        Passed through to the quick-reduction tasks.
    reduce_image : bool
        When False, skip quickclean (used for HRS frames).
    display_image : bool
        When True, show the product in ds9.

    Returns
    -------
    iminfo : the (possibly updated) header-value list.
    """
    #print filename
    status=0
    #create the input file name
    infile=os.path.basename(filename)
    rawpath=os.path.dirname(filename)
    outpath='./'
    outfile=outpath+'mbxp'+infile
    #print infile, rawpath, outpath
    #If it is a bin file, pre-process the data
    if filename.count('.bin'):
        print "I can't handle this yet"
    #ignore bcam files
    if infile.startswith('B'):
        return iminfo
    #check to see if it exists and return if clobber is no
    if os.path.isfile(outfile) and not clobber: return iminfo
    #handle HRS data
    print filename
    if infile.startswith('H') or infile.startswith('R'):
        # HRS frames are not mosaicked: symlink the raw file, optionally
        # display it, load it into the SDB, and return immediately.
        outfile = os.path.basename(filename) +'s'
        print filename, outfile
        if not os.path.isfile(outfile): os.symlink(filename, outfile)
        #display the image
        if display_image:
            print "Displaying %s" % outfile
            try:
                display(outfile)
            except Exception, e:
                print e
        try:
            log=None #open(logfile, 'a')
            sdb=saltmysql.connectdb(self.sdbhost, self.sdbname, self.sdbuser, self.password)
            sdbloadfits(outfile, sdb, log, False)
            print 'SDBLOADFITS: SUCCESS'
        except Exception, e:
            print 'SDBLOADFITSERROR:', e
        return iminfo
    if filename.count('.txt'): return iminfo
    #remove frame transfer data
    #detmode=iminfo[headerList.index('DETMODE')].strip().upper()
    #if detmode=='FT' or detmode=='FRAME TRANSFER': return iminfo
    #reduce the data
    if reduce_image:
        try:
            quickclean(filename, interp, cleanup, clobber, logfile, verbose)
        except Exception, e:
            print e
            return iminfo
    #load the data into the SDB
    if self.sdbhost and self.update:
        try:
            log=None #open(logfile, 'a')
            sdb=saltmysql.connectdb(self.sdbhost, self.sdbname, self.sdbuser, self.password)
            sdbloadfits(filename, sdb, log, False)
            print 'SDBLOADFITS: SUCCESS'
        except Exception, e:
            print 'SDBLOADFITSERROR:', e
    #display the image
    if display_image:
        print "Displaying %s" % outfile
        try:
            display(outfile)
        except Exception, e:
            print e
    #if the images are imaging data, run sextractor on them
    name=iminfo[0]
    propcode=iminfo[headerList.index('PROPID')].strip().upper()
    obsmode=iminfo[headerList.index('OBSMODE')].strip().upper()
    detmode=iminfo[headerList.index('DETMODE')].strip().upper()
    obstype=iminfo[headerList.index('CCDTYPE')].strip().upper()
    target=iminfo[headerList.index('OBJECT')].strip().upper()
    lampid=iminfo[headerList.index('LAMPID')].strip().upper()
    print detmode
    if (obsmode=='IMAGING' or obsmode=='FABRY-PEROT' ) and (detmode=='NORMAL' or detmode=='FT' or detmode=='FRAME TRANSFER'):
        i=headerList.index('CCDSUM')
        ccdbin=int(iminfo[i].split()[0])
        # 0.14 arcsec/unbinned pixel; aperture radius of 1.5 arcsec.
        pix_scale=0.14*ccdbin
        r_ap=1.5/pix_scale
        #measure the photometry
        print "RUNNING PHOTOMETRY"
        quickphot(outfile, r_ap, pix_scale, self.sexfile, clobber, logfile, verbose)
        #load the regions
        #if display_image: regions(outfile)
        # Background statistics are currently disabled (placeholder -1s).
        #hdu=pyfits.open(outfile)
        #bmean, bmidpt, bstd=saltstat.iterstat(hdu[1].data, 5, 3)
        bmean, bmidpt, bstd=(-1,-1,-1)
        #hdu.close()
        print "---------Background Statistics---------"
        print "%10s %10s %10s" % ('Mean', 'MidPoint', 'STD')
        print "%10.2f %10.2f %10.2f" % (bmean, bmidpt, bstd)
        iminfo[headerList.index('BMEAN')]='%f' % (bmean)
        iminfo[headerList.index('BMIDPT')]='%f' % (bmidpt)
        iminfo[headerList.index('BSTD')]='%f' % (bstd)
        #measure the seeing from the sextractor catalog
        outtxt=outfile.replace('fits', 'cat')
        try:
            mag_arr, fwhm_arr=np.loadtxt(outtxt, usecols=(2,10), unpack=True)
            mean, std, norm, peak=seeing_stats(fwhm_arr)
            see=mean*pix_scale
            nsources=len(mag_arr)
        except:
            # Catalog missing/unreadable: record sentinel values.
            see=-1
            nsources=-1
        iminfo[headerList.index('NSOURCES')]='%i' % nsources
        iminfo[headerList.index('SEEING')]='%f' % see
        #self.emit(QtCore.SIGNAL("updateimlist(str,str,str)"), (name, 'SEEING', '%f' % see))
        #self.emit(QtCore.SIGNAL("updatespec(QString)"), name)
    #If the images are spectral images, run specreduce on them
    if obsmode=='SPECTROSCOPY': # and not(target in ['FLAT', 'BIAS']):
        solfile = iraf.osfn('pysalt$data/rss/RSSwave.db')
        print solfile
        y1,y2=quickspec(outfile, lampid, solfile=solfile, objsection=self.objsection, findobj=True, clobber=True, logfile=logfile, verbose=verbose)
        print y1,y2
        specfile=outpath+'smbxp'+infile.split('.fits')[0]+'.txt'
        #In here, so it doesn't break when the first checkdata runs
        try:
            self.specTab.updaterange(y1,y2)
            self.emit(QtCore.SIGNAL("updatespec(QString)"), infile)
        except Exception,e:
            # NOTE(review): fout is never closed here -- review for a leak.
            message="SALTFIRST--ERROR: Could not wavelength calibrate %s because %s" % (infile, e)
            fout=open(logfile, 'a')
            fout.write(message)
            print message
    # Fabry-Perot arcs get their own calibration step.
    if obsmode=='FABRY-PEROT' and obstype=='ARC':
        try:
            flatimage='/home/ccd/smc/FPFLAT.fits'
            profile=os.path.basename(outfile)
            fpcal(profile, flatimage=flatimage, minflat=18000, niter=5, bthresh=5, displayimage=True, clobber=True, logfile=logfile, verbose=verbose)
        except Exception,e:
            message="SALTFIRST--ERROR: Could not calibrate FP data te %s because %s" % (infile, e)
            fout=open(logfile, 'a')
            fout.write(message)
            print message
    #check for fast mode operation
    if self.update:
        runfast(name, propcode,self.obsdate,self.server, self.readme, self.sdbhost,self.sdbname, self.sdbuser, self.password)
    return iminfo
def createdirectories(f):
    """Step through all the levels of the path *f* and create every missing
    directory level (idempotent).

    Parameters
    ----------
    f : str
        Directory path, absolute or relative, with or without a trailing
        slash.

    Notes
    -----
    BUGFIX: the original iterated ``range(len(d))`` over ``d[:i]`` prefixes,
    which never created the final component when *f* had no trailing slash.
    Every component is created now.
    """
    path = ''
    for part in f.split('/'):
        path += part + '/'
        # '/' itself (absolute paths) and existing levels are skipped.
        if path != '/' and not os.path.isdir(path):
            os.mkdir(path)
def getbasename(hdu):
    """Return the base name of the file the HDU list *hdu* was opened from.

    Uses the public ``HDUList.filename()`` API where available (pyfits and
    astropy.io.fits); falls back to the private mangled attribute that the
    original code relied on for very old pyfits versions.
    """
    try:
        filename = hdu.filename()
    except AttributeError:
        # Legacy pyfits: only the name-mangled private attribute exists.
        filename = hdu._HDUList__file.name
    return os.path.basename(filename)
def getimagedetails(hdu):
    """Return all the pertinant image header details.

    Builds a list whose first element is the file name and whose remaining
    elements are the primary-header values for each keyword in
    headerList[1:], in order; missing keywords yield empty strings.
    """
    # NOTE(review): relies on the private mangled attribute of HDUList;
    # consider hdu.filename() on newer pyfits/astropy.
    filename=hdu._HDUList__file.name
    imlist=[filename]
    print filename
    for k in headerList[1:]:
        try:
            value=saltkey.get(k, hdu[0])
        except SaltIOError:
            # Keyword absent from the header: record an empty string.
            value=''
        imlist.append(value)
    return imlist
# -----------------------------------------------------------
# main code
#parfile = iraf.osfn("saltfirst$saltfirst.par")
#t = iraf.IrafTaskFactory(taskname="saltfirst",value=parfile,function=saltfirst, pkgname='pipetools')
|
bsd-3-clause
|
kaiserroll14/301finalproject
|
main/pandas/stats/moments.py
|
9
|
39221
|
"""
Provides rolling statistical moments and related descriptive
statistics implemented in Cython
"""
from __future__ import division
from functools import wraps
from collections import defaultdict
from numpy import NaN
import numpy as np
from pandas.core.api import DataFrame, Series, Panel, notnull
import pandas.algos as algos
import pandas.core.common as pdcom
from pandas.util.decorators import Substitution, Appender
__all__ = ['rolling_count', 'rolling_max', 'rolling_min',
'rolling_sum', 'rolling_mean', 'rolling_std', 'rolling_cov',
'rolling_corr', 'rolling_var', 'rolling_skew', 'rolling_kurt',
'rolling_quantile', 'rolling_median', 'rolling_apply',
'rolling_corr_pairwise', 'rolling_window',
'ewma', 'ewmvar', 'ewmstd', 'ewmvol', 'ewmcorr', 'ewmcov',
'expanding_count', 'expanding_max', 'expanding_min',
'expanding_sum', 'expanding_mean', 'expanding_std',
'expanding_cov', 'expanding_corr', 'expanding_var',
'expanding_skew', 'expanding_kurt', 'expanding_quantile',
'expanding_median', 'expanding_apply', 'expanding_corr_pairwise']
#------------------------------------------------------------------------------
# Docs
# The order of arguments for the _doc_template is:
# (header, args, kwargs, returns, notes)
_doc_template = """
%s
Parameters
----------
%s%s
Returns
-------
%s
%s
"""
_roll_kw = """window : int
Size of the moving window. This is the number of observations used for
calculating the statistic.
min_periods : int, default None
Minimum number of observations in window required to have a value
(otherwise result is NA).
freq : string or DateOffset object, optional (default None)
Frequency to conform the data to before computing the statistic. Specified
as a frequency string or DateOffset object.
center : boolean, default False
Set the labels at the center of the window.
how : string, default '%s'
Method for down- or re-sampling
"""
_roll_notes = r"""
Notes
-----
By default, the result is set to the right edge of the window. This can be
changed to the center of the window by setting ``center=True``.
The `freq` keyword is used to conform time series data to a specified
frequency by resampling the data. This is done with the default parameters
of :meth:`~pandas.Series.resample` (i.e. using the `mean`).
"""
_ewm_kw = r"""com : float. optional
Center of mass: :math:`\alpha = 1 / (1 + com)`,
span : float, optional
Specify decay in terms of span, :math:`\alpha = 2 / (span + 1)`
halflife : float, optional
Specify decay in terms of halflife, :math:`\alpha = 1 - exp(log(0.5) / halflife)`
min_periods : int, default 0
Minimum number of observations in window required to have a value
(otherwise result is NA).
freq : None or string alias / date offset object, default=None
Frequency to conform to before computing statistic
adjust : boolean, default True
Divide by decaying adjustment factor in beginning periods to account for
imbalance in relative weightings (viewing EWMA as a moving average)
how : string, default 'mean'
Method for down- or re-sampling
ignore_na : boolean, default False
Ignore missing values when calculating weights;
specify True to reproduce pre-0.15.0 behavior
"""
_ewm_notes = r"""
Notes
-----
Either center of mass, span or halflife must be specified
EWMA is sometimes specified using a "span" parameter `s`, we have that the
decay parameter :math:`\alpha` is related to the span as
:math:`\alpha = 2 / (s + 1) = 1 / (1 + c)`
where `c` is the center of mass. Given a span, the associated center of mass is
:math:`c = (s - 1) / 2`
So a "20-day EWMA" would have center 9.5.
When adjust is True (default), weighted averages are calculated using weights
(1-alpha)**(n-1), (1-alpha)**(n-2), ..., 1-alpha, 1.
When adjust is False, weighted averages are calculated recursively as:
weighted_average[0] = arg[0];
weighted_average[i] = (1-alpha)*weighted_average[i-1] + alpha*arg[i].
When ignore_na is False (default), weights are based on absolute positions.
For example, the weights of x and y used in calculating the final weighted
average of [x, None, y] are (1-alpha)**2 and 1 (if adjust is True), and
(1-alpha)**2 and alpha (if adjust is False).
When ignore_na is True (reproducing pre-0.15.0 behavior), weights are based on
relative positions. For example, the weights of x and y used in calculating
the final weighted average of [x, None, y] are 1-alpha and 1 (if adjust is
True), and 1-alpha and alpha (if adjust is False).
More details can be found at
http://pandas.pydata.org/pandas-docs/stable/computation.html#exponentially-weighted-moment-functions
"""
_expanding_kw = """min_periods : int, default None
Minimum number of observations in window required to have a value
(otherwise result is NA).
freq : string or DateOffset object, optional (default None)
Frequency to conform the data to before computing the statistic. Specified
as a frequency string or DateOffset object.
"""
_type_of_input_retval = "y : type of input argument"
_flex_retval = """y : type depends on inputs
DataFrame / DataFrame -> DataFrame (matches on columns) or Panel (pairwise)
DataFrame / Series -> Computes result for each column
Series / Series -> Series"""
_pairwise_retval = "y : Panel whose items are df1.index values"
_unary_arg = "arg : Series, DataFrame\n"
_binary_arg_flex = """arg1 : Series, DataFrame, or ndarray
arg2 : Series, DataFrame, or ndarray, optional
if not supplied then will default to arg1 and produce pairwise output
"""
_binary_arg = """arg1 : Series, DataFrame, or ndarray
arg2 : Series, DataFrame, or ndarray
"""
_pairwise_arg = """df1 : DataFrame
df2 : DataFrame
"""
_pairwise_kw = """pairwise : bool, default False
If False then only matching columns between arg1 and arg2 will be used and
the output will be a DataFrame.
If True then all pairwise combinations will be calculated and the output
will be a Panel in the case of DataFrame inputs. In the case of missing
elements, only complete pairwise observations will be used.
"""
_ddof_kw = """ddof : int, default 1
Delta Degrees of Freedom. The divisor used in calculations
is ``N - ddof``, where ``N`` represents the number of elements.
"""
_bias_kw = r"""bias : boolean, default False
Use a standard estimation bias correction
"""
def rolling_count(arg, window, freq=None, center=False, how=None):
    """
    Rolling count of number of non-NaN observations inside provided window.

    Parameters
    ----------
    arg : DataFrame or numpy ndarray-like
    window : int
        Size of the moving window. This is the number of observations used for
        calculating the statistic.
    freq : string or DateOffset object, optional (default None)
        Frequency to conform the data to before computing the statistic.
        Specified as a frequency string or DateOffset object.
    center : boolean, default False
        Whether the label should correspond with center of window
    how : string, default 'mean'
        Method for down- or re-sampling

    Returns
    -------
    rolling_count : type of caller

    Notes
    -----
    The `freq` keyword is used to conform time series data to a specified
    frequency by resampling the data. This is done with the default parameters
    of :meth:`~pandas.Series.resample` (i.e. using the `mean`).
    """
    arg = _conv_timerule(arg, freq, how)
    # Without centering, a window larger than the data is capped at its length.
    if not center:
        window = min(window, len(arg))
    return_hook, values = _process_data_structure(arg, kill_inf=False)
    # 1.0 where a finite observation is present, 0.0 elsewhere; summing this
    # mask over each window yields the per-window observation count.
    finite_mask = np.isfinite(values).astype(float)
    counts = rolling_sum(finite_mask, window, min_periods=0,
                         center=center)  # already converted
    # Windows containing no data produce NaN sums -- report those as 0.
    np.putmask(counts, np.isnan(counts), 0)
    return return_hook(counts)
@Substitution("Unbiased moving covariance.", _binary_arg_flex,
              _roll_kw%'None'+_pairwise_kw+_ddof_kw, _flex_retval, _roll_notes)
@Appender(_doc_template)
def rolling_cov(arg1, arg2=None, window=None, min_periods=None, freq=None,
                center=False, pairwise=None, how=None, ddof=1):
    # Legacy call styles: rolling_cov(df, window) and rolling_cov(df) both
    # mean "pairwise covariance of df with itself" (pairwise defaults True).
    if window is None and isinstance(arg2, (int, float)):
        window, arg2 = arg2, arg1
        if pairwise is None:
            pairwise = True
    elif arg2 is None:
        arg2 = arg1
        if pairwise is None:
            pairwise = True
    arg1 = _conv_timerule(arg1, freq, how)
    arg2 = _conv_timerule(arg2, freq, how)

    def _get_cov(X, Y):
        # cov(X, Y) = E[XY] - E[X]E[Y], rescaled by count / (count - ddof)
        # to undo the bias of the naive estimator.
        mean = lambda x: rolling_mean(x, window, min_periods, center=center)
        count = rolling_count(X + Y, window, center=center)
        bias_adj = count / (count - ddof)
        return (mean(X * Y) - mean(X) * mean(Y)) * bias_adj

    return _flex_binary_moment(arg1, arg2, _get_cov, pairwise=bool(pairwise))
@Substitution("Moving sample correlation.", _binary_arg_flex,
              _roll_kw%'None'+_pairwise_kw, _flex_retval, _roll_notes)
@Appender(_doc_template)
def rolling_corr(arg1, arg2=None, window=None, min_periods=None, freq=None,
                 center=False, pairwise=None, how=None):
    # Legacy call styles: rolling_corr(df, window) and rolling_corr(df) both
    # mean "pairwise correlation of df with itself" (pairwise defaults True).
    if window is None and isinstance(arg2, (int, float)):
        window, arg2 = arg2, arg1
        if pairwise is None:
            pairwise = True
    elif arg2 is None:
        arg2 = arg1
        if pairwise is None:
            pairwise = True
    arg1 = _conv_timerule(arg1, freq, how)
    arg2 = _conv_timerule(arg2, freq, how)

    def _get_corr(a, b):
        # corr = cov(a, b) / (std(a) * std(b)), computed window by window.
        num = rolling_cov(a, b, window, min_periods, freq=freq, center=center)
        den = (rolling_std(a, window, min_periods, freq=freq, center=center) *
               rolling_std(b, window, min_periods, freq=freq, center=center))
        return num / den

    return _flex_binary_moment(arg1, arg2, _get_corr, pairwise=bool(pairwise))
def _flex_binary_moment(arg1, arg2, f, pairwise=False):
    # Dispatch a binary moment function `f` over two inputs:
    # ndarray/Series pairs are aligned and passed straight to `f`;
    # DataFrames fan out column-by-column (pairwise=False) or over the
    # cross-product of column pairs (pairwise=True, returning a Panel).
    if not (isinstance(arg1,(np.ndarray, Series, DataFrame)) and
            isinstance(arg2,(np.ndarray, Series, DataFrame))):
        raise TypeError("arguments to moment function must be of type "
                        "np.ndarray/Series/DataFrame")
    if isinstance(arg1, (np.ndarray, Series)) and \
            isinstance(arg2, (np.ndarray,Series)):
        X, Y = _prep_binary(arg1, arg2)
        return f(X, Y)
    elif isinstance(arg1, DataFrame):
        def dataframe_from_int_dict(data, frame_template):
            # `data` is keyed by integer column position; map the positions
            # back onto the template's column labels.
            result = DataFrame(data, index=frame_template.index)
            if len(result.columns) > 0:
                result.columns = frame_template.columns[result.columns]
            return result
        results = {}
        if isinstance(arg2, DataFrame):
            if pairwise is False:
                if arg1 is arg2:
                    # special case in order to handle duplicate column names
                    for i, col in enumerate(arg1.columns):
                        results[i] = f(arg1.iloc[:, i], arg2.iloc[:, i])
                    return dataframe_from_int_dict(results, arg1)
                else:
                    if not arg1.columns.is_unique:
                        raise ValueError("'arg1' columns are not unique")
                    if not arg2.columns.is_unique:
                        raise ValueError("'arg2' columns are not unique")
                    # Outer-align; X + 0*Y / Y + 0*X propagate each side's
                    # NaNs onto the other so both share missingness.
                    X, Y = arg1.align(arg2, join='outer')
                    X = X + 0 * Y
                    Y = Y + 0 * X
                    res_columns = arg1.columns.union(arg2.columns)
                    for col in res_columns:
                        if col in X and col in Y:
                            results[col] = f(X[col], Y[col])
                    return DataFrame(results, index=X.index, columns=res_columns)
            elif pairwise is True:
                results = defaultdict(dict)
                for i, k1 in enumerate(arg1.columns):
                    for j, k2 in enumerate(arg2.columns):
                        if j<i and arg2 is arg1:
                            # Symmetric case
                            results[i][j] = results[j][i]
                        else:
                            results[i][j] = f(*_prep_binary(arg1.iloc[:, i], arg2.iloc[:, j]))
                p = Panel.from_dict(results).swapaxes('items', 'major')
                if len(p.major_axis) > 0:
                    p.major_axis = arg1.columns[p.major_axis]
                if len(p.minor_axis) > 0:
                    p.minor_axis = arg2.columns[p.minor_axis]
                return p
            else:
                raise ValueError("'pairwise' is not True/False")
        else:
            # DataFrame vs Series/ndarray: apply f per column.
            results = {}
            for i, col in enumerate(arg1.columns):
                results[i] = f(*_prep_binary(arg1.iloc[:, i], arg2))
            return dataframe_from_int_dict(results, arg1)
    else:
        # arg1 is Series/ndarray but arg2 is a DataFrame: swap and recurse.
        return _flex_binary_moment(arg2, arg1, f)
@Substitution("Deprecated. Use rolling_corr(..., pairwise=True) instead.\n\n"
"Pairwise moving sample correlation", _pairwise_arg,
_roll_kw%'None', _pairwise_retval, _roll_notes)
@Appender(_doc_template)
def rolling_corr_pairwise(df1, df2=None, window=None, min_periods=None,
freq=None, center=False):
import warnings
msg = "rolling_corr_pairwise is deprecated, use rolling_corr(..., pairwise=True)"
warnings.warn(msg, FutureWarning, stacklevel=2)
return rolling_corr(df1, df2, window=window, min_periods=min_periods,
freq=freq, center=center,
pairwise=True)
def _rolling_moment(arg, window, func, minp, axis=0, freq=None, center=False,
                    how=None, args=(), kwargs={}, **kwds):
    """
    Rolling statistical measure using supplied function. Designed to be
    used with passed-in Cython array-based functions.

    Parameters
    ----------
    arg : DataFrame or numpy ndarray-like
    window : Number of observations used for calculating statistic
    func : Cython function to compute rolling statistic on raw series
    minp : int
        Minimum number of observations required to have a value
    axis : int, default 0
    freq : None or string alias / date offset object, default=None
        Frequency to conform to before computing statistic
    how : string, default None
        Method for down- or re-sampling

    args : tuple
        Passed on to func
    kwargs : dict
        Passed on to func

    Returns
    -------
    y : type of input
    """
    arg = _conv_timerule(arg, freq, how)
    return_hook, values = _process_data_structure(arg)
    if values.size == 0:
        result = values.copy()
    else:
        # actually calculate the moment. Faster way to do this?
        # When centering, pad the tail with NaNs so the kernel emits a value
        # for every original position before _center_window shifts them left.
        offset = int((window - 1) / 2.) if center else 0
        additional_nans = np.array([np.NaN] * offset)
        calc = lambda x: func(np.concatenate((x, additional_nans)) if center else x,
                              window, minp=minp, args=args, kwargs=kwargs,
                              **kwds)
        if values.ndim > 1:
            result = np.apply_along_axis(calc, axis, values)
        else:
            result = calc(values)
    if center:
        result = _center_window(result, window, axis)
    return return_hook(result)
def _center_window(rs, window, axis):
if axis > rs.ndim-1:
raise ValueError("Requested axis is larger then no. of argument "
"dimensions")
offset = int((window - 1) / 2.)
if offset > 0:
if isinstance(rs, (Series, DataFrame, Panel)):
rs = rs.slice_shift(-offset, axis=axis)
else:
lead_indexer = [slice(None)] * rs.ndim
lead_indexer[axis] = slice(offset, None)
rs = np.copy(rs[tuple(lead_indexer)])
return rs
def _process_data_structure(arg, kill_inf=True):
if isinstance(arg, DataFrame):
return_hook = lambda v: type(arg)(v, index=arg.index,
columns=arg.columns)
values = arg.values
elif isinstance(arg, Series):
values = arg.values
return_hook = lambda v: Series(v, arg.index, name=arg.name)
else:
return_hook = lambda v: v
values = arg
if not issubclass(values.dtype.type, float):
values = values.astype(float)
if kill_inf:
values = values.copy()
values[np.isinf(values)] = np.NaN
return return_hook, values
#------------------------------------------------------------------------------
# Exponential moving moments
def _get_center_of_mass(com, span, halflife):
valid_count = len([x for x in [com, span, halflife] if x is not None])
if valid_count > 1:
raise Exception("com, span, and halflife are mutually exclusive")
if span is not None:
# convert span to center of mass
com = (span - 1) / 2.
elif halflife is not None:
# convert halflife to center of mass
decay = 1 - np.exp(np.log(0.5) / halflife)
com = 1 / decay - 1
elif com is None:
raise Exception("Must pass one of com, span, or halflife")
return float(com)
@Substitution("Exponentially-weighted moving average", _unary_arg, _ewm_kw,
_type_of_input_retval, _ewm_notes)
@Appender(_doc_template)
def ewma(arg, com=None, span=None, halflife=None, min_periods=0, freq=None,
adjust=True, how=None, ignore_na=False):
arg = _conv_timerule(arg, freq, how)
com = _get_center_of_mass(com, span, halflife)
def _ewma(v):
return algos.ewma(v, com, int(adjust), int(ignore_na), int(min_periods))
return_hook, values = _process_data_structure(arg)
if values.size == 0:
output = values.copy()
else:
output = np.apply_along_axis(_ewma, 0, values)
return return_hook(output)
@Substitution("Exponentially-weighted moving variance", _unary_arg,
_ewm_kw+_bias_kw, _type_of_input_retval, _ewm_notes)
@Appender(_doc_template)
def ewmvar(arg, com=None, span=None, halflife=None, min_periods=0, bias=False,
freq=None, how=None, ignore_na=False, adjust=True):
arg = _conv_timerule(arg, freq, how)
com = _get_center_of_mass(com, span, halflife)
def _ewmvar(v):
return algos.ewmcov(v, v, com, int(adjust), int(ignore_na), int(min_periods), int(bias))
return_hook, values = _process_data_structure(arg)
if values.size == 0:
output = values.copy()
else:
output = np.apply_along_axis(_ewmvar, 0, values)
return return_hook(output)
@Substitution("Exponentially-weighted moving std", _unary_arg,
_ewm_kw+_bias_kw, _type_of_input_retval, _ewm_notes)
@Appender(_doc_template)
def ewmstd(arg, com=None, span=None, halflife=None, min_periods=0, bias=False,
ignore_na=False, adjust=True):
result = ewmvar(arg, com=com, span=span, halflife=halflife,
min_periods=min_periods, bias=bias, adjust=adjust, ignore_na=ignore_na)
return _zsqrt(result)
ewmvol = ewmstd
@Substitution("Exponentially-weighted moving covariance", _binary_arg_flex,
_ewm_kw+_pairwise_kw, _type_of_input_retval, _ewm_notes)
@Appender(_doc_template)
def ewmcov(arg1, arg2=None, com=None, span=None, halflife=None, min_periods=0,
bias=False, freq=None, pairwise=None, how=None, ignore_na=False, adjust=True):
if arg2 is None:
arg2 = arg1
pairwise = True if pairwise is None else pairwise
elif isinstance(arg2, (int, float)) and com is None:
com = arg2
arg2 = arg1
pairwise = True if pairwise is None else pairwise
arg1 = _conv_timerule(arg1, freq, how)
arg2 = _conv_timerule(arg2, freq, how)
com = _get_center_of_mass(com, span, halflife)
def _get_ewmcov(X, Y):
# X and Y have the same structure (and NaNs) when called from _flex_binary_moment()
return_hook, x_values = _process_data_structure(X)
return_hook, y_values = _process_data_structure(Y)
cov = algos.ewmcov(x_values, y_values, com, int(adjust), int(ignore_na), int(min_periods), int(bias))
return return_hook(cov)
result = _flex_binary_moment(arg1, arg2, _get_ewmcov,
pairwise=bool(pairwise))
return result
@Substitution("Exponentially-weighted moving correlation", _binary_arg_flex,
_ewm_kw+_pairwise_kw, _type_of_input_retval, _ewm_notes)
@Appender(_doc_template)
def ewmcorr(arg1, arg2=None, com=None, span=None, halflife=None, min_periods=0,
freq=None, pairwise=None, how=None, ignore_na=False, adjust=True):
if arg2 is None:
arg2 = arg1
pairwise = True if pairwise is None else pairwise
elif isinstance(arg2, (int, float)) and com is None:
com = arg2
arg2 = arg1
pairwise = True if pairwise is None else pairwise
arg1 = _conv_timerule(arg1, freq, how)
arg2 = _conv_timerule(arg2, freq, how)
com = _get_center_of_mass(com, span, halflife)
def _get_ewmcorr(X, Y):
# X and Y have the same structure (and NaNs) when called from _flex_binary_moment()
return_hook, x_values = _process_data_structure(X)
return_hook, y_values = _process_data_structure(Y)
cov = algos.ewmcov(x_values, y_values, com, int(adjust), int(ignore_na), int(min_periods), 1)
x_var = algos.ewmcov(x_values, x_values, com, int(adjust), int(ignore_na), int(min_periods), 1)
y_var = algos.ewmcov(y_values, y_values, com, int(adjust), int(ignore_na), int(min_periods), 1)
corr = cov / _zsqrt(x_var * y_var)
return return_hook(corr)
result = _flex_binary_moment(arg1, arg2, _get_ewmcorr,
pairwise=bool(pairwise))
return result
def _zsqrt(x):
result = np.sqrt(x)
mask = x < 0
if isinstance(x, DataFrame):
if mask.values.any():
result[mask] = 0
else:
if mask.any():
result[mask] = 0
return result
def _prep_binary(arg1, arg2):
if not isinstance(arg2, type(arg1)):
raise Exception('Input arrays must be of the same type!')
# mask out values, this also makes a common index...
X = arg1 + 0 * arg2
Y = arg2 + 0 * arg1
return X, Y
#----------------------------------------------------------------------
# Python interface to Cython functions
def _conv_timerule(arg, freq, how):
types = (DataFrame, Series)
if freq is not None and isinstance(arg, types):
# Conform to whatever frequency needed.
arg = arg.resample(freq, how=how)
return arg
def _require_min_periods(p):
def _check_func(minp, window):
if minp is None:
return window
else:
return max(p, minp)
return _check_func
def _use_window(minp, window):
if minp is None:
return window
else:
return minp
def _rolling_func(func, desc, check_minp=_use_window, how=None, additional_kw=''):
    """Wrap a Cython rolling kernel into a public ``rolling_*`` function.

    Parameters
    ----------
    func : Cython kernel taking (values, window, minp, **kwds)
    desc : str
        One-line description injected into the shared docstring template.
    check_minp : callable
        Resolves the effective min_periods from (minp, window).
    how : str or None
        Default down-/re-sampling method advertised in the docstring.
    additional_kw : str
        Extra keyword documentation appended to the template.
    """
    if how is None:
        how_arg_str = 'None'
    else:
        # BUG FIX: the closing quote was missing ("'%s" % how), which left an
        # unbalanced quote in every generated docstring for max/min/median.
        how_arg_str = "'%s'" % how

    @Substitution(desc, _unary_arg, _roll_kw%how_arg_str + additional_kw,
                  _type_of_input_retval, _roll_notes)
    @Appender(_doc_template)
    @wraps(func)
    def f(arg, window, min_periods=None, freq=None, center=False, how=how,
          **kwargs):
        def call_cython(arg, window, minp, args=(), kwargs={}, **kwds):
            minp = check_minp(minp, window)
            return func(arg, window, minp, **kwds)
        return _rolling_moment(arg, window, call_cython, min_periods, freq=freq,
                               center=center, how=how, **kwargs)

    return f
# Public rolling aggregations, generated from their Cython kernels.
# `how=` advertises the matching default resampling method in the docstring.
rolling_max = _rolling_func(algos.roll_max, 'Moving maximum.', how='max')
rolling_min = _rolling_func(algos.roll_min, 'Moving minimum.', how='min')
rolling_sum = _rolling_func(algos.roll_sum, 'Moving sum.')
rolling_mean = _rolling_func(algos.roll_mean, 'Moving mean.')
rolling_median = _rolling_func(algos.roll_median_c, 'Moving median.',
                               how='median')
# Rolling std is the clamped square root of the rolling variance.
_ts_std = lambda *a, **kw: _zsqrt(algos.roll_var(*a, **kw))
rolling_std = _rolling_func(_ts_std, 'Moving standard deviation.',
                            check_minp=_require_min_periods(1),
                            additional_kw=_ddof_kw)
rolling_var = _rolling_func(algos.roll_var, 'Moving variance.',
                            check_minp=_require_min_periods(1),
                            additional_kw=_ddof_kw)
# Skewness/kurtosis need at least 3/4 observations to be defined.
rolling_skew = _rolling_func(algos.roll_skew, 'Unbiased moving skewness.',
                             check_minp=_require_min_periods(3))
rolling_kurt = _rolling_func(algos.roll_kurt, 'Unbiased moving kurtosis.',
                             check_minp=_require_min_periods(4))
def rolling_quantile(arg, window, quantile, min_periods=None, freq=None,
                     center=False):
    """Moving quantile.

    Parameters
    ----------
    arg : Series, DataFrame
    window : int
        Size of the moving window. This is the number of observations used for
        calculating the statistic.
    quantile : float
        0 <= quantile <= 1
    min_periods : int, default None
        Minimum number of observations in window required to have a value
        (otherwise result is NA).
    freq : string or DateOffset object, optional (default None)
        Frequency to conform the data to before computing the statistic.
    center : boolean, default False
        Whether the label should correspond with center of window

    Returns
    -------
    y : type of input argument

    Notes
    -----
    By default, the result is set to the right edge of the window. This can be
    changed to the center of the window by setting ``center=True``.

    The `freq` keyword is used to conform time series data to a specified
    frequency by resampling the data. This is done with the default parameters
    of :meth:`~pandas.Series.resample` (i.e. using the `mean`).
    """
    def call_cython(values, window, minp, args=(), kwargs={}):
        effective_minp = _use_window(minp, window)
        return algos.roll_quantile(values, window, effective_minp, quantile)

    return _rolling_moment(arg, window, call_cython, min_periods, freq=freq,
                           center=center)
def rolling_apply(arg, window, func, min_periods=None, freq=None,
                  center=False, args=(), kwargs={}):
    """Generic moving function application.

    Parameters
    ----------
    arg : Series, DataFrame
    window : int
        Size of the moving window. This is the number of observations used for
        calculating the statistic.
    func : function
        Must produce a single value from an ndarray input
    min_periods : int, default None
        Minimum number of observations in window required to have a value
        (otherwise result is NA).
    freq : string or DateOffset object, optional (default None)
        Frequency to conform the data to before computing the statistic.
    center : boolean, default False
        Whether the label should correspond with center of window
    args : tuple
        Passed on to func
    kwargs : dict
        Passed on to func

    Returns
    -------
    y : type of input argument

    Notes
    -----
    By default, the result is set to the right edge of the window. This can be
    changed to the center of the window by setting ``center=True``.

    The `freq` keyword is used to conform time series data to a specified
    frequency by resampling the data. This is done with the default parameters
    of :meth:`~pandas.Series.resample` (i.e. using the `mean`).
    """
    # Centering is handled inside roll_generic via `offset`, so the generic
    # rolling machinery is invoked with center=False.
    offset = int((window - 1) / 2.) if center else 0

    def call_cython(values, window, minp, args, kwargs):
        effective_minp = _use_window(minp, window)
        return algos.roll_generic(values, window, effective_minp, offset,
                                  func, args, kwargs)

    return _rolling_moment(arg, window, call_cython, min_periods, freq=freq,
                           center=False, args=args, kwargs=kwargs)
def rolling_window(arg, window=None, win_type=None, min_periods=None,
                   freq=None, center=False, mean=True,
                   axis=0, how=None, **kwargs):
    """
    Applies a moving window of type ``window_type`` and size ``window``
    on the data.

    Parameters
    ----------
    arg : Series, DataFrame
    window : int or ndarray
        Weighting window specification. If the window is an integer, then it is
        treated as the window length and win_type is required
    win_type : str, default None
        Window type (see Notes)
    min_periods : int, default None
        Minimum number of observations in window required to have a value
        (otherwise result is NA).
    freq : string or DateOffset object, optional (default None)
        Frequency to conform the data to before computing the statistic. Specified
        as a frequency string or DateOffset object.
    center : boolean, default False
        Whether the label should correspond with center of window
    mean : boolean, default True
        If True computes weighted mean, else weighted sum
    axis : {0, 1}, default 0
    how : string, default None
        Method for down- or re-sampling

    Returns
    -------
    y : type of input argument

    Notes
    -----
    The recognized window types are:

    * ``boxcar``
    * ``triang``
    * ``blackman``
    * ``hamming``
    * ``bartlett``
    * ``parzen``
    * ``bohman``
    * ``blackmanharris``
    * ``nuttall``
    * ``barthann``
    * ``kaiser`` (needs beta)
    * ``gaussian`` (needs std)
    * ``general_gaussian`` (needs power, width)
    * ``slepian`` (needs width).

    By default, the result is set to the right edge of the window. This can be
    changed to the center of the window by setting ``center=True``.

    The `freq` keyword is used to conform time series data to a specified
    frequency by resampling the data. This is done with the default parameters
    of :meth:`~pandas.Series.resample` (i.e. using the `mean`).
    """
    # Accept either explicit weights or an integer length + scipy window name.
    if isinstance(window, (list, tuple, np.ndarray)):
        if win_type is not None:
            raise ValueError(('Do not specify window type if using custom '
                              'weights'))
        window = pdcom._asarray_tuplesafe(window).astype(float)
    elif pdcom.is_integer(window):  # window size
        if win_type is None:
            raise ValueError('Must specify window type')
        try:
            import scipy.signal as sig
        except ImportError:
            raise ImportError('Please install scipy to generate window weight')
        win_type = _validate_win_type(win_type, kwargs)  # may pop from kwargs
        window = sig.get_window(win_type, window).astype(float)
    else:
        raise ValueError('Invalid window %s' % str(window))
    minp = _use_window(min_periods, len(window))
    arg = _conv_timerule(arg, freq, how)
    return_hook, values = _process_data_structure(arg)
    if values.size == 0:
        result = values.copy()
    else:
        # When centering, pad the tail with NaNs so roll_window emits a value
        # for every original position before the result is shifted left.
        offset = int((len(window) - 1) / 2.) if center else 0
        additional_nans = np.array([np.NaN] * offset)
        f = lambda x: algos.roll_window(np.concatenate((x, additional_nans)) if center else x,
                                        window, minp, avg=mean)
        result = np.apply_along_axis(f, axis, values)
    if center:
        result = _center_window(result, len(window), axis)
    return return_hook(result)
def _validate_win_type(win_type, kwargs):
# may pop from kwargs
arg_map = {'kaiser': ['beta'],
'gaussian': ['std'],
'general_gaussian': ['power', 'width'],
'slepian': ['width']}
if win_type in arg_map:
return tuple([win_type] +
_pop_args(win_type, arg_map[win_type], kwargs))
return win_type
def _pop_args(win_type, arg_names, kwargs):
msg = '%s window requires %%s' % win_type
all_args = []
for n in arg_names:
if n not in kwargs:
raise ValueError(msg % n)
all_args.append(kwargs.pop(n))
return all_args
def _expanding_func(func, desc, check_minp=_use_window, additional_kw=''):
    """Wrap a Cython kernel into a public ``expanding_*`` function."""
    @Substitution(desc, _unary_arg, _expanding_kw + additional_kw,
                  _type_of_input_retval, "")
    @Appender(_doc_template)
    @wraps(func)
    def f(arg, min_periods=1, freq=None, **kwargs):
        # An expanding window is a rolling window spanning the whole series.
        window = max(len(arg), min_periods) if min_periods else len(arg)

        def call_cython(arg, window, minp, args=(), kwargs={}, **kwds):
            minp = check_minp(minp, window)
            return func(arg, window, minp, **kwds)

        return _rolling_moment(arg, window, call_cython, min_periods,
                               freq=freq, **kwargs)

    return f
# Public expanding aggregations, generated from the same Cython kernels as
# their rolling counterparts (the window simply spans all prior rows).
expanding_max = _expanding_func(algos.roll_max, 'Expanding maximum.')
expanding_min = _expanding_func(algos.roll_min, 'Expanding minimum.')
expanding_sum = _expanding_func(algos.roll_sum, 'Expanding sum.')
expanding_mean = _expanding_func(algos.roll_mean, 'Expanding mean.')
expanding_median = _expanding_func(algos.roll_median_c, 'Expanding median.')
# std reuses the clamped-sqrt-of-variance lambda defined for rolling_std.
expanding_std = _expanding_func(_ts_std, 'Expanding standard deviation.',
                                check_minp=_require_min_periods(1),
                                additional_kw=_ddof_kw)
expanding_var = _expanding_func(algos.roll_var, 'Expanding variance.',
                                check_minp=_require_min_periods(1),
                                additional_kw=_ddof_kw)
# Skewness/kurtosis need at least 3/4 observations to be defined.
expanding_skew = _expanding_func(algos.roll_skew, 'Unbiased expanding skewness.',
                                 check_minp=_require_min_periods(3))
expanding_kurt = _expanding_func(algos.roll_kurt, 'Unbiased expanding kurtosis.',
                                 check_minp=_require_min_periods(4))
def expanding_count(arg, freq=None):
    """
    Expanding count of number of non-NaN observations.

    Parameters
    ----------
    arg : DataFrame or numpy ndarray-like
    freq : string or DateOffset object, optional (default None)
        Frequency to conform the data to before computing the statistic.

    Returns
    -------
    expanding_count : type of caller

    Notes
    -----
    The `freq` keyword is used to conform time series data to a specified
    frequency by resampling the data. This is done with the default parameters
    of :meth:`~pandas.Series.resample` (i.e. using the `mean`).
    """
    # An expanding count is a rolling count whose window spans every row.
    return rolling_count(arg, len(arg), freq=freq)
def expanding_quantile(arg, quantile, min_periods=1, freq=None):
    """Expanding quantile.

    Parameters
    ----------
    arg : Series, DataFrame
    quantile : float
        0 <= quantile <= 1
    min_periods : int, default None
        Minimum number of observations in window required to have a value
        (otherwise result is NA).
    freq : string or DateOffset object, optional (default None)
        Frequency to conform the data to before computing the statistic.

    Returns
    -------
    y : type of input argument

    Notes
    -----
    The `freq` keyword is used to conform time series data to a specified
    frequency by resampling the data. This is done with the default parameters
    of :meth:`~pandas.Series.resample` (i.e. using the `mean`).
    """
    # Delegate to rolling_quantile with a window covering the whole series.
    return rolling_quantile(arg, len(arg), quantile, min_periods=min_periods,
                            freq=freq)
@Substitution("Unbiased expanding covariance.", _binary_arg_flex,
_expanding_kw+_pairwise_kw+_ddof_kw, _flex_retval, "")
@Appender(_doc_template)
def expanding_cov(arg1, arg2=None, min_periods=1, freq=None, pairwise=None, ddof=1):
if arg2 is None:
arg2 = arg1
pairwise = True if pairwise is None else pairwise
elif isinstance(arg2, (int, float)) and min_periods is None:
min_periods = arg2
arg2 = arg1
pairwise = True if pairwise is None else pairwise
window = max((len(arg1) + len(arg2)), min_periods) if min_periods else (len(arg1) + len(arg2))
return rolling_cov(arg1, arg2, window,
min_periods=min_periods, freq=freq,
pairwise=pairwise, ddof=ddof)
@Substitution("Expanding sample correlation.", _binary_arg_flex,
_expanding_kw+_pairwise_kw, _flex_retval, "")
@Appender(_doc_template)
def expanding_corr(arg1, arg2=None, min_periods=1, freq=None, pairwise=None):
if arg2 is None:
arg2 = arg1
pairwise = True if pairwise is None else pairwise
elif isinstance(arg2, (int, float)) and min_periods is None:
min_periods = arg2
arg2 = arg1
pairwise = True if pairwise is None else pairwise
window = max((len(arg1) + len(arg2)), min_periods) if min_periods else (len(arg1) + len(arg2))
return rolling_corr(arg1, arg2, window,
min_periods=min_periods,
freq=freq, pairwise=pairwise)
@Substitution("Deprecated. Use expanding_corr(..., pairwise=True) instead.\n\n"
"Pairwise expanding sample correlation", _pairwise_arg,
_expanding_kw, _pairwise_retval, "")
@Appender(_doc_template)
def expanding_corr_pairwise(df1, df2=None, min_periods=1, freq=None):
import warnings
msg = "expanding_corr_pairwise is deprecated, use expanding_corr(..., pairwise=True)"
warnings.warn(msg, FutureWarning, stacklevel=2)
return expanding_corr(df1, df2, min_periods=min_periods,
freq=freq, pairwise=True)
def expanding_apply(arg, func, min_periods=1, freq=None,
                    args=(), kwargs={}):
    """Generic expanding function application.

    Parameters
    ----------
    arg : Series, DataFrame
    func : function
        Must produce a single value from an ndarray input
    min_periods : int, default None
        Minimum number of observations in window required to have a value
        (otherwise result is NA).
    freq : string or DateOffset object, optional (default None)
        Frequency to conform the data to before computing the statistic.
    args : tuple
        Passed on to func
    kwargs : dict
        Passed on to func

    Returns
    -------
    y : type of input argument

    Notes
    -----
    The `freq` keyword is used to conform time series data to a specified
    frequency by resampling the data. This is done with the default parameters
    of :meth:`~pandas.Series.resample` (i.e. using the `mean`).
    """
    # Delegate to rolling_apply with a window covering the whole series.
    window = max(len(arg), min_periods) if min_periods else len(arg)
    return rolling_apply(arg, window, func, min_periods=min_periods, freq=freq,
                         args=args, kwargs=kwargs)
|
gpl-3.0
|
jakobworldpeace/scikit-learn
|
sklearn/cluster/tests/test_hierarchical.py
|
33
|
20167
|
"""
Several basic tests for hierarchical clustering procedures
"""
# Authors: Vincent Michel, 2010, Gael Varoquaux 2012,
# Matteo Visconti di Oleggio Castello 2014
# License: BSD 3 clause
from tempfile import mkdtemp
import shutil
from functools import partial
import numpy as np
from scipy import sparse
from scipy.cluster import hierarchy
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import ignore_warnings
from sklearn.cluster import ward_tree
from sklearn.cluster import AgglomerativeClustering, FeatureAgglomeration
from sklearn.cluster.hierarchical import (_hc_cut, _TREE_BUILDERS,
linkage_tree)
from sklearn.feature_extraction.image import grid_to_graph
from sklearn.metrics.pairwise import PAIRED_DISTANCES, cosine_distances,\
manhattan_distances, pairwise_distances
from sklearn.metrics.cluster import normalized_mutual_info_score
from sklearn.neighbors.graph import kneighbors_graph
from sklearn.cluster._hierarchical import average_merge, max_merge
from sklearn.utils.fast_dict import IntFloatDict
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_warns
def test_linkage_misc():
    # Misc tests on linkage
    rng = np.random.RandomState(42)
    X = rng.normal(size=(5, 5))
    # Unknown linkage names must be rejected by both entry points.
    assert_raises(ValueError, AgglomerativeClustering(linkage='foo').fit, X)
    assert_raises(ValueError, linkage_tree, X, linkage='foo')
    # A connectivity matrix of the wrong shape is rejected too.
    assert_raises(ValueError, linkage_tree, X, connectivity=np.ones((4, 4)))

    # Smoke test FeatureAgglomeration
    FeatureAgglomeration().fit(X)

    # Precomputed distances must give the same tree as on-the-fly affinities.
    distances = cosine_distances(X)
    result = linkage_tree(distances, affinity="precomputed")
    assert_array_equal(result[0], linkage_tree(X, affinity="cosine")[0])

    # Callable affinities behave like their string counterparts.
    result = linkage_tree(X, affinity=manhattan_distances)
    assert_array_equal(result[0], linkage_tree(X, affinity="manhattan")[0])
def test_structured_linkage_tree():
    # Check that we obtain the correct solution for structured linkage trees.
    rng = np.random.RandomState(0)
    # BUG FIX: np.bool was deprecated and removed in NumPy 1.24; the builtin
    # bool produces the identical dtype.
    mask = np.ones([10, 10], dtype=bool)
    # Avoiding a mask with only 'True' entries
    mask[4:7, 4:7] = 0
    X = rng.randn(50, 100)
    connectivity = grid_to_graph(*mask.shape)
    for tree_builder in _TREE_BUILDERS.values():
        children, n_components, n_leaves, parent = \
            tree_builder(X.T, connectivity)
        n_nodes = 2 * X.shape[1] - 1
        assert_true(len(children) + n_leaves == n_nodes)
        # Check that ward_tree raises a ValueError with a connectivity matrix
        # of the wrong shape
        assert_raises(ValueError,
                      tree_builder, X.T, np.ones((4, 4)))
        # Check that fitting with no samples raises an error
        assert_raises(ValueError,
                      tree_builder, X.T[:0], connectivity)
def test_unstructured_linkage_tree():
    # Check that we obtain the correct solution for unstructured linkage trees.
    rng = np.random.RandomState(0)
    X = rng.randn(50, 100)
    expected_nodes = 2 * X.shape[1] - 1
    for this_X in (X, X[0]):
        # Specifying n_clusters without connectivity triggers a warning; the
        # warning path must still yield a valid tree.
        with ignore_warnings():
            children, n_nodes, n_leaves, parent = assert_warns(
                UserWarning, ward_tree, this_X.T, n_clusters=10)
        assert_equal(len(children) + n_leaves, expected_nodes)

    for tree_builder in _TREE_BUILDERS.values():
        for this_X in (X, X[0]):
            with ignore_warnings():
                children, n_nodes, n_leaves, parent = assert_warns(
                    UserWarning, tree_builder, this_X.T, n_clusters=10)
            assert_equal(len(children) + n_leaves, expected_nodes)
def test_height_linkage_tree():
    # Check that the height of the results of linkage tree is sorted.
    rng = np.random.RandomState(0)
    # BUG FIX: np.bool was deprecated and removed in NumPy 1.24; the builtin
    # bool produces the identical dtype.
    mask = np.ones([10, 10], dtype=bool)
    X = rng.randn(50, 100)
    connectivity = grid_to_graph(*mask.shape)
    for linkage_func in _TREE_BUILDERS.values():
        children, n_nodes, n_leaves, parent = linkage_func(X.T, connectivity)
        n_nodes = 2 * X.shape[1] - 1
        assert_true(len(children) + n_leaves == n_nodes)
def test_agglomerative_clustering_wrong_arg_memory():
    # Test either if an error is raised when memory is not
    # either a str or a joblib.Memory instance
    rng = np.random.RandomState(0)
    X = rng.randn(100, 50)
    # An int is neither a cache-directory string nor a Memory object.
    clustering = AgglomerativeClustering(memory=5)
    assert_raises(ValueError, clustering.fit, X)
def test_agglomerative_clustering():
    # Check that we obtain the correct number of clusters with
    # agglomerative clustering.
    rng = np.random.RandomState(0)
    # BUG FIX: np.bool was deprecated and removed in NumPy 1.24; the builtin
    # bool produces the identical dtype.
    mask = np.ones([10, 10], dtype=bool)
    n_samples = 100
    X = rng.randn(n_samples, 50)
    connectivity = grid_to_graph(*mask.shape)
    for linkage in ("ward", "complete", "average"):
        clustering = AgglomerativeClustering(n_clusters=10,
                                             connectivity=connectivity,
                                             linkage=linkage)
        clustering.fit(X)
        # test caching
        try:
            tempdir = mkdtemp()
            clustering = AgglomerativeClustering(
                n_clusters=10, connectivity=connectivity,
                memory=tempdir,
                linkage=linkage)
            clustering.fit(X)
            labels = clustering.labels_
            assert_true(np.size(np.unique(labels)) == 10)
        finally:
            shutil.rmtree(tempdir)
        # Turn caching off now
        clustering = AgglomerativeClustering(
            n_clusters=10, connectivity=connectivity, linkage=linkage)
        # Check that we obtain the same solution with early-stopping of the
        # tree building
        clustering.compute_full_tree = False
        clustering.fit(X)
        assert_almost_equal(normalized_mutual_info_score(clustering.labels_,
                                                         labels), 1)
        clustering.connectivity = None
        clustering.fit(X)
        assert_true(np.size(np.unique(clustering.labels_)) == 10)
        # Check that we raise a TypeError on dense matrices
        clustering = AgglomerativeClustering(
            n_clusters=10,
            connectivity=sparse.lil_matrix(
                connectivity.toarray()[:10, :10]),
            linkage=linkage)
        assert_raises(ValueError, clustering.fit, X)

    # Test that using ward with another metric than euclidean raises an
    # exception
    clustering = AgglomerativeClustering(
        n_clusters=10,
        connectivity=connectivity.toarray(),
        affinity="manhattan",
        linkage="ward")
    assert_raises(ValueError, clustering.fit, X)

    # Test using another metric than euclidean works with linkage complete
    for affinity in PAIRED_DISTANCES.keys():
        # Compare our (structured) implementation to scipy
        clustering = AgglomerativeClustering(
            n_clusters=10,
            connectivity=np.ones((n_samples, n_samples)),
            affinity=affinity,
            linkage="complete")
        clustering.fit(X)
        clustering2 = AgglomerativeClustering(
            n_clusters=10,
            connectivity=None,
            affinity=affinity,
            linkage="complete")
        clustering2.fit(X)
        assert_almost_equal(normalized_mutual_info_score(clustering2.labels_,
                                                         clustering.labels_),
                            1)

    # Test that using a distance matrix (affinity = 'precomputed') has same
    # results (with connectivity constraints)
    clustering = AgglomerativeClustering(n_clusters=10,
                                         connectivity=connectivity,
                                         linkage="complete")
    clustering.fit(X)
    X_dist = pairwise_distances(X)
    clustering2 = AgglomerativeClustering(n_clusters=10,
                                          connectivity=connectivity,
                                          affinity='precomputed',
                                          linkage="complete")
    clustering2.fit(X_dist)
    assert_array_equal(clustering.labels_, clustering2.labels_)
def test_ward_agglomeration():
    # Check that we obtain the correct solution in a simplistic case
    rng = np.random.RandomState(0)
    # BUG FIX: np.bool was deprecated and removed in NumPy 1.24; the builtin
    # bool produces the identical dtype.
    mask = np.ones([10, 10], dtype=bool)
    X = rng.randn(50, 100)
    connectivity = grid_to_graph(*mask.shape)
    agglo = FeatureAgglomeration(n_clusters=5, connectivity=connectivity)
    agglo.fit(X)
    assert_true(np.size(np.unique(agglo.labels_)) == 5)

    X_red = agglo.transform(X)
    assert_true(X_red.shape[1] == 5)
    X_full = agglo.inverse_transform(X_red)
    assert_true(np.unique(X_full[0]).size == 5)
    assert_array_almost_equal(agglo.transform(X_full), X_red)

    # Check that fitting with no samples raises a ValueError
    assert_raises(ValueError, agglo.fit, X[:0])
def assess_same_labelling(cut1, cut2):
    """Util for comparison with scipy.

    Raise AssertionError unless *cut1* and *cut2* describe the same
    partition (possibly under permuted label names).  Each cut is an
    integer label vector; the comparison is done on the induced
    co-clustering matrices, which are invariant to label renaming.
    """
    co_clust = []
    for cut in [cut1, cut2]:
        n = len(cut)
        k = cut.max() + 1
        # One-hot encode the labelling; ecut @ ecut.T is 1 exactly where two
        # samples share a cluster.
        ecut = np.zeros((n, k))
        ecut[np.arange(n), cut] = 1
        co_clust.append(np.dot(ecut, ecut.T))
    # Plain assert instead of the deprecated sklearn.utils.testing.assert_true
    # helper (removed in modern scikit-learn); same AssertionError semantics.
    assert (co_clust[0] == co_clust[1]).all()
def test_scikit_vs_scipy():
    """Compare scikit-learn's tree builders (with full, i.e. unstructured,
    connectivity) against scipy's hierarchy.linkage on random data."""
    n, p, k = 10, 5, 3
    rng = np.random.RandomState(0)
    # Not using a lil_matrix here, just to check that non sparse
    # matrices are well handled
    connectivity = np.ones((n, n))
    for linkage in _TREE_BUILDERS.keys():
        for i in range(5):
            X = .1 * rng.normal(size=(n, p))
            X -= 4. * np.arange(n)[:, np.newaxis]
            X -= X.mean(axis=1)[:, np.newaxis]
            out = hierarchy.linkage(X, method=linkage)
            # NOTE: np.int was a deprecated alias for the builtin int and was
            # removed in NumPy >= 1.24; use the builtin directly.
            children_ = out[:, :2].astype(int)
            children, _, n_leaves, _ = _TREE_BUILDERS[linkage](X, connectivity)
            cut = _hc_cut(k, children, n_leaves)
            cut_ = _hc_cut(k, children_, n_leaves)
            assess_same_labelling(cut, cut_)
    # Test error management in _hc_cut
    assert_raises(ValueError, _hc_cut, n_leaves + 1, children, n_leaves)
def test_connectivity_propagation():
    """Connectivity in the ward tree must be propagated during merging.

    If changes are not propagated correctly, fit() crashes with an
    IndexError.
    """
    points = np.array([(.014, .120), (.014, .099), (.014, .097),
                       (.017, .153), (.017, .153), (.018, .153),
                       (.018, .153), (.018, .153), (.018, .153),
                       (.018, .153), (.018, .153), (.018, .153),
                       (.018, .152), (.018, .149), (.018, .144)])
    graph = kneighbors_graph(points, 10, include_self=False)
    model = AgglomerativeClustering(
        n_clusters=4, connectivity=graph, linkage='ward')
    model.fit(points)
def test_ward_tree_children_order():
    """Structured and unstructured ward_tree must order children identically.

    Verified on five random datasets.
    """
    n, p = 10, 5
    rng = np.random.RandomState(0)
    full_connectivity = np.ones((n, n))
    for _ in range(5):
        data = .1 * rng.normal(size=(n, p))
        data -= 4. * np.arange(n)[:, np.newaxis]
        data -= data.mean(axis=1)[:, np.newaxis]
        unstructured = ward_tree(data)
        structured = ward_tree(data, connectivity=full_connectivity)
        assert_array_equal(unstructured[0], structured[0])
def test_ward_linkage_tree_return_distance():
    """Check the return_distance option of ward_tree / linkage_tree.

    Part 1: on random data, structured (full connectivity) and unstructured
    trees must agree on both children and merge distances.
    Part 2: on a fixed 6-point dataset, compare against ground-truth linkage
    matrices taken from scipy's test data.
    """
    # Test return_distance option on linkage and ward trees
    # test that return_distance when set true, gives same
    # output on both structured and unstructured clustering.
    n, p = 10, 5
    rng = np.random.RandomState(0)
    connectivity = np.ones((n, n))
    for i in range(5):
        X = .1 * rng.normal(size=(n, p))
        X -= 4. * np.arange(n)[:, np.newaxis]
        X -= X.mean(axis=1)[:, np.newaxis]
        out_unstructured = ward_tree(X, return_distance=True)
        out_structured = ward_tree(X, connectivity=connectivity,
                                   return_distance=True)
        # get children
        children_unstructured = out_unstructured[0]
        children_structured = out_structured[0]
        # check if we got the same clusters
        assert_array_equal(children_unstructured, children_structured)
        # check if the distances are the same
        dist_unstructured = out_unstructured[-1]
        dist_structured = out_structured[-1]
        assert_array_almost_equal(dist_unstructured, dist_structured)
        for linkage in ['average', 'complete']:
            structured_items = linkage_tree(
                X, connectivity=connectivity, linkage=linkage,
                return_distance=True)[-1]
            unstructured_items = linkage_tree(
                X, linkage=linkage, return_distance=True)[-1]
            structured_dist = structured_items[-1]
            unstructured_dist = unstructured_items[-1]
            structured_children = structured_items[0]
            unstructured_children = unstructured_items[0]
            assert_array_almost_equal(structured_dist, unstructured_dist)
            assert_array_almost_equal(
                structured_children, unstructured_children)
    # test on the following dataset where we know the truth
    # taken from scipy/cluster/tests/hierarchy_test_data.py
    X = np.array([[1.43054825, -7.5693489],
                  [6.95887839, 6.82293382],
                  [2.87137846, -9.68248579],
                  [7.87974764, -6.05485803],
                  [8.24018364, -6.09495602],
                  [7.39020262, 8.54004355]])
    # truth
    linkage_X_ward = np.array([[3., 4., 0.36265956, 2.],
                               [1., 5., 1.77045373, 2.],
                               [0., 2., 2.55760419, 2.],
                               [6., 8., 9.10208346, 4.],
                               [7., 9., 24.7784379, 6.]])
    linkage_X_complete = np.array(
        [[3., 4., 0.36265956, 2.],
         [1., 5., 1.77045373, 2.],
         [0., 2., 2.55760419, 2.],
         [6., 8., 6.96742194, 4.],
         [7., 9., 18.77445997, 6.]])
    linkage_X_average = np.array(
        [[3., 4., 0.36265956, 2.],
         [1., 5., 1.77045373, 2.],
         [0., 2., 2.55760419, 2.],
         [6., 8., 6.55832839, 4.],
         [7., 9., 15.44089605, 6.]])
    n_samples, n_features = np.shape(X)
    connectivity_X = np.ones((n_samples, n_samples))
    out_X_unstructured = ward_tree(X, return_distance=True)
    out_X_structured = ward_tree(X, connectivity=connectivity_X,
                                 return_distance=True)
    # check that the labels are the same
    assert_array_equal(linkage_X_ward[:, :2], out_X_unstructured[0])
    assert_array_equal(linkage_X_ward[:, :2], out_X_structured[0])
    # check that the distances are correct
    assert_array_almost_equal(linkage_X_ward[:, 2], out_X_unstructured[4])
    assert_array_almost_equal(linkage_X_ward[:, 2], out_X_structured[4])
    linkage_options = ['complete', 'average']
    X_linkage_truth = [linkage_X_complete, linkage_X_average]
    for (linkage, X_truth) in zip(linkage_options, X_linkage_truth):
        out_X_unstructured = linkage_tree(
            X, return_distance=True, linkage=linkage)
        out_X_structured = linkage_tree(
            X, connectivity=connectivity_X, linkage=linkage,
            return_distance=True)
        # check that the labels are the same
        assert_array_equal(X_truth[:, :2], out_X_unstructured[0])
        assert_array_equal(X_truth[:, :2], out_X_structured[0])
        # check that the distances are correct
        assert_array_almost_equal(X_truth[:, 2], out_X_unstructured[4])
        assert_array_almost_equal(X_truth[:, 2], out_X_structured[4])
def test_connectivity_fixing_non_lil():
    """Non-regression: a non item-assignable connectivity matrix with more
    than one connected component must be repaired (with a warning) rather
    than crash the fit."""
    samples = np.array([[0, 0], [1, 1]])
    # A mask with two separate components forces the connectivity-fixing path.
    component_mask = np.array([[True, False], [False, True]])
    conn = grid_to_graph(n_x=2, n_y=2, mask=component_mask)
    model = AgglomerativeClustering(connectivity=conn, linkage='ward')
    assert_warns(UserWarning, model.fit, samples)
def test_int_float_dict():
    """IntFloatDict must behave like a mapping and survive the merge ops."""
    rng = np.random.RandomState(0)
    keys = np.unique(rng.randint(100, size=10).astype(np.intp))
    values = rng.rand(len(keys))
    mapping = IntFloatDict(keys, values)
    for key, value in zip(keys, values):
        assert mapping[key] == value
    other = IntFloatDict(np.arange(50).astype(np.intp)[::2],
                         0.5 * np.ones(50)[::2])
    # Complete smoke test of the merge helpers.
    max_merge(mapping, other, mask=np.ones(100, dtype=np.intp), n_a=1, n_b=1)
    average_merge(mapping, other, mask=np.ones(100, dtype=np.intp), n_a=1, n_b=1)
def test_connectivity_callable():
    """A callable connectivity must yield the same labels as the equivalent
    precomputed kNN graph."""
    rng = np.random.RandomState(0)
    data = rng.rand(20, 5)
    precomputed = AgglomerativeClustering(
        connectivity=kneighbors_graph(data, 3, include_self=False))
    from_callable = AgglomerativeClustering(
        connectivity=partial(kneighbors_graph, n_neighbors=3, include_self=False))
    precomputed.fit(data)
    from_callable.fit(data)
    assert_array_equal(precomputed.labels_, from_callable.labels_)
def test_connectivity_ignores_diagonal():
    """Self-loops in the kNN connectivity graph must not change clustering."""
    rng = np.random.RandomState(0)
    data = rng.rand(20, 5)
    without_self = kneighbors_graph(data, 3, include_self=False)
    with_self = kneighbors_graph(data, 3, include_self=True)
    model_a = AgglomerativeClustering(connectivity=without_self)
    model_b = AgglomerativeClustering(connectivity=with_self)
    model_a.fit(data)
    model_b.fit(data)
    assert_array_equal(model_a.labels_, model_b.labels_)
def test_compute_full_tree():
    """The full tree is computed iff n_clusters is small enough."""
    rng = np.random.RandomState(0)
    data = rng.randn(10, 2)
    conn = kneighbors_graph(data, 5, include_self=False)
    # With a small n_clusters the full tree is built, i.e. there are
    # n_samples - 1 merges.
    model = AgglomerativeClustering(n_clusters=2, connectivity=conn)
    model.fit(data)
    assert_equal(model.children_.shape[0], data.shape[0] - 1)
    # With n_clusters greater than max(100, 0.02 * n_samples) the build
    # stops as soon as n_clusters groups remain.
    n_clusters = 101
    data = rng.randn(200, 2)
    conn = kneighbors_graph(data, 10, include_self=False)
    model = AgglomerativeClustering(n_clusters=n_clusters,
                                    connectivity=conn)
    model.fit(data)
    assert_equal(model.children_.shape[0], data.shape[0] - n_clusters)
def test_n_components():
    """Every tree builder must report the number of connected components."""
    rng = np.random.RandomState(0)
    data = rng.rand(5, 5)
    # An identity connectivity matrix has five isolated components.
    identity_connectivity = np.eye(5)
    for builder in _TREE_BUILDERS.values():
        assert_equal(ignore_warnings(builder)(data, identity_connectivity)[1], 5)
def test_agg_n_clusters():
    """n_clusters <= 0 must raise a ValueError with a helpful message."""
    rng = np.random.RandomState(0)
    data = rng.rand(20, 10)
    for bad_n in [-1, 0]:
        model = AgglomerativeClustering(n_clusters=bad_n)
        expected = ("n_clusters should be an integer greater than 0."
                    " %s was provided." % str(model.n_clusters))
        assert_raise_message(ValueError, expected, model.fit, data)
|
bsd-3-clause
|
ChanChiChoi/scikit-learn
|
examples/text/document_classification_20newsgroups.py
|
222
|
10500
|
"""
======================================================
Classification of text documents using sparse features
======================================================
This is an example showing how scikit-learn can be used to classify documents
by topics using a bag-of-words approach. This example uses a scipy.sparse
matrix to store the features and demonstrates various classifiers that can
efficiently handle sparse matrices.
The dataset used in this example is the 20 newsgroups dataset. It will be
automatically downloaded, then cached.
The bar plot indicates the accuracy, training time (normalized) and test time
(normalized) of each classifier.
"""
# Author: Peter Prettenhofer <[email protected]>
# Olivier Grisel <[email protected]>
# Mathieu Blondel <[email protected]>
# Lars Buitinck <[email protected]>
# License: BSD 3 clause
from __future__ import print_function
import logging
import numpy as np
from optparse import OptionParser
import sys
from time import time
import matplotlib.pyplot as plt
from sklearn.datasets import fetch_20newsgroups
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction.text import HashingVectorizer
from sklearn.feature_selection import SelectKBest, chi2
from sklearn.linear_model import RidgeClassifier
from sklearn.pipeline import Pipeline
from sklearn.svm import LinearSVC
from sklearn.linear_model import SGDClassifier
from sklearn.linear_model import Perceptron
from sklearn.linear_model import PassiveAggressiveClassifier
from sklearn.naive_bayes import BernoulliNB, MultinomialNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.neighbors import NearestCentroid
from sklearn.ensemble import RandomForestClassifier
from sklearn.utils.extmath import density
from sklearn import metrics
# Display progress logs on stdout
logging.basicConfig(level=logging.INFO,
format='%(asctime)s %(levelname)s %(message)s')
# parse commandline arguments
op = OptionParser()
op.add_option("--report",
action="store_true", dest="print_report",
help="Print a detailed classification report.")
op.add_option("--chi2_select",
action="store", type="int", dest="select_chi2",
help="Select some number of features using a chi-squared test")
op.add_option("--confusion_matrix",
action="store_true", dest="print_cm",
help="Print the confusion matrix.")
op.add_option("--top10",
action="store_true", dest="print_top10",
help="Print ten most discriminative terms per class"
" for every classifier.")
op.add_option("--all_categories",
action="store_true", dest="all_categories",
help="Whether to use all categories or not.")
op.add_option("--use_hashing",
action="store_true",
help="Use a hashing vectorizer.")
op.add_option("--n_features",
action="store", type=int, default=2 ** 16,
help="n_features when using the hashing vectorizer.")
op.add_option("--filtered",
action="store_true",
help="Remove newsgroup information that is easily overfit: "
"headers, signatures, and quoting.")
(opts, args) = op.parse_args()
if len(args) > 0:
op.error("this script takes no arguments.")
sys.exit(1)
print(__doc__)
op.print_help()
print()
###############################################################################
# Load some categories from the training set
if opts.all_categories:
categories = None
else:
categories = [
'alt.atheism',
'talk.religion.misc',
'comp.graphics',
'sci.space',
]
if opts.filtered:
remove = ('headers', 'footers', 'quotes')
else:
remove = ()
print("Loading 20 newsgroups dataset for categories:")
print(categories if categories else "all")
data_train = fetch_20newsgroups(subset='train', categories=categories,
shuffle=True, random_state=42,
remove=remove)
data_test = fetch_20newsgroups(subset='test', categories=categories,
shuffle=True, random_state=42,
remove=remove)
print('data loaded')
categories = data_train.target_names # for case categories == None
def size_mb(docs):
    """Return the combined size of *docs* in megabytes (UTF-8 encoded)."""
    n_bytes = sum(len(doc.encode('utf-8')) for doc in docs)
    return n_bytes / 1e6
data_train_size_mb = size_mb(data_train.data)
data_test_size_mb = size_mb(data_test.data)
print("%d documents - %0.3fMB (training set)" % (
len(data_train.data), data_train_size_mb))
print("%d documents - %0.3fMB (test set)" % (
len(data_test.data), data_test_size_mb))
print("%d categories" % len(categories))
print()
# split a training set and a test set
y_train, y_test = data_train.target, data_test.target
print("Extracting features from the training data using a sparse vectorizer")
t0 = time()
if opts.use_hashing:
vectorizer = HashingVectorizer(stop_words='english', non_negative=True,
n_features=opts.n_features)
X_train = vectorizer.transform(data_train.data)
else:
vectorizer = TfidfVectorizer(sublinear_tf=True, max_df=0.5,
stop_words='english')
X_train = vectorizer.fit_transform(data_train.data)
duration = time() - t0
print("done in %fs at %0.3fMB/s" % (duration, data_train_size_mb / duration))
print("n_samples: %d, n_features: %d" % X_train.shape)
print()
print("Extracting features from the test data using the same vectorizer")
t0 = time()
X_test = vectorizer.transform(data_test.data)
duration = time() - t0
print("done in %fs at %0.3fMB/s" % (duration, data_test_size_mb / duration))
print("n_samples: %d, n_features: %d" % X_test.shape)
print()
# mapping from integer feature name to original token string
if opts.use_hashing:
feature_names = None
else:
feature_names = vectorizer.get_feature_names()
if opts.select_chi2:
print("Extracting %d best features by a chi-squared test" %
opts.select_chi2)
t0 = time()
ch2 = SelectKBest(chi2, k=opts.select_chi2)
X_train = ch2.fit_transform(X_train, y_train)
X_test = ch2.transform(X_test)
if feature_names:
# keep selected feature names
feature_names = [feature_names[i] for i
in ch2.get_support(indices=True)]
print("done in %fs" % (time() - t0))
print()
if feature_names:
feature_names = np.asarray(feature_names)
def trim(s):
    """Trim string to fit on terminal (assuming 80-column display)"""
    if len(s) <= 80:
        return s
    return s[:77] + "..."
###############################################################################
# Benchmark classifiers
def benchmark(clf):
    """Fit *clf* on the module-level training set, score it on the test
    set, print diagnostics, and return (name, accuracy, train_time,
    test_time)."""
    print('_' * 80)
    print("Training: ")
    print(clf)
    start = time()
    clf.fit(X_train, y_train)
    train_time = time() - start
    print("train time: %0.3fs" % train_time)
    start = time()
    pred = clf.predict(X_test)
    test_time = time() - start
    print("test time: %0.3fs" % test_time)
    score = metrics.accuracy_score(y_test, pred)
    print("accuracy: %0.3f" % score)
    # Linear models expose coef_; report its density and top features.
    if hasattr(clf, 'coef_'):
        print("dimensionality: %d" % clf.coef_.shape[1])
        print("density: %f" % density(clf.coef_))
        if opts.print_top10 and feature_names is not None:
            print("top 10 keywords per class:")
            for i, category in enumerate(categories):
                top10 = np.argsort(clf.coef_[i])[-10:]
                print(trim("%s: %s"
                           % (category, " ".join(feature_names[top10]))))
        print()
    if opts.print_report:
        print("classification report:")
        print(metrics.classification_report(y_test, pred,
                                            target_names=categories))
    if opts.print_cm:
        print("confusion matrix:")
        print(metrics.confusion_matrix(y_test, pred))
    print()
    clf_descr = str(clf).split('(')[0]
    return clf_descr, score, train_time, test_time
results = []
for clf, name in (
(RidgeClassifier(tol=1e-2, solver="lsqr"), "Ridge Classifier"),
(Perceptron(n_iter=50), "Perceptron"),
(PassiveAggressiveClassifier(n_iter=50), "Passive-Aggressive"),
(KNeighborsClassifier(n_neighbors=10), "kNN"),
(RandomForestClassifier(n_estimators=100), "Random forest")):
print('=' * 80)
print(name)
results.append(benchmark(clf))
for penalty in ["l2", "l1"]:
print('=' * 80)
print("%s penalty" % penalty.upper())
# Train Liblinear model
results.append(benchmark(LinearSVC(loss='l2', penalty=penalty,
dual=False, tol=1e-3)))
# Train SGD model
results.append(benchmark(SGDClassifier(alpha=.0001, n_iter=50,
penalty=penalty)))
# Train SGD with Elastic Net penalty
print('=' * 80)
print("Elastic-Net penalty")
results.append(benchmark(SGDClassifier(alpha=.0001, n_iter=50,
penalty="elasticnet")))
# Train NearestCentroid without threshold
print('=' * 80)
print("NearestCentroid (aka Rocchio classifier)")
results.append(benchmark(NearestCentroid()))
# Train sparse Naive Bayes classifiers
print('=' * 80)
print("Naive Bayes")
results.append(benchmark(MultinomialNB(alpha=.01)))
results.append(benchmark(BernoulliNB(alpha=.01)))
print('=' * 80)
print("LinearSVC with L1-based feature selection")
# The smaller C, the stronger the regularization.
# The more regularization, the more sparsity.
results.append(benchmark(Pipeline([
('feature_selection', LinearSVC(penalty="l1", dual=False, tol=1e-3)),
('classification', LinearSVC())
])))
# make some plots
indices = np.arange(len(results))
results = [[x[i] for x in results] for i in range(4)]
clf_names, score, training_time, test_time = results
training_time = np.array(training_time) / np.max(training_time)
test_time = np.array(test_time) / np.max(test_time)
plt.figure(figsize=(12, 8))
plt.title("Score")
plt.barh(indices, score, .2, label="score", color='r')
plt.barh(indices + .3, training_time, .2, label="training time", color='g')
plt.barh(indices + .6, test_time, .2, label="test time", color='b')
plt.yticks(())
plt.legend(loc='best')
plt.subplots_adjust(left=.25)
plt.subplots_adjust(top=.95)
plt.subplots_adjust(bottom=.05)
for i, c in zip(indices, clf_names):
plt.text(-.3, i, c)
plt.show()
|
bsd-3-clause
|
LennonLab/Emergence
|
figure_code/MacroecologyPatterns/TaylorsLaw.py
|
8
|
1412
|
from __future__ import division
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import os
from scipy import stats
# Script body: load simulated community data, aggregate per simulation,
# and plot Taylor's law (log-variance vs log-mean of population size).
mydir = os.path.expanduser('~/GitHub/Emergence')
tools = os.path.expanduser(mydir + "/tools")
_lw = 2
sz = 20
df = pd.read_csv(mydir + '/results/simulated_data/SimData.csv')
# Per-simulation means; NS/var are the log10 mean and variance of pop size.
df2 = pd.DataFrame({'length' : df['length'].groupby(df['sim']).mean()})
df2['NS'] = np.log10(df['avg.pop.size'].groupby(df['sim']).mean())
df2['var'] = np.log10(df['pop.var'].groupby(df['sim']).mean())
# Keep only simulations with appreciable variance (log10 var > 1).
df2 = df2[df2['var'] > 1]
#### plot figure ###############################################################
fs = 14
fig = plt.figure(figsize=(3, 2))
fig.add_subplot(1, 1, 1)
Nlist = df2['NS'].tolist()
Vlist = df2['var'].tolist()
plt.scatter(Nlist, Vlist, lw=_lw, color='0.7', s = sz)
# OLS fit in log-log space; slope m is Taylor's exponent z.
m, b, r, p, std_err = stats.linregress(Nlist, Vlist)
Nlist = np.array(Nlist)
plt.plot(Nlist, m*Nlist + b, '-', color='k', label='$z$ = '+str(round(m,2)), lw=_lw)
xlab = r"$log_{10}$"+'(mean)'
ylab = r"$log_{10}$"+'(variance)'
plt.xlabel(xlab, fontsize=fs)
plt.tick_params(axis='both', labelsize=fs-3)
plt.ylabel(ylab, fontsize=fs)
plt.legend(loc='best', fontsize=fs-3, frameon=False)
#### Final Format and Save #####################################################
plt.subplots_adjust(wspace=0.4, hspace=0.4)
plt.savefig(mydir + '/results/figures/TaylorsLaw.png', dpi=200, bbox_inches = "tight")
plt.close()
|
mit
|
Joshuaalbert/IonoTomo
|
src/ionotomo/notebooks/deep_unwrap/deep_unwrap.py
|
1
|
15823
|
import numpy as np
import tensorflow as tf
import logging
logging.basicConfig(format='%(asctime)s %(message)s')
import pylab as plt
import cmocean
from scipy.spatial import cKDTree
from ionotomo.tomography.pipeline import Pipeline
from ionotomo.settings import TFSettings
from timeit import default_timer
from ionotomo import *
import astropy.coordinates as ac
import astropy.units as au
import gpflow as gp
import sys
import h5py
import threading
from timeit import default_timer
#%matplotlib notebook
from concurrent import futures
from functools import partial
from threading import Lock
import astropy.units as au
import astropy.time as at
from collections import deque
from doubly_stochastic_dgp.dgp import DGP
from ionotomo.bayes.gpflow_contrib import GPR_v2,Gaussian_v2
from scipy.cluster.vq import kmeans2
from scipy.spatial.distance import pdist,squareform
import os
def get_only_vars_in_model(variables, model):
    """Filter *variables* down to those that exist in the checkpoint file
    *model* with a matching shape; shape mismatches are logged and
    skipped."""
    reader = tf.train.NewCheckpointReader(model)
    checkpoint_names = [k for k in sorted(reader.get_variable_to_shape_map())]
    kept = []
    for var in variables:
        name = var.name.split(":")[0]
        if name not in checkpoint_names:
            continue
        if tuple(var.shape.as_list()) != reader.get_tensor(name).shape:
            logging.warning("{} has shape mis-match: {} {}".format(name,
                tuple(var.shape.as_list()), reader.get_tensor(name).shape))
            continue
        kept.append(var)
    return kept
class AIUnwrap(object):
def __init__(self,project_dir,datapack):
self.project_dir = os.path.abspath(project_dir)
try:
os.makedirs(self.project_dir)
except:
pass
if isinstance(datapack, str):
datapack = DataPack(filename=datapack)
self.datapack = datapack
X = np.array([self.datapack.directions.ra.deg,self.datapack.directions.dec.deg]).T
self.Nd = X.shape[0]
self.Nt = 10
coords = np.zeros([self.Nt, self.Nd,3])
for j in range(self.Nt):
for k in range(X.shape[0]):
coords[j,k,0] = j*8
coords[j,k,1:] = X[k,:]
self.X = coords.reshape((self.Nt*self.Nd,3))
    def train(self,run_id, num_examples, max_steps=1000, minibatch_size=32, keep_prob=0.9,
              learning_rate=1e-3,disp_period=5.,patience=10,load_model=None,test_split=0.25):
        """Train the unwrapping network on synthetic GP-drawn phase screens.

        run_id names the model/summary folders; num_examples synthetic
        examples are generated with random GP hyperparameters, split
        test/train by test_split.  Early stopping after *patience*
        validation rounds without improvement.  load_model optionally
        warm-starts from an existing checkpoint.
        """
        # Random GP hyperparameters (lengthscales, variance, noise) for the
        # synthetic training examples; see load_train_example.
        ls = np.array([np.random.uniform(low=40.,high=120,size=num_examples),
                       np.random.uniform(low=0.25,high=1.,size=num_examples)]).T
        variance = 10**np.random.uniform(low=-2,high=-0.5,size=num_examples)
        noise = 10**np.random.uniform(low=-3,high=-1,size=num_examples)
        tf.reset_default_graph()
        graph = self._build_train_graph()
        model_folder = os.path.join(self.project_dir,"model_{}".format(run_id))
        model_name = os.path.join(model_folder,"model")
        with tf.Session(graph=graph) as sess,\
                tf.summary.FileWriter(os.path.join(self.project_dir,"summary_{}".format(run_id)), graph) as writer:
            sess.run(tf.global_variables_initializer())
            if load_model is not None:
                try:
                    self.load_params(sess,load_model)
                except:
                    logging.warning("Could not load {} saved model".format(load_model))
            num_test =int(test_split*num_examples)
            num_train = num_examples - num_test
            logging.warning("Using num_train: {} num_test {}".format(num_train,num_test))
            last_train_loss = np.inf
            last_test_loss = np.inf
            train_losses = deque(maxlen=num_train//minibatch_size)
            predict_losses = deque(maxlen=num_train//minibatch_size)
            patience_cond = deque(maxlen=patience)
            step = sess.run(self.global_step)
            proceed = True
            test_h_val, train_h_val = sess.run([self.test_h,self.train_h])
            while proceed and step < max_steps:
                feed_dict = {
                        self.ls_pl : ls,
                        self.variance_pl : variance,
                        self.noise_pl : noise,
                        self.num_test : num_test,
                        self.minibatch_size : minibatch_size}
                sess.run([self.train_init,self.test_init],
                        feed_dict = feed_dict)
                # Train loop: iterate the training dataset to exhaustion.
                t0 = default_timer()
                t = t0
                while True:
                    # Learning rate is annealed from the last validation loss.
                    lr_feed = self._get_learning_rate(last_test_loss, learning_rate)
                    feed_dict = {
                            self.learning_rate: lr_feed,
                            self.keep_prob : keep_prob,
                            self.handle: train_h_val
                            }
                    sess.run(self.metric_initializer)
                    try:
                        train_loss, step, summary, _, acc = sess.run([self.total_loss, self.global_step,
                            self.train_summary,self.train_op, self.acc], feed_dict=feed_dict)
                        train_losses.append(train_loss)
                        writer.add_summary(summary, global_step=step)
                        if default_timer() - t > disp_period:
                            logging.warning("Minibatch \tStep {:5d}\tloss {:.2e}\tacc {:.2e}".format(step, np.mean(train_losses),acc))
                            t = default_timer()
                    except tf.errors.OutOfRangeError:
                        break
                last_train_loss = np.mean(train_losses)
                samples_per_sec = num_train / (default_timer() - t0)
                ms_per_sample = 1000./samples_per_sec
                logging.warning("Speed\t{:.1f} samples/sec. [{:.1f} ms/sample]"\
                        .format(samples_per_sec,ms_per_sample))
                # Test loop: one pass over the held-out examples.
                test_losses = []
                while True:
                    feed_dict = {self.handle: test_h_val,
                            self.keep_prob : 1.
                            }
                    sess.run(self.metric_initializer)
                    try:
                        test_loss, step, summary, acc = sess.run([self.total_loss, self.global_step, self.test_summary, self.acc], feed_dict=feed_dict)
                        test_losses.append(test_loss)
                        writer.add_summary(summary, global_step=step)
                    except tf.errors.OutOfRangeError:
                        break
                # Early stopping tracks negative accuracy, not the mean loss.
                last_test_loss = -acc#np.mean(test_losses)
                patience_cond.append(last_test_loss)
                if len(patience_cond) == patience and np.min(patience_cond) == patience_cond[0]:
                    proceed = False
                logging.warning("Validation\tStep {:5d}\tloss {:.2e}\tacc {:.2e}".format(step, np.mean(test_losses),acc))
                save_path = self.save_params(sess,model_name)
    def _build_train_graph(self,graph=None):
        """Build the training graph: dataset iterators, a 4-layer LSTM jump
        classifier (13 classes per time/direction sample), the weighted
        cross-entropy loss, a gradient-clipped Adam optimizer, and
        train/test summaries.  Returns the graph."""
        graph = graph or tf.Graph()
        with graph.as_default():
            self.ls_pl = tf.placeholder(tf.float32,shape=(None,2),
                    name='ls')
            self.variance_pl = tf.placeholder(tf.float32,shape=(None,),
                    name='variance')
            self.noise_pl = tf.placeholder(tf.float32,shape=(None,),
                    name='noise')
            self.create_iterators(self.ls_pl,self.variance_pl,self.noise_pl)
            self.f_latent, self.labels,self.weights = self.data_tensors
            self.f_latent.set_shape([None,self.X.shape[0]])
            # Reshape the flat (Nt*Nd,) phase vector to a (Nt, Nd) sequence
            # so the RNN steps over time.
            self.f_latent = tf.reshape(self.f_latent,(-1, self.Nt, self.Nd))
            self.labels.set_shape([None,self.X.shape[0]])
            self.weights.set_shape([None,self.X.shape[0]])
            with tf.variable_scope("predict") as scope:
                self.keep_prob = tf.placeholder(tf.float32, shape=(),name='keep_prob')
                cell = tf.contrib.rnn.MultiRNNCell([
                    tf.contrib.rnn.DropoutWrapper(
                        (
                            tf.nn.rnn_cell.LSTMCell(self.Nd*3,activation=tf.nn.relu)),
                        output_keep_prob=self.keep_prob),
                    tf.contrib.rnn.DropoutWrapper(
                        tf.contrib.rnn.ResidualWrapper(
                            tf.nn.rnn_cell.LSTMCell(self.Nd*3,activation=tf.nn.relu)),
                        output_keep_prob=self.keep_prob),
                    tf.contrib.rnn.DropoutWrapper(
                        (
                            tf.nn.rnn_cell.LSTMCell(self.Nd*13,activation=tf.nn.relu)),
                        output_keep_prob=self.keep_prob),
                    tf.contrib.rnn.DropoutWrapper(
                        tf.contrib.rnn.ResidualWrapper(
                            tf.nn.rnn_cell.LSTMCell(self.Nd*13,activation=tf.nn.relu)),
                        output_keep_prob=self.keep_prob)])
                predict, state = tf.nn.dynamic_rnn(cell,self.f_latent,dtype=tf.float32)
                # 13 logits per (time, direction) sample: jump classes 0..12.
                self.predict = tf.reshape(predict,(-1,self.Nt*self.Nd,13))
#                self.predict = tf.reshape(tf.layers.dense(predict,self.Nt*self.Nd*13),(-1,self.Nt*self.Nd,13))
                self._build_metrics(self.labels,self.predict)
                self.acc = tf.identity(self.acc_update)
            with tf.variable_scope('train') as scope:
                self.total_loss = tf.reduce_mean(self.weights * \
                        tf.nn.sparse_softmax_cross_entropy_with_logits(logits=self.predict,labels=self.labels))
                self.learning_rate = tf.placeholder(tf.float32, shape=(),name='learning_rate')
                opt = tf.train.AdamOptimizer(self.learning_rate)
                train_vars = tf.trainable_variables()
                grad_and_vars = opt.compute_gradients(self.total_loss, train_vars)
                # Global-norm clipping at 1.0 guards against exploding
                # gradients in the recurrent stack.
                clipped,_ = tf.clip_by_global_norm([g for g,_ in grad_and_vars], 1.)
                grad_and_vars = zip(clipped, train_vars)
                self.global_step = tf.train.get_or_create_global_step()
                self.train_op = opt.apply_gradients(grad_and_vars,self.global_step)
            with tf.variable_scope("summaries"):
                self.train_summary = tf.summary.merge([
                    ###
                    # scalars
                    tf.summary.scalar("acc",self.acc,family='train'),
                    tf.summary.scalar("prec",self.prec_update,family='train'),
                    tf.summary.scalar("loss",self.total_loss,family='train')
                    ])
                self.test_summary = tf.summary.merge([
                    ###
                    # scalars
                    tf.summary.scalar("acc",self.acc,family='test'),
                    tf.summary.scalar("prec",self.prec_update,family='test'),
                    tf.summary.scalar("loss",self.total_loss,family='test')
                    ])
        return graph
def _build_metrics(self,true,predict):
with tf.variable_scope("metrics") as scope:
predict_labels = tf.cast(tf.argmax(predict,axis=-1), tf.int32)
self.acc, self.acc_update = tf.metrics.accuracy(true,predict_labels)
self.prec, self.prec_update = tf.metrics.precision(true,predict_labels)
#self.metric_update_op = tf.group([acc_update,prec_update])
self.metric_initializer = \
tf.variables_initializer(
var_list=tf.get_collection(tf.GraphKeys.LOCAL_VARIABLES,scope=scope.name))
    def create_iterators(self,lengthscale, variance, noise):
        """Will take the first num_test from batch axis as test set.

        Builds train/test tf.data pipelines over the per-example GP
        hyperparameters, generating examples on the fly via
        load_train_example (wrapped in tf.py_func), and exposes a
        feedable string-handle iterator in self.handle/self.data_tensors.
        """
        with tf.name_scope("datasets"):
            self.num_test = tf.placeholder(tf.int64,shape=(),name='num_test')
            self.minibatch_size = tf.placeholder(tf.int64,shape=(),name='minibatch_size')
            #img, mask, border_weights, prior, bb, num_masks
            # (phase, jump labels, weights) produced by load_train_example.
            output_types = [tf.float32,tf.int32,tf.float32]
            dataset = tf.data.Dataset.from_tensor_slices((lengthscale, variance, noise))
            dataset = dataset.map(lambda lengthscale, variance, noise: \
                    tuple(tf.py_func(self.load_train_example,[lengthscale, variance, noise],
                        output_types)),num_parallel_calls=None)
            # First num_test examples form one big test batch; the rest are
            # minibatched for training.
            test_dataset = dataset.take(self.num_test).batch(self.num_test)
            train_dataset = dataset.skip(self.num_test).batch(self.minibatch_size)
            test_iterator = test_dataset.make_initializable_iterator()
            train_iterator = train_dataset.make_initializable_iterator()
            self.train_init, self.test_init = train_iterator.initializer,test_iterator.initializer
            self.handle = tf.placeholder(tf.string,shape=[])
            iterator = tf.data.Iterator.from_string_handle(self.handle, train_dataset.output_types, train_dataset.output_shapes)
            self.data_tensors = iterator.get_next()
            self.test_h, self.train_h = test_iterator.string_handle(),train_iterator.string_handle()
def _get_learning_rate(self, rec_loss, lr):
if np.sqrt(rec_loss) < 0.5:
return lr/2.
elif np.sqrt(rec_loss) < 0.4:
return lr/3.
elif np.sqrt(rec_loss) < 0.3:
return lr/4.
elif np.sqrt(rec_loss) < 0.2:
return lr/5.
elif np.sqrt(rec_loss) < 0.15:
return lr/6.
elif np.sqrt(rec_loss) < 0.1:
return lr/10.
return lr
def load_params(self,sess, model):
with sess.graph.as_default():
all_vars = tf.trainable_variables()
load_vars = get_only_vars_in_model(all_vars,model)
saver = tf.train.Saver(load_vars)
saver.restore(sess,model)
def save_params(self,sess, model):
with sess.graph.as_default():
all_vars = tf.trainable_variables()
saver = tf.train.Saver(all_vars)
save_path = saver.save(sess,model)
return save_path
def load_train_example(self, ls, variance, noise):
tec_conversion = -8.4480e9# rad Hz/tecu
X = self.X.copy()
X[:,0] /= ls[0]
X[:,1:] /= ls[1]
pd = pdist(X,metric='sqeuclidean')
pd *= -1.
K = variance*np.exp(squareform(pd))
tec = np.random.multivariate_normal(np.zeros(self.X.shape[0]),cov = K)
tec += noise*np.random.normal(size=tec.shape)
phase = tec*tec_conversion / 150e6
phase = phase.reshape((self.Nt,self.Nd))
phase -= phase.mean(axis=1)[:,None]
phase = phase.reshape((self.Nt*self.Nd,))
phase_wrap = np.angle(np.exp(1j*phase))
jumps = ((phase - phase_wrap)/(2*np.pi)) + 6
jumps[jumps < 0] = 0
jumps[jumps > 12 ] = 12
where = jumps!=6
weights = np.ones(tec.shape)
#weights[where] += tec.size - np.sum(where)
return phase.astype(np.float32), jumps.astype(np.int32), weights.astype(np.float32)
# Script driver: build the model against the datapack and launch training.
am = AIUnwrap("projects", "../../data/rvw_datapack_full_phase_dec27_unwrap.hdf5")
#am.load_train_example([100,1],0.1**2,0.001)
# 10000 synthetic examples, 200 of which are held out for validation.
am.train(0, num_examples = 10000, max_steps=10000, minibatch_size=200,
        keep_prob=0.9, learning_rate=1e-3,disp_period=5.,patience=5,load_model='projects/model_0/model',
        test_split=200./10000.)
|
apache-2.0
|
pratapvardhan/scikit-learn
|
examples/classification/plot_lda_qda.py
|
29
|
4952
|
"""
====================================================================
Linear and Quadratic Discriminant Analysis with confidence ellipsoid
====================================================================
Plot the confidence ellipsoids of each class and decision boundary
"""
print(__doc__)
from scipy import linalg
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
from matplotlib import colors
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis
###############################################################################
# colormap
# Diverging red->grey->blue colormap (pure red at 0, pure blue at 1),
# registered by name so later plotting calls can use 'red_blue_classes'.
cmap = colors.LinearSegmentedColormap(
    'red_blue_classes',
    {'red': [(0, 1, 1), (1, 0.7, 0.7)],
     'green': [(0, 0.7, 0.7), (1, 0.7, 0.7)],
     'blue': [(0, 0.7, 0.7), (1, 1, 1)]})
plt.cm.register_cmap(cmap=cmap)
###############################################################################
# generate datasets
def dataset_fixed_cov():
    '''Generate 2 Gaussians samples with the same covariance matrix'''
    n_samples, n_features = 300, 2
    np.random.seed(0)
    # Both classes share one mixing matrix; class 1 is shifted by (1, 1).
    mixing = np.array([[0., -0.23], [0.83, .23]])
    class0 = np.dot(np.random.randn(n_samples, n_features), mixing)
    class1 = np.dot(np.random.randn(n_samples, n_features), mixing) + np.array([1, 1])
    X = np.r_[class0, class1]
    y = np.hstack((np.zeros(n_samples), np.ones(n_samples)))
    return X, y
def dataset_cov():
    '''Generate 2 Gaussians samples with different covariance matrices'''
    n_samples, n_features = 300, 2
    np.random.seed(0)
    # Class 1 uses the transposed mixing matrix, giving a different
    # covariance, and is shifted by (1, 4).
    mixing = np.array([[0., -1.], [2.5, .7]]) * 2.
    class0 = np.dot(np.random.randn(n_samples, n_features), mixing)
    class1 = np.dot(np.random.randn(n_samples, n_features), mixing.T) + np.array([1, 4])
    X = np.r_[class0, class1]
    y = np.hstack((np.zeros(n_samples), np.ones(n_samples)))
    return X, y
###############################################################################
# plot functions
def plot_data(lda, X, y, y_pred, fig_index):
    """Scatter-plot a 2-class dataset, shade the classifier's decision regions,
    and mark the class means. Returns the subplot Axes for further drawing.

    Correctly classified points are drawn as large dots, misclassified
    points as small dark dots.
    """
    splot = plt.subplot(2, 2, fig_index)
    if fig_index == 1:
        plt.title('Linear Discriminant Analysis')
        plt.ylabel('Data with fixed covariance')
    elif fig_index == 2:
        plt.title('Quadratic Discriminant Analysis')
    elif fig_index == 3:
        plt.ylabel('Data with varying covariances')
    tp = (y == y_pred)  # True Positive
    tp0, tp1 = tp[y == 0], tp[y == 1]
    X0, X1 = X[y == 0], X[y == 1]
    X0_tp, X0_fp = X0[tp0], X0[~tp0]
    X1_tp, X1_fp = X1[tp1], X1[~tp1]
    # class 0: dots
    plt.plot(X0_tp[:, 0], X0_tp[:, 1], 'o', color='red')
    plt.plot(X0_fp[:, 0], X0_fp[:, 1], '.', color='#990000')  # dark red
    # class 1: dots
    plt.plot(X1_tp[:, 0], X1_tp[:, 1], 'o', color='blue')
    plt.plot(X1_fp[:, 0], X1_fp[:, 1], '.', color='#000099')  # dark blue
    # class 0 and 1 : areas
    # Evaluate P(class 1) on a grid over the current axes limits and shade it.
    nx, ny = 200, 100
    x_min, x_max = plt.xlim()
    y_min, y_max = plt.ylim()
    xx, yy = np.meshgrid(np.linspace(x_min, x_max, nx),
                         np.linspace(y_min, y_max, ny))
    Z = lda.predict_proba(np.c_[xx.ravel(), yy.ravel()])
    Z = Z[:, 1].reshape(xx.shape)
    plt.pcolormesh(xx, yy, Z, cmap='red_blue_classes',
                   norm=colors.Normalize(0., 1.))
    # Decision boundary: the P = 0.5 contour.
    plt.contour(xx, yy, Z, [0.5], linewidths=2., colors='k')
    # means
    plt.plot(lda.means_[0][0], lda.means_[0][1],
             'o', color='black', markersize=10)
    plt.plot(lda.means_[1][0], lda.means_[1][1],
             'o', color='black', markersize=10)
    return splot
def plot_ellipse(splot, mean, cov, color):
    """Overlay a filled 2-standard-deviation covariance ellipse on `splot`.

    Parameters
    ----------
    splot : matplotlib Axes to draw on.
    mean : length-2 array, centre of the ellipse.
    cov : (2, 2) covariance matrix; its eigen-decomposition gives the
        axis lengths and orientation.
    color : matplotlib color for the ellipse.
    """
    v, w = linalg.eigh(cov)
    u = w[0] / linalg.norm(w[0])
    # arctan2 handles a vertical principal axis (u[0] == 0), where
    # arctan(u[1] / u[0]) would divide by zero.
    angle = np.arctan2(u[1], u[0])
    angle = 180 * angle / np.pi  # convert to degrees
    # filled Gaussian at 2 standard deviation
    ell = mpl.patches.Ellipse(mean, 2 * v[0] ** 0.5, 2 * v[1] ** 0.5,
                              180 + angle, color=color)
    ell.set_clip_box(splot.bbox)
    ell.set_alpha(0.5)
    splot.add_artist(ell)
    splot.set_xticks(())
    splot.set_yticks(())
def plot_lda_cov(lda, splot):
    """Draw the shared LDA covariance ellipse around each class mean."""
    for mean, color in zip(lda.means_, ('red', 'blue')):
        plot_ellipse(splot, mean, lda.covariance_, color)
def plot_qda_cov(qda, splot):
    """Draw each class's own QDA covariance ellipse around its mean."""
    for mean, cov, color in zip(qda.means_, qda.covariances_, ('red', 'blue')):
        plot_ellipse(splot, mean, cov, color)
###############################################################################
# Fit LDA and QDA on both datasets and draw the four comparison panels:
# rows are the two datasets, columns are the two classifiers.
for i, (X, y) in enumerate([dataset_fixed_cov(), dataset_cov()]):
    # Linear Discriminant Analysis
    lda = LinearDiscriminantAnalysis(solver="svd", store_covariance=True)
    y_pred = lda.fit(X, y).predict(X)
    splot = plot_data(lda, X, y, y_pred, fig_index=2 * i + 1)
    plot_lda_cov(lda, splot)
    plt.axis('tight')
    # Quadratic Discriminant Analysis
    qda = QuadraticDiscriminantAnalysis(store_covariances=True)
    y_pred = qda.fit(X, y).predict(X)
    splot = plot_data(qda, X, y, y_pred, fig_index=2 * i + 2)
    plot_qda_cov(qda, splot)
    plt.axis('tight')
plt.suptitle('Linear Discriminant Analysis vs Quadratic Discriminant Analysis')
plt.show()
|
bsd-3-clause
|
xuewei4d/scikit-learn
|
examples/ensemble/plot_voting_probas.py
|
23
|
3121
|
"""
===========================================================
Plot class probabilities calculated by the VotingClassifier
===========================================================
.. currentmodule:: sklearn
Plot the class probabilities of the first sample in a toy dataset predicted by
three different classifiers and averaged by the
:class:`~ensemble.VotingClassifier`.
First, three exemplary classifiers are initialized
First, three examplary classifiers are initialized
(:class:`~linear_model.LogisticRegression`, :class:`~naive_bayes.GaussianNB`,
and :class:`~ensemble.RandomForestClassifier`) and used to initialize a
soft-voting :class:`~ensemble.VotingClassifier` with weights `[1, 1, 5]`, which
means that the predicted probabilities of the
:class:`~ensemble.RandomForestClassifier` count 5 times as much as the weights
of the other classifiers when the averaged probability is calculated.
To visualize the probability weighting, we fit each classifier on the training
set and plot the predicted class probabilities for the first sample in this
example dataset.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import GaussianNB
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import VotingClassifier
# Three base classifiers combined into a soft-voting ensemble; the random
# forest's probabilities are weighted 5x relative to the other two.
clf1 = LogisticRegression(max_iter=1000, random_state=123)
clf2 = RandomForestClassifier(n_estimators=100, random_state=123)
clf3 = GaussianNB()
X = np.array([[-1.0, -1.0], [-1.2, -1.4], [-3.4, -2.2], [1.1, 1.2]])
y = np.array([1, 1, 2, 2])
eclf = VotingClassifier(estimators=[('lr', clf1), ('rf', clf2), ('gnb', clf3)],
                        voting='soft',
                        weights=[1, 1, 5])
# predict class probabilities for all classifiers
probas = [c.fit(X, y).predict_proba(X) for c in (clf1, clf2, clf3, eclf)]
# get class probabilities for the first sample in the dataset
class1_1 = [pr[0, 0] for pr in probas]
class2_1 = [pr[0, 1] for pr in probas]
# plotting
N = 4  # number of groups
ind = np.arange(N)  # group positions
width = 0.35  # bar width
fig, ax = plt.subplots()
# bars for classifier 1-3 (the ensemble's slot is zeroed out here)
p1 = ax.bar(ind, np.hstack(([class1_1[:-1], [0]])), width,
            color='green', edgecolor='k')
p2 = ax.bar(ind + width, np.hstack(([class2_1[:-1], [0]])), width,
            color='lightgreen', edgecolor='k')
# bars for VotingClassifier (only the last slot is non-zero)
p3 = ax.bar(ind, [0, 0, 0, class1_1[-1]], width,
            color='blue', edgecolor='k')
p4 = ax.bar(ind + width, [0, 0, 0, class2_1[-1]], width,
            color='steelblue', edgecolor='k')
# plot annotations: dashed line separates base classifiers from the ensemble
plt.axvline(2.8, color='k', linestyle='dashed')
ax.set_xticks(ind + width)
ax.set_xticklabels(['LogisticRegression\nweight 1',
                    'GaussianNB\nweight 1',
                    'RandomForestClassifier\nweight 5',
                    'VotingClassifier\n(average probabilities)'],
                   rotation=40,
                   ha='right')
plt.ylim([0, 1])
plt.title('Class probabilities for sample 1 by different classifiers')
plt.legend([p1[0], p2[0]], ['class 1', 'class 2'], loc='upper left')
plt.tight_layout()
plt.show()
|
bsd-3-clause
|
stevesimmons/pydata-ams2017-pandas-and-dask-from-the-inside
|
pfi.py
|
2
|
6207
|
# Pandas from the Inside
# PyData DC Tutorial - Friday 7 October 2016
#
# Stephen Simmons - [email protected]
# http://github.com/stevesimmons
#
# Requires python3, pandas and numpy.
# Best with pandas 0.18.1 or 0.19.0.
# Pandas 0.18.0 requires a workaround for an indexing bug.
import csv
import os
import numpy as np
import pandas as pd
# Don't wrap tables
pd.options.display.max_rows = 20  # truncate long frames when printed
pd.options.display.width = 200  # allow wide frames without line wrapping
def main(name='bg3.txt'):
    """Run the full tutorial pipeline: download, load, reshape and rank
    AFL game results, printing the resulting championship ladder."""
    print("numpy=%s; pandas=%s" % (np.__version__, pd.__version__))
    # Fetch the sample data from www.afltables.com on first run.
    if not os.path.exists(name):
        download_sample_data(names=[name])
    # Part 1 - one row per game.
    games = load_data(name)
    # Part 2 - one row per team per game.
    team_scores = prepare_game_scores(games)
    # Parts 3 and 4 - aggregate wins/draws/losses into a ladder.
    ladder = calc_team_ladder(team_scores)
    print(ladder)
def download_sample_data(names=('bg3.txt', 'bg7.txt')):
    '''
    Download results and attendance stats for every AFL match
    since 1897 from www.afltables.com into files
    'bg3.txt' and 'bg7.txt' in the current directory.
    '''
    import urllib.request
    base_url = 'http://afltables.com/afl/stats/biglists/'
    for target in names:
        source_url = base_url + target
        print("Downloading from %s" % source_url)
        payload = urllib.request.urlopen(source_url).read()
        # Raw bytes straight to disk; the files are plain ASCII tables.
        with open(target, 'wb') as out:
            out.write(payload)
        print("Wrote %d bytes to %s" % (len(payload), target))
def load_data(name='bg3.txt'):
    '''
    Pandas DataFrames from loading csv files bg3.txt (games) or
    bg7.txt (attendance) csvs downloaded from www.afltables.com.

    Parameters
    ----------
    name : str
        Either 'bg3.txt' (game scores) or 'bg7.txt' (attendance stats);
        any other value raises ValueError.

    Returns
    -------
    DataFrame with one row per game (bg3) or per attendance record (bg7),
    with the 'Date' column parsed to datetime.
    '''
    if name == 'bg3.txt':
        # Scores with rounds
        # - GameNum ends with '.', single space for nums > 100k
        # - Rounds are 'R1'-'R22' or 'QF', 'PF', 'GF'.
        # - Three grand finals were drawn and replayed the next week
        # - Scores are strings '12.5.65' with goals/behinds/points
        # - Venue may end with a '.', e.g. 'M.C.G.' though always at EOL
        cols = 'GameNum Date Round HomeTeam HomeScore AwayTeam AwayScore Venue'
        # Split on '.' or space followed by at least one more space, so
        # single spaces inside venue names are preserved.
        # (This assignment was accidentally duplicated in the original.)
        sep = '[. ] +'
    elif name == 'bg7.txt':
        # Attendance stats
        # - RowNum ends with '.', single space for nums > 100k
        # - Spectators ends with '*' for finals games
        # - Venue may end with a '.', e.g. 'M.C.G.'
        # - Dates are 'dd-Mmm-yyyy'.
        # - Date/Venue unique, except for two days in 1980s, when
        #   M.C.G. hosted games at 2pm and 5pm with same num of spectators.
        cols = 'RowNum Spectators HomeTeam HomeScore AwayTeam AwayScore Venue Date'
        sep = '(?:(?<=[0-9])[.*] +)|(?: +)'
    else:
        raise ValueError("Unexpected data file")
    # Regex separator forces the (slower but more flexible) python engine.
    df = pd.read_csv(name, skiprows=2, sep=sep,
                     names=cols.split(), parse_dates=['Date'],
                     quoting=csv.QUOTE_NONE, engine='python')
    return df
def prepare_game_scores(df):
    '''
    DataFrame with rows giving each team's results in a game
    (1 game -> 2 rows for home and away teams)

    Parameters
    ----------
    df : DataFrame
        Game rows as returned by load_data('bg3.txt'): columns GameNum,
        Date, Round, HomeTeam, HomeScore, AwayTeam, AwayScore, Venue.

    Returns
    -------
    DataFrame indexed by (Date, Venue, Round, Team) with integer columns
    G (goals), B (behinds), F (points for) and A (points against).
    '''
    scores_raw = df.drop('GameNum', axis=1).set_index(['Date', 'Venue', 'Round'])
    # Convert into sections for both teams
    home_teams = scores_raw['HomeTeam'].rename('Team')
    away_teams = scores_raw['AwayTeam'].rename('Team')
    # Split the score strings into Goals/Behinds, and points For and Against
    regex = r'(?P<G>\d+).(?P<B>\d+).(?P<F>\d+)'
    home_scores = scores_raw['HomeScore'].str.extract(regex, expand=True).astype(int)
    away_scores = scores_raw['AwayScore'].str.extract(regex, expand=True).astype(int)
    home_scores['A'] = away_scores['F']
    away_scores['A'] = home_scores['F']
    home_games = pd.concat([home_teams, home_scores], axis=1)
    away_games = pd.concat([away_teams, away_scores], axis=1)
    # DataFrame.append was removed in pandas 2.0; pd.concat is the
    # supported equivalent and produces an identical result here.
    scores = (pd.concat([home_games, away_games])
              .sort_index()
              .set_index('Team', append=True))
    return scores
def calc_team_ladder(scores_df, year=2016):
    '''
    DataFrame with championship ladder with round-robin games for the given year.
    Wins, draws and losses are worth 4, 2 and 0 points respectively.

    Parameters
    ----------
    scores_df : DataFrame indexed by (Date, Venue, Round, Team) with
        integer columns F (points for) and A (points against), as
        returned by prepare_game_scores.
    year : int
        Season to rank; only round-robin rounds 'R1'..'R9' (string sort
        order) are included.

    Returns
    -------
    DataFrame per team with P/W/D/L counts, PCT (percentage), PTS
    (premiership points) and Pos (ladder position).
    '''
    # Select a subset of the rows
    # df.loc[] matches dates as strings like '20160506' or '2016'.
    # Note here rounds are simple strings so sort with R1 < R10 < R2 < .. < R9
    # (we could change this with a CategoricalIndex)
    if pd.__version__ > '0.18.0':
        # MultiIndex slicing works ok
        scores2 = scores_df.sort_index()
        x = scores2.loc(axis=0)[str(year), :, 'R1':'R9', :]
    else:
        # pandas 0.18.0 has a bug with .loc on MultiIndexes
        # if dates are the first level. It works as expected if we
        # move the dates to the end before slicing
        scores2 = scores_df.reorder_levels([1, 2, 3, 0]).sort_index()
        x = scores2.loc(axis=0)[:, 'R1':'R9', :, str(year):str(year)]
        # Don't need to put levels back in order as we are about to drop 3 of them
        # x = x.reorder_levels([3, 0, 1, 2]).sort_index()
    # Just keep Team. This does a copy too, avoiding SettingWithCopy warning
    y = x.reset_index(['Date', 'Venue', 'Round'], drop=True)
    # Add cols with 0/1 for number of games played, won, drawn and lost
    y['P'] = 1
    y['W'] = (y['F'] > y['A']).astype(int)
    y['D'] = 0
    y.loc[y['F'] == y['A'], 'D'] = 1
    # eval() computes the loss flag from the F/A columns in one expression.
    y.eval('L = 1*(A>F)', inplace=True)
    #print(y)
    # Subtotal by team and then sort by Points/Percentage
    t = y.groupby(level='Team').sum()
    t['PCT'] = 100.0 * t.F / t.A
    t['PTS'] = 4 * t['W'] + 2 * t['D']
    ladder = t.sort_values(['PTS', 'PCT'], ascending=False)
    # Add ladder position (note: assumes no ties!)
    ladder['Pos'] = pd.RangeIndex(1, len(ladder) + 1)
    #print(ladder)
    return ladder
if __name__ == '__main__':
    main()
|
gpl-3.0
|
ShiehShieh/UFLDL-Solution
|
softmax.py
|
1
|
2918
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import theano.tensor as T
from theano import shared
from utils import *
from sklearn.datasets import fetch_mldata
from sklearn.preprocessing import OneHotEncoder, scale
from sklearn.cross_validation import train_test_split
from sklearn.metrics import classification_report, confusion_matrix, accuracy_score
def softmax_predict(x, weight, b):
    """Symbolic softmax activations for inputs `x` under parameters
    (`weight`, `b`).

    :arg x: theano matrix of input rows
    :arg weight: weight matrix shared variable
    :arg b: bias shared variable
    :returns: theano expression for the class probabilities
    """
    logits = T.dot(x, weight) + b
    return T.nnet.softmax(logits)
def softmax2class_threshold(pred, threshold):
    """Binarize `pred` in place: 1 where >= `threshold`, 0 elsewhere.

    :returns: the same (modified) array
    """
    above = pred >= threshold
    pred[above] = 1
    pred[~above] = 0
    return pred
def softmax2class_max(pred):
    """Class labels as the argmax over each probability row.

    :returns: 1-d integer array of predicted class indices
    """
    return np.asarray(pred).argmax(axis=1)
def cost4softmax(z, t_y, m, decay, theta):
    """Mean softmax cross-entropy of logits `z` against one-hot targets
    `t_y`, plus an L2 weight-decay penalty on `theta`.

    :arg m: number of training examples
    :arg decay: weight-decay coefficient
    :returns: scalar theano cost expression
    """
    target_logit = T.sum(z * t_y, 1)
    log_likelihood = T.log(T.exp(target_logit) / T.sum(T.exp(z), 1))
    penalty = (decay / (2.0 * m)) * T.sum(theta ** 2.0)
    return -T.sum(log_likelihood) / m + penalty
def train_softmax(X, y, iter_num, alpha, decay):
    """Train a softmax classifier with gradient descent.

    :arg X: training inputs, shape (m, input_n)
    :arg y: one-hot training targets, shape (m, output_n)
    :arg iter_num: number of gradient-descent iterations
    :arg alpha: learning rate
    :arg decay: L2 weight-decay coefficient
    :returns: (theta, b) trained shared variables
    """
    input_n, output_n = X.shape[1], y.shape[1]
    m = X.shape[0]
    # initial_params is a project helper; presumably returns (W, b) arrays.
    params = initial_params(input_n, output_n)
    t_X, t_y = T.matrix(), T.matrix()
    theta = shared(params[0], name='theta', borrow=True)
    b = shared(params[1], name='b', borrow=True)
    z = softmax_predict(t_X, theta, b)
    J = cost4softmax(z, t_y, m, decay, theta)
    grad = T.grad(J, [theta, b])
    # Compiled update function: one full-batch gradient step per call.
    trainit = init_gd_trainer(inputs=[t_X, t_y], outputs=[z, J,], name='trainit',
                              params=[theta, b,], grad=grad, alpha=alpha)
    for i in range(iter_num):
        pred, err = trainit(X, y)
        # Report progress every 100 iterations.
        if i%100 == 0:
            print 'iter: %f, err: %f\n' % (i, err)
    return theta, b
def main():
    """Train and evaluate a softmax classifier on MNIST.

    Downloads MNIST via fetch_mldata, trains for 600 full-batch
    iterations, then prints accuracy, a classification report and the
    confusion matrix on the held-out split.
    """
    alpha = 1.
    iter_num = 600
    decay = 0.008 # the performance of 0.0001 is not so good.
    # One-hot encoder for the digit labels.
    enc = OneHotEncoder(sparse=False)
    mnist = fetch_mldata('MNIST original', data_home='./')
    # Standardize pixels and hold out ~14.6% (10000 samples) for testing.
    x_train, x_test, y_train, y_test = \
            train_test_split(scale(mnist.data.astype(float)).astype('float32'),
                             mnist.target.astype('float32'),
                             test_size=0.146, random_state=0)
    y_train = enc.fit_transform(y_train.reshape(y_train.shape[0],1)).astype('float32')
    theta, b = train_softmax(x_train, y_train, iter_num, alpha, decay)
    # Compile a prediction function from the trained parameters.
    x = T.matrix()
    f = function([x], [softmax_predict(x, theta, b)])
    pred = softmax2class_max(f(x_test)[0])
    print accuracy_score(y_test, pred)
    print classification_report(y_test, pred)
    print confusion_matrix(y_test, pred)
if __name__ == "__main__":
    main()
|
gpl-2.0
|
BrainTech/openbci
|
obci/analysis/csp/modCSPv2.py
|
1
|
19496
|
"""
"Class creation of CSP filters,
"The CSP calculation procedure is based on the paper:
"Designing optimal spatial filters for single-trial EEG classification in a movement task;
"Johannes Mueller-Gerking, Gert Pfurtscheller, Henrik Flyvbjerg
"Clinical Neurophysiology vol.110(1999), pp.787--798
"
"Piotr Milanowski, July 2011, Warsaw.
"Corrected Dec. 2011, Warsaw
"For University of Warsaw
"""
from signalParser import signalParser as sp
import numpy as np
from scipy.signal import hamming, ellip, cheby2
from filtfilt import filtfilt
from scipy.linalg import eig
import pickle
from matplotlib.pyplot import plot, show, legend, title, xlabel, ylabel, imshow,\
figure, hist, subplot, savefig, errorbar, boxplot, xticks
from simpleCSP import pfu_csp
def quantile(x, q, method = 5):
    """This calculates q quantiles of x.
    A pth quantile Q, of a distribution F of observations X, is defined as:
    Q(p) = inf{x in X: F(x) >= p}
    Methods based on [1]
    Parameters:
    ===========
    x : 1d array
        A vector of samples
    q : 1d array
        A vector of quantiles
    method [= 5] : integer 1 - 9
        1 : inverse empirical distribution function
        2 : similar to 1 but with averaging at discontinuities
        3 : nearest even order statistic. As defined by SAS
        4 : linear interpolation of empirical cdf
        5 : a piecewise linear function where the knots are the values midway
            through the steps of the empirical cdf. Used by Matlab
        6 : used by SPSS and Minitab
        7 : used by S
        8 : median unbiased
        9 : unbiased for the expected order statistics if x is normally distributed
    See [1] for explicit definitions of methods above.
    Returns:
    ========
    quantiles : array of length of q
        The quantiles that correspond to q
    References:
    ===========
    [1] 'Sample Quantiles in Statistical Packages', Hyndman, Rob J. and Fan, Yanan;
        The American Statistician (1996), Vol. 50, pp 361 -- 365
    """
    y = np.array(sorted(x))
    quantiles = []
    n = len(x)
    for p in q:
        if method in [1, 2, 3]:
            # Discontinuous definitions: (m, kappa, gm) per method from [1].
            m, kappa, gm = {1:(0, 1, 0), 2:(0, 1, 0.5), 3:(-0.5, 0, 0.5)}[method]
            j = int(p * n + m)
            g = p * n + m - j
            # Python 2 truthiness tricks: gamma is 1 unless g == 0, in which
            # case it falls back to the method-specific constant gm.
            g = kappa and g or g * (j/2.0 - j/2)
            gamma = g and 1 or gm
            if j <= 0:
                quantiles.append(y[0])
            elif j >= n:
                quantiles.append(y[-1])
            else:
                quantiles.append(y[j - 1] * (1 - gamma) + y[j] * gamma)
        elif method in [4, 5, 6, 7, 8, 9]:
            # Continuous (interpolating) definitions: plotting position
            # constants (alfa, beta) per method from [1].
            alfa, beta = {4:(0, 1), 5:(0.5, 0.5), \
                    6:(0, 0), 7:(1, 1), 8:(1.0/3, 1.0/3), \
                    9:(3.0/8, 3.0/8)}[method]
            m = alfa + p * (1 - alfa - beta)
            j = int(p * n + m)
            gamma = p * n + m - j
            if j <= 0:
                quantiles.append(y[0])
            elif j >= n:
                quantiles.append(y[-1])
            else:
                quantiles.append(y[j - 1] * (1 - gamma) + y[j] * gamma)
        else:
            raise ValueError, 'Unknown method! Please select one from 1-9.'
    return quantiles
class modCSP(object):
    """This class performs a calculation of CSP filter
    The CSP method finds such combination of channels that maximizes the variance (power) of first class while minimizing the variance (power) of the second class.
    The class, given a signal, frequency and electrodes, calculates CSP filter optimizing SSVEP response (at given frequency)
    THIS VERSION CALCULATES ONE CSP FILTER FOR ALL FREQUENCIES
    Parameters:
    -----------
    name : string
        the name of the signal. The program looks for files name.raw (containing raw signal), name.xml (containing experiment setup information) and name.tag (containing experiment information)
    frequencies : array-like
        frequencies of stimulation.
    electrodes : array of ints or an array of strings
        array containing names or numbers of electrodes to process. Names or numbers should be the same as in name.xml file.
    """
    def __init__(self, name, frequency, electrodes, montage='ears', montage_channels=['A1', 'A2']):
        """Begin here"""
        # NOTE(review): montage_channels is a mutable default shared across
        # instances; it is only read here, but confirm before mutating it.
        self.parsed_data = sp(name)
        self.name = name
        self.electrodes = electrodes
        self.frequencies = frequency
        N = len(electrodes)
        # P holds the CSP filters (one per column); vals the eigenvalues.
        self.P = np.zeros([N, N])
        self.vals = np.zeros(N)
        self.method = 'not calculated'
        self.montage = montage
        self.montage_channels = montage_channels
    def set_frequencies(self, frequencies):
        """Sets frequencies to analyze
        Parameter:
        ---------
        frequencies : array-like
            frequencies of stimulation
        """
        self.frequencies = frequencies
    def set_electrodes(self, electrodes):
        """Sets electrodes to process
        Parameters:
        -----------
        electrodes : array of ints or an array of strings
            array containing names or numbers of electrodes to process. Names or numbers should be the same as in name.xml file.
        """
        self.electrodes = electrodes
    def __get_filter(self, c_max, c_min):
        """This returns CSP filters
        Function returns array. Each column is a filter sorted in descending order i.e. first column represents filter that explains most energy, second - second most, etc.
        Parameters:
        -----------
        c_max : ndarray
            covariance matrix of signal to maximalize.
        c_min : ndarray
            covariance matrix of signal to minimalize.
        Returns:
        --------
        P : ndarray
            each column of this matrix is a CSP filter sorted in descending order
        vals : array-like
            corresponding eigenvalues
        """
        # Generalized eigenproblem c_max * v = lambda * (c_min + c_max) * v.
        vals, vects = eig(c_max, c_min + c_max)
        vals = vals.real
        vals_idx = np.argsort(vals)[::-1]
        P = np.zeros([len(vals), len(vals)])
        for i in xrange(len(vals)):
            P[:,i] = vects[:,vals_idx[i]] / np.sqrt(vals[vals_idx[i]])
        return P, vals[vals_idx]
    def __get_min_entropy(self, c_max):
        # Plain eigendecomposition, sorted ascending (smallest eigenvalue
        # first), used by the 'minimalentropy' method.
        vals, vects = eig(c_max)
        vals = vals.real
        vals_idx = np.argsort(vals)
        P = np.zeros([len(vals), len(vals)])
        for i in xrange(len(vals)):
            P[:,i] = vects[:, vals_idx[i]] / np.sqrt(vals[vals_idx[i]])
        return P, vals[vals_idx]
    def __get_model_matrix(self, freq, Nt, fs):
        # Sine/cosine harmonics of `freq` up to fs/2 - 10 Hz: the SSVEP
        # reference model used by the 'maxcontrast' method.
        Nh = int((fs / 2.0 - 10) / freq)
        X = np.zeros([2 * Nh, Nt])
        t_vec = np.array(range(Nt)) * 1.0 / fs
        for i in xrange(Nh):
            X[2*i, :] = np.sin(2 * np.pi * (i + 1) * freq * t_vec)
            X[2*i + 1, :] = np.cos(2 * np.pi * (i + 1) * freq * t_vec)
        return X
    def __is_int(self, x):
        """Checks if x is an integer.
        Parameters:
        -----------
        x : something
            a value to be tested
        Returns:
        --------
        y : bool
            True if x is an integer
        """
        return type(x) is int
    def read_matlab_filters(self, txt_file='ba_filters.txt'):
        """Function reads filter coefficient from txt file
        Parameters:
        ===========
        txt_file : string
            the name of file with filter coefficients
        Returns:
        ========
        (b, a) coefficient lists for the filters matching self.frequencies.
        """
        filter_file = open(txt_file,'r')
        filter_tmp = filter_file.read().split('\n')[:-1]
        # File holds b/a pairs for 5-45 Hz; pick the rows for our frequencies.
        frq_range = range(5,46)
        idx = [frq_range.index(j) for j in self.frequencies]
        ba_filters = [[float(y) for y in x.split(',')[:-1]] for x in filter_tmp]
        needed_filters_b = [ba_filters[2*ix] for ix in idx]
        needed_filters_a = [ba_filters[2*ix+1] for ix in idx]
        return needed_filters_b, needed_filters_a
    def start_CSP(self, signal_time, to_frequency = 128, baseline = True,\
            base_time = 4, filt = 'ellip', method = 'pfu', train_tags = None):
        """Produces CSP filter from the data.
        THIS VERSION CALCULATES ONE FILTER FOR ALL FREQUENCIES
        The filter is stored in a variable P
        Parameters:
        -----------
        signal_time : float
            Time in seconds of signal to take as a class for maximalization
        to_frequency [= 128Hz] : int
            The frequency to which signal will be resampled
        baseline [= True] : bool
            If true a base line of base_time seconds will be taken as a class for minimalization
        [If baseline = True]
        base_time [= 4] : float
            Time in seconds of baseline to take as minimalization class
        filt [= 'ellip']: string ['ellip', 'cov', 'cheby', None]
            a filter to use. If method is 'maxcontrast' the variable is set to None
        method [= 'pfu'] : string ['pfu', 'regular','maxcontrast']
            method of calculation CSP filter
        train_tags : list
            a list of tags to process. Each list entry is a tuple with first element position of tag in seconds, and second is a frequency of stimulation
        """
        if not self.__is_int(to_frequency):
            raise ValueError, 'to_frequency is not int!'
        self.method = method
        signal = self.parsed_data.prep_signal(to_frequency, self.electrodes, montage=self.montage, montage_channels=self.montage_channels)
        if train_tags == None:
            all_tags = self.parsed_data.get_train_tags(ccof = True)
        else:
            all_tags = train_tags
        N = len(self.electrodes)
        if method == 'maxcontrast' or method == 'minimalentropy':
            # These methods build their own reference model, so no band-pass
            # filtering and always a baseline class.
            baseline = True
            filt = None
        # Accumulated covariance matrices of the two classes.
        cov_pre = np.zeros([N, N])
        cov_post = np.zeros([N, N])
        pre_i = 0
        post_i = 0
        for i, frq in enumerate(self.frequencies):
            # Band-pass the signal +-1 Hz around the stimulation frequency.
            if filt == 'ellip':
                filt_b, filt_a = ellip(3, 0.1 , 100, \
                        [2*(frq - 1) / float(to_frequency), 2*(frq + 1) / float(to_frequency)],\
                        btype='pass')
                signal_tmp = np.array([filtfilt(filt_b, filt_a, x) for x in signal])
            elif filt == 'cheby':
                filt_b, filt_a = cheby2(1, 10, [2*(frq - 1)/float(to_frequency), 2*(frq + 1)/float(to_frequency)], 'pass')
                signal_tmp = np.array([filtfilt(filt_b, filt_a, x) for x in signal])
            elif filt == 'conv':
                # Convolve with a normalized 0.5 s sine as a matched filter.
                t_vec = np.linspace(0, 0.5-1.0/to_frequency, 0.5 * to_frequency)
                sin = np.sin(t_vec * 2 * np.pi)
                sin /= sum(sin**2)
                M = len(sin)
                K = len(signal[0,:])
                signal_tmp = np.array([np.convolve(sin, x, mode = 'full')[M:K + M] for x in signal])
            elif filt == None:
                signal_tmp = signal
            # Tags at this stimulation frequency vs. all other tags.
            tags = [x for (x, y) in all_tags if y == frq]
            rest_tags = [x for (x, y) in all_tags if y != frq]
            for idx in xrange(min(len(tags),len(rest_tags))):
                # Stimulation epoch -> covariance of the "maximize" class,
                # normalized by its trace.
                s_post = signal_tmp[:, to_frequency * (tags[idx] ) : to_frequency * (tags[idx] +\
                        signal_time)]
                dane_B = np.matrix(s_post)
                R_B = dane_B * dane_B.T / np.trace(dane_B * dane_B.T)
                cov_post += R_B
                post_i += 1
                if baseline:
                    if method == 'maxcontrast' or method == 'minimalentropy':
                        # Residual after projecting out the harmonic model
                        # forms the "minimize" class.
                        s_pre = signal_tmp[:, to_frequency *\
                                (tags[idx] + 1) : to_frequency * (tags[idx] + signal_time)]
                        dane_A = np.matrix(s_pre)
                        X = np.matrix(self.__get_model_matrix(frq, s_pre.shape[1], to_frequency))
                        Y = dane_A - (X.T * np.linalg.inv(X * X.T) * X * dane_A.T).T
                        cov_pre += Y * Y.T / np.trace(Y * Y.T)
                        pre_i += 1
                    else:
                        # Pre-stimulus baseline -> "minimize" class.
                        s_pre = signal_tmp[:, to_frequency * (tags[idx] -\
                                1 - base_time) : to_frequency * (tags[idx] -1)]
                        dane_A = np.matrix(s_pre)
                        R_A = dane_A * dane_A.T / np.trace(dane_A * dane_A.T)
                        cov_pre += R_A
                        pre_i += 1
            if not baseline:
                # Without a baseline, epochs of the other frequencies serve
                # as the "minimize" class.
                for idx in rest_tags:
                    s_pre = signal_tmp[:, to_frequency * (idx ) : to_frequency *\
                            (idx + signal_time)]
                    dane_A = np.matrix(s_pre)
                    R_A = dane_A * dane_A.T / np.trace(dane_A * dane_A.T)
                    cov_pre += R_A
                    pre_i += 1
        # Solve for the filters from the averaged class covariances.
        if method == 'regular' or method == 'maxcontrast':
            self.P[:,:], self.vals = self.__get_filter(cov_post / post_i, cov_pre / pre_i)
        elif method == 'pfu':
            self.P[:, :] = pfu_csp(cov_pre / pre_i, cov_post / post_i)
        elif method == 'minimalentropy':
            self.P[:, :], self.vals = self.__get_min_entropy(cov_pre / pre_i)
    def count_stats(self, signal_time, to_freq, tags, plt=False, tr=0.95):
        """Calculates variance and mean

        Cross-correlates the CSP-filtered signal with a sine at each
        stimulation frequency and z-scores the same-frequency correlations
        against the other-frequency ones.

        Returns (threshold, mu, sigma, means, stds, out_top, out_bottom).
        """
        # Apply the first (strongest) CSP filter to the montaged signal.
        signal = np.dot(self.P[:,0], self.parsed_data.prep_signal(to_freq, self.electrodes,\
                montage=self.montage, montage_channels=self.montage_channels))
        signal -= signal.mean()
        # Outlier fences from the interquartile range.
        q1, q2, q3 = quantile(signal, [.25, .50, .75])
        iqr = abs(q1 - q3)
        out_top = q2 + 1.5*iqr
        out_bottom = q2 - 1.5*iqr
        t_vec = np.linspace(0, signal_time - 0.5, (signal_time - 0.5)*to_freq)
        # Allow up to 0.1 s of lag in the cross-correlation.
        max_lag = int(0.1*to_freq)
        this_cors = [[] for i in range(len(self.frequencies))]
        other_cors = [[] for i in range(len(self.frequencies))]
        N = signal_time * to_freq
        for fr in self.frequencies:
            sin = np.sin(2*np.pi*fr*t_vec)
            sin /= np.sqrt(np.sum(sin * sin))
            for pos, f in tags:
                # Normalized epoch starting 0.5 s after the tag.
                tmp_sig = signal[(pos+0.5)*to_freq:(pos + 0.5 + signal_time)*to_freq]
                tmp_sig -= np.mean(tmp_sig)
                tmp_sig /= np.sqrt(np.sum(tmp_sig*tmp_sig))
                xcor = np.correlate(tmp_sig, sin, 'full')[N - 1 - max_lag: N + max_lag]
                idx = self.frequencies.index(f)
                if f == fr:
                    this_cors[idx].append(np.max(xcor))
                else:
                    other_cors[idx].append(np.max(xcor))
        #oc = np.array(np.array(other_cors)).flatten() #Will fail if no. of tags for each frequency is different!
        oc = np.array([x for y in other_cors for x in y]) #flattening a list
        # Z-score everything against the "other frequency" distribution.
        mu, sigma = oc.mean(), oc.std()
        oc = (oc - mu)/sigma
        #new_oc = []
        #for i in xrange(1000):
            #np.random.shuffle(oc)
            #new_oc.append(np.max(oc[:8]))
        #treshold = quantile(np.array(new_oc), [tr])
        treshold = quantile(oc, [tr])
        means = []
        stds = []
        for line in this_cors:
            tc = np.array(line)
            tc -= mu
            tc /= sigma
            means.append(tc.mean())
            stds.append(tc.std())
        if plt:
            # Diagnostic plots: z-scores vs. threshold, normalized z-scores,
            # and per-frequency z-score distributions.
            figure()
            #plot(self.frequencies, means, 'og', self.frequencies, [treshold]*len(self.frequencies),'-r')
            subplot(311)
            plot(self.frequencies, [treshold]*len(self.frequencies), '-r')
            errorbar(self.frequencies, means, yerr=stds, fmt='og')
            legend(('threshold of '+str(tr), 'Z-scores'))
            title('Z-scores '+self.name+'_'+str(signal_time))
            xlabel('Frequencies (Hz)')
            ylabel('Z-scores')
            xticks(self.frequencies)
            subplot(312)
            plot(self.frequencies, [(means[k] - treshold)/stds[k] for k in xrange(len(means))], 'go')
            plot(self.frequencies, [0] * len(self.frequencies), 'r-')
            legend(('"Normalized" z-scores', 'Zero'))
            xlabel('Frequency (Hz)')
            ylabel('(Z-scores - threshold)/std(Z-scores)')
            xticks(self.frequencies)
            subplot(313)
            mk = lambda x: [(k - mu)/sigma for k in x]
            tc = map(mk, this_cors)
            boxplot(tc, notch=1, positions=self.frequencies)
            #xticks(np.arange(0, len(this_cors)), self.frequencies)
            xlabel('Frequencies (Hz)')
            ylabel('Distribution of z-scores')
            show()
            #savefig(self.name + '_' + str(signal_time)+'.png')
        return treshold, mu, sigma, means, stds, out_top, out_bottom
        #return this_cors, other_cors
    def time_frequency_selection(self, to_frequency, tags, time=[1, 1.5, 2, 2.5, 3, 3.5, 4], frequency_no=8, tr=0.95, plt=False):
        """Gets shortest time period with frequency_no frequencies' zscores above treshold

        Returns a pair of times: the first judged by mean z-score alone,
        the second by mean minus one standard deviation. If no time reaches
        frequency_no frequencies, the best available time is returned with
        a printed warning.
        """
        ok_no = []
        std_ok = []
        for i, tm in enumerate(time):
            value, mu, sigma, means, stds, o1, o2 = self.count_stats(tm, to_frequency, tags, plt=False, tr=tr)
            ok_no.append(len([x for x in means if x > value]))
            std_ok.append(len([means[j] for j in xrange(len(means)) if means[j]-stds[j] > value]))
        if plt:
            plot(time, ok_no, 'g-', time, std_ok, 'r-')
            xlabel('Time (s)')
            ylabel('# of frequencies above threshold')
            show()
        try:
            xx1 = [x for x in xrange(len(ok_no)) if ok_no[x] >= frequency_no][0]
            xx2 = [x for x in xrange(len(std_ok)) if std_ok[x] >= frequency_no][0]
            return time[xx1], time[xx2]
        except IndexError:
            # Requested count never reached: fall back to the best times.
            no1 = max(ok_no)
            no2 = max(std_ok)
            idx1 = ok_no.index(no1)
            idx2 = std_ok.index(no2)
            print "Warning: maximal number of frequencies above treshold is", no1
            return time[idx1], time[idx2]
    def dump_filters(self, name, mode = 'pkl', first = False):
        """Function dumps filters and values into file
        Parameters:
        -----------
        name : string
            the name of file to create. Only the prefix, a frequency and an extension will be added
            I.e.:
            >>>q=modCSP.modCSP('some/file', [10,20,30], [0,1,2,3])
            >>>dump_filters('joe_data', mode='pkl')
            will create files joe_data_10.pkl, joe_data_20.pkl and joe_data_30.pkl
        mode [= 'pkl'] : string ['pkl' | 'txt']
            defines mode of file. If 'pkl', pickle file is used. If 'txt' csv is used
        first [= False] : bool
            if True only first column of filter matrix will be written
        """
        # NOTE(review): the 'pkl' branch indexes self.P with three indices,
        # but __init__ creates a 2-d P — looks inconsistent; confirm which
        # shape is intended before relying on pickle output.
        file_name = name
        for i, frs in enumerate(self.frequencies):
            if mode == 'pkl':
                f = open(file_name +'_'+ str(frs) + '.pkl','w')
                if first:
                    pickle.dump(self.P[:, 0, i], f)
                else:
                    pickle.dump(self.P[:, :, i], f)
                f.close()
            elif mode == 'txt':
                f = open(file_name + '.txt','w')
                if first:
                    f.write(",".join([str(x) for x in self.P[:,0]]))
                for y in self.P:
                    f.write(",".join([str(x) for x in y]))
                    f.write('\n')
                f.write(",".join([str(x) for x in self.vals]))
                f.close()
|
gpl-3.0
|
mrzl/ECO
|
src/python/nlp/original_2d_export.py
|
2
|
3090
|
import argparse
import pprint
import glob
import sys
import gensim
import util
import numpy
import json
import os
from sklearn.manifold import TSNE
def process_arguments(args):
    """Parse the command-line flags for the 2D export tool.

    Returns a dict with keys 'model_path', 'txt_path' and 'output_file'
    (each None when the flag is absent).
    """
    parser = argparse.ArgumentParser(description='configure Word2Vec model building')
    parser.add_argument('--model_path', action='store', help='the path to the model')
    parser.add_argument('--txt_path', action='store', help='path containing text files which are all loaded')
    parser.add_argument('--output_file', action='store', help='the text file to store all vectors in')
    return vars(parser.parse_args(args))
class LineVectorCombination(object):
    # Simple record pairing a sentence with its mean word vector.
    # Both attributes default to 0 until assigned by the loader loop.
    vector = 0
    sentence = 0
if __name__ == '__main__':
    params = process_arguments(sys.argv[1:])
    input_path = params['model_path']
    util.enable_verbose_training(sys.argv[0])
    # The model may be stored in either of gensim's two on-disk formats.
    try:
        model = gensim.models.Word2Vec.load_word2vec_format(input_path, binary=True)
        # this raises an exception if the model type is different..
    except Exception:
        # just use the other method of loading..
        model = gensim.models.Word2Vec.load(input_path)
    txt_path = params['txt_path']
    data_300d = []
    originals = []
    original_vectors = []
    original_sentences = []
    text_files = glob.glob(txt_path + '/*.txt')
    for file in text_files:
        line = 'loading file ' + str(text_files.index(file)) + '/' + str(len(text_files))
        print(line)
        index = 0
        for line in open(file, 'r'):
            vector_words = []
            word_count = 0
            for word in line.split():
                try:
                    vector_words.append(model[word])
                    word_count += 1
                except:
                    pass
                    # skip vocab unknown word
            # Only keep lines with more than 5 in-vocabulary words.
            if word_count > 5:
                # Sentence vector = unit-normalized mean of its word vectors.
                vector = gensim.matutils.unitvec(numpy.array(vector_words).mean(axis=0))
                combined = LineVectorCombination()
                combined.sentence = line
                combined.vector = vector
                originals.append(combined)
                original_vectors.append(vector)
                original_sentences.append(line)
                # Quantize components to ints (x10000) to shrink the JSON.
                vlist = vector.tolist()
                intlist = []
                for number in vlist:
                    intnumber = int(number*10000)
                    intlist.append(intnumber)
                data_300d.append({"sentence": line, "point": intlist})
            index += 1
    output_file = params['output_file']
    # The t-SNE 2D projection is disabled; only the 300d vectors are written.
    # X = numpy.array(original_vectors)
    # tsne = TSNE(n_components=2, learning_rate=200, perplexity=20, verbose=2).fit_transform(X)
    #
    # data_2d = []
    # for i, f in enumerate(original_sentences):
    #     point = [(tsne[i, k] - numpy.min(tsne[:, k]))/(numpy.max(tsne[:, k]) - numpy.min(tsne[:, k])) for k in range(2)]
    #     data_2d.append({"sentence": os.path.abspath(original_sentences[i]), "point": point})
    with open(output_file, 'w') as outfile:
        #json.dump(data_2d, outfile)
        json.dump(data_300d, outfile)
|
apache-2.0
|
francescobaldi86/Ecos2015PaperExtension
|
Data_Process/Data_inputs/RMS_sw_landsort_radings.py
|
1
|
2264
|
# NOTE(review): this file is an IPython session transcript, not a runnable
# script -- the %pylab magic below and the bare trailing expressions only
# work inside IPython/Jupyter.
import pandas as pd
import matplotlib.mlab as mlab
import matplotlib.pyplot as plt
import os
%pylab
# This is for inline plotting
project_path = os.path.realpath('.')
project_path
database_path = project_path + os.sep + 'Database' + os.sep
graph_path = project_path + os.sep + 'Analyse' + os.sep + 'Graph' + os.sep
df = pd.read_hdf(database_path + 'selected_df.h5','table')
#%%
# Create dictionary translation from original to new! (not the other way around)
headers = pd.read_excel(project_path + os.sep + 'General' + os.sep + 'headers_dict.xlsx')
# Load the data from the Excel-file with headers. Please note the project_path
# Create a list of each column, then a dictionary which is acting as the translator.
old = headers['ORIGINAL_HEADER']
new = headers['NEW_HEADER']
d = {}
for n in range(len(old)):
    d[old[n]] = new[n]
    d[new[n]] = old[n] # To make it bi-directional
# Checking the difference between Landsort sea water temperature and the temperature readings
# from MS Birka SW-temp. We have missing data on this point for the first half year.
#
sw_smhi_landsort = pd.read_excel(database_path + '/smhi-open-data/water_T_landsort_smhi-opendata_5_2507_20170602_084638.xlsx',index_col=0)
sw_smhi_landsort.index = pd.to_datetime(sw_smhi_landsort.index)
# Resample SMHI readings to the ship's 15-minute grid and interpolate gaps.
havstemp=sw_smhi_landsort['Havstemperatur']['2014-06-01':'2014-12-15'].resample('15min').mean()
havstemp=havstemp.interpolate()
havstemp.plot()
i1='SEA_SW_T_'
#i2='SW-ME-AE24_SW_T_IN'
#series1=df[d[i1]]['2014-06-01']
#series2=df[d[i2]]['2014-06-01']
series1=df[d[i1]]['2014-06-01':'2014-12-15'].resample('15min').mean()
#series2=df[d[i2]].resample('D')
#series2= series2[series1 > 0]
#series1= series1[series1 > 0]
series1.plot()
#diff_sq = (((havstemp - series1)**2)**0.5).mean()
#series2.plot()
#plt.plot(series1,linewidth=0,marker='x')
# Title shows the RMS difference between SMHI and onboard readings.
plt.title((d[i1])+' RMS: '+str( ((((havstemp - series1)**2).sum())/len(havstemp))**0.5 ) )
# NOTE(review): `matplotlib` is not imported as a bare name above; this line
# relies on %pylab having injected it into the namespace.
fig = matplotlib.pyplot.gcf() # higher res
fig.set_size_inches(10,5) #higher res
plt.show()
# NOTE(review): diff_sq is referenced here before its first assignment below;
# this only worked because the transcript was executed cell-by-cell.
diff_sq
diff_sq/len(havstemp)
type(diff_sq)
diff_sq = ((havstemp - series1)**2)**0.5
diff_sq.sum()/len(havstemp)
diff_2 = abs(havstemp-series1).mean()
diff_2
diff_2-diff_sq
diff_2-diff_sq
diff_sq = ((((havstemp - series1)**2).sum())/len(havstemp))**0.5
diff_sq
|
mit
|
StratsOn/zipline
|
zipline/protocol.py
|
2
|
17043
|
#
# Copyright 2013 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from copy import copy
from six import iteritems, iterkeys
import pandas as pd
import numpy as np
from . utils.protocol_utils import Enum
from . utils.math_utils import nanstd, nanmean, nansum
from zipline.finance.trading import with_environment
from zipline.utils.algo_instance import get_algo_instance
from zipline.utils.serialization_utils import (
VERSION_LABEL
)
# Datasource type should completely determine the other fields of a
# message with its type.
DATASOURCE_TYPE = Enum(
    'AS_TRADED_EQUITY',
    'MERGER',
    'SPLIT',
    'DIVIDEND',
    'TRADE',
    'TRANSACTION',
    'ORDER',
    'EMPTY',
    'DONE',
    'CUSTOM',
    'BENCHMARK',
    'COMMISSION'
)

# Expected fields/index values for a dividend Series.
DIVIDEND_FIELDS = [
    'declared_date',
    'ex_date',
    'gross_amount',
    'net_amount',
    'pay_date',
    'payment_sid',
    'ratio',
    'sid',
]
# Expected fields/index values for a dividend payment Series
# (see dividend_payment() below, which builds such a Series).
DIVIDEND_PAYMENT_FIELDS = ['id', 'payment_sid', 'cash_amount', 'share_count']
def dividend_payment(data=None):
    """Build a Series describing a single dividend payment.

    The Series is indexed by DIVIDEND_PAYMENT_FIELDS and named with the
    payment's ``id`` so that concatenating several payments yields a
    DataFrame indexed by id.  Ids are assigned in
    PerformanceTracker.update_dividends and are unique within one
    simulation run.

    When *data* is supplied it must contain an ``id`` key, and either a
    nonzero ``cash_amount`` or a ``payment_sid``/``share_count`` pair.
    (Note: when a Series produced here is returned from a function passed
    to ``DataFrame.apply``, pandas ignores the name and keeps the caller's
    index instead.)
    """
    if data is not None:
        series_name = data['id']
    else:
        series_name = None
    return pd.Series(data=data,
                     name=series_name,
                     index=DIVIDEND_PAYMENT_FIELDS,
                     dtype=object)
class Event(object):
    """A mutable bag of named values with both attribute and item access.

    All state lives in ``__dict__``, so ``event['x']`` and ``event.x``
    are interchangeable.
    """

    def __init__(self, initial_values=None):
        # The mapping is adopted directly (not copied) -- callers that
        # mutate it afterwards mutate the event.
        if initial_values:
            self.__dict__ = initial_values

    def __getitem__(self, name):
        return getattr(self, name)

    def __setitem__(self, name, value):
        setattr(self, name, value)

    def __delitem__(self, name):
        delattr(self, name)

    def keys(self):
        return self.__dict__.keys()

    def __eq__(self, other):
        other_state = getattr(other, '__dict__', None)
        return other_state is not None and self.__dict__ == other_state

    def __contains__(self, name):
        return name in self.__dict__

    def __repr__(self):
        return "Event(" + repr(self.__dict__) + ")"

    def to_series(self, index=None):
        """Render the event as a pandas Series, optionally reindexed."""
        return pd.Series(self.__dict__, index=index)
class Order(Event):
    # Orders carry no extra behavior; the subclass exists so order objects
    # can be distinguished from other Event instances by type.
    pass
class Portfolio(object):
    """Snapshot of an algorithm's cash, positions and running performance.

    All monetary fields start at zero; ``positions`` is a Positions
    mapping that auto-creates entries on first access.
    """

    def __init__(self):
        self.capital_used = 0.0
        self.starting_cash = 0.0
        self.portfolio_value = 0.0
        self.pnl = 0.0
        self.returns = 0.0
        self.cash = 0.0
        self.positions = Positions()
        self.start_date = None
        self.positions_value = 0.0

    def __getitem__(self, key):
        # Item access mirrors attribute access.
        return vars(self)[key]

    def __repr__(self):
        return "Portfolio(" + repr(self.__dict__) + ")"

    def __getstate__(self):
        state = copy(self.__dict__)

        # Pickle positions as a primitive dict, not a Positions instance.
        state['positions'] = dict(self.positions)
        state[VERSION_LABEL] = 1  # serialization format version
        return state

    def __setstate__(self, state):
        # Reject states older than the oldest supported format (v1).
        version = state.pop(VERSION_LABEL)
        if version < 1:
            raise BaseException("Portfolio saved state is too old.")

        self.positions = Positions()
        self.positions.update(state.pop('positions'))
        self.__dict__.update(state)
class Account(object):
    '''
    Tracks the state of the trading account. The set of keys is fixed;
    their values are refreshed as the algorithm runs, and may be
    overwritten with broker-reported figures when a live broker is
    connected.
    '''

    def __init__(self):
        # Monetary fields start at zero; float('inf') means "no limit
        # reported".
        self.settled_cash = 0.0
        self.accrued_interest = 0.0
        self.buying_power = float('inf')
        self.equity_with_loan = 0.0
        self.total_positions_value = 0.0
        self.regt_equity = 0.0
        self.regt_margin = float('inf')
        self.initial_margin_requirement = 0.0
        self.maintenance_margin_requirement = 0.0
        self.available_funds = 0.0
        self.excess_liquidity = 0.0
        self.cushion = 0.0
        self.day_trades_remaining = float('inf')
        self.leverage = 0.0
        self.net_leverage = 0.0
        self.net_liquidation = 0.0

    def __getitem__(self, key):
        # Item access mirrors attribute access.
        return vars(self)[key]

    def __repr__(self):
        return "Account(" + repr(self.__dict__) + ")"

    def __getstate__(self):
        state = copy(self.__dict__)
        state[VERSION_LABEL] = 1  # serialization format version
        return state

    def __setstate__(self, state):
        # Reject states older than the oldest supported format (v1).
        version = state.pop(VERSION_LABEL)
        if version < 1:
            raise BaseException("Account saved state is too old.")

        self.__dict__.update(state)
class Position(object):
    """Holdings in a single security: amount, cost basis and last price."""

    def __init__(self, sid):
        self.sid = sid
        self.amount = 0
        self.cost_basis = 0.0  # per share
        self.last_sale_price = 0.0

    def __getitem__(self, key):
        # Item access mirrors attribute access.
        return vars(self)[key]

    def __repr__(self):
        return "Position(" + repr(self.__dict__) + ")"

    def __getstate__(self):
        state = copy(self.__dict__)
        state[VERSION_LABEL] = 1  # serialization format version
        return state

    def __setstate__(self, state):
        # Reject states older than the oldest supported format (v1).
        version = state.pop(VERSION_LABEL)
        if version < 1:
            raise BaseException("Protocol Position saved state is too old.")

        self.__dict__.update(state)
class Positions(dict):
    # dict that lazily creates an empty Position the first time an unknown
    # sid is looked up (via the dict.__missing__ hook), so callers can write
    # portfolio.positions[sid].amount += n without pre-registering sids.
    def __missing__(self, key):
        pos = Position(key)
        self[key] = pos
        return pos
class SIDData(object):
    """Per-sid view of the current bar's data, plus history-backed
    transforms (mavg/stddev/vwap/returns) with class-level caching.

    NOTE(review): the caches below are class attributes, shared across all
    SIDData instances and keyed off the running algorithm's datetime.
    """
    # Cache some data on the class so that this is shared for all instances of
    # siddata.

    # The dt where we cached the history.
    _history_cache_dt = None
    # _history_cache is a dict mapping fields to pd.DataFrames. This is the
    # most data we have for a given field for the _history_cache_dt.
    _history_cache = {}

    # This is the cache that is used for returns. This will have a different
    # structure than the other history cache as this is always daily.
    _returns_cache_dt = None
    _returns_cache = None

    # The last dt that we needed to cache the number of minutes.
    _minute_bar_cache_dt = None
    # If we are in minute mode, there is some cost associated with computing
    # the number of minutes that we need to pass to the bar count of history.
    # This will remain constant for a given bar and day count.
    # This maps days to number of minutes.
    _minute_bar_cache = {}

    def __init__(self, sid, initial_values=None):
        self._sid = sid
        self._freqstr = None

        # To check if we have data, we use the __len__ which depends on the
        # __dict__. Because we are forward defining the attributes needed, we
        # need to account for their entries in the __dict__.
        # We will add 1 because we need to account for the _initial_len entry
        # itself.
        self._initial_len = len(self.__dict__) + 1

        if initial_values:
            self.__dict__.update(initial_values)

    @property
    def datetime(self):
        """
        Provides an alias from data['foo'].datetime -> data['foo'].dt

        `datetime` was previously provided by adding a separate `datetime`
        member of the SIDData object via a generator that wrapped the incoming
        data feed and added the field to each equity event.

        This alias is intended to be temporary, to provide backwards
        compatibility with existing algorithms, but should be considered
        deprecated, and may be removed in the future.
        """
        return self.dt

    def get(self, name, default=None):
        """dict-style get over this sid's event fields."""
        return self.__dict__.get(name, default)

    def __getitem__(self, name):
        return self.__dict__[name]

    def __setitem__(self, name, value):
        self.__dict__[name] = value

    def __len__(self):
        # Number of data fields, excluding the attributes set in __init__.
        return len(self.__dict__) - self._initial_len

    def __contains__(self, name):
        return name in self.__dict__

    def __repr__(self):
        return "SIDData({0})".format(self.__dict__)

    def _get_buffer(self, bars, field='price', raw=False):
        """
        Gets the result of history for the given number of bars and field.

        This will cache the results internally.
        """
        cls = self.__class__
        algo = get_algo_instance()

        now = algo.datetime
        if now != cls._history_cache_dt:
            # For a given dt, the history call for this field will not change.
            # We have a new dt, so we should reset the cache.
            cls._history_cache_dt = now
            cls._history_cache = {}

        if field not in self._history_cache \
                or bars > len(cls._history_cache[field][0].index):
            # If we have never cached this field OR the amount of bars that we
            # need for this field is greater than the amount we have cached,
            # then we need to get more history.
            hst = algo.history(
                bars, self._freqstr, field, ffill=True,
            )
            # Assert that the column holds ints, not security objects.
            if not isinstance(self._sid, str):
                hst.columns = hst.columns.astype(int)
            self._history_cache[field] = (hst, hst.values, hst.columns)

        # Slice of only the bars needed. This is because we store the LARGEST
        # amount of history for the field, and we might request less than the
        # largest from the cache.
        buffer_, values, columns = cls._history_cache[field]
        if raw:
            sid_index = columns.get_loc(self._sid)
            return values[-bars:, sid_index]
        else:
            return buffer_[self._sid][-bars:]

    def _get_bars(self, days):
        """
        Gets the number of bars needed for the current number of days.

        Figures this out based on the algo datafrequency and caches the result.
        This caches the result by replacing this function on the object.
        This means that after the first call to _get_bars, this method will
        point to a new function object.
        """
        def daily_get_max_bars(days):
            return days

        def minute_get_max_bars(days):
            # max number of minute. regardless of current days or short
            # sessions
            return days * 390

        def daily_get_bars(days):
            return days

        @with_environment()
        def minute_get_bars(days, env=None):
            cls = self.__class__

            now = get_algo_instance().datetime
            if now != cls._minute_bar_cache_dt:
                cls._minute_bar_cache_dt = now
                cls._minute_bar_cache = {}

            if days not in cls._minute_bar_cache:
                # Cache this calculation to happen once per bar, even if we
                # use another transform with the same number of days.
                prev = env.previous_trading_day(now)
                ds = env.days_in_range(
                    env.add_trading_days(-days + 2, prev),
                    prev,
                )
                # compute the number of minutes in the (days - 1) days before
                # today.
                # 210 minutes in an early close and 390 in a full day.
                ms = sum(210 if d in env.early_closes else 390 for d in ds)
                # Add the number of minutes for today.
                ms += int(
                    (now - env.get_open_and_close(now)[0]).total_seconds() / 60
                )

                cls._minute_bar_cache[days] = ms + 1  # Account for this minute

            return cls._minute_bar_cache[days]

        if get_algo_instance().sim_params.data_frequency == 'daily':
            self._freqstr = '1d'
            # update this method to point to the daily variant.
            self._get_bars = daily_get_bars
            self._get_max_bars = daily_get_max_bars
        else:
            self._freqstr = '1m'
            # update this method to point to the minute variant.
            self._get_bars = minute_get_bars
            self._get_max_bars = minute_get_max_bars

        # Not actually recursive because we have already cached the new method.
        return self._get_bars(days)

    def mavg(self, days):
        """NaN-aware moving average of price over the trailing *days*."""
        bars = self._get_bars(days)
        max_bars = self._get_max_bars(days)
        prices = self._get_buffer(max_bars, raw=True)[-bars:]
        return nanmean(prices)

    def stddev(self, days):
        """NaN-aware sample standard deviation (ddof=1) of trailing prices."""
        bars = self._get_bars(days)
        max_bars = self._get_max_bars(days)
        prices = self._get_buffer(max_bars, raw=True)[-bars:]
        return nanstd(prices, ddof=1)

    def vwap(self, days):
        """Volume-weighted average price over the trailing *days*.

        NOTE(review): numpy division by a zero volume sum yields nan/warns
        rather than raising ZeroDivisionError, so the except branch only
        fires for scalar zero division.
        """
        bars = self._get_bars(days)
        max_bars = self._get_max_bars(days)
        prices = self._get_buffer(max_bars, raw=True)[-bars:]
        vols = self._get_buffer(max_bars, field='volume', raw=True)[-bars:]

        vol_sum = nansum(vols)
        try:
            ret = nansum(prices * vols) / vol_sum
        except ZeroDivisionError:
            ret = np.nan

        return ret

    def returns(self):
        """Fractional price change between yesterday's and today's close."""
        algo = get_algo_instance()

        now = algo.datetime
        if now != self._returns_cache_dt:
            self._returns_cache_dt = now
            self._returns_cache = algo.history(2, '1d', 'price', ffill=True)

        hst = self._returns_cache[self._sid]
        return (hst.iloc[-1] - hst.iloc[0]) / hst.iloc[0]
class BarData(object):
    """
    Holds the event data for all sids for a given dt.

    This is what is passed as `data` to the `handle_data` function.

    Note: Many methods are analogues of dictionary because of historical
    usage of what this replaced as a dictionary subclass.
    """

    def __init__(self, data=None):
        self._data = {} if not data else data
        self._contains_override = None

    def __contains__(self, name):
        # An installed override can veto membership before the underlying
        # mapping is consulted.
        override = self._contains_override
        if override is not None and override:
            if not override(name):
                return False
        return name in self._data

    def has_key(self, name):
        """
        DEPRECATED: __contains__ is preferred, but this method is for
        compatibility with existing algorithms.
        """
        return name in self

    def __setitem__(self, name, value):
        self._data[name] = value

    def __getitem__(self, name):
        return self._data[name]

    def __delitem__(self, name):
        del self._data[name]

    def __iter__(self):
        for sid, event_data in iteritems(self._data):
            # Skip sids rejected by the contains override, and skip sids
            # with empty event data.
            if sid in self and len(event_data):
                yield sid

    def iterkeys(self):
        # Allow contains override to filter out sids.
        return (s for s in iterkeys(self._data) if s in self)

    def keys(self):
        # Allow contains override to filter out sids.
        return [s for s in iterkeys(self._data) if s in self]

    def itervalues(self):
        return (v for _, v in self.iteritems())

    def values(self):
        return [v for _, v in self.iteritems()]

    def iteritems(self):
        return ((sid, value)
                for sid, value in iteritems(self._data)
                if sid in self)

    def items(self):
        return list(self.iteritems())

    def __len__(self):
        # Counts sids passing the override filter (empty events included).
        return len(self.keys())

    def __repr__(self):
        return '{0}({1})'.format(self.__class__.__name__, self._data)
|
apache-2.0
|
omni5cience/django-inlineformfield
|
.tox/py27/lib/python2.7/site-packages/IPython/kernel/zmq/pylab/backend_inline.py
|
8
|
5498
|
"""A matplotlib backend for publishing figures via display_data"""
#-----------------------------------------------------------------------------
# Copyright (C) 2011 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from __future__ import print_function
# Third-party imports
import matplotlib
from matplotlib.backends.backend_agg import new_figure_manager, FigureCanvasAgg # analysis: ignore
from matplotlib._pylab_helpers import Gcf
# Local imports
from IPython.core.getipython import get_ipython
from IPython.core.display import display
from .config import InlineBackend
#-----------------------------------------------------------------------------
# Functions
#-----------------------------------------------------------------------------
def show(close=None):
    """Show all figures as SVG/PNG payloads sent to the IPython clients.

    Parameters
    ----------
    close : bool, optional
        If true, a ``plt.close('all')`` call is automatically issued after
        sending all the figures. If this is set, the figures will entirely
        removed from the internal list of figures.
    """
    if close is None:
        close = InlineBackend.instance().close_figures
    try:
        for figure_manager in Gcf.get_all_fig_managers():
            display(figure_manager.canvas.figure)
    finally:
        # Always clear the pending-draw queue, even if display() raised.
        show._to_draw = []
        if close:
            matplotlib.pyplot.close('all')

# Function attributes used as module-level state by draw_if_interactive()
# and flush_figures() below.
# This flag will be reset by draw_if_interactive when called
show._draw_called = False
# list of figures to draw when flush_figures is called
show._to_draw = []
def draw_if_interactive():
    """
    Is called after every pylab drawing command
    """
    # signal that the current active figure should be sent at the end of
    # execution.  Also sets the _draw_called flag, signaling that there will be
    # something to send.  At the end of the code execution, a separate call to
    # flush_figures() will act upon these values
    manager = Gcf.get_active()
    if manager is None:
        return
    fig = manager.canvas.figure

    # Hack: matplotlib FigureManager objects in interacive backends (at least
    # in some of them) monkeypatch the figure object and add a .show() method
    # to it.  This applies the same monkeypatch in order to support user code
    # that might expect `.show()` to be part of the official API of figure
    # objects.
    # For further reference:
    # https://github.com/ipython/ipython/issues/1612
    # https://github.com/matplotlib/matplotlib/issues/835
    if not hasattr(fig, 'show'):
        # Queue up `fig` for display
        fig.show = lambda *a: display(fig)

    # If matplotlib was manually set to non-interactive mode, this function
    # should be a no-op (otherwise we'll generate duplicate plots, since a user
    # who set ioff() manually expects to make separate draw/show calls).
    if not matplotlib.is_interactive():
        return

    # ensure current figure will be drawn, and each subsequent call
    # of draw_if_interactive() moves the active figure to ensure it is
    # drawn last
    try:
        show._to_draw.remove(fig)
    except ValueError:
        # ensure it only appears in the draw list once
        pass
    # Queue up the figure for drawing in next show() call
    show._to_draw.append(fig)
    show._draw_called = True
def flush_figures():
    """Send all figures that changed

    This is meant to be called automatically and will call show() if, during
    prior code execution, there had been any calls to draw_if_interactive.

    This function is meant to be used as a post_execute callback in IPython,
    so user-caused errors are handled with showtraceback() instead of being
    allowed to raise.  If this function is not called from within IPython,
    then these exceptions will raise.
    """
    if not show._draw_called:
        return

    if InlineBackend.instance().close_figures:
        # ignore the tracking, just draw and close all figures
        try:
            return show(True)
        except Exception as e:
            # safely show traceback if in IPython, else raise
            ip = get_ipython()
            if ip is None:
                raise e
            else:
                ip.showtraceback()
                return
    try:
        # exclude any figures that were closed:
        active = set([fm.canvas.figure for fm in Gcf.get_all_fig_managers()])
        for fig in [ fig for fig in show._to_draw if fig in active ]:
            try:
                display(fig)
            except Exception as e:
                # safely show traceback if in IPython, else raise
                ip = get_ipython()
                if ip is None:
                    raise e
                else:
                    ip.showtraceback()
                    return
    finally:
        # clear flags for next round
        show._to_draw = []
        show._draw_called = False

# Changes to matplotlib in version 1.2 requires a mpl backend to supply a default
# figurecanvas. This is set here to a Agg canvas
# See https://github.com/matplotlib/matplotlib/pull/1125
FigureCanvas = FigureCanvasAgg
|
mit
|
cnloni/tensorflow-bezier
|
images/fig3.py
|
1
|
1191
|
#! /usr/bin/python3
# Plot Fig.3: log10(diff) vs. step count for two implementations, read from
# two result files, saved as fig3.png.
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.font_manager import FontProperties
# Japanese-capable font for the legend labels below.
fp = FontProperties(fname=r'/usr/share/fonts/truetype/fonts-japanese-gothic.ttf', size=18)
file1 = '../data/main3.res'
file2 = '../data/main1.1e-5.res'
fig = plt.figure(figsize=(8., 6.), frameon=False)
ax = fig.add_subplot(111)
ax.axis([0, 80000, 0., 4.], 'scaled')
ax.set_title('Fig.3', fontsize=20, fontweight='bold')
d1raw = np.genfromtxt(file1, delimiter=' ',
    dtype=[('phase','S2'),('nstep',int),('diff',float)])
# Deduplicate consecutive rows sharing the same step count, keeping the first.
d1x = []
d1y = []
for i in range(len(d1raw['nstep'])):
    if i == 0 or d1raw['nstep'][i] != d1raw['nstep'][i-1]:
        d1x.append(d1raw['nstep'][i])
        d1y.append(d1raw['diff'][i])
d2 = np.genfromtxt(file2, delimiter=' ',
    dtype=[('nstep',int),('diff',float)])
d2x = d2['nstep']
d2y = d2['diff']
d1ylog = np.log10(d1y)
d2ylog = np.log10(d2y)
ax.plot(d2x, d2ylog, color='r', marker='None')
ax.plot(d1x, d1ylog, color='b', marker='None')
ax.set_xlabel('steps', fontsize=18)
ax.set_ylabel('diff (log)', fontsize=18)
ax.legend(['実装1', '実装2'], prop=fp, loc='center right')
#plt.show()
plt.savefig('fig3.png', dpi=60)
|
mit
|
crichardson17/starburst_atlas
|
Low_resolution_sims/Dusty_LowRes/Geneva_inst_NoRot/Geneva_inst_NoRot_5/fullgrid/Optical1.py
|
30
|
9342
|
import csv
import matplotlib.pyplot as plt
from numpy import *
import scipy.interpolate
import math
from pylab import *
from matplotlib.ticker import MultipleLocator, FormatStrFormatter
import matplotlib.patches as patches
from matplotlib.path import Path
import os
# ------------------------------------------------------------------------------------------------------
#inputs
# Pick up the three grid files and three emission-line files from the current
# directory by suffix (last match wins if several exist).
for file in os.listdir('.'):
    if file.endswith("1.grd"):
        gridfile1 = file
for file in os.listdir('.'):
    if file.endswith("2.grd"):
        gridfile2 = file
for file in os.listdir('.'):
    if file.endswith("3.grd"):
        gridfile3 = file
# ------------------------
for file in os.listdir('.'):
    if file.endswith("1.txt"):
        Elines1 = file
for file in os.listdir('.'):
    if file.endswith("2.txt"):
        Elines2 = file
for file in os.listdir('.'):
    if file.endswith("3.txt"):
        Elines3 = file
# ------------------------------------------------------------------------------------------------------
#Patches data
#for the Kewley and Levesque data
verts = [
    (1., 7.97712125471966000000), # left, bottom
    (1., 9.57712125471966000000), # left, top
    (2., 10.57712125471970000000), # right, top
    (2., 8.97712125471966000000), # right, bottom
    (0., 0.), # ignored
    ]
codes = [Path.MOVETO,
    Path.LINETO,
    Path.LINETO,
    Path.LINETO,
    Path.CLOSEPOLY,
    ]
path = Path(verts, codes)
# ------------------------
#for the Kewley 01 data
verts2 = [
    (2.4, 9.243038049), # left, bottom
    (2.4, 11.0211893), # left, top
    (2.6, 11.0211893), # right, top
    (2.6, 9.243038049), # right, bottom
    (0, 0.), # ignored
    ]
# NOTE(review): `path` is rebuilt identically below each region definition;
# only the final assignment matters.
path = Path(verts, codes)
path2 = Path(verts2, codes)
# -------------------------
#for the Moy et al data
verts3 = [
    (1., 6.86712125471966000000), # left, bottom
    (1., 10.18712125471970000000), # left, top
    (3., 12.18712125471970000000), # right, top
    (3., 8.86712125471966000000), # right, bottom
    (0., 0.), # ignored
    ]
path = Path(verts, codes)
path3 = Path(verts3, codes)
def add_patches(ax):
patch3 = patches.PathPatch(path3, facecolor='yellow', lw=0)
patch2 = patches.PathPatch(path2, facecolor='green', lw=0)
patch = patches.PathPatch(path, facecolor='red', lw=0)
ax1.add_patch(patch3)
ax1.add_patch(patch2)
ax1.add_patch(patch)
# ------------------------------------------------------------------------------------------------------
#the subplot routine
def add_sub_plot(sub_num):
    # Draw one panel of the 4x4 contour grid: RBF-interpolate line ratio
    # sub_num-1 over (hdens, phi), add contours, mark/label the maximum, and
    # show axis ticks/labels only on the outer edge of the grid.
    # Relies on module globals: x, y, z, xi, yi, levels, levels2, max_values,
    # headers, line.
    numplots = 16

    plt.subplot(numplots/4.,4,sub_num)

    rbf = scipy.interpolate.Rbf(x, y, z[:,sub_num-1], function='linear')
    zi = rbf(xi, yi)

    contour = plt.contour(xi,yi,zi, levels, colors='c', linestyles = 'dashed')
    contour2 = plt.contour(xi,yi,zi, levels2, colors='k', linewidths=1.5)
    plt.scatter(max_values[line[sub_num-1],2], max_values[line[sub_num-1],3], c ='k',marker = '*')
    plt.annotate(headers[line[sub_num-1]], xy=(8,11),  xytext=(6,8.5), fontsize = 10)
    plt.annotate(max_values[line[sub_num-1],0], xy= (max_values[line[sub_num-1],2], max_values[line[sub_num-1],3]), xytext = (0, -10), textcoords = 'offset points', ha = 'right', va = 'bottom', fontsize=10)
    if sub_num == numplots / 2.:
        print "half the plots are complete"
    #axis limits
    yt_min = 8
    yt_max = 23
    xt_min = 0
    xt_max = 12
    plt.ylim(yt_min,yt_max)
    plt.xlim(xt_min,xt_max)
    plt.yticks(arange(yt_min+1,yt_max,1),fontsize=10)
    plt.xticks(arange(xt_min+1,xt_max,1), fontsize = 10)
    # Hide the y-axis labels everywhere except the left column.
    if sub_num in [2,3,4,6,7,8,10,11,12,14,15,16]:
        plt.tick_params(labelleft = 'off')
    else:
        plt.tick_params(labelleft = 'on')
        plt.ylabel('Log ($ \phi  _{\mathrm{H}}  $)')
    # Hide the x-axis labels everywhere except the bottom row.
    if sub_num in [1,2,3,4,5,6,7,8,9,10,11,12]:
        plt.tick_params(labelbottom = 'off')
    else:
        plt.tick_params(labelbottom = 'on')
        plt.xlabel('Log($n _{\mathrm{H}} $)')
    # Corner panels get adjusted tick ranges so labels don't collide.
    if sub_num == 1:
        plt.yticks(arange(yt_min+1,yt_max+1,1),fontsize=10)
    if sub_num == 13:
        plt.yticks(arange(yt_min,yt_max,1),fontsize=10)
        plt.xticks(arange(xt_min,xt_max,1), fontsize = 10)
    if sub_num == 16 :
        plt.xticks(arange(xt_min+1,xt_max+1,1), fontsize = 10)
# ---------------------------------------------------
#this is where the grid information (phi and hdens) is read in and saved to grid.
grid1 = [];
grid2 = [];
grid3 = [];

with open(gridfile1, 'rb') as f:
    csvReader = csv.reader(f,delimiter='\t')
    for row in csvReader:
        grid1.append(row);
grid1 = asarray(grid1)
with open(gridfile2, 'rb') as f:
    csvReader = csv.reader(f,delimiter='\t')
    for row in csvReader:
        grid2.append(row);
grid2 = asarray(grid2)
with open(gridfile3, 'rb') as f:
    csvReader = csv.reader(f,delimiter='\t')
    for row in csvReader:
        grid3.append(row);
grid3 = asarray(grid3)
#here is where the data for each line is read in and saved to dataEmissionlines
dataEmissionlines1 = [];
dataEmissionlines2 = [];
dataEmissionlines3 = [];

with open(Elines1, 'rb') as f:
    csvReader = csv.reader(f,delimiter='\t')
    headers = csvReader.next()
    for row in csvReader:
        dataEmissionlines1.append(row);
dataEmissionlines1 = asarray(dataEmissionlines1)
with open(Elines2, 'rb') as f:
    csvReader = csv.reader(f,delimiter='\t')
    headers2 = csvReader.next()
    for row in csvReader:
        dataEmissionlines2.append(row);
dataEmissionlines2 = asarray(dataEmissionlines2)
with open(Elines3, 'rb') as f:
    csvReader = csv.reader(f,delimiter='\t')
    headers3 = csvReader.next()
    for row in csvReader:
        dataEmissionlines3.append(row);
dataEmissionlines3 = asarray(dataEmissionlines3)
print "import files complete"
# ---------------------------------------------------
#for concatenating grid
#pull the phi and hdens values from each of the runs. exclude header lines
grid1new = zeros((len(grid1[:,0])-1,2))
grid1new[:,0] = grid1[1:,6]
grid1new[:,1] = grid1[1:,7]

grid2new = zeros((len(grid2[:,0])-1,2))
# Run 2 has a fixed phi of 17.0 (column not present in its grid file).
x = array(17.00000)
grid2new[:,0] = repeat(x,len(grid2[:,0])-1)
grid2new[:,1] = grid2[1:,6]

grid3new = zeros((len(grid3[:,0])-1,2))
grid3new[:,0] = grid3[1:,6]
grid3new[:,1] = grid3[1:,7]

grid = concatenate((grid1new,grid2new,grid3new))
hdens_values = grid[:,1]
phi_values = grid[:,0]
# ---------------------------------------------------
#for concatenating Emission lines data
Emissionlines = concatenate((dataEmissionlines1[:,1:],dataEmissionlines2[:,1:],dataEmissionlines3[:,1:]))
#for lines
headers = headers[1:]
concatenated_data = zeros((len(Emissionlines),len(Emissionlines[0])))
max_values = zeros((len(concatenated_data[0]),4))
# ---------------------------------------------------
#constructing grid by scaling
#select the scaling factor
#for 1215
#incident = Emissionlines[1:,4]

#for 4860
# NOTE(review): this reads from concatenated_data before it is filled, so
# `incident` is all zeros; it is not used afterwards.
incident = concatenated_data[:,57]

#take the ratio of incident and all the lines and put it all in an array concatenated_data
for i in range(len(Emissionlines)):
    for j in range(len(Emissionlines[0])):
        if math.log(4860.*(float(Emissionlines[i,j])/float(Emissionlines[i,57])), 10) > 0:
            concatenated_data[i,j] = math.log(4860.*(float(Emissionlines[i,j])/float(Emissionlines[i,57])), 10)
        else:
            # NOTE(review): '==' is a comparison, not an assignment -- this
            # branch is a no-op and relies on the array being pre-zeroed.
            concatenated_data[i,j] == 0
# for 1215
#for i in range(len(Emissionlines)):
#    for j in range(len(Emissionlines[0])):
#        if math.log(1215.*(float(Emissionlines[i,j])/float(Emissionlines[i,4])), 10) > 0:
#            concatenated_data[i,j] = math.log(1215.*(float(Emissionlines[i,j])/float(Emissionlines[i,4])), 10)
#        else:
#            concatenated_data[i,j] == 0
# ---------------------------------------------------
#find the maxima to plot onto the contour plots
for j in range(len(concatenated_data[0])):
    max_values[j,0] = max(concatenated_data[:,j])
    max_values[j,1] = argmax(concatenated_data[:,j], axis = 0)
    max_values[j,2] = hdens_values[max_values[j,1]]
    max_values[j,3] = phi_values[max_values[j,1]]

#to round off the maxima
max_values[:,0] = [ '%.1f' % elem for elem in max_values[:,0] ]
print "data arranged"
# ---------------------------------------------------
#Creating the grid to interpolate with for contours.
gridarray = zeros((len(concatenated_data),2))
gridarray[:,0] = hdens_values
gridarray[:,1] = phi_values

x = gridarray[:,0]
y = gridarray[:,1]
# ---------------------------------------------------
#change desired lines here!
line = [36, #NE 3  3343A
    38, #BA C
    39, #3646
    40, #3726
    41, #3727
    42, #3729
    43, #3869
    44, #3889
    45, #3933
    46, #4026
    47, #4070
    48, #4074
    49, #4078
    50, #4102
    51, #4340
    52] #4363
#create z array for this plot
z = concatenated_data[:,line[:]]
# ---------------------------------------------------
# Interpolate
print "starting interpolation"
xi, yi = linspace(x.min(), x.max(), 10), linspace(y.min(), y.max(), 10)
xi, yi = meshgrid(xi, yi)
# ---------------------------------------------------
print "interpolatation complete; now plotting"
#plot
plt.subplots_adjust(wspace=0, hspace=0) #remove space between plots
levels = arange(10**-1,10, .2)
levels2 = arange(10**-2,10**2, 1)
plt.suptitle("Dusty Optical Lines", fontsize=14)
# ---------------------------------------------------
# NOTE(review): panels are numbered 0..15 here, while add_sub_plot treats
# sub_num as 1-based; panel indices are therefore offset by one.
for i in range(16):
    add_sub_plot(i)
ax1 = plt.subplot(4,4,1)
add_patches(ax1)
print "complete"
plt.savefig('Dusty_optical_lines.pdf')
plt.clf()
print "figure saved"
|
gpl-2.0
|
Titan-C/scikit-learn
|
sklearn/decomposition/__init__.py
|
66
|
1433
|
"""
The :mod:`sklearn.decomposition` module includes matrix decomposition
algorithms, including among others PCA, NMF or ICA. Most of the algorithms of
this module can be regarded as dimensionality reduction techniques.
"""
from .nmf import NMF, non_negative_factorization
from .pca import PCA, RandomizedPCA
from .incremental_pca import IncrementalPCA
from .kernel_pca import KernelPCA
from .sparse_pca import SparsePCA, MiniBatchSparsePCA
from .truncated_svd import TruncatedSVD
from .fastica_ import FastICA, fastica
from .dict_learning import (dict_learning, dict_learning_online, sparse_encode,
DictionaryLearning, MiniBatchDictionaryLearning,
SparseCoder)
from .factor_analysis import FactorAnalysis
from ..utils.extmath import randomized_svd
from .online_lda import LatentDirichletAllocation
# Public names exported by :mod:`sklearn.decomposition`.
__all__ = [
    'DictionaryLearning', 'FastICA', 'IncrementalPCA', 'KernelPCA',
    'MiniBatchDictionaryLearning', 'MiniBatchSparsePCA', 'NMF', 'PCA',
    'RandomizedPCA', 'SparseCoder', 'SparsePCA', 'dict_learning',
    'dict_learning_online', 'fastica', 'non_negative_factorization',
    'randomized_svd', 'sparse_encode', 'FactorAnalysis', 'TruncatedSVD',
    'LatentDirichletAllocation',
]
|
bsd-3-clause
|
sullivancolin/hexpy
|
tests/conftest.py
|
1
|
4597
|
# -*- coding: utf-8 -*-
"""Test Fixtures."""
import json
from typing import List
import pandas as pd
import pytest
from pandas.io.json import json_normalize
from hexpy import HexpySession
from hexpy.base import JSONDict
@pytest.fixture
def upload_items() -> List[JSONDict]:
    """Sample list of raw upload dictionaries for content-upload tests."""
    post_1 = {
        "title": "Example Title",
        "date": "2010-01-26T16:14:00+00:00",
        "guid": "http://www.crimsonhexagon.com/post1",
        "author": "me",
        "url": "http://www.crimsonhexagon.com/post1",
        "contents": "Example content",
        "language": "en",
        "gender": "M",
    }
    post_2 = {
        "title": "Example Title",
        "date": "2010-01-26T16:14:00+00:00",
        "author": "me",
        "url": "http://www.crimsonhexagon.com/post2",
        "guid": "http://www.crimsonhexagon.com/post2",
        "contents": "Example content",
        "language": "en",
        "geolocation": {"id": "USA.NY"},
    }
    # third post exercises the optional/custom fields
    post_3 = {
        "date": "2010-01-26T16:14:00+00:00",
        "contents": "Example content",
        "url": "http://www.crimsonhexagon.com/post3",
        "guid": "http://www.crimsonhexagon.com/post3",
        "title": "Example Title",
        "author": "me",
        "language": "en",
        "custom": {"CF1": "CF1_value", "CF2": "CF2_45.2", "CF3": "CF3_123"},
        "gender": "F",
        "pageId": "This is a pageId",
        "parentGuid": "123123",
        "authorProfileId": "1234567",
        "engagementType": "REPLY",
    }
    return [post_1, post_2, post_3]
@pytest.fixture
def upload_dataframe(upload_items: List[JSONDict]) -> pd.DataFrame:
    """Upload items flattened into a pandas DataFrame."""
    frame = json_normalize(upload_items)
    return frame
@pytest.fixture
def duplicate_items(upload_items: List[JSONDict]) -> List[JSONDict]:
    """Upload items where the second entry repeats the first entry's guid."""
    first, second = upload_items[0], upload_items[1]
    second["guid"] = first["guid"]
    return upload_items
@pytest.fixture
def train_items() -> List[JSONDict]:
    """Sample list of raw training dictionaries."""
    def item(post_url: str) -> JSONDict:
        # key order is kept identical to the original fixture so that
        # downstream DataFrame column order is unchanged
        return {
            "title": "Example Title",
            "date": "2010-01-26T16:14:00+00:00",
            "author": "me",
            "url": post_url,
            "contents": "Example content",
            "language": "en",
            "categoryid": 9_107_252_649,
        }

    return [
        item("http://www.crimsonhexagon.com/post1"),
        item("http://www.crimsonhexagon.com/post2"),
    ]
@pytest.fixture
def train_dataframe(train_items: List[JSONDict]) -> pd.DataFrame:
    """Training items as a pandas DataFrame, one row per item."""
    frame = pd.DataFrame.from_records(train_items)
    return frame
@pytest.fixture
def fake_session() -> HexpySession:
    """HexpySession built from a dummy token (no network access)."""
    token = "test-token-00000"
    return HexpySession(token=token)
@pytest.fixture
def posts_json() -> JSONDict:
    """Raw sample posts payload loaded from test data."""
    with open("tests/test_data/test_posts.json") as handle:
        return json.load(handle)
@pytest.fixture
def posts_df() -> pd.DataFrame:
    """Expected DataFrame for the posts JSON -> df conversion."""
    return pd.read_csv("tests/test_data/test_df.csv")
@pytest.fixture
def json_documentation() -> JSONDict:
    """Raw API documentation payload."""
    with open("tests/test_data/test_docs.json") as handle:
        docs = json.load(handle)
    return docs
@pytest.fixture
def markdown_documentation() -> str:
    """Expected markdown rendering of the API documentation."""
    with open("tests/test_data/test_docs.md") as handle:
        text = handle.read()
    return text
@pytest.fixture
def geography_json() -> JSONDict:
    """Expected geography metadata payload."""
    with open("tests/test_data/geography.json") as handle:
        payload = json.load(handle)
    return payload
@pytest.fixture
def results_json() -> JSONDict:
    """Expected monitor results payload."""
    with open("tests/test_data/results.json") as handle:
        payload = json.load(handle)
    return payload
@pytest.fixture
def monitor_details_json() -> JSONDict:
    """Expected monitor details payload."""
    with open("tests/test_data/monitor_details.json") as handle:
        payload = json.load(handle)
    return payload
@pytest.fixture
def analysis_request_dict() -> JSONDict:
    """Expected payload shape for an analysis request."""
    with open("tests/test_data/analysis.json") as handle:
        payload = json.load(handle)
    return payload
|
mit
|
goyalankit/po-compiler
|
object_files/networkx-1.8.1/build/lib.linux-i686-2.7/networkx/drawing/nx_pylab.py
|
22
|
27761
|
"""
**********
Matplotlib
**********
Draw networks with matplotlib.
See Also
--------
matplotlib: http://matplotlib.sourceforge.net/
pygraphviz: http://networkx.lanl.gov/pygraphviz/
"""
# Copyright (C) 2004-2012 by
# Aric Hagberg <[email protected]>
# Dan Schult <[email protected]>
# Pieter Swart <[email protected]>
# All rights reserved.
# BSD license.
import networkx as nx
from networkx.drawing.layout import shell_layout,\
circular_layout,spectral_layout,spring_layout,random_layout
__author__ = """Aric Hagberg ([email protected])"""
__all__ = ['draw',
'draw_networkx',
'draw_networkx_nodes',
'draw_networkx_edges',
'draw_networkx_labels',
'draw_networkx_edge_labels',
'draw_circular',
'draw_random',
'draw_spectral',
'draw_spring',
'draw_shell',
'draw_graphviz']
def draw(G, pos=None, ax=None, hold=None, **kwds):
    """Draw the graph G with Matplotlib.

    Draw the graph as a simple representation with no node labels or
    edge labels, using the full Matplotlib figure area and no axis
    labels by default.  See draw_networkx() for more full-featured
    drawing that allows title, axis labels etc.

    Parameters
    ----------
    G : graph
       A networkx graph
    pos : dictionary, optional
       A dictionary with nodes as keys and positions as values.
       If not specified a spring layout positioning will be computed.
       See networkx.layout for functions that compute node positions.
    ax : Matplotlib Axes object, optional
       Draw the graph in specified Matplotlib axes.
    hold : bool, optional
       Set the Matplotlib hold state.  If True subsequent draw
       commands will be added to the current axes.
    **kwds : optional keywords
       See networkx.draw_networkx() for a description of optional keywords.

    Examples
    --------
    >>> G=nx.dodecahedral_graph()
    >>> nx.draw(G)
    >>> nx.draw(G,pos=nx.spring_layout(G)) # use spring layout

    Notes
    -----
    This function has the same name as pylab.draw and pyplot.draw, so
    ``from networkx import *`` might overwrite the pylab.draw function.
    With pyplot use plt.draw() explicitly when needed.

    See Also
    --------
    draw_networkx()
    draw_networkx_nodes()
    draw_networkx_edges()
    draw_networkx_labels()
    draw_networkx_edge_labels()
    """
    try:
        import matplotlib.pyplot as plt
    except ImportError:
        raise ImportError("Matplotlib required for draw()")
    except RuntimeError:
        print("Matplotlib unable to open display")
        raise

    cf = plt.gcf() if ax is None else ax.get_figure()
    cf.set_facecolor('w')
    if ax is None:
        # reuse existing axes if the figure has any, else fill the figure
        if cf._axstack() is None:
            ax = cf.add_axes((0, 0, 1, 1))
        else:
            ax = cf.gca()

    b = plt.ishold()
    # BUGFIX: `hold` is a named parameter, so it can never appear in
    # **kwds -- the old `kwds.pop('hold', None)` always returned None and
    # the caller's `hold=` argument was silently ignored.  Honor it.
    if hold is not None:
        plt.hold(hold)
    try:
        draw_networkx(G, pos=pos, ax=ax, **kwds)
        ax.set_axis_off()
        plt.draw_if_interactive()
    except:
        # restore the hold state even on failure
        plt.hold(b)
        raise
    plt.hold(b)
    return
def draw_networkx(G, pos=None, with_labels=True, **kwds):
    """Draw the graph G using Matplotlib.

    Draws nodes, edges and (optionally) node labels, with many styling
    options.  See draw() for simple drawing without labels or axes.

    Parameters
    ----------
    G : graph
       A networkx graph
    pos : dictionary, optional
       A dictionary with nodes as keys and positions as values.
       If not specified a spring layout positioning will be computed.
       See networkx.layout for functions that compute node positions.
    with_labels : bool, optional (default=True)
       Set to True to draw labels on the nodes.
    **kwds : optional keywords
       Forwarded to draw_networkx_nodes(), draw_networkx_edges() and
       draw_networkx_labels(); see those functions for the full set
       (ax, nodelist, edgelist, node_size, node_color, node_shape,
       alpha, cmap, vmin, vmax, linewidths, width, edge_color, style,
       labels, font_size, font_color, font_weight, font_family, label).

    Examples
    --------
    >>> G=nx.dodecahedral_graph()
    >>> nx.draw(G)
    >>> nx.draw(G,pos=nx.spring_layout(G)) # use spring layout
    >>> import matplotlib.pyplot as plt
    >>> limits=plt.axis('off') # turn of axis

    See Also
    --------
    draw()
    draw_networkx_nodes()
    draw_networkx_edges()
    draw_networkx_labels()
    draw_networkx_edge_labels()
    """
    try:
        import matplotlib.pyplot as plt
    except ImportError:
        raise ImportError("Matplotlib required for draw()")
    except RuntimeError:
        print("Matplotlib unable to open display")
        raise

    if pos is None:
        # default to spring layout
        pos = nx.drawing.spring_layout(G)
    draw_networkx_nodes(G, pos, **kwds)
    draw_networkx_edges(G, pos, **kwds)
    if with_labels:
        draw_networkx_labels(G, pos, **kwds)
    plt.draw_if_interactive()
def draw_networkx_nodes(G, pos,
                        nodelist=None,
                        node_size=300,
                        node_color='r',
                        node_shape='o',
                        alpha=1.0,
                        cmap=None,
                        vmin=None,
                        vmax=None,
                        ax=None,
                        linewidths=None,
                        label=None,
                        **kwds):
    """Draw only the nodes of the graph G.

    Parameters
    ----------
    G : graph
       A networkx graph
    pos : dictionary
       A dictionary with nodes as keys and positions as values.
    ax : Matplotlib Axes object, optional
       Draw the graph in the specified Matplotlib axes.
    nodelist : list, optional
       Draw only the specified nodes (default G.nodes())
    node_size : scalar or array
       Size of nodes (default=300).  If an array it must match
       nodelist in length.
    node_color : color string, or array of floats
       Single color format string (default='r'), or a sequence of
       colors with the same length as nodelist.  Numeric values are
       mapped to colors using cmap and vmin,vmax (see
       matplotlib.scatter).
    node_shape : string
       A matplotlib.scatter marker, one of 'so^>v<dph8' (default='o').
    alpha : float
       The node transparency (default=1.0)
    cmap : Matplotlib colormap
       Colormap for mapping intensities of nodes (default=None)
    vmin,vmax : floats
       Minimum and maximum for node colormap scaling (default=None)
    linewidths : [None | scalar | sequence]
       Line width of symbol border (default =1.0)
    label : [None | string]
       Label for legend

    Returns
    -------
    The matplotlib PathCollection of drawn nodes, or None when the
    nodelist is empty.

    See Also
    --------
    draw()
    draw_networkx()
    draw_networkx_edges()
    draw_networkx_labels()
    draw_networkx_edge_labels()
    """
    try:
        import matplotlib.pyplot as plt
        import numpy
    except ImportError:
        raise ImportError("Matplotlib required for draw()")
    except RuntimeError:
        print("Matplotlib unable to open display")
        raise

    if ax is None:
        ax = plt.gca()
    if nodelist is None:
        nodelist = G.nodes()
    if not nodelist or len(nodelist) == 0:
        # empty nodelist, nothing to draw
        return None

    try:
        xy = numpy.asarray([pos[node] for node in nodelist])
    except KeyError as e:
        raise nx.NetworkXError('Node %s has no position.' % e)
    except ValueError:
        raise nx.NetworkXError('Bad value in node positions.')

    collection = ax.scatter(xy[:, 0], xy[:, 1],
                            s=node_size,
                            c=node_color,
                            marker=node_shape,
                            cmap=cmap,
                            vmin=vmin,
                            vmax=vmax,
                            alpha=alpha,
                            linewidths=linewidths,
                            label=label)
    collection.set_zorder(2)  # nodes drawn above edges
    return collection
def draw_networkx_edges(G, pos,
                        edgelist=None,
                        width=1.0,
                        edge_color='k',
                        style='solid',
                        alpha=None,
                        edge_cmap=None,
                        edge_vmin=None,
                        edge_vmax=None,
                        ax=None,
                        arrows=True,
                        label=None,
                        **kwds):
    """Draw only the edges of the graph G.

    Parameters
    ----------
    G : graph
       A networkx graph
    pos : dictionary
       A dictionary with nodes as keys and positions as values.
    edgelist : collection of edge tuples
       Draw only specified edges (default=G.edges())
    width : float
       Line width of edges (default=1.0)
    edge_color : color string, or array of floats
       Edge color.  Can be a single color format string (default='k'),
       or a sequence of colors with the same length as edgelist.
       If numeric values are specified they will be mapped to
       colors using the edge_cmap and edge_vmin,edge_vmax parameters.
    style : string
       Edge line style (default='solid') (solid|dashed|dotted,dashdot)
    alpha : float
       The edge transparency (default=None: keep per-color alpha)
    edge_cmap : Matplotlib colormap
       Colormap for mapping intensities of edges (default=None)
    edge_vmin,edge_vmax : floats
       Minimum and maximum for edge colormap scaling (default=None)
    ax : Matplotlib Axes object, optional
       Draw the graph in the specified Matplotlib axes.
    arrows : bool, optional (default=True)
       For directed graphs, if True draw arrowheads.
    label : [None | string]
       Label for legend

    Returns
    -------
    The LineCollection of drawn edges, or None when edgelist is empty.

    Notes
    -----
    For directed graphs, "arrows" (actually just thicker stubs) are drawn
    at the head end.  Arrows can be turned off with keyword arrows=False.
    Yes, it is ugly but drawing proper arrows with Matplotlib this
    way is tricky.

    See Also
    --------
    draw()
    draw_networkx()
    draw_networkx_nodes()
    draw_networkx_labels()
    draw_networkx_edge_labels()
    """
    try:
        import matplotlib
        import matplotlib.pyplot as plt
        import matplotlib.cbook as cb
        from matplotlib.colors import colorConverter, Colormap
        from matplotlib.collections import LineCollection
        import numpy
    except ImportError:
        raise ImportError("Matplotlib required for draw()")
    except RuntimeError:
        print("Matplotlib unable to open display")
        raise

    if ax is None:
        ax = plt.gca()
    if edgelist is None:
        edgelist = G.edges()
    if not edgelist or len(edgelist) == 0:  # no edges!
        return None

    # edge endpoint positions, shape (n_edges, 2, 2)
    edge_pos = numpy.asarray([(pos[e[0]], pos[e[1]]) for e in edgelist])

    if not cb.iterable(width):
        lw = (width,)
    else:
        lw = width

    if not cb.is_string_like(edge_color) \
           and cb.iterable(edge_color) \
           and len(edge_color) == len(edge_pos):
        if numpy.alltrue([cb.is_string_like(c) for c in edge_color]):
            # list of color letters such as ['k','r','k',...]
            edge_colors = tuple([colorConverter.to_rgba(c, alpha)
                                 for c in edge_color])
        elif numpy.alltrue([not cb.is_string_like(c) for c in edge_color]):
            # If color specs are given as (rgb) or (rgba) tuples, we're OK
            if numpy.alltrue([cb.iterable(c) and len(c) in (3, 4)
                              for c in edge_color]):
                edge_colors = tuple(edge_color)
            else:
                # numbers (which are going to be mapped with a colormap)
                edge_colors = None
        else:
            raise ValueError('edge_color must consist of either color names or numbers')
    else:
        if cb.is_string_like(edge_color) or len(edge_color) == 1:
            edge_colors = (colorConverter.to_rgba(edge_color, alpha),)
        else:
            raise ValueError('edge_color must be a single color or list of exactly m colors where m is the number or edges')

    edge_collection = LineCollection(edge_pos,
                                     colors=edge_colors,
                                     linewidths=lw,
                                     antialiaseds=(1,),
                                     linestyle=style,
                                     transOffset=ax.transData,
                                     )
    edge_collection.set_zorder(1)  # edges go behind nodes
    edge_collection.set_label(label)
    ax.add_collection(edge_collection)

    # Note: there was a bug in mpl regarding the handling of alpha values for
    # each line in a LineCollection.  It was fixed in matplotlib in r7184 and
    # r7189 (June 6 2009).  We should then not set the alpha value globally,
    # since the user can instead provide per-edge alphas now.  Only set it
    # globally if provided as a scalar.
    if cb.is_numlike(alpha):
        edge_collection.set_alpha(alpha)

    if edge_colors is None:
        # numeric colors: map them through the colormap
        if edge_cmap is not None:
            assert isinstance(edge_cmap, Colormap)
        edge_collection.set_array(numpy.asarray(edge_color))
        edge_collection.set_cmap(edge_cmap)
        if edge_vmin is not None or edge_vmax is not None:
            edge_collection.set_clim(edge_vmin, edge_vmax)
        else:
            edge_collection.autoscale()

    arrow_collection = None
    if G.is_directed() and arrows:
        # a directed graph hack: draw thick line segments at the head end
        # of each edge instead of proper arrowheads
        arrow_colors = edge_colors
        a_pos = []
        p = 1.0 - 0.25  # make head segment 25 percent of edge length
        for src, dst in edge_pos:
            x1, y1 = src
            x2, y2 = dst
            dx = x2 - x1  # x offset
            dy = y2 - y1  # y offset
            d = numpy.sqrt(float(dx ** 2 + dy ** 2))  # length of edge
            if d == 0:  # source and target at same position
                continue
            # BUGFIX: these were previously three independent `if`
            # statements, so the vertical-edge (dx==0) assignments were
            # always clobbered by the general arctan2 branch; use a
            # proper if/elif/else chain instead.
            if dx == 0:  # vertical edge
                xa = x2
                ya = dy * p + y1
            elif dy == 0:  # horizontal edge
                ya = y2
                xa = dx * p + x1
            else:
                theta = numpy.arctan2(dy, dx)
                xa = p * d * numpy.cos(theta) + x1
                ya = p * d * numpy.sin(theta) + y1
            a_pos.append(((xa, ya), (x2, y2)))
        arrow_collection = LineCollection(a_pos,
                                          colors=arrow_colors,
                                          linewidths=[4 * ww for ww in lw],
                                          antialiaseds=(1,),
                                          transOffset=ax.transData,
                                          )
        arrow_collection.set_zorder(1)  # edges go behind nodes
        arrow_collection.set_label(label)
        ax.add_collection(arrow_collection)

    # expand the view limits to include the new collection
    minx = numpy.amin(numpy.ravel(edge_pos[:, :, 0]))
    maxx = numpy.amax(numpy.ravel(edge_pos[:, :, 0]))
    miny = numpy.amin(numpy.ravel(edge_pos[:, :, 1]))
    maxy = numpy.amax(numpy.ravel(edge_pos[:, :, 1]))
    w = maxx - minx
    h = maxy - miny
    padx, pady = 0.05 * w, 0.05 * h
    corners = (minx - padx, miny - pady), (maxx + padx, maxy + pady)
    ax.update_datalim(corners)
    ax.autoscale_view()

    return edge_collection
def draw_networkx_labels(G, pos,
                         labels=None,
                         font_size=12,
                         font_color='k',
                         font_family='sans-serif',
                         font_weight='normal',
                         alpha=1.0,
                         ax=None,
                         **kwds):
    """Draw node labels on the graph G.

    Parameters
    ----------
    G : graph
       A networkx graph
    pos : dictionary
       A dictionary with nodes as keys and positions as values.
    labels : dictionary, optional (default=None)
       Node labels in a dictionary keyed by node of text labels;
       defaults to labelling every node with itself.
    font_size : int
       Font size for text labels (default=12)
    font_color : string
       Font color string (default='k' black)
    font_family : string
       Font family (default='sans-serif')
    font_weight : string
       Font weight (default='normal')
    alpha : float
       The text transparency (default=1.0).
       NOTE(review): accepted but currently not forwarded to the text
       artists -- confirm before relying on it.
    ax : Matplotlib Axes object, optional
       Draw the graph in the specified Matplotlib axes.

    Returns
    -------
    dict mapping node -> matplotlib Text instance.

    See Also
    --------
    draw()
    draw_networkx()
    draw_networkx_nodes()
    draw_networkx_edges()
    draw_networkx_edge_labels()
    """
    try:
        import matplotlib.pyplot as plt
        import matplotlib.cbook as cb
    except ImportError:
        raise ImportError("Matplotlib required for draw()")
    except RuntimeError:
        print("Matplotlib unable to open display")
        raise

    if ax is None:
        ax = plt.gca()
    if labels is None:
        labels = dict((n, n) for n in G.nodes())

    # optional alignment overrides
    horizontalalignment = kwds.get('horizontalalignment', 'center')
    verticalalignment = kwds.get('verticalalignment', 'center')

    text_items = {}  # there is no text collection so we'll fake one
    for node, node_label in labels.items():
        x, y = pos[node]
        if not cb.is_string_like(node_label):
            # this will cause "1" and 1 to be labeled the same
            node_label = str(node_label)
        text_items[node] = ax.text(x, y,
                                   node_label,
                                   size=font_size,
                                   color=font_color,
                                   family=font_family,
                                   weight=font_weight,
                                   horizontalalignment=horizontalalignment,
                                   verticalalignment=verticalalignment,
                                   transform=ax.transData,
                                   clip_on=True,
                                   )
    return text_items
def draw_networkx_edge_labels(G, pos,
                              edge_labels=None,
                              label_pos=0.5,
                              font_size=10,
                              font_color='k',
                              font_family='sans-serif',
                              font_weight='normal',
                              alpha=1.0,
                              bbox=None,
                              ax=None,
                              rotate=True,
                              **kwds):
    """Draw edge labels.

    Parameters
    ----------
    G : graph
       A networkx graph
    pos : dictionary
       A dictionary with nodes as keys and positions as values.
    edge_labels : dictionary
       Edge labels in a dictionary keyed by edge two-tuple of text
       labels (default=None: each edge's full data dict is used).
       Only labels for the keys in the dictionary are drawn.
    label_pos : float
       Position of edge label along edge (0=head, 0.5=center, 1=tail)
    font_size : int
       Font size for text labels (default=10)
    font_color : string
       Font color string (default='k' black)
    font_family : string
       Font family (default='sans-serif')
    font_weight : string
       Font weight (default='normal')
    alpha : float
       The text transparency (default=1.0).
       NOTE(review): accepted but not forwarded to the text artist.
    bbox : Matplotlib bbox
       Specify text box shape and colors; defaults to a white
       rounded box.
    ax : Matplotlib Axes object, optional
       Draw the graph in the specified Matplotlib axes.
    rotate : bool (default=True)
       Rotate labels to follow the edge direction.

    Returns
    -------
    dict mapping edge two-tuple -> matplotlib Text instance.

    See Also
    --------
    draw()
    draw_networkx()
    draw_networkx_nodes()
    draw_networkx_edges()
    draw_networkx_labels()
    """
    try:
        import matplotlib.pyplot as plt
        import matplotlib.cbook as cb
        import numpy
    except ImportError:
        raise ImportError("Matplotlib required for draw()")
    except RuntimeError:
        print("Matplotlib unable to open display")
        raise

    if ax is None:
        ax = plt.gca()
    if edge_labels is None:
        labels = dict(((u, v), d) for u, v, d in G.edges(data=True))
    else:
        labels = edge_labels

    text_items = {}
    for (n1, n2), edge_label in labels.items():
        x1, y1 = pos[n1]
        x2, y2 = pos[n2]
        # interpolate the anchor point along the edge
        x = x1 * label_pos + x2 * (1.0 - label_pos)
        y = y1 * label_pos + y2 * (1.0 - label_pos)

        if rotate:
            angle = numpy.arctan2(y2 - y1, x2 - x1) / (2.0 * numpy.pi) * 360  # degrees
            # make label orientation "right-side-up"
            if angle > 90:
                angle -= 180
            if angle < -90:
                angle += 180
            # transform data coordinate angle to screen coordinate angle
            xy = numpy.array((x, y))
            trans_angle = ax.transData.transform_angles(numpy.array((angle,)),
                                                        xy.reshape((1, 2)))[0]
        else:
            trans_angle = 0.0

        if bbox is None:
            # use default box of white with white border
            bbox = dict(boxstyle='round',
                        ec=(1.0, 1.0, 1.0),
                        fc=(1.0, 1.0, 1.0),
                        )
        if not cb.is_string_like(edge_label):
            # this will cause "1" and 1 to be labeled the same
            edge_label = str(edge_label)

        # optional alignment overrides
        horizontalalignment = kwds.get('horizontalalignment', 'center')
        verticalalignment = kwds.get('verticalalignment', 'center')

        text_items[(n1, n2)] = ax.text(x, y,
                                       edge_label,
                                       size=font_size,
                                       color=font_color,
                                       family=font_family,
                                       weight=font_weight,
                                       horizontalalignment=horizontalalignment,
                                       verticalalignment=verticalalignment,
                                       rotation=trans_angle,
                                       transform=ax.transData,
                                       bbox=bbox,
                                       zorder=1,
                                       clip_on=True,
                                       )
    return text_items
def draw_circular(G, **kwargs):
    """Draw the graph G with its nodes arranged on a circle.

    Keyword arguments are forwarded to draw().
    """
    layout = circular_layout(G)
    draw(G, layout, **kwargs)
def draw_random(G, **kwargs):
    """Draw the graph G with nodes placed by a random layout.

    Keyword arguments are forwarded to draw().
    """
    layout = random_layout(G)
    draw(G, layout, **kwargs)
def draw_spectral(G, **kwargs):
    """Draw the graph G with a spectral layout.

    Keyword arguments are forwarded to draw().
    """
    layout = spectral_layout(G)
    draw(G, layout, **kwargs)
def draw_spring(G, **kwargs):
    """Draw the graph G with a spring layout.

    Keyword arguments are forwarded to draw().
    """
    layout = spring_layout(G)
    draw(G, layout, **kwargs)
def draw_shell(G, **kwargs):
    """Draw networkx graph with shell layout.

    An optional ``nlist`` keyword (list of node lists, one per shell) is
    consumed here and passed to shell_layout(); remaining keyword
    arguments are forwarded to draw().
    """
    # pop() replaces the old `!= None` test plus explicit del, and uses
    # identity comparison semantics implicitly; `nlist` must not be
    # forwarded to draw(), which does not accept it.
    nlist = kwargs.pop('nlist', None)
    draw(G, shell_layout(G, nlist=nlist), **kwargs)
def draw_graphviz(G, prog="neato", **kwargs):
    """Draw networkx graph with a graphviz layout.

    `prog` names the graphviz layout program (default "neato");
    remaining keyword arguments are forwarded to draw().
    """
    layout = nx.drawing.graphviz_layout(G, prog)
    draw(G, layout, **kwargs)
def draw_nx(G, pos, **kwds):
    """Backward-compatibility alias; use draw() or draw_networkx()."""
    draw(G, pos, **kwds)
# fixture for nose tests
def setup_module(module):
from nose import SkipTest
try:
import matplotlib as mpl
mpl.use('PS',warn=False)
import matplotlib.pyplot as plt
except:
raise SkipTest("matplotlib not available")
|
apache-2.0
|
ua-snap/downscale
|
snap_scripts/old_scripts/tem_iem_older_scripts_april2018/tem_inputs_v2/calc_ra_monthly_tem_iem.py
|
1
|
5385
|
# # # # # # # # # # # # # # # # # # # # # # # #
# PORT S.McAffee's Ra SCRIPT TO Python
# # # # # # # # # # # # # # # # # # # # # # # #
def coordinates(fn=None, meta=None, numpy_array=None, input_crs=None, to_latlong=False):
    '''
    Return the centroid coordinates of every grid cell of a raster as a
    pair of 2-D numpy arrays.

    Arguments:
        fn = [str] path to a rasterio-readable raster; mutually
                exclusive with (meta, numpy_array)
        meta = [dict] rasterio-style metadata dict containing an
                'affine' entry
        numpy_array = [np.ndarray] 2-D array matching `meta`
        input_crs = [proj4 dict/str] CRS of the inputs (required for
                to_latlong=True)
        to_latlong = [bool] if True, reproject the centroid coordinates
                to geographic lat/long

    Returns:
        (eastings, northings) -- or (longs, lats) when to_latlong=True --
        as two 2-D numpy arrays.

    Raises:
        ValueError when neither `fn` nor (meta, numpy_array) is given,
        or when to_latlong=True without an input_crs.
    '''
    import rasterio
    import numpy as np
    from affine import Affine
    from pyproj import Proj, transform

    if fn:
        # Read raster: affine transform, CRS and band 1
        with rasterio.open(fn) as r:
            T0 = r.affine  # upper-left pixel corner affine transform
            p1 = Proj(r.crs)
            A = r.read(1)  # pixel values
    elif (meta is not None) and (numpy_array is not None):
        # BUGFIX: was bitwise `&` on comparisons; `and` is the intended
        # boolean conjunction.
        A = numpy_array
        T0 = meta['affine']
        p1 = Proj(input_crs) if input_crs is not None else None
    else:
        # BUGFIX: the exception object was previously constructed but
        # never raised, letting execution fall through to a NameError.
        raise ValueError('check inputs: pass `fn` or both `meta` and `numpy_array`')

    # All rows and columns
    cols, rows = np.meshgrid(np.arange(A.shape[1]), np.arange(A.shape[0]))
    # Get affine transform for pixel centres (corner + half a pixel)
    T1 = T0 * Affine.translation(0.5, 0.5)
    # Function to convert pixel row/column index (from 0) to easting/northing at centre
    rc2en = lambda r, c: (c, r) * T1
    # All eastings and northings (there is probably a faster way to do this)
    # NOTE: `float` replaces the deprecated `np.float` alias (removed in
    # NumPy 1.24) -- identical behavior, np.float was plain float.
    eastings, northings = np.vectorize(rc2en, otypes=[float, float])(rows, cols)

    if to_latlong == False:
        return eastings, northings
    elif (to_latlong == True) and (input_crs is not None):
        # Project all longitudes, latitudes
        longs, lats = transform(p1, p1.to_latlong(), eastings, northings)
        return longs, lats
    else:
        # BUGFIX: previously constructed-but-unraised BaseException
        raise ValueError('cant reproject to latlong without an input_crs')
def calc_ra(day, lat):
    '''
    calculate Ra (a direct port to Python from S.McAfee R script) based on Allen et.al 1998

    ARGUMENTS:
    ----------
    day = [int] Ordinal (1-365*) Day of the year to compute Ra
    lat = [np.ndarray] 2-D Numpy array with Latitude values converted to radians

    RETURNS:
    --------
    numpy.ndarray containing Ra values over the AOI of `lat`
    '''
    import numpy as np
    # Calculate the earth-sun distance, which is a function solely of Julian day.
    # It is a single value for each day.
    d = 1 + (0.033 * np.cos((2 * np.pi * day / 365)))
    # Calculate declination, a function of Julian day.  Single value per day.
    dc = 0.409 * np.sin(((2 * np.pi / 365) * day) - 1.39)
    # Sunset hour angle.  The cast to complex + real + nan_to_num clamps the
    # polar day/night cases where |tan(dc)*tan(lat)| > 1 and arccos is
    # undefined over the reals.
    # BUGFIX: `np.complex_` (an alias for the builtin `complex`) was removed
    # in NumPy 2.0; use `complex` directly -- identical behavior.
    w = np.nan_to_num(np.real(np.arccos((-1 * np.tan(dc) * np.tan(lat))).astype(complex)))
    return (24 * 60 / np.pi) * d * 0.082 * (
        w * np.sin(lat) * np.sin(dc) + np.cos(lat) * np.cos(dc) * np.sin(w))
if __name__ == '__main__':
    # Driver: compute monthly-mean Ra ("girr") grids on the template
    # raster's grid and write one GeoTIFF per month.
    import rasterio, datetime, os
    import numpy as np
    import pandas as pd
    import geopandas as gpd
    from functools import partial
    from pathos.mp_map import mp_map
    from shapely.geometry import Point
    from pyproj import Proj, transform
    # template raster: supplies the grid, the data mask and output metadata
    fn = '/workspace/Shared/Tech_Projects/ESGF_Data_Access/project_data/tem_data_sep2016/downscaled/NCAR-CCSM4/rcp26/hur/hur_mean_pct_ar5_NCAR-CCSM4_rcp26_01_2006.tif'
    output_path = '/workspace/Shared/Tech_Projects/ESGF_Data_Access/project_data/tem_data_sep2016/girr'
    lons, lats = coordinates( fn )
    rst = rasterio.open( fn )
    # mask those lats so we dont compute where we dont need to:
    data_ind = np.where( rst.read_masks( 1 ) != 0 )
    pts = zip( lons[ data_ind ].ravel().tolist(), lats[ data_ind ].ravel().tolist() )
    # radians from pts -- reproject EPSG:3338 centroids to geographic radians
    p1 = Proj( init='epsg:3338' )
    p2 = Proj( init='epsg:4326' )
    transform_p = partial( transform, p1=p1, p2=p2 )
    pts_radians = [ transform_p( x=lon, y=lat, radians=True ) for lon,lat in pts ]
    # keep only the latitude column; calc_ra() needs latitudes in radians
    lat_rad = pd.DataFrame( pts_radians, columns=['lon','lat']).lat
    # # # # TESTING STUFF # # # # # # #
    # forget the above for testing, lets use Stephs radians
    # latr = rasterio.open('/workspace/Shared/Tech_Projects/ESGF_Data_Access/project_data/tem_data_sep2016/radiance/radians.txt')
    # latr = latr.read( 1 )
    # # # # # # # # # # # # # # # # # #
    # calc ordinal days to compute
    ordinal_days = range( 1, 365+1, 1 )
    # make a monthly grouper of ordinal days
    ordinal_to_months = [ str(datetime.date.fromordinal( i ).month) for i in ordinal_days ]
    # convert those months to zero-padded two-character strings
    ordinal_to_months = [ ('0'+month if len( month ) < 2 else month) for month in ordinal_to_months ]
    # calc girr -- one daily Ra grid per ordinal day, in parallel
    f = partial( calc_ra, lat=lat_rad )
    Ra = mp_map( f, ordinal_days, nproc=32 )
    # group the daily grids by month and average them into a monthly climatology
    Ra_monthlies = pd.Series( Ra ).groupby( ordinal_to_months ).apply( lambda x: np.array(x.tolist()).mean( axis=0 ) )
    # iteratively put them back in the indexed locations we took them from
    meta = rst.meta
    meta.pop( 'transform' )
    meta.update( compress='lzw', count=1, dtype='float32' )
    for month in Ra_monthlies.index:
        # start from the template band, then overwrite the masked-in cells
        arr = rst.read( 1 )
        arr[ data_ind ] = Ra_monthlies.loc[ month ].tolist()
        output_filename = os.path.join( output_path, 'girr_w-m2_{}.tif'.format(str( month ) ) )
        with rasterio.open( output_filename, 'w', **meta ) as out:
            out.write( arr.astype( np.float32 ), 1 )
    # # # # #UNNEEDED OLD STUFF
    # JUNK FOR NOW
    # SOME SETUP
    # # this little bit just makes some sample grids to use in calculations
    # fn = '/Users/malindgren/Documents/downscale_epscor/sept_fix/CalcRa/test_calcRa_4326_small.tif'
    # rst = rasterio.open( fn )
    # meta = rst.meta
    # meta.update( compress='lzw', count=1, nodata=None )
    # meta.pop( 'transform' )
    # new_fn = fn.replace( '_small', '' )
    # with rasterio.open( new_fn, 'w', **meta ) as out:
    #     out.write( rst.read(1), 1 )
    # # # # #
|
mit
|
russel1237/scikit-learn
|
benchmarks/bench_multilabel_metrics.py
|
276
|
7138
|
#!/usr/bin/env python
"""
A comparison of multilabel target formats and metrics over them
"""
from __future__ import division
from __future__ import print_function
from timeit import timeit
from functools import partial
import itertools
import argparse
import sys
import matplotlib.pyplot as plt
import scipy.sparse as sp
import numpy as np
from sklearn.datasets import make_multilabel_classification
from sklearn.metrics import (f1_score, accuracy_score, hamming_loss,
jaccard_similarity_score)
from sklearn.utils.testing import ignore_warnings
# Metric name -> callable(y_true, y_pred); all are timed by benchmark().
METRICS = {
    'f1': partial(f1_score, average='micro'),
    'f1-by-sample': partial(f1_score, average='samples'),
    'accuracy': accuracy_score,
    'hamming': hamming_loss,
    'jaccard': jaccard_similarity_score,
}
# Format name -> converter from a dense indicator matrix into an
# alternative multilabel representation.
FORMATS = {
    'sequences': lambda y: [list(np.flatnonzero(s)) for s in y],
    'dense': lambda y: y,
    'csr': lambda y: sp.csr_matrix(y),
    'csc': lambda y: sp.csc_matrix(y),
}
@ignore_warnings
def benchmark(metrics=tuple(v for k, v in sorted(METRICS.items())),
              formats=tuple(v for k, v in sorted(FORMATS.items())),
              samples=1000, classes=4, density=.2,
              n_times=5):
    """Times metric calculations for a number of inputs
    Parameters
    ----------
    metrics : array-like of callables (1d or 0d)
        The metric functions to time.
    formats : array-like of callables (1d or 0d)
        These may transform a dense indicator matrix into multilabel
        representation.
    samples : array-like of ints (1d or 0d)
        The number of samples to generate as input.
    classes : array-like of ints (1d or 0d)
        The number of classes in the input.
    density : array-like of ints (1d or 0d)
        The density of positive labels in the input.
    n_times : int
        Time calling the metric n_times times.
    Returns
    -------
    array of floats shaped like (metrics, formats, samples, classes, density)
        Time in seconds.
    """
    # Normalise every parameter to 1d so scalars and sequences are handled
    # uniformly below.
    metrics = np.atleast_1d(metrics)
    samples = np.atleast_1d(samples)
    classes = np.atleast_1d(classes)
    density = np.atleast_1d(density)
    formats = np.atleast_1d(formats)
    out = np.zeros((len(metrics), len(formats), len(samples), len(classes),
                    len(density)), dtype=float)
    it = itertools.product(samples, classes, density)
    for i, (s, c, d) in enumerate(it):
        # Two independently-seeded random multilabel matrices serve as the
        # ground truth and the "prediction".
        _, y_true = make_multilabel_classification(n_samples=s, n_features=1,
                                                   n_classes=c, n_labels=d * c,
                                                   random_state=42)
        _, y_pred = make_multilabel_classification(n_samples=s, n_features=1,
                                                   n_classes=c, n_labels=d * c,
                                                   random_state=84)
        for j, f in enumerate(formats):
            f_true = f(y_true)
            f_pred = f(y_pred)
            for k, metric in enumerate(metrics):
                t = timeit(partial(metric, f_true, f_pred), number=n_times)
                # i walks product(samples, classes, density) in C order,
                # matching the flat layout of out's last three axes.
                out[k, j].flat[i] = t
    return out
def _tabulate(results, metrics, formats):
    """Print a timing table: one row per metric, one column per format.

    Only the last value along the samples/classes/density axes is shown.
    """
    cw = max(8, 1 + max(len(name) for name in formats))
    fw = max(len(name) for name in metrics)
    header_fmt = '{:<{fw}s}' + len(formats) * '{:>{cw}s}'
    row_fmt = '{:<{fw}s}' + len(formats) * '{:>{cw}.3f}'
    print(header_fmt.format('Metric', *formats, cw=cw, fw=fw))
    # keep only the last slice of the three varying-parameter axes
    last_slice = results[:, :, -1, -1, -1]
    for name, timings in zip(metrics, last_slice):
        print(row_fmt.format(name, *timings, cw=cw, fw=fw))
def _plot(results, metrics, formats, title, x_ticks, x_label,
          format_markers=('x', '|', 'o', '+'),
          metric_colors=('c', 'm', 'y', 'k', 'g', 'r', 'b')):
    """
    Draw one timing curve per (metric, format) pair against the varying
    parameter named by x_label.
    """
    fig = plt.figure('scikit-learn multilabel metrics benchmarks')
    plt.title(title)
    ax = fig.add_subplot(111)
    for metric_idx, metric_name in enumerate(metrics):
        # colors cycle per metric; markers distinguish the formats
        color = metric_colors[metric_idx % len(metric_colors)]
        for fmt_idx, fmt_name in enumerate(formats):
            ax.plot(x_ticks, results[metric_idx, fmt_idx].flat,
                    label='{}, {}'.format(metric_name, fmt_name),
                    marker=format_markers[fmt_idx],
                    color=color)
    ax.set_xlabel(x_label)
    ax.set_ylabel('Time (s)')
    ax.legend()
    plt.show()
if __name__ == "__main__":
    # Command-line driver: time the selected metrics/formats and print a
    # table; optionally sweep one parameter and plot the timing curves.
    ap = argparse.ArgumentParser()
    ap.add_argument('metrics', nargs='*', default=sorted(METRICS),
                    help='Specifies metrics to benchmark, defaults to all. '
                         'Choices are: {}'.format(sorted(METRICS)))
    ap.add_argument('--formats', nargs='+', choices=sorted(FORMATS),
                    help='Specifies multilabel formats to benchmark '
                         '(defaults to all).')
    ap.add_argument('--samples', type=int, default=1000,
                    help='The number of samples to generate')
    ap.add_argument('--classes', type=int, default=10,
                    help='The number of classes')
    ap.add_argument('--density', type=float, default=.2,
                    help='The average density of labels per sample')
    ap.add_argument('--plot', choices=['classes', 'density', 'samples'],
                    default=None,
                    help='Plot time with respect to this parameter varying '
                         'up to the specified value')
    ap.add_argument('--n-steps', default=10, type=int,
                    help='Plot this many points for each metric')
    ap.add_argument('--n-times',
                    default=5, type=int,
                    help="Time performance over n_times trials")
    args = ap.parse_args()
    if args.plot is not None:
        # Replace the scalar value of the swept parameter with an array of
        # n_steps values up to it (integer-rounded for discrete parameters).
        max_val = getattr(args, args.plot)
        if args.plot in ('classes', 'samples'):
            min_val = 2
        else:
            min_val = 0
        steps = np.linspace(min_val, max_val, num=args.n_steps + 1)[1:]
        if args.plot in ('classes', 'samples'):
            steps = np.unique(np.round(steps).astype(int))
        setattr(args, args.plot, steps)
    if args.metrics is None:
        args.metrics = sorted(METRICS)
    if args.formats is None:
        args.formats = sorted(FORMATS)
    results = benchmark([METRICS[k] for k in args.metrics],
                        [FORMATS[k] for k in args.formats],
                        args.samples, args.classes, args.density,
                        args.n_times)
    _tabulate(results, args.metrics, args.formats)
    if args.plot is not None:
        print('Displaying plot', file=sys.stderr)
        # Title mentions the two parameters held fixed, not the swept one.
        title = ('Multilabel metrics with %s' %
                 ', '.join('{0}={1}'.format(field, getattr(args, field))
                           for field in ['samples', 'classes', 'density']
                           if args.plot != field))
        _plot(results, args.metrics, args.formats, title, steps, args.plot)
|
bsd-3-clause
|
rl-institut/reegis-hp
|
reegis_hp/berlin_hp/berlin_brdbg_example_plot.py
|
7
|
3580
|
#!/usr/bin/python3
# -*- coding: utf-8
import logging
import matplotlib.pyplot as plt
from oemof.outputlib import to_pandas as tpd
from oemof.tools import logger
from oemof.core import energy_system as es
# The following dictionaries are a workaround due to issue #26
# Maps the stringified oemof result keys to human-readable legend labels.
rename = {
    "(val, ('sink', 'Landkreis Wittenberg', 'elec'))": "elec demand",
    "(val, ('sto_simple', 'Landkreis Wittenberg', 'elec'))": "battery",
    "(val, ('transport', 'bus', 'Stadt Dessau-Rosslau', 'elec', 'bus', 'Landkreis Wittenberg', 'elec'))": "to Dessau",
    "(val, ('FixedSrc', 'Landkreis Wittenberg', 'pv_pwr'))": "pv power",
    "(val, ('FixedSrc', 'Landkreis Wittenberg', 'wind_pwr'))": "wind power",
    "(val, ('transformer', 'Landkreis Wittenberg', 'natural_gas'))": "gas power plant",
    "(val, ('transport', 'bus', 'Landkreis Wittenberg', 'elec', 'bus', 'Stadt Dessau-Rosslau', 'elec'))": "to Wittenberg",
    "(val, ('sink', 'Stadt Dessau-Rosslau', 'elec'))": "elec demand",
    "(val, ('sto_simple', 'Stadt Dessau-Rosslau', 'elec'))": "battery",
    "(val, ('FixedSrc', 'Stadt Dessau-Rosslau', 'pv_pwr'))": "pv power",
    "(val, ('FixedSrc', 'Stadt Dessau-Rosslau', 'wind_pwr'))": "wind power",
    "(val, ('transformer', 'Stadt Dessau-Rosslau', 'lignite'))": "lignite power plant",
    "(val, ('transformer', 'Stadt Dessau-Rosslau', 'natural_gas'))": "gas power plant",
}
# Define a color set for the plots.
# Both regions use the same color per technology so the subplots match.
cdict = {}
cdict["('FixedSrc', 'Landkreis Wittenberg', 'wind_pwr')"] = '#4536bb'
cdict["('FixedSrc', 'Landkreis Wittenberg', 'pv_pwr')"] = '#ffcc00'
cdict["('FixedSrc', 'Stadt Dessau-Rosslau', 'wind_pwr')"] = '#4536bb'
cdict["('FixedSrc', 'Stadt Dessau-Rosslau', 'pv_pwr')"] = '#ffcc00'
cdict["('transport', 'bus', 'Landkreis Wittenberg', 'elec', 'bus', 'Stadt Dessau-Rosslau', 'elec')"] = '#643780'
cdict["('transport', 'bus', 'Stadt Dessau-Rosslau', 'elec', 'bus', 'Landkreis Wittenberg', 'elec')"] = '#643780'
cdict["('transformer', 'Landkreis Wittenberg', 'natural_gas')"] = '#7c7c7c'
cdict["('transformer', 'Stadt Dessau-Rosslau', 'natural_gas')"] = '#7c7c7c'
cdict["('transformer', 'Landkreis Wittenberg', 'lignite')"] = '#000000'
cdict["('transformer', 'Stadt Dessau-Rosslau', 'lignite')"] = '#000000'
cdict["('sto_simple', 'Landkreis Wittenberg', 'elec')"] = '#ff5e5e'
cdict["('sto_simple', 'Stadt Dessau-Rosslau', 'elec')"] = '#ff5e5e'
cdict["('sink', 'Landkreis Wittenberg', 'elec')"] = '#0cce1e'
cdict["('sink', 'Stadt Dessau-Rosslau', 'elec')"] = '#0cce1e'
# Define the oemof default logger
logger.define_logging()
# Create an energy system
TwoRegExample = es.EnergySystem()
# Restoring a dumped EnergySystem
# NOTE(review): restore() reads a previously dumped system from the oemof
# default dump location — the dump must exist for this script to run.
logging.info(TwoRegExample.restore())
esplot = tpd.DataFramePlot(energy_system=TwoRegExample)
fig = plt.figure(figsize=(24, 14))
plt.rc('legend', **{'fontsize': 19})
plt.rcParams.update({'font.size': 14})
plt.style.use('ggplot')
n = 1
# Loop over the regions to plot them.
# One stacked-subplot per region, showing one week of June 2010.
for region in TwoRegExample.regions:
    uid = str(('bus', region.name, 'elec'))
    esplot.ax = fig.add_subplot(2, 1, n)
    n += 1
    handles, labels = esplot.io_plot(
        uid, cdict,
        date_from="2010-06-01 00:00:00",
        date_to="2010-06-8 00:00:00",
        line_kwa={'linewidth': 4})
    new_labels = []
    for lab in labels:
        # fall back to the raw key when no friendly name is registered
        new_labels.append(rename.get(str(lab), lab))
    esplot.ax.set_ylabel('Power in MW')
    esplot.ax.set_xlabel('')
    esplot.ax.set_title(region.name)
    esplot.set_datetime_ticks(tick_distance=24, date_format='%d-%m-%Y')
    esplot.outside_legend(handles=handles, labels=new_labels)
plt.show()
|
gpl-3.0
|
abhishekgahlot/scikit-learn
|
examples/decomposition/plot_kernel_pca.py
|
353
|
2011
|
"""
==========
Kernel PCA
==========
This example shows that Kernel PCA is able to find a projection of the data
that makes data linearly separable.
"""
print(__doc__)
# Authors: Mathieu Blondel
# Andreas Mueller
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA, KernelPCA
from sklearn.datasets import make_circles
np.random.seed(0)
# Two concentric circles: not linearly separable in the input space.
X, y = make_circles(n_samples=400, factor=.3, noise=.05)
# RBF kernel PCA with an (approximate) inverse mapping back to input space.
kpca = KernelPCA(kernel="rbf", fit_inverse_transform=True, gamma=10)
X_kpca = kpca.fit_transform(X)
X_back = kpca.inverse_transform(X_kpca)
pca = PCA()
X_pca = pca.fit_transform(X)
# Plot results
plt.figure()
plt.subplot(2, 2, 1, aspect='equal')
plt.title("Original space")
reds = y == 0
blues = y == 1
plt.plot(X[reds, 0], X[reds, 1], "ro")
plt.plot(X[blues, 0], X[blues, 1], "bo")
plt.xlabel("$x_1$")
plt.ylabel("$x_2$")
# Dense grid over the input plane to draw the kernel-PCA contour lines.
X1, X2 = np.meshgrid(np.linspace(-1.5, 1.5, 50), np.linspace(-1.5, 1.5, 50))
X_grid = np.array([np.ravel(X1), np.ravel(X2)]).T
# projection on the first principal component (in the phi space)
Z_grid = kpca.transform(X_grid)[:, 0].reshape(X1.shape)
plt.contour(X1, X2, Z_grid, colors='grey', linewidths=1, origin='lower')
plt.subplot(2, 2, 2, aspect='equal')
plt.plot(X_pca[reds, 0], X_pca[reds, 1], "ro")
plt.plot(X_pca[blues, 0], X_pca[blues, 1], "bo")
plt.title("Projection by PCA")
plt.xlabel("1st principal component")
plt.ylabel("2nd component")
plt.subplot(2, 2, 3, aspect='equal')
plt.plot(X_kpca[reds, 0], X_kpca[reds, 1], "ro")
plt.plot(X_kpca[blues, 0], X_kpca[blues, 1], "bo")
plt.title("Projection by KPCA")
plt.xlabel("1st principal component in space induced by $\phi$")
plt.ylabel("2nd component")
plt.subplot(2, 2, 4, aspect='equal')
plt.plot(X_back[reds, 0], X_back[reds, 1], "ro")
plt.plot(X_back[blues, 0], X_back[blues, 1], "bo")
plt.title("Original space after inverse transform")
plt.xlabel("$x_1$")
plt.ylabel("$x_2$")
plt.subplots_adjust(0.02, 0.10, 0.98, 0.94, 0.04, 0.35)
plt.show()
|
bsd-3-clause
|
wavelets/pandashells
|
pandashells/lib/lomb_scargle_lib.py
|
7
|
3748
|
#! /usr/bin/env python
# standard library imports
try:
import pandas as pd
import numpy as np
# will catch import errors in module_checker_lib so won't test this branch
except ImportError: # pragma: nocover
pass
def _next_power_two(x):
    """Return the smallest power of two that is >= int(x) (minimum 1)."""
    x = int(x)
    if x <= 1:
        return 1
    # bit_length of (x - 1) is the exponent of the next power of two
    return 1 << (x - 1).bit_length()
def _compute_pad(t, interp_exponent=0):
    """
    Compute the zero padding for a sorted time series t.

    The padded length is the next power of two >= len(t), shifted left by
    interp_exponent.  Returns (t_pad, y_pad): timestamps continuing t on the
    same uniform grid, and matching zeros.
    """
    n = len(t)
    step = (t[-1] - t[0]) / float(n - 1)
    # next power of two >= n (inlined from _next_power_two)
    padded = 1
    while padded < n:
        padded <<= 1
    padded <<= interp_exponent
    extra = padded - n
    start = t[-1] + step
    t_pad = np.linspace(start, start + (extra - 1) * step, extra)
    return t_pad, np.zeros(len(t_pad))
def _compute_params(t):
    """
    Return (min_freq, d_freq, n): the frequency-grid parameters expected by
    gatspy's fast Lomb-Scargle implementation for the sorted time series t.
    """
    n = len(t)
    span = t[-1] - t[0]
    sample_step = span / float(n - 1)
    # lowest resolvable frequency, frequency-grid spacing, grid size
    return 1. / span, 1. / (2 * sample_step * n), n
def lomb_scargle(df, time_col, val_col, interp_exponent=0, freq_order=False):
    """
    Compute a Lomb-Scargle spectrum of an unevenly sampled time series.

    :type df: pandas.DataFrame
    :param df: An input dataframe
    :type time_col: str
    :param time_col: The column of the dataframe holding the timestamps
    :type val_col: str
    :param val_col: The column of the dataframe holding the observations
    :type interp_exponent: int
    :param interp_exponent: Interpolate the spectrum by this power of two
    :type freq_order: bool
    :param freq_order: If set to True spectrum is returned in frequency order
                       instead of period order (default=False)
    :rtype: Pandas DataFrame
    :returns: A dataframe with columns: period, freq, power, amplitude
    """
    # do imports here to avoid loading plot libraries when this
    # module is loaded in __init__.py
    # which then doesn't allow for doing matplotlib.use() later
    from pandashells.lib import module_checker_lib
    module_checker_lib.check_for_modules(['gatspy', 'pandas', 'numpy'])
    import gatspy
    # only care about timestamped values
    df = df[[time_col, val_col]].dropna()
    # standardize column names, remove mean from values, and sort by time
    df = df.rename(columns={time_col: 't', val_col: 'y'}).sort_index(by=['t'])
    df['y'] = df['y'] - df.y.mean()
    # compute total energy in the time series
    E_in = np.sum((df.y * df.y))
    # appropriately zero-pad the timeseries before taking spectrum
    pre_pad_length = len(df)
    t_pad, y_pad = _compute_pad(df.t.values, interp_exponent=interp_exponent)
    if len(t_pad) > 0:
        df = df.append(
            pd.DataFrame({'t': t_pad, 'y': y_pad}), ignore_index=True)
    # fit the lomb scargle model to the time series
    model = gatspy.periodic.LombScargleFast()
    model.fit(df.t.values, df.y.values, 1)
    # compute params for getting results out of lomb scargle fit
    # (the frequency step gets its own name so it no longer clobbers the
    # working dataframe, which the old code shadowed with `df` here)
    f0, d_freq, N = _compute_params(df.t.values)
    f = f0 + d_freq * np.arange(N)
    p = 1. / f
    # retrieve the lomb scargle fit and normalize for power / amplitude
    yf = model.score_frequency_grid(f0, d_freq, N)
    yf_power = 2 * yf * E_in * len(yf) / float(pre_pad_length) ** 2
    yf_amp = np.sqrt(yf_power)
    # generate the output dataframe
    out = pd.DataFrame(
        {'freq': f, 'period': p, 'power': yf_power, 'amp': yf_amp}
    )[['period', 'freq', 'power', 'amp']]
    # order by period if desired
    if not freq_order:
        out = out.sort_index(by='period')
    return out
|
bsd-2-clause
|
DmitryOdinoky/sms-tools
|
lectures/07-Sinusoidal-plus-residual-model/plots-code/hprModelFrame.py
|
22
|
2847
|
import numpy as np
import matplotlib.pyplot as plt
from scipy.signal import hamming, triang, blackmanharris
import math
from scipy.fftpack import fft, ifft, fftshift
import sys, os, functools, time
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../../software/models/'))
import dftModel as DFT
import utilFunctions as UF
import harmonicModel as HM
# NOTE(review): this script relies on Python 2 semantics — `pos`, `H` and the
# `Ns/2` slice bounds are produced by `/` on numbers that must stay integral
# for the array slicing below to work; under Python 3 they become floats.
(fs, x) = UF.wavread('../../../sounds/flute-A4.wav')
pos = .8*fs
# analysis window/FFT parameters
M = 601
hM1 = int(math.floor((M+1)/2))
hM2 = int(math.floor(M/2))
w = np.hamming(M)
N = 1024
t = -100
nH = 40
minf0 = 420
maxf0 = 460
f0et = 5
maxnpeaksTwm = 5
minSineDur = .1
harmDevSlope = 0.01
Ns = 512
H = Ns/4
# one analysis frame around `pos`; x2 is the synthesis-sized slice
x1 = x[pos-hM1:pos+hM2]
x2 = x[pos-Ns/2-1:pos+Ns/2-1]
# spectral peak picking and f0 estimation
mX, pX = DFT.dftAnal(x1, w, N)
ploc = UF.peakDetection(mX, t)
iploc, ipmag, ipphase = UF.peakInterp(mX, pX, ploc)
ipfreq = fs*iploc/N
f0 = UF.f0Twm(ipfreq, ipmag, f0et, minf0, maxf0)
hfreqp = []
hfreq, hmag, hphase = HM.harmonicDetection(ipfreq, ipmag, ipphase, f0, nH, hfreqp, fs, harmDevSlope)
# synthesize the harmonic spectrum and subtract it to get the residual
Yh = UF.genSpecSines(hfreq, hmag, hphase, Ns, fs)
mYh = 20 * np.log10(abs(Yh[:Ns/2]))
pYh = np.unwrap(np.angle(Yh[:Ns/2]))
bh=blackmanharris(Ns)
X2 = fft(fftshift(x2*bh/sum(bh)))
Xr = X2-Yh
mXr = 20 * np.log10(abs(Xr[:Ns/2]))
pXr = np.unwrap(np.angle(Xr[:Ns/2]))
# time-domain harmonic and residual components for the top-right panel
xrw = np.real(fftshift(ifft(Xr))) * H * 2
yhw = np.real(fftshift(ifft(Yh))) * H * 2
maxplotfreq = 8000.0
# six-panel figure: input frame, spectra with harmonics, residual, resynthesis
plt.figure(1, figsize=(9, 7))
plt.subplot(3,2,1)
plt.plot(np.arange(M), x[pos-hM1:pos+hM2]*w, lw=1.5)
plt.axis([0, M, min(x[pos-hM1:pos+hM2]*w), max(x[pos-hM1:pos+hM2]*w)])
plt.title('x (flute-A4.wav)')
plt.subplot(3,2,3)
binFreq = (fs/2.0)*np.arange(mX.size)/(mX.size)
plt.plot(binFreq,mX,'r', lw=1.5)
plt.axis([0,maxplotfreq,-90,max(mX)+2])
plt.plot(hfreq, hmag, marker='x', color='b', linestyle='', markeredgewidth=1.5)
plt.title('mX + harmonics')
plt.subplot(3,2,5)
plt.plot(binFreq,pX,'c', lw=1.5)
plt.axis([0,maxplotfreq,0,16])
plt.plot(hfreq, hphase, marker='x', color='b', linestyle='', markeredgewidth=1.5)
plt.title('pX + harmonics')
plt.subplot(3,2,4)
binFreq = (fs/2.0)*np.arange(mXr.size)/(mXr.size)
plt.plot(binFreq,mYh,'r', lw=.8, label='mYh')
plt.plot(binFreq,mXr,'r', lw=1.5, label='mXr')
plt.axis([0,maxplotfreq,-90,max(mYh)+2])
plt.legend(prop={'size':10})
plt.title('mYh + mXr')
plt.subplot(3,2,6)
binFreq = (fs/2.0)*np.arange(mXr.size)/(mXr.size)
plt.plot(binFreq,pYh,'c', lw=.8, label='pYh')
plt.plot(binFreq,pXr,'c', lw=1.5, label ='pXr')
plt.axis([0,maxplotfreq,-5,25])
plt.legend(prop={'size':10})
plt.title('pYh + pXr')
plt.subplot(3,2,2)
plt.plot(np.arange(Ns), yhw, 'b', lw=.8, label='yh')
plt.plot(np.arange(Ns), xrw, 'b', lw=1.5, label='xr')
plt.axis([0, Ns, min(yhw), max(yhw)])
plt.legend(prop={'size':10})
plt.title('yh + xr')
plt.tight_layout()
plt.savefig('hprModelFrame.png')
plt.show()
|
agpl-3.0
|
jpautom/scikit-learn
|
doc/tutorial/text_analytics/skeletons/exercise_01_language_train_model.py
|
25
|
2004
|
"""Build a language detector model
The goal of this exercise is to train a linear classifier on text features
that represent sequences of up to 3 consecutive characters so as to be
recognize natural languages by using the frequencies of short character
sequences as 'fingerprints'.
"""
# Author: Olivier Grisel <[email protected]>
# License: Simplified BSD
import sys
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import Perceptron
from sklearn.pipeline import Pipeline
from sklearn.datasets import load_files
from sklearn.model_selection import train_test_split
from sklearn import metrics
# The training data folder must be passed as first argument
languages_data_folder = sys.argv[1]
dataset = load_files(languages_data_folder)
# Split the dataset in training and test set:
docs_train, docs_test, y_train, y_test = train_test_split(
    dataset.data, dataset.target, test_size=0.5)
# TASK: Build a vectorizer that splits strings into sequence of 1 to 3
# characters instead of word tokens
# TASK: Build a vectorizer / classifier pipeline using the previous analyzer
# the pipeline instance should stored in a variable named clf
# TASK: Fit the pipeline on the training set
# TASK: Predict the outcome on the testing set in a variable named y_predicted
# NOTE(review): this file is a tutorial skeleton — `clf` and `y_predicted`
# are intentionally left undefined until the TASKs above are completed;
# the script raises NameError as-is.
# Print the classification report
print(metrics.classification_report(y_test, y_predicted,
                                    target_names=dataset.target_names))
# Plot the confusion matrix
cm = metrics.confusion_matrix(y_test, y_predicted)
print(cm)
#import pylab as pl
#pl.matshow(cm, cmap=pl.cm.jet)
#pl.show()
# Predict the result on some short new sentences:
sentences = [
    u'This is a language detection test.',
    u'Ceci est un test de d\xe9tection de la langue.',
    u'Dies ist ein Test, um die Sprache zu erkennen.',
]
predicted = clf.predict(sentences)
for s, p in zip(sentences, predicted):
    print(u'The language of "%s" is "%s"' % (s, dataset.target_names[p]))
|
bsd-3-clause
|
ryandougherty/mwa-capstone
|
MWA_Tools/build/matplotlib/lib/mpl_examples/pylab_examples/image_masked.py
|
12
|
1768
|
#!/usr/bin/env python
'''imshow with masked array input and out-of-range colors.
The second subplot illustrates the use of BoundaryNorm to
get a filled contour effect.
'''
from pylab import *
from numpy import ma
import matplotlib.colors as colors
# Sample a difference-of-Gaussians surface on a regular grid.
delta = 0.025
x = y = arange(-3.0, 3.0, delta)
X, Y = meshgrid(x, y)
Z1 = bivariate_normal(X, Y, 1.0, 1.0, 0.0, 0.0)
Z2 = bivariate_normal(X, Y, 1.5, 0.5, 1, 1)
Z = 10 * (Z2-Z1) # difference of Gaussians
# Set up a colormap:
palette = cm.gray
palette.set_over('r', 1.0)
palette.set_under('g', 1.0)
palette.set_bad('b', 1.0)
# Alternatively, we could use
# palette.set_bad(alpha = 0.0)
# to make the bad region transparent. This is the default.
# If you comment out all the palette.set* lines, you will see
# all the defaults; under and over will be colored with the
# first and last colors in the palette, respectively.
# Mask out the large values so they render with the 'bad' color.
Zm = ma.masked_where(Z > 1.2, Z)
# By setting vmin and vmax in the norm, we establish the
# range to which the regular palette color scale is applied.
# Anything above that range is colored based on palette.set_over, etc.
subplot(1,2,1)
im = imshow(Zm, interpolation='bilinear',
            cmap=palette,
            norm = colors.Normalize(vmin = -1.0, vmax = 1.0, clip = False),
            origin='lower', extent=[-3,3,-3,3])
title('Green=low, Red=high, Blue=bad')
colorbar(im, extend='both', orientation='horizontal', shrink=0.8)
# Second panel: discrete color bands via BoundaryNorm.
subplot(1,2,2)
im = imshow(Zm, interpolation='nearest',
            cmap=palette,
            norm = colors.BoundaryNorm([-1, -0.5, -0.2, 0, 0.2, 0.5, 1],
                                       ncolors=256, clip = False),
            origin='lower', extent=[-3,3,-3,3])
title('With BoundaryNorm')
colorbar(im, extend='both', spacing='proportional',
         orientation='horizontal', shrink=0.8)
show()
|
gpl-2.0
|
moonbury/pythonanywhere
|
github/MasteringMLWithScikit-learn/8365OS_09_Codes/scratch.py
|
3
|
1723
|
import os
import numpy as np
import mahotas as mh
from sklearn.pipeline import Pipeline
from sklearn.svm import SVC
from sklearn.cross_validation import train_test_split
from sklearn.grid_search import GridSearchCV
from sklearn.metrics import classification_report
if __name__ == '__main__':
    # NOTE(review): Python 2 script (print statements); keep running it
    # under Python 2 or port the prints before reuse.
    # Load 30x30 grayscale character images; the class label is encoded in
    # the filename between position 3 and the first '-'.
    X = []
    y = []
    for path, subdirs, files in os.walk('data/English/Img/GoodImg/Bmp/'):
        for filename in files:
            f = os.path.join(path, filename)
            target = filename[3:filename.index('-')]
            img = mh.imread(f, as_grey=True)
            # skip images too small to resize meaningfully
            if img.shape[0] <= 30 or img.shape[1] <= 30:
                continue
            img_resized = mh.imresize(img, (30, 30))
            # imresize can come back off-by-one; force the exact shape
            if img_resized.shape != (30, 30):
                img_resized = mh.imresize(img_resized, (30, 30))
            X.append(img_resized.reshape((900, 1)))
            y.append(target)
    X = np.array(X)
    X = X.reshape(X.shape[:2])
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.1)
    # Grid-search an RBF SVM over gamma and C.
    pipeline = Pipeline([
        ('clf', SVC(kernel='rbf', gamma=0.01, C=100))
    ])
    parameters = {
        'clf__gamma': (0.01, 0.03, 0.1, 0.3, 1),
        'clf__C': (0.1, 0.3, 1, 3, 10, 30),
    }
    grid_search = GridSearchCV(pipeline, parameters, n_jobs=3, verbose=1, scoring='accuracy')
    grid_search.fit(X_train, y_train)
    print 'Best score: %0.3f' % grid_search.best_score_
    print 'Best parameters set:'
    best_parameters = grid_search.best_estimator_.get_params()
    for param_name in sorted(parameters.keys()):
        print '\t%s: %r' % (param_name, best_parameters[param_name])
    predictions = grid_search.predict(X_test)
    print classification_report(y_test, predictions)
|
gpl-3.0
|
devanshdalal/scikit-learn
|
sklearn/preprocessing/tests/test_function_transformer.py
|
46
|
3387
|
import numpy as np
from sklearn.utils import testing
from sklearn.preprocessing import FunctionTransformer
from sklearn.utils.testing import assert_equal, assert_array_equal
def _make_func(args_store, kwargs_store, func=lambda X, *a, **k: X):
    """Build a function that records its arguments before delegating to func.

    Positional arguments are appended to args_store, keyword arguments are
    merged into kwargs_store, and func(X) supplies the return value.
    """
    def _recording_func(X, *args, **kwargs):
        # log everything we were called with, then delegate
        args_store.append(X)
        for extra in args:
            args_store.append(extra)
        kwargs_store.update(kwargs)
        return func(X)
    return _recording_func
def test_delegate_to_func():
    """FunctionTransformer must pass X (and y when pass_y=True) straight
    through to the wrapped function and return its result unchanged."""
    # (args|kwargs)_store will hold the positional and keyword arguments
    # passed to the function inside the FunctionTransformer.
    args_store = []
    kwargs_store = {}
    X = np.arange(10).reshape((5, 2))
    assert_array_equal(
        FunctionTransformer(_make_func(args_store, kwargs_store)).transform(X),
        X,
        'transform should have returned X unchanged',
    )
    # The function should only have received X.
    assert_equal(
        args_store,
        [X],
        'Incorrect positional arguments passed to func: {args}'.format(
            args=args_store,
        ),
    )
    assert_equal(
        kwargs_store,
        {},
        'Unexpected keyword arguments passed to func: {args}'.format(
            args=kwargs_store,
        ),
    )
    # reset the argument stores.
    args_store[:] = [] # python2 compatible inplace list clear.
    kwargs_store.clear()
    y = object()
    # With pass_y=True, transform(X, y) should forward y as a second
    # positional argument to the wrapped function.
    assert_array_equal(
        FunctionTransformer(
            _make_func(args_store, kwargs_store),
            pass_y=True,
        ).transform(X, y),
        X,
        'transform should have returned X unchanged',
    )
    # The function should have received X and y.
    assert_equal(
        args_store,
        [X, y],
        'Incorrect positional arguments passed to func: {args}'.format(
            args=args_store,
        ),
    )
    assert_equal(
        kwargs_store,
        {},
        'Unexpected keyword arguments passed to func: {args}'.format(
            args=kwargs_store,
        ),
    )
def test_np_log():
    """The classic numpy.log1p example from the docs still works."""
    X = np.arange(10).reshape((5, 2))
    transformed = FunctionTransformer(np.log1p).transform(X)
    assert_array_equal(transformed, np.log1p(X))
def test_kw_arg():
    """kw_args given at construction time must reach the wrapped function."""
    X = np.linspace(0, 1, num=10).reshape((5, 2))
    rounder = FunctionTransformer(np.around, kw_args=dict(decimals=3))
    expected = np.around(X, decimals=3)
    assert_array_equal(rounder.transform(X), expected)
def test_kw_arg_update():
    """Mutating kw_args in place after construction must affect transform."""
    X = np.linspace(0, 1, num=10).reshape((5, 2))
    rounder = FunctionTransformer(np.around, kw_args=dict(decimals=3))
    rounder.kw_args['decimals'] = 1
    assert_array_equal(rounder.transform(X), np.around(X, decimals=1))
def test_kw_arg_reset():
    """Re-assigning the whole kw_args dict must also take effect."""
    X = np.linspace(0, 1, num=10).reshape((5, 2))
    rounder = FunctionTransformer(np.around, kw_args=dict(decimals=3))
    rounder.kw_args = dict(decimals=1)
    assert_array_equal(rounder.transform(X), np.around(X, decimals=1))
def test_inverse_transform():
    """sqrt forward then rounded inverse: the round trip equals the rounded
    square root."""
    X = np.array([1, 4, 9, 16]).reshape((2, 2))
    transformer = FunctionTransformer(
        func=np.sqrt,
        inverse_func=np.around, inv_kw_args=dict(decimals=3),
    )
    round_trip = transformer.inverse_transform(transformer.transform(X))
    assert_array_equal(round_trip, np.around(np.sqrt(X), decimals=3))
|
bsd-3-clause
|
abimannans/scikit-learn
|
sklearn/metrics/tests/test_regression.py
|
272
|
6066
|
from __future__ import division, print_function
import numpy as np
from itertools import product
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.metrics import explained_variance_score
from sklearn.metrics import mean_absolute_error
from sklearn.metrics import mean_squared_error
from sklearn.metrics import median_absolute_error
from sklearn.metrics import r2_score
from sklearn.metrics.regression import _check_reg_targets
def test_regression_metrics(n_samples=50):
    """All regression metrics on predictions offset by exactly one unit."""
    y_true = np.arange(n_samples)
    y_pred = y_true + 1
    assert_almost_equal(mean_absolute_error(y_true, y_pred), 1.)
    assert_almost_equal(mean_squared_error(y_true, y_pred), 1.)
    assert_almost_equal(median_absolute_error(y_true, y_pred), 1.)
    assert_almost_equal(explained_variance_score(y_true, y_pred), 1.)
    assert_almost_equal(r2_score(y_true, y_pred), 0.995, 2)
def test_multioutput_regression():
    """Multioutput metrics on a small binary indicator problem."""
    y_true = np.array([[1, 0, 0, 1], [0, 1, 1, 1], [1, 1, 0, 1]])
    y_pred = np.array([[0, 0, 0, 1], [1, 0, 1, 1], [0, 0, 0, 1]])
    expected = (1. / 3 + 2. / 3 + 2. / 3) / 4.
    assert_almost_equal(mean_squared_error(y_true, y_pred), expected)
    # mean_absolute_error and mean_squared_error agree here because the
    # targets are binary.
    assert_almost_equal(mean_absolute_error(y_true, y_pred), expected)
    assert_almost_equal(
        r2_score(y_true, y_pred, multioutput='variance_weighted'),
        1. - 5. / 2)
    assert_almost_equal(
        r2_score(y_true, y_pred, multioutput='uniform_average'), -.875)
def test_regression_metrics_at_limits():
    """Perfect predictions drive every metric to its limiting value."""
    zero = [0.]
    for error_metric in (mean_squared_error, mean_absolute_error,
                         median_absolute_error):
        assert_almost_equal(error_metric(zero, zero), 0.00, 2)
    assert_almost_equal(explained_variance_score(zero, zero), 1.00, 2)
    assert_almost_equal(r2_score([0., 1], [0., 1]), 1.00, 2)
def test__check_reg_targets():
    """_check_reg_targets must accept compatible (type, n_outputs) pairs,
    reshape 1d continuous targets to a column, and raise ValueError on any
    mismatched pair."""
    # All of length 3
    EXAMPLES = [
        ("continuous", [1, 2, 3], 1),
        ("continuous", [[1], [2], [3]], 1),
        ("continuous-multioutput", [[1, 1], [2, 2], [3, 1]], 2),
        ("continuous-multioutput", [[5, 1], [4, 2], [3, 1]], 2),
        ("continuous-multioutput", [[1, 3, 4], [2, 2, 2], [3, 1, 1]], 3),
    ]
    # Exercise every ordered pair of examples, including each with itself.
    for (type1, y1, n_out1), (type2, y2, n_out2) in product(EXAMPLES,
                                                            repeat=2):
        if type1 == type2 and n_out1 == n_out2:
            y_type, y_check1, y_check2, multioutput = _check_reg_targets(
                y1, y2, None)
            assert_equal(type1, y_type)
            if type1 == 'continuous':
                # 1d targets come back as a single column
                assert_array_equal(y_check1, np.reshape(y1, (-1, 1)))
                assert_array_equal(y_check2, np.reshape(y2, (-1, 1)))
            else:
                assert_array_equal(y_check1, y1)
                assert_array_equal(y_check2, y2)
        else:
            assert_raises(ValueError, _check_reg_targets, y1, y2, None)
def test_regression_multioutput_array():
    """multioutput='raw_values' must return one value per output column,
    and its mean must equal the 'uniform_average' result."""
    y_true = [[1, 2], [2.5, -1], [4.5, 3], [5, 7]]
    y_pred = [[1, 1], [2, -1], [5, 4], [5, 6.5]]
    mse = mean_squared_error(y_true, y_pred, multioutput='raw_values')
    mae = mean_absolute_error(y_true, y_pred, multioutput='raw_values')
    r = r2_score(y_true, y_pred, multioutput='raw_values')
    evs = explained_variance_score(y_true, y_pred, multioutput='raw_values')
    assert_array_almost_equal(mse, [0.125, 0.5625], decimal=2)
    assert_array_almost_equal(mae, [0.25, 0.625], decimal=2)
    assert_array_almost_equal(r, [0.95, 0.93], decimal=2)
    assert_array_almost_equal(evs, [0.95, 0.93], decimal=2)
    # mean_absolute_error and mean_squared_error are equal because
    # it is a binary problem.
    y_true = [[0, 0]]*4
    y_pred = [[1, 1]]*4
    mse = mean_squared_error(y_true, y_pred, multioutput='raw_values')
    mae = mean_absolute_error(y_true, y_pred, multioutput='raw_values')
    r = r2_score(y_true, y_pred, multioutput='raw_values')
    assert_array_almost_equal(mse, [1., 1.], decimal=2)
    assert_array_almost_equal(mae, [1., 1.], decimal=2)
    assert_array_almost_equal(r, [0., 0.], decimal=2)
    # one output predicted at the mean (r2=0), one anti-correlated (r2<0)
    r = r2_score([[0, -1], [0, 1]], [[2, 2], [1, 1]], multioutput='raw_values')
    assert_array_almost_equal(r, [0, -3.5], decimal=2)
    assert_equal(np.mean(r), r2_score([[0, -1], [0, 1]], [[2, 2], [1, 1]],
                                      multioutput='uniform_average'))
    evs = explained_variance_score([[0, -1], [0, 1]], [[2, 2], [1, 1]],
                                   multioutput='raw_values')
    assert_array_almost_equal(evs, [0, -1.25], decimal=2)
    # Checking for the condition in which both numerator and denominator is
    # zero.
    y_true = [[1, 3], [-1, 2]]
    y_pred = [[1, 4], [-1, 1]]
    r2 = r2_score(y_true, y_pred, multioutput='raw_values')
    assert_array_almost_equal(r2, [1., -3.], decimal=2)
    assert_equal(np.mean(r2), r2_score(y_true, y_pred,
                                       multioutput='uniform_average'))
    evs = explained_variance_score(y_true, y_pred, multioutput='raw_values')
    assert_array_almost_equal(evs, [1., -3.], decimal=2)
    assert_equal(np.mean(evs), explained_variance_score(y_true, y_pred))
def test_regression_custom_weights():
    """Explicit per-output weights [0.4, 0.6] on a two-output problem."""
    y_true = [[1, 2], [2.5, -1], [4.5, 3], [5, 7]]
    y_pred = [[1, 1], [2, -1], [5, 4], [5, 6.5]]
    weights = [0.4, 0.6]
    assert_almost_equal(
        mean_squared_error(y_true, y_pred, multioutput=weights),
        0.39, decimal=2)
    assert_almost_equal(
        mean_absolute_error(y_true, y_pred, multioutput=weights),
        0.475, decimal=3)
    assert_almost_equal(
        r2_score(y_true, y_pred, multioutput=weights), 0.94, decimal=2)
    assert_almost_equal(
        explained_variance_score(y_true, y_pred, multioutput=weights),
        0.94, decimal=2)
|
bsd-3-clause
|
RachitKansal/scikit-learn
|
sklearn/cross_decomposition/tests/test_pls.py
|
215
|
11427
|
import numpy as np
from sklearn.utils.testing import (assert_array_almost_equal,
assert_array_equal, assert_true, assert_raise_message)
from sklearn.datasets import load_linnerud
from sklearn.cross_decomposition import pls_
from nose.tools import assert_equal
def test_pls():
    """End-to-end checks of PLSCanonical/PLSRegression on the linnerud
    dataset: agreement between the nipals and svd algorithms, the
    mathematical invariants of PLS (orthogonality, reconstruction,
    rotations), and non-regression values validated against the R
    packages plspm, misOmics and pls."""
    d = load_linnerud()
    X = d.data
    Y = d.target
    # 1) Canonical (symmetric) PLS (PLS 2 blocks canonical mode A)
    # ===========================================================
    # Compare 2 algo.: nipals vs. svd
    # ------------------------------
    pls_bynipals = pls_.PLSCanonical(n_components=X.shape[1])
    pls_bynipals.fit(X, Y)
    pls_bysvd = pls_.PLSCanonical(algorithm="svd", n_components=X.shape[1])
    pls_bysvd.fit(X, Y)
    # check equalities of loading (up to the sign of the second column)
    assert_array_almost_equal(
        pls_bynipals.x_loadings_,
        np.multiply(pls_bysvd.x_loadings_, np.array([1, -1, 1])), decimal=5,
        err_msg="nipals and svd implementation lead to different x loadings")
    assert_array_almost_equal(
        pls_bynipals.y_loadings_,
        np.multiply(pls_bysvd.y_loadings_, np.array([1, -1, 1])), decimal=5,
        err_msg="nipals and svd implementation lead to different y loadings")
    # Check PLS properties (with n_components=X.shape[1])
    # ---------------------------------------------------
    plsca = pls_.PLSCanonical(n_components=X.shape[1])
    plsca.fit(X, Y)
    T = plsca.x_scores_
    P = plsca.x_loadings_
    Wx = plsca.x_weights_
    U = plsca.y_scores_
    Q = plsca.y_loadings_
    Wy = plsca.y_weights_

    def check_ortho(M, err_msg):
        # M'M must be diagonal for the columns of M to be orthogonal.
        K = np.dot(M.T, M)
        assert_array_almost_equal(K, np.diag(np.diag(K)), err_msg=err_msg)
    # Orthogonality of weights
    # ~~~~~~~~~~~~~~~~~~~~~~~~
    check_ortho(Wx, "x weights are not orthogonal")
    check_ortho(Wy, "y weights are not orthogonal")
    # Orthogonality of latent scores
    # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    check_ortho(T, "x scores are not orthogonal")
    check_ortho(U, "y scores are not orthogonal")
    # Check X = TP' and Y = UQ' (with (p == q) components)
    # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    # center scale X, Y
    Xc, Yc, x_mean, y_mean, x_std, y_std =\
        pls_._center_scale_xy(X.copy(), Y.copy(), scale=True)
    assert_array_almost_equal(Xc, np.dot(T, P.T), err_msg="X != TP'")
    assert_array_almost_equal(Yc, np.dot(U, Q.T), err_msg="Y != UQ'")
    # Check that rotations on training data lead to scores
    # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    Xr = plsca.transform(X)
    assert_array_almost_equal(Xr, plsca.x_scores_,
                              err_msg="rotation on X failed")
    Xr, Yr = plsca.transform(X, Y)
    assert_array_almost_equal(Xr, plsca.x_scores_,
                              err_msg="rotation on X failed")
    assert_array_almost_equal(Yr, plsca.y_scores_,
                              err_msg="rotation on Y failed")
    # "Non regression test" on canonical PLS
    # --------------------------------------
    # The results were checked against the R-package plspm
    pls_ca = pls_.PLSCanonical(n_components=X.shape[1])
    pls_ca.fit(X, Y)
    x_weights = np.array(
        [[-0.61330704, 0.25616119, -0.74715187],
         [-0.74697144, 0.11930791, 0.65406368],
         [-0.25668686, -0.95924297, -0.11817271]])
    assert_array_almost_equal(pls_ca.x_weights_, x_weights)
    x_rotations = np.array(
        [[-0.61330704, 0.41591889, -0.62297525],
         [-0.74697144, 0.31388326, 0.77368233],
         [-0.25668686, -0.89237972, -0.24121788]])
    assert_array_almost_equal(pls_ca.x_rotations_, x_rotations)
    y_weights = np.array(
        [[+0.58989127, 0.7890047, 0.1717553],
         [+0.77134053, -0.61351791, 0.16920272],
         [-0.23887670, -0.03267062, 0.97050016]])
    assert_array_almost_equal(pls_ca.y_weights_, y_weights)
    y_rotations = np.array(
        [[+0.58989127, 0.7168115, 0.30665872],
         [+0.77134053, -0.70791757, 0.19786539],
         [-0.23887670, -0.00343595, 0.94162826]])
    assert_array_almost_equal(pls_ca.y_rotations_, y_rotations)
    # 2) Regression PLS (PLS2): "Non regression test"
    # ===============================================
    # The results were checked against the R-packages plspm, misOmics and pls
    pls_2 = pls_.PLSRegression(n_components=X.shape[1])
    pls_2.fit(X, Y)
    x_weights = np.array(
        [[-0.61330704, -0.00443647, 0.78983213],
         [-0.74697144, -0.32172099, -0.58183269],
         [-0.25668686, 0.94682413, -0.19399983]])
    assert_array_almost_equal(pls_2.x_weights_, x_weights)
    x_loadings = np.array(
        [[-0.61470416, -0.24574278, 0.78983213],
         [-0.65625755, -0.14396183, -0.58183269],
         [-0.51733059, 1.00609417, -0.19399983]])
    assert_array_almost_equal(pls_2.x_loadings_, x_loadings)
    y_weights = np.array(
        [[+0.32456184, 0.29892183, 0.20316322],
         [+0.42439636, 0.61970543, 0.19320542],
         [-0.13143144, -0.26348971, -0.17092916]])
    assert_array_almost_equal(pls_2.y_weights_, y_weights)
    y_loadings = np.array(
        [[+0.32456184, 0.29892183, 0.20316322],
         [+0.42439636, 0.61970543, 0.19320542],
         [-0.13143144, -0.26348971, -0.17092916]])
    assert_array_almost_equal(pls_2.y_loadings_, y_loadings)
    # 3) Another non-regression test of Canonical PLS on random dataset
    # =================================================================
    # The results were checked against the R-package plspm
    n = 500
    p_noise = 10
    q_noise = 5
    # 2 latents vars:
    np.random.seed(11)
    l1 = np.random.normal(size=n)
    l2 = np.random.normal(size=n)
    latents = np.array([l1, l1, l2, l2]).T
    X = latents + np.random.normal(size=4 * n).reshape((n, 4))
    Y = latents + np.random.normal(size=4 * n).reshape((n, 4))
    X = np.concatenate(
        (X, np.random.normal(size=p_noise * n).reshape(n, p_noise)), axis=1)
    Y = np.concatenate(
        (Y, np.random.normal(size=q_noise * n).reshape(n, q_noise)), axis=1)
    np.random.seed(None)
    pls_ca = pls_.PLSCanonical(n_components=3)
    pls_ca.fit(X, Y)
    x_weights = np.array(
        [[0.65803719, 0.19197924, 0.21769083],
         [0.7009113, 0.13303969, -0.15376699],
         [0.13528197, -0.68636408, 0.13856546],
         [0.16854574, -0.66788088, -0.12485304],
         [-0.03232333, -0.04189855, 0.40690153],
         [0.1148816, -0.09643158, 0.1613305],
         [0.04792138, -0.02384992, 0.17175319],
         [-0.06781, -0.01666137, -0.18556747],
         [-0.00266945, -0.00160224, 0.11893098],
         [-0.00849528, -0.07706095, 0.1570547],
         [-0.00949471, -0.02964127, 0.34657036],
         [-0.03572177, 0.0945091, 0.3414855],
         [0.05584937, -0.02028961, -0.57682568],
         [0.05744254, -0.01482333, -0.17431274]])
    assert_array_almost_equal(pls_ca.x_weights_, x_weights)
    x_loadings = np.array(
        [[0.65649254, 0.1847647, 0.15270699],
         [0.67554234, 0.15237508, -0.09182247],
         [0.19219925, -0.67750975, 0.08673128],
         [0.2133631, -0.67034809, -0.08835483],
         [-0.03178912, -0.06668336, 0.43395268],
         [0.15684588, -0.13350241, 0.20578984],
         [0.03337736, -0.03807306, 0.09871553],
         [-0.06199844, 0.01559854, -0.1881785],
         [0.00406146, -0.00587025, 0.16413253],
         [-0.00374239, -0.05848466, 0.19140336],
         [0.00139214, -0.01033161, 0.32239136],
         [-0.05292828, 0.0953533, 0.31916881],
         [0.04031924, -0.01961045, -0.65174036],
         [0.06172484, -0.06597366, -0.1244497]])
    assert_array_almost_equal(pls_ca.x_loadings_, x_loadings)
    y_weights = np.array(
        [[0.66101097, 0.18672553, 0.22826092],
         [0.69347861, 0.18463471, -0.23995597],
         [0.14462724, -0.66504085, 0.17082434],
         [0.22247955, -0.6932605, -0.09832993],
         [0.07035859, 0.00714283, 0.67810124],
         [0.07765351, -0.0105204, -0.44108074],
         [-0.00917056, 0.04322147, 0.10062478],
         [-0.01909512, 0.06182718, 0.28830475],
         [0.01756709, 0.04797666, 0.32225745]])
    assert_array_almost_equal(pls_ca.y_weights_, y_weights)
    y_loadings = np.array(
        [[0.68568625, 0.1674376, 0.0969508],
         [0.68782064, 0.20375837, -0.1164448],
         [0.11712173, -0.68046903, 0.12001505],
         [0.17860457, -0.6798319, -0.05089681],
         [0.06265739, -0.0277703, 0.74729584],
         [0.0914178, 0.00403751, -0.5135078],
         [-0.02196918, -0.01377169, 0.09564505],
         [-0.03288952, 0.09039729, 0.31858973],
         [0.04287624, 0.05254676, 0.27836841]])
    assert_array_almost_equal(pls_ca.y_loadings_, y_loadings)
    # Orthogonality of weights
    # ~~~~~~~~~~~~~~~~~~~~~~~~
    check_ortho(pls_ca.x_weights_, "x weights are not orthogonal")
    check_ortho(pls_ca.y_weights_, "y weights are not orthogonal")
    # Orthogonality of latent scores
    # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    check_ortho(pls_ca.x_scores_, "x scores are not orthogonal")
    check_ortho(pls_ca.y_scores_, "y scores are not orthogonal")
def test_PLSSVD():
    """All PLS estimators must return exactly ``n_components`` score
    columns, not every component that could be extracted."""
    d = load_linnerud()
    X = d.data
    Y = d.target
    n_components = 2
    for estimator_cls in (pls_.PLSSVD, pls_.PLSRegression, pls_.PLSCanonical):
        estimator = estimator_cls(n_components=n_components)
        estimator.fit(X, Y)
        assert_equal(n_components, estimator.y_scores_.shape[1])
def test_univariate_pls_regression():
    """A 1d target must behave exactly like the equivalent column vector."""
    d = load_linnerud()
    X = d.data
    Y = d.target
    clf = pls_.PLSRegression()
    # Fit once on the flat target, once on the (n, 1) column vector.
    coef_from_1d = clf.fit(X, Y[:, 0]).coef_
    coef_from_2d = clf.fit(X, Y[:, :1]).coef_
    assert_array_almost_equal(coef_from_1d, coef_from_2d)
def test_predict_transform_copy():
    """The ``copy`` keyword of predict/transform must not change results,
    and fitting must leave the caller's arrays untouched."""
    d = load_linnerud()
    X = d.data
    Y = d.target
    clf = pls_.PLSCanonical()
    X_before = X.copy()
    Y_before = Y.copy()
    clf.fit(X, Y)
    # copy=False applied to a throwaway copy must give the same numbers
    # as the default copying path.
    assert_array_almost_equal(clf.predict(X), clf.predict(X.copy(), copy=False))
    assert_array_almost_equal(clf.transform(X), clf.transform(X.copy(), copy=False))
    # Same check when Y is passed along.
    assert_array_almost_equal(clf.transform(X, Y),
                              clf.transform(X.copy(), Y.copy(), copy=False))
    # Fitting must not have mutated the inputs; exact equality is
    # deliberate here.
    assert_array_equal(X_before, X)
    assert_array_equal(Y_before, Y)
    # Sanity check: the column means were nonzero, so in-place centering
    # would have been detectable had it happened.
    assert_true(np.all(X.mean(axis=0) != 0))
def test_scale():
    """Fitting with scale=True must cope with a zero-variance column."""
    d = load_linnerud()
    X = d.data
    Y = d.target
    X[:, -1] = 1.0  # forces X[:, -1].std() == 0
    for estimator in (pls_.PLSCanonical(), pls_.PLSRegression(),
                      pls_.PLSSVD()):
        estimator.set_params(scale=True)
        estimator.fit(X, Y)
def test_pls_errors():
    """Requesting more components than are available must raise."""
    d = load_linnerud()
    X = d.data
    Y = d.target
    for estimator in (pls_.PLSCanonical(), pls_.PLSRegression(),
                      pls_.PLSSVD()):
        estimator.n_components = 4
        assert_raise_message(ValueError, "Invalid number of components",
                             estimator.fit, X, Y)
|
bsd-3-clause
|
eternallyBaffled/itrade
|
itrade_wxabout.py
|
1
|
8673
|
#!/usr/bin/env python
# ============================================================================
# Project Name : iTrade
# Module Name : itrade_wxabout.py
#
# Description: wxPython About box
#
# The Original Code is iTrade code (http://itrade.sourceforge.net).
#
# The Initial Developer of the Original Code is Gilles Dumortier.
#
# Portions created by the Initial Developer are Copyright (C) 2004-2008 the
# Initial Developer. All Rights Reserved.
#
# Contributor(s):
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; see http://www.gnu.org/licenses/gpl.html
#
# History Rev Description
# 2005-04-02 dgil Wrote it from scratch
# 2007-01-29 dgil Use Boa inspired code to have a better About Box
# ============================================================================
# ============================================================================
# Imports
# ============================================================================
# python system
import logging
# iTrade system
import itrade_config
# wxPython system
if not itrade_config.nowxversion:
import itrade_wxversion
import wx
import wx.html
import wx.lib.wxpTag
# iTrade system
from itrade_logging import *
from itrade_local import message
from itrade_wxhtml import wxUrlClickHtmlWindow,EVT_HTML_URL_CLICK
# ============================================================================
# about_html
# ============================================================================
# Outer page template rendered by iTradeAboutBox.setPage. The %s slots are
# filled with: logo image path, version name, status, version number, extra
# header text, and the body built from about_text.
about_html = '''
<html>
<body bgcolor="#C5C1C4">
<center>
<table cellpadding="5" bgcolor="#FFFFFF" width="100%%">
<tr>
<td align="center">
<br><img src="%s"><br>
<font color="#006600" size="+4"><b>iTrade</b></font><br>
<strong>%s %s - %s</strong>
%s
</td>
</tr>
</table>
%s
</body>
</html>
'''

# Body of the about page; the %s slots are the website URL, the copyright
# string and the license name. The href values ("Credits", "LICENSE", ...)
# are pseudo-links dispatched by iTradeAboutBox.OnLinkClick.
about_text = '''
<p>Trading and Charting software written in <b>Python</b> and <b>wxPython</b>
</p>
<p><a href="iTrade">%s</a><br><br>
<b>© %s</b> and <a href="Authors">Authors</a> (<a href="Mail">[email protected]</a>)<br><br>
<a href="Credits">Credits</a>
</p>
<p>This pre-alpha software shows off some of the capabilities
of <b>iTrade</b>. Select items from the menu or list control,
sit back and enjoy. Be sure to take a peek at the source code for each
demo item so you can learn how to help us on this project.
</p>
<p>
<font size="-1"><b>iTrade</b> is published under the terms of the %s license.
Please see <i><a href="LICENSE">LICENSE</a></i> file for more information.</font>
</p>
<hr>
<wxp module="wx" class="Button">
<param name="label" value="Okay">
<param name="id" value="ID_OK">
</wxp>
</center>
'''
# ============================================================================
# credits_html
# ============================================================================
# Static credits page shown when the "Credits" pseudo-link is clicked.
credits_html = '''
<html>
<body bgcolor="#4488FF">
<center>
<table bgcolor="#FFFFFF" width="100%%">
<tr>
<td align="center"><h3>Credits</h3>
<p><b>the iTrade Team</b><br>
<p>Gilles Dumortier ([email protected]) : Lead Developer</p>
<p>Michel Legrand ([email protected]) : Testing & Docs</p>
<br>
<p><b>Many thanks to</b><br>
<p>Peter Mills ([email protected]) : ASE</p>
<p>Olivier Jacq ([email protected]) : Linux feedback & Docs</p>
<br>
<p><b>Translations</b><br>
<p>Catherine Pedrosa and Guilherme (guigui) : Portuguese</p>
<br>
<p><b>iTrade is built on:</b><br>
<a href="Python">Python</a>
<a href="wxPython">wxPython</a>
<a href="NumPy">NumPy</a>
<a href="Matplotlib">Matplotlib</a>
</p>
<p>
<a href="Back">Back</a><br>
</td>
</tr>
</table>
</body>
</html>
'''
# ============================================================================
# license_html
# ============================================================================
# License page template; the single %s slot receives the LICENSE file
# contents with <br> line breaks (see iTradeAboutBox.OnLinkClick).
license_html = '''
<html>
<body bgcolor="#4488FF">
<center>
<table bgcolor="#FFFFFF" width="100%%">
<tr>
<td align="left">
<p>
<a href="Back">Back</a>
<br><br>
<font size="-2">
%s
</font>
<p>
<a href="Back">Back</a><br>
</td>
</tr>
</table>
</body>
</html>
'''
# ============================================================================
# About box
# ============================================================================
wxID_ABOUTBOX = wx.NewId()


class iTradeAboutBox(wx.Dialog):
    """Modal "About" dialog showing the iTrade banner page.

    The content is rendered in an HTML window; clicked pseudo-links
    either swap the page in place (Credits, LICENSE, Back) or open an
    external browser (website, mail, library home pages).
    """

    # Width, in pixels, of the black frame drawn around the HTML window.
    border = 7

    def __init__(self, prnt):
        wx.Dialog.__init__(self, size=wx.Size(480, 525), pos=(-1, -1),
                           id=wxID_ABOUTBOX, title=message('about_title'),
                           parent=prnt, name='AboutBox',
                           style=wx.DEFAULT_DIALOG_STYLE)
        # Black backdrop; the HTML window is laid out inside it with a
        # small border so the page appears framed.
        self.blackback = wx.Window(self, -1, pos=(0, 0),
                                   size=self.GetClientSize(),
                                   style=wx.CLIP_CHILDREN)
        self.blackback.SetBackgroundColour(wx.BLACK)
        self.m_html = wxUrlClickHtmlWindow(
            self.blackback, -1,
            style=wx.CLIP_CHILDREN | wx.html.HW_NO_SELECTION)
        EVT_HTML_URL_CLICK(self.m_html, self.OnLinkClick)
        self.setPage()
        self.blackback.SetAutoLayout(True)
        # Constrain the HTML window to fill the backdrop minus the border.
        lc = wx.LayoutConstraints()
        lc.top.SameAs(self.blackback, wx.Top, self.border)
        lc.left.SameAs(self.blackback, wx.Left, self.border)
        lc.bottom.SameAs(self.blackback, wx.Bottom, self.border)
        lc.right.SameAs(self.blackback, wx.Right, self.border)
        self.m_html.SetConstraints(lc)
        self.blackback.Layout()
        self.Center(wx.BOTH)
        # Let ESC dismiss the dialog as if OK had been pressed.
        self.SetAcceleratorTable(
            wx.AcceleratorTable([(0, wx.WXK_ESCAPE, wx.ID_OK)]))

    def gotoInternetUrl(self, url):
        """Open *url* in the user's browser; fall back to a message box
        when the webbrowser module is unavailable."""
        try:
            import webbrowser
        except ImportError:
            wx.MessageBox(message('about_url') % url)
        else:
            webbrowser.open(url)

    def OnLinkClick(self, event):
        """Dispatch clicks on the pseudo-links embedded in the HTML pages."""
        clicked = event.linkinfo[0]
        if clicked == 'Credits':
            self.m_html.SetPage(credits_html)
        elif clicked == 'Back':
            self.setPage()
        elif clicked == 'iTrade':
            self.gotoInternetUrl(itrade_config.softwareWebsite)
        elif clicked == 'Authors':
            self.gotoInternetUrl(itrade_config.softwareWebsite + 'contact.htm')
        elif clicked == 'Python':
            self.gotoInternetUrl('http://www.python.org')
        elif clicked == 'wxPython':
            self.gotoInternetUrl('http://wxpython.org')
        elif clicked == 'NumPy':
            self.gotoInternetUrl('http://numpy.sourceforge.net')
        elif clicked == 'Matplotlib':
            self.gotoInternetUrl('http://matplotlib.sourceforge.net')
        elif clicked == 'Mail':
            self.gotoInternetUrl('mailto:[email protected]')
        elif clicked == 'LICENSE':
            # BUGFIX: use a context manager so the file handle is closed
            # even if reading fails (the original leaked it on error),
            # and join instead of quadratic string concatenation.
            with open('LICENSE', 'r') as f:
                body = ''.join(line + '<br>' for line in f)
            self.m_html.SetPage(license_html % body)

    def setPage(self):
        """Render the main about page (logo, version, license blurb)."""
        # NOTE(review): os.path.join is used here but no 'import os' is
        # visible in this module's import block -- presumably provided via
        # 'from itrade_logging import *'; confirm.
        self.m_html.SetPage(about_html % (
            os.path.join(itrade_config.dirRes, 'itrade.png'),
            itrade_config.softwareVersionName,
            itrade_config.softwareStatus,
            itrade_config.softwareVersion,
            '',
            about_text % (itrade_config.softwareWebsite,
                          itrade_config.softwareCopyright,
                          itrade_config.softwareLicense)))
# ============================================================================
# Test me
# ============================================================================
if __name__ == '__main__':
    # Standalone smoke test: show the about box on its own event loop.
    setLevel(logging.INFO)
    app = wx.App(False)
    dlg = iTradeAboutBox(None)
    dlg.CentreOnParent()
    dlg.ShowModal()
    dlg.Destroy()
    app.MainLoop()
# ============================================================================
# That's all folks !
# ============================================================================
|
gpl-3.0
|
r9y9/librosa
|
librosa/version.py
|
1
|
1368
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Version info"""
import sys
import importlib
short_version = '0.6'  # MAJOR.MINOR only, used for docs and links
version = '0.6.0'      # full release version string
def __get_mod_version(modname):
    """Best-effort lookup of *modname*'s version string.

    Returns the module's ``__version__`` when present, a placeholder
    string when the module exists but exposes no version, and ``None``
    when the module cannot be imported at all.
    """
    try:
        mod = (sys.modules[modname] if modname in sys.modules
               else importlib.import_module(modname))
        return getattr(mod, '__version__',
                       'installed, no version number available')
    except ImportError:
        return None
def show_versions():
    """Print version information for librosa and all its dependencies."""
    core_deps = ('audioread', 'numpy', 'scipy', 'sklearn', 'joblib',
                 'decorator', 'six', 'resampy')
    extra_deps = ('numpydoc', 'sphinx', 'sphinx_rtd_theme',
                  'sphinxcontrib.versioning', 'matplotlib', 'numba')
    print('INSTALLED VERSIONS')
    print('------------------')
    print('python: {}\n'.format(sys.version))
    print('librosa: {}\n'.format(version))
    # Required dependencies first, then a blank line, then the optional
    # (docs / acceleration) dependencies.
    for dep in core_deps:
        print('{}: {}'.format(dep, __get_mod_version(dep)))
    print('')
    for dep in extra_deps:
        print('{}: {}'.format(dep, __get_mod_version(dep)))
|
isc
|
xsynergy510x/android_external_chromium_org
|
chrome/test/nacl_test_injection/buildbot_nacl_integration.py
|
94
|
3083
|
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import subprocess
import sys
def Main(args):
    """Run the NaCl integration test stage with known-bad tests disabled.

    Returns the exit status of buildbot_chrome_nacl_stage.py.

    NOTE(review): this copy arrived with all indentation stripped; the
    nesting of the platform conditionals below was reconstructed and
    should be verified against the upstream Chromium file.
    """
    pwd = os.environ.get('PWD', '')
    is_integration_bot = 'nacl-chrome' in pwd
    # This environment variable check mimics what
    # buildbot_chrome_nacl_stage.py does.
    # NOTE(review): is_win64 is computed but never used below -- presumably
    # kept for parity with the stage script; confirm before removing.
    is_win64 = (sys.platform in ('win32', 'cygwin') and
                ('64' in os.environ.get('PROCESSOR_ARCHITECTURE', '') or
                 '64' in os.environ.get('PROCESSOR_ARCHITEW6432', '')))
    # On the main Chrome waterfall, we may need to control where the tests are
    # run.
    # If there is serious skew in the PPAPI interface that causes all of
    # the NaCl integration tests to fail, you can uncomment the
    # following block. (Make sure you comment it out when the issues
    # are resolved.) *However*, it is much preferred to add tests to
    # the 'tests_to_disable' list below.
    #if not is_integration_bot:
    # return
    tests_to_disable = []
    # In general, you should disable tests inside this conditional. This turns
    # them off on the main Chrome waterfall, but not on NaCl's integration bots.
    # This makes it easier to see when things have been fixed NaCl side.
    if not is_integration_bot:
        # http://code.google.com/p/nativeclient/issues/detail?id=2511
        tests_to_disable.append('run_ppapi_ppb_image_data_browser_test')
    if sys.platform == 'darwin':
        # TODO(mseaborn) fix
        # http://code.google.com/p/nativeclient/issues/detail?id=1835
        tests_to_disable.append('run_ppapi_crash_browser_test')
    if sys.platform in ('win32', 'cygwin'):
        # This one is only failing for nacl_glibc on x64 Windows but it is not
        # clear how to disable only that limited case.
        # See http://crbug.com/132395
        tests_to_disable.append('run_inbrowser_test_runner')
        # run_breakpad_browser_process_crash_test is flaky.
        # See http://crbug.com/317890
        tests_to_disable.append('run_breakpad_browser_process_crash_test')
        # See http://crbug.com/332301
        tests_to_disable.append('run_breakpad_crash_in_syscall_test')
        # It appears that crash_service.exe is not being reliably built by
        # default in the CQ. See: http://crbug.com/380880
        tests_to_disable.append('run_breakpad_untrusted_crash_test')
        tests_to_disable.append('run_breakpad_trusted_crash_in_startup_test')
    # Delegate the actual test run to the stage script, forwarding any
    # extra command-line arguments.
    script_dir = os.path.dirname(os.path.abspath(__file__))
    nacl_integration_script = os.path.join(script_dir,
                                           'buildbot_chrome_nacl_stage.py')
    cmd = [sys.executable,
           nacl_integration_script,
           # TODO(ncbray) re-enable.
           # https://code.google.com/p/chromium/issues/detail?id=133568
           '--disable_glibc',
           '--disable_tests=%s' % ','.join(tests_to_disable)]
    cmd += args
    sys.stdout.write('Running %s\n' % ' '.join(cmd))
    sys.stdout.flush()
    return subprocess.call(cmd)
if __name__ == '__main__':
    # Forward our own arguments and propagate the child's exit status.
    sys.exit(Main(sys.argv[1:]))
|
bsd-3-clause
|
f3r/scikit-learn
|
benchmarks/bench_plot_ward.py
|
290
|
1260
|
"""
Benchmark scikit-learn's Ward implement compared to SciPy's
"""
import time
import numpy as np
from scipy.cluster import hierarchy
import pylab as pl
from sklearn.cluster import AgglomerativeClustering
ward = AgglomerativeClustering(n_clusters=3, linkage='ward')
n_samples = np.logspace(.5, 3, 9)
n_features = np.logspace(1, 3.5, 7)
N_samples, N_features = np.meshgrid(n_samples,
n_features)
scikits_time = np.zeros(N_samples.shape)
scipy_time = np.zeros(N_samples.shape)
for i, n in enumerate(n_samples):
for j, p in enumerate(n_features):
X = np.random.normal(size=(n, p))
t0 = time.time()
ward.fit(X)
scikits_time[j, i] = time.time() - t0
t0 = time.time()
hierarchy.ward(X)
scipy_time[j, i] = time.time() - t0
ratio = scikits_time / scipy_time
pl.figure("scikit-learn Ward's method benchmark results")
pl.imshow(np.log(ratio), aspect='auto', origin="lower")
pl.colorbar()
pl.contour(ratio, levels=[1, ], colors='k')
pl.yticks(range(len(n_features)), n_features.astype(np.int))
pl.ylabel('N features')
pl.xticks(range(len(n_samples)), n_samples.astype(np.int))
pl.xlabel('N samples')
pl.title("Scikit's time, in units of scipy time (log)")
pl.show()
|
bsd-3-clause
|
mattilyra/scikit-learn
|
build_tools/cythonize.py
|
42
|
6375
|
#!/usr/bin/env python
""" cythonize
Cythonize pyx files into C files as needed.
Usage: cythonize [root_dir]
Default [root_dir] is 'sklearn'.
Checks pyx files to see if they have been changed relative to their
corresponding C files. If they have, then runs cython on these files to
recreate the C files.
The script detects changes in the pyx/pxd files using checksums
[or hashes] stored in a database file
Simple script to invoke Cython on all .pyx
files; while waiting for a proper build system. Uses file hashes to
figure out if rebuild is needed.
It is called by ./setup.py sdist so that sdist package can be installed without
cython
Originally written by Dag Sverre Seljebotn, and adapted from statsmodel 0.6.1
(Modified BSD 3-clause)
We copied it for scikit-learn.
Note: this script does not check any of the dependent C libraries; it only
operates on the Cython .pyx files or their corresponding Cython header (.pxd)
files.
"""
# Author: Arthur Mensch <[email protected]>
# Author: Raghav R V <[email protected]>
#
# License: BSD 3 clause
from __future__ import division, print_function, absolute_import
import os
import re
import sys
import hashlib
import subprocess
HASH_FILE = 'cythonize.dat'  # on-disk database of source/output file hashes
DEFAULT_ROOT = 'sklearn'     # package directory scanned when no arg is given

# WindowsError is not defined on unix systems
try:
    WindowsError
except NameError:
    WindowsError = None
def cythonize(cython_file, gen_file):
    """Translate *cython_file* into *gen_file* with Cython.

    Tries the ``cython`` executable first and falls back to running the
    compiler through the current interpreter. Raises when translation
    fails or Cython is missing entirely.
    """
    try:
        from Cython.Compiler.Version import version as cython_version
        from distutils.version import LooseVersion
        if LooseVersion(cython_version) < LooseVersion('0.21'):
            raise Exception('Building scikit-learn requires Cython >= 0.21')
    except ImportError:
        # Version check is best-effort only; the subprocess below will
        # fail loudly if Cython really is not installed.
        pass
    flags = ['--fast-fail']
    if gen_file.endswith('.cpp'):
        flags.append('--cplus')
    try:
        try:
            status = subprocess.call(['cython'] + flags +
                                     ["-o", gen_file, cython_file])
            if status != 0:
                raise Exception('Cythonizing %s failed' % cython_file)
        except OSError:
            # No `cython` executable on PATH (see scipy issue gh-2397);
            # run the compiler module through the interpreter instead.
            status = subprocess.call(
                [sys.executable, '-c',
                 'import sys; from Cython.Compiler.Main '
                 'import setuptools_main as main;'
                 ' sys.exit(main())'] + flags + ["-o", gen_file, cython_file])
            if status != 0:
                raise Exception('Cythonizing %s failed' % cython_file)
    except OSError:
        raise OSError('Cython needs to be installed')
def load_hashes(filename):
    """Read the hash database from *filename*.

    Each record line is "<file> <header_sha1> <pyx_sha1> <gen_sha1>".
    Returns an empty dict when the file is missing or malformed.
    """
    hashes = {}
    try:
        with open(filename, 'r') as hash_db:
            for record in hash_db:
                name, header_hash, pyx_hash, gen_hash = record.split()
                hashes[name] = (header_hash, pyx_hash, gen_hash)
    except (KeyError, ValueError, AttributeError, IOError):
        # Any corruption invalidates the whole database.
        hashes = {}
    return hashes
def save_hashes(hashes, filename):
    """Write the hash database *hashes* to *filename*, one record per line
    in the format read back by load_hashes."""
    with open(filename, 'w') as hash_db:
        for key, value in hashes.items():
            hash_db.write("%s %s %s %s\n"
                          % (key, value[0], value[1], value[2]))
def sha1_of_file(filename):
    """Return the SHA-1 hex digest of the file's entire contents."""
    with open(filename, "rb") as f:
        return hashlib.sha1(f.read()).hexdigest()
def clean_path(path):
    """Normalize *path*: forward slashes only, no leading './'."""
    normalized = path.replace(os.sep, '/')
    return normalized[2:] if normalized.startswith('./') else normalized
def get_hash_tuple(header_path, cython_path, gen_file_path):
    """Return the (header, pyx, generated) SHA-1 triple for one module.

    Optional files (header, generated output) hash to 'NA' when absent;
    the .pyx source itself must exist.
    """
    def _sha_or_na(p):
        return sha1_of_file(p) if os.path.exists(p) else 'NA'
    return (_sha_or_na(header_path),
            sha1_of_file(cython_path),
            _sha_or_na(gen_file_path))
def cythonize_if_unchanged(path, cython_file, gen_file, hashes):
    """Regenerate *gen_file* from *cython_file* when its sources changed.

    NOTE(review): despite the name, this cythonizes when the hashes do
    NOT match (i.e. when something changed) and skips when unchanged.
    Mutates *hashes* in place with the post-generation hash triple.
    """
    full_cython_path = os.path.join(path, cython_file)
    full_header_path = full_cython_path.replace('.pyx', '.pxd')
    full_gen_file_path = os.path.join(path, gen_file)
    # Compare the current on-disk hashes against the stored record.
    current_hash = get_hash_tuple(full_header_path, full_cython_path,
                                  full_gen_file_path)
    if current_hash == hashes.get(clean_path(full_cython_path)):
        print('%s has not changed' % full_cython_path)
        return
    print('Processing %s' % full_cython_path)
    cythonize(full_cython_path, full_gen_file_path)
    # The generated file just changed, so recompute before recording.
    current_hash = get_hash_tuple(full_header_path, full_cython_path,
                                  full_gen_file_path)
    hashes[clean_path(full_cython_path)] = current_hash
def check_and_cythonize(root_dir):
    """Walk *root_dir* and cythonize every out-of-date .pyx file."""
    print(root_dir)
    hashes = load_hashes(HASH_FILE)
    for cur_dir, dirs, files in os.walk(root_dir):
        for filename in files:
            if not filename.endswith('.pyx'):
                continue
            # Cython files with libcpp imports should be compiled to cpp
            with open(os.path.join(cur_dir, filename), 'rb') as f:
                uses_libcpp = re.search(b"libcpp", f.read(), re.I | re.M)
            gen_file_ext = ".cpp" if uses_libcpp else '.c'
            gen_file = filename.replace('.pyx', gen_file_ext)
            cythonize_if_unchanged(cur_dir, filename, gen_file, hashes)
            # Save hashes once per module. This prevents cythonizing prev.
            # files again when debugging broken code in a single file
            save_hashes(hashes, HASH_FILE)
def main(root_dir=DEFAULT_ROOT):
    """Entry point: cythonize everything under *root_dir*."""
    check_and_cythonize(root_dir)


if __name__ == '__main__':
    # Optional positional argument overrides the default root package dir.
    try:
        root_dir_arg = sys.argv[1]
    except IndexError:
        root_dir_arg = DEFAULT_ROOT
    main(root_dir_arg)
|
bsd-3-clause
|
arengela/AngelaUCSFCodeAll
|
koepsell-phase-coupling-estimation-271441c/python/phasemodel/tests/test_plotlib.py
|
2
|
2277
|
# Make sure the phasemodel package (two directory levels up from this
# test file) is importable when running straight from the source tree.
import sys,os
cwd = os.path.abspath(os.path.dirname(__file__))
sys.path.insert(0,os.path.join(cwd,"..",".."))
import numpy as np
import phasemodel
import matplotlib.pyplot as plt
import nose
@nose.tools.nottest
def test_plot_phasedist():
    """Smoke-test plot_phasedist on the bundled three-phase fixture."""
    # load test data
    datadir = os.path.join(os.path.dirname(phasemodel.__file__),'tests','testdata')
    mdict = np.load(os.path.join(datadir,'three_phases_v2.npz'))
    # Inject every array from the .npz into module globals; `data` below
    # is defined this way (invisible to static analysis).
    for var in mdict.files: globals()[var] = mdict[var]
    phasemodel.plotlib.plot_phasedist(data)
@nose.tools.nottest
def test_plot_joint_phasedist():
    """Smoke-test plot_joint_phasedist on the bundled fixture."""
    # load test data
    datadir = os.path.join(os.path.dirname(phasemodel.__file__),'tests','testdata')
    mdict = np.load(os.path.join(datadir,'three_phases_v2.npz'))
    # Inject fixture arrays into module globals; `data` below comes from
    # the .npz file (invisible to static analysis).
    for var in mdict.files: globals()[var] = mdict[var]
    phasemodel.plotlib.plot_joint_phasedist(data)
@nose.tools.nottest
def test_plot_graph():
    """Smoke-test plot_graph with two different angular layouts."""
    # load test data
    datadir = os.path.join(os.path.dirname(phasemodel.__file__),'tests','testdata')
    mdict = np.load(os.path.join(datadir,'three_phases_v2.npz'))
    # Inject fixture arrays into module globals; `K_true` below comes
    # from the .npz file (invisible to static analysis).
    for var in mdict.files: globals()[var] = mdict[var]
    fig = plt.figure()
    # Left: half-circle layout; right: full circle without the endpoint.
    ax = fig.add_subplot(121)
    phasemodel.plotlib.plot_graph(np.abs(K_true),start_angle=.5*np.pi,stop_angle=1.5*np.pi,endpoint=True,ax=ax)
    ax = fig.add_subplot(122)
    phasemodel.plotlib.plot_graph(np.abs(K_true),start_angle=0,stop_angle=2*np.pi,endpoint=False,ax=ax)
@nose.tools.nottest
def test_plot_matrix():
    """Smoke-test plot_matrix plus a manually placed horizontal colorbar."""
    # load test data
    datadir = os.path.join(os.path.dirname(phasemodel.__file__),'tests','testdata')
    mdict = np.load(os.path.join(datadir,'three_phases_v2.npz'))
    # Inject fixture arrays into module globals; `K_true` below comes
    # from the .npz file (invisible to static analysis).
    for var in mdict.files: globals()[var] = mdict[var]
    fig = plt.figure()
    ax = fig.add_subplot(111)
    phasemodel.plotlib.plot_matrix(np.abs(K_true),ax=ax)
    # color bar
    # NOTE(review): `from matplotlib import mpl` only exists in very old
    # matplotlib releases (removed in 1.3) -- confirm the pinned version.
    from matplotlib import mpl
    fig.subplots_adjust(bottom=.25)
    ax = fig.add_axes([0.15, 0.15, 0.7, 0.05])
    norm = mpl.colors.Normalize(vmin=0, vmax=np.abs(K_true).max())
    mpl.colorbar.ColorbarBase(ax, cmap=plt.cm.Reds, norm=norm, orientation='horizontal')
if __name__ == "__main__":
test_plot_graph()
test_plot_phasedist()
test_plot_joint_phasedist()
test_plot_matrix()
plt.show()
|
bsd-3-clause
|
abimannans/scikit-learn
|
sklearn/neural_network/rbm.py
|
206
|
12292
|
"""Restricted Boltzmann Machine
"""
# Authors: Yann N. Dauphin <[email protected]>
# Vlad Niculae
# Gabriel Synnaeve
# Lars Buitinck
# License: BSD 3 clause
import time
import numpy as np
import scipy.sparse as sp
from ..base import BaseEstimator
from ..base import TransformerMixin
from ..externals.six.moves import xrange
from ..utils import check_array
from ..utils import check_random_state
from ..utils import gen_even_slices
from ..utils import issparse
from ..utils.extmath import safe_sparse_dot
from ..utils.extmath import log_logistic
from ..utils.fixes import expit # logistic function
from ..utils.validation import check_is_fitted
class BernoulliRBM(BaseEstimator, TransformerMixin):
"""Bernoulli Restricted Boltzmann Machine (RBM).
A Restricted Boltzmann Machine with binary visible units and
binary hiddens. Parameters are estimated using Stochastic Maximum
Likelihood (SML), also known as Persistent Contrastive Divergence (PCD)
[2].
The time complexity of this implementation is ``O(d ** 2)`` assuming
d ~ n_features ~ n_components.
Read more in the :ref:`User Guide <rbm>`.
Parameters
----------
n_components : int, optional
Number of binary hidden units.
learning_rate : float, optional
The learning rate for weight updates. It is *highly* recommended
to tune this hyper-parameter. Reasonable values are in the
10**[0., -3.] range.
batch_size : int, optional
Number of examples per minibatch.
n_iter : int, optional
Number of iterations/sweeps over the training dataset to perform
during training.
verbose : int, optional
The verbosity level. The default, zero, means silent mode.
random_state : integer or numpy.RandomState, optional
A random number generator instance to define the state of the
random permutations generator. If an integer is given, it fixes the
seed. Defaults to the global numpy random number generator.
Attributes
----------
intercept_hidden_ : array-like, shape (n_components,)
Biases of the hidden units.
intercept_visible_ : array-like, shape (n_features,)
Biases of the visible units.
components_ : array-like, shape (n_components, n_features)
Weight matrix, where n_features in the number of
visible units and n_components is the number of hidden units.
Examples
--------
>>> import numpy as np
>>> from sklearn.neural_network import BernoulliRBM
>>> X = np.array([[0, 0, 0], [0, 1, 1], [1, 0, 1], [1, 1, 1]])
>>> model = BernoulliRBM(n_components=2)
>>> model.fit(X)
BernoulliRBM(batch_size=10, learning_rate=0.1, n_components=2, n_iter=10,
random_state=None, verbose=0)
References
----------
[1] Hinton, G. E., Osindero, S. and Teh, Y. A fast learning algorithm for
deep belief nets. Neural Computation 18, pp 1527-1554.
http://www.cs.toronto.edu/~hinton/absps/fastnc.pdf
[2] Tieleman, T. Training Restricted Boltzmann Machines using
Approximations to the Likelihood Gradient. International Conference
on Machine Learning (ICML) 2008
"""
    def __init__(self, n_components=256, learning_rate=0.1, batch_size=10,
                 n_iter=10, verbose=0, random_state=None):
        # Hyper-parameters are stored verbatim and validated only at fit
        # time (scikit-learn convention, so get_params/set_params
        # round-trip cleanly).
        self.n_components = n_components
        self.learning_rate = learning_rate
        self.batch_size = batch_size
        self.n_iter = n_iter
        self.verbose = verbose
        self.random_state = random_state
def transform(self, X):
"""Compute the hidden layer activation probabilities, P(h=1|v=X).
Parameters
----------
X : {array-like, sparse matrix} shape (n_samples, n_features)
The data to be transformed.
Returns
-------
h : array, shape (n_samples, n_components)
Latent representations of the data.
"""
check_is_fitted(self, "components_")
X = check_array(X, accept_sparse='csr', dtype=np.float)
return self._mean_hiddens(X)
def _mean_hiddens(self, v):
"""Computes the probabilities P(h=1|v).
Parameters
----------
v : array-like, shape (n_samples, n_features)
Values of the visible layer.
Returns
-------
h : array-like, shape (n_samples, n_components)
Corresponding mean field values for the hidden layer.
"""
p = safe_sparse_dot(v, self.components_.T)
p += self.intercept_hidden_
return expit(p, out=p)
def _sample_hiddens(self, v, rng):
"""Sample from the distribution P(h|v).
Parameters
----------
v : array-like, shape (n_samples, n_features)
Values of the visible layer to sample from.
rng : RandomState
Random number generator to use.
Returns
-------
h : array-like, shape (n_samples, n_components)
Values of the hidden layer.
"""
p = self._mean_hiddens(v)
return (rng.random_sample(size=p.shape) < p)
def _sample_visibles(self, h, rng):
"""Sample from the distribution P(v|h).
Parameters
----------
h : array-like, shape (n_samples, n_components)
Values of the hidden layer to sample from.
rng : RandomState
Random number generator to use.
Returns
-------
v : array-like, shape (n_samples, n_features)
Values of the visible layer.
"""
p = np.dot(h, self.components_)
p += self.intercept_visible_
expit(p, out=p)
return (rng.random_sample(size=p.shape) < p)
def _free_energy(self, v):
"""Computes the free energy F(v) = - log sum_h exp(-E(v,h)).
Parameters
----------
v : array-like, shape (n_samples, n_features)
Values of the visible layer.
Returns
-------
free_energy : array-like, shape (n_samples,)
The value of the free energy.
"""
return (- safe_sparse_dot(v, self.intercept_visible_)
- np.logaddexp(0, safe_sparse_dot(v, self.components_.T)
+ self.intercept_hidden_).sum(axis=1))
def gibbs(self, v):
"""Perform one Gibbs sampling step.
Parameters
----------
v : array-like, shape (n_samples, n_features)
Values of the visible layer to start from.
Returns
-------
v_new : array-like, shape (n_samples, n_features)
Values of the visible layer after one Gibbs step.
"""
check_is_fitted(self, "components_")
if not hasattr(self, "random_state_"):
self.random_state_ = check_random_state(self.random_state)
h_ = self._sample_hiddens(v, self.random_state_)
v_ = self._sample_visibles(h_, self.random_state_)
return v_
def partial_fit(self, X, y=None):
"""Fit the model to the data X which should contain a partial
segment of the data.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
Returns
-------
self : BernoulliRBM
The fitted model.
"""
X = check_array(X, accept_sparse='csr', dtype=np.float)
if not hasattr(self, 'random_state_'):
self.random_state_ = check_random_state(self.random_state)
if not hasattr(self, 'components_'):
self.components_ = np.asarray(
self.random_state_.normal(
0,
0.01,
(self.n_components, X.shape[1])
),
order='fortran')
if not hasattr(self, 'intercept_hidden_'):
self.intercept_hidden_ = np.zeros(self.n_components, )
if not hasattr(self, 'intercept_visible_'):
self.intercept_visible_ = np.zeros(X.shape[1], )
if not hasattr(self, 'h_samples_'):
self.h_samples_ = np.zeros((self.batch_size, self.n_components))
self._fit(X, self.random_state_)
    def _fit(self, v_pos, rng):
        """Inner fit for one mini-batch.

        Adjust the parameters to maximize the likelihood of v using
        Stochastic Maximum Likelihood (SML).

        Parameters
        ----------
        v_pos : array-like, shape (n_samples, n_features)
            The data to use for training.

        rng : RandomState
            Random number generator to use for sampling.
        """
        # Positive phase: hidden probabilities given the data.
        h_pos = self._mean_hiddens(v_pos)
        # Negative phase: reconstruct visibles from the persistent fantasy
        # particles (PCD), then recompute hidden probabilities.
        v_neg = self._sample_visibles(self.h_samples_, rng)
        h_neg = self._mean_hiddens(v_neg)

        lr = float(self.learning_rate) / v_pos.shape[0]
        # Gradient of the weights: <v h>_data - <v h>_model.
        update = safe_sparse_dot(v_pos.T, h_pos, dense_output=True).T
        update -= np.dot(h_neg.T, v_neg)
        self.components_ += lr * update
        self.intercept_hidden_ += lr * (h_pos.sum(axis=0) - h_neg.sum(axis=0))
        self.intercept_visible_ += lr * (np.asarray(
                                         v_pos.sum(axis=0)).squeeze() -
                                         v_neg.sum(axis=0))

        # Binarize the fantasy hiddens in place: entries drawn below their
        # probability are forced to 1.0, everything else floors to 0.
        h_neg[rng.uniform(size=h_neg.shape) < h_neg] = 1.0  # sample binomial
        self.h_samples_ = np.floor(h_neg, h_neg)
    def score_samples(self, X):
        """Compute the pseudo-likelihood of X.

        Parameters
        ----------
        X : {array-like, sparse matrix} shape (n_samples, n_features)
            Values of the visible layer. Must be all-boolean (not checked).

        Returns
        -------
        pseudo_likelihood : array-like, shape (n_samples,)
            Value of the pseudo-likelihood (proxy for likelihood).

        Notes
        -----
        This method is not deterministic: it computes a quantity called the
        free energy on X, then on a randomly corrupted version of X, and
        returns the log of the logistic function of the difference.
        """
        check_is_fitted(self, "components_")

        v = check_array(X, accept_sparse='csr')
        rng = check_random_state(self.random_state)

        # Randomly corrupt one feature in each sample in v.
        ind = (np.arange(v.shape[0]),
               rng.randint(0, v.shape[1], v.shape[0]))
        if issparse(v):
            # Flip the chosen boolean entries by adding (1 - 2v) at those
            # coordinates. NOTE(review): ``sp`` is presumably scipy.sparse
            # per the module imports (not visible here) — confirm.
            data = -2 * v[ind] + 1
            v_ = v + sp.csr_matrix((data.A.ravel(), ind), shape=v.shape)
        else:
            v_ = v.copy()
            v_[ind] = 1 - v_[ind]

        # Pseudo-likelihood proxy, scaled by n_features since only one
        # feature per sample was corrupted.
        fe = self._free_energy(v)
        fe_ = self._free_energy(v_)
        return v.shape[1] * log_logistic(fe_ - fe)
def fit(self, X, y=None):
"""Fit the model to the data X.
Parameters
----------
X : {array-like, sparse matrix} shape (n_samples, n_features)
Training data.
Returns
-------
self : BernoulliRBM
The fitted model.
"""
X = check_array(X, accept_sparse='csr', dtype=np.float)
n_samples = X.shape[0]
rng = check_random_state(self.random_state)
self.components_ = np.asarray(
rng.normal(0, 0.01, (self.n_components, X.shape[1])),
order='fortran')
self.intercept_hidden_ = np.zeros(self.n_components, )
self.intercept_visible_ = np.zeros(X.shape[1], )
self.h_samples_ = np.zeros((self.batch_size, self.n_components))
n_batches = int(np.ceil(float(n_samples) / self.batch_size))
batch_slices = list(gen_even_slices(n_batches * self.batch_size,
n_batches, n_samples))
verbose = self.verbose
begin = time.time()
for iteration in xrange(1, self.n_iter + 1):
for batch_slice in batch_slices:
self._fit(X[batch_slice], rng)
if verbose:
end = time.time()
print("[%s] Iteration %d, pseudo-likelihood = %.2f,"
" time = %.2fs"
% (type(self).__name__, iteration,
self.score_samples(X).mean(), end - begin))
begin = end
return self
|
bsd-3-clause
|
ejolly/pymer4
|
pymer4/utils.py
|
1
|
22036
|
"""Utility functions"""
__all__ = [
"get_resource_path",
"_check_random_state",
"_sig_stars",
"_robust_estimator",
"_chunk_boot_ols_coefs",
"_chunk_perm_ols",
"_permute_sign",
"_ols",
"_ols_group",
"_corr_group",
"_perm_find",
"_mean_diff",
"_return_t",
"_get_params",
"_lrt",
"_df_meta_to_arr",
"_welch_ingredients",
"_to_ranks_by_group",
"isPSD",
"nearestPSD",
"upper",
"R2con",
"con2R",
]
__author__ = ["Eshin Jolly"]
__license__ = "MIT"
import os
import numpy as np
import pandas as pd
from patsy import dmatrices
from scipy.stats import chi2
from rpy2.robjects.packages import importr
from rpy2.robjects.conversion import localconverter
from rpy2.robjects import pandas2ri
import rpy2.robjects as robjects
base = importr("base")
MAX_INT = np.iinfo(np.int32).max
def get_resource_path():
    """Return the absolute path to the bundled ``resources`` directory,
    including a trailing path separator."""
    resource_dir = os.path.join(os.path.dirname(__file__), "resources")
    return resource_dir + os.path.sep
def _mean_diff(x, y):
"""For use in plotting of tost_equivalence"""
return np.mean(x) - np.mean(y)
def _check_random_state(seed):
"""Turn seed into a np.random.RandomState instance. Note: credit for this code goes entirely to sklearn.utils.check_random_state. Using the source here simply avoids an unecessary dependency.
Args:
seed (None, int, np.RandomState): iff seed is None, return the RandomState singleton used by np.random. If seed is an int, return a new RandomState instance seeded with seed. If seed is already a RandomState instance, return it. Otherwise raise ValueError.
"""
import numbers
if seed is None or seed is np.random:
return np.random.mtrand._rand
if isinstance(seed, (numbers.Integral, np.integer)):
return np.random.RandomState(seed)
if isinstance(seed, np.random.RandomState):
return seed
raise ValueError(
"%r cannot be used to seed a numpy.random.RandomState" " instance" % seed
)
def _sig_stars(val):
"""Adds sig stars to coef table prettier output."""
star = ""
if 0 <= val < 0.001:
star = "***"
elif 0.001 <= val < 0.01:
star = "**"
elif 0.01 <= val < 0.05:
star = "*"
elif 0.05 <= val < 0.1:
star = "."
return star
def _robust_estimator(vals, X, robust_estimator="hc1", n_lags=1, cluster=None):
    """
    Computes robust sandwich estimators for standard errors used in OLS computation. Types include:
    'hc0': Huber (1980) sandwich estimator to return robust standard error estimates.
    'hc1': small sample dof correction to 'hc0'
    'hc2': alternate small sample weighting correction to 'hc0'
    'hc3': MacKinnon and White (1985) HC3 sandwich estimator. Provides more robustness in smaller samples than HC0 and HC1 Long & Ervin (2000)
    'hac': Newey-West (1987) estimator for robustness to heteroscedasticity as well as serial auto-correlation at given lags.
    Good reference: https://bit.ly/2VRb7jK

    Args:
        vals (np.ndarray): 1d array of residuals
        X (np.ndarray): design matrix used in OLS
        robust_estimator (str): estimator type, 'hc0' (default), 'hc3', 'hac', or 'cluster'
        n_lags (int): number of lags, only used with 'hac' estimator, default is 1
        cluster (np.ndarry): array of cluster ids

    Returns:
        stderr (np.ndarray): 1d array of standard errors with length == X.shape[1]

    Raises:
        ValueError: if 'cluster' is requested without a cluster id array.
    """
    assert robust_estimator in [
        "hc0",
        "hc1",
        "hc2",
        "hc3",
        "hac",
        "cluster",
    ], "robust_estimator must be one of hc0, hc1, hc2, hc3, hac, or cluster"

    # Make a sandwich!
    # First we need bread: (X'X)^-1 via pseudo-inverse for rank safety.
    bread = np.linalg.pinv(np.dot(X.T, X))

    # Then we need meat
    # First deal with estimators that have more complicated formulations
    # Cluster robust
    if robust_estimator == "cluster":
        # Good ref: http://projects.iq.harvard.edu/files/gov2001/files/sesection_5.pdf
        if cluster is None:
            raise ValueError("data column identifying clusters must be provided")
        else:
            # Score contributions, summed within cluster, with the usual
            # small-sample cluster and dof corrections.
            u = vals[:, np.newaxis] * X
            u = pd.DataFrame(u)
            # Use pandas groupby to get cluster-specific residuals
            u["Group"] = cluster
            u_clust = u.groupby("Group").sum()
            num_grps = u["Group"].nunique()
            meat = (
                (num_grps / (num_grps - 1))
                * (X.shape[0] / (X.shape[0] - X.shape[1]))
                * u_clust.T.dot(u_clust)
            )

    # Auto-correlation robust
    elif robust_estimator == "hac":
        # Bartlett kernel weights, declining linearly to zero past n_lags.
        weights = 1 - np.arange(n_lags + 1.0) / (n_lags + 1.0)

        # First compute lag 0
        V = np.diag(vals ** 2)
        meat = weights[0] * np.dot(np.dot(X.T, V), X)

        # Now loop over additional lags
        for j in range(1, n_lags + 1):

            V = np.diag(vals[j:] * vals[:-j])
            meat_1 = np.dot(np.dot(X[j:].T, V), X[:-j])
            meat_2 = np.dot(np.dot(X[:-j].T, V), X[j:])

            meat += weights[j] * (meat_1 + meat_2)

    else:
        # Otherwise deal with estimators that modify the same essential operation
        V = np.diag(vals ** 2)
        if robust_estimator == "hc0":
            # No modification of residuals
            pass
        elif robust_estimator == "hc1":
            # Degrees of freedom adjustment to HC0
            V = V * X.shape[0] / (X.shape[0] - X.shape[1])
        elif robust_estimator == "hc2":
            # Rather than dof correction, weight residuals by reciprocal of "leverage values" in the hat-matrix
            V = V / (1 - np.diag(np.dot(X, np.dot(bread, X.T))))
        elif robust_estimator == "hc3":
            # Same as hc2 but more aggressive weighting due to squaring
            V = V / (1 - np.diag(np.dot(X, np.dot(bread, X.T)))) ** 2
        meat = np.dot(np.dot(X.T, V), X)
    # Finally we make a sandwich: bread * meat * bread, whose diagonal
    # holds the robust coefficient variances.
    vcv = np.dot(np.dot(bread, meat), bread)

    return np.sqrt(np.diag(vcv))
def _whiten_wls(mat, weights):
"""
Whiten a matrix for a WLS regression. Just multiply each column of mat by sqrt(weights) if mat is 2d. Similar to statsmodels
Args:
x (np.ndarray): design matrix to be passed to _ols
weights (np.ndarray): 1d array of weights, most often variance of each group if some columns in x refer to categorical predictors
"""
if weights.shape[0] != mat.shape[0]:
raise ValueError(
"The number of weights must be the same as the number of observations"
)
if mat.ndim == 1:
return mat * np.sqrt(weights)
elif mat.ndim == 2:
# return np.column_stack([x[:,0], np.sqrt(weights)[:, None]*x[:,1:]])
return np.sqrt(weights)[:, None] * mat
def _ols(x, y, robust, n_lags, cluster, all_stats=True, resid_only=False, weights=None):
"""
Compute OLS on data. Useful for single computation and within permutation schemes.
"""
if all_stats and resid_only:
raise ValueError("_ols must be called with EITHER all_stats OR resid_only")
# Expects as input pandas series and dataframe
Y, X = y.values.squeeze(), x.values
# Whiten if required
if weights is not None:
if isinstance(weights, (pd.DataFrame, pd.Series)):
weights = weights.values
X = _whiten_wls(X, weights)
Y = _whiten_wls(Y, weights)
# The good stuff
b = np.dot(np.linalg.pinv(X), Y)
if all_stats:
res = Y - np.dot(X, b)
if robust:
se = _robust_estimator(
res, X, robust_estimator=robust, n_lags=n_lags, cluster=cluster
)
else:
sigma = np.sqrt(res.T.dot(res) / (X.shape[0] - X.shape[1]))
se = np.sqrt(np.diag(np.linalg.pinv(np.dot(X.T, X)))) * sigma
t = b / se
return b, se, t, res
elif resid_only:
return Y - np.dot(X, b)
else:
return b
def _chunk_perm_ols(x, y, robust, n_lags, cluster, weights, seed):
    """
    Permuted OLS chunk.
    """
    # Shuffle the response labels with a per-chunk seed, then refit and
    # keep only the t statistics.
    shuffled_y = y.sample(frac=1, replace=False, random_state=seed)
    _, _, t_stats, _ = _ols(
        x, shuffled_y, robust, n_lags, cluster, weights=weights, all_stats=True
    )
    return list(t_stats)
def _permute_sign(data, seed, return_stat="mean"):
"""Given a list/array of data, randomly sign flip the values and compute a new mean. For use in one-sample permutation test. Returns a 'mean' or 't-stat'."""
random_state = np.random.RandomState(seed)
new_dat = data * random_state.choice([1, -1], len(data))
if return_stat == "ceof":
return np.mean(new_dat)
elif return_stat == "t-stat":
return np.mean(new_dat) / (np.std(new_dat, ddof=1) / np.sqrt(len(new_dat)))
def _chunk_boot_ols_coefs(dat, formula, weights, seed):
    """
    OLS computation of coefficients to be used in a parallelization context.
    """
    # One bootstrap replicate: resample rows with replacement, then refit.
    boot_sample = dat.sample(frac=1, replace=True, random_state=seed)
    y, x = dmatrices(formula, boot_sample, 1, return_type="dataframe")
    coefs = _ols(
        x, y, robust=None, n_lags=1, cluster=None, all_stats=False, weights=weights
    )
    return list(coefs)
def _ols_group(dat, formula, group_col, group, rank):
    """Compute OLS on data given a formula. Used by Lm2"""
    # Restrict to this group's rows, optionally rank-transform, then fit.
    subset = dat[dat[group_col] == group].reset_index(drop=True)
    if rank:
        subset = subset.rank()
    y, x = dmatrices(formula, subset, 1, return_type="dataframe")
    return list(_ols(x, y, robust=None, n_lags=1, cluster=None, all_stats=False))
def _corr_group(dat, formula, group_col, group, rank, corr_type):
    """Compute partial correlations via OLS. Used by Lm2

    For each non-intercept predictor, the other predictors are regressed
    out of it; the correlation between those residuals and the DV
    (residualized too for 'partial', raw for 'semi') is returned.
    """
    from scipy.stats import pearsonr

    # Restrict to this group's rows.
    dat = dat[dat[group_col] == group].reset_index(drop=True)
    if rank:
        dat = dat.rank()
    y, x = dmatrices(formula, dat, 1, return_type="dataframe")
    corrs = []
    # Skip column 0 (the intercept term patsy places first).
    for c in x.columns[1:]:
        other_preds = [e for e in x.columns if e != c]
        other_preds = x[other_preds]
        cc = x[c]
        # Residualize this predictor on all of the others.
        pred_m_resid = _ols(
            other_preds,
            cc,
            robust=None,
            n_lags=1,
            cluster=None,
            all_stats=False,
            resid_only=True,
        )
        if corr_type == "semi":
            # Semi-partial: correlate against the raw DV.
            dv_m_resid = y.values.squeeze()
        elif corr_type == "partial":
            # Partial: residualize the DV on the other predictors as well.
            dv_m_resid = _ols(
                other_preds,
                y,
                robust=None,
                n_lags=1,
                cluster=None,
                all_stats=False,
                resid_only=True,
            )
        corrs.append(pearsonr(dv_m_resid, pred_m_resid)[0])
    return corrs
def _to_ranks_by_group(dat, group, formula, exclude_cols=[]):
"""
Covert predictors to ranks separately for each group for use in rank Lmer. Any columns not in the model formula or in exclude_cols will not be converted to ranks. Used by models.Lmer
Args:
dat (pd.DataFrame): dataframe of data
group (string): string name of column to group data on
formula (string): Lmer flavored model formula with random effects
exclude_cols (list): optional columns that are part of the formula to exclude from rank conversion.
Returns:
pandas.core.frame.DataFrame: ranked data
"""
if (not isinstance(group, str)) and (group not in dat.columns):
raise TypeError(
"group must be a valid column name in the dataframe. Currently only 1 grouping variable is supported."
)
if isinstance(exclude_cols, str):
exclude_cols = [exclude_cols]
original_col_order = list(dat.columns)
formula = formula.replace(" ", "")
to_rank = formula.split("~")[-1].split("(")[0].split("+")[:-1]
# add dv to be ranked
to_rank.append(formula.split("~")[0])
to_rank = [c for c in to_rank if c not in exclude_cols]
other_cols = [c for c in dat.columns if c not in to_rank]
dat = pd.concat(
[dat[other_cols], dat.groupby(group).apply(lambda g: g[to_rank].rank())], axis=1
)
return dat[original_col_order]
def _perm_find(arr, x):
"""
Find permutation cutoff in array. Two-tailed only
"""
return (np.sum(np.abs(arr) >= np.abs(x)) + 1) / (float(len(arr)) + 1)
def isPSD(mat, tol=1e-8):
    """
    Check if matrix is positive-semi-definite by virtue of all its eigenvalues being >= 0. The cholesky decomposition does not work for edge cases because np.linalg.cholesky fails on matrices with exactly 0 valued eigenvalues, whereas in Matlab this is not true, so that method appropriate. Ref: https://goo.gl/qKWWzJ

    Args:
        mat (np.ndarray): 2d numpy array

    Returns:
        bool: whether matrix is postive-semi-definite
    """
    # We dont assume matrix is Hermitian, i.e. real-valued and symmetric
    # Could swap this out with np.linalg.eigvalsh(), which is faster but less general
    eigenvalues = np.linalg.eigvals(mat)
    return np.all(eigenvalues > -tol)
def nearestPSD(mat, nit=100):
    """
    Higham (2000) algorithm to find the nearest positive semi-definite matrix that minimizes the Frobenius distance/norm. Statsmodels using something very similar in corr_nearest(), but with spectral SGD to search for a local minima. Reference: https://goo.gl/Eut7UU

    Args:
        mat (np.ndarray): 2d numpy array
        nit (int): number of iterations to run algorithm; more iterations improves accuracy but increases computation time.

    Returns:
        np.ndarray: closest positive-semi-definite 2d numpy array
    """
    n = mat.shape[0]
    W = np.identity(n)

    def _getAplus(mat):
        # Project onto the PSD cone by zeroing out negative eigenvalues.
        eigval, eigvec = np.linalg.eig(mat)
        Q = np.matrix(eigvec)
        xdiag = np.matrix(np.diag(np.maximum(eigval, 0)))
        return Q * xdiag * Q.T

    def _getPs(mat, W=None):
        # Weighted PSD projection step.
        W05 = np.matrix(W ** 0.5)
        return W05.I * _getAplus(W05 * mat * W05) * W05.I

    def _getPu(mat, W=None):
        # Restore the entries fixed by W (here: the unit diagonal).
        Aret = np.array(mat.copy())
        Aret[W > 0] = np.array(W)[W > 0]
        return np.matrix(Aret)

    # W is the matrix used for the norm (assumed to be Identity matrix here)
    # the algorithm should work for any diagonal W
    deltaS = 0
    Yk = mat.copy()
    for _ in range(nit):
        Rk = Yk - deltaS
        Xk = _getPs(Rk, W=W)
        deltaS = Xk - Rk
        Yk = _getPu(Xk, W=W)
    # Double check returned matrix is PSD
    if isPSD(Yk):
        return Yk
    # Bug fix: the retry previously called nearestPSD(Yk) without `return`,
    # discarding the recursion's result and returning None. Propagate it.
    return nearestPSD(Yk)
def upper(mat):
    """
    Return upper triangle of matrix. Useful for grabbing unique values from a symmetric matrix.

    Args:
        mat (np.ndarray): 2d numpy array

    Returns:
        np.array: 1d numpy array of values
    """
    # k=1 excludes the diagonal, leaving only the strictly-upper entries.
    rows, cols = np.triu_indices_from(mat, k=1)
    return mat[(rows, cols)]
def _return_t(model):
    """Return t or z stat from R model summary."""
    # The last column of R's coefficient table holds the t/z values.
    unsummarized = base.unclass(base.summary(model))
    return unsummarized.rx2("coefficients")[:, -1]
def _get_params(model):
"""Get number of params in a model."""
return model.coefs.shape[0]
def _lrt(tup):
"""Likelihood ratio test between 2 models. Used by stats.lrt"""
d = np.abs(2 * (tup[0].logLike - tup[1].logLike))
return chi2.sf(d, np.abs(tup[0].coefs.shape[0] - tup[1].coefs.shape[0]))
def _welch_ingredients(x):
"""
Helper function to compute the numerator and denominator for a single group/array for use in Welch's degrees of freedom calculation. Used by stats.welch_dof
"""
numerator = x.var(ddof=1) / x.size
denominator = np.power(x.var(ddof=1) / x.size, 2) / (x.size - 1)
return [numerator, denominator]
def con2R(arr, names=None):
    """
    Convert human-readable contrasts into a form that R requires. Works like the make.contrasts() function from the gmodels package, in that it will auto-solve for the remaining orthogonal k-1 contrasts if fewer than k-1 contrasts are specified.

    Arguments:
        arr (np.ndarray): 1d or 2d numpy array with each row reflecting a unique contrast and each column a factor level
        names (list/np.ndarray): optional list of contrast names which will cast the return object as a dataframe

    Returns:
        A 2d numpy array or dataframe useable with the contrasts argument of glmer

    Raises:
        TypeError: if names is not list-like
        ValueError: on shape/name mismatches, too many contrasts, or a
            singular (perfectly co-linear) contrast matrix
    """
    if isinstance(arr, list):
        arr = np.array(arr)
    if arr.ndim < 2:
        arr = np.atleast_2d(arr)
    elif arr.ndim > 2:
        raise ValueError(
            f"input array should be 1d or 2d but a {arr.ndim}d array was passed"
        )
    nrow, ncol = arr.shape[0], arr.shape[1]

    # Validate names against the number of requested contrasts (rows).
    if names is not None:
        if not isinstance(names, (list, np.ndarray)):
            raise TypeError("names should be a list or numpy array")
        elif len(names) != nrow:
            raise ValueError(
                "names should have the same number of items as contrasts (rows)"
            )

    # At most k-1 contrasts are possible
    if nrow >= ncol:
        raise ValueError(
            f"Too many contrasts requested ({nrow}). Must be less than the number of factor levels ({ncol})."
        )

    # Pseudo-invert requested contrasts
    value = np.linalg.pinv(arr)
    v_nrow, v_ncol = value.shape[0], value.shape[1]

    # Upper triangle of R is the same as result from qr() in R.
    # QR of [1 | value] auto-completes the remaining orthogonal contrasts.
    Q, R = np.linalg.qr(np.column_stack([np.ones((v_nrow, 1)), value]), mode="complete")
    if np.linalg.matrix_rank(R) != v_ncol + 1:
        raise ValueError(
            "Singular contrast matrix. Some of the requested contrasts are perfectly co-linear."
        )
    # Keep the orthogonal completion, then overwrite the leading columns
    # with the user's requested contrasts.
    cm = Q[:, 1:ncol]
    cm[:, :v_ncol] = value

    if names is not None:
        cm = pd.DataFrame(cm, columns=names)
    return cm
def R2con(arr):
    """
    Convert R-flavored contrast matrix to intepretable contrasts as would be specified by user. Reference: https://goo.gl/E4Mms2

    Args:
        arr (np.ndarry): 2d contrast matrix output from R's contrasts() function.

    Returns:
        np.ndarray: 2d array organized as contrasts X factor levels
    """
    # Prepend the intercept column, then invert to recover the
    # user-specified contrast weights.
    n_levels = arr.shape[0]
    with_intercept = np.column_stack([np.ones((n_levels, 1)), arr])
    return np.linalg.inv(with_intercept)
def _df_meta_to_arr(df):
"""Check what kind of data exists in pandas columns or index. If string return as numpy array 'S' type, otherwise regular numpy array."""
if len(df.columns):
if isinstance(df.columns[0], str):
columns = df.columns.values.astype("S")
else:
columns = df.columns.values
else:
columns = []
if len(df.index):
if isinstance(df.index[0], str):
index = df.index.values.astype("S")
else:
index = df.index.values
else:
index = []
return columns, index
def pandas2R(df):
    """Local conversion of pandas dataframe to R dataframe as recommended by rpy2.

    Args:
        df (pd.DataFrame): dataframe to convert

    Returns:
        The R-side data.frame produced by rpy2's converter.
    """
    # localconverter scopes the pandas<->R conversion rules to this call
    # instead of mutating rpy2's global converter.
    with localconverter(robjects.default_converter + pandas2ri.converter):
        data = robjects.conversion.py2rpy(df)
    return data
def result_to_table(
    model,
    drop_intercept=True,
    iv_name="Predictor",
    round=True,
    pval_text="< .001",
    pval_thresh=0.001,
):
    """
    Nicely format the `.coefs` attribute of a fitted model. The intended use of this function is to nicely format the `.coefs` of a fitted model such that the resultant dataframe can be copied outside of python/jupyter or saved to another file (e.g. googlesheet). It's particularly well suited for use with `gspread_pandas`.

    Args:
        model (pymer.model): pymer4 model object that's already been fit
        drop_intercept (bool, optional): remove the model intercept results from the table; Default True
        iv_name (str, optional): column name of the model's independent variables. Defaults to "Predictor".
        round (bool, optional): round all numeric values to 3 decimal places. Defaults to True.
        pval_text (str, optional): what to replace p-values with when they are < pval_thres. Defaults to "< .001".
        pval_thresh (float, optional): threshold to replace p-values with. Primarily intended to be used for very small p-values (e.g. .0001), where the tradition is to display '< .001' instead of the exact p-values. Defaults to 0.001.

    Raises:
        ValueError: if the model has not been fit yet

    Returns:
        pd.DataFrame: formatted dataframe of results

    Example:

        Send model results to a google sheet, assuming `model.fit()` has already been called:

        >>> from gspread_pandas import Spread
        >>> spread = Spread('My_Results_Sheet')
        >>> formatted_results = result_to_table(model)
        >>> spread.df_to_sheet(formatted_results, replace=True, index=False)

        Now 'My_Results_Sheet' will have a copy of `formatted_results` which can be copy and pasted into a google doc as a nice auto-updating table. On new model fits, simple repeat the steps above to replace the values in the google sheet, thus triggering an update of the linked table in a google doc.
    """
    if not model.fitted:
        raise ValueError("model must be fit to format results")

    # Work on a copy so the model's own coefficient table is untouched.
    results = model.coefs.copy()
    if round:
        results = results.round(3)
    if drop_intercept:
        # lm/lmer label the intercept differently; handle both spellings.
        if "(Intercept)" in results.index:
            results = results.drop(index=["(Intercept)"])
        elif "Intercept" in results.index:
            results = results.drop(index=["Intercept"])

    # Collapse the CI bounds into one "(lo hi)" string, threshold tiny
    # p-values, then rename/reorder columns for presentation.
    results = (
        results.drop(columns=["Sig"])
        .reset_index()
        .assign(
            ci=lambda df: df[["2.5_ci", "97.5_ci"]].apply(
                lambda row: f"({' '.join(row.values.astype(str))})", axis=1
            ),
            p=lambda df: df["P-val"].apply(
                lambda val: pval_text if val < pval_thresh else str(val)
            ),
        )
        .drop(columns=["2.5_ci", "97.5_ci", "SE", "P-val"])
        .rename(
            columns={
                "index": iv_name,
                "Estimate": "b",
                "T-stat": "t",
                "DF": "df",
            }
        )
        .reindex(columns=[iv_name, "b", "ci", "t", "df", "p"])
    )
    return results
|
mit
|
arthurmensch/modl
|
modl/datasets/adhd.py
|
1
|
1645
|
from os.path import join
from modl.datasets import get_data_dirs
from nilearn.datasets.utils import _fetch_file
from sklearn.datasets.base import Bunch
from nilearn.datasets import fetch_adhd as nilearn_fetch_adhd
import pandas as pd
import os
def fetch_adhd(n_subjects=40, data_dir=None,
               url=None, resume=True,
               modl_data_dir=None,
               mask_url=None,
               verbose=1):
    """Fetch the ADHD resting-state dataset via nilearn plus a brain mask.

    Downloads (or reuses) the nilearn ADHD data, fetches a mask image into
    modl's data directory, and packages functional files, confounds and
    phenotypic data into a single Bunch.

    Parameters mirror nilearn.datasets.fetch_adhd, with the additions of
    ``modl_data_dir`` (where the mask is cached) and ``mask_url`` (override
    for the default mask location).
    """
    dataset = nilearn_fetch_adhd(n_subjects=n_subjects,
                                 data_dir=data_dir, url=url, resume=resume,
                                 verbose=verbose)
    # Walk up from the first functional file until we hit the 'adhd'
    # directory, so root_dir points at the dataset root.
    root_dir = dataset.func[0]
    tail_dir = ''
    while tail_dir != 'adhd':
        root_dir, tail_dir = os.path.split(root_dir)
    root_dir = os.path.join(root_dir, tail_dir)
    modl_data_dir = get_data_dirs(modl_data_dir)[0]
    mask_data_dir = join(modl_data_dir, 'adhd')
    if mask_url is None:
        # NOTE(review): remote default; presumably mirrors the dataset's
        # grey-matter mask — confirm the URL is still live.
        mask_url = 'http://amensch.fr/data/cogspaces/mask/mask_img.nii.gz'
    _fetch_file(mask_url, mask_data_dir, resume=resume)
    mask_img = join(mask_data_dir, 'mask_img.nii.gz')
    # Index phenotypic data by numeric subject id.
    behavioral = pd.DataFrame(dataset.phenotypic)
    behavioral.loc[:, 'Subject'] = pd.to_numeric(behavioral.loc[:, 'Subject'])
    behavioral.set_index('Subject', inplace=True)
    behavioral.index.names = ['subject']
    # One row per subject: functional image path + confounds file.
    rest = pd.DataFrame(data=list(zip(dataset.func, dataset.confounds)),
                        columns=['filename', 'confounds'],
                        index=behavioral.index)
    return Bunch(rest=rest,
                 behavioral=behavioral, description=dataset.description,
                 mask=mask_img, root=root_dir)
|
bsd-2-clause
|
shahankhatch/scikit-learn
|
examples/linear_model/plot_ols.py
|
220
|
1940
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Linear Regression Example
=========================================================
This example uses only the first feature of the `diabetes` dataset, in
order to illustrate a two-dimensional plot of this regression technique. The
straight line can be seen in the plot, showing how linear regression attempts
to draw a straight line that will best minimize the residual sum of squares
between the observed responses in the dataset, and the responses predicted by
the linear approximation.

The coefficients, the residual sum of squares and the variance score are also
calculated.
"""
print(__doc__)


# Code source: Jaques Grobler
# License: BSD 3 clause


import matplotlib.pyplot as plt
import numpy as np
from sklearn import datasets, linear_model

# Load the diabetes dataset
diabetes = datasets.load_diabetes()


# Use only one feature (column 2), kept 2d via np.newaxis for sklearn
diabetes_X = diabetes.data[:, np.newaxis, 2]

# Split the data into training/testing sets (last 20 samples held out)
diabetes_X_train = diabetes_X[:-20]
diabetes_X_test = diabetes_X[-20:]

# Split the targets into training/testing sets
diabetes_y_train = diabetes.target[:-20]
diabetes_y_test = diabetes.target[-20:]

# Create linear regression object
regr = linear_model.LinearRegression()

# Train the model using the training sets
regr.fit(diabetes_X_train, diabetes_y_train)

# The coefficients
print('Coefficients: \n', regr.coef_)
# The mean squared error on the held-out test set
print("Residual sum of squares: %.2f"
      % np.mean((regr.predict(diabetes_X_test) - diabetes_y_test) ** 2))
# Explained variance score: 1 is perfect prediction
print('Variance score: %.2f' % regr.score(diabetes_X_test, diabetes_y_test))

# Plot outputs: test points plus the fitted regression line
plt.scatter(diabetes_X_test, diabetes_y_test,  color='black')
plt.plot(diabetes_X_test, regr.predict(diabetes_X_test), color='blue',
         linewidth=3)

plt.xticks(())
plt.yticks(())

plt.show()
|
bsd-3-clause
|
simon-pepin/scikit-learn
|
sklearn/utils/tests/test_testing.py
|
144
|
4121
|
import warnings
import unittest
import sys
from nose.tools import assert_raises
from sklearn.utils.testing import (
_assert_less,
_assert_greater,
assert_less_equal,
assert_greater_equal,
assert_warns,
assert_no_warnings,
assert_equal,
set_random_state,
assert_raise_message)
from sklearn.tree import DecisionTreeClassifier
from sklearn.lda import LDA
# Only exercised when nose is installed; silently skipped otherwise.
try:
    from nose.tools import assert_less

    def test_assert_less():
        # Check that the nose implementation of assert_less gives the
        # same thing as the scikit's
        assert_less(0, 1)
        _assert_less(0, 1)
        assert_raises(AssertionError, assert_less, 1, 0)
        assert_raises(AssertionError, _assert_less, 1, 0)

except ImportError:
    pass
# Only exercised when nose is installed; silently skipped otherwise.
try:
    from nose.tools import assert_greater

    def test_assert_greater():
        # Check that the nose implementation of assert_greater gives the
        # same thing as the scikit's
        assert_greater(1, 0)
        _assert_greater(1, 0)
        assert_raises(AssertionError, assert_greater, 0, 1)
        assert_raises(AssertionError, _assert_greater, 0, 1)

except ImportError:
    pass
def test_assert_less_equal():
    # Passes for strict inequality and for equality; fails when greater.
    assert_less_equal(0, 1)
    assert_less_equal(1, 1)
    assert_raises(AssertionError, assert_less_equal, 1, 0)
def test_assert_greater_equal():
    # Passes for strict inequality and for equality; fails when smaller.
    assert_greater_equal(1, 0)
    assert_greater_equal(1, 1)
    assert_raises(AssertionError, assert_greater_equal, 0, 1)
def test_set_random_state():
    # set_random_state must tolerate estimators without a random_state
    # param (LDA) and actually set it on those that have one (tree).
    lda = LDA()
    tree = DecisionTreeClassifier()
    # LDA doesn't have random state: smoke test
    set_random_state(lda, 3)
    set_random_state(tree, 3)
    assert_equal(tree.random_state, 3)
def test_assert_raise_message():
    # Local helpers: one that raises with a chosen message, one that
    # never raises.
    def _raise_ValueError(message):
        raise ValueError(message)

    def _no_raise():
        pass

    # Matching type and message: passes.
    assert_raise_message(ValueError, "test",
                         _raise_ValueError, "test")

    # Right type, wrong message: AssertionError.
    assert_raises(AssertionError,
                  assert_raise_message, ValueError, "something else",
                  _raise_ValueError, "test")

    # Wrong expected type: the original ValueError propagates.
    assert_raises(ValueError,
                  assert_raise_message, TypeError, "something else",
                  _raise_ValueError, "test")

    # Nothing raised at all: AssertionError.
    assert_raises(AssertionError,
                  assert_raise_message, ValueError, "test",
                  _no_raise)

    # multiple exceptions in a tuple
    assert_raises(AssertionError,
                  assert_raise_message, (ValueError, AttributeError),
                  "test", _no_raise)
# This class is inspired from numpy 1.7 with an alteration to check
# the reset warning filters after calls to assert_warns.
# This assert_warns behavior is specific to scikit-learn because
#`clean_warning_registry()` is called internally by assert_warns
# and clears all previous filters.
class TestWarns(unittest.TestCase):
    def test_warn(self):
        # A function that warns and returns a value; assert_warns should
        # pass the value through.
        def f():
            warnings.warn("yo")
            return 3

        # Test that assert_warns is not impacted by externally set
        # filters and is reset internally.
        # This is because `clean_warning_registry()` is called internally by
        # assert_warns and clears all previous filters.
        warnings.simplefilter("ignore", UserWarning)
        assert_equal(assert_warns(UserWarning, f), 3)

        # Test that the warning registry is empty after assert_warns
        assert_equal(sys.modules['warnings'].filters, [])

        assert_raises(AssertionError, assert_no_warnings, f)
        assert_equal(assert_no_warnings(lambda x: x, 1), 1)

    def test_warn_wrong_warning(self):
        # Warns with a different category than the one expected below.
        def f():
            warnings.warn("yo", DeprecationWarning)

        failed = False
        # Snapshot the global warning filters so we can restore them.
        filters = sys.modules['warnings'].filters[:]
        try:
            try:
                # Should raise an AssertionError
                assert_warns(UserWarning, f)
                failed = True
            except AssertionError:
                pass
        finally:
            sys.modules['warnings'].filters = filters

        if failed:
            raise AssertionError("wrong warning caught by assert_warn")
|
bsd-3-clause
|
sridhar912/Self-Driving-Car-NanoDegree
|
CarND-Advanced-Lane-Lines/LaneDetection.py
|
1
|
6408
|
import numpy as np
import cv2
import matplotlib.pyplot as plt
from skimage.exposure import adjust_gamma
# Lighting-condition codes returned by LaneDetection.check_saturation().
NORMAL = 1
DARK = 2
BRIGHT = 3
class LaneDetection():
    """Extract candidate lane-line pixel masks from road images.

    Pipeline: undistort the frame (camera calibration), warp it to a
    bird's-eye view (perspective transform), threshold the LAB colour
    space for white/yellow lane paint, and fall back to a gradient-based
    extraction when colour statistics suggest a DARK or BRIGHT scene.
    Assumes input frames are RGB — TODO confirm against the caller.
    """

    def __init__(self, cameraInfo, prespectiveInfo):
        # Results of the most recent extract_lane_information() call.
        self.edge_bird_view = None
        self.edge_front_view = None
        self.img_undist = None
        self.img_undist_warp = None
        self.cameraInfo = cameraInfo
        self.prespectiveInfo = prespectiveInfo
        self.mtx, self.dist = cameraInfo.get_camera_parameters()
        self.mtx_perp, self.mtx_perp_inv = prespectiveInfo.get_prespective_parameters()
        # 1--> normal 2--> Dark 3-->bright
        self.condition = NORMAL

    def nonZeroCount(self, img, offset):
        # Count non-zero pixels from row `offset` down (lower image region).
        return cv2.countNonZero(img[offset:, :])

    def check_saturation(self, white_lane, yellow_lane, white_lane_warp, yellow_lane_warp, offset=480, thresh=(500, 20000)):
        """Classify scene lighting from colour-mask pixel counts.

        Counts the surviving white/yellow pixels below row `offset` in
        both the front and warped views; thresh = (too-few, too-many).
        Returns NORMAL, DARK or BRIGHT.
        """
        count_wl = self.nonZeroCount(white_lane, offset)
        count_wlw = self.nonZeroCount(white_lane_warp, offset)
        count_yl = self.nonZeroCount(yellow_lane, offset)
        count_ylw = self.nonZeroCount(yellow_lane_warp, offset)
        if (count_wl < thresh[1] and count_wlw < thresh[1]):
            # Too few white or yellow pixels (or far too many yellow) -> DARK.
            if (count_wl < thresh[0] and count_wlw < thresh[0]) or (count_yl < thresh[0] and count_ylw < thresh[0]) or (
                    count_yl > thresh[1] or count_ylw > thresh[1]):
                return DARK
            else:
                return NORMAL
        else:
            # Excessive white response -> BRIGHT.
            return BRIGHT

    def extract_color_info(self, img, threshL=(210, 250), threshB=(200, 250)):
        """Return (white_lane, yellow_lane) binary masks from LAB colour.

        White paint is thresholded on the max-normalized L channel,
        yellow paint on the max-normalized B channel.
        """
        lab = cv2.cvtColor(img, cv2.COLOR_RGB2LAB).astype(np.float)
        channelL, channelA, channelB = cv2.split(lab)
        channelL_norm = np.uint8(255 * channelL / np.max(channelL))
        white_lane = cv2.inRange(channelL_norm, threshL[0], threshL[1])
        channelB_norm = np.uint8(255 * channelB / np.max(channelB))
        yellow_lane = cv2.inRange(channelB_norm, threshB[0], threshB[1])
        #plt.imshow(channelL_norm)
        #plt.show()
        return white_lane, yellow_lane

    def extract_sobel_edge(self, img):
        # Binary (0/255) mask of scaled x-gradient magnitudes in [20, 200].
        sobel = np.absolute(cv2.Sobel(img, cv2.CV_64F, 1, 0, ksize=3))
        scaled_sobel = np.uint8(255 * sobel / np.max(sobel))
        sobel_output = np.zeros_like(scaled_sobel)
        sobel_output[(scaled_sobel >= 20) & (scaled_sobel <= 200)] = 255
        return sobel_output

    def extract_lane_information_diff_condition(self, img, condition):
        """Gradient-based fallback extraction for non-NORMAL lighting.

        Gamma-corrects the grayscale image (gamma 0.4 for DARK scenes,
        5 otherwise), thresholds x/y Sobel magnitudes, and keeps pixels
        whose arctan2(y-mask, x-mask) lies in [0.7, 0.8].
        Returns (lanes_front_view, lanes_bird_view) uint8 masks.
        """
        gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY).astype(np.float)
        if condition == 2:
            # DARK scene: gamma < 1 brightens relative response.
            gray_norm = adjust_gamma(gray, 0.4)
        else:
            gray_norm = adjust_gamma(gray, 5)
        #gray_norm = np.uint8(255 * (gray) / np.max(gray))
        sobelx = np.absolute(cv2.Sobel(gray_norm, cv2.CV_64F, 1, 0, ksize=15))
        sobely = np.absolute(cv2.Sobel(gray_norm, cv2.CV_64F, 0, 1, ksize=15))
        scaled_sobelx = np.uint8(255 * sobelx / np.max(sobelx))
        binary_outputx = np.zeros_like(scaled_sobelx)
        binary_outputx[(scaled_sobelx >= 20) & (scaled_sobelx <= 200)] = 1
        scaled_sobely = np.uint8(255 * sobely / np.max(sobely))
        binary_outputy = np.zeros_like(scaled_sobely)
        binary_outputy[(scaled_sobely >= 20) & (scaled_sobely <= 200)] = 1
        # show_images(binary_outputx,binary_outputy)
        # NOTE(review): arctan2 is applied to the binary masks, not the raw
        # gradients — presumably intentional, but worth confirming.
        absgraddir = np.arctan2((binary_outputy), (binary_outputx))
        binary_output = np.zeros_like(absgraddir)
        binary_output[(absgraddir >= 0.7) & (absgraddir <= 0.8)] = 1
        lanes_front_view = np.uint8(255 * binary_output / np.max(binary_output))
        lanes_bird_view = self.prespectiveInfo.warp_image(lanes_front_view)
        return lanes_front_view, lanes_bird_view

    def extract_lane_information(self, img, useEdge=True, show_images=False):
        """Run the full lane-extraction pipeline on one frame.

        Stores results on the instance (edge_front_view, edge_bird_view,
        img_undist, img_undist_warp, condition) rather than returning
        them; use the get_* accessors. When useEdge is True the masks are
        further reduced to their Sobel edges.
        """
        img_undist = self.cameraInfo.undistort_image(img)
        img_undist_warp = self.prespectiveInfo.warp_image(img_undist)
        # Colour masks computed both before and after warping, then fused.
        white_lane, yellow_lane = self.extract_color_info(img_undist)
        color_lane = cv2.bitwise_or(white_lane, yellow_lane)
        color_lane_warped = self.prespectiveInfo.warp_image(color_lane)
        white_lane_warp, yellow_lane_warp = self.extract_color_info(img_undist_warp)
        color_lane_warp = cv2.bitwise_or(white_lane_warp, yellow_lane_warp)
        lanes_bird_view = cv2.bitwise_or(color_lane_warp, color_lane_warped)
        lanes_front_view = self.prespectiveInfo.warp_image(lanes_bird_view, inverse=True)
        condition = self.check_saturation(white_lane, yellow_lane, white_lane_warp, yellow_lane_warp)
        if condition != 1:
            # Non-NORMAL lighting: replace colour masks with the
            # gradient-based fallback.
            # Currently not used
            #print()
            lanes_front_view, lanes_bird_view = self.extract_lane_information_diff_condition(img_undist, condition)
        if useEdge:
            edge_front_view = self.extract_sobel_edge(lanes_front_view)
            edge_bird_view = self.extract_sobel_edge(lanes_bird_view)
            self.edge_bird_view = edge_bird_view
            self.edge_front_view = edge_front_view
        else:
            self.edge_bird_view = lanes_bird_view
            self.edge_front_view = lanes_front_view
        self.img_undist = img_undist
        if show_images:
            self.show_output(img_undist, white_lane, yellow_lane)
            self.show_output(img_undist_warp, white_lane_warp, yellow_lane_warp)
            self.show_output(img_undist, self.edge_front_view, self.edge_bird_view, 'Input', 'Combined Front View', 'Combined BirdEye View')
        self.img_undist = img_undist
        self.img_undist_warp = img_undist_warp
        self.condition = condition

    def show_output(self, img1, img2, img3, t1='Input', t2='White Lane', t3='Yellow Lane'):
        """
        Display three images side by side with the given titles.
        :param img1: image shown in colour
        :param img2: image shown with a gray colormap
        :param img3: image shown with a gray colormap
        :return: None (blocks on plt.show())
        """
        f, (ax1, ax2, ax3) = plt.subplots(1, 3, figsize=(20, 10))
        ax1.imshow(img1)
        ax1.set_title(t1, fontsize=20)
        ax2.imshow(img2, cmap='gray')
        ax2.set_title(t2, fontsize=20)
        ax3.imshow(img3, cmap='gray')
        ax3.set_title(t3, fontsize=20)
        plt.show()

    def get_undistored_image(self):
        # Undistorted version of the last processed frame.
        return self.img_undist

    def get_warped_image(self):
        # Bird's-eye warp of the last undistorted frame.
        return self.img_undist_warp

    def get_lane_output(self):
        # (front-view mask, bird's-eye mask) from the last frame.
        return self.edge_front_view, self.edge_bird_view
|
mit
|
zhenv5/scikit-learn
|
sklearn/cluster/tests/test_k_means.py
|
63
|
26190
|
"""Testing for K-means"""
import sys
import numpy as np
from scipy import sparse as sp
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import SkipTest
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raises_regexp
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import if_safe_multiprocessing_with_blas
from sklearn.utils.validation import DataConversionWarning
from sklearn.utils.extmath import row_norms
from sklearn.metrics.cluster import v_measure_score
from sklearn.cluster import KMeans, k_means
from sklearn.cluster import MiniBatchKMeans
from sklearn.cluster.k_means_ import _labels_inertia
from sklearn.cluster.k_means_ import _mini_batch_step
from sklearn.datasets.samples_generator import make_blobs
from sklearn.externals.six.moves import cStringIO as StringIO
# Non-centered, sparse centers used as the shared fixture for the tests
# below (checked on both dense and CSR input).
centers = np.array([
    [0.0, 5.0, 0.0, 0.0, 0.0],
    [1.0, 1.0, 4.0, 0.0, 0.0],
    [1.0, 0.0, 0.0, 5.0, 1.0],
])
n_samples = 100
n_clusters, n_features = centers.shape
# Blobs drawn around `centers`, plus a CSR copy of the same data.
X, true_labels = make_blobs(n_samples=n_samples, centers=centers,
                            cluster_std=1., random_state=42)
X_csr = sp.csr_matrix(X)
def test_kmeans_dtype():
    """predict on uint8 data warns about dtype conversion yet matches labels_."""
    rnd = np.random.RandomState(0)
    X = rnd.normal(size=(40, 2))
    X = (X * 10).astype(np.uint8)
    km = KMeans(n_init=1).fit(X)
    pred_x = assert_warns(DataConversionWarning, km.predict, X)
    assert_array_equal(km.labels_, pred_x)
def test_labels_assignment_and_inertia():
    """_labels_inertia matches a plain-numpy reference on dense and CSR input."""
    # pure numpy implementation as easily auditable reference gold
    # implementation
    rng = np.random.RandomState(42)
    noisy_centers = centers + rng.normal(size=centers.shape)
    labels_gold = - np.ones(n_samples, dtype=np.int)
    mindist = np.empty(n_samples)
    mindist.fill(np.infty)
    for center_id in range(n_clusters):
        dist = np.sum((X - noisy_centers[center_id]) ** 2, axis=1)
        labels_gold[dist < mindist] = center_id
        mindist = np.minimum(dist, mindist)
    inertia_gold = mindist.sum()
    assert_true((mindist >= 0.0).all())
    assert_true((labels_gold != -1).all())

    # perform label assignment using the dense array input
    x_squared_norms = (X ** 2).sum(axis=1)
    labels_array, inertia_array = _labels_inertia(
        X, x_squared_norms, noisy_centers)
    assert_array_almost_equal(inertia_array, inertia_gold)
    assert_array_equal(labels_array, labels_gold)

    # perform label assignment using the sparse CSR input
    x_squared_norms_from_csr = row_norms(X_csr, squared=True)
    labels_csr, inertia_csr = _labels_inertia(
        X_csr, x_squared_norms_from_csr, noisy_centers)
    assert_array_almost_equal(inertia_csr, inertia_gold)
    assert_array_equal(labels_csr, labels_gold)
def test_minibatch_update_consistency():
    """Dense and sparse CSR minibatch updates must give identical results."""
    # Check that dense and sparse minibatch update give the same results
    rng = np.random.RandomState(42)
    old_centers = centers + rng.normal(size=centers.shape)

    new_centers = old_centers.copy()
    new_centers_csr = old_centers.copy()

    counts = np.zeros(new_centers.shape[0], dtype=np.int32)
    counts_csr = np.zeros(new_centers.shape[0], dtype=np.int32)

    x_squared_norms = (X ** 2).sum(axis=1)
    x_squared_norms_csr = row_norms(X_csr, squared=True)

    buffer = np.zeros(centers.shape[1], dtype=np.double)
    buffer_csr = np.zeros(centers.shape[1], dtype=np.double)

    # extract a small minibatch
    X_mb = X[:10]
    X_mb_csr = X_csr[:10]
    x_mb_squared_norms = x_squared_norms[:10]
    x_mb_squared_norms_csr = x_squared_norms_csr[:10]

    # step 1: compute the dense minibatch update
    old_inertia, incremental_diff = _mini_batch_step(
        X_mb, x_mb_squared_norms, new_centers, counts,
        buffer, 1, None, random_reassign=False)
    assert_greater(old_inertia, 0.0)

    # compute the new inertia on the same batch to check that it decreased
    labels, new_inertia = _labels_inertia(
        X_mb, x_mb_squared_norms, new_centers)
    assert_greater(new_inertia, 0.0)
    assert_less(new_inertia, old_inertia)

    # check that the incremental difference computation is matching the
    # final observed value
    effective_diff = np.sum((new_centers - old_centers) ** 2)
    assert_almost_equal(incremental_diff, effective_diff)

    # step 2: compute the sparse minibatch update
    old_inertia_csr, incremental_diff_csr = _mini_batch_step(
        X_mb_csr, x_mb_squared_norms_csr, new_centers_csr, counts_csr,
        buffer_csr, 1, None, random_reassign=False)
    assert_greater(old_inertia_csr, 0.0)

    # compute the new inertia on the same batch to check that it decreased
    labels_csr, new_inertia_csr = _labels_inertia(
        X_mb_csr, x_mb_squared_norms_csr, new_centers_csr)
    assert_greater(new_inertia_csr, 0.0)
    assert_less(new_inertia_csr, old_inertia_csr)

    # check that the incremental difference computation is matching the
    # final observed value
    effective_diff = np.sum((new_centers_csr - old_centers) ** 2)
    assert_almost_equal(incremental_diff_csr, effective_diff)

    # step 3: check that sparse and dense updates lead to the same results
    assert_array_equal(labels, labels_csr)
    assert_array_almost_equal(new_centers, new_centers_csr)
    assert_almost_equal(incremental_diff, incremental_diff_csr)
    assert_almost_equal(old_inertia, old_inertia_csr)
    assert_almost_equal(new_inertia, new_inertia_csr)
def _check_fitted_model(km):
    """Shared assertions for a (MiniBatch)KMeans model fitted on module X."""
    # check that the number of clusters centers and distinct labels match
    # the expectation
    centers = km.cluster_centers_
    assert_equal(centers.shape, (n_clusters, n_features))

    labels = km.labels_
    assert_equal(np.unique(labels).shape[0], n_clusters)

    # check that the labels assignment are perfect (up to a permutation)
    assert_equal(v_measure_score(true_labels, labels), 1.0)
    assert_greater(km.inertia_, 0.0)

    # check error on dataset being too small
    assert_raises(ValueError, km.fit, [[0., 1.]])
def test_k_means_plus_plus_init():
    """k-means++ init on dense input yields a correctly fitted model."""
    estimator = KMeans(init="k-means++", n_clusters=n_clusters,
                       random_state=42)
    _check_fitted_model(estimator.fit(X))
def test_k_means_new_centers():
    """Center reassignment yields the expected labeling (dense and sparse)."""
    # Explore the part of the code where a new center is reassigned
    X = np.array([[0, 0, 1, 1],
                  [0, 0, 0, 0],
                  [0, 1, 0, 0],
                  [0, 0, 0, 0],
                  [0, 0, 0, 0],
                  [0, 1, 0, 0]])
    labels = [0, 1, 2, 1, 1, 2]
    # Deliberately poor starting centers so that one gets reassigned.
    bad_centers = np.array([[+0, 1, 0, 0],
                            [.2, 0, .2, .2],
                            [+0, 0, 0, 0]])

    km = KMeans(n_clusters=3, init=bad_centers, n_init=1, max_iter=10,
                random_state=1)
    for this_X in (X, sp.coo_matrix(X)):
        km.fit(this_X)
        this_labels = km.labels_
        # Reorder the labels so that the first instance is in cluster 0,
        # the second in cluster 1, ...
        this_labels = np.unique(this_labels, return_index=True)[1][this_labels]
        np.testing.assert_array_equal(this_labels, labels)
@if_safe_multiprocessing_with_blas
def test_k_means_plus_plus_init_2_jobs():
    """Parallel (n_jobs=2) k-means++ fit still produces a valid model."""
    if sys.version_info[:2] < (3, 4):
        raise SkipTest(
            "Possible multi-process bug with some BLAS under Python < 3.4")
    km = KMeans(init="k-means++", n_clusters=n_clusters, n_jobs=2,
                random_state=42).fit(X)
    _check_fitted_model(km)
def test_k_means_precompute_distances_flag():
    """An unsupported precompute_distances value raises ValueError at fit."""
    estimator = KMeans(precompute_distances="wrong")
    assert_raises(ValueError, estimator.fit, X)
def test_k_means_plus_plus_init_sparse():
    """k-means++ init also works on CSR sparse input."""
    estimator = KMeans(init="k-means++", n_clusters=n_clusters,
                       random_state=42)
    estimator.fit(X_csr)
    _check_fitted_model(estimator)
def test_k_means_random_init():
    """Random init on dense data fits the blobs perfectly."""
    estimator = KMeans(init="random", n_clusters=n_clusters, random_state=42)
    estimator.fit(X)
    _check_fitted_model(estimator)
def test_k_means_random_init_sparse():
    """Random init also works on CSR sparse input."""
    estimator = KMeans(init="random", n_clusters=n_clusters, random_state=42)
    estimator.fit(X_csr)
    _check_fitted_model(estimator)
def test_k_means_plus_plus_init_not_precomputed():
    """k-means++ init works with precompute_distances disabled."""
    estimator = KMeans(init="k-means++", n_clusters=n_clusters,
                       random_state=42, precompute_distances=False)
    _check_fitted_model(estimator.fit(X))
def test_k_means_random_init_not_precomputed():
    """Random init works with precompute_distances disabled."""
    estimator = KMeans(init="random", n_clusters=n_clusters,
                       random_state=42, precompute_distances=False)
    _check_fitted_model(estimator.fit(X))
def test_k_means_perfect_init():
    """Initializing at the true centers yields a valid fitted model."""
    estimator = KMeans(init=centers.copy(), n_clusters=n_clusters,
                       random_state=42, n_init=1)
    estimator.fit(X)
    _check_fitted_model(estimator)
def test_k_means_n_init():
    """Invalid n_init values (0, -1) raise an informative ValueError."""
    rnd = np.random.RandomState(0)
    X = rnd.normal(size=(40, 2))

    # two regression tests on bad n_init argument
    # previous bug: n_init <= 0 threw non-informative TypeError (#3858)
    assert_raises_regexp(ValueError, "n_init", KMeans(n_init=0).fit, X)
    assert_raises_regexp(ValueError, "n_init", KMeans(n_init=-1).fit, X)
def test_k_means_fortran_aligned_data():
    """KMeans handles Fortran-ordered (column-major) input arrays."""
    # Check the KMeans will work well, even if X is a fortran-aligned data.
    X = np.asfortranarray([[0, 0], [0, 1], [0, 1]])
    centers = np.array([[0, 0], [0, 1]])
    labels = np.array([0, 1, 1])
    km = KMeans(n_init=1, init=centers, precompute_distances=False,
                random_state=42)
    km.fit(X)
    assert_array_equal(km.cluster_centers_, centers)
    assert_array_equal(km.labels_, labels)
def test_mb_k_means_plus_plus_init_dense_array():
    """MiniBatchKMeans with k-means++ init fits dense data correctly."""
    estimator = MiniBatchKMeans(init="k-means++", n_clusters=n_clusters,
                                random_state=42)
    estimator.fit(X)
    _check_fitted_model(estimator)
def test_mb_kmeans_verbose():
    """Smoke-test verbose MiniBatchKMeans output (stdout is captured)."""
    mb_k_means = MiniBatchKMeans(init="k-means++", n_clusters=n_clusters,
                                 random_state=42, verbose=1)
    # Redirect stdout so the verbose output does not pollute the test log;
    # restore it even if fit raises.
    old_stdout = sys.stdout
    sys.stdout = StringIO()
    try:
        mb_k_means.fit(X)
    finally:
        sys.stdout = old_stdout
def test_mb_k_means_plus_plus_init_sparse_matrix():
    """MiniBatchKMeans with k-means++ init fits CSR data correctly."""
    estimator = MiniBatchKMeans(init="k-means++", n_clusters=n_clusters,
                                random_state=42)
    estimator.fit(X_csr)
    _check_fitted_model(estimator)
def test_minibatch_init_with_large_k():
    """n_clusters larger than init_size triggers a RuntimeWarning at fit."""
    estimator = MiniBatchKMeans(init='k-means++', init_size=10, n_clusters=20)
    # 20 clusters but only 10 init samples: the fit must warn.
    assert_warns(RuntimeWarning, estimator.fit, X)
def test_minibatch_k_means_random_init_dense_array():
    """Random init on dense data; n_init=10 keeps the random init stable."""
    estimator = MiniBatchKMeans(init="random", n_clusters=n_clusters,
                                random_state=42, n_init=10)
    _check_fitted_model(estimator.fit(X))
def test_minibatch_k_means_random_init_sparse_csr():
    """Random init on CSR data; n_init=10 keeps the random init stable."""
    estimator = MiniBatchKMeans(init="random", n_clusters=n_clusters,
                                random_state=42, n_init=10)
    _check_fitted_model(estimator.fit(X_csr))
def test_minibatch_k_means_perfect_init_dense_array():
    """Starting from the true centers yields a valid model on dense data."""
    estimator = MiniBatchKMeans(init=centers.copy(), n_clusters=n_clusters,
                                random_state=42, n_init=1)
    _check_fitted_model(estimator.fit(X))
def test_minibatch_k_means_init_multiple_runs_with_explicit_centers():
    """Explicit centers combined with n_init > 1 triggers a RuntimeWarning."""
    estimator = MiniBatchKMeans(init=centers.copy(), n_clusters=n_clusters,
                                random_state=42, n_init=10)
    assert_warns(RuntimeWarning, estimator.fit, X)
def test_minibatch_k_means_perfect_init_sparse_csr():
    """Starting from the true centers yields a valid model on CSR data."""
    estimator = MiniBatchKMeans(init=centers.copy(), n_clusters=n_clusters,
                                random_state=42, n_init=1)
    _check_fitted_model(estimator.fit(X_csr))
def test_minibatch_sensible_reassign_fit():
    """Degenerate (zeroed) samples must not leave many all-zero centers."""
    # check if identical initial clusters are reassigned
    # also a regression test for when there are more desired reassignments than
    # samples.
    zeroed_X, true_labels = make_blobs(n_samples=100, centers=5,
                                       cluster_std=1., random_state=42)
    zeroed_X[::2, :] = 0
    mb_k_means = MiniBatchKMeans(n_clusters=20, batch_size=10, random_state=42,
                                 init="random")
    mb_k_means.fit(zeroed_X)
    # there should not be too many exact zero cluster centers
    assert_greater(mb_k_means.cluster_centers_.any(axis=1).sum(), 10)

    # do the same with batch-size > X.shape[0] (regression test)
    mb_k_means = MiniBatchKMeans(n_clusters=20, batch_size=201,
                                 random_state=42, init="random")
    mb_k_means.fit(zeroed_X)
    # there should not be too many exact zero cluster centers
    assert_greater(mb_k_means.cluster_centers_.any(axis=1).sum(), 10)
def test_minibatch_sensible_reassign_partial_fit():
    """Same reassignment check as above, but via repeated partial_fit."""
    zeroed_X, true_labels = make_blobs(n_samples=n_samples, centers=5,
                                       cluster_std=1., random_state=42)
    zeroed_X[::2, :] = 0
    mb_k_means = MiniBatchKMeans(n_clusters=20, random_state=42, init="random")
    for i in range(100):
        mb_k_means.partial_fit(zeroed_X)
    # there should not be too many exact zero cluster centers
    assert_greater(mb_k_means.cluster_centers_.any(axis=1).sum(), 10)
def test_minibatch_reassign():
    """Huge reassignment_ratio degrades a good model; tiny ratio keeps it."""
    # Give a perfect initialization, but a large reassignment_ratio,
    # as a result all the centers should be reassigned and the model
    # should not longer be good
    for this_X in (X, X_csr):
        mb_k_means = MiniBatchKMeans(n_clusters=n_clusters, batch_size=100,
                                     random_state=42)
        mb_k_means.fit(this_X)
        score_before = mb_k_means.score(this_X)
        try:
            old_stdout = sys.stdout
            sys.stdout = StringIO()
            # Turn on verbosity to smoke test the display code
            _mini_batch_step(this_X, (X ** 2).sum(axis=1),
                             mb_k_means.cluster_centers_,
                             mb_k_means.counts_,
                             np.zeros(X.shape[1], np.double),
                             False, distances=np.zeros(X.shape[0]),
                             random_reassign=True, random_state=42,
                             reassignment_ratio=1, verbose=True)
        finally:
            sys.stdout = old_stdout
        assert_greater(score_before, mb_k_means.score(this_X))

    # Give a perfect initialization, with a small reassignment_ratio,
    # no center should be reassigned
    for this_X in (X, X_csr):
        mb_k_means = MiniBatchKMeans(n_clusters=n_clusters, batch_size=100,
                                     init=centers.copy(),
                                     random_state=42, n_init=1)
        mb_k_means.fit(this_X)
        clusters_before = mb_k_means.cluster_centers_
        # Turn on verbosity to smoke test the display code
        _mini_batch_step(this_X, (X ** 2).sum(axis=1),
                         mb_k_means.cluster_centers_,
                         mb_k_means.counts_,
                         np.zeros(X.shape[1], np.double),
                         False, distances=np.zeros(X.shape[0]),
                         random_reassign=True, random_state=42,
                         reassignment_ratio=1e-15)
        assert_array_almost_equal(clusters_before, mb_k_means.cluster_centers_)
def test_minibatch_with_many_reassignments():
    """Fit succeeds when the reassignment count exceeds the batch_size."""
    # Test for the case that the number of clusters to reassign is bigger
    # than the batch_size
    n_samples = 550
    rnd = np.random.RandomState(42)
    X = rnd.uniform(size=(n_samples, 10))
    # Check that the fit works if n_clusters is bigger than the batch_size.
    # Run the test with 550 clusters and 550 samples, because it turned out
    # that this values ensure that the number of clusters to reassign
    # is always bigger than the batch_size
    n_clusters = 550
    MiniBatchKMeans(n_clusters=n_clusters,
                    batch_size=100,
                    init_size=n_samples,
                    random_state=42).fit(X)
def test_sparse_mb_k_means_callable_init():
    """A callable init works on sparse input; wrong n_clusters errors out."""
    def test_init(X, k, random_state):
        # Ignore the arguments and return the fixed module-level centers.
        return centers

    # Small test to check that giving the wrong number of centers
    # raises a meaningful error
    assert_raises(ValueError,
                  MiniBatchKMeans(init=test_init, random_state=42).fit, X_csr)

    # Now check that the fit actually works
    mb_k_means = MiniBatchKMeans(n_clusters=3, init=test_init,
                                 random_state=42).fit(X_csr)
    _check_fitted_model(mb_k_means)
def test_mini_batch_k_means_random_init_partial_fit():
    """Online learning via partial_fit reaches a perfect labeling."""
    km = MiniBatchKMeans(n_clusters=n_clusters, init="random", random_state=42)

    # use the partial_fit API for online learning
    for X_minibatch in np.array_split(X, 10):
        km.partial_fit(X_minibatch)

    # compute the labeling on the complete dataset
    labels = km.predict(X)
    assert_equal(v_measure_score(true_labels, labels), 1.0)
def test_minibatch_default_init_size():
    """When unset, init_size_ defaults to 3 * batch_size."""
    mb_k_means = MiniBatchKMeans(init=centers.copy(), n_clusters=n_clusters,
                                 batch_size=10, random_state=42,
                                 n_init=1).fit(X)
    assert_equal(mb_k_means.init_size_, 3 * mb_k_means.batch_size)
    _check_fitted_model(mb_k_means)
def test_minibatch_tol():
    """MiniBatchKMeans with a nonzero tol still fits the blobs perfectly."""
    estimator = MiniBatchKMeans(n_clusters=n_clusters, batch_size=10,
                                random_state=42, tol=.01)
    _check_fitted_model(estimator.fit(X))
def test_minibatch_set_init_size():
    """An explicit init_size is stored verbatim; init_size_ is n_samples here."""
    mb_k_means = MiniBatchKMeans(init=centers.copy(), n_clusters=n_clusters,
                                 init_size=666, random_state=42,
                                 n_init=1).fit(X)
    assert_equal(mb_k_means.init_size, 666)
    assert_equal(mb_k_means.init_size_, n_samples)
    _check_fitted_model(mb_k_means)
def test_k_means_invalid_init():
    """An unknown init string makes KMeans.fit raise ValueError."""
    estimator = KMeans(init="invalid", n_init=1, n_clusters=n_clusters)
    assert_raises(ValueError, estimator.fit, X)
def test_mini_match_k_means_invalid_init():
    """An unknown init string makes MiniBatchKMeans.fit raise ValueError."""
    estimator = MiniBatchKMeans(init="invalid", n_init=1, n_clusters=n_clusters)
    assert_raises(ValueError, estimator.fit, X)
def test_k_means_copyx():
    """copy_x=False de-centers X in place but restores it after fit."""
    # Check if copy_x=False returns nearly equal X after de-centering.
    my_X = X.copy()
    km = KMeans(copy_x=False, n_clusters=n_clusters, random_state=42)
    km.fit(my_X)
    _check_fitted_model(km)

    # check if my_X is centered
    assert_array_almost_equal(my_X, X)
def test_k_means_non_collapsed():
    """A bad initialization must not collapse centers onto each other."""
    # Check k_means with a bad initialization does not yield a singleton
    # Starting with bad centers that are quickly ignored should not
    # result in a repositioning of the centers to the center of mass that
    # would lead to collapsed centers which in turns make the clustering
    # dependent of the numerical unstabilities.
    my_X = np.array([[1.1, 1.1], [0.9, 1.1], [1.1, 0.9], [0.9, 1.1]])
    array_init = np.array([[1.0, 1.0], [5.0, 5.0], [-5.0, -5.0]])
    km = KMeans(init=array_init, n_clusters=3, random_state=42, n_init=1)
    km.fit(my_X)

    # centers must not been collapsed
    assert_equal(len(np.unique(km.labels_)), 3)

    centers = km.cluster_centers_
    assert_true(np.linalg.norm(centers[0] - centers[1]) >= 0.1)
    assert_true(np.linalg.norm(centers[0] - centers[2]) >= 0.1)
    assert_true(np.linalg.norm(centers[1] - centers[2]) >= 0.1)
def test_predict():
    """predict agrees with labels_ and with fit_predict."""
    km = KMeans(n_clusters=n_clusters, random_state=42)

    km.fit(X)

    # sanity check: predict centroid labels
    pred = km.predict(km.cluster_centers_)
    assert_array_equal(pred, np.arange(n_clusters))

    # sanity check: re-predict labeling for training set samples
    pred = km.predict(X)
    assert_array_equal(pred, km.labels_)

    # re-predict labels for training set using fit_predict
    pred = km.fit_predict(X)
    assert_array_equal(pred, km.labels_)
def test_score():
    """More k-means iterations can only improve (increase) the score."""
    short_run = KMeans(n_clusters=n_clusters, max_iter=1, random_state=42)
    s1 = short_run.fit(X).score(X)
    long_run = KMeans(n_clusters=n_clusters, max_iter=10, random_state=42)
    s2 = long_run.fit(X).score(X)
    assert_greater(s2, s1)
def test_predict_minibatch_dense_input():
    """MiniBatchKMeans predict is self-consistent on dense input."""
    mb_k_means = MiniBatchKMeans(n_clusters=n_clusters, random_state=40).fit(X)

    # sanity check: predict centroid labels
    pred = mb_k_means.predict(mb_k_means.cluster_centers_)
    assert_array_equal(pred, np.arange(n_clusters))

    # sanity check: re-predict labeling for training set samples
    pred = mb_k_means.predict(X)
    assert_array_equal(mb_k_means.predict(X), mb_k_means.labels_)
def test_predict_minibatch_kmeanspp_init_sparse_input():
    """Sparse-trained MiniBatchKMeans (k-means++) predicts sparse and dense."""
    mb_k_means = MiniBatchKMeans(n_clusters=n_clusters, init='k-means++',
                                 n_init=10).fit(X_csr)

    # sanity check: re-predict labeling for training set samples
    assert_array_equal(mb_k_means.predict(X_csr), mb_k_means.labels_)

    # sanity check: predict centroid labels
    pred = mb_k_means.predict(mb_k_means.cluster_centers_)
    assert_array_equal(pred, np.arange(n_clusters))

    # check that models trained on sparse input also works for dense input at
    # predict time
    assert_array_equal(mb_k_means.predict(X), mb_k_means.labels_)
def test_predict_minibatch_random_init_sparse_input():
    """Sparse-trained MiniBatchKMeans (random init) predicts sparse and dense."""
    mb_k_means = MiniBatchKMeans(n_clusters=n_clusters, init='random',
                                 n_init=10).fit(X_csr)

    # sanity check: re-predict labeling for training set samples
    assert_array_equal(mb_k_means.predict(X_csr), mb_k_means.labels_)

    # sanity check: predict centroid labels
    pred = mb_k_means.predict(mb_k_means.cluster_centers_)
    assert_array_equal(pred, np.arange(n_clusters))

    # check that models trained on sparse input also works for dense input at
    # predict time
    assert_array_equal(mb_k_means.predict(X), mb_k_means.labels_)
def test_input_dtypes():
    """KMeans/MiniBatchKMeans accept list, int array and sparse int input."""
    X_list = [[0, 0], [10, 10], [12, 9], [-1, 1], [2, 0], [8, 10]]
    X_int = np.array(X_list, dtype=np.int32)
    X_int_csr = sp.csr_matrix(X_int)
    init_int = X_int[:2]

    fitted_models = [
        KMeans(n_clusters=2).fit(X_list),
        KMeans(n_clusters=2).fit(X_int),
        KMeans(n_clusters=2, init=init_int, n_init=1).fit(X_list),
        KMeans(n_clusters=2, init=init_int, n_init=1).fit(X_int),
        # mini batch kmeans is very unstable on such a small dataset hence
        # we use many inits
        MiniBatchKMeans(n_clusters=2, n_init=10, batch_size=2).fit(X_list),
        MiniBatchKMeans(n_clusters=2, n_init=10, batch_size=2).fit(X_int),
        MiniBatchKMeans(n_clusters=2, n_init=10, batch_size=2).fit(X_int_csr),
        MiniBatchKMeans(n_clusters=2, batch_size=2,
                        init=init_int, n_init=1).fit(X_list),
        MiniBatchKMeans(n_clusters=2, batch_size=2,
                        init=init_int, n_init=1).fit(X_int),
        MiniBatchKMeans(n_clusters=2, batch_size=2,
                        init=init_int, n_init=1).fit(X_int_csr),
    ]
    # Every variant must recover the expected 2-cluster labeling exactly.
    expected_labels = [0, 1, 1, 0, 0, 1]
    scores = np.array([v_measure_score(expected_labels, km.labels_)
                       for km in fitted_models])
    assert_array_equal(scores, np.ones(scores.shape[0]))
def test_transform():
    """transform gives zero distance to a center's own row, positive elsewhere."""
    km = KMeans(n_clusters=n_clusters)
    km.fit(X)
    X_new = km.transform(km.cluster_centers_)

    for c in range(n_clusters):
        assert_equal(X_new[c, c], 0)
        for c2 in range(n_clusters):
            if c != c2:
                assert_greater(X_new[c, c2], 0)
def test_fit_transform():
    """fit(...).transform(...) and fit_transform(...) give identical output."""
    via_fit = KMeans(n_clusters=3, random_state=51).fit(X).transform(X)
    via_fit_transform = KMeans(n_clusters=3, random_state=51).fit_transform(X)
    assert_array_equal(via_fit, via_fit_transform)
def test_n_init():
    """Mean inertia must not get worse as n_init grows."""
    # Check that increasing the number of init increases the quality
    n_runs = 5
    n_init_range = [1, 5, 10]
    inertia = np.zeros((len(n_init_range), n_runs))
    for i, n_init in enumerate(n_init_range):
        for j in range(n_runs):
            km = KMeans(n_clusters=n_clusters, init="random", n_init=n_init,
                        random_state=j).fit(X)
            inertia[i, j] = km.inertia_

    inertia = inertia.mean(axis=1)
    failure_msg = ("Inertia %r should be decreasing"
                   " when n_init is increasing.") % list(inertia)
    for i in range(len(n_init_range) - 1):
        assert_true(inertia[i] >= inertia[i + 1], failure_msg)
def test_k_means_function():
    """Direct k_means() call: output shapes, perfect labels, warning, error."""
    # test calling the k_means function directly
    # catch output
    old_stdout = sys.stdout
    sys.stdout = StringIO()
    try:
        cluster_centers, labels, inertia = k_means(X, n_clusters=n_clusters,
                                                   verbose=True)
    finally:
        sys.stdout = old_stdout
    centers = cluster_centers
    assert_equal(centers.shape, (n_clusters, n_features))

    labels = labels  # no-op rebinding kept as-is from the original
    assert_equal(np.unique(labels).shape[0], n_clusters)

    # check that the labels assignment are perfect (up to a permutation)
    assert_equal(v_measure_score(true_labels, labels), 1.0)
    assert_greater(inertia, 0.0)

    # check warning when centers are passed
    assert_warns(RuntimeWarning, k_means, X, n_clusters=n_clusters,
                 init=centers)

    # to many clusters desired
    assert_raises(ValueError, k_means, X, n_clusters=X.shape[0] + 1)
def test_x_squared_norms_init_centroids():
    """Test that x_squared_norms can be None in _init_centroids"""
    from sklearn.cluster.k_means_ import _init_centroids

    X_norms = np.sum(X**2, axis=1)
    precompute = _init_centroids(
        X, 3, "k-means++", random_state=0, x_squared_norms=X_norms)
    # Omitting x_squared_norms must give the same centroids.
    assert_array_equal(
        precompute,
        _init_centroids(X, 3, "k-means++", random_state=0))
|
bsd-3-clause
|
wadester/wh_test_py
|
scipy_rand.py
|
1
|
1922
|
#!/usr/bin/env python
# Module: scipy_rand.py
# Purpose: random distributions in SciPy
# Date: N/A
# Notes:
# 1) ...
# Ref: http://docs.scipy.org/doc/numpy/reference/generated/numpy.random.exponential.html#numpy.random.exponential
#
"""Random numbers and Numpy"""
import numpy as np
#import scipy as sp
#import matplotlib as mpl
import matplotlib.pyplot as plt
import random as r
def scipy_rand():
"""Random numbers and Numpy"""
print "scipy_rand.py: random numbers and numpy"
ll = 1000
print "create array with %d random numbers, list comprehension" % ll
# -- example using Python's random
# n1 = np.array([r.random() for x in range(ll)])
n1 = np.random.random_sample((ll,))
x1 = np.linspace(1, ll, ll)
plt.plot(x1, n1)
plt.grid()
plt.show()
print "make a histogram of the numbers and plot"
bins = 10
h1 = np.histogram(n1, bins)
x10 = np.linspace(1, bins, bins)
plt.plot(x10, h1[0])
plt.show()
print "redo with poisson distribution"
print "create array with %d random numbers, list comprehension" % ll
# -- example using Python's random
# n2 = np.array([r.expovariate(1/5.0) for x in range(ll)])
n2=np.random.exponential(scale=5, size=ll)
#x1 = np.linspace(1, ll, ll)
plt.plot(x1, n2)
plt.grid()
plt.show()
print "make a histogram of the numbers and plot"
#bins = 10
h2 = np.histogram(n2, bins)
#x2 = np.linspace(1, bins, bins)
plt.plot(x10, h2[0])
plt.show()
print "Average of N2 is: ", np.average(n2)
print "use poisson and plot data then histogram"
n3=np.random.poisson(lam=5.0, size=ll)
plt.plot(x1, n3)
plt.grid()
plt.show()
#bins = 10
h3 = np.histogram(n2, bins)
#x2 = np.linspace(1, bins, bins)
plt.plot(x10, h3[0])
plt.show()
print "Average of N3 is: ", np.average(n3)
if __name__ == "__main__":
scipy_rand()
|
gpl-2.0
|
deepesch/scikit-learn
|
sklearn/tree/tests/test_tree.py
|
57
|
47417
|
"""
Testing for the tree module (sklearn.tree).
"""
import pickle
from functools import partial
from itertools import product
import platform
import numpy as np
from scipy.sparse import csc_matrix
from scipy.sparse import csr_matrix
from scipy.sparse import coo_matrix
from sklearn.random_projection import sparse_random_matrix
from sklearn.metrics import accuracy_score
from sklearn.metrics import mean_squared_error
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_in
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import raises
from sklearn.utils.validation import check_random_state
from sklearn.utils.validation import NotFittedError
from sklearn.tree import DecisionTreeClassifier
from sklearn.tree import DecisionTreeRegressor
from sklearn.tree import ExtraTreeClassifier
from sklearn.tree import ExtraTreeRegressor
from sklearn import tree
from sklearn.tree.tree import SPARSE_SPLITTERS
from sklearn.tree._tree import TREE_LEAF
from sklearn import datasets
from sklearn.preprocessing._weights import _balance_weights
# Split-quality criteria exercised for classifiers / regressors.
CLF_CRITERIONS = ("gini", "entropy")
REG_CRITERIONS = ("mse", )
# Registries mapping a display name to the estimator class (or a partial
# pre-configuring the splitter) used throughout the parametrized tests.
CLF_TREES = {
    "DecisionTreeClassifier": DecisionTreeClassifier,
    "Presort-DecisionTreeClassifier": partial(DecisionTreeClassifier,
                                              splitter="presort-best"),
    "ExtraTreeClassifier": ExtraTreeClassifier,
}
REG_TREES = {
    "DecisionTreeRegressor": DecisionTreeRegressor,
    "Presort-DecisionTreeRegressor": partial(DecisionTreeRegressor,
                                             splitter="presort-best"),
    "ExtraTreeRegressor": ExtraTreeRegressor,
}
# Union of both registries.
ALL_TREES = dict()
ALL_TREES.update(CLF_TREES)
ALL_TREES.update(REG_TREES)
# Names of estimators whose default splitter supports sparse input.
SPARSE_TREES = [name for name, Tree in ALL_TREES.items()
                if Tree().splitter in SPARSE_SPLITTERS]
# Hand-crafted 23x14 matrix with mixed magnitudes, zeros and negative
# values; shared by the "clf_small" / "reg_small" dataset fixtures below.
X_small = np.array([
    [0, 0, 4, 0, 0, 0, 1, -14, 0, -4, 0, 0, 0, 0, ],
    [0, 0, 5, 3, 0, -4, 0, 0, 1, -5, 0.2, 0, 4, 1, ],
    [-1, -1, 0, 0, -4.5, 0, 0, 2.1, 1, 0, 0, -4.5, 0, 1, ],
    [-1, -1, 0, -1.2, 0, 0, 0, 0, 0, 0, 0.2, 0, 0, 1, ],
    [-1, -1, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 1, ],
    [-1, -2, 0, 4, -3, 10, 4, 0, -3.2, 0, 4, 3, -4, 1, ],
    [2.11, 0, -6, -0.5, 0, 11, 0, 0, -3.2, 6, 0.5, 0, -3, 1, ],
    [2.11, 0, -6, -0.5, 0, 11, 0, 0, -3.2, 6, 0, 0, -2, 1, ],
    [2.11, 8, -6, -0.5, 0, 11, 0, 0, -3.2, 6, 0, 0, -2, 1, ],
    [2.11, 8, -6, -0.5, 0, 11, 0, 0, -3.2, 6, 0.5, 0, -1, 0, ],
    [2, 8, 5, 1, 0.5, -4, 10, 0, 1, -5, 3, 0, 2, 0, ],
    [2, 0, 1, 1, 1, -1, 1, 0, 0, -2, 3, 0, 1, 0, ],
    [2, 0, 1, 2, 3, -1, 10, 2, 0, -1, 1, 2, 2, 0, ],
    [1, 1, 0, 2, 2, -1, 1, 2, 0, -5, 1, 2, 3, 0, ],
    [3, 1, 0, 3, 0, -4, 10, 0, 1, -5, 3, 0, 3, 1, ],
    [2.11, 8, -6, -0.5, 0, 1, 0, 0, -3.2, 6, 0.5, 0, -3, 1, ],
    [2.11, 8, -6, -0.5, 0, 1, 0, 0, -3.2, 6, 1.5, 1, -1, -1, ],
    [2.11, 8, -6, -0.5, 0, 10, 0, 0, -3.2, 6, 0.5, 0, -1, -1, ],
    [2, 0, 5, 1, 0.5, -2, 10, 0, 1, -5, 3, 1, 0, -1, ],
    [2, 0, 1, 1, 1, -2, 1, 0, 0, -2, 0, 0, 0, 1, ],
    [2, 1, 1, 1, 2, -1, 10, 2, 0, -1, 0, 2, 1, 1, ],
    [1, 1, 0, 0, 1, -3, 1, 2, 0, -5, 1, 2, 1, 1, ],
    [3, 1, 0, 1, 0, -4, 1, 0, 1, -2, 0, 0, 1, 0, ]])
# Binary class labels for X_small.
y_small = [1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0,
           0, 0]
# Continuous regression targets for X_small.
y_small_reg = [1.0, 2.1, 1.2, 0.05, 10, 2.4, 3.1, 1.01, 0.01, 2.98, 3.1, 1.1,
               0.0, 1.2, 2, 11, 0, 0, 4.5, 0.201, 1.06, 0.9, 0]
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [-1, -1, -1, 1, 1, 1]
# Held-out points and their expected toy-sample predictions.
T = [[-1, -1], [2, 2], [3, 2]]
true_result = [-1, 1, 1]
# also load the iris dataset
# and randomly permute it
iris = datasets.load_iris()
rng = np.random.RandomState(1)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
# also load the boston dataset
# and randomly permute it
boston = datasets.load_boston()
perm = rng.permutation(boston.target.size)
boston.data = boston.data[perm]
boston.target = boston.target[perm]
# digits dataset, permuted with the same module-level RNG
digits = datasets.load_digits()
perm = rng.permutation(digits.target.size)
digits.data = digits.data[perm]
digits.target = digits.target[perm]
# Fixtures for the sparse-input checks: a random multilabel problem plus
# mostly-zero positive / mixed-sign matrices with random 4-class labels.
random_state = check_random_state(0)
X_multilabel, y_multilabel = datasets.make_multilabel_classification(
    random_state=0, n_samples=30, n_features=10)
X_sparse_pos = random_state.uniform(size=(20, 5))
X_sparse_pos[X_sparse_pos <= 0.8] = 0.
y_random = random_state.randint(0, 4, size=(20, ))
X_sparse_mix = sparse_random_matrix(20, 10, density=0.25, random_state=0)
# Central registry of named datasets used by the parametrized checks below.
DATASETS = {
    "iris": {"X": iris.data, "y": iris.target},
    "boston": {"X": boston.data, "y": boston.target},
    "digits": {"X": digits.data, "y": digits.target},
    "toy": {"X": X, "y": y},
    "clf_small": {"X": X_small, "y": y_small},
    "reg_small": {"X": X_small, "y": y_small_reg},
    "multilabel": {"X": X_multilabel, "y": y_multilabel},
    "sparse-pos": {"X": X_sparse_pos, "y": y_random},
    "sparse-neg": {"X": - X_sparse_pos, "y": y_random},
    "sparse-mix": {"X": X_sparse_mix, "y": y_random},
    "zeros": {"X": np.zeros((20, 3)), "y": y_random}
}
# Pre-compute a CSC copy of every dense dataset.
for name in DATASETS:
    DATASETS[name]["X_sparse"] = csc_matrix(DATASETS[name]["X"])
def assert_tree_equal(d, s, message):
    """Assert that two fitted Tree structures ``d`` and ``s`` are identical.

    Compares node counts, topology (children arrays), split features and
    thresholds of internal nodes, per-node sample counts, impurities and
    the values stored at the leaves.  ``message`` prefixes every failure
    report.
    """
    assert_equal(s.node_count, d.node_count,
                 "{0}: inequal number of node ({1} != {2})"
                 "".format(message, s.node_count, d.node_count))
    assert_array_equal(d.children_right, s.children_right,
                       message + ": inequal children_right")
    assert_array_equal(d.children_left, s.children_left,
                       message + ": inequal children_left")
    # Leaves are flagged by TREE_LEAF in children_right.
    external = d.children_right == TREE_LEAF
    internal = np.logical_not(external)
    assert_array_equal(d.feature[internal], s.feature[internal],
                       message + ": inequal features")
    assert_array_equal(d.threshold[internal], s.threshold[internal],
                       message + ": inequal threshold")
    assert_array_equal(d.n_node_samples.sum(), s.n_node_samples.sum(),
                       message + ": inequal sum(n_node_samples)")
    assert_array_equal(d.n_node_samples, s.n_node_samples,
                       message + ": inequal n_node_samples")
    assert_almost_equal(d.impurity, s.impurity,
                        err_msg=message + ": inequal impurity")
    # Only leaf values are compared.
    assert_array_almost_equal(d.value[external], s.value[external],
                              err_msg=message + ": inequal value")
def test_classification_toy():
    """Every classifier type must reproduce the toy-sample labels."""
    for tree_name, TreeCls in CLF_TREES.items():
        msg = "Failed with {0}".format(tree_name)
        estimator = TreeCls(random_state=0)
        estimator.fit(X, y)
        assert_array_equal(estimator.predict(T), true_result, msg)
        # Same check with split candidates restricted to one feature.
        estimator = TreeCls(max_features=1, random_state=1)
        estimator.fit(X, y)
        assert_array_equal(estimator.predict(T), true_result, msg)
def test_weighted_classification_toy():
    """Uniform sample weights must not change toy-sample predictions."""
    uniform = np.ones(len(X))
    for tree_name, TreeCls in CLF_TREES.items():
        msg = "Failed with {0}".format(tree_name)
        estimator = TreeCls(random_state=0)
        estimator.fit(X, y, sample_weight=uniform)
        assert_array_equal(estimator.predict(T), true_result, msg)
        # Re-fit with all weights scaled down; predictions are unchanged.
        estimator.fit(X, y, sample_weight=uniform * 0.5)
        assert_array_equal(estimator.predict(T), true_result, msg)
def test_regression_toy():
    # Check regression on a toy dataset.
    for name, Tree in REG_TREES.items():
        reg = Tree(random_state=1)
        reg.fit(X, y)
        assert_almost_equal(reg.predict(T), true_result,
                            err_msg="Failed with {0}".format(name))
        # Bug fix: the second estimator used to be fitted as `clf` while
        # the assertion re-checked `reg` (the first estimator), so the
        # max_features=1 configuration was never actually exercised.
        reg = Tree(max_features=1, random_state=1)
        reg.fit(X, y)
        assert_almost_equal(reg.predict(T), true_result,
                            err_msg="Failed with {0}".format(name))
def test_xor():
    """Classifiers must perfectly separate a 10x10 XOR grid."""
    target = np.zeros((10, 10))
    target[:5, :5] = 1
    target[5:, 5:] = 1
    gridx, gridy = np.indices(target.shape)
    features = np.vstack([gridx.ravel(), gridy.ravel()]).T
    labels = target.ravel()
    for tree_name, TreeCls in CLF_TREES.items():
        msg = "Failed with {0}".format(tree_name)
        # Default settings, then with split candidates limited to one
        # feature per node.
        for extra in ({}, {"max_features": 1}):
            estimator = TreeCls(random_state=0, **extra)
            estimator.fit(features, labels)
            assert_equal(estimator.score(features, labels), 1.0, msg)
def test_iris():
    """Accuracy floors on iris for every classifier/criterion pair."""
    for (name, TreeCls), criterion in product(CLF_TREES.items(),
                                              CLF_CRITERIONS):
        estimator = TreeCls(criterion=criterion, random_state=0)
        estimator.fit(iris.data, iris.target)
        score = accuracy_score(estimator.predict(iris.data), iris.target)
        assert_greater(score, 0.9,
                       "Failed with {0}, criterion = {1} and score = {2}"
                       "".format(name, criterion, score))
        # Restricting max_features weakens the fit; a lower floor applies.
        estimator = TreeCls(criterion=criterion, max_features=2,
                            random_state=0)
        estimator.fit(iris.data, iris.target)
        score = accuracy_score(estimator.predict(iris.data), iris.target)
        assert_greater(score, 0.5,
                       "Failed with {0}, criterion = {1} and score = {2}"
                       "".format(name, criterion, score))
def test_boston():
    """MSE ceilings on boston for every regressor/criterion pair."""
    for (name, TreeCls), criterion in product(REG_TREES.items(),
                                              REG_CRITERIONS):
        # using fewer features reduces the learning ability of this tree,
        # but reduces training time; hence the looser bound for the
        # max_features=6 variant.
        for extra, bound in (({}, 1), ({"max_features": 6}, 2)):
            estimator = TreeCls(criterion=criterion, random_state=0, **extra)
            estimator.fit(boston.data, boston.target)
            score = mean_squared_error(boston.target,
                                       estimator.predict(boston.data))
            assert_less(score, bound,
                        "Failed with {0}, criterion = {1} and score = {2}"
                        "".format(name, criterion, score))
def test_probability():
    """predict_proba must be a proper distribution consistent with predict."""
    for tree_name, TreeCls in CLF_TREES.items():
        msg = "Failed with {0}".format(tree_name)
        estimator = TreeCls(max_depth=1, max_features=1, random_state=42)
        estimator.fit(iris.data, iris.target)
        proba = estimator.predict_proba(iris.data)
        # Each row of probabilities sums to one.
        assert_array_almost_equal(np.sum(proba, 1),
                                  np.ones(iris.data.shape[0]),
                                  err_msg=msg)
        # The most probable class agrees with predict().
        assert_array_equal(np.argmax(proba, 1),
                           estimator.predict(iris.data),
                           err_msg=msg)
        # Log-probabilities are consistent with probabilities.
        assert_almost_equal(proba,
                            np.exp(estimator.predict_log_proba(iris.data)),
                            8, err_msg=msg)
def test_arrayrepr():
    # Check the array representation.
    # Check resize
    n = 10000
    features = np.arange(n).reshape(-1, 1)
    target = np.arange(n)
    # Unbounded depth on 10k distinct targets forces many node-array
    # resizes; fitting must simply not crash.
    for _, TreeCls in REG_TREES.items():
        TreeCls(max_depth=None, random_state=0).fit(features, target)
def test_pure_set():
    # Check when y is pure.
    X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
    y = [1, 1, 1, 1, 1, 1]
    for name, TreeClassifier in CLF_TREES.items():
        clf = TreeClassifier(random_state=0)
        clf.fit(X, y)
        assert_array_equal(clf.predict(X), y,
                           err_msg="Failed with {0}".format(name))
    for name, TreeRegressor in REG_TREES.items():
        reg = TreeRegressor(random_state=0)
        reg.fit(X, y)
        # Bug fix: the assertion previously queried `clf` (the last
        # classifier left over from the loop above) instead of the
        # regressor that was just fitted.
        assert_almost_equal(reg.predict(X), y,
                            err_msg="Failed with {0}".format(name))
def test_numerical_stability():
    # Check numerical stability.
    # Fitting on near-identical float values must not trigger any numpy
    # floating-point error (errstate below turns warnings into exceptions).
    X = np.array([
        [152.08097839, 140.40744019, 129.75102234, 159.90493774],
        [142.50700378, 135.81935120, 117.82884979, 162.75781250],
        [127.28772736, 140.40744019, 129.75102234, 159.90493774],
        [132.37025452, 143.71923828, 138.35694885, 157.84558105],
        [103.10237122, 143.71928406, 138.35696411, 157.84559631],
        [127.71276855, 143.71923828, 138.35694885, 157.84558105],
        [120.91514587, 140.40744019, 129.75102234, 159.90493774]])
    y = np.array(
        [1., 0.70209277, 0.53896582, 0., 0.90914464, 0.48026916, 0.49622521])
    with np.errstate(all="raise"):
        for name, Tree in REG_TREES.items():
            reg = Tree(random_state=0)
            # Fit under all four sign combinations of (X, y).
            reg.fit(X, y)
            reg.fit(X, -y)
            reg.fit(-X, y)
            reg.fit(-X, -y)
def test_importances():
    # Check variable importances.
    # Dataset has exactly 3 informative features out of 10.
    X, y = datasets.make_classification(n_samples=2000,
                                        n_features=10,
                                        n_informative=3,
                                        n_redundant=0,
                                        n_repeated=0,
                                        shuffle=False,
                                        random_state=0)
    for name, Tree in CLF_TREES.items():
        clf = Tree(random_state=0)
        clf.fit(X, y)
        importances = clf.feature_importances_
        n_important = np.sum(importances > 0.1)
        assert_equal(importances.shape[0], 10, "Failed with {0}".format(name))
        # Exactly the 3 informative features should stand out (> 0.1).
        assert_equal(n_important, 3, "Failed with {0}".format(name))
        # Selecting by mean importance must strictly shrink the feature set
        # while keeping at least one feature.
        X_new = clf.transform(X, threshold="mean")
        assert_less(0, X_new.shape[1], "Failed with {0}".format(name))
        assert_less(X_new.shape[1], X.shape[1], "Failed with {0}".format(name))
    # Check on iris that importances are the same for all builders
    clf = DecisionTreeClassifier(random_state=0)
    clf.fit(iris.data, iris.target)
    clf2 = DecisionTreeClassifier(random_state=0,
                                  max_leaf_nodes=len(iris.data))
    clf2.fit(iris.data, iris.target)
    assert_array_equal(clf.feature_importances_,
                       clf2.feature_importances_)
@raises(ValueError)
def test_importances_raises():
    # Check if variable importance before fit raises ValueError.
    clf = DecisionTreeClassifier()
    clf.feature_importances_  # attribute access alone must raise
def test_importances_gini_equal_mse():
    # Check that gini is equivalent to mse for binary output variable
    X, y = datasets.make_classification(n_samples=2000,
                                        n_features=10,
                                        n_informative=3,
                                        n_redundant=0,
                                        n_repeated=0,
                                        shuffle=False,
                                        random_state=0)
    # The gini index and the mean square error (variance) might differ due
    # to numerical instability. Since those instabilities mainly occurs at
    # high tree depth, we restrict this maximal depth.
    clf = DecisionTreeClassifier(criterion="gini", max_depth=5,
                                 random_state=0).fit(X, y)
    reg = DecisionTreeRegressor(criterion="mse", max_depth=5,
                                random_state=0).fit(X, y)
    # Importances agree approximately; the learned tree structure (splits,
    # topology, sample counts) must match exactly.
    assert_almost_equal(clf.feature_importances_, reg.feature_importances_)
    assert_array_equal(clf.tree_.feature, reg.tree_.feature)
    assert_array_equal(clf.tree_.children_left, reg.tree_.children_left)
    assert_array_equal(clf.tree_.children_right, reg.tree_.children_right)
    assert_array_equal(clf.tree_.n_node_samples, reg.tree_.n_node_samples)
def test_max_features():
    # Check max_features.
    # "auto" means all features for regressors...
    for name, TreeRegressor in REG_TREES.items():
        reg = TreeRegressor(max_features="auto")
        reg.fit(boston.data, boston.target)
        assert_equal(reg.max_features_, boston.data.shape[1])
    # ...but sqrt(n_features) for classifiers (sqrt(4) == 2 on iris).
    for name, TreeClassifier in CLF_TREES.items():
        clf = TreeClassifier(max_features="auto")
        clf.fit(iris.data, iris.target)
        assert_equal(clf.max_features_, 2)
    for name, TreeEstimator in ALL_TREES.items():
        # String presets.
        est = TreeEstimator(max_features="sqrt")
        est.fit(iris.data, iris.target)
        assert_equal(est.max_features_,
                     int(np.sqrt(iris.data.shape[1])))
        est = TreeEstimator(max_features="log2")
        est.fit(iris.data, iris.target)
        assert_equal(est.max_features_,
                     int(np.log2(iris.data.shape[1])))
        # Integer counts are taken as-is.
        est = TreeEstimator(max_features=1)
        est.fit(iris.data, iris.target)
        assert_equal(est.max_features_, 1)
        est = TreeEstimator(max_features=3)
        est.fit(iris.data, iris.target)
        assert_equal(est.max_features_, 3)
        # Fractions are rounded down but never below one feature.
        est = TreeEstimator(max_features=0.01)
        est.fit(iris.data, iris.target)
        assert_equal(est.max_features_, 1)
        est = TreeEstimator(max_features=0.5)
        est.fit(iris.data, iris.target)
        assert_equal(est.max_features_,
                     int(0.5 * iris.data.shape[1]))
        est = TreeEstimator(max_features=1.0)
        est.fit(iris.data, iris.target)
        assert_equal(est.max_features_, iris.data.shape[1])
        # None means "use all features".
        est = TreeEstimator(max_features=None)
        est.fit(iris.data, iris.target)
        assert_equal(est.max_features_, iris.data.shape[1])
        # use values of max_features that are invalid
        est = TreeEstimator(max_features=10)
        assert_raises(ValueError, est.fit, X, y)
        est = TreeEstimator(max_features=-1)
        assert_raises(ValueError, est.fit, X, y)
        est = TreeEstimator(max_features=0.0)
        assert_raises(ValueError, est.fit, X, y)
        est = TreeEstimator(max_features=1.5)
        assert_raises(ValueError, est.fit, X, y)
        est = TreeEstimator(max_features="foobar")
        assert_raises(ValueError, est.fit, X, y)
def test_error():
    # Test that it gives proper exception on deficient input.
    for name, TreeEstimator in CLF_TREES.items():
        # predict before fit
        est = TreeEstimator()
        assert_raises(NotFittedError, est.predict_proba, X)
        est.fit(X, y)
        X2 = [-2, -1, 1]  # wrong feature shape for sample
        assert_raises(ValueError, est.predict_proba, X2)
    for name, TreeEstimator in ALL_TREES.items():
        # Invalid values for parameters
        assert_raises(ValueError, TreeEstimator(min_samples_leaf=-1).fit, X, y)
        assert_raises(ValueError,
                      TreeEstimator(min_weight_fraction_leaf=-1).fit,
                      X, y)
        assert_raises(ValueError,
                      TreeEstimator(min_weight_fraction_leaf=0.51).fit,
                      X, y)
        assert_raises(ValueError, TreeEstimator(min_samples_split=-1).fit,
                      X, y)
        assert_raises(ValueError, TreeEstimator(max_depth=-1).fit, X, y)
        assert_raises(ValueError, TreeEstimator(max_features=42).fit, X, y)
        # Wrong dimensions
        est = TreeEstimator()
        y2 = y[:-1]
        assert_raises(ValueError, est.fit, X, y2)
        # Test with arrays that are non-contiguous.
        Xf = np.asfortranarray(X)
        est = TreeEstimator()
        est.fit(Xf, y)
        assert_almost_equal(est.predict(T), true_result)
        # predict before fitting
        est = TreeEstimator()
        assert_raises(NotFittedError, est.predict, T)
        # predict on vector with different dims
        est.fit(X, y)
        t = np.asarray(T)
        assert_raises(ValueError, est.predict, t[:, 1:])
        # wrong sample shape
        Xt = np.array(X).T
        # Fit on the 6x6 Gram-like matrix so Xt's 2x6 shape mismatches.
        est = TreeEstimator()
        est.fit(np.dot(X, Xt), y)
        assert_raises(ValueError, est.predict, X)
        assert_raises(ValueError, est.apply, X)
        clf = TreeEstimator()
        clf.fit(X, y)
        assert_raises(ValueError, clf.predict, Xt)
        assert_raises(ValueError, clf.apply, Xt)
        # apply before fitting
        est = TreeEstimator()
        assert_raises(NotFittedError, est.apply, T)
def test_min_samples_leaf():
    # Test if leaves contain more than leaf_count training examples
    features = np.asfortranarray(iris.data.astype(tree._tree.DTYPE))
    target = iris.target
    # test both DepthFirstTreeBuilder and BestFirstTreeBuilder
    # by setting max_leaf_nodes
    for max_leaf_nodes in (None, 1000):
        for tree_name, TreeCls in ALL_TREES.items():
            estimator = TreeCls(min_samples_leaf=5,
                                max_leaf_nodes=max_leaf_nodes,
                                random_state=0)
            estimator.fit(features, target)
            leaf_ids = estimator.tree_.apply(features)
            counts = np.bincount(leaf_ids)
            # drop inner nodes
            occupied = counts[counts != 0]
            assert_greater(np.min(occupied), 4,
                           "Failed with {0}".format(tree_name))
def check_min_weight_fraction_leaf(name, datasets, sparse=False):
    """Test if leaves contain at least min_weight_fraction_leaf of the
    training set"""
    # NOTE(review): `datasets` here is a DATASETS registry key (a string)
    # and shadows the sklearn.datasets module imported at file scope.
    if sparse:
        X = DATASETS[datasets]["X_sparse"].astype(np.float32)
    else:
        X = DATASETS[datasets]["X"].astype(np.float32)
    y = DATASETS[datasets]["y"]
    # Random per-sample weights drawn from the module-level `rng`.
    weights = rng.rand(X.shape[0])
    total_weight = np.sum(weights)
    TreeEstimator = ALL_TREES[name]
    # test both DepthFirstTreeBuilder and BestFirstTreeBuilder
    # by setting max_leaf_nodes
    for max_leaf_nodes, frac in product((None, 1000), np.linspace(0, 0.5, 6)):
        est = TreeEstimator(min_weight_fraction_leaf=frac,
                            max_leaf_nodes=max_leaf_nodes,
                            random_state=0)
        est.fit(X, y, sample_weight=weights)
        if sparse:
            out = est.tree_.apply(X.tocsr())
        else:
            out = est.tree_.apply(X)
        # Total weight reaching each node; zero entries are inner nodes.
        node_weights = np.bincount(out, weights=weights)
        # drop inner nodes
        leaf_weights = node_weights[node_weights != 0]
        assert_greater_equal(
            np.min(leaf_weights),
            total_weight * est.min_weight_fraction_leaf,
            "Failed with {0} "
            "min_weight_fraction_leaf={1}".format(
                name, est.min_weight_fraction_leaf))
def test_min_weight_fraction_leaf():
    """Yield the weight-fraction check for every tree type."""
    # Check on dense input
    for tree_name in ALL_TREES:
        yield check_min_weight_fraction_leaf, tree_name, "iris"
    # Check on sparse input
    for tree_name in SPARSE_TREES:
        yield check_min_weight_fraction_leaf, tree_name, "multilabel", True
def test_pickle():
    # Check that tree estimator are pickable
    cases = [(CLF_TREES, iris.data, iris.target, "classification"),
             (REG_TREES, boston.data, boston.target, "regression")]
    for registry, data, target, task in cases:
        for est_name, EstCls in registry.items():
            original = EstCls(random_state=0)
            original.fit(data, target)
            score_before = original.score(data, target)
            # Round-trip through pickle and compare type and score.
            clone = pickle.loads(pickle.dumps(original))
            assert_equal(type(clone), original.__class__)
            score_after = clone.score(data, target)
            assert_equal(score_before, score_after,
                         "Failed to generate same score "
                         "after pickling ({0}) "
                         "with {1}".format(task, est_name))
def test_multioutput():
    # Check estimators on multi-output problems.
    X = [[-2, -1],
         [-1, -1],
         [-1, -2],
         [1, 1],
         [1, 2],
         [2, 1],
         [-2, 1],
         [-1, 1],
         [-1, 2],
         [2, -1],
         [1, -1],
         [1, -2]]
    # Two outputs per sample: a binary one and a 4-valued one.
    y = [[-1, 0],
         [-1, 0],
         [-1, 0],
         [1, 1],
         [1, 1],
         [1, 1],
         [-1, 2],
         [-1, 2],
         [-1, 2],
         [1, 3],
         [1, 3],
         [1, 3]]
    T = [[-1, -1], [1, 1], [-1, 1], [1, -1]]
    y_true = [[-1, 0], [1, 1], [-1, 2], [1, 3]]
    # toy classification problem
    for name, TreeClassifier in CLF_TREES.items():
        clf = TreeClassifier(random_state=0)
        y_hat = clf.fit(X, y).predict(T)
        assert_array_equal(y_hat, y_true)
        assert_equal(y_hat.shape, (4, 2))
        # predict_proba returns one array per output, shaped
        # (n_samples, n_classes_of_that_output): 2 and 4 classes here.
        proba = clf.predict_proba(T)
        assert_equal(len(proba), 2)
        assert_equal(proba[0].shape, (4, 2))
        assert_equal(proba[1].shape, (4, 4))
        log_proba = clf.predict_log_proba(T)
        assert_equal(len(log_proba), 2)
        assert_equal(log_proba[0].shape, (4, 2))
        assert_equal(log_proba[1].shape, (4, 4))
    # toy regression problem
    for name, TreeRegressor in REG_TREES.items():
        reg = TreeRegressor(random_state=0)
        y_hat = reg.fit(X, y).predict(T)
        assert_almost_equal(y_hat, y_true)
        assert_equal(y_hat.shape, (4, 2))
def test_classes_shape():
    # Test that n_classes_ and classes_ have proper shape.
    for tree_name, TreeCls in CLF_TREES.items():
        # Classification, single output
        estimator = TreeCls(random_state=0)
        estimator.fit(X, y)
        assert_equal(estimator.n_classes_, 2)
        assert_array_equal(estimator.classes_, [-1, 1])
        # Classification, multi-output
        stacked_y = np.vstack((y, np.array(y) * 2)).T
        estimator = TreeCls(random_state=0)
        estimator.fit(X, stacked_y)
        assert_equal(len(estimator.n_classes_), 2)
        assert_equal(len(estimator.classes_), 2)
        assert_array_equal(estimator.n_classes_, [2, 2])
        assert_array_equal(estimator.classes_, [[-1, 1], [-2, 2]])
def test_unbalanced_iris():
    # Check class rebalancing.
    subset_X = iris.data[:125]
    subset_y = iris.target[:125]
    balancing = _balance_weights(subset_y)
    for tree_name, TreeCls in CLF_TREES.items():
        estimator = TreeCls(random_state=0)
        estimator.fit(subset_X, subset_y, sample_weight=balancing)
        # With balanced weights the truncated set is fit perfectly.
        assert_almost_equal(estimator.predict(subset_X), subset_y)
def test_memory_layout():
    # Check that it works no matter the memory layout
    for (name, TreeEstimator), dtype in product(ALL_TREES.items(),
                                                [np.float64, np.float32]):
        est = TreeEstimator(random_state=0)
        # Nothing
        X = np.asarray(iris.data, dtype=dtype)
        y = iris.target
        assert_array_equal(est.fit(X, y).predict(X), y)
        # C-order
        X = np.asarray(iris.data, order="C", dtype=dtype)
        y = iris.target
        assert_array_equal(est.fit(X, y).predict(X), y)
        # F-order
        X = np.asarray(iris.data, order="F", dtype=dtype)
        y = iris.target
        assert_array_equal(est.fit(X, y).predict(X), y)
        # Contiguous
        X = np.ascontiguousarray(iris.data, dtype=dtype)
        y = iris.target
        assert_array_equal(est.fit(X, y).predict(X), y)
        # Sparse layouts only for estimators whose splitter supports them.
        if est.splitter in SPARSE_SPLITTERS:
            # csr matrix
            X = csr_matrix(iris.data, dtype=dtype)
            y = iris.target
            assert_array_equal(est.fit(X, y).predict(X), y)
            # csc_matrix
            X = csc_matrix(iris.data, dtype=dtype)
            y = iris.target
            assert_array_equal(est.fit(X, y).predict(X), y)
        # Strided
        X = np.asarray(iris.data[::3], dtype=dtype)
        y = iris.target[::3]
        assert_array_equal(est.fit(X, y).predict(X), y)
def test_sample_weight():
    # Check sample weighting.
    # Test that zero-weighted samples are not taken into account
    X = np.arange(100)[:, np.newaxis]
    y = np.ones(100)
    y[:50] = 0.0
    sample_weight = np.ones(100)
    sample_weight[y == 0] = 0.0
    clf = DecisionTreeClassifier(random_state=0)
    clf.fit(X, y, sample_weight=sample_weight)
    # With class-0 samples weighted out, everything is predicted as 1.
    assert_array_equal(clf.predict(X), np.ones(100))
    # Test that low weighted samples are not taken into account at low depth
    X = np.arange(200)[:, np.newaxis]
    y = np.zeros(200)
    y[50:100] = 1
    y[100:200] = 2
    X[100:200, 0] = 200
    sample_weight = np.ones(200)
    sample_weight[y == 2] = .51  # Samples of class '2' are still weightier
    clf = DecisionTreeClassifier(max_depth=1, random_state=0)
    clf.fit(X, y, sample_weight=sample_weight)
    # Root split isolates the weightier class-2 block at X == 200.
    assert_equal(clf.tree_.threshold[0], 149.5)
    sample_weight[y == 2] = .5  # Samples of class '2' are no longer weightier
    clf = DecisionTreeClassifier(max_depth=1, random_state=0)
    clf.fit(X, y, sample_weight=sample_weight)
    assert_equal(clf.tree_.threshold[0], 49.5)  # Threshold should have moved
    # Test that sample weighting is the same as having duplicates
    X = iris.data
    y = iris.target
    duplicates = rng.randint(0, X.shape[0], 200)
    clf = DecisionTreeClassifier(random_state=1)
    clf.fit(X[duplicates], y[duplicates])
    # An integer weight k must be equivalent to repeating the sample k times.
    sample_weight = np.bincount(duplicates, minlength=X.shape[0])
    clf2 = DecisionTreeClassifier(random_state=1)
    clf2.fit(X, y, sample_weight=sample_weight)
    internal = clf.tree_.children_left != tree._tree.TREE_LEAF
    assert_array_almost_equal(clf.tree_.threshold[internal],
                              clf2.tree_.threshold[internal])
def test_sample_weight_invalid():
    # Check sample weighting raises errors.
    features = np.arange(100)[:, np.newaxis]
    target = np.ones(100)
    target[:50] = 0.0
    estimator = DecisionTreeClassifier(random_state=0)
    # 2-D weights, a scalar weight, and both length mismatches must raise.
    for bad_weight in (np.random.rand(100, 1),
                       np.array(0),
                       np.ones(101),
                       np.ones(99)):
        assert_raises(ValueError, estimator.fit, features, target,
                      sample_weight=bad_weight)
def check_class_weights(name):
    """Check class_weights resemble sample_weights behavior."""
    TreeClassifier = CLF_TREES[name]
    # Iris is balanced, so no effect expected for using 'balanced' weights
    clf1 = TreeClassifier(random_state=0)
    clf1.fit(iris.data, iris.target)
    clf2 = TreeClassifier(class_weight='balanced', random_state=0)
    clf2.fit(iris.data, iris.target)
    assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)
    # Make a multi-output problem with three copies of Iris
    iris_multi = np.vstack((iris.target, iris.target, iris.target)).T
    # Create user-defined weights that should balance over the outputs
    clf3 = TreeClassifier(class_weight=[{0: 2., 1: 2., 2: 1.},
                                        {0: 2., 1: 1., 2: 2.},
                                        {0: 1., 1: 2., 2: 2.}],
                          random_state=0)
    clf3.fit(iris.data, iris_multi)
    assert_almost_equal(clf2.feature_importances_, clf3.feature_importances_)
    # Check against multi-output "auto" which should also have no effect
    clf4 = TreeClassifier(class_weight='balanced', random_state=0)
    clf4.fit(iris.data, iris_multi)
    assert_almost_equal(clf3.feature_importances_, clf4.feature_importances_)
    # Inflate importance of class 1, check against user-defined weights
    sample_weight = np.ones(iris.target.shape)
    sample_weight[iris.target == 1] *= 100
    class_weight = {0: 1., 1: 100., 2: 1.}
    clf1 = TreeClassifier(random_state=0)
    clf1.fit(iris.data, iris.target, sample_weight)
    clf2 = TreeClassifier(class_weight=class_weight, random_state=0)
    clf2.fit(iris.data, iris.target)
    assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)
    # Check that sample_weight and class_weight are multiplicative
    clf1 = TreeClassifier(random_state=0)
    clf1.fit(iris.data, iris.target, sample_weight ** 2)
    clf2 = TreeClassifier(class_weight=class_weight, random_state=0)
    clf2.fit(iris.data, iris.target, sample_weight)
    assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)
def test_class_weights():
    """Run the class-weight consistency check for every classifier type."""
    for tree_name in CLF_TREES:
        yield check_class_weights, tree_name
def check_class_weight_errors(name):
    # Test if class_weight raises errors and warnings when expected.
    TreeClassifier = CLF_TREES[name]
    multi_y = np.vstack((y, np.array(y) * 2)).T
    # Invalid preset string
    estimator = TreeClassifier(class_weight='the larch', random_state=0)
    assert_raises(ValueError, estimator.fit, X, y)
    assert_raises(ValueError, estimator.fit, X, multi_y)
    # Not a list or preset for multi-output
    estimator = TreeClassifier(class_weight=1, random_state=0)
    assert_raises(ValueError, estimator.fit, X, multi_y)
    # Incorrect length list for multi-output
    estimator = TreeClassifier(class_weight=[{-1: 0.5, 1: 1.}],
                               random_state=0)
    assert_raises(ValueError, estimator.fit, X, multi_y)
def test_class_weight_errors():
    """Run the class-weight error check for every classifier type."""
    for tree_name in CLF_TREES:
        yield check_class_weight_errors, tree_name
def test_max_leaf_nodes():
    # Test greedy trees with max_depth + 1 leafs.
    # Fix: drop the redundant local `from sklearn.tree._tree import
    # TREE_LEAF` (already imported at module scope) and stop binding the
    # fitted structure to a local named `tree`, which shadowed the
    # module-level `sklearn.tree` import.
    X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
    k = 4
    for name, TreeEstimator in ALL_TREES.items():
        est = TreeEstimator(max_depth=None, max_leaf_nodes=k + 1).fit(X, y)
        tree_ = est.tree_
        # A tree with k+1 leaves has exactly k+1 TREE_LEAF markers.
        assert_equal((tree_.children_left == TREE_LEAF).sum(), k + 1)
    # max_leaf_nodes in (0, 1) should raise ValueError
    est = TreeEstimator(max_depth=None, max_leaf_nodes=0)
    assert_raises(ValueError, est.fit, X, y)
    est = TreeEstimator(max_depth=None, max_leaf_nodes=1)
    assert_raises(ValueError, est.fit, X, y)
    est = TreeEstimator(max_depth=None, max_leaf_nodes=0.1)
    assert_raises(ValueError, est.fit, X, y)
def test_max_leaf_nodes_max_depth():
    # Test precedence of max_leaf_nodes over max_depth.
    X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
    k = 4
    for name, TreeEstimator in ALL_TREES.items():
        est = TreeEstimator(max_depth=1, max_leaf_nodes=k).fit(X, y)
        # Renamed from `tree` to avoid shadowing the module-level
        # `sklearn.tree` import.
        tree_ = est.tree_
        # max_leaf_nodes=4 wins over max_depth=1: depth exceeds 1.
        assert_greater(tree_.max_depth, 1)
def test_arrays_persist():
    # Ensure property arrays' memory stays alive when tree disappears
    # non-regression for #2726
    for attr in ['n_classes', 'value', 'children_left', 'children_right',
                 'threshold', 'impurity', 'feature', 'n_node_samples']:
        # The fitted estimator is garbage-collectable right after this
        # expression; the returned array must keep its buffer alive.
        value = getattr(DecisionTreeClassifier().fit([[0]], [0]).tree_, attr)
        # if pointing to freed memory, contents may be arbitrary
        assert_true(-2 <= value.flat[0] < 2,
                    'Array points to arbitrary memory')
def test_only_constant_features():
    """With all-constant features no split is possible: depth stays 0."""
    local_rng = check_random_state(0)
    features = np.zeros((10, 20))
    labels = local_rng.randint(0, 2, (10, ))
    for tree_name, TreeCls in ALL_TREES.items():
        estimator = TreeCls(random_state=0)
        estimator.fit(features, labels)
        assert_equal(estimator.tree_.max_depth, 0)
def test_with_only_one_non_constant_features():
    """A single informative column among 1000 constants yields depth 1."""
    features = np.hstack([np.array([[1.], [1.], [0.], [0.]]),
                          np.zeros((4, 1000))])
    labels = np.array([0., 1., 0., 1.0])
    for tree_name, TreeCls in CLF_TREES.items():
        estimator = TreeCls(random_state=0, max_features=1)
        estimator.fit(features, labels)
        assert_equal(estimator.tree_.max_depth, 1)
        # Each leaf holds one sample of each class: probabilities are 0.5.
        assert_array_equal(estimator.predict_proba(features),
                           0.5 * np.ones((4, 2)))
    for tree_name, TreeCls in REG_TREES.items():
        estimator = TreeCls(random_state=0, max_features=1)
        estimator.fit(features, labels)
        assert_equal(estimator.tree_.max_depth, 1)
        assert_array_equal(estimator.predict(features),
                           0.5 * np.ones((4, )))
def test_big_input():
    # Test if the warning for too large inputs is appropriate.
    X = np.repeat(10 ** 40., 4).astype(np.float64).reshape(-1, 1)
    clf = DecisionTreeClassifier()
    try:
        clf.fit(X, [0, 1, 0, 1])
    except ValueError as e:
        assert_in("float32", str(e))
    else:
        # Bug fix: the test previously passed silently when fit() did not
        # raise at all; 1e40 cannot be represented as float32 and must
        # produce a ValueError that mentions the dtype.
        raise AssertionError("ValueError mentioning float32 was not raised")
def test_realloc():
    # The Cython test helper must raise MemoryError on a failing realloc.
    from sklearn.tree._tree import _realloc_test
    assert_raises(MemoryError, _realloc_test)
def test_huge_allocations():
    # Check the failure modes of absurdly large max_leaf_nodes requests.
    n_bits = int(platform.architecture()[0].rstrip('bit'))
    X = np.random.randn(10, 2)
    y = np.random.randint(0, 2, 10)
    # Sanity check: we cannot request more memory than the size of the address
    # space. Currently raises OverflowError.
    huge = 2 ** (n_bits + 1)
    clf = DecisionTreeClassifier(splitter='best', max_leaf_nodes=huge)
    assert_raises(Exception, clf.fit, X, y)
    # Non-regression test: MemoryError used to be dropped by Cython
    # because of missing "except *".
    huge = 2 ** (n_bits - 1) - 1
    clf = DecisionTreeClassifier(splitter='best', max_leaf_nodes=huge)
    assert_raises(MemoryError, clf.fit, X, y)
def check_sparse_input(tree, dataset, max_depth=None):
    """Fitting on dense and sparse copies of `dataset` must give the same
    tree and the same predictions for the estimator named by `tree`."""
    TreeEstimator = ALL_TREES[tree]
    X = DATASETS[dataset]["X"]
    X_sparse = DATASETS[dataset]["X_sparse"]
    y = DATASETS[dataset]["y"]
    # Gain testing time
    if dataset in ["digits", "boston"]:
        n_samples = X.shape[0] // 5
        X = X[:n_samples]
        X_sparse = X_sparse[:n_samples]
        y = y[:n_samples]
    # NOTE: X_sparse is rebound each iteration, so later formats convert
    # from the previous format rather than the original CSC matrix.
    for sparse_format in (csr_matrix, csc_matrix, coo_matrix):
        X_sparse = sparse_format(X_sparse)
        # Check the default (depth first search)
        d = TreeEstimator(random_state=0, max_depth=max_depth).fit(X, y)
        s = TreeEstimator(random_state=0, max_depth=max_depth).fit(X_sparse, y)
        assert_tree_equal(d.tree_, s.tree_,
                          "{0} with dense and sparse format gave different "
                          "trees".format(tree))
        y_pred = d.predict(X)
        if tree in CLF_TREES:
            y_proba = d.predict_proba(X)
            y_log_proba = d.predict_log_proba(X)
        # The sparse-fitted model must predict identically for every
        # sparse test-matrix format.
        for sparse_matrix in (csr_matrix, csc_matrix, coo_matrix):
            X_sparse_test = sparse_matrix(X_sparse, dtype=np.float32)
            assert_array_almost_equal(s.predict(X_sparse_test), y_pred)
            if tree in CLF_TREES:
                assert_array_almost_equal(s.predict_proba(X_sparse_test),
                                          y_proba)
                assert_array_almost_equal(s.predict_log_proba(X_sparse_test),
                                          y_log_proba)
def test_sparse_input():
    """Yield dense-vs-sparse equivalence checks over datasets and trees."""
    sparse_capable_datasets = ("clf_small", "toy", "digits", "multilabel",
                               "sparse-pos", "sparse-neg", "sparse-mix",
                               "zeros")
    for tree_type, dataset in product(SPARSE_TREES, sparse_capable_datasets):
        # "digits" is slow; cap the depth to keep the test fast.
        depth_cap = 3 if dataset == "digits" else None
        yield (check_sparse_input, tree_type, dataset, depth_cap)
    # Due to numerical instability of MSE and too strict test, we limit the
    # maximal depth
    for tree_type, dataset in product(REG_TREES, ["boston", "reg_small"]):
        if tree_type in SPARSE_TREES:
            yield (check_sparse_input, tree_type, dataset, 2)
def check_sparse_parameters(tree, dataset):
    """Check dense/sparse agreement under several parameter settings."""
    TreeEstimator = ALL_TREES[tree]
    bunch = DATASETS[dataset]
    X, X_sparse, y = bunch["X"], bunch["X_sparse"], bunch["y"]

    # Each entry exercises a different stopping/splitting parameter; the
    # last one (max_leaf_nodes) triggers best-first search.
    parameter_settings = [
        dict(max_features=1, max_depth=2),
        dict(max_features=1, min_samples_split=10),
        dict(min_samples_leaf=X_sparse.shape[0] // 2),
        dict(max_leaf_nodes=3),
    ]

    for params in parameter_settings:
        dense_est = TreeEstimator(random_state=0, **params).fit(X, y)
        sparse_est = TreeEstimator(random_state=0, **params).fit(X_sparse, y)
        assert_tree_equal(dense_est.tree_, sparse_est.tree_,
                          "{0} with dense and sparse format gave different "
                          "trees".format(tree))
        assert_array_almost_equal(sparse_est.predict(X), dense_est.predict(X))
def test_sparse_parameters():
    """Yield parameter-equivalence checks on the purely sparse datasets."""
    sparse_datasets = ["sparse-pos", "sparse-neg", "sparse-mix", "zeros"]
    for tree_name, dataset in product(SPARSE_TREES, sparse_datasets):
        yield (check_sparse_parameters, tree_name, dataset)
def check_sparse_criterion(tree, dataset):
    """Check dense/sparse agreement for every supported split criterion."""
    TreeEstimator = ALL_TREES[tree]
    bunch = DATASETS[dataset]
    X, X_sparse, y = bunch["X"], bunch["X_sparse"], bunch["y"]

    # Regression and classification trees expose different criteria.
    criterions = REG_CRITERIONS if tree in REG_TREES else CLF_CRITERIONS
    for criterion in criterions:
        dense_est = TreeEstimator(random_state=0, max_depth=3,
                                  criterion=criterion).fit(X, y)
        sparse_est = TreeEstimator(random_state=0, max_depth=3,
                                   criterion=criterion).fit(X_sparse, y)
        assert_tree_equal(dense_est.tree_, sparse_est.tree_,
                          "{0} with dense and sparse format gave different "
                          "trees".format(tree))
        assert_array_almost_equal(sparse_est.predict(X), dense_est.predict(X))
def test_sparse_criterion():
    """Yield criterion-equivalence checks on the purely sparse datasets."""
    sparse_datasets = ["sparse-pos", "sparse-neg", "sparse-mix", "zeros"]
    for tree_name, dataset in product(SPARSE_TREES, sparse_datasets):
        yield (check_sparse_criterion, tree_name, dataset)
def check_explicit_sparse_zeros(tree, max_depth=3,
                                n_features=10):
    """Trees must treat explicitly stored zeros like implicit ones."""
    TreeEstimator = ALL_TREES[tree]

    # n_samples is set to n_features to ease the simultaneous construction
    # of a csr and a csc matrix from the same (data, indices, indptr).
    n_samples = n_features
    samples = np.arange(n_samples)

    # Build a random sparse matrix column by column; values come from
    # {-1, 0, 1}, so explicit zeros end up stored in ``data``.
    random_state = check_random_state(0)
    indices = []
    data = []
    offset = 0
    indptr = [offset]
    for col in range(n_features):
        n_nonzero_col = random_state.binomial(n_samples, 0.5)
        indices_col = random_state.permutation(samples)[:n_nonzero_col]
        indices.append(indices_col)
        data_col = random_state.binomial(3, 0.5, size=(n_nonzero_col, )) - 1
        data.append(data_col)
        offset += n_nonzero_col
        indptr.append(offset)

    indices = np.concatenate(indices)
    data = np.array(np.concatenate(data), dtype=np.float32)
    X_sparse = csc_matrix((data, indices, indptr),
                          shape=(n_samples, n_features))
    X = X_sparse.toarray()
    X_sparse_test = csr_matrix((data, indices, indptr),
                               shape=(n_samples, n_features))
    X_test = X_sparse_test.toarray()
    y = random_state.randint(0, 3, size=(n_samples, ))

    # Ensure that X_sparse_test owns its data, indices and indptr arrays.
    X_sparse_test = X_sparse_test.copy()

    # Sanity check: there really are explicit zeros in both matrices.
    assert_greater((X_sparse.data == 0.).sum(), 0)
    assert_greater((X_sparse_test.data == 0.).sum(), 0)

    # Perform the comparison on every dense/sparse input combination.
    dense_est = TreeEstimator(random_state=0, max_depth=max_depth).fit(X, y)
    sparse_est = TreeEstimator(random_state=0,
                               max_depth=max_depth).fit(X_sparse, y)
    assert_tree_equal(dense_est.tree_, sparse_est.tree_,
                      "{0} with dense and sparse format gave different "
                      "trees".format(tree))

    for X1, X2 in product((X_test, X_sparse_test), repeat=2):
        assert_array_almost_equal(sparse_est.tree_.apply(X1),
                                  dense_est.tree_.apply(X2))
        assert_array_almost_equal(sparse_est.apply(X1), dense_est.apply(X2))
        assert_array_almost_equal(sparse_est.apply(X1),
                                  sparse_est.tree_.apply(X1))
        assert_array_almost_equal(sparse_est.predict(X1),
                                  dense_est.predict(X2))
        if tree in CLF_TREES:
            assert_array_almost_equal(sparse_est.predict_proba(X1),
                                      dense_est.predict_proba(X2))
def test_explicit_sparse_zeros():
    """Yield the explicit-zeros check for every sparse-capable tree."""
    for tree_name in SPARSE_TREES:
        yield (check_explicit_sparse_zeros, tree_name)
def check_raise_error_on_1d_input(name):
    """Fitting or predicting with 1d input must raise ValueError."""
    TreeEstimator = ALL_TREES[name]

    X_1d = iris.data[:, 0].ravel()
    X_2d = iris.data[:, 0].reshape((-1, 1))
    y = iris.target

    # 1d data is rejected at fit time...
    assert_raises(ValueError, TreeEstimator(random_state=0).fit, X_1d, y)

    # ...and also at predict time on an already-fitted estimator.
    est = TreeEstimator(random_state=0)
    est.fit(X_2d, y)
    assert_raises(ValueError, est.predict, X_1d)
def test_1d_input():
    """Yield the 1d-input rejection check for every tree estimator."""
    for name in ALL_TREES:
        yield (check_raise_error_on_1d_input, name)
def _check_min_weight_leaf_split_level(TreeEstimator, X, y, sample_weight):
    # Private function to keep pretty printing in nose yielded tests.

    # Without the weight constraint a single split is made (depth 1)...
    est = TreeEstimator(random_state=0)
    est.fit(X, y, sample_weight=sample_weight)
    assert_equal(est.tree_.max_depth, 1)

    # ...whereas requiring 40% of the total weight per leaf forbids any
    # split at all (depth 0).
    est = TreeEstimator(random_state=0, min_weight_fraction_leaf=0.4)
    est.fit(X, y, sample_weight=sample_weight)
    assert_equal(est.tree_.max_depth, 0)
def check_min_weight_leaf_split_level(name):
    """Check min_weight_fraction_leaf split suppression, dense and sparse."""
    TreeEstimator = ALL_TREES[name]

    X = np.array([[0], [0], [0], [0], [1]])
    y = [0, 0, 0, 0, 1]
    sample_weight = [0.2] * 5
    _check_min_weight_leaf_split_level(TreeEstimator, X, y, sample_weight)

    # Repeat the check with sparse input when the splitter supports it.
    if TreeEstimator().splitter in SPARSE_SPLITTERS:
        _check_min_weight_leaf_split_level(TreeEstimator, csc_matrix(X), y,
                                           sample_weight)
def test_min_weight_leaf_split_level():
    """Yield the min-weight-leaf check for every tree estimator."""
    for name in ALL_TREES:
        yield (check_min_weight_leaf_split_level, name)
def check_public_apply(name):
    """The public apply() must match the low-level tree_.apply()."""
    est = ALL_TREES[name]()
    est.fit(X_small, y_small)

    # tree_.apply expects the internal float32 DTYPE.
    X_small32 = X_small.astype(tree._tree.DTYPE)
    assert_array_equal(est.apply(X_small),
                       est.tree_.apply(X_small32))
def check_public_apply_sparse(name):
    """Like check_public_apply, but with a sparse matrix for tree_.apply."""
    est = ALL_TREES[name]()
    est.fit(X_small, y_small)

    # tree_.apply expects the internal float32 DTYPE, here in CSR form.
    X_small32 = csr_matrix(X_small.astype(tree._tree.DTYPE))
    assert_array_equal(est.apply(X_small),
                       est.tree_.apply(X_small32))
def test_public_apply():
    """Yield apply() consistency checks, dense first, then sparse."""
    for name in ALL_TREES:
        yield (check_public_apply, name)

    for name in SPARSE_TREES:
        yield (check_public_apply_sparse, name)
|
bsd-3-clause
|
xubenben/scikit-learn
|
examples/linear_model/plot_logistic_l1_l2_sparsity.py
|
384
|
2601
|
"""
==============================================
L1 Penalty and Sparsity in Logistic Regression
==============================================
Comparison of the sparsity (percentage of zero coefficients) of solutions when
L1 and L2 penalty are used for different values of C. We can see that large
values of C give more freedom to the model. Conversely, smaller values of C
constrain the model more. In the L1 penalty case, this leads to sparser
solutions.
We classify 8x8 images of digits into two classes: 0-4 against 5-9.
The visualization shows coefficients of the models for varying C.
"""
print(__doc__)
# Authors: Alexandre Gramfort <[email protected]>
# Mathieu Blondel <[email protected]>
# Andreas Mueller <[email protected]>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import LogisticRegression
from sklearn import datasets
from sklearn.preprocessing import StandardScaler
digits = datasets.load_digits()
X, y = digits.data, digits.target
X = StandardScaler().fit_transform(X)
# classify small against large digits
y = (y > 4).astype(np.int)
# Set regularization parameter
for i, C in enumerate((100, 1, 0.01)):
# turn down tolerance for short training time
clf_l1_LR = LogisticRegression(C=C, penalty='l1', tol=0.01)
clf_l2_LR = LogisticRegression(C=C, penalty='l2', tol=0.01)
clf_l1_LR.fit(X, y)
clf_l2_LR.fit(X, y)
coef_l1_LR = clf_l1_LR.coef_.ravel()
coef_l2_LR = clf_l2_LR.coef_.ravel()
# coef_l1_LR contains zeros due to the
# L1 sparsity inducing norm
sparsity_l1_LR = np.mean(coef_l1_LR == 0) * 100
sparsity_l2_LR = np.mean(coef_l2_LR == 0) * 100
print("C=%.2f" % C)
print("Sparsity with L1 penalty: %.2f%%" % sparsity_l1_LR)
print("score with L1 penalty: %.4f" % clf_l1_LR.score(X, y))
print("Sparsity with L2 penalty: %.2f%%" % sparsity_l2_LR)
print("score with L2 penalty: %.4f" % clf_l2_LR.score(X, y))
l1_plot = plt.subplot(3, 2, 2 * i + 1)
l2_plot = plt.subplot(3, 2, 2 * (i + 1))
if i == 0:
l1_plot.set_title("L1 penalty")
l2_plot.set_title("L2 penalty")
l1_plot.imshow(np.abs(coef_l1_LR.reshape(8, 8)), interpolation='nearest',
cmap='binary', vmax=1, vmin=0)
l2_plot.imshow(np.abs(coef_l2_LR.reshape(8, 8)), interpolation='nearest',
cmap='binary', vmax=1, vmin=0)
plt.text(-8, 3, "C = %.2f" % C)
l1_plot.set_xticks(())
l1_plot.set_yticks(())
l2_plot.set_xticks(())
l2_plot.set_yticks(())
plt.show()
|
bsd-3-clause
|
NixaSoftware/CVis
|
venv/lib/python2.7/site-packages/pandas/tests/indexes/test_interval.py
|
3
|
46724
|
from __future__ import division
import pytest
import numpy as np
from datetime import timedelta
from pandas import (Interval, IntervalIndex, Index, isna,
interval_range, Timestamp, Timedelta,
compat, date_range, timedelta_range, DateOffset)
from pandas.tseries.offsets import Day
from pandas._libs.interval import IntervalTree
from pandas.tests.indexes.common import Base
import pandas.util.testing as tm
import pandas as pd
class TestIntervalIndex(Base):
_holder = IntervalIndex
def setup_method(self, method):
self.index = IntervalIndex.from_arrays([0, 1], [1, 2])
self.index_with_nan = IntervalIndex.from_tuples(
[(0, 1), np.nan, (1, 2)])
self.indices = dict(intervalIndex=tm.makeIntervalIndex(10))
def create_index(self):
return IntervalIndex.from_breaks(np.arange(10))
def test_constructors(self):
expected = self.index
actual = IntervalIndex.from_breaks(np.arange(3), closed='right')
assert expected.equals(actual)
alternate = IntervalIndex.from_breaks(np.arange(3), closed='left')
assert not expected.equals(alternate)
actual = IntervalIndex.from_intervals([Interval(0, 1), Interval(1, 2)])
assert expected.equals(actual)
actual = IntervalIndex([Interval(0, 1), Interval(1, 2)])
assert expected.equals(actual)
actual = IntervalIndex.from_arrays(np.arange(2), np.arange(2) + 1,
closed='right')
assert expected.equals(actual)
actual = Index([Interval(0, 1), Interval(1, 2)])
assert isinstance(actual, IntervalIndex)
assert expected.equals(actual)
actual = Index(expected)
assert isinstance(actual, IntervalIndex)
assert expected.equals(actual)
def test_constructors_other(self):
# all-nan
result = IntervalIndex.from_intervals([np.nan])
expected = np.array([np.nan], dtype=object)
tm.assert_numpy_array_equal(result.values, expected)
# empty
result = IntervalIndex.from_intervals([])
expected = np.array([], dtype=object)
tm.assert_numpy_array_equal(result.values, expected)
def test_constructors_errors(self):
# scalar
with pytest.raises(TypeError):
IntervalIndex(5)
# not an interval
with pytest.raises(TypeError):
IntervalIndex([0, 1])
with pytest.raises(TypeError):
IntervalIndex.from_intervals([0, 1])
# invalid closed
with pytest.raises(ValueError):
IntervalIndex.from_arrays([0, 1], [1, 2], closed='invalid')
# mismatched closed
with pytest.raises(ValueError):
IntervalIndex.from_intervals([Interval(0, 1),
Interval(1, 2, closed='left')])
with pytest.raises(ValueError):
IntervalIndex.from_arrays([0, 10], [3, 5])
with pytest.raises(ValueError):
Index([Interval(0, 1), Interval(2, 3, closed='left')])
# no point in nesting periods in an IntervalIndex
with pytest.raises(ValueError):
IntervalIndex.from_breaks(
pd.period_range('2000-01-01', periods=3))
def test_constructors_datetimelike(self):
# DTI / TDI
for idx in [pd.date_range('20130101', periods=5),
pd.timedelta_range('1 day', periods=5)]:
result = IntervalIndex.from_breaks(idx)
expected = IntervalIndex.from_breaks(idx.values)
tm.assert_index_equal(result, expected)
expected_scalar_type = type(idx[0])
i = result[0]
assert isinstance(i.left, expected_scalar_type)
assert isinstance(i.right, expected_scalar_type)
def test_constructors_error(self):
# non-intervals
def f():
IntervalIndex.from_intervals([0.997, 4.0])
pytest.raises(TypeError, f)
def test_properties(self):
index = self.index
assert len(index) == 2
assert index.size == 2
assert index.shape == (2, )
tm.assert_index_equal(index.left, Index([0, 1]))
tm.assert_index_equal(index.right, Index([1, 2]))
tm.assert_index_equal(index.mid, Index([0.5, 1.5]))
assert index.closed == 'right'
expected = np.array([Interval(0, 1), Interval(1, 2)], dtype=object)
tm.assert_numpy_array_equal(np.asarray(index), expected)
tm.assert_numpy_array_equal(index.values, expected)
# with nans
index = self.index_with_nan
assert len(index) == 3
assert index.size == 3
assert index.shape == (3, )
tm.assert_index_equal(index.left, Index([0, np.nan, 1]))
tm.assert_index_equal(index.right, Index([1, np.nan, 2]))
tm.assert_index_equal(index.mid, Index([0.5, np.nan, 1.5]))
assert index.closed == 'right'
expected = np.array([Interval(0, 1), np.nan,
Interval(1, 2)], dtype=object)
tm.assert_numpy_array_equal(np.asarray(index), expected)
tm.assert_numpy_array_equal(index.values, expected)
def test_with_nans(self):
index = self.index
assert not index.hasnans
tm.assert_numpy_array_equal(index.isna(),
np.array([False, False]))
tm.assert_numpy_array_equal(index.notna(),
np.array([True, True]))
index = self.index_with_nan
assert index.hasnans
tm.assert_numpy_array_equal(index.notna(),
np.array([True, False, True]))
tm.assert_numpy_array_equal(index.isna(),
np.array([False, True, False]))
def test_copy(self):
actual = self.index.copy()
assert actual.equals(self.index)
actual = self.index.copy(deep=True)
assert actual.equals(self.index)
assert actual.left is not self.index.left
def test_ensure_copied_data(self):
# exercise the copy flag in the constructor
# not copying
index = self.index
result = IntervalIndex(index, copy=False)
tm.assert_numpy_array_equal(index.left.values, result.left.values,
check_same='same')
tm.assert_numpy_array_equal(index.right.values, result.right.values,
check_same='same')
# by-definition make a copy
result = IntervalIndex.from_intervals(index.values, copy=False)
tm.assert_numpy_array_equal(index.left.values, result.left.values,
check_same='copy')
tm.assert_numpy_array_equal(index.right.values, result.right.values,
check_same='copy')
def test_equals(self):
idx = self.index
assert idx.equals(idx)
assert idx.equals(idx.copy())
assert not idx.equals(idx.astype(object))
assert not idx.equals(np.array(idx))
assert not idx.equals(list(idx))
assert not idx.equals([1, 2])
assert not idx.equals(np.array([1, 2]))
assert not idx.equals(pd.date_range('20130101', periods=2))
def test_astype(self):
idx = self.index
for dtype in [np.int64, np.float64, 'datetime64[ns]',
'datetime64[ns, US/Eastern]', 'timedelta64',
'period[M]']:
pytest.raises(ValueError, idx.astype, dtype)
result = idx.astype(object)
tm.assert_index_equal(result, Index(idx.values, dtype='object'))
assert not idx.equals(result)
assert idx.equals(IntervalIndex.from_intervals(result))
result = idx.astype('interval')
tm.assert_index_equal(result, idx)
assert result.equals(idx)
result = idx.astype('category')
expected = pd.Categorical(idx, ordered=True)
tm.assert_categorical_equal(result, expected)
def test_where(self):
expected = self.index
result = self.index.where(self.index.notna())
tm.assert_index_equal(result, expected)
idx = IntervalIndex.from_breaks([1, 2])
result = idx.where([True, False])
expected = IntervalIndex.from_intervals(
[Interval(1.0, 2.0, closed='right'), np.nan])
tm.assert_index_equal(result, expected)
def test_where_array_like(self):
pass
def test_delete(self):
expected = IntervalIndex.from_breaks([1, 2])
actual = self.index.delete(0)
assert expected.equals(actual)
def test_insert(self):
expected = IntervalIndex.from_breaks(range(4))
actual = self.index.insert(2, Interval(2, 3))
assert expected.equals(actual)
pytest.raises(ValueError, self.index.insert, 0, 1)
pytest.raises(ValueError, self.index.insert, 0,
Interval(2, 3, closed='left'))
def test_take(self):
actual = self.index.take([0, 1])
assert self.index.equals(actual)
expected = IntervalIndex.from_arrays([0, 0, 1], [1, 1, 2])
actual = self.index.take([0, 0, 1])
assert expected.equals(actual)
def test_unique(self):
# unique non-overlapping
idx = IntervalIndex.from_tuples([(0, 1), (2, 3), (4, 5)])
assert idx.is_unique
# unique overlapping - distinct endpoints
idx = IntervalIndex.from_tuples([(0, 1), (0.5, 1.5)])
assert idx.is_unique
# unique overlapping - shared endpoints
idx = pd.IntervalIndex.from_tuples([(1, 2), (1, 3), (2, 3)])
assert idx.is_unique
# unique nested
idx = IntervalIndex.from_tuples([(-1, 1), (-2, 2)])
assert idx.is_unique
# duplicate
idx = IntervalIndex.from_tuples([(0, 1), (0, 1), (2, 3)])
assert not idx.is_unique
# unique mixed
idx = IntervalIndex.from_tuples([(0, 1), ('a', 'b')])
assert idx.is_unique
# duplicate mixed
idx = IntervalIndex.from_tuples([(0, 1), ('a', 'b'), (0, 1)])
assert not idx.is_unique
# empty
idx = IntervalIndex([])
assert idx.is_unique
def test_monotonic(self):
# increasing non-overlapping
idx = IntervalIndex.from_tuples([(0, 1), (2, 3), (4, 5)])
assert idx.is_monotonic
assert idx._is_strictly_monotonic_increasing
assert not idx.is_monotonic_decreasing
assert not idx._is_strictly_monotonic_decreasing
# decreasing non-overlapping
idx = IntervalIndex.from_tuples([(4, 5), (2, 3), (1, 2)])
assert not idx.is_monotonic
assert not idx._is_strictly_monotonic_increasing
assert idx.is_monotonic_decreasing
assert idx._is_strictly_monotonic_decreasing
# unordered non-overlapping
idx = IntervalIndex.from_tuples([(0, 1), (4, 5), (2, 3)])
assert not idx.is_monotonic
assert not idx._is_strictly_monotonic_increasing
assert not idx.is_monotonic_decreasing
assert not idx._is_strictly_monotonic_decreasing
# increasing overlapping
idx = IntervalIndex.from_tuples([(0, 2), (0.5, 2.5), (1, 3)])
assert idx.is_monotonic
assert idx._is_strictly_monotonic_increasing
assert not idx.is_monotonic_decreasing
assert not idx._is_strictly_monotonic_decreasing
# decreasing overlapping
idx = IntervalIndex.from_tuples([(1, 3), (0.5, 2.5), (0, 2)])
assert not idx.is_monotonic
assert not idx._is_strictly_monotonic_increasing
assert idx.is_monotonic_decreasing
assert idx._is_strictly_monotonic_decreasing
# unordered overlapping
idx = IntervalIndex.from_tuples([(0.5, 2.5), (0, 2), (1, 3)])
assert not idx.is_monotonic
assert not idx._is_strictly_monotonic_increasing
assert not idx.is_monotonic_decreasing
assert not idx._is_strictly_monotonic_decreasing
# increasing overlapping shared endpoints
idx = pd.IntervalIndex.from_tuples([(1, 2), (1, 3), (2, 3)])
assert idx.is_monotonic
assert idx._is_strictly_monotonic_increasing
assert not idx.is_monotonic_decreasing
assert not idx._is_strictly_monotonic_decreasing
# decreasing overlapping shared endpoints
idx = pd.IntervalIndex.from_tuples([(2, 3), (1, 3), (1, 2)])
assert not idx.is_monotonic
assert not idx._is_strictly_monotonic_increasing
assert idx.is_monotonic_decreasing
assert idx._is_strictly_monotonic_decreasing
# stationary
idx = IntervalIndex.from_tuples([(0, 1), (0, 1)])
assert idx.is_monotonic
assert not idx._is_strictly_monotonic_increasing
assert idx.is_monotonic_decreasing
assert not idx._is_strictly_monotonic_decreasing
# empty
idx = IntervalIndex([])
assert idx.is_monotonic
assert idx._is_strictly_monotonic_increasing
assert idx.is_monotonic_decreasing
assert idx._is_strictly_monotonic_decreasing
@pytest.mark.xfail(reason='not a valid repr as we use interval notation')
def test_repr(self):
i = IntervalIndex.from_tuples([(0, 1), (1, 2)], closed='right')
expected = ("IntervalIndex(left=[0, 1],"
"\n right=[1, 2],"
"\n closed='right',"
"\n dtype='interval[int64]')")
assert repr(i) == expected
i = IntervalIndex.from_tuples((Timestamp('20130101'),
Timestamp('20130102')),
(Timestamp('20130102'),
Timestamp('20130103')),
closed='right')
expected = ("IntervalIndex(left=['2013-01-01', '2013-01-02'],"
"\n right=['2013-01-02', '2013-01-03'],"
"\n closed='right',"
"\n dtype='interval[datetime64[ns]]')")
assert repr(i) == expected
@pytest.mark.xfail(reason='not a valid repr as we use interval notation')
def test_repr_max_seq_item_setting(self):
super(TestIntervalIndex, self).test_repr_max_seq_item_setting()
@pytest.mark.xfail(reason='not a valid repr as we use interval notation')
def test_repr_roundtrip(self):
super(TestIntervalIndex, self).test_repr_roundtrip()
def test_get_item(self):
i = IntervalIndex.from_arrays((0, 1, np.nan), (1, 2, np.nan),
closed='right')
assert i[0] == Interval(0.0, 1.0)
assert i[1] == Interval(1.0, 2.0)
assert isna(i[2])
result = i[0:1]
expected = IntervalIndex.from_arrays((0.,), (1.,), closed='right')
tm.assert_index_equal(result, expected)
result = i[0:2]
expected = IntervalIndex.from_arrays((0., 1), (1., 2.), closed='right')
tm.assert_index_equal(result, expected)
result = i[1:3]
expected = IntervalIndex.from_arrays((1., np.nan), (2., np.nan),
closed='right')
tm.assert_index_equal(result, expected)
def test_get_loc_value(self):
pytest.raises(KeyError, self.index.get_loc, 0)
assert self.index.get_loc(0.5) == 0
assert self.index.get_loc(1) == 0
assert self.index.get_loc(1.5) == 1
assert self.index.get_loc(2) == 1
pytest.raises(KeyError, self.index.get_loc, -1)
pytest.raises(KeyError, self.index.get_loc, 3)
idx = IntervalIndex.from_tuples([(0, 2), (1, 3)])
assert idx.get_loc(0.5) == 0
assert idx.get_loc(1) == 0
tm.assert_numpy_array_equal(idx.get_loc(1.5),
np.array([0, 1], dtype='int64'))
tm.assert_numpy_array_equal(np.sort(idx.get_loc(2)),
np.array([0, 1], dtype='int64'))
assert idx.get_loc(3) == 1
pytest.raises(KeyError, idx.get_loc, 3.5)
idx = IntervalIndex.from_arrays([0, 2], [1, 3])
pytest.raises(KeyError, idx.get_loc, 1.5)
def slice_locs_cases(self, breaks):
# TODO: same tests for more index types
index = IntervalIndex.from_breaks([0, 1, 2], closed='right')
assert index.slice_locs() == (0, 2)
assert index.slice_locs(0, 1) == (0, 1)
assert index.slice_locs(1, 1) == (0, 1)
assert index.slice_locs(0, 2) == (0, 2)
assert index.slice_locs(0.5, 1.5) == (0, 2)
assert index.slice_locs(0, 0.5) == (0, 1)
assert index.slice_locs(start=1) == (0, 2)
assert index.slice_locs(start=1.2) == (1, 2)
assert index.slice_locs(end=1) == (0, 1)
assert index.slice_locs(end=1.1) == (0, 2)
assert index.slice_locs(end=1.0) == (0, 1)
assert index.slice_locs(-1, -1) == (0, 0)
index = IntervalIndex.from_breaks([0, 1, 2], closed='neither')
assert index.slice_locs(0, 1) == (0, 1)
assert index.slice_locs(0, 2) == (0, 2)
assert index.slice_locs(0.5, 1.5) == (0, 2)
assert index.slice_locs(1, 1) == (1, 1)
assert index.slice_locs(1, 2) == (1, 2)
index = IntervalIndex.from_tuples([(0, 1), (2, 3), (4, 5)],
closed='both')
assert index.slice_locs(1, 1) == (0, 1)
assert index.slice_locs(1, 2) == (0, 2)
def test_slice_locs_int64(self):
self.slice_locs_cases([0, 1, 2])
def test_slice_locs_float64(self):
self.slice_locs_cases([0.0, 1.0, 2.0])
def slice_locs_decreasing_cases(self, tuples):
index = IntervalIndex.from_tuples(tuples)
assert index.slice_locs(1.5, 0.5) == (1, 3)
assert index.slice_locs(2, 0) == (1, 3)
assert index.slice_locs(2, 1) == (1, 3)
assert index.slice_locs(3, 1.1) == (0, 3)
assert index.slice_locs(3, 3) == (0, 2)
assert index.slice_locs(3.5, 3.3) == (0, 1)
assert index.slice_locs(1, -3) == (2, 3)
slice_locs = index.slice_locs(-1, -1)
assert slice_locs[0] == slice_locs[1]
def test_slice_locs_decreasing_int64(self):
self.slice_locs_cases([(2, 4), (1, 3), (0, 2)])
def test_slice_locs_decreasing_float64(self):
self.slice_locs_cases([(2., 4.), (1., 3.), (0., 2.)])
def test_slice_locs_fails(self):
index = IntervalIndex.from_tuples([(1, 2), (0, 1), (2, 3)])
with pytest.raises(KeyError):
index.slice_locs(1, 2)
def test_get_loc_interval(self):
assert self.index.get_loc(Interval(0, 1)) == 0
assert self.index.get_loc(Interval(0, 0.5)) == 0
assert self.index.get_loc(Interval(0, 1, 'left')) == 0
pytest.raises(KeyError, self.index.get_loc, Interval(2, 3))
pytest.raises(KeyError, self.index.get_loc,
Interval(-1, 0, 'left'))
def test_get_indexer(self):
actual = self.index.get_indexer([-1, 0, 0.5, 1, 1.5, 2, 3])
expected = np.array([-1, -1, 0, 0, 1, 1, -1], dtype='intp')
tm.assert_numpy_array_equal(actual, expected)
actual = self.index.get_indexer(self.index)
expected = np.array([0, 1], dtype='intp')
tm.assert_numpy_array_equal(actual, expected)
index = IntervalIndex.from_breaks([0, 1, 2], closed='left')
actual = index.get_indexer([-1, 0, 0.5, 1, 1.5, 2, 3])
expected = np.array([-1, 0, 0, 1, 1, -1, -1], dtype='intp')
tm.assert_numpy_array_equal(actual, expected)
actual = self.index.get_indexer(index[:1])
expected = np.array([0], dtype='intp')
tm.assert_numpy_array_equal(actual, expected)
actual = self.index.get_indexer(index)
expected = np.array([-1, 1], dtype='intp')
tm.assert_numpy_array_equal(actual, expected)
def test_get_indexer_subintervals(self):
# TODO: is this right?
# return indexers for wholly contained subintervals
target = IntervalIndex.from_breaks(np.linspace(0, 2, 5))
actual = self.index.get_indexer(target)
expected = np.array([0, 0, 1, 1], dtype='p')
tm.assert_numpy_array_equal(actual, expected)
target = IntervalIndex.from_breaks([0, 0.67, 1.33, 2])
actual = self.index.get_indexer(target)
expected = np.array([0, 0, 1, 1], dtype='intp')
tm.assert_numpy_array_equal(actual, expected)
actual = self.index.get_indexer(target[[0, -1]])
expected = np.array([0, 1], dtype='intp')
tm.assert_numpy_array_equal(actual, expected)
target = IntervalIndex.from_breaks([0, 0.33, 0.67, 1], closed='left')
actual = self.index.get_indexer(target)
expected = np.array([0, 0, 0], dtype='intp')
tm.assert_numpy_array_equal(actual, expected)
def test_contains(self):
# Only endpoints are valid.
i = IntervalIndex.from_arrays([0, 1], [1, 2])
# Invalid
assert 0 not in i
assert 1 not in i
assert 2 not in i
# Valid
assert Interval(0, 1) in i
assert Interval(0, 2) in i
assert Interval(0, 0.5) in i
assert Interval(3, 5) not in i
assert Interval(-1, 0, closed='left') not in i
def testcontains(self):
# can select values that are IN the range of a value
i = IntervalIndex.from_arrays([0, 1], [1, 2])
assert i.contains(0.1)
assert i.contains(0.5)
assert i.contains(1)
assert i.contains(Interval(0, 1))
assert i.contains(Interval(0, 2))
# these overlaps completely
assert i.contains(Interval(0, 3))
assert i.contains(Interval(1, 3))
assert not i.contains(20)
assert not i.contains(-20)
def test_dropna(self):
expected = IntervalIndex.from_tuples([(0.0, 1.0), (1.0, 2.0)])
ii = IntervalIndex.from_tuples([(0, 1), (1, 2), np.nan])
result = ii.dropna()
tm.assert_index_equal(result, expected)
ii = IntervalIndex.from_arrays([0, 1, np.nan], [1, 2, np.nan])
result = ii.dropna()
tm.assert_index_equal(result, expected)
def test_non_contiguous(self):
index = IntervalIndex.from_tuples([(0, 1), (2, 3)])
target = [0.5, 1.5, 2.5]
actual = index.get_indexer(target)
expected = np.array([0, -1, 1], dtype='intp')
tm.assert_numpy_array_equal(actual, expected)
assert 1.5 not in index
def test_union(self):
other = IntervalIndex.from_arrays([2], [3])
expected = IntervalIndex.from_arrays(range(3), range(1, 4))
actual = self.index.union(other)
assert expected.equals(actual)
actual = other.union(self.index)
assert expected.equals(actual)
tm.assert_index_equal(self.index.union(self.index), self.index)
tm.assert_index_equal(self.index.union(self.index[:1]),
self.index)
def test_intersection(self):
other = IntervalIndex.from_breaks([1, 2, 3])
expected = IntervalIndex.from_breaks([1, 2])
actual = self.index.intersection(other)
assert expected.equals(actual)
tm.assert_index_equal(self.index.intersection(self.index),
self.index)
def test_difference(self):
tm.assert_index_equal(self.index.difference(self.index[:1]),
self.index[1:])
def test_symmetric_difference(self):
result = self.index[:1].symmetric_difference(self.index[1:])
expected = self.index
tm.assert_index_equal(result, expected)
def test_set_operation_errors(self):
pytest.raises(ValueError, self.index.union, self.index.left)
other = IntervalIndex.from_breaks([0, 1, 2], closed='neither')
pytest.raises(ValueError, self.index.union, other)
def test_isin(self):
actual = self.index.isin(self.index)
tm.assert_numpy_array_equal(np.array([True, True]), actual)
actual = self.index.isin(self.index[:1])
tm.assert_numpy_array_equal(np.array([True, False]), actual)
def test_comparison(self):
actual = Interval(0, 1) < self.index
expected = np.array([False, True])
tm.assert_numpy_array_equal(actual, expected)
actual = Interval(0.5, 1.5) < self.index
expected = np.array([False, True])
tm.assert_numpy_array_equal(actual, expected)
actual = self.index > Interval(0.5, 1.5)
tm.assert_numpy_array_equal(actual, expected)
actual = self.index == self.index
expected = np.array([True, True])
tm.assert_numpy_array_equal(actual, expected)
actual = self.index <= self.index
tm.assert_numpy_array_equal(actual, expected)
actual = self.index >= self.index
tm.assert_numpy_array_equal(actual, expected)
actual = self.index < self.index
expected = np.array([False, False])
tm.assert_numpy_array_equal(actual, expected)
actual = self.index > self.index
tm.assert_numpy_array_equal(actual, expected)
actual = self.index == IntervalIndex.from_breaks([0, 1, 2], 'left')
tm.assert_numpy_array_equal(actual, expected)
actual = self.index == self.index.values
tm.assert_numpy_array_equal(actual, np.array([True, True]))
actual = self.index.values == self.index
tm.assert_numpy_array_equal(actual, np.array([True, True]))
actual = self.index <= self.index.values
tm.assert_numpy_array_equal(actual, np.array([True, True]))
actual = self.index != self.index.values
tm.assert_numpy_array_equal(actual, np.array([False, False]))
actual = self.index > self.index.values
tm.assert_numpy_array_equal(actual, np.array([False, False]))
actual = self.index.values > self.index
tm.assert_numpy_array_equal(actual, np.array([False, False]))
# invalid comparisons
actual = self.index == 0
tm.assert_numpy_array_equal(actual, np.array([False, False]))
actual = self.index == self.index.left
tm.assert_numpy_array_equal(actual, np.array([False, False]))
with tm.assert_raises_regex(TypeError, 'unorderable types'):
self.index > 0
with tm.assert_raises_regex(TypeError, 'unorderable types'):
self.index <= 0
with pytest.raises(TypeError):
self.index > np.arange(2)
with pytest.raises(ValueError):
self.index > np.arange(3)
def test_missing_values(self):
idx = pd.Index([np.nan, pd.Interval(0, 1), pd.Interval(1, 2)])
idx2 = pd.IntervalIndex.from_arrays([np.nan, 0, 1], [np.nan, 1, 2])
assert idx.equals(idx2)
with pytest.raises(ValueError):
IntervalIndex.from_arrays([np.nan, 0, 1], np.array([0, 1, 2]))
tm.assert_numpy_array_equal(isna(idx),
np.array([True, False, False]))
def test_sort_values(self):
expected = IntervalIndex.from_breaks([1, 2, 3, 4])
actual = IntervalIndex.from_tuples([(3, 4), (1, 2),
(2, 3)]).sort_values()
tm.assert_index_equal(expected, actual)
# nan
idx = self.index_with_nan
mask = idx.isna()
tm.assert_numpy_array_equal(mask, np.array([False, True, False]))
result = idx.sort_values()
mask = result.isna()
tm.assert_numpy_array_equal(mask, np.array([False, False, True]))
result = idx.sort_values(ascending=False)
mask = result.isna()
tm.assert_numpy_array_equal(mask, np.array([True, False, False]))
def test_datetime(self):
dates = pd.date_range('2000', periods=3)
idx = IntervalIndex.from_breaks(dates)
tm.assert_index_equal(idx.left, dates[:2])
tm.assert_index_equal(idx.right, dates[-2:])
expected = pd.date_range('2000-01-01T12:00', periods=2)
tm.assert_index_equal(idx.mid, expected)
assert pd.Timestamp('2000-01-01T12') not in idx
assert pd.Timestamp('2000-01-01T12') not in idx
target = pd.date_range('1999-12-31T12:00', periods=7, freq='12H')
actual = idx.get_indexer(target)
expected = np.array([-1, -1, 0, 0, 1, 1, -1], dtype='intp')
tm.assert_numpy_array_equal(actual, expected)
def test_append(self):
    """append concatenates IntervalIndexes; mixed `closed` raises."""
    left = IntervalIndex.from_arrays([0, 1], [1, 2])
    right = IntervalIndex.from_arrays([1, 2], [2, 3])

    # single index argument
    tm.assert_index_equal(
        left.append(right),
        IntervalIndex.from_arrays([0, 1, 1, 2], [1, 2, 2, 3]))

    # list of indexes
    tm.assert_index_equal(
        left.append([left, right]),
        IntervalIndex.from_arrays([0, 1, 0, 1, 1, 2],
                                  [1, 2, 1, 2, 2, 3]))

    # appending an index with a different `closed` is invalid
    with pytest.raises(ValueError):
        left.append(IntervalIndex.from_arrays([0, 1], [1, 2],
                                              closed='both'))
def test_is_non_overlapping_monotonic(self):
    """Property is True only for sorted, pairwise-disjoint intervals."""
    closed_opts = ('left', 'right', 'neither', 'both')

    # disjoint and ordered -> True for every closedness, in both directions
    tpls = [(0, 1), (2, 3), (4, 5), (6, 7)]
    for closed in closed_opts:
        idx = IntervalIndex.from_tuples(tpls, closed=closed)
        assert idx.is_non_overlapping_monotonic is True
        idx = IntervalIndex.from_tuples(reversed(tpls), closed=closed)
        assert idx.is_non_overlapping_monotonic is True

    # overlapping intervals -> False for every closedness
    tpls = [(0, 2), (1, 3), (4, 5), (6, 7)]
    for closed in closed_opts:
        idx = IntervalIndex.from_tuples(tpls, closed=closed)
        assert idx.is_non_overlapping_monotonic is False
        idx = IntervalIndex.from_tuples(reversed(tpls), closed=closed)
        assert idx.is_non_overlapping_monotonic is False

    # non-monotonic ordering -> False for every closedness
    tpls = [(0, 1), (2, 3), (6, 7), (4, 5)]
    for closed in closed_opts:
        idx = IntervalIndex.from_tuples(tpls, closed=closed)
        assert idx.is_non_overlapping_monotonic is False
        idx = IntervalIndex.from_tuples(reversed(tpls), closed=closed)
        assert idx.is_non_overlapping_monotonic is False

    # shared breakpoints overlap only when both endpoints are closed (GH16560)
    idx = IntervalIndex.from_breaks(range(4), closed='both')
    assert idx.is_non_overlapping_monotonic is False
    for closed in ('left', 'right', 'neither'):
        idx = IntervalIndex.from_breaks(range(4), closed=closed)
        assert idx.is_non_overlapping_monotonic is True
class TestIntervalRange(object):
    """Tests for the module-level ``interval_range`` constructor."""

    @pytest.mark.parametrize('closed', ['left', 'right', 'neither', 'both'])
    def test_construction_from_numeric(self, closed):
        # combinations of start/end/periods without freq
        expected = IntervalIndex.from_breaks(
            np.arange(0, 6), name='foo', closed=closed)
        result = interval_range(start=0, end=5, name='foo', closed=closed)
        tm.assert_index_equal(result, expected)

        result = interval_range(start=0, periods=5, name='foo', closed=closed)
        tm.assert_index_equal(result, expected)

        result = interval_range(end=5, periods=5, name='foo', closed=closed)
        tm.assert_index_equal(result, expected)

        # combinations of start/end/periods with freq
        expected = IntervalIndex.from_tuples([(0, 2), (2, 4), (4, 6)],
                                             name='foo', closed=closed)
        result = interval_range(start=0, end=6, freq=2, name='foo',
                                closed=closed)
        tm.assert_index_equal(result, expected)

        result = interval_range(start=0, periods=3, freq=2, name='foo',
                                closed=closed)
        tm.assert_index_equal(result, expected)

        result = interval_range(end=6, periods=3, freq=2, name='foo',
                                closed=closed)
        tm.assert_index_equal(result, expected)

        # output truncates early if freq causes end to be skipped.
        expected = IntervalIndex.from_tuples([(0.0, 1.5), (1.5, 3.0)],
                                             name='foo', closed=closed)
        result = interval_range(start=0, end=4, freq=1.5, name='foo',
                                closed=closed)
        tm.assert_index_equal(result, expected)

    @pytest.mark.parametrize('closed', ['left', 'right', 'neither', 'both'])
    def test_construction_from_timestamp(self, closed):
        # combinations of start/end/periods without freq
        start, end = Timestamp('2017-01-01'), Timestamp('2017-01-06')
        breaks = date_range(start=start, end=end)
        expected = IntervalIndex.from_breaks(breaks, name='foo', closed=closed)
        result = interval_range(start=start, end=end, name='foo',
                                closed=closed)
        tm.assert_index_equal(result, expected)

        result = interval_range(start=start, periods=5, name='foo',
                                closed=closed)
        tm.assert_index_equal(result, expected)

        result = interval_range(end=end, periods=5, name='foo',
                                closed=closed)
        tm.assert_index_equal(result, expected)

        # combinations of start/end/periods with fixed freq
        freq = '2D'
        start, end = Timestamp('2017-01-01'), Timestamp('2017-01-07')
        breaks = date_range(start=start, end=end, freq=freq)
        expected = IntervalIndex.from_breaks(breaks, name='foo', closed=closed)
        result = interval_range(start=start, end=end, freq=freq, name='foo',
                                closed=closed)
        tm.assert_index_equal(result, expected)

        result = interval_range(start=start, periods=3, freq=freq, name='foo',
                                closed=closed)
        tm.assert_index_equal(result, expected)

        result = interval_range(end=end, periods=3, freq=freq, name='foo',
                                closed=closed)
        tm.assert_index_equal(result, expected)

        # output truncates early if freq causes end to be skipped.
        end = Timestamp('2017-01-08')
        result = interval_range(start=start, end=end, freq=freq, name='foo',
                                closed=closed)
        tm.assert_index_equal(result, expected)

        # combinations of start/end/periods with non-fixed freq
        freq = 'M'
        start, end = Timestamp('2017-01-01'), Timestamp('2017-12-31')
        breaks = date_range(start=start, end=end, freq=freq)
        expected = IntervalIndex.from_breaks(breaks, name='foo', closed=closed)
        result = interval_range(start=start, end=end, freq=freq, name='foo',
                                closed=closed)
        tm.assert_index_equal(result, expected)

        result = interval_range(start=start, periods=11, freq=freq, name='foo',
                                closed=closed)
        tm.assert_index_equal(result, expected)

        result = interval_range(end=end, periods=11, freq=freq, name='foo',
                                closed=closed)
        tm.assert_index_equal(result, expected)

        # output truncates early if freq causes end to be skipped.
        end = Timestamp('2018-01-15')
        result = interval_range(start=start, end=end, freq=freq, name='foo',
                                closed=closed)
        tm.assert_index_equal(result, expected)

    @pytest.mark.parametrize('closed', ['left', 'right', 'neither', 'both'])
    def test_construction_from_timedelta(self, closed):
        # combinations of start/end/periods without freq
        start, end = Timedelta('1 day'), Timedelta('6 days')
        breaks = timedelta_range(start=start, end=end)
        expected = IntervalIndex.from_breaks(breaks, name='foo', closed=closed)
        result = interval_range(start=start, end=end, name='foo',
                                closed=closed)
        tm.assert_index_equal(result, expected)

        result = interval_range(start=start, periods=5, name='foo',
                                closed=closed)
        tm.assert_index_equal(result, expected)

        result = interval_range(end=end, periods=5, name='foo',
                                closed=closed)
        tm.assert_index_equal(result, expected)

        # combinations of start/end/periods with fixed freq
        freq = '2D'
        start, end = Timedelta('1 day'), Timedelta('7 days')
        breaks = timedelta_range(start=start, end=end, freq=freq)
        expected = IntervalIndex.from_breaks(breaks, name='foo', closed=closed)
        result = interval_range(start=start, end=end, freq=freq, name='foo',
                                closed=closed)
        tm.assert_index_equal(result, expected)

        result = interval_range(start=start, periods=3, freq=freq, name='foo',
                                closed=closed)
        tm.assert_index_equal(result, expected)

        result = interval_range(end=end, periods=3, freq=freq, name='foo',
                                closed=closed)
        tm.assert_index_equal(result, expected)

        # output truncates early if freq causes end to be skipped.
        end = Timedelta('7 days 1 hour')
        result = interval_range(start=start, end=end, freq=freq, name='foo',
                                closed=closed)
        tm.assert_index_equal(result, expected)

    def test_constructor_coverage(self):
        # float value for periods
        expected = pd.interval_range(start=0, periods=10)
        result = pd.interval_range(start=0, periods=10.5)
        tm.assert_index_equal(result, expected)

        # equivalent timestamp-like start/end
        start, end = Timestamp('2017-01-01'), Timestamp('2017-01-15')
        expected = pd.interval_range(start=start, end=end)

        result = pd.interval_range(start=start.to_pydatetime(),
                                   end=end.to_pydatetime())
        tm.assert_index_equal(result, expected)

        result = pd.interval_range(start=start.tz_localize('UTC'),
                                   end=end.tz_localize('UTC'))
        tm.assert_index_equal(result, expected)

        result = pd.interval_range(start=start.asm8, end=end.asm8)
        tm.assert_index_equal(result, expected)

        # equivalent freq with timestamp
        equiv_freq = ['D', Day(), Timedelta(days=1), timedelta(days=1),
                      DateOffset(days=1)]
        for freq in equiv_freq:
            result = pd.interval_range(start=start, end=end, freq=freq)
            tm.assert_index_equal(result, expected)

        # equivalent timedelta-like start/end
        start, end = Timedelta(days=1), Timedelta(days=10)
        expected = pd.interval_range(start=start, end=end)

        result = pd.interval_range(start=start.to_pytimedelta(),
                                   end=end.to_pytimedelta())
        tm.assert_index_equal(result, expected)

        result = pd.interval_range(start=start.asm8, end=end.asm8)
        tm.assert_index_equal(result, expected)

        # equivalent freq with timedelta
        equiv_freq = ['D', Day(), Timedelta(days=1), timedelta(days=1)]
        for freq in equiv_freq:
            result = pd.interval_range(start=start, end=end, freq=freq)
            tm.assert_index_equal(result, expected)

    def test_errors(self):
        # not enough params
        msg = ('Of the three parameters: start, end, and periods, '
               'exactly two must be specified')

        with tm.assert_raises_regex(ValueError, msg):
            interval_range(start=0)

        with tm.assert_raises_regex(ValueError, msg):
            interval_range(end=5)

        with tm.assert_raises_regex(ValueError, msg):
            interval_range(periods=2)

        with tm.assert_raises_regex(ValueError, msg):
            interval_range()

        # too many params
        with tm.assert_raises_regex(ValueError, msg):
            interval_range(start=0, end=5, periods=6)

        # mixed units
        msg = 'start, end, freq need to be type compatible'
        with tm.assert_raises_regex(TypeError, msg):
            interval_range(start=0, end=Timestamp('20130101'), freq=2)

        with tm.assert_raises_regex(TypeError, msg):
            interval_range(start=0, end=Timedelta('1 day'), freq=2)

        with tm.assert_raises_regex(TypeError, msg):
            interval_range(start=0, end=10, freq='D')

        with tm.assert_raises_regex(TypeError, msg):
            interval_range(start=Timestamp('20130101'), end=10, freq='D')

        with tm.assert_raises_regex(TypeError, msg):
            interval_range(start=Timestamp('20130101'),
                           end=Timedelta('1 day'), freq='D')

        with tm.assert_raises_regex(TypeError, msg):
            interval_range(start=Timestamp('20130101'),
                           end=Timestamp('20130110'), freq=2)

        with tm.assert_raises_regex(TypeError, msg):
            interval_range(start=Timedelta('1 day'), end=10, freq='D')

        with tm.assert_raises_regex(TypeError, msg):
            interval_range(start=Timedelta('1 day'),
                           end=Timestamp('20130110'), freq='D')

        with tm.assert_raises_regex(TypeError, msg):
            interval_range(start=Timedelta('1 day'),
                           end=Timedelta('10 days'), freq=2)

        # invalid periods
        msg = 'periods must be a number, got foo'
        with tm.assert_raises_regex(TypeError, msg):
            interval_range(start=0, periods='foo')

        # invalid start
        msg = 'start must be numeric or datetime-like, got foo'
        with tm.assert_raises_regex(ValueError, msg):
            interval_range(start='foo', periods=10)

        # invalid end
        msg = r'end must be numeric or datetime-like, got \(0, 1\]'
        with tm.assert_raises_regex(ValueError, msg):
            interval_range(end=Interval(0, 1), periods=10)

        # invalid freq for datetime-like
        msg = 'freq must be numeric or convertible to DateOffset, got foo'
        with tm.assert_raises_regex(ValueError, msg):
            interval_range(start=0, end=10, freq='foo')

        with tm.assert_raises_regex(ValueError, msg):
            interval_range(start=Timestamp('20130101'), periods=10, freq='foo')

        with tm.assert_raises_regex(ValueError, msg):
            interval_range(end=Timedelta('1 day'), periods=10, freq='foo')
class TestIntervalTree(object):
    """Tests for the low-level IntervalTree backing IntervalIndex lookups."""

    def setup_method(self, method):
        # One tree per supported dtype; intervals are [i, i+2) for i in 0..4,
        # so neighbouring intervals overlap by one unit.
        gentree = lambda dtype: IntervalTree(np.arange(5, dtype=dtype),
                                             np.arange(5, dtype=dtype) + 2)
        self.tree = gentree('int64')
        self.trees = {dtype: gentree(dtype)
                      for dtype in ['int32', 'int64', 'float32', 'float64']}

    def test_get_loc(self):
        for dtype, tree in self.trees.items():
            tm.assert_numpy_array_equal(tree.get_loc(1),
                                        np.array([0], dtype='int64'))
            # 2 lies in both [0, 2) ... hmm, in two overlapping intervals
            tm.assert_numpy_array_equal(np.sort(tree.get_loc(2)),
                                        np.array([0, 1], dtype='int64'))
            # a point outside every interval raises
            with pytest.raises(KeyError):
                tree.get_loc(-1)

    def test_get_indexer(self):
        for dtype, tree in self.trees.items():
            tm.assert_numpy_array_equal(
                tree.get_indexer(np.array([1.0, 5.5, 6.5])),
                np.array([0, 4, -1], dtype='int64'))
            # ambiguous (multi-interval) targets are rejected by get_indexer
            with pytest.raises(KeyError):
                tree.get_indexer(np.array([3.0]))

    def test_get_indexer_non_unique(self):
        indexer, missing = self.tree.get_indexer_non_unique(
            np.array([1.0, 2.0, 6.5]))
        tm.assert_numpy_array_equal(indexer[:1],
                                    np.array([0], dtype='int64'))
        tm.assert_numpy_array_equal(np.sort(indexer[1:3]),
                                    np.array([0, 1], dtype='int64'))
        tm.assert_numpy_array_equal(np.sort(indexer[3:]),
                                    np.array([-1], dtype='int64'))
        # position 2 of the target (6.5) is not covered by any interval
        tm.assert_numpy_array_equal(missing, np.array([2], dtype='int64'))

    def test_duplicates(self):
        tree = IntervalTree([0, 0, 0], [1, 1, 1])
        tm.assert_numpy_array_equal(np.sort(tree.get_loc(0.5)),
                                    np.array([0, 1, 2], dtype='int64'))

        # duplicate hits make get_indexer ambiguous -> KeyError
        with pytest.raises(KeyError):
            tree.get_indexer(np.array([0.5]))

        indexer, missing = tree.get_indexer_non_unique(np.array([0.5]))
        tm.assert_numpy_array_equal(np.sort(indexer),
                                    np.array([0, 1, 2], dtype='int64'))
        tm.assert_numpy_array_equal(missing, np.array([], dtype='int64'))

    def test_get_loc_closed(self):
        for closed in ['left', 'right', 'both', 'neither']:
            tree = IntervalTree([0], [1], closed=closed)
            # each endpoint is a KeyError exactly when that side is open
            for p, errors in [(0, tree.open_left),
                              (1, tree.open_right)]:
                if errors:
                    with pytest.raises(KeyError):
                        tree.get_loc(p)
                else:
                    tm.assert_numpy_array_equal(tree.get_loc(p),
                                                np.array([0], dtype='int64'))

    @pytest.mark.skipif(compat.is_platform_32bit(),
                        reason="int type mismatch on 32bit")
    def test_get_indexer_closed(self):
        x = np.arange(1000, dtype='float64')
        found = x.astype('intp')
        not_found = (-1 * np.ones(1000)).astype('intp')
        # exercise several leaf sizes to cover both flat and nested trees
        for leaf_size in [1, 10, 100, 10000]:
            for closed in ['left', 'right', 'both', 'neither']:
                tree = IntervalTree(x, x + 0.5, closed=closed,
                                    leaf_size=leaf_size)
                tm.assert_numpy_array_equal(found,
                                            tree.get_indexer(x + 0.25))

                # left endpoints hit only when the left side is closed
                expected = found if tree.closed_left else not_found
                tm.assert_numpy_array_equal(expected,
                                            tree.get_indexer(x + 0.0))

                # right endpoints hit only when the right side is closed
                expected = found if tree.closed_right else not_found
                tm.assert_numpy_array_equal(expected,
                                            tree.get_indexer(x + 0.5))
|
apache-2.0
|
suku248/nest-simulator
|
pynest/examples/store_restore_network.py
|
7
|
14788
|
# -*- coding: utf-8 -*-
#
# store_restore_network.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
"""
Store and restore a network simulation
--------------------------------------
This example shows how to store user-specified aspects of a network
to file and how to later restore the network for further simulation.
This may be used, e.g., to train weights in a network up to a certain
point, store those weights and later perform diverse experiments on
the same network using the stored weights.
.. admonition:: Only user-specified aspects are stored
NEST does not support storing the complete state of a simulation
in a way that would allow one to continue a simulation as if one had
made a new ``Simulate()`` call on an existing network. Such complete
checkpointing would be very difficult to implement.
NEST's explicit approach to storing and restoring network state makes
clear to all which aspects of a network are carried from one simulation
to another and thus contributes to good scientific practice.
Storing and restoring is currently not supported for MPI-parallel simulations.
"""
###############################################################################
# Import necessary modules.
import nest
import pickle
###############################################################################
# These modules are only needed for illustrative plotting.
import matplotlib.pyplot as plt
from matplotlib import gridspec
import numpy as np
import pandas as pd
import textwrap
###############################################################################
# Implement network as class.
#
# Implementing the network as a class makes network properties available to
# the initial network builder, the storer and the restorer, thus reducing the
# amount of data that needs to be stored.
class EINetwork:
    """
    A simple balanced random network with plastic excitatory synapses.

    This simple Brunel-style balanced random network has an excitatory
    and inhibitory population, both driven by external excitatory poisson
    input. Excitatory connections are plastic (STDP). Spike activity of
    the excitatory population is recorded.

    The model is provided as a non-trivial example for storing and restoring.
    """

    def __init__(self):
        # Population sizes: 4:1 excitatory/inhibitory ratio.
        self.nI = 500
        self.nE = 4 * self.nI
        self.n = self.nE + self.nI

        # Synaptic weights [mV]; inhibition four times stronger than excitation.
        self.JE = 1.0
        self.JI = -4 * self.JE

        # Fixed in-degrees for excitatory and inhibitory inputs per neuron.
        self.indeg_e = 200
        self.indeg_i = 50

        self.neuron_model = "iaf_psc_delta"

        # Create synapse models so we can extract specific connection information
        nest.CopyModel("stdp_synapse_hom", "e_syn", {"Wmax": 2 * self.JE})
        nest.CopyModel("static_synapse", "i_syn")

        # Randomized initial membrane potentials.
        self.nrn_params = {"V_m": nest.random.normal(-65., 5.)}
        self.poisson_rate = 800.

    def build(self):
        """
        Construct network from scratch, including instrumentation.
        """
        self.e_neurons = nest.Create(self.neuron_model, n=self.nE, params=self.nrn_params)
        self.i_neurons = nest.Create(self.neuron_model, n=self.nI, params=self.nrn_params)
        self.neurons = self.e_neurons + self.i_neurons

        self.pg = nest.Create("poisson_generator", {"rate": self.poisson_rate})
        self.sr = nest.Create("spike_recorder")

        nest.Connect(self.e_neurons, self.neurons,
                     {"rule": "fixed_indegree", "indegree": self.indeg_e},
                     {"synapse_model": "e_syn", "weight": self.JE})
        nest.Connect(self.i_neurons, self.neurons,
                     {"rule": "fixed_indegree", "indegree": self.indeg_i},
                     {"synapse_model": "i_syn", "weight": self.JI})
        nest.Connect(self.pg, self.neurons, "all_to_all", {"weight": self.JE})
        nest.Connect(self.e_neurons, self.sr)

    def store(self, dump_filename):
        """
        Store neuron membrane potential and synaptic weights to given file.

        :param dump_filename: path of the pickle file to write
        """
        assert nest.NumProcesses() == 1, "Cannot dump MPI parallel"

        ###############################################################################
        # Build dictionary with relevant network information:
        #   - membrane potential for all neurons in each population
        #   - source, target and weight of all connections
        # Dictionary entries are Pandas Dataframes.
        #
        # Strictly speaking, we would not need to store the weight of the inhibitory
        # synapses since they are fixed, but we do so out of symmetry and to make it
        # easier to add plasticity for inhibitory connections later.
        network = {}
        network["n_vp"] = nest.GetKernelStatus("total_num_virtual_procs")
        # BUG FIX: both entries previously read self.neurons (all nE + nI
        # neurons), so the stored V_m arrays had the wrong length for the
        # per-population Create() calls in restore(). Read each population
        # separately instead.
        network["e_nrns"] = self.e_neurons.get(["V_m"], output="pandas")
        network["i_nrns"] = self.i_neurons.get(["V_m"], output="pandas")

        network["e_syns"] = nest.GetConnections(synapse_model="e_syn").get(
            ("source", "target", "weight"), output="pandas")
        network["i_syns"] = nest.GetConnections(synapse_model="i_syn").get(
            ("source", "target", "weight"), output="pandas")

        with open(dump_filename, "wb") as f:
            pickle.dump(network, f, pickle.HIGHEST_PROTOCOL)

    def restore(self, dump_filename):
        """
        Restore network from data in file combined with base information in the class.

        :param dump_filename: path of the pickle file written by ``store``
        """
        assert nest.NumProcesses() == 1, "Cannot load MPI parallel"

        with open(dump_filename, "rb") as f:
            network = pickle.load(f)

        # Node-to-VP assignment must be reproducible for an exact restore.
        assert network["n_vp"] == nest.GetKernelStatus("total_num_virtual_procs"),\
            "N_VP must match"

        ###############################################################################
        # Reconstruct neurons
        # Since NEST does not understand Pandas Series, we must pass the values as
        # NumPy arrays
        self.e_neurons = nest.Create(self.neuron_model, n=self.nE,
                                     params={"V_m": network["e_nrns"].V_m.values})
        self.i_neurons = nest.Create(self.neuron_model, n=self.nI,
                                     params={"V_m": network["i_nrns"].V_m.values})
        self.neurons = self.e_neurons + self.i_neurons

        ###############################################################################
        # Reconstruct instrumentation
        self.pg = nest.Create("poisson_generator", {"rate": self.poisson_rate})
        self.sr = nest.Create("spike_recorder")

        ###############################################################################
        # Reconstruct connectivity
        nest.Connect(network["e_syns"].source.values, network["e_syns"].target.values,
                     "one_to_one",
                     {"synapse_model": "e_syn", "weight": network["e_syns"].weight.values})

        nest.Connect(network["i_syns"].source.values, network["i_syns"].target.values,
                     "one_to_one",
                     {"synapse_model": "i_syn", "weight": network["i_syns"].weight.values})

        ###############################################################################
        # Reconnect instruments
        nest.Connect(self.pg, self.neurons, "all_to_all", {"weight": self.JE})
        nest.Connect(self.e_neurons, self.sr)
class DemoPlot:
    """
    Create demonstration figure for effect of storing and restoring a network.

    The figure shows raster plots for five different runs, a PSTH for the
    initial 1 s simulation and PSTHs for all 1 s continuations, and weight
    histograms.
    """

    def __init__(self):
        # One color per simulation leg, taken from matplotlib's default cycle.
        self._colors = [c["color"] for c in plt.rcParams["axes.prop_cycle"]]
        # Index of the next raster axis (and color) to draw into.
        self._next_line = 0

        plt.rcParams.update({'font.size': 10})
        self.fig = plt.figure(figsize=(10, 7), constrained_layout=False)
        gs = gridspec.GridSpec(4, 2, bottom=0.08, top=0.9, left=0.07, right=0.98, wspace=0.2, hspace=0.4)
        # One raster for the initial run (top left) plus four stacked rasters
        # for the continuation/reload runs (right column).
        self.rasters = ([self.fig.add_subplot(gs[0, 0])] +
                        [self.fig.add_subplot(gs[n, 1]) for n in range(4)])
        self.weights = self.fig.add_subplot(gs[1, 0])
        self.comment = self.fig.add_subplot(gs[2:, 0])

        self.fig.suptitle("Storing and reloading a network simulation")

        # Explanatory text panel (axes frame hidden).
        self.comment.set_axis_off()
        self.comment.text(0, 1, textwrap.dedent("""
            Storing, loading and continuing a simulation of a balanced E-I network
            with STDP in excitatory synapses.
            Top left: Raster plot of initial simulation for 1000ms (blue). Network state
            (connections, membrane potential, synaptic weights) is stored at the end of
            the initial simulation.
            Top right: Immediate continuation of the initial simulation from t=1000ms
            to t=2000ms (orange) by calling Simulate(1000) again after storing the network.
            This continues based on the full network state, including spikes in transit.
            Second row, right: Simulating for 1000ms after loading the stored network
            into a clean kernel (green). Time runs from 0ms and only connectivity, V_m and
            synaptic weights are restored. Dynamics differ somewhat from continuation.
            Third row, right: Same as in second row with identical random seed (red),
            resulting in identical spike patterns.
            Fourth row, right: Simulating for 1000ms from same stored network state as
            above but with different random seed yields different spike patterns (purple).
            Above: Distribution of excitatory synaptic weights at end of each sample
            simulation. Green and red curves are identical and overlay to form brown curve."""),
                          transform=self.comment.transAxes, fontsize=8,
                          verticalalignment='top')

    def add_to_plot(self, net, n_max=100, t_min=0, t_max=1000, lbl=""):
        """
        Add raster plot and weight histogram for one simulation leg.

        :param net: EINetwork instance providing spike recorder and neurons
        :param n_max: plot only spikes of senders with id below n_max
        :param t_min: lower bound of plotted time window [ms]
        :param t_max: upper bound of plotted time window [ms]
        :param lbl: label for subplot title and histogram legend
        """
        spks = pd.DataFrame.from_dict(net.sr.get("events"))
        spks = spks.loc[(spks.senders < n_max) & (t_min < spks.times) & (spks.times < t_max)]

        self.rasters[self._next_line].plot(spks.times, spks.senders, ".",
                                           color=self._colors[self._next_line])
        self.rasters[self._next_line].set_xlim(t_min, t_max)
        self.rasters[self._next_line].set_title(lbl)

        # Hide x tick labels on inner right-column rasters; label the bottom one.
        if 1 < self._next_line < 4:
            self.rasters[self._next_line].set_xticklabels([])
        elif self._next_line == 4:
            self.rasters[self._next_line].set_xlabel('Time [ms]')

        # To save time while plotting, we extract only a subset of connections.
        # For simplicity, we just use a prime-number stepping.
        w = nest.GetConnections(source=net.e_neurons[::41], synapse_model="e_syn").weight
        wbins = np.arange(0.7, 1.4, 0.01)
        self.weights.hist(w, bins=wbins,
                          histtype="step", density=True, label=lbl,
                          color=self._colors[self._next_line],
                          alpha=0.7, lw=3)

        # Axis labels only need to be set once, on the first leg.
        if self._next_line == 0:
            self.rasters[0].set_ylabel("neuron id")
            self.weights.set_ylabel("p(w)")
            self.weights.set_xlabel("Weight w [mV]")

        plt.draw()
        plt.pause(1e-3)  # allow figure window to draw figure

        self._next_line += 1
if __name__ == "__main__":
    # Driver: run, store, continue, and reload the network, plotting each leg.
    plt.ion()

    # Duration of each simulation leg [ms].
    T_sim = 1000
    dplot = DemoPlot()

    ###############################################################################
    # Ensure clean slate and make NEST less chatty
    nest.set_verbosity("M_WARNING")
    nest.ResetKernel()

    ###############################################################################
    # Create network from scratch and simulate 1s.
    nest.SetKernelStatus({"local_num_threads": 4,
                          "print_time": True})

    ein = EINetwork()
    print("*** Initial simulation ***")
    ein.build()
    nest.Simulate(T_sim)
    dplot.add_to_plot(ein, lbl="Initial simulation")

    ###############################################################################
    # Store network state to file with state after 1s.
    print("\n*** Storing simulation ...", end="", flush=True)
    ein.store("ein_1000.pkl")
    print(" done ***\n")

    ###############################################################################
    # Continue simulation by another 1s.
    print("\n*** Continuing simulation ***")
    nest.Simulate(T_sim)
    dplot.add_to_plot(ein, lbl="Continued simulation", t_min=T_sim, t_max=2*T_sim)

    ###############################################################################
    # Clear kernel, restore network from file and simulate for 1s.
    print("\n*** Reloading and resuming simulation ***")
    nest.ResetKernel()
    nest.SetKernelStatus({"local_num_threads": 4})
    ein2 = EINetwork()
    ein2.restore("ein_1000.pkl")
    nest.Simulate(T_sim)
    dplot.add_to_plot(ein2, lbl="Reloaded simulation")

    ###############################################################################
    # Repeat previous step. This shall result in *exactly* the same results as
    # the previous run because we use the same random seed.
    print("\n*** Reloading and resuming simulation (same seed) ***")
    nest.ResetKernel()
    nest.SetKernelStatus({"local_num_threads": 4})
    ein2 = EINetwork()
    ein2.restore("ein_1000.pkl")
    nest.Simulate(T_sim)
    dplot.add_to_plot(ein2, lbl="Reloaded simulation (same seed)")

    ###############################################################################
    # Clear, restore and simulate again, but now with different random seed.
    # Details in results shall differ from previous run.
    print("\n*** Reloading and resuming simulation (different seed) ***")
    nest.ResetKernel()
    nest.SetKernelStatus({"local_num_threads": 4, "rng_seed": 987654321})
    ein2 = EINetwork()
    ein2.restore("ein_1000.pkl")
    nest.Simulate(T_sim)
    dplot.add_to_plot(ein2, lbl="Reloaded simulation (different seed)")

    dplot.fig.savefig("store_restore_network.png")
    input("Press ENTER to close figure!")
|
gpl-2.0
|
BalzGuenat/ParallelGumtree
|
plot_script.py
|
1
|
3921
|
#!/usr/bin/python2
# Plot GumTree benchmark results stored in the file 'dump'.
# NOTE: this is a Python 2 script (print statements, text-mode pickle load).
# NOTE(review): indentation below was reconstructed from a whitespace-stripped
# copy -- verify nesting against the original repository.
import sys
import pickle
import subprocess
import time
import matplotlib.pyplot as plt
import numpy as np
import os
import filecmp

# Benchmarked thread counts are 2**0 .. 2**(max_threads_log - 1).
max_threads_log = 3

# Load the benchmark results written by the measurement run.
with open('dump') as dumpFile:
    (sizes, runs, average_linecount, parallelGumtreeTimes, referenceGumtreeTimes) = pickle.load(dumpFile)

# Java reference times are all-zero when that benchmark was skipped.
runJava = referenceGumtreeTimes.any()

# sort the files by size:
p = average_linecount.argsort() # get the permutation
average_linecount = average_linecount[p] # reorder the average linecount itself
for r in range(0,runs):
    if runJava:
        referenceGumtreeTimes[r] = referenceGumtreeTimes[r][p]
    for num_threads in range(0,max_threads_log):
        parallelGumtreeTimes[num_threads][r] = parallelGumtreeTimes[num_threads][r][p]

# calculate average for the times and plot everything
fig, ax = plt.subplots()
parallel_average_times = np.empty([max_threads_log, sizes])
parallel_standard_deviation = np.empty([max_threads_log, sizes])
reference_standard_deviation = np.empty([max_threads_log, sizes])
for t in range(0,max_threads_log):
    # average and std over runs, per input size
    parallel_average_times[t] = parallelGumtreeTimes[t].mean(axis=0)
    parallel_standard_deviation[t] = parallelGumtreeTimes[t].std(axis=0)
    ax.errorbar(average_linecount, parallel_average_times[t], parallel_standard_deviation[t], marker='o')
if runJava:
    reference_average_times = referenceGumtreeTimes.mean(axis=0)
    reference_standard_deviation = referenceGumtreeTimes.std(axis=0)
    ax.errorbar(average_linecount, reference_average_times, reference_standard_deviation, marker='o')
ax.set_ylabel('elapsed time [seconds]')
ax.set_xlabel('input size [average number of nodes]')
ax.set_title("Input tree size vs execution time")
legend = []
for t in range(0,max_threads_log):
    legend.append('C++, ' + str(2**t) + ' thread(s)')
if runJava:
    legend.append('Reference implementation')
ax.legend(legend, loc='upper left', prop={'size':16}).draggable()
ax.set_xscale('log')
plt.draw()
plt.savefig('timePlot.png', bbox_inches='tight')
plt.savefig('timePlot.eps', bbox_inches='tight')

if runJava:
    # speedup plots
    print "\nSpeedup vs Java reference"
    fig, ax = plt.subplots()
    for t in range(0,max_threads_log):
        speedup_parallel = reference_average_times/parallel_average_times[t]
        ax.plot(average_linecount, speedup_parallel, marker='o')
        print "average speedup with " + str(2**t) + " threads: " + str(np.mean(speedup_parallel))
    ax.set_ylabel('speedup')
    ax.set_xlabel('input size')
    ax.set_title("speedup compared to the java solution")
    ax.legend(legend[:-1], loc='upper right').draggable()
    ax.set_xscale('log')
    plt.draw()
    plt.savefig('speedupPlot.png', bbox_inches='tight')

# speedup plots vs the single thread c++ solution
if max_threads_log>1:
    print "\nSpeedup vs single c++ thread"
    speedup_parallel_standard_deviation = np.empty([max_threads_log, sizes])
    average_speedup_parallel = np.empty([max_threads_log, sizes])
    fig, ax = plt.subplots()
    for t in range(1,max_threads_log):
        # per-run speedup of t threads relative to the single-thread C++ run
        speedup_parallel = parallelGumtreeTimes[0]/parallelGumtreeTimes[t]
        average_speedup_parallel[t] = speedup_parallel.mean(axis=0)
        speedup_parallel_standard_deviation[t] = speedup_parallel.std(axis=0)
        ax.errorbar(average_linecount, average_speedup_parallel[t], speedup_parallel_standard_deviation[t], marker='o')
        #ax.plot(average_linecount, speedup_parallel, marker='o')
        print "average speedup with " + str(2**t) + " threads: " + str(np.mean(speedup_parallel))
    if runJava:
        speedup_reference = parallel_average_times[0]/reference_average_times
        print "average speedup of the java reference solution: " + str(np.mean(speedup_reference))
        ax.plot(average_linecount, speedup_reference, marker='o')
    ax.set_ylabel('speedup')
    ax.set_xlabel('input size [average number of nodes]')
    ax.set_title("speedup with multiple threads")
    ax.legend(legend[1:], loc='lower right').draggable()
    ax.set_xscale('log')
    plt.draw()
    plt.savefig('speedupPlot_c.png', bbox_inches='tight')

# show plots
plt.show()
|
lgpl-3.0
|
ernestyalumni/18-327-wavelets-filter-banks
|
tools/example7.py
|
2
|
5011
|
## This is my implementation of example6.py
## Example 7: Generation of biorthogonal scaling functions and wavelets.
## using Python libraries numpy, scipy, matplotlib, PyWavelets
## this needs biphivals.py (just import it in from the same directory!)
##
## The main reference that I'll use is
## Gilbert Strang, and Kevin Amaratunga. 18.327 Wavelets, Filter Banks and Applications, Spring 2003. (Massachusetts Institute of Technology: MIT OpenCourseWare), http://ocw.mit.edu (Accessed 19 Jun, 2015). License: Creative Commons BY-NC-SA
##
##
##
#####################################################################################
## Copyleft 2015, Ernest Yeung <[email protected]>
##
## 20150702
##
## This program, along with all its code, is free software;
## you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program; if not, write to the Free Software Foundation, Inc.,
## 51 Franklin Street, Fifth Floor, Boston, MA
## 02110-1301, USA
##
## Governing the ethics of using this program, I default to the Caltech Honor Code:
## ``No member of the Caltech community shall take unfair advantage of
## any other member of the Caltech community.''
##
## If you like what I'm doing and would like to help and contribute support,
## please take a look at my crowdfunding campaign at ernestyalumni.tilt.com
## and subscription-based Patreon
## read my mission statement and give your financial support,
## no matter how small or large,
## if you can
## and to keep checking my ernestyalumni.wordpress.com blog and
## various social media channels
## for updates as I try to keep putting out great stuff.
##
## Fund Science! Help my physics education outreach and research efforts at
## Open/Tilt or subscription Patreon - Ernest Yeung
##
## ernestyalumni.tilt.com
##
## Facebook : ernestyalumni
## gmail : ernestyalumni
## google : ernestyalumni
## linkedin : ernestyalumni
## Patreon : ernestyalumni
## Tilt/Open : ernestyalumni
## tumblr : ernestyalumni
## twitter : ernestyalumni
## youtube : ernestyalumni
## wordpress : ernestyalumni
##
##
################################################################################
##
import numpy as np
import matplotlib.pyplot as plt
import pywt
from biphivals import biphivals

# Example 3a: sample the biorthogonal scaling functions and wavelets of
# the 9/7 filter bank ('bior4.4': biorthogonal spline wavelet with 4
# vanishing moments), then plot the primary and dual pairs.
wavelet = pywt.Wavelet('bior4.4')
dec_lo, dec_hi = wavelet.dec_lo, wavelet.dec_hi
rec_lo, rec_hi = wavelet.rec_lo, wavelet.rec_hi
# biphivals returns the sample grid plus primary/dual scaling functions
# and wavelets, refined 5 levels deep.
grid, prim_phi, dual_phi, prim_psi, dual_psi = biphivals(
    dec_lo, dec_hi, rec_lo, rec_hi, 5)

plt.figure(1)
plt.plot(grid, prim_phi, '-', label="Primary scaling function")
plt.plot(grid, prim_psi, '-.', label="Primary wavelet")
plt.legend()
plt.title("Primary Daubachies 9/7 Pair")

plt.figure(2)
plt.plot(grid, dual_phi, '--', label="Dual scaling function")
plt.plot(grid, dual_psi, ':', label="Dual wavelet")
plt.legend()
plt.title('Dual Daubachies 9/7 pair')
|
mit
|
cjayb/mne-python
|
examples/simulation/plot_simulate_raw_data.py
|
19
|
2830
|
"""
===========================
Generate simulated raw data
===========================
This example generates raw data by repeating a desired source activation
multiple times.
"""
# Authors: Yousra Bekhti <[email protected]>
# Mark Wronkiewicz <[email protected]>
# Eric Larson <[email protected]>
#
# License: BSD (3-clause)
import numpy as np
import matplotlib.pyplot as plt
import mne
from mne import find_events, Epochs, compute_covariance, make_ad_hoc_cov
from mne.datasets import sample
from mne.simulation import (simulate_sparse_stc, simulate_raw,
add_noise, add_ecg, add_eog)
print(__doc__)
# Locate the bundled MNE "sample" dataset; it supplies both the template
# recording and the precomputed forward solution used below.
data_path = sample.data_path()
raw_fname = data_path + '/MEG/sample/sample_audvis_raw.fif'
fwd_fname = data_path + '/MEG/sample/sample_audvis-meg-eeg-oct-6-fwd.fif'
# Load real data as the template; add an average-EEG-reference projector
# so the simulated EEG is consistently referenced.
raw = mne.io.read_raw_fif(raw_fname)
raw.set_eeg_reference(projection=True)
##############################################################################
# Generate dipole time series
n_dipoles = 4  # number of dipoles to create
epoch_duration = 2.  # duration of each epoch/event
n = 0  # harmonic number
rng = np.random.RandomState(0)  # random state (make reproducible)
def data_fun(times):
    """Generate time-staggered sinusoids at harmonics of 10Hz"""
    global n
    total = len(times)
    # Each call activates the next of 2*n_dipoles equal slices of the
    # time axis, so successive dipoles fire one after another.
    start = int(2 * n * float(total) / (2 * n_dipoles))
    stop = int((2 * n + 1) * float(total) / (2 * n_dipoles))
    window = np.zeros(total)
    window[start:stop] = 1.
    n += 1
    # 25 nAm sinusoid at the n-th harmonic of 10 Hz, gated by the window.
    return 25e-9 * np.sin(2. * np.pi * 10. * n * times) * window
# One epoch worth of sample times from the template recording.
times = raw.times[:int(raw.info['sfreq'] * epoch_duration)]
fwd = mne.read_forward_solution(fwd_fname)
src = fwd['src']
# Sparse source estimate: n_dipoles random dipoles whose waveforms come
# from data_fun (time-staggered 10 Hz harmonics).
stc = simulate_sparse_stc(src, n_dipoles=n_dipoles, times=times,
                          data_fun=data_fun, random_state=rng)
# look at our source data
fig, ax = plt.subplots(1)
ax.plot(times, 1e9 * stc.data.T)
ax.set(ylabel='Amplitude (nAm)', xlabel='Time (sec)')
mne.viz.utils.plt_show()
##############################################################################
# Simulate raw data: repeat the source activation 10 times, then add
# sensor noise (AR model) plus ECG and EOG artifacts.
raw_sim = simulate_raw(raw.info, [stc] * 10, forward=fwd, verbose=True)
cov = make_ad_hoc_cov(raw_sim.info)
add_noise(raw_sim, cov, iir_filter=[0.2, -0.2, 0.04], random_state=rng)
add_ecg(raw_sim, random_state=rng)
add_eog(raw_sim, random_state=rng)
raw_sim.plot()
##############################################################################
# Plot evoked data
events = find_events(raw_sim)  # only 1 pos, so event number == 1
epochs = Epochs(raw_sim, events, 1, tmin=-0.2, tmax=epoch_duration)
# Baseline (pre-stimulus) covariance for whitening the evoked response.
cov = compute_covariance(epochs, tmax=0., method='empirical',
                         verbose='error')  # quick calc
evoked = epochs.average()
evoked.plot_white(cov, time_unit='s')
|
bsd-3-clause
|
adamrvfisher/TechnicalAnalysisLibrary
|
DojiFinder.py
|
1
|
6270
|
# -*- coding: utf-8 -*-
"""
Created on Tue Dec 19 19:17:24 2017
@author: AmatVictoriaCuramIII
"""
#doji finder, its sketchy.
#modules
from YahooGrabber import YahooGrabber
import numpy as np
import random as rand
import pandas as pd
#define tickers
Ticker1 = 'MS'
# Number of random parameter draws for the Monte-Carlo search below.
iterations = range(0, 2000)
# Price history (OHLC + 'Adj Close') pulled from Yahoo for the ticker.
Asset1 = YahooGrabber(Ticker1)
Counter = 0
Empty = []          # scratch list holding one iteration's metrics
Dataset = pd.DataFrame()  # one column of metrics per iteration
#trimmer
#Asset1 = Asset1[-2000:]
# Number of bars a doji signal is carried forward (drawn once, reused).
trail = rand.randint(3,40)
trailrange = range(1,trail)
#statistics
# Monte-Carlo search: each iteration draws a random SMA window and a doji
# magnitude threshold, backtests the contrarian doji rule on Asset1, and
# stores the performance metrics as a new column of Dataset.
for i in iterations:
    window = rand.randint(3,100)
    mag = rand.random()/500
    Asset1['LogRet'] = np.log(Asset1['Adj Close']/Asset1['Adj Close'].shift(1))
    Asset1['LogRet'] = Asset1['LogRet'].fillna(0)
    Asset1['SMA'] = Asset1['Adj Close'].rolling(window=window, center=False).mean()
    # Trend = fractional distance of price above/below its moving average.
    Asset1['Trend'] = (Asset1['Adj Close']/Asset1['SMA']) - 1
    # A "doji" bar: open and close nearly equal (|1 - Open/Close| < mag).
    Asset1['DojiFactor'] = Asset1['Open']/Asset1['Adj Close']
    Asset1['MAGN'] = abs(1 - Asset1['DojiFactor'])
    Asset1['Doji?'] = np.where(Asset1['MAGN'] < mag, 1, 0)
    # Contrarian sign: long after a down-trend, short after an up-trend.
    Asset1['Sign'] = np.where(Asset1['Trend'].shift(1) < 0, 1, -1)
    Asset1['Position'] = (Asset1['Doji?'] * Asset1['Sign'])
    Asset1['AddlPosition'] = 0
    # Carry each doji signal forward for `trail` additional bars.
    for t in trailrange:
        Asset1['AddlPosition'] = np.where(Asset1['Position'].shift(t) == 1, 1, Asset1['AddlPosition'])
        Asset1['AddlPosition'] = np.where(Asset1['Position'].shift(t) == -1, -1, Asset1['AddlPosition'])
    Asset1['TotalPosition'] = Asset1['Position'] + Asset1['AddlPosition']
    # NOTE(review): the fallback branch below is AddlPosition rather than
    # TotalPosition — preserved as written, but looks suspicious; confirm
    # the intent before relying on these results.
    Asset1['TotalPosition'] = np.where(Asset1['TotalPosition'] == -2, -1, Asset1['AddlPosition'])
    Asset1['TotalPosition'] = np.where(Asset1['TotalPosition'] == 2, 1, Asset1['AddlPosition'])
    # Strategy daily log-return and cumulative equity multiplier.
    Asset1['Pass'] = Asset1['TotalPosition'] * Asset1['LogRet']
    Asset1['Multiplier'] = Asset1['Pass'].cumsum().apply(np.exp)
    Counter = Counter + 1
    print(Counter)
    drawdown = 1 - Asset1['Multiplier'].div(Asset1['Multiplier'].cummax())
    dailyreturn = Asset1['Pass'].mean()
    # Discard losing or degenerate (zero-volatility) parameter draws.
    if dailyreturn < 0:
        continue
    dailyvol = Asset1['Pass'].std()
    if Asset1['Pass'].std() == 0:
        continue
    sharpe =(dailyreturn/dailyvol)
    MaxDD = max(drawdown)
    # Metric rows: 0=window, 1=mag, 2=trail, 3=sharpe, 4=sharpe/MaxDD,
    # 5=return/MaxDD, 6=MaxDD.  Row indices are relied on below.
    Empty.append(window)
    Empty.append(mag)
    Empty.append(trail)
    Empty.append(sharpe)
    Empty.append(sharpe/MaxDD)
    Empty.append(dailyreturn/MaxDD)
    Empty.append(MaxDD)
    Emptyseries = pd.Series(Empty)
    Dataset[0] = Emptyseries.values
    Dataset[i] = Emptyseries.values
    Empty[:] = []
##find optimal parameters from simulation
# Row 3 of Dataset holds each iteration's Sharpe ratio.
z1 = Dataset.iloc[3]
w1 = np.percentile(z1, 80)
v1 = [] #this variable stores the Nth percentile of top performers
DS1W = pd.DataFrame() #this variable stores your financial advisors for specific dataset
# Collect every iteration whose Sharpe beats the 80th percentile.
for l in z1:
    if l > w1:
        v1.append(l)
for j in v1:
    r = Dataset.columns[(Dataset == j).iloc[3]]
    DS1W = pd.concat([DS1W,Dataset[r]], axis = 1)
# Single best iteration by Sharpe; its column label indexes Dataset below.
y = max(z1)
k = Dataset.columns[(Dataset == y).iloc[3]] #this is the column number
kfloat = float(k[0])
#window = int(Dataset[kfloat][0])
#mag = Dataset[kfloat][1]
#mag = Dataset[kfloat][1]
Asset1['LogRet'] = np.log(Asset1['Adj Close']/Asset1['Adj Close'].shift(1))
Asset1['LogRet'] = Asset1['LogRet'].fillna(0)
Asset1['SMA'] = Asset1['Adj Close'].rolling(window=int(Dataset[kfloat][0]), center=False).mean()
Asset1['Trend'] = (Asset1['Adj Close']/Asset1['SMA']) - 1
Asset1['DojiFactor'] = Asset1['Open']/Asset1['Adj Close']
Asset1['MAGN'] = abs(1 - Asset1['DojiFactor'])
Asset1['Doji?'] = np.where(Asset1['MAGN'] < Dataset[kfloat][1], 1, 0)
Asset1['Sign'] = np.where(Asset1['Trend'].shift(1) < 0, 1, -1)
Asset1['Position'] = (Asset1['Doji?'] * Asset1['Sign'])
Asset1['AddlPosition'] = 0
trail = int(Dataset[kfloat][2])
trailrange = range(1,trail)
for t in trailrange:
Asset1['AddlPosition'] = np.where(Asset1['Position'].shift(t) == 1, 1, Asset1['AddlPosition'])
Asset1['AddlPosition'] = np.where(Asset1['Position'].shift(t) == -1, -1, Asset1['AddlPosition'])
#Asset1['AddlPosition'] = np.where(Asset1['Position'].shift(1) == 1, 1, Asset1['AddlPosition'])
#Asset1['AddlPosition'] = np.where(Asset1['Position'].shift(1) == -1, -1, Asset1['AddlPosition'])
#Asset1['AddlPosition'] = np.where(Asset1['Position'].shift(2) == 1, 1, Asset1['AddlPosition'])
#Asset1['AddlPosition'] = np.where(Asset1['Position'].shift(2) == -1, -1, Asset1['AddlPosition'])
#Asset1['AddlPosition'] = np.where(Asset1['Position'].shift(3) == 1, 1, Asset1['AddlPosition'])
#Asset1['AddlPosition'] = np.where(Asset1['Position'].shift(3) == -1, -1, Asset1['AddlPosition'])
#Asset1['AddlPosition'] = np.where(Asset1['Position'].shift(4) == 1, 1, Asset1['AddlPosition'])
#Asset1['AddlPosition'] = np.where(Asset1['Position'].shift(4) == -1, -1, Asset1['AddlPosition'])
Asset1['TotalPosition'] = Asset1['Position'] + Asset1['AddlPosition']
Asset1['TotalPosition'] = np.where(Asset1['TotalPosition'] == -2, -1, Asset1['AddlPosition'])
Asset1['TotalPosition'] = np.where(Asset1['TotalPosition'] == 2, 1, Asset1['AddlPosition'])
Asset1['Pass'] = Asset1['TotalPosition'] * Asset1['LogRet']
Asset1['Multiplier'] = Asset1['Pass'].cumsum().apply(np.exp)
drawdown = 1 - Asset1['Multiplier'].div(Asset1['Multiplier'].cummax())
dailyreturn = Asset1['Pass'].mean()
dailyvol = Asset1['Pass'].std()
sharpe =(dailyreturn/dailyvol)
MaxDD = max(drawdown)
print(MaxDD)
Asset1['Multiplier'].plot()
print(Dataset[kfloat])
|
apache-2.0
|
ryanfobel/multispeq1.py
|
rename.py
|
1
|
2562
|
import sys
import pandas as pd
from path_helpers import path
def main(root, old_name, new_name):
    """Rename a template project from *old_name* to *new_name*.

    Rewrites file contents and renames files/directories under *root*.
    Three spellings of each name are handled: the hyphenated form
    (package names), the underscore form (module names), and the
    CamelCase form (class names).  Anything under ``.git`` is skipped.
    """
    names = pd.Series([old_name, new_name], index=['old', 'new'])
    underscore_names = names.map(lambda v: v.replace('-', '_'))
    camel_names = names.str.split('-').map(lambda x: ''.join([y.title()
                                                              for y in x]))
    # File contents are read as *bytes*; encoding the names up front avoids
    # the str-vs-bytes TypeError the original hit under Python 3.
    spellings = [(s.old.encode(), s.new.encode())
                 for s in (names, underscore_names, camel_names)]
    # Replace all occurrences of provided original name with new name, and all
    # occurrences where dashes (i.e., '-') are replaced with underscores.
    #
    # Dashes are used in Python package names, but underscores are used in
    # Python module names.
    for p in path(root).walkfiles():
        if '.git' in p:
            continue
        data = p.bytes()
        if any(old in data for old, _ in spellings):
            for old, new in spellings:
                data = data.replace(old, new)
            p.write_bytes(data)

    def rename_path(p):
        # Never touch git metadata.
        if '.git' in p:
            return
        # Compute the final name in one pass: renaming twice through a stale
        # `p` (as the original did) broke when a path matched both the
        # underscore and CamelCase spellings.
        name = p.name.replace(underscore_names.old, underscore_names.new)
        name = name.replace(camel_names.old, camel_names.new)
        if name != p.name:
            p.rename(p.parent.joinpath(name))

    # Rename all files/directories containing original name with new name, and
    # all occurrences where dashes (i.e., '-') are replaced with underscores.
    #
    # Process list of paths in *reverse order* to avoid renaming parent
    # directories before children.
    for p in sorted(list(path(root).walkdirs()))[-1::-1]:
        rename_path(p)
    for p in path(root).walkfiles():
        rename_path(p)
def parse_args(args=None):
    """Parses arguments, returns (options, args).

    Parameters
    ----------
    args : list of str, optional
        Argument list to parse.  ``None`` (the default) makes argparse
        fall back to ``sys.argv[1:]``.  The original accepted this
        parameter but always parsed ``sys.argv`` regardless.
    """
    from argparse import ArgumentParser

    parser = ArgumentParser(description='Rename template project with '
                            'hyphen-separated <new name> (path names and in '
                            'files).')
    parser.add_argument('new_name', help='New project name (e.g., '
                        ' `my-new-project`)')
    # Honor an explicitly supplied argument list (was silently ignored);
    # also fixed the missing space in the description ("withhyphen").
    args = parser.parse_args(args)
    return args
# Script entry point: rename this template project (rooted at the current
# directory) from 'multispeq1' to the name given on the command line.
if __name__ == '__main__':
    args = parse_args()
    main('.', 'multispeq1', args.new_name)
|
gpl-3.0
|
chugunovyar/factoryForBuild
|
env/lib/python2.7/site-packages/matplotlib/tests/test_colors.py
|
3
|
24671
|
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import itertools
from distutils.version import LooseVersion as V
from nose.tools import assert_raises, assert_equal, assert_true
try:
# this is not available in nose + py2.6
from nose.tools import assert_sequence_equal
except ImportError:
assert_sequence_equal = None
import numpy as np
from numpy.testing.utils import assert_array_equal, assert_array_almost_equal
from nose.plugins.skip import SkipTest
from matplotlib import cycler
import matplotlib
import matplotlib.colors as mcolors
import matplotlib.cm as cm
import matplotlib.colorbar as mcolorbar
import matplotlib.cbook as cbook
import matplotlib.pyplot as plt
from matplotlib.testing.decorators import (image_comparison,
cleanup, knownfailureif)
def test_resample():
    """
    Github issue #6025 pointed to incorrect ListedColormap._resample;
    here we test the method for LinearSegmentedColormap as well.
    """
    # Build a 101-entry RGBA ramp: red rising, blue falling, G/A fixed.
    n = 101
    colorlist = np.empty((n, 4), float)
    colorlist[:, 0] = np.linspace(0, 1, n)
    colorlist[:, 1] = 0.2
    colorlist[:, 2] = np.linspace(1, 0, n)
    colorlist[:, 3] = 0.7
    lsc = mcolors.LinearSegmentedColormap.from_list('lsc', colorlist)
    lc = mcolors.ListedColormap(colorlist)
    # Down-sample both colormap flavours to 3 entries; the endpoints and
    # midpoint must match the analytic ramp.
    lsc3 = lsc._resample(3)
    lc3 = lc._resample(3)
    expected = np.array([[0.0, 0.2, 1.0, 0.7],
                         [0.5, 0.2, 0.5, 0.7],
                         [1.0, 0.2, 0.0, 0.7]], float)
    assert_array_almost_equal(lsc3([0, 0.5, 1]), expected)
    assert_array_almost_equal(lc3([0, 0.5, 1]), expected)
def test_colormap_endian():
    """
    Github issue #1005: a bug in putmask caused erroneous
    mapping of 1.0 when input from a non-native-byteorder
    array.
    """
    cmap = cm.get_cmap("jet")
    # Test under, over, and invalid along with values 0 and 1.
    a = [-0.5, 0, 0.5, 1, 1.5, np.nan]
    for dt in ["f2", "f4", "f8"]:
        # Identical data in native vs. byte-swapped storage must map to
        # identical colors.
        anative = np.ma.masked_invalid(np.array(a, dtype=dt))
        aforeign = anative.byteswap().newbyteorder()
        assert_array_equal(cmap(anative), cmap(aforeign))
def test_BoundaryNorm():
    """
    Github issue #1258: interpolation was failing with numpy
    1.7 pre-release.

    ``np.nan``/``np.inf`` replace the ``np.NaN``/``np.Inf`` aliases,
    which were removed in numpy 2.0 (same objects).
    """
    boundaries = [0, 1.1, 2.2]
    vals = [-1, 0, 1, 2, 2.2, 4]
    # Without interpolation
    expected = [-1, 0, 0, 1, 2, 2]
    ncolors = len(boundaries) - 1
    bn = mcolors.BoundaryNorm(boundaries, ncolors)
    assert_array_equal(bn(vals), expected)
    # ncolors != len(boundaries) - 1 triggers interpolation
    expected = [-1, 0, 0, 2, 3, 3]
    ncolors = len(boundaries)
    bn = mcolors.BoundaryNorm(boundaries, ncolors)
    assert_array_equal(bn(vals), expected)
    # more boundaries for a third color
    boundaries = [0, 1, 2, 3]
    vals = [-1, 0.1, 1.1, 2.2, 4]
    ncolors = 5
    expected = [-1, 0, 2, 4, 5]
    bn = mcolors.BoundaryNorm(boundaries, ncolors)
    assert_array_equal(bn(vals), expected)
    # a scalar as input should not trigger an error and should return a scalar
    boundaries = [0, 1, 2]
    vals = [-1, 0.1, 1.1, 2.2]
    bn = mcolors.BoundaryNorm(boundaries, 2)
    expected = [-1, 0, 1, 2]
    for v, ex in zip(vals, expected):
        ret = bn(v)
        assert_true(isinstance(ret, six.integer_types))
        assert_array_equal(ret, ex)
        assert_array_equal(bn([v]), ex)
    # same with interp
    bn = mcolors.BoundaryNorm(boundaries, 3)
    expected = [-1, 0, 2, 3]
    for v, ex in zip(vals, expected):
        ret = bn(v)
        assert_true(isinstance(ret, six.integer_types))
        assert_array_equal(ret, ex)
        assert_array_equal(bn([v]), ex)
    # Clipping
    bn = mcolors.BoundaryNorm(boundaries, 3, clip=True)
    expected = [0, 0, 2, 2]
    for v, ex in zip(vals, expected):
        ret = bn(v)
        assert_true(isinstance(ret, six.integer_types))
        assert_array_equal(ret, ex)
        assert_array_equal(bn([v]), ex)
    # Masked arrays: masked entries map to the 'bad' fill value (-99).
    boundaries = [0, 1.1, 2.2]
    vals = np.ma.masked_invalid([-1., np.nan, 0, 1.4, 9])
    # Without interpolation
    ncolors = len(boundaries) - 1
    bn = mcolors.BoundaryNorm(boundaries, ncolors)
    expected = np.ma.masked_array([-1, -99, 0, 1, 2], mask=[0, 1, 0, 0, 0])
    assert_array_equal(bn(vals), expected)
    # With interpolation
    bn = mcolors.BoundaryNorm(boundaries, len(boundaries))
    expected = np.ma.masked_array([-1, -99, 0, 2, 3], mask=[0, 1, 0, 0, 0])
    assert_array_equal(bn(vals), expected)
    # Non-trivial masked arrays
    vals = np.ma.masked_invalid([np.inf, np.nan])
    assert_true(np.all(bn(vals).mask))
    vals = np.ma.masked_invalid([np.inf])
    assert_true(np.all(bn(vals).mask))
def test_LogNorm():
    """
    LogNorm ignored clip, now it has the same
    behavior as Normalize, e.g., values > vmax are bigger than 1
    without clip, with clip they are 1.
    """
    # With clip=True, 6 > vmax=5 must saturate at exactly 1.0.
    ln = mcolors.LogNorm(clip=True, vmax=5)
    assert_array_equal(ln([1, 6]), [0, 1.0])
def test_PowerNorm():
    """PowerNorm: gamma=1 matches Normalize; check clipping and inverse.

    Builtin ``float`` replaces the deprecated ``np.float`` alias
    (removed in numpy 1.20); the two were identical.
    """
    a = np.array([0, 0.5, 1, 1.5], dtype=float)
    pnorm = mcolors.PowerNorm(1)
    norm = mcolors.Normalize()
    assert_array_almost_equal(norm(a), pnorm(a))
    # gamma=2 over [0, 8]: normalized value is (x/8)**2; negatives clamp to 0.
    a = np.array([-0.5, 0, 2, 4, 8], dtype=float)
    expected = [0, 0, 1/16, 1/4, 1]
    pnorm = mcolors.PowerNorm(2, vmin=0, vmax=8)
    assert_array_almost_equal(pnorm(a), expected)
    assert_equal(pnorm(a[0]), expected[0])
    assert_equal(pnorm(a[2]), expected[2])
    assert_array_almost_equal(a[1:], pnorm.inverse(pnorm(a))[1:])
    # Clip = True
    a = np.array([-0.5, 0, 1, 8, 16], dtype=float)
    expected = [0, 0, 0, 1, 1]
    pnorm = mcolors.PowerNorm(2, vmin=2, vmax=8, clip=True)
    assert_array_almost_equal(pnorm(a), expected)
    assert_equal(pnorm(a[0]), expected[0])
    assert_equal(pnorm(a[-1]), expected[-1])
    # Clip = True at call time
    a = np.array([-0.5, 0, 1, 8, 16], dtype=float)
    expected = [0, 0, 0, 1, 1]
    pnorm = mcolors.PowerNorm(2, vmin=2, vmax=8, clip=False)
    assert_array_almost_equal(pnorm(a, clip=True), expected)
    assert_equal(pnorm(a[0], clip=True), expected[0])
    assert_equal(pnorm(a[-1], clip=True), expected[-1])
def test_Normalize():
    """Normalize: inverse/scalar/mask behavior plus dtype edge cases.

    Builtin ``float`` replaces the deprecated ``np.float`` alias
    (removed in numpy 1.20); the two were identical.
    """
    norm = mcolors.Normalize()
    vals = np.arange(-10, 10, 1, dtype=float)
    _inverse_tester(norm, vals)
    _scalar_tester(norm, vals)
    _mask_tester(norm, vals)
    # Handle integer input correctly (don't overflow when computing max-min,
    # i.e. 127-(-128) here).
    vals = np.array([-128, 127], dtype=np.int8)
    norm = mcolors.Normalize(vals.min(), vals.max())
    assert_array_equal(np.asarray(norm(vals)), [0, 1])
    # Don't lose precision on longdoubles (float128 on Linux):
    # for array inputs...
    vals = np.array([1.2345678901, 9.8765432109], dtype=np.longdouble)
    norm = mcolors.Normalize(vals.min(), vals.max())
    assert_array_equal(np.asarray(norm(vals)), [0, 1])
    # and for scalar ones.
    eps = np.finfo(np.longdouble).resolution
    norm = plt.Normalize(1, 1 + 100 * eps)
    # This returns exactly 0.5 when longdouble is extended precision (80-bit),
    # but only a value close to it when it is quadruple precision (128-bit).
    assert 0 < norm(1 + 50 * eps) < 1
def test_SymLogNorm():
    """
    Test SymLogNorm behavior.

    Builtin ``float`` replaces the deprecated ``np.float`` alias
    (removed in numpy 1.20); the two were identical.
    """
    norm = mcolors.SymLogNorm(3, vmax=5, linscale=1.2)
    vals = np.array([-30, -1, 2, 6], dtype=float)
    normed_vals = norm(vals)
    expected = [0., 0.53980074, 0.826991, 1.02758204]
    assert_array_almost_equal(normed_vals, expected)
    _inverse_tester(norm, vals)
    _scalar_tester(norm, vals)
    _mask_tester(norm, vals)
    # Ensure that specifying vmin returns the same result as above
    norm = mcolors.SymLogNorm(3, vmin=-30, vmax=5, linscale=1.2)
    normed_vals = norm(vals)
    assert_array_almost_equal(normed_vals, expected)
@cleanup
def test_SymLogNorm_colorbar():
    """
    Test un-called SymLogNorm in a colorbar.
    """
    norm = mcolors.SymLogNorm(0.1, vmin=-1, vmax=1, linscale=1)
    fig = plt.figure()
    # Constructing the colorbar is the test; binding it to an unused
    # local served no purpose.
    mcolorbar.ColorbarBase(fig.add_subplot(111), norm=norm)
    plt.close(fig)
def _inverse_tester(norm_instance, vals):
    """Round-trip check: norm followed by its inverse recovers *vals*."""
    roundtrip = norm_instance.inverse(norm_instance(vals))
    assert_array_almost_equal(roundtrip, vals)
def _scalar_tester(norm_instance, vals):
    """Scalar (float) inputs must normalize exactly like array inputs."""
    piecewise = [norm_instance(float(item)) for item in vals]
    assert_array_almost_equal(piecewise, norm_instance(vals))
def _mask_tester(norm_instance, vals):
    """Masked entries of the input stay masked in the normalized output."""
    masked = np.ma.array(vals)
    masked[0] = np.ma.masked
    assert_array_equal(masked.mask, norm_instance(masked).mask)
@image_comparison(baseline_images=['levels_and_colors'],
                  extensions=['png'])
def test_cmap_and_norm_from_levels_and_colors():
    """Image test: colormap/norm built from explicit levels and colors."""
    data = np.linspace(-2, 4, 49).reshape(7, 7)
    levels = [-1, 2, 2.5, 3]
    colors = ['red', 'green', 'blue', 'yellow', 'black']
    # 'both' uses the first/last colors for under/over-range values.
    extend = 'both'
    cmap, norm = mcolors.from_levels_and_colors(levels, colors, extend=extend)
    ax = plt.axes()
    m = plt.pcolormesh(data, cmap=cmap, norm=norm)
    plt.colorbar(m)
    # Hide the axes labels (but not the colorbar ones, as they are useful)
    for lab in ax.get_xticklabels() + ax.get_yticklabels():
        lab.set_visible(False)
def test_cmap_and_norm_from_levels_and_colors2():
    """Exhaustive check of from_levels_and_colors for every `extend` mode.

    The assertion message typo 'Wih' is corrected to 'With'.
    """
    levels = [-1, 2, 2.5, 3]
    colors = ['red', (0, 1, 0), 'blue', (0.5, 0.5, 0.5), (0.0, 0.0, 0.0, 1.0)]
    clr = mcolors.to_rgba_array(colors)
    bad = (0.1, 0.1, 0.1, 0.1)
    no_color = (0.0, 0.0, 0.0, 0.0)
    masked_value = 'masked_value'
    # Define the test values which are of interest.
    # Note: levels are lev[i] <= v < lev[i+1]
    tests = [('both', None, {-2: clr[0],
                             -1: clr[1],
                             2: clr[2],
                             2.25: clr[2],
                             3: clr[4],
                             3.5: clr[4],
                             masked_value: bad}),

             ('min', -1, {-2: clr[0],
                          -1: clr[1],
                          2: clr[2],
                          2.25: clr[2],
                          3: no_color,
                          3.5: no_color,
                          masked_value: bad}),

             ('max', -1, {-2: no_color,
                          -1: clr[0],
                          2: clr[1],
                          2.25: clr[1],
                          3: clr[3],
                          3.5: clr[3],
                          masked_value: bad}),

             ('neither', -2, {-2: no_color,
                              -1: clr[0],
                              2: clr[1],
                              2.25: clr[1],
                              3: no_color,
                              3.5: no_color,
                              masked_value: bad}),
             ]

    for extend, i1, cases in tests:
        cmap, norm = mcolors.from_levels_and_colors(levels, colors[0:i1],
                                                    extend=extend)
        cmap.set_bad(bad)
        for d_val, expected_color in cases.items():
            if d_val == masked_value:
                d_val = np.ma.array([1], mask=True)
            else:
                d_val = [d_val]
            assert_array_equal(expected_color, cmap(norm(d_val))[0],
                               'With extend={0!r} and data '
                               'value={1!r}'.format(extend, d_val))

    # Wrong color count for the given levels/extend must raise.
    assert_raises(ValueError, mcolors.from_levels_and_colors, levels, colors)
def test_rgb_hsv_round_trip():
    """rgb<->hsv conversions must round-trip for several array shapes."""
    for a_shape in [(500, 500, 3), (500, 3), (1, 3), (3,)]:
        np.random.seed(0)
        tt = np.random.random(a_shape)
        assert_array_almost_equal(tt,
            mcolors.hsv_to_rgb(mcolors.rgb_to_hsv(tt)))
        assert_array_almost_equal(tt,
            mcolors.rgb_to_hsv(mcolors.hsv_to_rgb(tt)))
@cleanup
def test_autoscale_masked():
    # Test for #2336. Previously fully masked data would trigger a ValueError.
    data = np.ma.masked_all((12, 20))
    plt.pcolor(data)
    plt.draw()
def test_colors_no_float():
    # Gray must be a string to distinguish 3-4 grays from RGB or RGBA.
    # Passing the callable and its argument directly to assert_raises
    # replaces the original one-off closure.
    assert_raises(ValueError, mcolors.to_rgba, 0.4)
@image_comparison(baseline_images=['light_source_shading_topo'],
                  extensions=['png'])
def test_light_source_topo_surface():
    """Shades a DEM using different v.e.'s and blend modes."""
    fname = cbook.get_sample_data('jacksboro_fault_dem.npz', asfileobj=False)
    dem = np.load(fname)
    elev = dem['elevation']
    # Get the true cellsize in meters for accurate vertical exaggeration
    # Convert from decimal degrees to meters
    dx, dy = dem['dx'], dem['dy']
    dx = 111320.0 * dx * np.cos(dem['ymin'])
    dy = 111320.0 * dy
    dem.close()

    ls = mcolors.LightSource(315, 45)
    cmap = cm.gist_earth

    # 3x3 grid: one row per blend mode, one column per vertical exaggeration.
    fig, axes = plt.subplots(nrows=3, ncols=3)
    for row, mode in zip(axes, ['hsv', 'overlay', 'soft']):
        for ax, ve in zip(row, [0.1, 1, 10]):
            rgb = ls.shade(elev, cmap, vert_exag=ve, dx=dx, dy=dy,
                           blend_mode=mode)
            ax.imshow(rgb)
            ax.set(xticks=[], yticks=[])
def test_light_source_shading_default():
    """Array comparison test for the default "hsv" blend mode.  Ensure the
    default result doesn't change without warning."""
    # Shade a radial cosine bump with the default light source settings.
    y, x = np.mgrid[-1.2:1.2:8j, -1.2:1.2:8j]
    z = 10 * np.cos(x**2 + y**2)

    cmap = plt.cm.copper
    ls = mcolors.LightSource(315, 45)
    rgb = ls.shade(z, cmap)

    # Result stored transposed and rounded for for more compact display...
    expect = np.array(
        [[[0.00, 0.45, 0.90, 0.90, 0.82, 0.62, 0.28, 0.00],
          [0.45, 0.94, 0.99, 1.00, 1.00, 0.96, 0.65, 0.17],
          [0.90, 0.99, 1.00, 1.00, 1.00, 1.00, 0.94, 0.35],
          [0.90, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 0.49],
          [0.82, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 0.41],
          [0.62, 0.96, 1.00, 1.00, 1.00, 1.00, 0.90, 0.07],
          [0.28, 0.65, 0.94, 1.00, 1.00, 0.90, 0.35, 0.01],
          [0.00, 0.17, 0.35, 0.49, 0.41, 0.07, 0.01, 0.00]],

         [[0.00, 0.28, 0.59, 0.72, 0.62, 0.40, 0.18, 0.00],
          [0.28, 0.78, 0.93, 0.92, 0.83, 0.66, 0.39, 0.11],
          [0.59, 0.93, 0.99, 1.00, 0.92, 0.75, 0.50, 0.21],
          [0.72, 0.92, 1.00, 0.99, 0.93, 0.76, 0.51, 0.18],
          [0.62, 0.83, 0.92, 0.93, 0.87, 0.68, 0.42, 0.08],
          [0.40, 0.66, 0.75, 0.76, 0.68, 0.52, 0.23, 0.02],
          [0.18, 0.39, 0.50, 0.51, 0.42, 0.23, 0.00, 0.00],
          [0.00, 0.11, 0.21, 0.18, 0.08, 0.02, 0.00, 0.00]],

         [[0.00, 0.18, 0.38, 0.46, 0.39, 0.26, 0.11, 0.00],
          [0.18, 0.50, 0.70, 0.75, 0.64, 0.44, 0.25, 0.07],
          [0.38, 0.70, 0.91, 0.98, 0.81, 0.51, 0.29, 0.13],
          [0.46, 0.75, 0.98, 0.96, 0.84, 0.48, 0.22, 0.12],
          [0.39, 0.64, 0.81, 0.84, 0.71, 0.31, 0.11, 0.05],
          [0.26, 0.44, 0.51, 0.48, 0.31, 0.10, 0.03, 0.01],
          [0.11, 0.25, 0.29, 0.22, 0.11, 0.03, 0.00, 0.00],
          [0.00, 0.07, 0.13, 0.12, 0.05, 0.01, 0.00, 0.00]],

         [[1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00],
          [1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00],
          [1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00],
          [1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00],
          [1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00],
          [1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00],
          [1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00],
          [1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00]]
         ]).T
    if (V(np.__version__) == V('1.9.0')):
        # Numpy 1.9.0 uses a 2. order algorithm on the edges by default
        # This was changed back again in 1.9.1
        expect = expect[1:-1, 1:-1, :]
        rgb = rgb[1:-1, 1:-1, :]

    assert_array_almost_equal(rgb, expect, decimal=2)
@knownfailureif((V(np.__version__) <= V('1.9.0')
                 and V(np.__version__) >= V('1.7.0')))
# Numpy 1.9.1 fixed a bug in masked arrays which resulted in
# additional elements being masked when calculating the gradient thus
# the output is different with earlier numpy versions.
def test_light_source_masked_shading():
    """Array comparison test for a surface with a masked portion. Ensures that
    we don't wind up with "fringes" of odd colors around masked regions."""
    # Same cosine bump as the default-shading test, with its peak masked out.
    y, x = np.mgrid[-1.2:1.2:8j, -1.2:1.2:8j]
    z = 10 * np.cos(x**2 + y**2)

    z = np.ma.masked_greater(z, 9.9)

    cmap = plt.cm.copper
    ls = mcolors.LightSource(315, 45)
    rgb = ls.shade(z, cmap)

    # Result stored transposed and rounded for for more compact display...
    expect = np.array(
        [[[0.00, 0.46, 0.91, 0.91, 0.84, 0.64, 0.29, 0.00],
          [0.46, 0.96, 1.00, 1.00, 1.00, 0.97, 0.67, 0.18],
          [0.91, 1.00, 1.00, 1.00, 1.00, 1.00, 0.96, 0.36],
          [0.91, 1.00, 1.00, 0.00, 0.00, 1.00, 1.00, 0.51],
          [0.84, 1.00, 1.00, 0.00, 0.00, 1.00, 1.00, 0.44],
          [0.64, 0.97, 1.00, 1.00, 1.00, 1.00, 0.94, 0.09],
          [0.29, 0.67, 0.96, 1.00, 1.00, 0.94, 0.38, 0.01],
          [0.00, 0.18, 0.36, 0.51, 0.44, 0.09, 0.01, 0.00]],

         [[0.00, 0.29, 0.61, 0.75, 0.64, 0.41, 0.18, 0.00],
          [0.29, 0.81, 0.95, 0.93, 0.85, 0.68, 0.40, 0.11],
          [0.61, 0.95, 1.00, 0.78, 0.78, 0.77, 0.52, 0.22],
          [0.75, 0.93, 0.78, 0.00, 0.00, 0.78, 0.54, 0.19],
          [0.64, 0.85, 0.78, 0.00, 0.00, 0.78, 0.45, 0.08],
          [0.41, 0.68, 0.77, 0.78, 0.78, 0.55, 0.25, 0.02],
          [0.18, 0.40, 0.52, 0.54, 0.45, 0.25, 0.00, 0.00],
          [0.00, 0.11, 0.22, 0.19, 0.08, 0.02, 0.00, 0.00]],

         [[0.00, 0.19, 0.39, 0.48, 0.41, 0.26, 0.12, 0.00],
          [0.19, 0.52, 0.73, 0.78, 0.66, 0.46, 0.26, 0.07],
          [0.39, 0.73, 0.95, 0.50, 0.50, 0.53, 0.30, 0.14],
          [0.48, 0.78, 0.50, 0.00, 0.00, 0.50, 0.23, 0.12],
          [0.41, 0.66, 0.50, 0.00, 0.00, 0.50, 0.11, 0.05],
          [0.26, 0.46, 0.53, 0.50, 0.50, 0.11, 0.03, 0.01],
          [0.12, 0.26, 0.30, 0.23, 0.11, 0.03, 0.00, 0.00],
          [0.00, 0.07, 0.14, 0.12, 0.05, 0.01, 0.00, 0.00]],

         [[1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00],
          [1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00],
          [1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00],
          [1.00, 1.00, 1.00, 0.00, 0.00, 1.00, 1.00, 1.00],
          [1.00, 1.00, 1.00, 0.00, 0.00, 1.00, 1.00, 1.00],
          [1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00],
          [1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00],
          [1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00]],
         ]).T

    assert_array_almost_equal(rgb, expect, decimal=2)
def test_light_source_hillshading():
    """Compare the current hillshading method against one that should be
    mathematically equivalent. Illuminates a cone from a range of angles."""

    def alternative_hillshade(azimuth, elev, z):
        # Reference implementation: explicit surface normals dotted with
        # the illumination vector, rescaled to [0, 1].
        illum = _sph2cart(*_azimuth2math(azimuth, elev))
        illum = np.array(illum)

        dy, dx = np.gradient(-z)
        dy = -dy
        dz = np.ones_like(dy)
        normals = np.dstack([dx, dy, dz])
        dividers = np.zeros_like(z)[..., None]
        for i, mat in enumerate(normals):
            for j, vec in enumerate(mat):
                dividers[i, j, 0] = np.linalg.norm(vec)
        normals /= dividers
        # once we drop support for numpy 1.7.x the above can be written as
        # normals /= np.linalg.norm(normals, axis=2)[..., None]
        # avoiding the double loop.

        intensity = np.tensordot(normals, illum, axes=(2, 0))
        intensity -= intensity.min()
        intensity /= intensity.ptp()
        return intensity

    # Cone-shaped surface, illuminated from all azimuths and elevations.
    y, x = np.mgrid[5:0:-1, :5]
    z = -np.hypot(x - x.mean(), y - y.mean())

    for az, elev in itertools.product(range(0, 390, 30), range(0, 105, 15)):
        ls = mcolors.LightSource(az, elev)
        h1 = ls.hillshade(z)
        h2 = alternative_hillshade(az, elev, z)
        assert_array_almost_equal(h1, h2)
def test_light_source_planar_hillshading():
    """Ensure that the illumination intensity is correct for planar
    surfaces."""

    def plane(azimuth, elevation, x, y):
        """Create a plane whose normal vector is at the given azimuth and
        elevation."""
        theta, phi = _azimuth2math(azimuth, elevation)
        a, b, c = _sph2cart(theta, phi)
        z = -(a*x + b*y) / c
        return z

    def angled_plane(azimuth, elevation, angle, x, y):
        """Create a plane whose normal vector is at an angle from the given
        azimuth and elevation."""
        elevation = elevation + angle
        if elevation > 90:
            # Keep the normal in range by flipping the azimuth.
            azimuth = (azimuth + 180) % 360
            elevation = (90 - elevation) % 90
        return plane(azimuth, elevation, x, y)

    y, x = np.mgrid[5:0:-1, :5]
    for az, elev in itertools.product(range(0, 390, 30), range(0, 105, 15)):
        ls = mcolors.LightSource(az, elev)

        # Make a plane at a range of angles to the illumination;
        # intensity should be the cosine of that angle everywhere.
        for angle in range(0, 105, 15):
            z = angled_plane(az, elev, angle, x, y)
            h = ls.hillshade(z)
            assert_array_almost_equal(h, np.cos(np.radians(angle)))
def test_color_names():
    # Named colors resolve through the base, xkcd, and tableau palettes.
    assert mcolors.to_hex("blue") == "#0000ff"
    assert mcolors.to_hex("xkcd:blue") == "#0343df"
    assert mcolors.to_hex("tab:blue") == "#1f77b4"
def _sph2cart(theta, phi):
    """Spherical angles (azimuthal *theta*, polar *phi*, radians) to a
    Cartesian unit vector (x, y, z)."""
    sin_phi = np.sin(phi)
    return np.cos(theta) * sin_phi, np.sin(theta) * sin_phi, np.cos(phi)
def _azimuth2math(azimuth, elevation):
    """Converts from clockwise-from-north and up-from-horizontal to
    mathematical conventions (counter-clockwise azimuthal angle, polar
    angle from the zenith), both in radians."""
    return (np.radians((90 - azimuth) % 360),
            np.radians(90 - elevation))
def test_pandas_iterable():
    # Skip cleanly when optional pieces are unavailable.
    try:
        import pandas as pd
    except ImportError:
        raise SkipTest("Pandas not installed")
    if assert_sequence_equal is None:
        raise SkipTest("nose lacks required function")
    # Using a list or series yields equivalent
    # color maps, i.e the series isn't seen as
    # a single color
    lst = ['red', 'blue', 'green']
    s = pd.Series(lst)
    cm1 = mcolors.ListedColormap(lst, N=5)
    cm2 = mcolors.ListedColormap(s, N=5)
    assert_sequence_equal(cm1.colors, cm2.colors)
@cleanup
def test_cn():
    """'CN' color aliases must track the active axes.prop_cycle."""
    matplotlib.rcParams['axes.prop_cycle'] = cycler('color',
                                                    ['blue', 'r'])
    assert mcolors.to_hex("C0") == '#0000ff'
    assert mcolors.to_hex("C1") == '#ff0000'

    matplotlib.rcParams['axes.prop_cycle'] = cycler('color',
                                                    ['xkcd:blue', 'r'])
    assert mcolors.to_hex("C0") == '#0343df'
    assert mcolors.to_hex("C1") == '#ff0000'

    matplotlib.rcParams['axes.prop_cycle'] = cycler('color', ['8e4585', 'r'])
    assert mcolors.to_hex("C0") == '#8e4585'
    # if '8e4585' gets parsed as a float before it gets detected as a hex
    # colour it will be interpreted as a very large number.
    # this mustn't happen.
    assert mcolors.to_rgb("C0")[0] != np.inf
def test_conversions():
    """Spot checks for to_rgba/to_rgba_array/to_hex conversions."""
    # to_rgba_array("none") returns a (0, 4) array.
    assert_array_equal(mcolors.to_rgba_array("none"), np.zeros((0, 4)))
    # alpha is properly set.
    assert_equal(mcolors.to_rgba((1, 1, 1), .5), (1, 1, 1, .5))
    assert_equal(mcolors.to_rgba(".1", .5), (.1, .1, .1, .5))
    # builtin round differs between py2 and py3.
    assert_equal(mcolors.to_hex((.7, .7, .7)), "#b2b2b2")
    # hex roundtrip.
    hex_color = "#1234abcd"
    assert_equal(mcolors.to_hex(mcolors.to_rgba(hex_color), keep_alpha=True),
                 hex_color)
def test_grey_gray():
    # Every 'grey' spelling must map to the same color as its 'gray'
    # counterpart, and vice versa.
    color_mapping = mcolors._colors_full_map
    for k in color_mapping.keys():
        if 'grey' in k:
            assert color_mapping[k] == color_mapping[k.replace('grey', 'gray')]
        if 'gray' in k:
            assert color_mapping[k] == color_mapping[k.replace('gray', 'grey')]
def test_tableau_order():
    """TABLEAU_COLORS must iterate in the default color-cycle order."""
    expected = ['#1f77b4', '#ff7f0e', '#2ca02c', '#d62728',
                '#9467bd', '#8c564b', '#e377c2', '#7f7f7f',
                '#bcbd22', '#17becf']
    assert list(mcolors.TABLEAU_COLORS.values()) == expected
if __name__ == '__main__':
    import nose
    # Run this module's tests (doctests included) without exiting the process.
    nose.runmodule(argv=['-s', '--with-doctest'], exit=False)
|
gpl-3.0
|
ssh0/growing-string
|
triangular_lattice/span_fitting.py
|
1
|
3308
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
#
# written by Shotaro Fujimoto
# 2017-01-23
from __future__ import print_function
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.widgets import SpanSelector
class SpanFitting:
    """Fit (x, y) data interactively: drag a horizontal span over the
    axes to choose the x-range that is fitted and drawn."""
    def __init__(self, ax, x, y, optimizer, parameters,
                 attacth_text=True, attacth_label=False):
        """
        === Arguments ===
        ax: matplotlib.Axes
        x, y: raw data (1-d array)
        optimizer: Optimizer class
        --- self.func(parameters, x, y): calc residual(= y - f(x))
        --- self.fitting(): return fitted parameters
        --- self.fitted(x): return f(x) with fitted parameters
        parameters: (list) initial guess passed to the optimizer
        attacth_text: (bool) fitted parameters will be shown near the fitted curve
        attacth_label: (bool) fitted parameters will be shown in a legend
        """
        self.ax = ax
        # Pick the plot function matching the axes' current scales so the
        # fitted curve is drawn in the same coordinate system as the data.
        xscale = self.ax.get_xscale()
        yscale = self.ax.get_yscale()
        if yscale == 'linear':
            if xscale == 'linear':
                self.plot = self.ax.plot
            elif xscale == 'log':
                self.plot = self.ax.semilogx
        elif yscale == 'log':
            if xscale == 'linear':
                self.plot = self.ax.semilogy
            elif xscale == 'log':
                self.plot = self.ax.loglog
        # NOTE(review): self.plot stays undefined for scales other than
        # 'linear'/'log' (e.g. 'symlog') — confirm callers never use those.
        self.x, self.y = x, y
        self.optimizer = optimizer
        self.parameters = parameters
        self.attach_text = attacth_text
        self.attach_label = attacth_label
        # Handle of the currently drawn fitted curve (None until first fit).
        self.ln = None
    def onselect(self, vmin, vmax):
        """SpanSelector callback: fit the data in [vmin, vmax] and draw it."""
        # Remove the previous fit artists before drawing the new ones.
        if self.ln is not None:
            self.ln.remove()
            if self.attach_text:
                self.text.remove()
        selected_index = np.where((self.x >= vmin) & (self.x <= vmax))
        optimizer = self.optimizer(
            args=(self.x[selected_index], self.y[selected_index]),
            parameters=self.parameters)
        result = optimizer.fitting()
        result_text = ', '.join(['{} = {}'.format(k, v)
                                 for k, v in result.items()])
        print(result_text)
        X = self.x[selected_index]
        Y = optimizer.fitted(X)
        if self.attach_label:
            self.ln, = self.plot(X, Y, ls='-', label=result_text,
                                 marker='', color='k')
            self.ax.legend(loc='best')
        else:
            self.ln, = self.plot(X, Y, ls='-', marker='', color='k')
        if self.attach_text:
            # Annotate near the middle of the fitted segment.
            self.text = self.ax.text(
                (X[0] + X[-1]) / 2., (Y[0] + Y[-1]) / 2.,
                result_text, ha='center', va='bottom'
            )
    def start(self):
        """Plot the raw data, activate the selector and block in show()."""
        self.ax.plot(self.x, self.y, '.')
        # Local reference keeps the selector alive while plt.show() blocks.
        span = SpanSelector(self.ax, self.onselect, direction='horizontal')
        plt.show()
if __name__ == '__main__':
    import matplotlib.pyplot as plt
    from optimize import Optimize_powerlaw, Optimize_linear
    # Synthetic power-law data: exponent 2 plus a little random jitter.
    x = np.logspace(1, 10, base=2.)
    y = x ** (2 + 0.1 * (np.random.rand() - 0.5))
    fig, ax = plt.subplots()
    ax.set_title('test')
    ax.set_xscale('log')
    ax.set_yscale('log')
    # Initial guess [0.5, 2]; drag over the plot to fit a sub-range.
    spanfitting = SpanFitting(ax, x, y, Optimize_powerlaw, [0.5, 2])
    spanfitting.start()
|
mit
|
ZhukovGreen/UMLND
|
finding_donors/visuals.py
|
1
|
5075
|
import matplotlib.patches as mpatches
import matplotlib.pyplot as plt
import numpy as np
def distribution(data, transformed=False):
    """
    Visualization code for displaying skewed distributions of features
    """
    # One panel per skewed feature, side by side.
    fig = plt.figure(figsize=(11, 5))
    for panel, feature in ((1, 'capital-gain'), (2, 'capital-loss')):
        axes = fig.add_subplot(1, 2, panel)
        axes.hist(data[feature], bins=25, color='#00A0A0')
        axes.set_title("'%s' Feature Distribution" % feature, fontsize=14)
        axes.set_xlabel("Value")
        axes.set_ylabel("Number of Records")
        # Clip the y-axis at 2000 and mark the top tick as open-ended.
        axes.set_ylim((0, 2000))
        axes.set_yticks([0, 500, 1000, 1500, 2000])
        axes.set_yticklabels([0, 500, 1000, 1500, ">2000"])
    # Choose the suptitle that matches whether the data was log-transformed.
    if transformed:
        suptitle = "Log-transformed Distributions of Continuous Census Data Features"
    else:
        suptitle = "Skewed Distributions of Continuous Census Data Features"
    fig.suptitle(suptitle, fontsize=16, y=1.03)
    fig.tight_layout()
    plt.show()
def evaluate(results, accuracy, f1):
    """
    Visualization code to display results of various learners.

    inputs:
      - results: dict keyed by learner name; each value is indexed first by
        training-set size (0, 1, 2) and then by metric name ('train_time',
        'acc_train', 'f_train', 'pred_time', 'acc_test', 'f_test'), as
        produced by 'train_predict()'
      - accuracy: The accuracy score for the naive predictor (dashed baseline)
      - f1: The F-score for the naive predictor (dashed baseline)
    """
    # Create figure: 2x3 grid of panels (train row / test row).
    fig, ax = plt.subplots(2, 3, figsize=(11, 7))
    # Constants
    bar_width = 0.3
    colors = ['#A00000', '#00A0A0', '#00A000']
    # Super loop: one bar group per learner, one panel per metric.
    for k, learner in enumerate(results.keys()):
        for j, metric in enumerate(
                ['train_time', 'acc_train', 'f_train', 'pred_time', 'acc_test',
                 'f_test']):
            for i in np.arange(3):
                # Creative plot code
                ax[int(j / 3), j % 3].bar(i + k * bar_width,
                                          results[learner][i][metric],
                                          width=bar_width, color=colors[k])
                ax[int(j / 3), j % 3].set_xticks([0.45, 1.45, 2.45])
                ax[int(j / 3), j % 3].set_xticklabels(["1%", "10%", "100%"])
                ax[int(j / 3), j % 3].set_xlabel("Training Set Size")
                ax[int(j / 3), j % 3].set_xlim((-0.1, 3.0))
    # Add unique y-labels
    ax[0, 0].set_ylabel("Time (in seconds)")
    ax[0, 1].set_ylabel("Accuracy Score")
    ax[0, 2].set_ylabel("F-score")
    ax[1, 0].set_ylabel("Time (in seconds)")
    ax[1, 1].set_ylabel("Accuracy Score")
    ax[1, 2].set_ylabel("F-score")
    # Add titles
    ax[0, 0].set_title("Model Training")
    ax[0, 1].set_title("Accuracy Score on Training Subset")
    ax[0, 2].set_title("F-score on Training Subset")
    ax[1, 0].set_title("Model Predicting")
    ax[1, 1].set_title("Accuracy Score on Testing Set")
    ax[1, 2].set_title("F-score on Testing Set")
    # Add horizontal lines for naive predictors
    ax[0, 1].axhline(y=accuracy, xmin=-0.1, xmax=3.0, linewidth=1, color='k',
                     linestyle='dashed')
    ax[1, 1].axhline(y=accuracy, xmin=-0.1, xmax=3.0, linewidth=1, color='k',
                     linestyle='dashed')
    ax[0, 2].axhline(y=f1, xmin=-0.1, xmax=3.0, linewidth=1, color='k',
                     linestyle='dashed')
    ax[1, 2].axhline(y=f1, xmin=-0.1, xmax=3.0, linewidth=1, color='k',
                     linestyle='dashed')
    # Set y-limits for score panels
    ax[0, 1].set_ylim((0, 1))
    ax[0, 2].set_ylim((0, 1))
    ax[1, 1].set_ylim((0, 1))
    ax[1, 2].set_ylim((0, 1))
    # Create patches for the legend
    patches = []
    for i, learner in enumerate(results.keys()):
        patches.append(mpatches.Patch(color=colors[i], label=learner))
    plt.legend(handles=patches, bbox_to_anchor=(-.80, 2.53), \
               loc='upper center', borderaxespad=0., ncol=3,
               fontsize='x-large')
    # Aesthetics
    plt.suptitle("Performance Metrics for Three Supervised Learning Models",
                 fontsize=16, y=1.10)
    plt.tight_layout()
    plt.show()
def feature_plot(importances, X_train, y_train):
    """
    Plot the five most important features and their cumulative weight.

    inputs:
      - importances: 1-d array of feature weights aligned with X_train's columns
      - X_train: DataFrame whose column names label the bars
      - y_train: unused here; kept so the plotting helpers share one signature
    """
    # Display the five most important features
    indices = np.argsort(importances)[::-1]
    columns = X_train.columns.values[indices[:5]]
    values = importances[indices][:5]
    # Create the plot
    fig = plt.figure(figsize=(9, 5))
    plt.title("Normalized Weights for First Five Most Predictive Features",
              fontsize=16)
    plt.bar(np.arange(5), values, width=0.6, align="center", color='#00A000', \
            label="Feature Weight")
    plt.bar(np.arange(5) - 0.3, np.cumsum(values), width=0.2, align="center",
            color='#00A0A0',
            label="Cumulative Feature Weight")
    plt.xticks(np.arange(5), columns)
    plt.xlim((-0.5, 4.5))
    plt.ylabel("Weight", fontsize=12)
    plt.xlabel("Feature", fontsize=12)
    plt.legend(loc='upper center')
    plt.tight_layout()
    plt.show()
|
gpl-3.0
|
UNR-AERIAL/scikit-learn
|
examples/covariance/plot_mahalanobis_distances.py
|
348
|
6232
|
r"""
================================================================
Robust covariance estimation and Mahalanobis distances relevance
================================================================
An example to show covariance estimation with the Mahalanobis
distances on Gaussian distributed data.
For Gaussian distributed data, the distance of an observation
:math:`x_i` to the mode of the distribution can be computed using its
Mahalanobis distance: :math:`d_{(\mu,\Sigma)}(x_i)^2 = (x_i -
\mu)'\Sigma^{-1}(x_i - \mu)` where :math:`\mu` and :math:`\Sigma` are
the location and the covariance of the underlying Gaussian
distribution.
In practice, :math:`\mu` and :math:`\Sigma` are replaced by some
estimates. The usual covariance maximum likelihood estimate is very
sensitive to the presence of outliers in the data set, and therefore
so are the corresponding Mahalanobis distances. One would do better to
use a robust estimator of covariance to guarantee that the estimation is
resistant to "erroneous" observations in the data set and that the
associated Mahalanobis distances accurately reflect the true
organisation of the observations.
The Minimum Covariance Determinant estimator is a robust,
high-breakdown point (i.e. it can be used to estimate the covariance
matrix of highly contaminated datasets, up to
:math:`\frac{n_\text{samples}-n_\text{features}-1}{2}` outliers)
estimator of covariance. The idea is to find
:math:`\frac{n_\text{samples}+n_\text{features}+1}{2}`
observations whose empirical covariance has the smallest determinant,
yielding a "pure" subset of observations from which to compute
standard estimates of location and covariance.
The Minimum Covariance Determinant estimator (MCD) has been introduced
by P. J. Rousseeuw in [1].
This example illustrates how the Mahalanobis distances are affected by
outlying data: observations drawn from a contaminating distribution
are not distinguishable from the observations coming from the real,
Gaussian distribution that one may want to work with. Using MCD-based
Mahalanobis distances, the two populations become
distinguishable. Associated applications are outliers detection,
observations ranking, clustering, ...
For visualization purpose, the cubic root of the Mahalanobis distances
are represented in the boxplot, as Wilson and Hilferty suggest [2]
[1] P. J. Rousseeuw. Least median of squares regression. J. Am
Stat Ass, 79:871, 1984.
[2] Wilson, E. B., & Hilferty, M. M. (1931). The distribution of chi-square.
Proceedings of the National Academy of Sciences of the United States
of America, 17, 684-688.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.covariance import EmpiricalCovariance, MinCovDet
# Experiment size: 125 points in 2-d, of which the last 25 are outliers.
n_samples = 125
n_outliers = 25
n_features = 2
# generate data
gen_cov = np.eye(n_features)
gen_cov[0, 0] = 2.
X = np.dot(np.random.randn(n_samples, n_features), gen_cov)
# add some outliers (drawn with an inflated variance on the second feature)
outliers_cov = np.eye(n_features)
outliers_cov[np.arange(1, n_features), np.arange(1, n_features)] = 7.
X[-n_outliers:] = np.dot(np.random.randn(n_outliers, n_features), outliers_cov)
# fit a Minimum Covariance Determinant (MCD) robust estimator to data
robust_cov = MinCovDet().fit(X)
# compare estimators learnt from the full data set with true parameters
emp_cov = EmpiricalCovariance().fit(X)
###############################################################################
# Display results
fig = plt.figure()
plt.subplots_adjust(hspace=-.1, wspace=.4, top=.95, bottom=.05)
# Show data set
subfig1 = plt.subplot(3, 1, 1)
inlier_plot = subfig1.scatter(X[:, 0], X[:, 1],
                              color='black', label='inliers')
outlier_plot = subfig1.scatter(X[:, 0][-n_outliers:], X[:, 1][-n_outliers:],
                               color='red', label='outliers')
subfig1.set_xlim(subfig1.get_xlim()[0], 11.)
subfig1.set_title("Mahalanobis distances of a contaminated data set:")
# Show contours of the distance functions, evaluated on a 100x100 grid
xx, yy = np.meshgrid(np.linspace(plt.xlim()[0], plt.xlim()[1], 100),
                     np.linspace(plt.ylim()[0], plt.ylim()[1], 100))
zz = np.c_[xx.ravel(), yy.ravel()]
mahal_emp_cov = emp_cov.mahalanobis(zz)
mahal_emp_cov = mahal_emp_cov.reshape(xx.shape)
emp_cov_contour = subfig1.contour(xx, yy, np.sqrt(mahal_emp_cov),
                                  cmap=plt.cm.PuBu_r,
                                  linestyles='dashed')
mahal_robust_cov = robust_cov.mahalanobis(zz)
mahal_robust_cov = mahal_robust_cov.reshape(xx.shape)
robust_contour = subfig1.contour(xx, yy, np.sqrt(mahal_robust_cov),
                                 cmap=plt.cm.YlOrBr_r, linestyles='dotted')
subfig1.legend([emp_cov_contour.collections[1], robust_contour.collections[1],
                inlier_plot, outlier_plot],
               ['MLE dist', 'robust dist', 'inliers', 'outliers'],
               loc="upper right", borderaxespad=0)
plt.xticks(())
plt.yticks(())
# Plot the scores for each point
# (the cube-root transform, per Wilson & Hilferty [2], is used for display)
emp_mahal = emp_cov.mahalanobis(X - np.mean(X, 0)) ** (0.33)
subfig2 = plt.subplot(2, 2, 3)
subfig2.boxplot([emp_mahal[:-n_outliers], emp_mahal[-n_outliers:]], widths=.25)
subfig2.plot(1.26 * np.ones(n_samples - n_outliers),
             emp_mahal[:-n_outliers], '+k', markeredgewidth=1)
subfig2.plot(2.26 * np.ones(n_outliers),
             emp_mahal[-n_outliers:], '+k', markeredgewidth=1)
subfig2.axes.set_xticklabels(('inliers', 'outliers'), size=15)
subfig2.set_ylabel(r"$\sqrt[3]{\rm{(Mahal. dist.)}}$", size=16)
subfig2.set_title("1. from non-robust estimates\n(Maximum Likelihood)")
plt.yticks(())
robust_mahal = robust_cov.mahalanobis(X - robust_cov.location_) ** (0.33)
subfig3 = plt.subplot(2, 2, 4)
subfig3.boxplot([robust_mahal[:-n_outliers], robust_mahal[-n_outliers:]],
                widths=.25)
subfig3.plot(1.26 * np.ones(n_samples - n_outliers),
             robust_mahal[:-n_outliers], '+k', markeredgewidth=1)
subfig3.plot(2.26 * np.ones(n_outliers),
             robust_mahal[-n_outliers:], '+k', markeredgewidth=1)
subfig3.axes.set_xticklabels(('inliers', 'outliers'), size=15)
subfig3.set_ylabel(r"$\sqrt[3]{\rm{(Mahal. dist.)}}$", size=16)
subfig3.set_title("2. from robust estimates\n(Minimum Covariance Determinant)")
plt.yticks(())
plt.show()
|
bsd-3-clause
|
OshynSong/scikit-learn
|
examples/model_selection/plot_roc_crossval.py
|
247
|
3253
|
"""
=============================================================
Receiver Operating Characteristic (ROC) with cross validation
=============================================================
Example of Receiver Operating Characteristic (ROC) metric to evaluate
classifier output quality using cross-validation.
ROC curves typically feature true positive rate on the Y axis, and false
positive rate on the X axis. This means that the top left corner of the plot is
the "ideal" point - a false positive rate of zero, and a true positive rate of
one. This is not very realistic, but it does mean that a larger area under the
curve (AUC) is usually better.
The "steepness" of ROC curves is also important, since it is ideal to maximize
the true positive rate while minimizing the false positive rate.
This example shows the ROC response of different datasets, created from K-fold
cross-validation. Taking all of these curves, it is possible to calculate the
mean area under curve, and see the variance of the curve when the
training set is split into different subsets. This roughly shows how the
classifier output is affected by changes in the training data, and how
different the splits generated by K-fold cross-validation are from one another.
.. note::
See also :func:`sklearn.metrics.auc_score`,
:func:`sklearn.cross_validation.cross_val_score`,
:ref:`example_model_selection_plot_roc.py`,
"""
print(__doc__)
import numpy as np
from scipy import interp
import matplotlib.pyplot as plt
from sklearn import svm, datasets
from sklearn.metrics import roc_curve, auc
from sklearn.cross_validation import StratifiedKFold
###############################################################################
# Data IO and generation
# import some data to play with
iris = datasets.load_iris()
X = iris.data
y = iris.target
# Keep only classes 0 and 1 so the problem is binary.
X, y = X[y != 2], y[y != 2]
n_samples, n_features = X.shape
# Add noisy features to make the task harder
random_state = np.random.RandomState(0)
X = np.c_[X, random_state.randn(n_samples, 200 * n_features)]
###############################################################################
# Classification and ROC analysis
# Run classifier with cross-validation and plot ROC curves
cv = StratifiedKFold(y, n_folds=6)
classifier = svm.SVC(kernel='linear', probability=True,
                     random_state=random_state)
# Accumulate the mean true-positive rate on a common 100-point FPR grid.
mean_tpr = 0.0
mean_fpr = np.linspace(0, 1, 100)
all_tpr = []
for i, (train, test) in enumerate(cv):
    probas_ = classifier.fit(X[train], y[train]).predict_proba(X[test])
    # Compute ROC curve and area the curve
    fpr, tpr, thresholds = roc_curve(y[test], probas_[:, 1])
    mean_tpr += interp(mean_fpr, fpr, tpr)
    mean_tpr[0] = 0.0
    roc_auc = auc(fpr, tpr)
    plt.plot(fpr, tpr, lw=1, label='ROC fold %d (area = %0.2f)' % (i, roc_auc))
# Chance line: a random classifier lies on the diagonal.
plt.plot([0, 1], [0, 1], '--', color=(0.6, 0.6, 0.6), label='Luck')
mean_tpr /= len(cv)
mean_tpr[-1] = 1.0
mean_auc = auc(mean_fpr, mean_tpr)
plt.plot(mean_fpr, mean_tpr, 'k--',
         label='Mean ROC (area = %0.2f)' % mean_auc, lw=2)
plt.xlim([-0.05, 1.05])
plt.ylim([-0.05, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver operating characteristic example')
plt.legend(loc="lower right")
plt.show()
|
bsd-3-clause
|
xiaozhuchacha/OpenBottle
|
grammar_induction/earley_parser/nltk/app/__init__.py
|
5
|
1737
|
# Natural Language Toolkit: Applications package
#
# Copyright (C) 2001-2017 NLTK Project
# Author: Edward Loper <[email protected]>
# Steven Bird <[email protected]>
# URL: <http://nltk.org/>
# For license information, see LICENSE.TXT
"""
Interactive NLTK Applications:
chartparser: Chart Parser
chunkparser: Regular-Expression Chunk Parser
collocations: Find collocations in text
concordance: Part-of-speech concordancer
nemo: Finding (and Replacing) Nemo regular expression tool
rdparser: Recursive Descent Parser
srparser: Shift-Reduce Parser
wordnet: WordNet Browser
"""
# Import Tkinter-based modules if Tkinter is installed
import nltk.compat
try:
import tkinter
except ImportError:
import warnings
warnings.warn("nltk.app package not loaded "
"(please install Tkinter library).")
else:
from nltk.app.chartparser_app import app as chartparser
from nltk.app.chunkparser_app import app as chunkparser
from nltk.app.collocations_app import app as collocations
from nltk.app.concordance_app import app as concordance
from nltk.app.nemo_app import app as nemo
from nltk.app.rdparser_app import app as rdparser
from nltk.app.srparser_app import app as srparser
from nltk.app.wordnet_app import app as wordnet
try:
from matplotlib import pylab
except ImportError:
import warnings
warnings.warn("nltk.app.wordfreq not loaded "
"(requires the matplotlib library).")
else:
from nltk.app.wordfreq_app import app as wordfreq
# skip doctests from this package
def setup_module(module):
    # Nose collection hook: skip this package's doctests entirely.
    from nose import SkipTest
    raise SkipTest("nltk.app examples are not doctests")
|
mit
|
giorgiop/scikit-learn
|
sklearn/metrics/base.py
|
46
|
4627
|
"""
Common code for all metrics
"""
# Authors: Alexandre Gramfort <[email protected]>
# Mathieu Blondel <[email protected]>
# Olivier Grisel <[email protected]>
# Arnaud Joly <[email protected]>
# Jochen Wersdorfer <[email protected]>
# Lars Buitinck
# Joel Nothman <[email protected]>
# Noel Dawe <[email protected]>
# License: BSD 3 clause
from __future__ import division
import numpy as np
from ..utils import check_array, check_consistent_length
from ..utils.multiclass import type_of_target
from ..exceptions import UndefinedMetricWarning as _UndefinedMetricWarning
from ..utils import deprecated
@deprecated("UndefinedMetricWarning has been moved into the sklearn.exceptions"
            " module. It will not be available here from version 0.19")
class UndefinedMetricWarning(_UndefinedMetricWarning):
    # Backward-compatibility alias; import from sklearn.exceptions instead.
    pass
def _average_binary_score(binary_metric, y_true, y_score, average,
                          sample_weight=None):
    """Average a binary metric for multilabel classification

    Parameters
    ----------
    y_true : array, shape = [n_samples] or [n_samples, n_classes]
        True binary labels in binary label indicators.

    y_score : array, shape = [n_samples] or [n_samples, n_classes]
        Target scores, can either be probability estimates of the positive
        class, confidence values, or binary decisions.

    average : string, [None, 'micro', 'macro' (default), 'samples', 'weighted']
        If ``None``, the scores for each class are returned. Otherwise,
        this determines the type of averaging performed on the data:

        ``'micro'``:
            Calculate metrics globally by considering each element of the label
            indicator matrix as a label.
        ``'macro'``:
            Calculate metrics for each label, and find their unweighted
            mean.  This does not take label imbalance into account.
        ``'weighted'``:
            Calculate metrics for each label, and find their average, weighted
            by support (the number of true instances for each label).
        ``'samples'``:
            Calculate metrics for each instance, and find their average.

    sample_weight : array-like of shape = [n_samples], optional
        Sample weights.

    binary_metric : callable, returns shape [n_classes]
        The binary metric function to use.

    Returns
    -------
    score : float or array of shape [n_classes]
        If not ``None``, average the score, else return the score for each
        classes.

    """
    average_options = (None, 'micro', 'macro', 'weighted', 'samples')
    if average not in average_options:
        raise ValueError('average has to be one of {0}'
                         ''.format(average_options))

    y_type = type_of_target(y_true)
    if y_type not in ("binary", "multilabel-indicator"):
        raise ValueError("{0} format is not supported".format(y_type))

    # Plain binary problems need no per-class loop or averaging.
    if y_type == "binary":
        return binary_metric(y_true, y_score, sample_weight=sample_weight)

    check_consistent_length(y_true, y_score, sample_weight)
    y_true = check_array(y_true)
    y_score = check_array(y_score)

    # By default we iterate over classes (axis 1) and weight per sample.
    not_average_axis = 1
    score_weight = sample_weight
    average_weight = None

    if average == "micro":
        # Flatten to one long binary problem; replicate each sample's
        # weight once per class so it still weighs every flattened entry.
        if score_weight is not None:
            score_weight = np.repeat(score_weight, y_true.shape[1])
        y_true = y_true.ravel()
        y_score = y_score.ravel()

    elif average == 'weighted':
        # Per-class averaging weights = (sample-weighted) support per label.
        if score_weight is not None:
            average_weight = np.sum(np.multiply(
                y_true, np.reshape(score_weight, (-1, 1))), axis=0)
        else:
            average_weight = np.sum(y_true, axis=0)
        # No positive labels at all: the weighted average is defined as 0.
        if average_weight.sum() == 0:
            return 0

    elif average == 'samples':
        # swap average_weight <-> score_weight
        average_weight = score_weight
        score_weight = None
        not_average_axis = 0

    # Guarantee 2-d shape so the per-class `take` below works uniformly
    # (micro averaging raveled the arrays to 1-d above).
    if y_true.ndim == 1:
        y_true = y_true.reshape((-1, 1))

    if y_score.ndim == 1:
        y_score = y_score.reshape((-1, 1))

    # Evaluate the binary metric once per class (or per sample for
    # average='samples', where not_average_axis is 0).
    n_classes = y_score.shape[not_average_axis]
    score = np.zeros((n_classes,))
    for c in range(n_classes):
        y_true_c = y_true.take([c], axis=not_average_axis).ravel()
        y_score_c = y_score.take([c], axis=not_average_axis).ravel()
        score[c] = binary_metric(y_true_c, y_score_c,
                                 sample_weight=score_weight)

    # Average the results
    if average is not None:
        return np.average(score, weights=average_weight)
    else:
        return score
|
bsd-3-clause
|
timtammittee/thorns
|
tests/test_map_nested.py
|
1
|
1507
|
# -*- coding: utf-8 -*-
"""Test nesting of thorns.unil.map
"""
from __future__ import division, print_function, absolute_import
from __future__ import unicode_literals
__author__ = "Marek Rudnicki"
__copyright__ = "Copyright 2014, Marek Rudnicki"
__license__ = "GPLv3+"
import tempfile
import shutil
import pytest
import numpy as np
from numpy.testing import assert_equal
import pandas as pd
from pandas.util.testing import assert_frame_equal
import thorns as th
import time
@pytest.fixture(scope="function")
def workdir(request):
    """Yield a fresh temporary directory, removed when the test finishes."""
    tmp_path = tempfile.mkdtemp()

    def cleanup():
        print("Removing temp dir: {}".format(tmp_path))
        shutil.rmtree(tmp_path, ignore_errors=True)

    request.addfinalizer(cleanup)
    return tmp_path
def square(x):
    """Return the square of *x*."""
    squared = x ** 2
    return squared
def sum_of_squares(length, workdir):
    """Map `square` over 0..length-1 with th.util.map and sum the results.

    Runs inside another th.util.map call, so the 'multiprocessing' backend
    requested here should be inhibited to 'serial' by thorns.
    """
    vec = {'x': np.arange(length)}
    squares = th.util.map(
        square,
        vec,
        backend='multiprocessing',  # should be inhibited to 'serial'
        cache='no',
        workdir=workdir
    )
    result = np.sum(squares.values)
    return result
def test_map_nested(workdir):
    """A map nested inside a mapped function must still produce correct sums."""
    lengths = {'length': [2, 3]}
    actual = th.util.map(
        sum_of_squares,
        lengths,
        backend='multiprocessing',
        cache='no',
        workdir=workdir,
        kwargs={'workdir': workdir},
    )
    # sum of squares of 0..1 == 1; of 0..2 == 5
    expected = pd.DataFrame({
        'length': [2,3],
        0: [1,5],
    }).set_index('length')
    assert_frame_equal(expected, actual)
|
gpl-3.0
|
phobson/seaborn
|
seaborn/tests/test_categorical.py
|
3
|
94050
|
import numpy as np
import pandas as pd
import scipy
from scipy import stats, spatial
import matplotlib as mpl
import matplotlib.pyplot as plt
from matplotlib.colors import rgb2hex
from distutils.version import LooseVersion
import pytest
import nose.tools as nt
import numpy.testing as npt
from numpy.testing.decorators import skipif
from .. import categorical as cat
from .. import palettes
pandas_has_categoricals = LooseVersion(pd.__version__) >= "0.15"
mpl_barplot_change = LooseVersion("2.0.1")
class CategoricalFixture(object):
    """Test boxplot (also base class for things like violinplots)."""
    # Fixed seed so every test sees identical pseudo-random data.
    rs = np.random.RandomState(30)
    n_total = 60
    # Wide-form data: n_total/3 rows x 3 columns of normals.
    x = rs.randn(int(n_total / 3), 3)
    x_df = pd.DataFrame(x, columns=pd.Series(list("XYZ"), name="big"))
    # Long-form data: values plus grouping columns of different cardinality.
    y = pd.Series(rs.randn(n_total), name="y_data")
    g = pd.Series(np.repeat(list("abc"), int(n_total / 3)), name="small")
    h = pd.Series(np.tile(list("mn"), int(n_total / 2)), name="medium")
    u = pd.Series(np.tile(list("jkh"), int(n_total / 3)))
    df = pd.DataFrame(dict(y=y, g=g, h=h, u=u))
    x_df["W"] = g
class TestCategoricalPlotter(CategoricalFixture):
    def test_wide_df_data(self):
        """Wide DataFrame input: one group per column, names from columns."""
        p = cat._CategoricalPlotter()
        # Test basic wide DataFrame
        p.establish_variables(data=self.x_df)
        # Check data attribute
        for x, y, in zip(p.plot_data, self.x_df[["X", "Y", "Z"]].values.T):
            npt.assert_array_equal(x, y)
        # Check semantic attributes
        nt.assert_equal(p.orient, "v")
        nt.assert_is(p.plot_hues, None)
        nt.assert_is(p.group_label, "big")
        nt.assert_is(p.value_label, None)
        # Test wide dataframe with forced horizontal orientation
        p.establish_variables(data=self.x_df, orient="horiz")
        nt.assert_equal(p.orient, "h")
        # Test exception by trying to hue-group with a wide dataframe
        with nt.assert_raises(ValueError):
            p.establish_variables(hue="d", data=self.x_df)
    def test_1d_input_data(self):
        """1-d arrays/lists become a single unnamed group of plot data."""
        p = cat._CategoricalPlotter()
        # Test basic vector data
        x_1d_array = self.x.ravel()
        p.establish_variables(data=x_1d_array)
        nt.assert_equal(len(p.plot_data), 1)
        nt.assert_equal(len(p.plot_data[0]), self.n_total)
        nt.assert_is(p.group_label, None)
        nt.assert_is(p.value_label, None)
        # Test basic vector data in list form
        x_1d_list = x_1d_array.tolist()
        p.establish_variables(data=x_1d_list)
        nt.assert_equal(len(p.plot_data), 1)
        nt.assert_equal(len(p.plot_data[0]), self.n_total)
        nt.assert_is(p.group_label, None)
        nt.assert_is(p.value_label, None)
        # Test an object array that looks 1D but isn't
        x_notreally_1d = np.array([self.x.ravel(),
                                   self.x.ravel()[:int(self.n_total / 2)]])
        p.establish_variables(data=x_notreally_1d)
        nt.assert_equal(len(p.plot_data), 2)
        nt.assert_equal(len(p.plot_data[0]), self.n_total)
        nt.assert_equal(len(p.plot_data[1]), self.n_total / 2)
        nt.assert_is(p.group_label, None)
        nt.assert_is(p.value_label, None)
    def test_2d_input_data(self):
        """Degenerate 2-d input (single row/column) is treated as one vector."""
        p = cat._CategoricalPlotter()
        x = self.x[:, 0]
        # Test vector data that looks 2D but doesn't really have columns
        p.establish_variables(data=x[:, np.newaxis])
        nt.assert_equal(len(p.plot_data), 1)
        nt.assert_equal(len(p.plot_data[0]), self.x.shape[0])
        nt.assert_is(p.group_label, None)
        nt.assert_is(p.value_label, None)
        # Test vector data that looks 2D but doesn't really have rows
        p.establish_variables(data=x[np.newaxis, :])
        nt.assert_equal(len(p.plot_data), 1)
        nt.assert_equal(len(p.plot_data[0]), self.x.shape[0])
        nt.assert_is(p.group_label, None)
        nt.assert_is(p.value_label, None)
    def test_3d_input_data(self):
        """Genuinely 3-d input is rejected with a ValueError."""
        p = cat._CategoricalPlotter()
        # Test that passing actually 3D data raises
        x = np.zeros((5, 5, 5))
        with nt.assert_raises(ValueError):
            p.establish_variables(data=x)
    def test_list_of_array_input_data(self):
        """A list of vectors yields one unnamed group per inner list."""
        p = cat._CategoricalPlotter()
        # Test 2D input in list form
        x_list = self.x.T.tolist()
        p.establish_variables(data=x_list)
        nt.assert_equal(len(p.plot_data), 3)
        lengths = [len(v_i) for v_i in p.plot_data]
        nt.assert_equal(lengths, [self.n_total / 3] * 3)
        nt.assert_is(p.group_label, None)
        nt.assert_is(p.value_label, None)
    def test_wide_array_input_data(self):
        """A 2-d array is split column-wise into unnamed groups."""
        p = cat._CategoricalPlotter()
        # Test 2D input in array form
        p.establish_variables(data=self.x)
        nt.assert_equal(np.shape(p.plot_data), (3, self.n_total / 3))
        npt.assert_array_equal(p.plot_data, self.x.T)
        nt.assert_is(p.group_label, None)
        nt.assert_is(p.value_label, None)
    def test_single_long_direct_inputs(self):
        """A Series/array passed directly sets orientation and value label."""
        p = cat._CategoricalPlotter()
        # Test passing a series to the x variable
        p.establish_variables(x=self.y)
        npt.assert_equal(p.plot_data, [self.y])
        nt.assert_equal(p.orient, "h")
        nt.assert_equal(p.value_label, "y_data")
        nt.assert_is(p.group_label, None)
        # Test passing a series to the y variable
        p.establish_variables(y=self.y)
        npt.assert_equal(p.plot_data, [self.y])
        nt.assert_equal(p.orient, "v")
        nt.assert_equal(p.value_label, "y_data")
        nt.assert_is(p.group_label, None)
        # Test passing an array to the y variable
        p.establish_variables(y=self.y.values)
        npt.assert_equal(p.plot_data, [self.y])
        nt.assert_equal(p.orient, "v")
        nt.assert_is(p.value_label, None)
        nt.assert_is(p.group_label, None)
    def test_single_long_indirect_inputs(self):
        """String variable names are resolved against the `data` DataFrame."""
        p = cat._CategoricalPlotter()
        # Test referencing a DataFrame series in the x variable
        p.establish_variables(x="y", data=self.df)
        npt.assert_equal(p.plot_data, [self.y])
        nt.assert_equal(p.orient, "h")
        nt.assert_equal(p.value_label, "y")
        nt.assert_is(p.group_label, None)
        # Test referencing a DataFrame series in the y variable
        p.establish_variables(y="y", data=self.df)
        npt.assert_equal(p.plot_data, [self.y])
        nt.assert_equal(p.orient, "v")
        nt.assert_equal(p.value_label, "y")
        nt.assert_is(p.group_label, None)
    def test_longform_groupby(self):
        """Long-form input is grouped by `g` and nested by the hue variable."""
        p = cat._CategoricalPlotter()
        # Test a vertically oriented grouped and nested plot
        p.establish_variables("g", "y", "h", data=self.df)
        nt.assert_equal(len(p.plot_data), 3)
        nt.assert_equal(len(p.plot_hues), 3)
        nt.assert_equal(p.orient, "v")
        nt.assert_equal(p.value_label, "y")
        nt.assert_equal(p.group_label, "g")
        nt.assert_equal(p.hue_title, "h")
        for group, vals in zip(["a", "b", "c"], p.plot_data):
            npt.assert_array_equal(vals, self.y[self.g == group])
        for group, hues in zip(["a", "b", "c"], p.plot_hues):
            npt.assert_array_equal(hues, self.h[self.g == group])
        # Test a grouped and nested plot with direct array value data
        p.establish_variables("g", self.y.values, "h", self.df)
        nt.assert_is(p.value_label, None)
        nt.assert_equal(p.group_label, "g")
        for group, vals in zip(["a", "b", "c"], p.plot_data):
            npt.assert_array_equal(vals, self.y[self.g == group])
        # Test a grouped and nested plot with direct array hue data
        p.establish_variables("g", "y", self.h.values, self.df)
        for group, hues in zip(["a", "b", "c"], p.plot_hues):
            npt.assert_array_equal(hues, self.h[self.g == group])
        # Test categorical grouping data
        if pandas_has_categoricals:
            df = self.df.copy()
            df.g = df.g.astype("category")
            # Test that horizontal orientation is automatically detected
            p.establish_variables("y", "g", "h", data=df)
            nt.assert_equal(len(p.plot_data), 3)
            nt.assert_equal(len(p.plot_hues), 3)
            nt.assert_equal(p.orient, "h")
            nt.assert_equal(p.value_label, "y")
            nt.assert_equal(p.group_label, "g")
            nt.assert_equal(p.hue_title, "h")
            for group, vals in zip(["a", "b", "c"], p.plot_data):
                npt.assert_array_equal(vals, self.y[self.g == group])
            for group, hues in zip(["a", "b", "c"], p.plot_hues):
                npt.assert_array_equal(hues, self.h[self.g == group])
    def test_input_validation(self):
        """An unresolvable variable name in any role raises ValueError."""
        p = cat._CategoricalPlotter()
        kws = dict(x="g", y="y", hue="h", units="u", data=self.df)
        for input in ["x", "y", "hue", "units"]:
            input_kws = kws.copy()
            input_kws[input] = "bad_input"
            with nt.assert_raises(ValueError):
                p.establish_variables(**input_kws)
    def test_order(self):
        """Group order: inferred, explicitly specified, or from categoricals."""
        p = cat._CategoricalPlotter()
        # Test inferred order from a wide dataframe input
        p.establish_variables(data=self.x_df)
        nt.assert_equal(p.group_names, ["X", "Y", "Z"])
        # Test specified order with a wide dataframe input
        p.establish_variables(data=self.x_df, order=["Y", "Z", "X"])
        nt.assert_equal(p.group_names, ["Y", "Z", "X"])
        for group, vals in zip(["Y", "Z", "X"], p.plot_data):
            npt.assert_array_equal(vals, self.x_df[group])
        with nt.assert_raises(ValueError):
            p.establish_variables(data=self.x, order=[1, 2, 0])
        # Test inferred order from a grouped longform input
        p.establish_variables("g", "y", data=self.df)
        nt.assert_equal(p.group_names, ["a", "b", "c"])
        # Test specified order from a grouped longform input
        p.establish_variables("g", "y", data=self.df, order=["b", "a", "c"])
        nt.assert_equal(p.group_names, ["b", "a", "c"])
        for group, vals in zip(["b", "a", "c"], p.plot_data):
            npt.assert_array_equal(vals, self.y[self.g == group])
        # Test inferred order from a grouped input with categorical groups
        if pandas_has_categoricals:
            df = self.df.copy()
            df.g = df.g.astype("category")
            df.g = df.g.cat.reorder_categories(["c", "b", "a"])
            p.establish_variables("g", "y", data=df)
            nt.assert_equal(p.group_names, ["c", "b", "a"])
            for group, vals in zip(["c", "b", "a"], p.plot_data):
                npt.assert_array_equal(vals, self.y[self.g == group])
            df.g = (df.g.cat.add_categories("d")
                    .cat.reorder_categories(["c", "b", "d", "a"]))
            p.establish_variables("g", "y", data=df)
            nt.assert_equal(p.group_names, ["c", "b", "d", "a"])
    def test_hue_order(self):
        """hue_names honors inferred, explicit, and categorical-dtype order."""
        p = cat._CategoricalPlotter()
        # Test inferred hue order
        p.establish_variables("g", "y", "h", data=self.df)
        nt.assert_equal(p.hue_names, ["m", "n"])
        # Test specified hue order
        p.establish_variables("g", "y", "h", data=self.df,
                              hue_order=["n", "m"])
        nt.assert_equal(p.hue_names, ["n", "m"])
        # Test inferred hue order from a categorical hue input
        if pandas_has_categoricals:
            df = self.df.copy()
            df.h = df.h.astype("category")
            df.h = df.h.cat.reorder_categories(["n", "m"])
            p.establish_variables("g", "y", "h", data=df)
            nt.assert_equal(p.hue_names, ["n", "m"])
            # Unobserved categories still contribute to the hue order
            df.h = (df.h.cat.add_categories("o")
                        .cat.reorder_categories(["o", "m", "n"]))
            p.establish_variables("g", "y", "h", data=df)
            nt.assert_equal(p.hue_names, ["o", "m", "n"])
def test_plot_units(self):
p = cat._CategoricalPlotter()
p.establish_variables("g", "y", "h", data=self.df)
nt.assert_is(p.plot_units, None)
p.establish_variables("g", "y", "h", data=self.df, units="u")
for group, units in zip(["a", "b", "c"], p.plot_units):
npt.assert_array_equal(units, self.u[self.g == group])
def test_infer_orient(self):
p = cat._CategoricalPlotter()
cats = pd.Series(["a", "b", "c"] * 10)
nums = pd.Series(self.rs.randn(30))
nt.assert_equal(p.infer_orient(cats, nums), "v")
nt.assert_equal(p.infer_orient(nums, cats), "h")
nt.assert_equal(p.infer_orient(nums, None), "h")
nt.assert_equal(p.infer_orient(None, nums), "v")
nt.assert_equal(p.infer_orient(nums, nums, "vert"), "v")
nt.assert_equal(p.infer_orient(nums, nums, "hori"), "h")
with nt.assert_raises(ValueError):
p.infer_orient(cats, cats)
if pandas_has_categoricals:
cats = pd.Series([0, 1, 2] * 10, dtype="category")
nt.assert_equal(p.infer_orient(cats, nums), "v")
nt.assert_equal(p.infer_orient(nums, cats), "h")
with nt.assert_raises(ValueError):
p.infer_orient(cats, cats)
    def test_default_palettes(self):
        """Default palette size follows the number of groups or hue levels."""
        p = cat._CategoricalPlotter()
        # Test palette mapping the x position
        p.establish_variables("g", "y", data=self.df)
        p.establish_colors(None, None, 1)
        nt.assert_equal(p.colors, palettes.color_palette(n_colors=3))
        # Test palette mapping the hue position
        p.establish_variables("g", "y", "h", data=self.df)
        p.establish_colors(None, None, 1)
        nt.assert_equal(p.colors, palettes.color_palette(n_colors=2))
    def test_default_palette_with_many_levels(self):
        """When the active palette has fewer colors than levels, fall back to husl."""
        with palettes.color_palette(["blue", "red"], 2):
            p = cat._CategoricalPlotter()
            p.establish_variables("g", "y", data=self.df)
            p.establish_colors(None, None, 1)
            npt.assert_array_equal(p.colors,
                                   palettes.husl_palette(3, l=.7))  # noqa
    def test_specific_color(self):
        """A single color applies to every group; with hue it seeds a light blend."""
        p = cat._CategoricalPlotter()
        # Test the same color for each x position
        p.establish_variables("g", "y", data=self.df)
        p.establish_colors("blue", None, 1)
        blue_rgb = mpl.colors.colorConverter.to_rgb("blue")
        nt.assert_equal(p.colors, [blue_rgb] * 3)
        # Test a color-based blend for the hue mapping
        p.establish_variables("g", "y", "h", data=self.df)
        p.establish_colors("#ff0022", None, 1)
        rgba_array = np.array(palettes.light_palette("#ff0022", 2))
        npt.assert_array_almost_equal(p.colors,
                                      rgba_array[:, :3])
    def test_specific_palette(self):
        """A named palette maps to groups/hues and takes priority over color."""
        p = cat._CategoricalPlotter()
        # Test palette mapping the x position
        p.establish_variables("g", "y", data=self.df)
        p.establish_colors(None, "dark", 1)
        nt.assert_equal(p.colors, palettes.color_palette("dark", 3))
        # Test palette mapping the hue position
        p.establish_variables("g", "y", "h", data=self.df)
        p.establish_colors(None, "muted", 1)
        nt.assert_equal(p.colors, palettes.color_palette("muted", 2))
        # Test that specified palette overrides specified color
        p = cat._CategoricalPlotter()
        p.establish_variables("g", "y", data=self.df)
        p.establish_colors("blue", "deep", 1)
        nt.assert_equal(p.colors, palettes.color_palette("deep", 3))
def test_dict_as_palette(self):
p = cat._CategoricalPlotter()
p.establish_variables("g", "y", "h", data=self.df)
pal = {"m": (0, 0, 1), "n": (1, 0, 0)}
p.establish_colors(None, pal, 1)
nt.assert_equal(p.colors, [(0, 0, 1), (1, 0, 0)])
    def test_palette_desaturation(self):
        """The saturation factor is applied to colors (white is unchanged)."""
        p = cat._CategoricalPlotter()
        p.establish_variables("g", "y", data=self.df)
        p.establish_colors((0, 0, 1), None, .5)
        nt.assert_equal(p.colors, [(.25, .25, .75)] * 3)
        p.establish_colors(None, [(0, 0, 1), (1, 0, 0), "w"], .5)
        nt.assert_equal(p.colors, [(.25, .25, .75),
                                   (.75, .25, .25),
                                   (1, 1, 1)])
class TestCategoricalStatPlotter(CategoricalFixture):
    """Tests for statistic estimation and confidence-interval drawing."""

    def test_no_bootstrappig(self):
        """With ci=None, no confidence intervals are computed."""
        # NOTE(review): method-name typo ("bootstrappig") kept so external
        # test selections/history are not broken by a rename.
        p = cat._CategoricalStatPlotter()
        p.establish_variables("g", "y", data=self.df)
        p.estimate_statistic(np.mean, None, 100)
        npt.assert_array_equal(p.confint, np.array([]))
        p.establish_variables("g", "y", "h", data=self.df)
        p.estimate_statistic(np.mean, None, 100)
        npt.assert_array_equal(p.confint, np.array([[], [], []]))

    def test_single_layer_stats(self):
        """Group means and bootstrap CIs match the analytic normal interval."""
        p = cat._CategoricalStatPlotter()
        g = pd.Series(np.repeat(list("abc"), 100))
        y = pd.Series(np.random.RandomState(0).randn(300))
        p.establish_variables(g, y)
        p.estimate_statistic(np.mean, 95, 10000)
        nt.assert_equal(p.statistic.shape, (3,))
        nt.assert_equal(p.confint.shape, (3, 2))
        npt.assert_array_almost_equal(p.statistic,
                                      y.groupby(g).mean())
        for ci, (_, grp_y) in zip(p.confint, y.groupby(g)):
            sem = stats.sem(grp_y)
            mean = grp_y.mean()
            # Normal-theory 95% interval as the reference for the bootstrap CI
            # (removed a dead `stats.norm.ppf(.975)` call whose result was
            # discarded)
            half_ci = stats.norm.ppf(.975) * sem
            ci_want = mean - half_ci, mean + half_ci
            npt.assert_array_almost_equal(ci_want, ci, 2)

    def test_single_layer_stats_with_units(self):
        """Clustered observations (units) widen the CIs but not the means."""
        p = cat._CategoricalStatPlotter()
        g = pd.Series(np.repeat(list("abc"), 90))
        y = pd.Series(np.random.RandomState(0).randn(270))
        u = pd.Series(np.repeat(np.tile(list("xyz"), 30), 3))
        y[u == "x"] -= 3
        y[u == "y"] += 3
        p.establish_variables(g, y)
        p.estimate_statistic(np.mean, 95, 10000)
        stat1, ci1 = p.statistic, p.confint
        p.establish_variables(g, y, units=u)
        p.estimate_statistic(np.mean, 95, 10000)
        stat2, ci2 = p.statistic, p.confint
        npt.assert_array_equal(stat1, stat2)
        ci1_size = ci1[:, 1] - ci1[:, 0]
        ci2_size = ci2[:, 1] - ci2[:, 0]
        npt.assert_array_less(ci1_size, ci2_size)

    def test_single_layer_stats_with_missing_data(self):
        """Levels in `order` without data get NaN statistic and CI."""
        p = cat._CategoricalStatPlotter()
        g = pd.Series(np.repeat(list("abc"), 100))
        y = pd.Series(np.random.RandomState(0).randn(300))
        p.establish_variables(g, y, order=list("abdc"))
        p.estimate_statistic(np.mean, 95, 10000)
        nt.assert_equal(p.statistic.shape, (4,))
        nt.assert_equal(p.confint.shape, (4, 2))
        mean = y[g == "b"].mean()
        sem = stats.sem(y[g == "b"])
        half_ci = stats.norm.ppf(.975) * sem
        ci = mean - half_ci, mean + half_ci
        npt.assert_almost_equal(p.statistic[1], mean)
        npt.assert_array_almost_equal(p.confint[1], ci, 2)
        # "d" (index 2) has no observations
        npt.assert_equal(p.statistic[2], np.nan)
        npt.assert_array_equal(p.confint[2], (np.nan, np.nan))

    def test_nested_stats(self):
        """With hue nesting, statistics/CIs are computed per (group, hue) cell."""
        p = cat._CategoricalStatPlotter()
        g = pd.Series(np.repeat(list("abc"), 100))
        h = pd.Series(np.tile(list("xy"), 150))
        y = pd.Series(np.random.RandomState(0).randn(300))
        p.establish_variables(g, y, h)
        p.estimate_statistic(np.mean, 95, 50000)
        nt.assert_equal(p.statistic.shape, (3, 2))
        nt.assert_equal(p.confint.shape, (3, 2, 2))
        npt.assert_array_almost_equal(p.statistic,
                                      y.groupby([g, h]).mean().unstack())
        for ci_g, (_, grp_y) in zip(p.confint, y.groupby(g)):
            # h alternates x/y within each group, so even/odd rows split hues
            for ci, hue_y in zip(ci_g, [grp_y[::2], grp_y[1::2]]):
                sem = stats.sem(hue_y)
                mean = hue_y.mean()
                half_ci = stats.norm.ppf(.975) * sem
                ci_want = mean - half_ci, mean + half_ci
                npt.assert_array_almost_equal(ci_want, ci, 2)

    def test_nested_stats_with_units(self):
        """Units widen nested CIs but leave the statistics unchanged."""
        p = cat._CategoricalStatPlotter()
        g = pd.Series(np.repeat(list("abc"), 90))
        h = pd.Series(np.tile(list("xy"), 135))
        u = pd.Series(np.repeat(list("ijkijk"), 45))
        y = pd.Series(np.random.RandomState(0).randn(270))
        y[u == "i"] -= 3
        y[u == "k"] += 3
        p.establish_variables(g, y, h)
        p.estimate_statistic(np.mean, 95, 10000)
        stat1, ci1 = p.statistic, p.confint
        p.establish_variables(g, y, h, units=u)
        p.estimate_statistic(np.mean, 95, 10000)
        stat2, ci2 = p.statistic, p.confint
        npt.assert_array_equal(stat1, stat2)
        ci1_size = ci1[:, 0, 1] - ci1[:, 0, 0]
        ci2_size = ci2[:, 0, 1] - ci2[:, 0, 0]
        npt.assert_array_less(ci1_size, ci2_size)

    def test_nested_stats_with_missing_data(self):
        """Missing group or hue levels yield NaN cells in nested results."""
        p = cat._CategoricalStatPlotter()
        g = pd.Series(np.repeat(list("abc"), 100))
        y = pd.Series(np.random.RandomState(0).randn(300))
        h = pd.Series(np.tile(list("xy"), 150))
        p.establish_variables(g, y, h,
                              order=list("abdc"),
                              hue_order=list("zyx"))
        p.estimate_statistic(np.mean, 95, 50000)
        nt.assert_equal(p.statistic.shape, (4, 3))
        nt.assert_equal(p.confint.shape, (4, 3, 2))
        mean = y[(g == "b") & (h == "x")].mean()
        sem = stats.sem(y[(g == "b") & (h == "x")])
        half_ci = stats.norm.ppf(.975) * sem
        ci = mean - half_ci, mean + half_ci
        npt.assert_almost_equal(p.statistic[1, 2], mean)
        npt.assert_array_almost_equal(p.confint[1, 2], ci, 2)
        # hue level "z" and group "d" have no observations at all
        npt.assert_array_equal(p.statistic[:, 0], [np.nan] * 4)
        npt.assert_array_equal(p.statistic[2], [np.nan] * 3)
        npt.assert_array_equal(p.confint[:, 0],
                               np.zeros((4, 2)) * np.nan)
        npt.assert_array_equal(p.confint[2],
                               np.zeros((3, 2)) * np.nan)

    def test_sd_error_bars(self):
        """ci="sd" draws mean +/- standard deviation instead of a bootstrap CI."""
        p = cat._CategoricalStatPlotter()
        g = pd.Series(np.repeat(list("abc"), 100))
        y = pd.Series(np.random.RandomState(0).randn(300))
        p.establish_variables(g, y)
        p.estimate_statistic(np.mean, "sd", None)
        nt.assert_equal(p.statistic.shape, (3,))
        nt.assert_equal(p.confint.shape, (3, 2))
        npt.assert_array_almost_equal(p.statistic,
                                      y.groupby(g).mean())
        for ci, (_, grp_y) in zip(p.confint, y.groupby(g)):
            mean = grp_y.mean()
            half_ci = np.std(grp_y)
            ci_want = mean - half_ci, mean + half_ci
            npt.assert_array_almost_equal(ci_want, ci, 2)

    def test_nested_sd_error_bars(self):
        """ci="sd" also applies per (group, hue) cell when nesting."""
        p = cat._CategoricalStatPlotter()
        g = pd.Series(np.repeat(list("abc"), 100))
        h = pd.Series(np.tile(list("xy"), 150))
        y = pd.Series(np.random.RandomState(0).randn(300))
        p.establish_variables(g, y, h)
        p.estimate_statistic(np.mean, "sd", None)
        nt.assert_equal(p.statistic.shape, (3, 2))
        nt.assert_equal(p.confint.shape, (3, 2, 2))
        npt.assert_array_almost_equal(p.statistic,
                                      y.groupby([g, h]).mean().unstack())
        for ci_g, (_, grp_y) in zip(p.confint, y.groupby(g)):
            for ci, hue_y in zip(ci_g, [grp_y[::2], grp_y[1::2]]):
                mean = hue_y.mean()
                half_ci = np.std(hue_y)
                ci_want = mean - half_ci, mean + half_ci
                npt.assert_array_almost_equal(ci_want, ci, 2)

    def test_draw_cis(self):
        """CI lines are drawn at the right positions, colors, caps, and widths."""
        p = cat._CategoricalStatPlotter()

        # Test vertical CIs
        p.orient = "v"
        f, ax = plt.subplots()
        at_group = [0, 1]
        confints = [(.5, 1.5), (.25, .8)]
        colors = [".2", ".3"]
        p.draw_confints(ax, at_group, confints, colors)
        lines = ax.lines
        for line, at, ci, c in zip(lines, at_group, confints, colors):
            x, y = line.get_xydata().T
            npt.assert_array_equal(x, [at, at])
            npt.assert_array_equal(y, ci)
            nt.assert_equal(line.get_color(), c)
        plt.close("all")

        # Test horizontal CIs
        p.orient = "h"
        f, ax = plt.subplots()
        p.draw_confints(ax, at_group, confints, colors)
        lines = ax.lines
        for line, at, ci, c in zip(lines, at_group, confints, colors):
            x, y = line.get_xydata().T
            npt.assert_array_equal(x, ci)
            npt.assert_array_equal(y, [at, at])
            nt.assert_equal(line.get_color(), c)
        plt.close("all")

        # Test vertical CIs with endcaps
        p.orient = "v"
        f, ax = plt.subplots()
        p.draw_confints(ax, at_group, confints, colors, capsize=0.3)
        # idiomatic last-element access instead of ax.lines[len(ax.lines) - 1]
        capline = ax.lines[-1]
        caplinestart = capline.get_xdata()[0]
        caplineend = capline.get_xdata()[1]
        caplinelength = abs(caplineend - caplinestart)
        nt.assert_almost_equal(caplinelength, 0.3)
        nt.assert_equal(len(ax.lines), 6)
        plt.close("all")

        # Test horizontal CIs with endcaps
        p.orient = "h"
        f, ax = plt.subplots()
        p.draw_confints(ax, at_group, confints, colors, capsize=0.3)
        capline = ax.lines[-1]
        caplinestart = capline.get_ydata()[0]
        caplineend = capline.get_ydata()[1]
        caplinelength = abs(caplineend - caplinestart)
        nt.assert_almost_equal(caplinelength, 0.3)
        nt.assert_equal(len(ax.lines), 6)

        # Test extra keyword arguments
        f, ax = plt.subplots()
        p.draw_confints(ax, at_group, confints, colors, lw=4)
        line = ax.lines[0]
        nt.assert_equal(line.get_linewidth(), 4)
        plt.close("all")

        # Test errwidth is set appropriately
        f, ax = plt.subplots()
        p.draw_confints(ax, at_group, confints, colors, errwidth=2)
        capline = ax.lines[-1]
        # use the public accessor rather than the private _linewidth attribute
        nt.assert_equal(capline.get_linewidth(), 2)
        nt.assert_equal(len(ax.lines), 2)
        plt.close("all")
class TestBoxPlotter(CategoricalFixture):
    """Tests for the boxplot plotter: geometry, colors, and axis annotation."""
    default_kws = dict(x=None, y=None, hue=None, data=None,
                       order=None, hue_order=None,
                       orient=None, color=None, palette=None,
                       saturation=.75, width=.8, dodge=True,
                       fliersize=5, linewidth=None)
    def test_nested_width(self):
        """With dodge, nested box width is width / n_hue_levels (minus a gap)."""
        kws = self.default_kws.copy()
        p = cat._BoxPlotter(**kws)
        p.establish_variables("g", "y", "h", data=self.df)
        nt.assert_equal(p.nested_width, .4 * .98)
        kws = self.default_kws.copy()
        kws["width"] = .6
        p = cat._BoxPlotter(**kws)
        p.establish_variables("g", "y", "h", data=self.df)
        nt.assert_equal(p.nested_width, .3 * .98)
        # Without dodge the boxes keep the full width
        kws = self.default_kws.copy()
        kws["dodge"] = False
        p = cat._BoxPlotter(**kws)
        p.establish_variables("g", "y", "h", data=self.df)
        nt.assert_equal(p.nested_width, .8)
    def test_hue_offsets(self):
        """Hue levels are offset symmetrically around each group position."""
        p = cat._BoxPlotter(**self.default_kws)
        p.establish_variables("g", "y", "h", data=self.df)
        npt.assert_array_equal(p.hue_offsets, [-.2, .2])
        kws = self.default_kws.copy()
        kws["width"] = .6
        p = cat._BoxPlotter(**kws)
        p.establish_variables("g", "y", "h", data=self.df)
        npt.assert_array_equal(p.hue_offsets, [-.15, .15])
        # Three hue levels: middle one sits on the group position
        p = cat._BoxPlotter(**kws)
        p.establish_variables("h", "y", "g", data=self.df)
        npt.assert_array_almost_equal(p.hue_offsets, [-.2, 0, .2])
    def test_axes_data(self):
        """One box artist per group; one per (group, hue) cell with nesting."""
        ax = cat.boxplot("g", "y", data=self.df)
        nt.assert_equal(len(ax.artists), 3)
        plt.close("all")
        ax = cat.boxplot("g", "y", "h", data=self.df)
        nt.assert_equal(len(ax.artists), 6)
        plt.close("all")
    def test_box_colors(self):
        """Box face colors follow the group palette, or the hue palette tiled."""
        ax = cat.boxplot("g", "y", data=self.df, saturation=1)
        pal = palettes.color_palette(n_colors=3)
        for patch, color in zip(ax.artists, pal):
            nt.assert_equal(patch.get_facecolor()[:3], color)
        plt.close("all")
        ax = cat.boxplot("g", "y", "h", data=self.df, saturation=1)
        pal = palettes.color_palette(n_colors=2)
        for patch, color in zip(ax.artists, pal * 2):
            nt.assert_equal(patch.get_facecolor()[:3], color)
        plt.close("all")
    def test_draw_missing_boxes(self):
        """Ordered levels without data draw no box artist."""
        ax = cat.boxplot("g", "y", data=self.df,
                         order=["a", "b", "c", "d"])
        nt.assert_equal(len(ax.artists), 3)
    def test_missing_data(self):
        """Cells whose data is all-NaN are skipped when drawing."""
        x = ["a", "a", "b", "b", "c", "c", "d", "d"]
        h = ["x", "y", "x", "y", "x", "y", "x", "y"]
        y = self.rs.randn(8)
        y[-2:] = np.nan
        ax = cat.boxplot(x, y)
        nt.assert_equal(len(ax.artists), 3)
        plt.close("all")
        # Restoring one value leaves only one missing (group, hue) cell
        y[-1] = 0
        ax = cat.boxplot(x, y, h)
        nt.assert_equal(len(ax.artists), 7)
        plt.close("all")
    def test_boxplots(self):
        # Smoke test the high level boxplot options
        cat.boxplot("y", data=self.df)
        plt.close("all")
        cat.boxplot(y="y", data=self.df)
        plt.close("all")
        cat.boxplot("g", "y", data=self.df)
        plt.close("all")
        cat.boxplot("y", "g", data=self.df, orient="h")
        plt.close("all")
        cat.boxplot("g", "y", "h", data=self.df)
        plt.close("all")
        cat.boxplot("g", "y", "h", order=list("nabc"), data=self.df)
        plt.close("all")
        cat.boxplot("g", "y", "h", hue_order=list("omn"), data=self.df)
        plt.close("all")
        cat.boxplot("y", "g", "h", data=self.df, orient="h")
        plt.close("all")
    def test_axes_annotation(self):
        """Axis labels, limits, ticks, and legend text reflect the inputs."""
        ax = cat.boxplot("g", "y", data=self.df)
        nt.assert_equal(ax.get_xlabel(), "g")
        nt.assert_equal(ax.get_ylabel(), "y")
        nt.assert_equal(ax.get_xlim(), (-.5, 2.5))
        npt.assert_array_equal(ax.get_xticks(), [0, 1, 2])
        npt.assert_array_equal([l.get_text() for l in ax.get_xticklabels()],
                               ["a", "b", "c"])
        plt.close("all")
        ax = cat.boxplot("g", "y", "h", data=self.df)
        nt.assert_equal(ax.get_xlabel(), "g")
        nt.assert_equal(ax.get_ylabel(), "y")
        npt.assert_array_equal(ax.get_xticks(), [0, 1, 2])
        npt.assert_array_equal([l.get_text() for l in ax.get_xticklabels()],
                               ["a", "b", "c"])
        npt.assert_array_equal([l.get_text() for l in ax.legend_.get_texts()],
                               ["m", "n"])
        plt.close("all")
        # Horizontal orientation flips the axis roles (note inverted ylim)
        ax = cat.boxplot("y", "g", data=self.df, orient="h")
        nt.assert_equal(ax.get_xlabel(), "y")
        nt.assert_equal(ax.get_ylabel(), "g")
        nt.assert_equal(ax.get_ylim(), (2.5, -.5))
        npt.assert_array_equal(ax.get_yticks(), [0, 1, 2])
        npt.assert_array_equal([l.get_text() for l in ax.get_yticklabels()],
                               ["a", "b", "c"])
        plt.close("all")
class TestViolinPlotter(CategoricalFixture):
    """Tests for the violinplot plotter: KDE estimation, scaling, and drawing."""
    default_kws = dict(x=None, y=None, hue=None, data=None,
                       order=None, hue_order=None,
                       bw="scott", cut=2, scale="area", scale_hue=True,
                       gridsize=100, width=.8, inner="box", split=False,
                       dodge=True, orient=None, linewidth=None,
                       color=None, palette=None, saturation=.75)
    def test_split_error(self):
        """split=True with more than two hue levels raises ValueError."""
        kws = self.default_kws.copy()
        kws.update(dict(x="h", y="y", hue="g", data=self.df, split=True))
        with nt.assert_raises(ValueError):
            cat._ViolinPlotter(**kws)
    def test_no_observations(self):
        """Groups with no non-null observations get empty support and a flat density."""
        p = cat._ViolinPlotter(**self.default_kws)
        x = ["a", "a", "b"]
        y = self.rs.randn(3)
        y[-1] = np.nan
        p.establish_variables(x, y)
        p.estimate_densities("scott", 2, "area", True, 20)
        nt.assert_equal(len(p.support[0]), 20)
        nt.assert_equal(len(p.support[1]), 0)
        nt.assert_equal(len(p.density[0]), 20)
        nt.assert_equal(len(p.density[1]), 1)
        nt.assert_equal(p.density[1].item(), 1)
        # Count scaling zeroes out the empty group's density
        p.estimate_densities("scott", 2, "count", True, 20)
        nt.assert_equal(p.density[1].item(), 0)
        # Same behavior with hue nesting
        x = ["a"] * 4 + ["b"] * 2
        y = self.rs.randn(6)
        h = ["m", "n"] * 2 + ["m"] * 2
        p.establish_variables(x, y, h)
        p.estimate_densities("scott", 2, "area", True, 20)
        nt.assert_equal(len(p.support[1][0]), 20)
        nt.assert_equal(len(p.support[1][1]), 0)
        nt.assert_equal(len(p.density[1][0]), 20)
        nt.assert_equal(len(p.density[1][1]), 1)
        nt.assert_equal(p.density[1][1].item(), 1)
        p.estimate_densities("scott", 2, "count", False, 20)
        nt.assert_equal(p.density[1][1].item(), 0)
    def test_single_observation(self):
        """A single-observation group gets a point support and unit density."""
        p = cat._ViolinPlotter(**self.default_kws)
        x = ["a", "a", "b"]
        y = self.rs.randn(3)
        p.establish_variables(x, y)
        p.estimate_densities("scott", 2, "area", True, 20)
        nt.assert_equal(len(p.support[0]), 20)
        nt.assert_equal(len(p.support[1]), 1)
        nt.assert_equal(len(p.density[0]), 20)
        nt.assert_equal(len(p.density[1]), 1)
        nt.assert_equal(p.density[1].item(), 1)
        # Count scaling shrinks the singleton relative to larger groups
        p.estimate_densities("scott", 2, "count", True, 20)
        nt.assert_equal(p.density[1].item(), .5)
        # Same behavior with hue nesting
        x = ["b"] * 4 + ["a"] * 3
        y = self.rs.randn(7)
        h = (["m", "n"] * 4)[:-1]
        p.establish_variables(x, y, h)
        p.estimate_densities("scott", 2, "area", True, 20)
        nt.assert_equal(len(p.support[1][0]), 20)
        nt.assert_equal(len(p.support[1][1]), 1)
        nt.assert_equal(len(p.density[1][0]), 20)
        nt.assert_equal(len(p.density[1][1]), 1)
        nt.assert_equal(p.density[1][1].item(), 1)
        p.estimate_densities("scott", 2, "count", False, 20)
        nt.assert_equal(p.density[1][1].item(), .5)
    def test_dwidth(self):
        """Violin half-width reflects width, hue nesting, and split settings."""
        kws = self.default_kws.copy()
        kws.update(dict(x="g", y="y", data=self.df))
        p = cat._ViolinPlotter(**kws)
        nt.assert_equal(p.dwidth, .4)
        kws.update(dict(width=.4))
        p = cat._ViolinPlotter(**kws)
        nt.assert_equal(p.dwidth, .2)
        kws.update(dict(hue="h", width=.8))
        p = cat._ViolinPlotter(**kws)
        nt.assert_equal(p.dwidth, .2)
        # Split violins share a slot, so each half gets the full width back
        kws.update(dict(split=True))
        p = cat._ViolinPlotter(**kws)
        nt.assert_equal(p.dwidth, .4)
    def test_scale_area(self):
        """Area scaling normalizes the max density while preserving ratios."""
        kws = self.default_kws.copy()
        kws["scale"] = "area"
        p = cat._ViolinPlotter(**kws)
        # Test single layer of grouping
        p.hue_names = None
        density = [self.rs.uniform(0, .8, 50), self.rs.uniform(0, .2, 50)]
        max_before = np.array([d.max() for d in density])
        p.scale_area(density, max_before, False)
        max_after = np.array([d.max() for d in density])
        nt.assert_equal(max_after[0], 1)
        before_ratio = max_before[1] / max_before[0]
        after_ratio = max_after[1] / max_after[0]
        nt.assert_equal(before_ratio, after_ratio)
        # Test nested grouping scaling across all densities
        p.hue_names = ["foo", "bar"]
        density = [[self.rs.uniform(0, .8, 50), self.rs.uniform(0, .2, 50)],
                   [self.rs.uniform(0, .1, 50), self.rs.uniform(0, .02, 50)]]
        max_before = np.array([[r.max() for r in row] for row in density])
        p.scale_area(density, max_before, False)
        max_after = np.array([[r.max() for r in row] for row in density])
        nt.assert_equal(max_after[0, 0], 1)
        before_ratio = max_before[1, 1] / max_before[0, 0]
        after_ratio = max_after[1, 1] / max_after[0, 0]
        nt.assert_equal(before_ratio, after_ratio)
        # Test nested grouping scaling within hue
        p.hue_names = ["foo", "bar"]
        density = [[self.rs.uniform(0, .8, 50), self.rs.uniform(0, .2, 50)],
                   [self.rs.uniform(0, .1, 50), self.rs.uniform(0, .02, 50)]]
        max_before = np.array([[r.max() for r in row] for row in density])
        p.scale_area(density, max_before, True)
        max_after = np.array([[r.max() for r in row] for row in density])
        nt.assert_equal(max_after[0, 0], 1)
        nt.assert_equal(max_after[1, 0], 1)
        before_ratio = max_before[1, 1] / max_before[1, 0]
        after_ratio = max_after[1, 1] / max_after[1, 0]
        nt.assert_equal(before_ratio, after_ratio)
    def test_scale_width(self):
        """Width scaling normalizes every violin's max density to 1."""
        kws = self.default_kws.copy()
        kws["scale"] = "width"
        p = cat._ViolinPlotter(**kws)
        # Test single layer of grouping
        p.hue_names = None
        density = [self.rs.uniform(0, .8, 50), self.rs.uniform(0, .2, 50)]
        p.scale_width(density)
        max_after = np.array([d.max() for d in density])
        npt.assert_array_equal(max_after, [1, 1])
        # Test nested grouping
        p.hue_names = ["foo", "bar"]
        density = [[self.rs.uniform(0, .8, 50), self.rs.uniform(0, .2, 50)],
                   [self.rs.uniform(0, .1, 50), self.rs.uniform(0, .02, 50)]]
        p.scale_width(density)
        max_after = np.array([[r.max() for r in row] for row in density])
        npt.assert_array_equal(max_after, [[1, 1], [1, 1]])
    def test_scale_count(self):
        """Count scaling makes violin width proportional to observation count."""
        kws = self.default_kws.copy()
        kws["scale"] = "count"
        p = cat._ViolinPlotter(**kws)
        # Test single layer of grouping
        p.hue_names = None
        density = [self.rs.uniform(0, .8, 20), self.rs.uniform(0, .2, 40)]
        counts = np.array([20, 40])
        p.scale_count(density, counts, False)
        max_after = np.array([d.max() for d in density])
        npt.assert_array_equal(max_after, [.5, 1])
        # Test nested grouping scaling across all densities
        p.hue_names = ["foo", "bar"]
        density = [[self.rs.uniform(0, .8, 5), self.rs.uniform(0, .2, 40)],
                   [self.rs.uniform(0, .1, 100), self.rs.uniform(0, .02, 50)]]
        counts = np.array([[5, 40], [100, 50]])
        p.scale_count(density, counts, False)
        max_after = np.array([[r.max() for r in row] for row in density])
        npt.assert_array_equal(max_after, [[.05, .4], [1, .5]])
        # Test nested grouping scaling within hue
        p.hue_names = ["foo", "bar"]
        density = [[self.rs.uniform(0, .8, 5), self.rs.uniform(0, .2, 40)],
                   [self.rs.uniform(0, .1, 100), self.rs.uniform(0, .02, 50)]]
        counts = np.array([[5, 40], [100, 50]])
        p.scale_count(density, counts, True)
        max_after = np.array([[r.max() for r in row] for row in density])
        npt.assert_array_equal(max_after, [[.125, 1], [1, .5]])
    def test_bad_scale(self):
        """An unknown scale keyword raises ValueError at construction."""
        kws = self.default_kws.copy()
        kws["scale"] = "not_a_scale_type"
        with nt.assert_raises(ValueError):
            cat._ViolinPlotter(**kws)
    def test_kde_fit(self):
        """fit_kde honors reference-rule and numeric bandwidths."""
        p = cat._ViolinPlotter(**self.default_kws)
        data = self.y
        data_std = data.std(ddof=1)
        # Bandwidth behavior depends on scipy version
        if LooseVersion(scipy.__version__) < "0.11":
            # Test ignoring custom bandwidth on old scipy
            kde, bw = p.fit_kde(self.y, .2)
            nt.assert_is_instance(kde, stats.gaussian_kde)
            nt.assert_equal(kde.factor, kde.scotts_factor())
        else:
            # Test reference rule bandwidth
            kde, bw = p.fit_kde(data, "scott")
            nt.assert_is_instance(kde, stats.gaussian_kde)
            nt.assert_equal(kde.factor, kde.scotts_factor())
            nt.assert_equal(bw, kde.scotts_factor() * data_std)
            # Test numeric scale factor
            kde, bw = p.fit_kde(self.y, .2)
            nt.assert_is_instance(kde, stats.gaussian_kde)
            nt.assert_equal(kde.factor, .2)
            nt.assert_equal(bw, .2 * data_std)
    def test_draw_to_density(self):
        """Lines drawn at a data value extend to the interpolated density edge."""
        p = cat._ViolinPlotter(**self.default_kws)
        # p.dwidth will be 1 for easier testing
        p.width = 2
        # Test vertical plots
        support = np.array([.2, .6])
        density = np.array([.1, .4])
        # Test full vertical plot
        _, ax = plt.subplots()
        p.draw_to_density(ax, 0, .5, support, density, False)
        x, y = ax.lines[0].get_xydata().T
        npt.assert_array_equal(x, [.99 * -.4, .99 * .4])
        npt.assert_array_equal(y, [.5, .5])
        plt.close("all")
        # Test left vertical plot
        _, ax = plt.subplots()
        p.draw_to_density(ax, 0, .5, support, density, "left")
        x, y = ax.lines[0].get_xydata().T
        npt.assert_array_equal(x, [.99 * -.4, 0])
        npt.assert_array_equal(y, [.5, .5])
        plt.close("all")
        # Test right vertical plot
        _, ax = plt.subplots()
        p.draw_to_density(ax, 0, .5, support, density, "right")
        x, y = ax.lines[0].get_xydata().T
        npt.assert_array_equal(x, [0, .99 * .4])
        npt.assert_array_equal(y, [.5, .5])
        plt.close("all")
        # Switch orientation to test horizontal plots
        p.orient = "h"
        support = np.array([.2, .5])
        density = np.array([.3, .7])
        # Test full horizontal plot
        _, ax = plt.subplots()
        p.draw_to_density(ax, 0, .6, support, density, False)
        x, y = ax.lines[0].get_xydata().T
        npt.assert_array_equal(x, [.6, .6])
        npt.assert_array_equal(y, [.99 * -.7, .99 * .7])
        plt.close("all")
        # Test left horizontal plot
        _, ax = plt.subplots()
        p.draw_to_density(ax, 0, .6, support, density, "left")
        x, y = ax.lines[0].get_xydata().T
        npt.assert_array_equal(x, [.6, .6])
        npt.assert_array_equal(y, [.99 * -.7, 0])
        plt.close("all")
        # Test right horizontal plot
        _, ax = plt.subplots()
        p.draw_to_density(ax, 0, .6, support, density, "right")
        x, y = ax.lines[0].get_xydata().T
        npt.assert_array_equal(x, [.6, .6])
        npt.assert_array_equal(y, [0, .99 * .7])
        plt.close("all")
    def test_draw_single_observations(self):
        """A lone observation is drawn as a line spanning the violin width."""
        p = cat._ViolinPlotter(**self.default_kws)
        p.width = 2
        # Test vertical plot
        _, ax = plt.subplots()
        p.draw_single_observation(ax, 1, 1.5, 1)
        x, y = ax.lines[0].get_xydata().T
        npt.assert_array_equal(x, [0, 2])
        npt.assert_array_equal(y, [1.5, 1.5])
        plt.close("all")
        # Test horizontal plot
        p.orient = "h"
        _, ax = plt.subplots()
        p.draw_single_observation(ax, 2, 2.2, .5)
        x, y = ax.lines[0].get_xydata().T
        npt.assert_array_equal(x, [2.2, 2.2])
        npt.assert_array_equal(y, [1.5, 2.5])
        plt.close("all")
    def test_draw_box_lines(self):
        """inner="box" draws a quartile line plus a median point."""
        # Test vertical plot
        kws = self.default_kws.copy()
        kws.update(dict(y="y", data=self.df, inner=None))
        p = cat._ViolinPlotter(**kws)
        _, ax = plt.subplots()
        p.draw_box_lines(ax, self.y, p.support[0], p.density[0], 0)
        nt.assert_equal(len(ax.lines), 2)
        q25, q50, q75 = np.percentile(self.y, [25, 50, 75])
        _, y = ax.lines[1].get_xydata().T
        npt.assert_array_equal(y, [q25, q75])
        _, y = ax.collections[0].get_offsets().T
        nt.assert_equal(y, q50)
        plt.close("all")
        # Test horizontal plot
        kws = self.default_kws.copy()
        kws.update(dict(x="y", data=self.df, inner=None))
        p = cat._ViolinPlotter(**kws)
        _, ax = plt.subplots()
        p.draw_box_lines(ax, self.y, p.support[0], p.density[0], 0)
        nt.assert_equal(len(ax.lines), 2)
        q25, q50, q75 = np.percentile(self.y, [25, 50, 75])
        x, _ = ax.lines[1].get_xydata().T
        npt.assert_array_equal(x, [q25, q75])
        x, _ = ax.collections[0].get_offsets().T
        nt.assert_equal(x, q50)
        plt.close("all")
    def test_draw_quartiles(self):
        """inner="quartile" draws one line per quartile value."""
        kws = self.default_kws.copy()
        kws.update(dict(y="y", data=self.df, inner=None))
        p = cat._ViolinPlotter(**kws)
        _, ax = plt.subplots()
        p.draw_quartiles(ax, self.y, p.support[0], p.density[0], 0)
        for val, line in zip(np.percentile(self.y, [25, 50, 75]), ax.lines):
            _, y = line.get_xydata().T
            npt.assert_array_equal(y, [val, val])
    def test_draw_points(self):
        """inner="point" scatters every observation at the violin center."""
        p = cat._ViolinPlotter(**self.default_kws)
        # Test vertical plot
        _, ax = plt.subplots()
        p.draw_points(ax, self.y, 0)
        x, y = ax.collections[0].get_offsets().T
        npt.assert_array_equal(x, np.zeros_like(self.y))
        npt.assert_array_equal(y, self.y)
        plt.close("all")
        # Test horizontal plot
        p.orient = "h"
        _, ax = plt.subplots()
        p.draw_points(ax, self.y, 0)
        x, y = ax.collections[0].get_offsets().T
        npt.assert_array_equal(x, self.y)
        npt.assert_array_equal(y, np.zeros_like(self.y))
        plt.close("all")
    def test_draw_sticks(self):
        """inner="stick" draws one line per observation."""
        kws = self.default_kws.copy()
        kws.update(dict(y="y", data=self.df, inner=None))
        p = cat._ViolinPlotter(**kws)
        # Test vertical plot
        _, ax = plt.subplots()
        p.draw_stick_lines(ax, self.y, p.support[0], p.density[0], 0)
        for val, line in zip(self.y, ax.lines):
            _, y = line.get_xydata().T
            npt.assert_array_equal(y, [val, val])
        plt.close("all")
        # Test horizontal plot
        p.orient = "h"
        _, ax = plt.subplots()
        p.draw_stick_lines(ax, self.y, p.support[0], p.density[0], 0)
        for val, line in zip(self.y, ax.lines):
            x, _ = line.get_xydata().T
            npt.assert_array_equal(x, [val, val])
        plt.close("all")
    def test_validate_inner(self):
        """An unknown inner keyword raises ValueError at construction."""
        kws = self.default_kws.copy()
        kws.update(dict(inner="bad_inner"))
        with nt.assert_raises(ValueError):
            cat._ViolinPlotter(**kws)
    def test_draw_violinplots(self):
        """Violin bodies are drawn with the expected counts and face colors."""
        kws = self.default_kws.copy()
        # Test single vertical violin
        kws.update(dict(y="y", data=self.df, inner=None,
                        saturation=1, color=(1, 0, 0, 1)))
        p = cat._ViolinPlotter(**kws)
        _, ax = plt.subplots()
        p.draw_violins(ax)
        nt.assert_equal(len(ax.collections), 1)
        npt.assert_array_equal(ax.collections[0].get_facecolors(),
                               [(1, 0, 0, 1)])
        plt.close("all")
        # Test single horizontal violin
        kws.update(dict(x="y", y=None, color=(0, 1, 0, 1)))
        p = cat._ViolinPlotter(**kws)
        _, ax = plt.subplots()
        p.draw_violins(ax)
        nt.assert_equal(len(ax.collections), 1)
        npt.assert_array_equal(ax.collections[0].get_facecolors(),
                               [(0, 1, 0, 1)])
        plt.close("all")
        # Test multiple vertical violins
        kws.update(dict(x="g", y="y", color=None,))
        p = cat._ViolinPlotter(**kws)
        _, ax = plt.subplots()
        p.draw_violins(ax)
        nt.assert_equal(len(ax.collections), 3)
        for violin, color in zip(ax.collections, palettes.color_palette()):
            npt.assert_array_equal(violin.get_facecolors()[0, :-1], color)
        plt.close("all")
        # Test multiple violins with hue nesting
        kws.update(dict(hue="h"))
        p = cat._ViolinPlotter(**kws)
        _, ax = plt.subplots()
        p.draw_violins(ax)
        nt.assert_equal(len(ax.collections), 6)
        for violin, color in zip(ax.collections,
                                 palettes.color_palette(n_colors=2) * 3):
            npt.assert_array_equal(violin.get_facecolors()[0, :-1], color)
        plt.close("all")
        # Test multiple split violins
        kws.update(dict(split=True, palette="muted"))
        p = cat._ViolinPlotter(**kws)
        _, ax = plt.subplots()
        p.draw_violins(ax)
        nt.assert_equal(len(ax.collections), 6)
        for violin, color in zip(ax.collections,
                                 palettes.color_palette("muted",
                                                        n_colors=2) * 3):
            npt.assert_array_equal(violin.get_facecolors()[0, :-1], color)
        plt.close("all")
    def test_draw_violinplots_no_observations(self):
        """Empty cells draw no body lines."""
        kws = self.default_kws.copy()
        kws["inner"] = None
        # Test single layer of grouping
        x = ["a", "a", "b"]
        y = self.rs.randn(3)
        y[-1] = np.nan
        kws.update(x=x, y=y)
        p = cat._ViolinPlotter(**kws)
        _, ax = plt.subplots()
        p.draw_violins(ax)
        nt.assert_equal(len(ax.collections), 1)
        nt.assert_equal(len(ax.lines), 0)
        plt.close("all")
        # Test nested hue grouping
        x = ["a"] * 4 + ["b"] * 2
        y = self.rs.randn(6)
        h = ["m", "n"] * 2 + ["m"] * 2
        kws.update(x=x, y=y, hue=h)
        p = cat._ViolinPlotter(**kws)
        _, ax = plt.subplots()
        p.draw_violins(ax)
        nt.assert_equal(len(ax.collections), 3)
        nt.assert_equal(len(ax.lines), 0)
        plt.close("all")
    def test_draw_violinplots_single_observations(self):
        """Singleton cells draw exactly one observation line."""
        kws = self.default_kws.copy()
        kws["inner"] = None
        # Test single layer of grouping
        x = ["a", "a", "b"]
        y = self.rs.randn(3)
        kws.update(x=x, y=y)
        p = cat._ViolinPlotter(**kws)
        _, ax = plt.subplots()
        p.draw_violins(ax)
        nt.assert_equal(len(ax.collections), 1)
        nt.assert_equal(len(ax.lines), 1)
        plt.close("all")
        # Test nested hue grouping
        x = ["b"] * 4 + ["a"] * 3
        y = self.rs.randn(7)
        h = (["m", "n"] * 4)[:-1]
        kws.update(x=x, y=y, hue=h)
        p = cat._ViolinPlotter(**kws)
        _, ax = plt.subplots()
        p.draw_violins(ax)
        nt.assert_equal(len(ax.collections), 3)
        nt.assert_equal(len(ax.lines), 1)
        plt.close("all")
        # Test nested hue grouping with split
        kws["split"] = True
        p = cat._ViolinPlotter(**kws)
        _, ax = plt.subplots()
        p.draw_violins(ax)
        nt.assert_equal(len(ax.collections), 3)
        nt.assert_equal(len(ax.lines), 1)
        plt.close("all")
    def test_violinplots(self):
        # Smoke test the high level violinplot options
        cat.violinplot("y", data=self.df)
        plt.close("all")
        cat.violinplot(y="y", data=self.df)
        plt.close("all")
        cat.violinplot("g", "y", data=self.df)
        plt.close("all")
        cat.violinplot("y", "g", data=self.df, orient="h")
        plt.close("all")
        cat.violinplot("g", "y", "h", data=self.df)
        plt.close("all")
        cat.violinplot("g", "y", "h", order=list("nabc"), data=self.df)
        plt.close("all")
        cat.violinplot("g", "y", "h", hue_order=list("omn"), data=self.df)
        plt.close("all")
        cat.violinplot("y", "g", "h", data=self.df, orient="h")
        plt.close("all")
        for inner in ["box", "quart", "point", "stick", None]:
            cat.violinplot("g", "y", data=self.df, inner=inner)
            plt.close("all")
            cat.violinplot("g", "y", "h", data=self.df, inner=inner)
            plt.close("all")
            cat.violinplot("g", "y", "h", data=self.df,
                           inner=inner, split=True)
            plt.close("all")
class TestCategoricalScatterPlotter(CategoricalFixture):
    """Tests for the color assignment shared by strip/swarm plotters."""

    def test_group_point_colors(self):
        """Without hue, every point in a group gets that group's palette color."""
        p = cat._CategoricalScatterPlotter()

        p.establish_variables(x="g", y="y", data=self.df)
        p.establish_colors(None, "deep", 1)

        point_colors = p.point_colors
        nt.assert_equal(len(point_colors), self.g.unique().size)

        deep_colors = palettes.color_palette("deep", self.g.unique().size)

        for i, group_colors in enumerate(point_colors):
            nt.assert_equal(tuple(deep_colors[i]), tuple(group_colors[0]))
            for channel in group_colors.T:
                # Every RGB channel is constant within the group.
                # (Was the deprecated `assert_equals` alias; use assert_equal
                # for consistency with the rest of this module.)
                nt.assert_equal(np.unique(channel).size, 1)

    def test_hue_point_colors(self):
        """With hue, each point's color is looked up from its hue level."""
        p = cat._CategoricalScatterPlotter()

        hue_order = self.h.unique().tolist()
        p.establish_variables(x="g", y="y", hue="h",
                              hue_order=hue_order, data=self.df)
        p.establish_colors(None, "deep", 1)

        point_colors = p.point_colors
        nt.assert_equal(len(point_colors), self.g.unique().size)

        deep_colors = palettes.color_palette("deep", len(hue_order))

        for i, group_colors in enumerate(point_colors):
            for j, point_color in enumerate(group_colors):
                hue_level = p.plot_hues[i][j]
                nt.assert_equal(tuple(point_color),
                                deep_colors[hue_order.index(hue_level)])

    def test_scatterplot_legend(self):
        """Legend entries carry the hue labels and matching face colors."""
        p = cat._CategoricalScatterPlotter()

        hue_order = ["m", "n"]
        p.establish_variables(x="g", y="y", hue="h",
                              hue_order=hue_order, data=self.df)
        p.establish_colors(None, "deep", 1)

        deep_colors = palettes.color_palette("deep", self.h.unique().size)

        f, ax = plt.subplots()
        p.add_legend_data(ax)
        leg = ax.legend()

        for i, t in enumerate(leg.get_texts()):
            nt.assert_equal(t.get_text(), hue_order[i])

        for i, h in enumerate(leg.legendHandles):
            rgb = h.get_facecolor()[0, :3]
            nt.assert_equal(tuple(rgb), tuple(deep_colors[i]))
class TestStripPlotter(CategoricalFixture):
    """Tests for ``stripplot`` point placement, jitter, and dodging."""

    def test_stripplot_vertical(self):
        """Vertical strips place points at integer x positions per group."""
        pal = palettes.color_palette()

        ax = cat.stripplot("g", "y", jitter=False, data=self.df)
        for i, (_, vals) in enumerate(self.y.groupby(self.g)):
            x, y = ax.collections[i].get_offsets().T

            npt.assert_array_equal(x, np.ones(len(x)) * i)
            npt.assert_array_equal(y, vals)

            npt.assert_equal(ax.collections[i].get_facecolors()[0, :3], pal[i])

    @skipif(not pandas_has_categoricals)
    def test_stripplot_horizontal(self):
        """Horizontal strips place points at integer y positions per group.

        NOTE(review): method name had a typo ("horiztonal"); renamed — still
        discovered by the test runner via the ``test_`` prefix.
        """
        df = self.df.copy()
        df.g = df.g.astype("category")

        ax = cat.stripplot("y", "g", jitter=False, data=df)
        for i, (_, vals) in enumerate(self.y.groupby(self.g)):
            x, y = ax.collections[i].get_offsets().T

            npt.assert_array_equal(x, vals)
            npt.assert_array_equal(y, np.ones(len(x)) * i)

    def test_stripplot_jitter(self):
        """Jittered points stay within +/-.1 of the group position."""
        pal = palettes.color_palette()

        ax = cat.stripplot("g", "y", data=self.df, jitter=True)
        for i, (_, vals) in enumerate(self.y.groupby(self.g)):
            x, y = ax.collections[i].get_offsets().T

            npt.assert_array_less(np.ones(len(x)) * i - .1, x)
            npt.assert_array_less(x, np.ones(len(x)) * i + .1)
            npt.assert_array_equal(y, vals)

            npt.assert_equal(ax.collections[i].get_facecolors()[0, :3], pal[i])

    def test_dodge_nested_stripplot_vertical(self):
        """With dodge, hue levels are offset by +/-.2 around the group."""
        pal = palettes.color_palette()

        ax = cat.stripplot("g", "y", "h", data=self.df,
                           jitter=False, dodge=True)
        for i, (_, group_vals) in enumerate(self.y.groupby(self.g)):
            for j, (_, vals) in enumerate(group_vals.groupby(self.h)):

                x, y = ax.collections[i * 2 + j].get_offsets().T

                npt.assert_array_equal(x, np.ones(len(x)) * i + [-.2, .2][j])
                npt.assert_array_equal(y, vals)

                fc = ax.collections[i * 2 + j].get_facecolors()[0, :3]
                npt.assert_equal(fc, pal[j])

    @skipif(not pandas_has_categoricals)
    def test_dodge_nested_stripplot_horizontal(self):
        """Horizontal dodged strips mirror the vertical offsets on y."""
        df = self.df.copy()
        df.g = df.g.astype("category")

        ax = cat.stripplot("y", "g", "h", data=df,
                           jitter=False, dodge=True)
        for i, (_, group_vals) in enumerate(self.y.groupby(self.g)):
            for j, (_, vals) in enumerate(group_vals.groupby(self.h)):

                x, y = ax.collections[i * 2 + j].get_offsets().T

                npt.assert_array_equal(x, vals)
                npt.assert_array_equal(y, np.ones(len(x)) * i + [-.2, .2][j])

    def test_nested_stripplot_vertical(self):
        """Without dodge, hue levels share one collection per group."""
        # Test a simple vertical strip plot
        ax = cat.stripplot("g", "y", "h", data=self.df,
                           jitter=False, dodge=False)
        for i, (_, group_vals) in enumerate(self.y.groupby(self.g)):

            x, y = ax.collections[i].get_offsets().T

            npt.assert_array_equal(x, np.ones(len(x)) * i)
            npt.assert_array_equal(y, group_vals)

    @skipif(not pandas_has_categoricals)
    def test_nested_stripplot_horizontal(self):
        """Horizontal, undodged nested strips keep one collection per group."""
        df = self.df.copy()
        df.g = df.g.astype("category")

        ax = cat.stripplot("y", "g", "h", data=df,
                           jitter=False, dodge=False)
        for i, (_, group_vals) in enumerate(self.y.groupby(self.g)):

            x, y = ax.collections[i].get_offsets().T

            npt.assert_array_equal(x, group_vals)
            npt.assert_array_equal(y, np.ones(len(x)) * i)

    def test_three_strip_points(self):
        """Plotting three bare points yields three identical facecolors."""
        x = np.arange(3)
        ax = cat.stripplot(x=x)
        facecolors = ax.collections[0].get_facecolor()
        nt.assert_equal(facecolors.shape, (3, 4))
        npt.assert_array_equal(facecolors[0], facecolors[1])
class TestSwarmPlotter(CategoricalFixture):
    """Tests for the beeswarm layout algorithm and ``swarmplot`` output."""

    default_kws = dict(x=None, y=None, hue=None, data=None,
                       order=None, hue_order=None, dodge=False,
                       orient=None, color=None, palette=None)

    def test_could_overlap(self):
        """Only candidate points within one diameter are kept as neighbors."""
        p = cat._SwarmPlotter(**self.default_kws)
        neighbors = p.could_overlap((1, 1), [(0, 0), (1, .5), (.5, .5)], 1)
        npt.assert_array_equal(neighbors, [(1, .5), (.5, .5)])

    def test_position_candidates(self):
        """Candidates fan out symmetrically with a 5% spacing margin."""
        p = cat._SwarmPlotter(**self.default_kws)
        xy_i = (0, 1)
        neighbors = [(0, 1), (0, 1.5)]
        candidates = p.position_candidates(xy_i, neighbors, 1)
        dx1 = 1.05
        dx2 = np.sqrt(1 - .5 ** 2) * 1.05
        npt.assert_array_equal(candidates,
                               [(0, 1), (-dx1, 1), (dx1, 1),
                                (dx2, 1), (-dx2, 1)])

    def test_find_first_non_overlapping_candidate(self):
        """The first candidate clear of all neighbors is selected."""
        p = cat._SwarmPlotter(**self.default_kws)
        candidates = [(.5, 1), (1, 1), (1.5, 1)]
        neighbors = np.array([(0, 1)])

        first = p.first_non_overlapping_candidate(candidates, neighbors, 1)
        npt.assert_array_equal(first, (1, 1))

    def test_beeswarm(self):
        """Swarmed points keep their y values and never overlap."""
        p = cat._SwarmPlotter(**self.default_kws)
        d = self.y.diff().mean() * 1.5
        x = np.zeros(self.y.size)
        y = np.sort(self.y)
        orig_xy = np.c_[x, y]
        swarm = p.beeswarm(orig_xy, d)
        dmat = spatial.distance.cdist(swarm, swarm)
        triu = dmat[np.triu_indices_from(dmat, 1)]
        npt.assert_array_less(d, triu)
        npt.assert_array_equal(y, swarm[:, 1])

    def test_add_gutters(self):
        """Points outside the half-width gutters are clamped to them."""
        p = cat._SwarmPlotter(**self.default_kws)
        points = np.array([0, -1, .4, .8])
        points = p.add_gutters(points, 0, 1)
        npt.assert_array_equal(points,
                               np.array([0, -.5, .4, .5]))

    def test_swarmplot_vertical(self):
        """Vertical swarms sort each group's y values and use palette colors."""
        pal = palettes.color_palette()
        ax = cat.swarmplot("g", "y", data=self.df)
        for i, (_, vals) in enumerate(self.y.groupby(self.g)):

            x, y = ax.collections[i].get_offsets().T
            npt.assert_array_almost_equal(y, np.sort(vals))

            fc = ax.collections[i].get_facecolors()[0, :3]
            npt.assert_equal(fc, pal[i])

    def test_swarmplot_horizontal(self):
        """Horizontal swarms sort each group's values along x."""
        pal = palettes.color_palette()
        ax = cat.swarmplot("y", "g", data=self.df, orient="h")
        for i, (_, vals) in enumerate(self.y.groupby(self.g)):

            x, y = ax.collections[i].get_offsets().T
            npt.assert_array_almost_equal(x, np.sort(vals))

            fc = ax.collections[i].get_facecolors()[0, :3]
            npt.assert_equal(fc, pal[i])

    def test_dodge_nested_swarmplot_vertical(self):
        """Dodged vertical swarms split hue levels into separate collections.

        NOTE(review): method name had a typo ("vetical"); renamed — still
        discovered by the test runner via the ``test_`` prefix.
        """
        pal = palettes.color_palette()
        ax = cat.swarmplot("g", "y", "h", data=self.df, dodge=True)
        for i, (_, group_vals) in enumerate(self.y.groupby(self.g)):
            for j, (_, vals) in enumerate(group_vals.groupby(self.h)):

                x, y = ax.collections[i * 2 + j].get_offsets().T
                npt.assert_array_almost_equal(y, np.sort(vals))

                fc = ax.collections[i * 2 + j].get_facecolors()[0, :3]
                npt.assert_equal(fc, pal[j])

    def test_dodge_nested_swarmplot_horizontal(self):
        """Dodged horizontal swarms mirror the vertical behavior on x."""
        pal = palettes.color_palette()
        ax = cat.swarmplot("y", "g", "h", data=self.df, orient="h", dodge=True)
        for i, (_, group_vals) in enumerate(self.y.groupby(self.g)):
            for j, (_, vals) in enumerate(group_vals.groupby(self.h)):

                x, y = ax.collections[i * 2 + j].get_offsets().T
                npt.assert_array_almost_equal(x, np.sort(vals))

                fc = ax.collections[i * 2 + j].get_facecolors()[0, :3]
                npt.assert_equal(fc, pal[j])

    def test_nested_swarmplot_vertical(self):
        """Undodged nested swarms color each point by its hue level."""
        ax = cat.swarmplot("g", "y", "h", data=self.df)

        pal = palettes.color_palette()
        hue_names = self.h.unique().tolist()
        grouped_hues = list(self.h.groupby(self.g))

        for i, (_, vals) in enumerate(self.y.groupby(self.g)):

            points = ax.collections[i]
            x, y = points.get_offsets().T
            sorter = np.argsort(vals)
            npt.assert_array_almost_equal(y, vals.iloc[sorter])

            _, hue_vals = grouped_hues[i]
            for hue, fc in zip(hue_vals.values[sorter.values],
                               points.get_facecolors()):

                npt.assert_equal(fc[:3], pal[hue_names.index(hue)])

    def test_nested_swarmplot_horizontal(self):
        """Undodged horizontal nested swarms mirror the vertical case."""
        ax = cat.swarmplot("y", "g", "h", data=self.df, orient="h")

        pal = palettes.color_palette()
        hue_names = self.h.unique().tolist()
        grouped_hues = list(self.h.groupby(self.g))

        for i, (_, vals) in enumerate(self.y.groupby(self.g)):

            points = ax.collections[i]
            x, y = points.get_offsets().T
            sorter = np.argsort(vals)
            npt.assert_array_almost_equal(x, vals.iloc[sorter])

            _, hue_vals = grouped_hues[i]
            for hue, fc in zip(hue_vals.values[sorter.values],
                               points.get_facecolors()):

                npt.assert_equal(fc[:3], pal[hue_names.index(hue)])
class TestBarPlotter(CategoricalFixture):
    """Tests for ``_BarPlotter`` geometry, colors, and the ``barplot`` API."""

    default_kws = dict(x=None, y=None, hue=None, data=None,
                       estimator=np.mean, ci=95, n_boot=100, units=None,
                       order=None, hue_order=None,
                       orient=None, color=None, palette=None,
                       saturation=.75, errcolor=".26", errwidth=None,
                       capsize=None, dodge=True)

    def test_nested_width(self):
        """Nested bar width divides the group width by the hue count."""
        kws = self.default_kws.copy()
        p = cat._BarPlotter(**kws)
        p.establish_variables("g", "y", "h", data=self.df)
        nt.assert_equal(p.nested_width, .8 / 2)

        p = cat._BarPlotter(**kws)
        p.establish_variables("h", "y", "g", data=self.df)
        nt.assert_equal(p.nested_width, .8 / 3)

        # Without dodging, hued bars keep the full group width.
        kws["dodge"] = False
        p = cat._BarPlotter(**kws)
        p.establish_variables("h", "y", "g", data=self.df)
        nt.assert_equal(p.nested_width, .8)

    def test_draw_vertical_bars(self):
        """Vertical bars are centered on group positions with stat heights."""
        kws = self.default_kws.copy()
        kws.update(x="g", y="y", data=self.df)
        p = cat._BarPlotter(**kws)

        f, ax = plt.subplots()
        p.draw_bars(ax, {})

        nt.assert_equal(len(ax.patches), len(p.plot_data))
        nt.assert_equal(len(ax.lines), len(p.plot_data))

        for bar, color in zip(ax.patches, p.colors):
            nt.assert_equal(bar.get_facecolor()[:-1], color)

        positions = np.arange(len(p.plot_data)) - p.width / 2
        for bar, pos, stat in zip(ax.patches, positions, p.statistic):
            nt.assert_equal(bar.get_x(), pos)
            nt.assert_equal(bar.get_width(), p.width)
            # Matplotlib changed bar anchoring for negative values; compare
            # versions with LooseVersion (raw string comparison misorders
            # e.g. "3.10" vs "3.2"), matching the nested tests below.
            if LooseVersion(mpl.__version__) >= mpl_barplot_change:
                nt.assert_equal(bar.get_y(), 0)
                nt.assert_equal(bar.get_height(), stat)
            else:
                nt.assert_equal(bar.get_y(), min(0, stat))
                nt.assert_equal(bar.get_height(), abs(stat))

    def test_draw_horizontal_bars(self):
        """Horizontal bars swap the roles of x/width and y/height."""
        kws = self.default_kws.copy()
        kws.update(x="y", y="g", orient="h", data=self.df)
        p = cat._BarPlotter(**kws)

        f, ax = plt.subplots()
        p.draw_bars(ax, {})

        nt.assert_equal(len(ax.patches), len(p.plot_data))
        nt.assert_equal(len(ax.lines), len(p.plot_data))

        for bar, color in zip(ax.patches, p.colors):
            nt.assert_equal(bar.get_facecolor()[:-1], color)

        positions = np.arange(len(p.plot_data)) - p.width / 2
        for bar, pos, stat in zip(ax.patches, positions, p.statistic):
            nt.assert_equal(bar.get_y(), pos)
            nt.assert_equal(bar.get_height(), p.width)
            # Version comparison via LooseVersion for correctness (see above).
            if LooseVersion(mpl.__version__) >= mpl_barplot_change:
                nt.assert_equal(bar.get_x(), 0)
                nt.assert_equal(bar.get_width(), stat)
            else:
                nt.assert_equal(bar.get_x(), min(0, stat))
                nt.assert_equal(bar.get_width(), abs(stat))

    def test_draw_nested_vertical_bars(self):
        """Nested vertical bars group by hue with offset positions."""
        kws = self.default_kws.copy()
        kws.update(x="g", y="y", hue="h", data=self.df)
        p = cat._BarPlotter(**kws)

        f, ax = plt.subplots()
        p.draw_bars(ax, {})

        n_groups, n_hues = len(p.plot_data), len(p.hue_names)
        nt.assert_equal(len(ax.patches), n_groups * n_hues)
        nt.assert_equal(len(ax.lines), n_groups * n_hues)

        # Bars are drawn hue level by hue level, so the first n_groups
        # patches all share the first hue color.
        for bar in ax.patches[:n_groups]:
            nt.assert_equal(bar.get_facecolor()[:-1], p.colors[0])
        for bar in ax.patches[n_groups:]:
            nt.assert_equal(bar.get_facecolor()[:-1], p.colors[1])

        positions = np.arange(len(p.plot_data))
        for bar, pos in zip(ax.patches[:n_groups], positions):
            nt.assert_almost_equal(bar.get_x(), pos - p.width / 2)
            nt.assert_almost_equal(bar.get_width(), p.nested_width)

        for bar, stat in zip(ax.patches, p.statistic.T.flat):
            if LooseVersion(mpl.__version__) >= mpl_barplot_change:
                nt.assert_almost_equal(bar.get_y(), 0)
                nt.assert_almost_equal(bar.get_height(), stat)
            else:
                nt.assert_almost_equal(bar.get_y(), min(0, stat))
                nt.assert_almost_equal(bar.get_height(), abs(stat))

    def test_draw_nested_horizontal_bars(self):
        """Nested horizontal bars mirror the vertical geometry."""
        kws = self.default_kws.copy()
        kws.update(x="y", y="g", hue="h", orient="h", data=self.df)
        p = cat._BarPlotter(**kws)

        f, ax = plt.subplots()
        p.draw_bars(ax, {})

        n_groups, n_hues = len(p.plot_data), len(p.hue_names)
        nt.assert_equal(len(ax.patches), n_groups * n_hues)
        nt.assert_equal(len(ax.lines), n_groups * n_hues)

        for bar in ax.patches[:n_groups]:
            nt.assert_equal(bar.get_facecolor()[:-1], p.colors[0])
        for bar in ax.patches[n_groups:]:
            nt.assert_equal(bar.get_facecolor()[:-1], p.colors[1])

        positions = np.arange(len(p.plot_data))
        for bar, pos in zip(ax.patches[:n_groups], positions):
            nt.assert_almost_equal(bar.get_y(), pos - p.width / 2)
            nt.assert_almost_equal(bar.get_height(), p.nested_width)

        for bar, stat in zip(ax.patches, p.statistic.T.flat):
            if LooseVersion(mpl.__version__) >= mpl_barplot_change:
                nt.assert_almost_equal(bar.get_x(), 0)
                nt.assert_almost_equal(bar.get_width(), stat)
            else:
                nt.assert_almost_equal(bar.get_x(), min(0, stat))
                nt.assert_almost_equal(bar.get_width(), abs(stat))

    def test_draw_missing_bars(self):
        """Orders with unobserved categories still draw one bar per level."""
        kws = self.default_kws.copy()

        order = list("abcd")
        kws.update(x="g", y="y", order=order, data=self.df)
        p = cat._BarPlotter(**kws)

        f, ax = plt.subplots()
        p.draw_bars(ax, {})

        nt.assert_equal(len(ax.patches), len(order))
        nt.assert_equal(len(ax.lines), len(order))

        plt.close("all")

        hue_order = list("mno")
        kws.update(x="g", y="y", hue="h", hue_order=hue_order, data=self.df)
        p = cat._BarPlotter(**kws)

        f, ax = plt.subplots()
        p.draw_bars(ax, {})

        nt.assert_equal(len(ax.patches), len(p.plot_data) * len(hue_order))
        nt.assert_equal(len(ax.lines), len(p.plot_data) * len(hue_order))

        plt.close("all")

    def test_barplot_colors(self):
        """Bars take palette colors, single colors, and nested palettes."""
        # Test unnested palette colors
        kws = self.default_kws.copy()
        kws.update(x="g", y="y", data=self.df,
                   saturation=1, palette="muted")
        p = cat._BarPlotter(**kws)

        f, ax = plt.subplots()
        p.draw_bars(ax, {})

        palette = palettes.color_palette("muted", len(self.g.unique()))
        for patch, pal_color in zip(ax.patches, palette):
            nt.assert_equal(patch.get_facecolor()[:-1], pal_color)

        plt.close("all")

        # Test single color
        color = (.2, .2, .3, 1)
        kws = self.default_kws.copy()
        kws.update(x="g", y="y", data=self.df,
                   saturation=1, color=color)
        p = cat._BarPlotter(**kws)

        f, ax = plt.subplots()
        p.draw_bars(ax, {})

        for patch in ax.patches:
            nt.assert_equal(patch.get_facecolor(), color)

        plt.close("all")

        # Test nested palette colors
        kws = self.default_kws.copy()
        kws.update(x="g", y="y", hue="h", data=self.df,
                   saturation=1, palette="Set2")
        p = cat._BarPlotter(**kws)

        f, ax = plt.subplots()
        p.draw_bars(ax, {})

        palette = palettes.color_palette("Set2", len(self.h.unique()))
        for patch in ax.patches[:len(self.g.unique())]:
            nt.assert_equal(patch.get_facecolor()[:-1], palette[0])
        for patch in ax.patches[len(self.g.unique()):]:
            nt.assert_equal(patch.get_facecolor()[:-1], palette[1])

        plt.close("all")

    def test_simple_barplots(self):
        """Smoke-test the high-level ``barplot`` API in both orientations."""
        ax = cat.barplot("g", "y", data=self.df)
        nt.assert_equal(len(ax.patches), len(self.g.unique()))
        nt.assert_equal(ax.get_xlabel(), "g")
        nt.assert_equal(ax.get_ylabel(), "y")
        plt.close("all")

        ax = cat.barplot("y", "g", orient="h", data=self.df)
        nt.assert_equal(len(ax.patches), len(self.g.unique()))
        nt.assert_equal(ax.get_xlabel(), "y")
        nt.assert_equal(ax.get_ylabel(), "g")
        plt.close("all")

        ax = cat.barplot("g", "y", "h", data=self.df)
        nt.assert_equal(len(ax.patches),
                        len(self.g.unique()) * len(self.h.unique()))
        nt.assert_equal(ax.get_xlabel(), "g")
        nt.assert_equal(ax.get_ylabel(), "y")
        plt.close("all")

        ax = cat.barplot("y", "g", "h", orient="h", data=self.df)
        nt.assert_equal(len(ax.patches),
                        len(self.g.unique()) * len(self.h.unique()))
        nt.assert_equal(ax.get_xlabel(), "y")
        nt.assert_equal(ax.get_ylabel(), "g")
        plt.close("all")
class TestPointPlotter(CategoricalFixture):
    """Tests for ``_PointPlotter`` positioning, colors, and ``pointplot``."""

    default_kws = dict(x=None, y=None, hue=None, data=None,
                       estimator=np.mean, ci=95, n_boot=100, units=None,
                       markers="o", linestyles="-", dodge=0,
                       join=True, scale=1,
                       orient=None, color=None, palette=None)

    def test_different_default_colors(self):
        """All groups share the first palette color when no hue is given.

        NOTE(review): method name had a typo ("defualt"); renamed — still
        discovered by the test runner via the ``test_`` prefix.
        """
        kws = self.default_kws.copy()
        kws.update(dict(x="g", y="y", data=self.df))
        p = cat._PointPlotter(**kws)
        color = palettes.color_palette()[0]
        npt.assert_array_equal(p.colors, [color, color, color])

    def test_hue_offsets(self):
        """Dodge spreads hue levels symmetrically around each group."""
        kws = self.default_kws.copy()
        kws.update(dict(x="g", y="y", hue="h", data=self.df))

        p = cat._PointPlotter(**kws)
        npt.assert_array_equal(p.hue_offsets, [0, 0])

        kws.update(dict(dodge=.5))

        p = cat._PointPlotter(**kws)
        npt.assert_array_equal(p.hue_offsets, [-.25, .25])

        kws.update(dict(x="h", hue="g", dodge=0))

        p = cat._PointPlotter(**kws)
        npt.assert_array_equal(p.hue_offsets, [0, 0, 0])

        kws.update(dict(dodge=.3))

        p = cat._PointPlotter(**kws)
        npt.assert_array_equal(p.hue_offsets, [-.15, 0, .15])

    def test_draw_vertical_points(self):
        """Vertical points sit at integer x positions with stat y values."""
        kws = self.default_kws.copy()
        kws.update(x="g", y="y", data=self.df)
        p = cat._PointPlotter(**kws)

        f, ax = plt.subplots()
        p.draw_points(ax)

        # One error line per group plus the connecting join line.
        nt.assert_equal(len(ax.collections), 1)
        nt.assert_equal(len(ax.lines), len(p.plot_data) + 1)
        points = ax.collections[0]
        nt.assert_equal(len(points.get_offsets()), len(p.plot_data))

        x, y = points.get_offsets().T
        npt.assert_array_equal(x, np.arange(len(p.plot_data)))
        npt.assert_array_equal(y, p.statistic)

        for got_color, want_color in zip(points.get_facecolors(),
                                         p.colors):
            npt.assert_array_equal(got_color[:-1], want_color)

    def test_draw_horizontal_points(self):
        """Horizontal points swap the statistic onto the x axis."""
        kws = self.default_kws.copy()
        kws.update(x="y", y="g", orient="h", data=self.df)
        p = cat._PointPlotter(**kws)

        f, ax = plt.subplots()
        p.draw_points(ax)

        nt.assert_equal(len(ax.collections), 1)
        nt.assert_equal(len(ax.lines), len(p.plot_data) + 1)
        points = ax.collections[0]
        nt.assert_equal(len(points.get_offsets()), len(p.plot_data))

        x, y = points.get_offsets().T
        npt.assert_array_equal(x, p.statistic)
        npt.assert_array_equal(y, np.arange(len(p.plot_data)))

        for got_color, want_color in zip(points.get_facecolors(),
                                         p.colors):
            npt.assert_array_equal(got_color[:-1], want_color)

    def test_draw_vertical_nested_points(self):
        """With hue, one collection is drawn per hue level."""
        kws = self.default_kws.copy()
        kws.update(x="g", y="y", hue="h", data=self.df)
        p = cat._PointPlotter(**kws)

        f, ax = plt.subplots()
        p.draw_points(ax)

        nt.assert_equal(len(ax.collections), 2)
        nt.assert_equal(len(ax.lines),
                        len(p.plot_data) * len(p.hue_names) + len(p.hue_names))

        for points, numbers, color in zip(ax.collections,
                                          p.statistic.T,
                                          p.colors):

            nt.assert_equal(len(points.get_offsets()), len(p.plot_data))

            x, y = points.get_offsets().T
            npt.assert_array_equal(x, np.arange(len(p.plot_data)))
            npt.assert_array_equal(y, numbers)

            for got_color in points.get_facecolors():
                npt.assert_array_equal(got_color[:-1], color)

    def test_draw_horizontal_nested_points(self):
        """Horizontal nested points mirror the vertical case."""
        kws = self.default_kws.copy()
        kws.update(x="y", y="g", hue="h", orient="h", data=self.df)
        p = cat._PointPlotter(**kws)

        f, ax = plt.subplots()
        p.draw_points(ax)

        nt.assert_equal(len(ax.collections), 2)
        nt.assert_equal(len(ax.lines),
                        len(p.plot_data) * len(p.hue_names) + len(p.hue_names))

        for points, numbers, color in zip(ax.collections,
                                          p.statistic.T,
                                          p.colors):

            nt.assert_equal(len(points.get_offsets()), len(p.plot_data))

            x, y = points.get_offsets().T
            npt.assert_array_equal(x, numbers)
            npt.assert_array_equal(y, np.arange(len(p.plot_data)))

            for got_color in points.get_facecolors():
                npt.assert_array_equal(got_color[:-1], color)

    def test_pointplot_colors(self):
        """Point and line colors follow color/palette/hue settings."""
        # Test a single-color unnested plot
        color = (.2, .2, .3, 1)
        kws = self.default_kws.copy()
        kws.update(x="g", y="y", data=self.df, color=color)
        p = cat._PointPlotter(**kws)

        f, ax = plt.subplots()
        p.draw_points(ax)

        for line in ax.lines:
            nt.assert_equal(line.get_color(), color[:-1])

        for got_color in ax.collections[0].get_facecolors():
            npt.assert_array_equal(rgb2hex(got_color), rgb2hex(color))

        plt.close("all")

        # Test a multi-color unnested plot
        palette = palettes.color_palette("Set1", 3)
        kws.update(x="g", y="y", data=self.df, palette="Set1")
        p = cat._PointPlotter(**kws)

        # A palette without hue implies disjoint groups, so no join line.
        nt.assert_true(not p.join)

        f, ax = plt.subplots()
        p.draw_points(ax)

        for line, pal_color in zip(ax.lines, palette):
            npt.assert_array_equal(line.get_color(), pal_color)

        for point_color, pal_color in zip(ax.collections[0].get_facecolors(),
                                          palette):
            npt.assert_array_equal(rgb2hex(point_color), rgb2hex(pal_color))

        plt.close("all")

        # Test a multi-colored nested plot
        palette = palettes.color_palette("dark", 2)
        kws.update(x="g", y="y", hue="h", data=self.df, palette="dark")
        p = cat._PointPlotter(**kws)

        f, ax = plt.subplots()
        p.draw_points(ax)

        for line in ax.lines[:(len(p.plot_data) + 1)]:
            nt.assert_equal(line.get_color(), palette[0])
        for line in ax.lines[(len(p.plot_data) + 1):]:
            nt.assert_equal(line.get_color(), palette[1])

        for i, pal_color in enumerate(palette):
            for point_color in ax.collections[i].get_facecolors():
                npt.assert_array_equal(point_color[:-1], pal_color)

        plt.close("all")

    def test_simple_pointplots(self):
        """Smoke-test the high-level ``pointplot`` API in both orientations."""
        ax = cat.pointplot("g", "y", data=self.df)
        nt.assert_equal(len(ax.collections), 1)
        nt.assert_equal(len(ax.lines), len(self.g.unique()) + 1)
        nt.assert_equal(ax.get_xlabel(), "g")
        nt.assert_equal(ax.get_ylabel(), "y")
        plt.close("all")

        ax = cat.pointplot("y", "g", orient="h", data=self.df)
        nt.assert_equal(len(ax.collections), 1)
        nt.assert_equal(len(ax.lines), len(self.g.unique()) + 1)
        nt.assert_equal(ax.get_xlabel(), "y")
        nt.assert_equal(ax.get_ylabel(), "g")
        plt.close("all")

        ax = cat.pointplot("g", "y", "h", data=self.df)
        nt.assert_equal(len(ax.collections), len(self.h.unique()))
        nt.assert_equal(len(ax.lines),
                        (len(self.g.unique()) *
                         len(self.h.unique()) +
                         len(self.h.unique())))
        nt.assert_equal(ax.get_xlabel(), "g")
        nt.assert_equal(ax.get_ylabel(), "y")
        plt.close("all")

        ax = cat.pointplot("y", "g", "h", orient="h", data=self.df)
        nt.assert_equal(len(ax.collections), len(self.h.unique()))
        nt.assert_equal(len(ax.lines),
                        (len(self.g.unique()) *
                         len(self.h.unique()) +
                         len(self.h.unique())))
        nt.assert_equal(ax.get_xlabel(), "y")
        nt.assert_equal(ax.get_ylabel(), "g")
        plt.close("all")
class TestCountPlot(CategoricalFixture):
    """Tests for ``countplot`` bar counts and argument validation."""

    def test_plot_elements(self):
        """Each category gets one bar whose length equals its count."""
        n_categories = self.g.unique().size
        per_category = self.g.size / self.g.unique().size

        # Vertical: bars rise from zero to the per-category count.
        ax = cat.countplot("g", data=self.df)
        nt.assert_equal(len(ax.patches), n_categories)
        for patch in ax.patches:
            nt.assert_equal(patch.get_y(), 0)
            nt.assert_equal(patch.get_height(), per_category)
        plt.close("all")

        # Horizontal: bars extend from zero along x instead.
        ax = cat.countplot(y="g", data=self.df)
        nt.assert_equal(len(ax.patches), n_categories)
        for patch in ax.patches:
            nt.assert_equal(patch.get_x(), 0)
            nt.assert_equal(patch.get_width(), per_category)
        plt.close("all")

        # Nesting on hue multiplies the bar count in either orientation.
        nested_count = self.g.unique().size * self.h.unique().size

        ax = cat.countplot("g", hue="h", data=self.df)
        nt.assert_equal(len(ax.patches), nested_count)
        plt.close("all")

        ax = cat.countplot(y="g", hue="h", data=self.df)
        nt.assert_equal(len(ax.patches), nested_count)
        plt.close("all")

    def test_input_error(self):
        """Calling with neither or both of x and y raises TypeError."""
        with nt.assert_raises(TypeError):
            cat.countplot()

        with nt.assert_raises(TypeError):
            cat.countplot(x="g", y="h", data=self.df)
class TestCatPlot(CategoricalFixture):
    """Tests for the figure-level ``catplot`` interface and its kinds."""

    def test_facet_organization(self):
        """col/row variables control the facet grid shape."""
        g = cat.catplot("g", "y", data=self.df)
        nt.assert_equal(g.axes.shape, (1, 1))

        g = cat.catplot("g", "y", col="h", data=self.df)
        nt.assert_equal(g.axes.shape, (1, 2))

        g = cat.catplot("g", "y", row="h", data=self.df)
        nt.assert_equal(g.axes.shape, (2, 1))

        # "u" presumably has 3 levels in the fixture — grid is rows x cols.
        g = cat.catplot("g", "y", col="u", row="h", data=self.df)
        nt.assert_equal(g.axes.shape, (2, 3))

    def test_plot_elements(self):
        """Each kind= produces the artist counts of its axes-level function."""
        # kind="point": one collection plus a join line per hue level.
        g = cat.catplot("g", "y", data=self.df, kind="point")
        nt.assert_equal(len(g.ax.collections), 1)
        want_lines = self.g.unique().size + 1
        nt.assert_equal(len(g.ax.lines), want_lines)

        g = cat.catplot("g", "y", "h", data=self.df, kind="point")
        want_collections = self.h.unique().size
        nt.assert_equal(len(g.ax.collections), want_collections)
        want_lines = (self.g.unique().size + 1) * self.h.unique().size
        nt.assert_equal(len(g.ax.lines), want_lines)

        # kind="bar": one patch and one error line per bar.
        g = cat.catplot("g", "y", data=self.df, kind="bar")
        want_elements = self.g.unique().size
        nt.assert_equal(len(g.ax.patches), want_elements)
        nt.assert_equal(len(g.ax.lines), want_elements)

        g = cat.catplot("g", "y", "h", data=self.df, kind="bar")
        want_elements = self.g.unique().size * self.h.unique().size
        nt.assert_equal(len(g.ax.patches), want_elements)
        nt.assert_equal(len(g.ax.lines), want_elements)

        # kind="count": bars but no error lines.
        g = cat.catplot("g", data=self.df, kind="count")
        want_elements = self.g.unique().size
        nt.assert_equal(len(g.ax.patches), want_elements)
        nt.assert_equal(len(g.ax.lines), 0)

        g = cat.catplot("g", hue="h", data=self.df, kind="count")
        want_elements = self.g.unique().size * self.h.unique().size
        nt.assert_equal(len(g.ax.patches), want_elements)
        nt.assert_equal(len(g.ax.lines), 0)

        # kind="box": one artist (box) per group.
        g = cat.catplot("g", "y", data=self.df, kind="box")
        want_artists = self.g.unique().size
        nt.assert_equal(len(g.ax.artists), want_artists)

        g = cat.catplot("g", "y", "h", data=self.df, kind="box")
        want_artists = self.g.unique().size * self.h.unique().size
        nt.assert_equal(len(g.ax.artists), want_artists)

        # kind="violin" with inner=None: one collection per violin.
        g = cat.catplot("g", "y", data=self.df,
                        kind="violin", inner=None)
        want_elements = self.g.unique().size
        nt.assert_equal(len(g.ax.collections), want_elements)

        g = cat.catplot("g", "y", "h", data=self.df,
                        kind="violin", inner=None)
        want_elements = self.g.unique().size * self.h.unique().size
        nt.assert_equal(len(g.ax.collections), want_elements)

        # kind="strip": one collection per group; hued plots add one
        # collection per hue level for the legend.
        g = cat.catplot("g", "y", data=self.df, kind="strip")
        want_elements = self.g.unique().size
        nt.assert_equal(len(g.ax.collections), want_elements)

        g = cat.catplot("g", "y", "h", data=self.df, kind="strip")
        want_elements = self.g.unique().size + self.h.unique().size
        nt.assert_equal(len(g.ax.collections), want_elements)

    def test_bad_plot_kind_error(self):
        """An unknown kind= raises ValueError."""
        with nt.assert_raises(ValueError):
            cat.catplot("g", "y", data=self.df, kind="not_a_kind")

    def test_count_x_and_y(self):
        """kind="count" rejects specifying both x and y."""
        with nt.assert_raises(ValueError):
            cat.catplot("g", "y", data=self.df, kind="count")

    def test_plot_colors(self):
        """catplot forwards color options identically to the axes functions."""
        ax = cat.barplot("g", "y", data=self.df)
        g = cat.catplot("g", "y", data=self.df, kind="bar")
        for p1, p2 in zip(ax.patches, g.ax.patches):
            nt.assert_equal(p1.get_facecolor(), p2.get_facecolor())
        plt.close("all")

        ax = cat.barplot("g", "y", data=self.df, color="purple")
        g = cat.catplot("g", "y", data=self.df,
                        kind="bar", color="purple")
        for p1, p2 in zip(ax.patches, g.ax.patches):
            nt.assert_equal(p1.get_facecolor(), p2.get_facecolor())
        plt.close("all")

        ax = cat.barplot("g", "y", data=self.df, palette="Set2")
        g = cat.catplot("g", "y", data=self.df,
                        kind="bar", palette="Set2")
        for p1, p2 in zip(ax.patches, g.ax.patches):
            nt.assert_equal(p1.get_facecolor(), p2.get_facecolor())
        plt.close("all")

        # The default kind is "point", so compare against pointplot.
        ax = cat.pointplot("g", "y", data=self.df)
        g = cat.catplot("g", "y", data=self.df)
        for l1, l2 in zip(ax.lines, g.ax.lines):
            nt.assert_equal(l1.get_color(), l2.get_color())
        plt.close("all")

        ax = cat.pointplot("g", "y", data=self.df, color="purple")
        g = cat.catplot("g", "y", data=self.df, color="purple")
        for l1, l2 in zip(ax.lines, g.ax.lines):
            nt.assert_equal(l1.get_color(), l2.get_color())
        plt.close("all")

        ax = cat.pointplot("g", "y", data=self.df, palette="Set2")
        g = cat.catplot("g", "y", data=self.df, palette="Set2")
        for l1, l2 in zip(ax.lines, g.ax.lines):
            nt.assert_equal(l1.get_color(), l2.get_color())
        plt.close("all")

    def test_factorplot(self):
        """The deprecated ``factorplot`` alias warns but still plots."""
        with pytest.warns(UserWarning):
            g = cat.factorplot("g", "y", data=self.df)

        nt.assert_equal(len(g.ax.collections), 1)
        want_lines = self.g.unique().size + 1
        nt.assert_equal(len(g.ax.lines), want_lines)
class TestBoxenPlotter(CategoricalFixture):
    """Tests for letter-value ("boxen") plots and the ``_LVPlotter`` internals."""

    default_kws = dict(x=None, y=None, hue=None, data=None,
                       order=None, hue_order=None,
                       orient=None, color=None, palette=None,
                       saturation=.75, width=.8, dodge=True,
                       k_depth='proportion', linewidth=None,
                       scale='exponential', outlier_prop=None)

    def ispatch(self, c):
        # Letter-value boxes are drawn as PatchCollections; used to filter
        # them out of ax.collections below.
        return isinstance(c, mpl.collections.PatchCollection)

    def edge_calc(self, n, data):
        """Return the two tail percentiles at depth ``n`` (0.5**n each side)."""
        q = np.asanyarray([0.5 ** n, 1 - 0.5 ** n]) * 100
        q = list(np.unique(q))
        return np.percentile(data, q)

    def test_box_ends_finite(self):
        """Computed box ends are finite and bounded by the data range."""
        p = cat._LVPlotter(**self.default_kws)
        p.establish_variables("g", "y", data=self.df)
        box_k = np.asarray([[b, k]
                            for b, k in map(p._lv_box_ends, p.plot_data)])
        box_ends = box_k[:, 0]
        k_vals = box_k[:, 1]

        # Check that all the box ends are finite and are within
        # the bounds of the data
        b_e = map(lambda a: np.all(np.isfinite(a)), box_ends)
        npt.assert_equal(np.sum(list(b_e)), len(box_ends))

        def within(t):
            # True if every box edge lies inside [min, max] of its data.
            a, d = t
            return ((np.ravel(a) <= d.max()) &
                    (np.ravel(a) >= d.min())).all()

        b_w = map(within, zip(box_ends, p.plot_data))
        npt.assert_equal(np.sum(list(b_w)), len(box_ends))

        # The letter-value depth k must be positive and finite.
        k_f = map(lambda k: (k > 0.) & np.isfinite(k), k_vals)
        npt.assert_equal(np.sum(list(k_f)), len(k_vals))

    def test_box_ends_correct(self):
        """Box ends on linear data match direct percentile computation."""
        n = 100
        linear_data = np.arange(n)
        # NOTE(review): 0.007 presumably mirrors the default outlier
        # proportion used inside _lv_box_ends — confirm against seaborn.
        expected_k = int(np.log2(n)) - int(np.log2(n * 0.007)) + 1
        expected_edges = [self.edge_calc(i, linear_data)
                          for i in range(expected_k + 2, 1, -1)]

        p = cat._LVPlotter(**self.default_kws)
        calc_edges, calc_k = p._lv_box_ends(linear_data)

        npt.assert_equal(list(expected_edges), calc_edges)
        npt.assert_equal(expected_k, calc_k)

    def test_outliers(self):
        """A single extreme value is flagged consistently as an outlier."""
        n = 100
        outlier_data = np.append(np.arange(n - 1), 2 * n)
        expected_k = int(np.log2(n)) - int(np.log2(n * 0.007)) + 1
        expected_edges = [self.edge_calc(i, outlier_data)
                          for i in range(expected_k + 2, 1, -1)]

        p = cat._LVPlotter(**self.default_kws)
        calc_edges, calc_k = p._lv_box_ends(outlier_data)

        npt.assert_equal(list(expected_edges), calc_edges)
        npt.assert_equal(expected_k, calc_k)

        out_calc = p._lv_outliers(outlier_data, calc_k)
        out_exp = p._lv_outliers(outlier_data, expected_k)

        npt.assert_equal(out_exp, out_calc)

    def test_hue_offsets(self):
        """Hue offsets scale with width and the number of hue levels."""
        p = cat._LVPlotter(**self.default_kws)
        p.establish_variables("g", "y", "h", data=self.df)
        npt.assert_array_equal(p.hue_offsets, [-.2, .2])

        kws = self.default_kws.copy()
        kws["width"] = .6
        p = cat._LVPlotter(**kws)
        p.establish_variables("g", "y", "h", data=self.df)
        npt.assert_array_equal(p.hue_offsets, [-.15, .15])

        p = cat._LVPlotter(**kws)
        p.establish_variables("h", "y", "g", data=self.df)
        npt.assert_array_almost_equal(p.hue_offsets, [-.2, 0, .2])

    def test_axes_data(self):
        """One PatchCollection is drawn per box (group x hue)."""
        ax = cat.boxenplot("g", "y", data=self.df)
        patches = filter(self.ispatch, ax.collections)
        nt.assert_equal(len(list(patches)), 3)

        plt.close("all")

        ax = cat.boxenplot("g", "y", "h", data=self.df)
        patches = filter(self.ispatch, ax.collections)
        nt.assert_equal(len(list(patches)), 6)

        plt.close("all")

    def test_box_colors(self):
        """Boxes cycle through the palette (per group, or per hue level)."""
        ax = cat.boxenplot("g", "y", data=self.df, saturation=1)
        pal = palettes.color_palette(n_colors=3)
        for patch, color in zip(ax.artists, pal):
            nt.assert_equal(patch.get_facecolor()[:3], color)

        plt.close("all")

        ax = cat.boxenplot("g", "y", "h", data=self.df, saturation=1)
        pal = palettes.color_palette(n_colors=2)
        for patch, color in zip(ax.artists, pal * 2):
            nt.assert_equal(patch.get_facecolor()[:3], color)

        plt.close("all")

    def test_draw_missing_boxes(self):
        """Categories absent from the data produce no patch collections."""
        ax = cat.boxenplot("g", "y", data=self.df,
                           order=["a", "b", "c", "d"])

        patches = filter(self.ispatch, ax.collections)
        nt.assert_equal(len(list(patches)), 3)
        plt.close("all")

    def test_missing_data(self):
        """Groups that are entirely NaN are skipped when drawing."""
        x = ["a", "a", "b", "b", "c", "c", "d", "d"]
        h = ["x", "y", "x", "y", "x", "y", "x", "y"]
        y = self.rs.randn(8)
        y[-2:] = np.nan

        # Group "d" is all-NaN -> only 3 median lines drawn.
        ax = cat.boxenplot(x, y)
        nt.assert_equal(len(ax.lines), 3)

        plt.close("all")

        # Restoring one value leaves a single missing hue cell -> 7 lines.
        y[-1] = 0
        ax = cat.boxenplot(x, y, h)
        nt.assert_equal(len(ax.lines), 7)

        plt.close("all")

    def test_boxenplots(self):
        """Smoke-test the high-level ``boxenplot`` argument combinations."""
        # Smoke test the high level boxenplot options

        cat.boxenplot("y", data=self.df)
        plt.close("all")

        cat.boxenplot(y="y", data=self.df)
        plt.close("all")

        cat.boxenplot("g", "y", data=self.df)
        plt.close("all")

        cat.boxenplot("y", "g", data=self.df, orient="h")
        plt.close("all")

        cat.boxenplot("g", "y", "h", data=self.df)
        plt.close("all")

        cat.boxenplot("g", "y", "h", order=list("nabc"), data=self.df)
        plt.close("all")

        cat.boxenplot("g", "y", "h", hue_order=list("omn"), data=self.df)
        plt.close("all")

        cat.boxenplot("y", "g", "h", data=self.df, orient="h")
        plt.close("all")

    def test_axes_annotation(self):
        """Axis labels, limits, ticks, and legend text reflect the inputs."""
        ax = cat.boxenplot("g", "y", data=self.df)
        nt.assert_equal(ax.get_xlabel(), "g")
        nt.assert_equal(ax.get_ylabel(), "y")
        nt.assert_equal(ax.get_xlim(), (-.5, 2.5))
        npt.assert_array_equal(ax.get_xticks(), [0, 1, 2])
        npt.assert_array_equal([l.get_text() for l in ax.get_xticklabels()],
                               ["a", "b", "c"])

        plt.close("all")

        ax = cat.boxenplot("g", "y", "h", data=self.df)
        nt.assert_equal(ax.get_xlabel(), "g")
        nt.assert_equal(ax.get_ylabel(), "y")
        npt.assert_array_equal(ax.get_xticks(), [0, 1, 2])
        npt.assert_array_equal([l.get_text() for l in ax.get_xticklabels()],
                               ["a", "b", "c"])
        npt.assert_array_equal([l.get_text() for l in ax.legend_.get_texts()],
                               ["m", "n"])

        plt.close("all")

        # Horizontal orientation inverts the categorical (y) axis.
        ax = cat.boxenplot("y", "g", data=self.df, orient="h")
        nt.assert_equal(ax.get_xlabel(), "y")
        nt.assert_equal(ax.get_ylabel(), "g")
        nt.assert_equal(ax.get_ylim(), (2.5, -.5))
        npt.assert_array_equal(ax.get_yticks(), [0, 1, 2])
        npt.assert_array_equal([l.get_text() for l in ax.get_yticklabels()],
                               ["a", "b", "c"])

        plt.close("all")

    def test_lvplot(self):
        """The deprecated ``lvplot`` alias warns but draws the same boxes."""
        with pytest.warns(UserWarning):
            ax = cat.lvplot("g", "y", data=self.df)

        patches = filter(self.ispatch, ax.collections)
        nt.assert_equal(len(list(patches)), 3)

        plt.close("all")
|
bsd-3-clause
|
mmottahedi/neuralnilm_prototype
|
scripts/e415.py
|
2
|
6329
|
from __future__ import print_function, division
import matplotlib
import logging
from sys import stdout
matplotlib.use('Agg') # Must be before importing matplotlib.pyplot or pylab!
from neuralnilm import (Net, RealApplianceSource,
BLSTMLayer, DimshuffleLayer,
BidirectionalRecurrentLayer)
from neuralnilm.source import standardise, discretize, fdiff, power_and_fdiff
from neuralnilm.experiment import run_experiment, init_experiment
from neuralnilm.net import TrainingError
from neuralnilm.layers import MixtureDensityLayer
from neuralnilm.objectives import (scaled_cost, mdn_nll,
scaled_cost_ignore_inactive, ignore_inactive,
scaled_cost3)
from neuralnilm.plot import MDNPlotter, CentralOutputPlotter, Plotter
from neuralnilm.updates import clipped_nesterov_momentum
from lasagne.nonlinearities import sigmoid, rectify, tanh, identity
from lasagne.objectives import mse, binary_crossentropy
from lasagne.init import Uniform, Normal, Identity
from lasagne.layers import (LSTMLayer, DenseLayer, Conv1DLayer,
ReshapeLayer, FeaturePoolLayer, RecurrentLayer)
from lasagne.layers.batch_norm import BatchNormLayer
from lasagne.updates import nesterov_momentum, momentum
from functools import partial
import os
import __main__
from copy import deepcopy
from math import sqrt
import numpy as np
import theano.tensor as T
import gc
"""
e400
'learn_init': False
independently_centre_inputs : True
e401
input is in range [0,1]
"""
# Experiment family name derived from this script's filename (e.g. 'e415').
NAME = os.path.splitext(os.path.split(__main__.__file__)[1])[0]
#PATH = "/homes/dk3810/workspace/python/neuralnilm/figures"
PATH = "/data/dk3810/figures"  # output directory for experiment figures
SAVE_PLOT_INTERVAL = 500  # iterations between saved diagnostic plots
GRADIENT_STEPS = 100  # NOTE(review): not referenced below in this view — confirm still used
# Configuration for RealApplianceSource (UK-DALE data); consumed by exp_a().
source_dict = dict(
    filename='/data/dk3810/ukdale.h5',
    appliances=[
        ['fridge freezer', 'fridge', 'freezer']
#        'hair straighteners',
#        'television'
#        'dish washer',
#        ['washer dryer', 'washing machine']
    ],
    max_appliance_powers=[300, 500, 200, 2500, 2400],
#    max_input_power=100,
    max_diff = 100,
    on_power_thresholds=[5] * 5,
    min_on_durations=[60, 60, 60, 1800, 1800],
    min_off_durations=[12, 12, 12, 1800, 600],
    window=("2013-06-01", "2014-07-01"),
    seq_length=512,
#    random_window=64,
    output_one_appliance=True,
    boolean_targets=False,
    train_buildings=[1],
    validation_buildings=[1],
    skip_probability=1,
    skip_probability_for_first_appliance=0,
    one_target_per_seq=False,
    n_seq_per_batch=64,
#    subsample_target=4,
    include_diff=True,
    include_power=False,
    clip_appliance_power=True,
    target_is_prediction=False,
#    independently_center_inputs=True,
#    standardise_input=True,
#    standardise_targets=True,
#    unit_variance_targets=False,
#    input_padding=2,
    lag=0,
    clip_input=False
#    classification=True
#    reshape_target_to_2D=True
#    input_stats={'mean': np.array([ 0.05526326], dtype=np.float32),
#                 'std': np.array([ 0.12636775], dtype=np.float32)},
#    target_stats={
#        'mean': np.array([ 0.04066789, 0.01881946,
#                           0.24639061, 0.17608672, 0.10273963],
#                          dtype=np.float32),
#        'std': np.array([ 0.11449792, 0.07338708,
#                          0.26608968, 0.33463112, 0.21250485],
#                         dtype=np.float32)}
)

# NOTE(review): N is not referenced by exp_a below (it hard-codes 50) — confirm.
N = 50
# Keyword arguments shared by the experiment network(s); see exp_a().
net_dict = dict(
    save_plot_interval=SAVE_PLOT_INTERVAL,
#    loss_function=partial(ignore_inactive, loss_func=mdn_nll, seq_length=SEQ_LENGTH),
#    loss_function=lambda x, t: mdn_nll(x, t).mean(),
    loss_function=lambda x, t: mse(x, t).mean(),
#    loss_function=lambda x, t: binary_crossentropy(x, t).mean(),
#    loss_function=partial(scaled_cost, loss_func=mse),
#    loss_function=ignore_inactive,
#    loss_function=partial(scaled_cost3, ignore_inactive=False),
#    updates_func=momentum,
    updates_func=clipped_nesterov_momentum,
    updates_kwargs={'clip_range': (0, 10)},
    learning_rate=1e-4,
    learning_rate_changes_by_iteration={
#        1000: 1e-4,
#        4000: 1e-5
#        800: 1e-4
#        500: 1e-3
#        4000: 1e-03,
#        6000: 5e-06,
#        7000: 1e-06
#        2000: 5e-06
#        3000: 1e-05
#        7000: 5e-06,
#        10000: 1e-06,
#        15000: 5e-07,
#        50000: 1e-07
    },
    do_save_activations=True,
#    auto_reshape=False,
#    plotter=CentralOutputPlotter
    plotter=Plotter(n_seq_to_plot=10)
)
def exp_a(name):
    """Build experiment 'a': a ReLU recurrent net with a linear readout.

    The input is the power diff signal and the output is a single
    appliance; the first appliance is never skipped while all others are
    always skipped (see the skip_probability* entries in source_dict).
    """
    global source
    source = RealApplianceSource(**deepcopy(source_dict))

    cfg = deepcopy(net_dict)
    cfg.update(dict(
        experiment_name=name,
        source=source
    ))

    # single recurrent hidden layer with near-identity recurrence
    hidden_layer = {
        'type': RecurrentLayer,
        'num_units': 50,
        'W_in_to_hid': Normal(std=1),
        'W_hid_to_hid': Identity(scale=0.9),
        'nonlinearity': rectify,
        'learn_init': True,
        'precompute_input': True
    }
    # linear readout, one unit per target output
    readout_layer = {
        'type': DenseLayer,
        'num_units': source.n_outputs,
        'nonlinearity': None,
        'W': Normal(std=1/sqrt(50))
    }
    cfg['layers_config'] = [hidden_layer, readout_layer]
    return Net(**cfg)
def main():
    """Run each configured experiment in turn, logging failures per experiment."""
    # EXPERIMENTS = list('abcdefghijklmnopqrstuvwxyz')
    # EXPERIMENTS = list('abcdefghi')
    EXPERIMENTS = list('a')
    for experiment in EXPERIMENTS:
        full_exp_name = NAME + experiment
        # init_experiment returns a string like "exp_a('e415a')" which eval
        # dispatches to the matching exp_* builder defined in this module
        func_call = init_experiment(PATH, experiment, full_exp_name)
        logger = logging.getLogger(full_exp_name)
        try:
            net = eval(func_call)
            run_experiment(net, epochs=5000)
        except KeyboardInterrupt:
            # user interrupt stops the whole experiment sweep
            logger.info("KeyboardInterrupt")
            break
        except Exception as exception:
            # any other failure is logged and the next experiment proceeds
            logger.exception("Exception")
            # raise
        else:
            # only on success: free the large activation cache before
            # the next experiment allocates its own
            del net.source.train_activations
            gc.collect()
        finally:
            logging.shutdown()
if __name__ == "__main__":
main()
|
mit
|
adykstra/mne-python
|
examples/inverse/plot_label_from_stc.py
|
3
|
4105
|
"""
=================================================
Generate a functional label from source estimates
=================================================
Threshold source estimates and produce a functional label. The label
is typically the region of interest that contains high values.
Here we compare the average time course in the anatomical label obtained
by FreeSurfer segmentation and the average time course from the
functional label. As expected the time course in the functional
label yields higher values.
"""
# Author: Luke Bloy <[email protected]>
# Alex Gramfort <[email protected]>
# License: BSD (3-clause)
import numpy as np
import matplotlib.pyplot as plt

import mne
from mne.minimum_norm import read_inverse_operator, apply_inverse
from mne.datasets import sample

print(__doc__)

data_path = sample.data_path()
fname_inv = data_path + '/MEG/sample/sample_audvis-meg-oct-6-meg-inv.fif'
fname_evoked = data_path + '/MEG/sample/sample_audvis-ave.fif'
subjects_dir = data_path + '/subjects'
subject = 'sample'

snr = 3.0
lambda2 = 1.0 / snr ** 2
method = "dSPM"  # use dSPM method (could also be MNE or sLORETA)

# Compute a label/ROI based on the peak power between 80 and 120 ms.
# The label bankssts-lh is used for the comparison.
aparc_label_name = 'bankssts-lh'
tmin, tmax = 0.080, 0.120

# Load data
evoked = mne.read_evokeds(fname_evoked, condition=0, baseline=(None, 0))
inverse_operator = read_inverse_operator(fname_inv)
src = inverse_operator['src']  # get the source space

# Compute inverse solution
stc = apply_inverse(evoked, inverse_operator, lambda2, method,
                    pick_ori='normal')

# Make an STC in the time interval of interest and take the mean
stc_mean = stc.copy().crop(tmin, tmax).mean()

# use the stc_mean to generate a functional label
# region growing is halted at 60% of the peak value within the
# anatomical label / ROI specified by aparc_label_name
label = mne.read_labels_from_annot(subject, parc='aparc',
                                   subjects_dir=subjects_dir,
                                   regexp=aparc_label_name)[0]
stc_mean_label = stc_mean.in_label(label)
data = np.abs(stc_mean_label.data)
stc_mean_label.data[data < 0.6 * np.max(data)] = 0.

# 8.5% of original source space vertices were omitted during forward
# calculation, suppress the warning here with verbose='error'
func_labels, _ = mne.stc_to_label(stc_mean_label, src=src, smooth=True,
                                  subjects_dir=subjects_dir, connected=True,
                                  verbose='error')

# take first as func_labels are ordered based on maximum values in stc
func_label = func_labels[0]

# load the anatomical ROI for comparison
anat_label = mne.read_labels_from_annot(subject, parc='aparc',
                                        subjects_dir=subjects_dir,
                                        regexp=aparc_label_name)[0]

# extract the anatomical time course for each label
stc_anat_label = stc.in_label(anat_label)
pca_anat = stc.extract_label_time_course(anat_label, src, mode='pca_flip')[0]

stc_func_label = stc.in_label(func_label)
pca_func = stc.extract_label_time_course(func_label, src, mode='pca_flip')[0]

# flip the pca so that the max power between tmin and tmax is positive
pca_anat *= np.sign(pca_anat[np.argmax(np.abs(pca_anat))])
# BUG FIX: flip pca_func by the sign at *its own* largest-magnitude sample;
# the original indexed it with np.argmax(np.abs(pca_anat)), which can pick
# the wrong sample and leave the functional trace inverted.
pca_func *= np.sign(pca_func[np.argmax(np.abs(pca_func))])

###############################################################################
# plot the time courses....
plt.figure()
plt.plot(1e3 * stc_anat_label.times, pca_anat, 'k',
         label='Anatomical %s' % aparc_label_name)
plt.plot(1e3 * stc_func_label.times, pca_func, 'b',
         label='Functional %s' % aparc_label_name)
plt.legend()
plt.show()

###############################################################################
# plot brain in 3D with PySurfer if available
brain = stc_mean.plot(hemi='lh', subjects_dir=subjects_dir)
brain.show_view('lateral')

# show both labels
brain.add_label(anat_label, borders=True, color='k')
brain.add_label(func_label, borders=True, color='b')
|
bsd-3-clause
|
gear/HPSC
|
lec_code/bem/step02.py
|
1
|
1816
|
import numpy, math
from matplotlib import pyplot
from mpl_toolkits.mplot3d import Axes3D

# Boundary-element (BEM) solution of Laplace's equation on the rectangle
# [0,2] x [0,1], compared against a truncated separation-of-variables
# series solution (blue = BEM, red = series).

n = 64               # number of boundary panels
pi = math.pi
x = numpy.zeros(n)   # panel-node x coordinates
y = numpy.zeros(n)   # panel-node y coordinates
u = numpy.zeros(n)   # boundary potential (where prescribed)
bc = numpy.zeros(n)  # 1: potential given (solve for flux); 0: flux given

# walk the rectangle boundary: bottom, right, top (reversed), left (downward)
for i in range(0,n):
    if i < (n/4):          # bottom edge, y = 0, u = 0
        x[i] = i*8./n
        y[i] = 0
        u[i] = 0
        bc[i] = 1
    elif i < n/2+1:        # right edge, x = 2
        x[i] = 2
        y[i] = (i-n/4)*4./n
        u[i] = (i-n/4)*4./n
        bc[i] = 0
    elif i < 3*n/4:        # top edge, y = 1, u = 0
        x[i] = (3*n/4-i)*8./n
        y[i] = 1
        u[i] = 0
        bc[i] = 1
    else:                  # left edge, x = 0
        x[i] = 0
        y[i] = (n-i)*4./n
        u[i] = 0
        bc[i] = 0

# index of the next node, wrapping at the end of the boundary
ip1 = numpy.arange(n)
ip1 += 1
ip1[n-1] = 0

# panel midpoints, components, and lengths
xm = 0.5*(x+x[ip1])
ym = 0.5*(y+y[ip1])
dx = x[ip1]-x
dy = y[ip1]-y
d = numpy.zeros(n)
for i in range(0,n):
    d[i] = math.sqrt(dx[i]*dx[i]+dy[i]*dy[i])

# assemble single-layer (G) and double-layer (H) influence matrices
G = numpy.zeros((n,n))
H = numpy.zeros((n,n))
for i in range(0,n):
    for j in range(0,n):
        if i !=j:
            rx = xm[i]-xm[j]
            ry = ym[i]-ym[j]
            r = math.sqrt(rx*rx+ry*ry)
            G[i,j] = -math.log(r)*d[j]/2/pi
            H[i,j] = (rx*dy[j]-ry*dx[j])/r/r/2/pi
    # analytical self-influence terms
    G[i,i] = d[i]*(1-math.log(d[i]/2))/2/pi
    H[i,i] = 0.5

# swap columns (with sign change) so all unknowns end up on the left side
for i in range(0,n):
    if bc[i] == 1:
        # BUG FIX: G[:,i] is a numpy *view*; without .copy() the assignment
        # to G[:,i] below also overwrites tmp, so H[:,i] was never actually
        # swapped.  (Benign for this particular problem only because u == 0
        # on every bc == 1 panel, so H's swapped columns multiply zeros.)
        tmp = G[:,i].copy()
        G[:,i] = -H[:,i]
        H[:,i] = -tmp

# solve for the unknown boundary values, then unscramble u and un so that
# u holds potentials and un holds normal fluxes everywhere
b = H.dot(u)
un = numpy.linalg.solve(G,b)
for i in range(0,n):
    if bc[i] == 1:
        tmp = u[i]
        u[i] = un[i]
        un[i] = tmp

# analytical series solution evaluated at the panel nodes (odd terms to 99)
ux = numpy.zeros(n)
for i in range(0,n):
    uxi = 0
    for j in range(1,101,2):
        uxi += 1/(j*pi)**2/math.sinh(2*j*pi)*math.sinh(j*pi*x[i])*math.cos(j*pi*y[i])
    ux[i] = x[i]/4-4*uxi

# 3D scatter of BEM result (blue) against the series solution (red)
fig = pyplot.figure(figsize=(11,7), dpi=100)
ax = fig.gca(projection='3d')
ax.scatter(x, y, u, c='b')
ax.scatter(x, y, ux, c='r')
ax.set_zlim3d(0, 1)
ax.view_init(elev=40., azim=-130.)
pyplot.show()
|
gpl-3.0
|
uahic/nest-simulator
|
testsuite/manualtests/stdp_check.py
|
13
|
4713
|
# -*- coding: utf-8 -*-
#
# stdp_check.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
from matplotlib.pylab import *
# Test script to reproduce changes in weight of a STDP synapse in an event-driven way.
# Pre- and post-synaptic spike trains are read in from spike_detector-0-0-3.gdf
# (output of test_stdp_poiss.sli).
# output: pre/post \t spike time \t weight
#
# Synaptic dynamics for STDP synapses according to Abigail Morrison's
# STDP model (see stdp_rec.pdf).
#
# first version: Moritz Helias, april 2006
# adapted to python MH, SK, May 2008
def stdp(w_init, w_max, pre_spikes, post_spikes, alpha, mu_plus, mu_minus, lmbd, tau_plus, tau_minus, delay):
    """Replay spike trains through a weight-dependent (power-law) STDP rule.

    Walks the two time-sorted spike trains in order, keeping exponentially
    decaying pre-/postsynaptic traces (K_plus / K_minus).  Each presynaptic
    spike depresses the weight, each postsynaptic spike facilitates it, and
    the weight is clipped to [0, w_max].  One line per processed spike is
    printed: "pre|post <time> <weight>".  Returns the final weight.

    NOTE(review): once one train is exhausted, a still-pending spike of the
    other train may go unprocessed — behavior preserved from the original.
    """
    w = w_init    # current weight
    i = 0         # index of next presynaptic spike
    j = 0         # index of next postsynaptic spike

    K_plus = 0.   # presynaptic trace
    K_minus = 0.  # postsynaptic trace
    last_t = 0.   # time up to which the traces have been evolved

    advance = True
    while advance:
        advance = False

        # next spike is presynaptic
        if pre_spikes[i] < post_spikes[j]:
            dt = pre_spikes[i] - last_t

            # evolve exponential filters
            K_plus *= exp(-dt / tau_plus)
            K_minus *= exp(-dt / tau_minus)

            # depression
            w = w / w_max - lmbd * alpha * (w / w_max)**mu_minus * K_minus
            if w > 0.:
                w *= w_max
            else:
                w = 0.

            # parenthesized print keeps this runnable under Python 2 and 3
            print("pre\t%.16f\t%.16f" % (pre_spikes[i], w))

            K_plus += 1.
            last_t = pre_spikes[i]  # time evolved until here

            if i < len(pre_spikes) - 1:
                i += 1
                advance = True

        # same timing of next pre- and postsynaptic spike
        elif pre_spikes[i] == post_spikes[j]:
            dt = pre_spikes[i] - last_t

            # evolve exponential filters
            K_plus *= exp(-dt / tau_plus)
            K_minus *= exp(-dt / tau_minus)

            # facilitation
            w = w / w_max + lmbd * (1. - w / w_max)**mu_plus * K_plus
            if w < 1.:
                w *= w_max
            else:
                w = w_max

            print("post\t%.16f\t%.16f" % (post_spikes[j] - delay, w))

            # depression
            w = w / w_max - lmbd * alpha * (w / w_max)**mu_minus * K_minus
            if w > 0.:
                w *= w_max
            else:
                w = 0.

            print("pre\t%.16f\t%.16f" % (pre_spikes[i], w))

            K_plus += 1.
            K_minus += 1.
            last_t = pre_spikes[i]  # time evolved until here

            if i < len(pre_spikes) - 1:
                i += 1
                advance = True
            if j < len(post_spikes) - 1:
                j += 1
                advance = True

        # next spike is postsynaptic
        else:
            dt = post_spikes[j] - last_t

            # evolve exponential filters
            K_plus *= exp(-dt / tau_plus)
            K_minus *= exp(-dt / tau_minus)

            # facilitation
            w = w / w_max + lmbd * (1. - w / w_max)**mu_plus * K_plus
            if w < 1.:
                w *= w_max
            else:
                w = w_max

            print("post\t%.16f\t%.16f" % (post_spikes[j] - delay, w))

            K_minus += 1.
            last_t = post_spikes[j]  # time evolved until here

            if j < len(post_spikes) - 1:
                j += 1
                advance = True

    return w
# stdp parameters
w_init = 35.    # initial synaptic weight
w_max = 70.     # hard upper bound on the weight
alpha = .95     # depression/facilitation asymmetry factor
mu_plus = .05   # facilitation weight-dependence exponent
mu_minus = .05  # depression weight-dependence exponent
lmbd = .025     # learning rate
tau_plus = 20.  # presynaptic trace time constant
tau_minus = 20. # postsynaptic trace time constant

# dendritic delay
delay = 1.

# load spikes from simulation with test_stdp_poiss.sli
# NOTE(review): load() and find() come from the matplotlib.pylab star import;
# both were removed in modern matplotlib (numpy.loadtxt / numpy.nonzero) —
# this script requires a legacy matplotlib to run as-is.
spikes = load("spike_detector-0-0-3.gdf")
pre_spikes = spikes[find(spikes[:,0] == 5), 1]

# delay is purely dendritic
# postsynaptic spike arrives at sp_j + delay at the synapse
post_spikes = spikes[find(spikes[:,0] == 6), 1] + delay

# calculate development of stdp weight
stdp(w_init, w_max, pre_spikes, post_spikes, alpha, mu_plus, mu_minus, lmbd, tau_plus, tau_minus, delay)
|
gpl-2.0
|
ashhher3/scikit-learn
|
examples/decomposition/plot_pca_vs_lda.py
|
182
|
1743
|
"""
=======================================================
Comparison of LDA and PCA 2D projection of Iris dataset
=======================================================
The Iris dataset represents 3 kind of Iris flowers (Setosa, Versicolour
and Virginica) with 4 attributes: sepal length, sepal width, petal length
and petal width.
Principal Component Analysis (PCA) applied to this data identifies the
combination of attributes (principal components, or directions in the
feature space) that account for the most variance in the data. Here we
plot the different samples on the 2 first principal components.
Linear Discriminant Analysis (LDA) tries to identify attributes that
account for the most variance *between classes*. In particular,
LDA, in contrast to PCA, is a supervised method, using known class labels.
"""
print(__doc__)

import matplotlib.pyplot as plt

from sklearn import datasets
from sklearn.decomposition import PCA
from sklearn.lda import LDA

# Fit both 2D projections on the iris measurements.
iris = datasets.load_iris()
features = iris.data
labels = iris.target
label_names = iris.target_names

pca = PCA(n_components=2)
pca_proj = pca.fit(features).transform(features)

lda = LDA(n_components=2)
lda_proj = lda.fit(features, labels).transform(features)

# Percentage of variance explained for each components
print('explained variance ratio (first two components): %s'
      % str(pca.explained_variance_ratio_))


def _scatter_by_class(points, title):
    # One figure with one colour per iris species.
    plt.figure()
    for colour, idx, name in zip("rgb", [0, 1, 2], label_names):
        plt.scatter(points[labels == idx, 0], points[labels == idx, 1],
                    c=colour, label=name)
    plt.legend()
    plt.title(title)


_scatter_by_class(pca_proj, 'PCA of IRIS dataset')
_scatter_by_class(lda_proj, 'LDA of IRIS dataset')
plt.show()
|
bsd-3-clause
|
aerler/WRF-Projects
|
src/archive/plotVar.py
|
1
|
26278
|
'''
Created on 2012-11-05
A simple script that reads a WRF and NARR data and displays it in a proper geographic projection;
application here is plotting precipitation in the inner WRF domain.
@author: Andre R. Erler
'''
## includes
from copy import copy # to copy map projection objects
# matplotlib config: size etc.
import numpy as np
import matplotlib.pylab as pyl
import matplotlib as mpl
mpl.rc('lines', linewidth=1.)
mpl.rc('font', size=10)
from mpl_toolkits.basemap import Basemap
from mpl_toolkits.basemap import maskoceans
# pygeode stuff
from myDatasets.loadWRF import openWRFclim, WRFtitle
from myDatasets.loadCESM import openCESMclim, CESMtitle
from myDatasets.loadCFSR import openCFSRclim
from myDatasets.loadNARR import openNARRclim
from myDatasets.loadGPCC import openGPCCclim
from myDatasets.loadCRU import openCRUclim
from myDatasets.loadPRISM import openPRISMclim
#from pygeode.plot import plot_v1 as pl
#from pygeode.plot import basemap as bm
## figure settings
def getFigureSettings(nexp, cbo):
    """Return print/figure layout settings for an nexp-panel plot.

    Parameters
    ----------
    nexp : int
      number of panels; supported values are 1, 2, 4, and 6
    cbo : str
      requested colorbar orientation, 'vertical' or 'horizontal'
      (forced to 'horizontal' for 6 panels)

    Returns
    -------
    (sf, figformat, folder, margins, caxpos, subplot, figsize, cbo)
      savefig kwargs, file format, output folder, subplot margins,
      colorbar axes position, (rows, cols), figure size, final cbo

    Raises
    ------
    ValueError
      for an unsupported panel count (previously this crashed later
      with UnboundLocalError on 'subplot')
    """
    sf = dict(dpi=150)  # print properties
    figformat = 'png'
    folder = '/home/me/Research/Dynamical Downscaling/Figures/'  # figure directory
    # figure out colorbar placement
    if cbo == 'vertical':
        margins = dict(bottom=0.02, left=0.065, right=.885, top=.925, hspace=0.05, wspace=0.05)
        caxpos = [0.91, 0.05, 0.03, 0.9]
    else:  # 'horizontal'
        margins = dict(bottom=0.1, left=0.065, right=.9725, top=.925, hspace=0.05, wspace=0.05)
        caxpos = [0.05, 0.05, 0.9, 0.03]
    # pane settings
    if nexp == 1:
        ## 1 panel
        subplot = (1,1)
        figsize = (3.75,3.75) #figsize = (6.25,6.25) #figsize = (7,5.5)
        margins = dict(bottom=0.025, left=0.075, right=0.875, top=0.875, hspace=0.0, wspace=0.0)
    elif nexp == 2:
        ## 2 panel
        subplot = (1,2)
        figsize = (6.25,5.5)
    elif nexp == 4:
        # 4 panel (a second identical 'elif nexp == 4' branch was removed: dead code)
        subplot = (2,2)
        figsize = (6.25,6.25)
        margins = dict(bottom=0.025, left=0.065, right=.885, top=.925, hspace=0.05, wspace=0.05)
    elif nexp == 6:
        # 6 panel
        subplot = (2,3) # rows, columns
        figsize = (9.25,6.5) # width, height (inches)
        cbo = 'horizontal'  # six panels always use a horizontal colorbar
        margins = dict(bottom=0.09, left=0.05, right=.97, top=.92, hspace=0.1, wspace=0.05)
        caxpos = [0.05, 0.025, 0.9, 0.03]
    else:
        raise ValueError("unsupported number of panels: %s" % nexp)
    # return values
    return sf, figformat, folder, margins, caxpos, subplot, figsize, cbo
## setup projection: lambert conformal
def getProjectionSettings(projtype):
    """Return Basemap keyword arguments for a named map projection.

    Parameters
    ----------
    projtype : str
      one of 'lcc-new', 'lcc-fine', 'lcc-small', 'lcc-intermed',
      'lcc-large', 'laea', 'ortho-NA'

    Returns
    -------
    projection : dict
      keyword arguments for mpl_toolkits.basemap.Basemap
    grid : int
      parallel/meridian spacing in degrees
    res : str
      coastline resolution code taken from the projection dict

    Raises
    ------
    ValueError
      for an unknown projtype (previously this fell through and crashed
      with UnboundLocalError on 'projection')
    """
    # lon_0,lat_0 is central point. lat_ts is latitude of true scale.
    if projtype == 'lcc-new':
        ## Lambert Conic Conformal - New Fine Domain
        projection = dict(projection='lcc', lat_0=55, lon_0=-120, lat_1=52, rsphere=(6378137.00,6356752.3142),#
                          width=180*10e3, height=180*10e3, area_thresh = 1000., resolution='l')
    elif projtype == 'lcc-fine':
        ## Lambert Conic Conformal - Fine Domain
        projection = dict(projection='lcc', lat_0=58, lon_0=-132, lat_1=53, rsphere=(6378137.00,6356752.3142),#
                          width=200*10e3, height=300*10e3, area_thresh = 1000., resolution='l')
    elif projtype == 'lcc-small':
        ## Lambert Conic Conformal - Small Domain
        projection = dict(projection='lcc', lat_0=56, lon_0=-130, lat_1=53, rsphere=(6378137.00,6356752.3142),#
                          width=2500e3, height=2650e3, area_thresh = 1000., resolution='l')
    elif projtype == 'lcc-intermed':
        ## Lambert Conic Conformal - Intermed Domain
        projection = dict(projection='lcc', lat_0=57, lon_0=-140, lat_1=53, rsphere=(6378137.00,6356752.3142),#
                          width=4000e3, height=3400e3, area_thresh = 1000., resolution='l')
    elif projtype == 'lcc-large':
        ## Lambert Conic Conformal - Large Domain
        projection = dict(projection='lcc', lat_0=54.5, lon_0=-140, lat_1=53, #rsphere=(6378137.00,6356752.3142),#
                          width=11000e3, height=7500e3, area_thresh = 10e3, resolution='l')
    elif projtype == 'laea':
        ## Lambert Azimuthal Equal Area
        projection = dict(projection='laea', lat_0=57, lon_0=-137, lat_ts=53, resolution='l', #
                          width=259*30e3, height=179*30e3, rsphere=(6378137.00,6356752.3142), area_thresh = 1000.)
    elif projtype == 'ortho-NA':
        ## Orthographic Projection
        projection = dict(projection='ortho', lat_0 = 75, lon_0 = -137, resolution = 'l', area_thresh = 1000.)
    else:
        raise ValueError("unknown projection type: %s" % projtype)
    # resolution of coast lines
    grid = 10; res = projection['resolution']
    # return values
    return projection, grid, res
# used for annotation
# Month names (padded with trailing spaces) used to build plot annotations.
monthnames = ['January ', 'February ', 'March ', 'April ', 'May ', 'June ', #
              'July ', 'August ', 'September', 'October ', 'November ', 'December ']
if __name__ == '__main__':
# filename = 'wrfsrfc_d%02i_clim.nc' # domain substitution
# CFSR = openCFSRclim(filename='CFSRclimFineRes1979-2009.nc')
# RRTMG = openWRFclim(exp='rrtmg-arb1', filetypes=['wrfsrfc_d%02i_clim.nc'], domains=dom)
# axtitles = ['CRU Climatology', 'WRF Control', 'WRF RRTMG', 'Polar WRF']
## general settings and shortcuts
H01 = '1979'; H02 = '1979-1980'; H03 = '1979-1981'; H30 = '1979-2009' # for tests
H05 = '1979-1983'; H10 = '1979-1988'; H15 = '1979-1993' # historical validation periods
G10 = '1969-1978'; I10 = '1989-1998'; J10 = '1999-2008' # additional historical periods
A03 = '2045-2047'; A05 = '2045-2049'; A10 = '2045-2054'; A15 = '2045-2059' # mid-21st century
B03 = '2095-2097'; B05 = '2095-2099'; B10 = '2095-2104'; B15 = '2095-2109' # late 21st century
lprint = True # write plots to disk
ltitle = True # plot/figure title
lcontour = False # contour or pcolor plot
lframe = True # draw domain boundary
cbo = 'vertical' # vertical horizontal
resolution=None # only for GPCC (None = default/highest)
## case settings
# observations
case = 'new' # name tag
projtype = 'lcc-new' # 'lcc-new'
period = H01; dom = (1,2,)
# explist = ['CRU']*3 + ['NARR', 'CRU', 'CRU']; period = [H30, G10, H10, None, I10, J10]
# explist = ['PRISM-10km','ctrl-1','NARR','PRISM','max','CRU']
explist = ['PRISM-10km','new','noah','nogulf','max','CRU']; period = [H01]*5 + [H10]
# explist = ['GPCC']; vars = ['stns']; seasons = ['annual']
# explist = ['cfsr', 'ctrl-1', 'max', 'NARR', 'PRISM', 'CRU']; # period = [H10]*5 + [None]
# explist = ['cfsr', 'ens-Z', 'max', 'ctrl-1']
# explist = ['tom', 'ens-Z', 'max', 'ctrl-1']
# explist = ['CFSR', 'ctrl-1', 'CRU', 'NARR', 'CESM', 'GPCC']
# explist = ['ctrl-1', 'PRISM', 'Ctrl-1', 'CRU']
# explist = ['ctrl-1', 'grell', 'tiedt', 'PRISM', 'CRU', 'GPCC']
# explist = ['milb', 'PRISM', 'wdm6', 'tom', 'ctrl-1', 'max']
# explist = ['wdm6','tom', 'ctrl-1', 'max']
# explist = ['ctrl-1', 'cam3', 'noahmp', 'pwrf']
# explist = ['PRISM', 'max', 'ctrl-1', 'GPCC']
# explist = ['PRISM', 'max', 'ctrl-1', 'CFSR']
# explist = ['nmpbar', 'nmpsnw', 'nmpdef', 'ctrl-1']
# explist = ['nmpbar', 'clm4', 'max', 'ctrl-1']
# explist = ['PRISM', 'tom', 'tiedt', 'ctrl-1']
# explist = ['PRISM', 'tom', 'nmpbar', 'ctrl-1']
# explist = ['grell','PRISM', 'tiedt', 'nmpbar', 'ctrl-1', 'max']
# explist = ['ctrl-1', 'grell', 'tiedt', 'pbl4']
# explist = ['PRISM', 'ctrl-1-2000', 'cam3', 'Ctrl-1']
# explist = ['PRISM', 'nmpbar', 'tom', 'ctrl-1']
# explist = ['ctrl-1', 'tom', 'tiedt', 'nmpbar']
# explist = ['ens-Z', 'ens-B', 'ens-A', 'ens-C'];
# explist = ['Ens-Z', 'Ens-B', 'Ens-A', 'Ens-C']; # CESM historical
# explist = ['SeaIce', 'Ens-B-rcp85', 'Ens-A-rcp85', 'Ens-Z-rcp85']; # CESM RCP 8.5 projections
# explist = ['SeaIce']; # CESM RCP 8.5 projections
# explist = ['Ctrl-1', 'Ctrl-1-rcp85', 'Ctrl-1-rcp85', 'SeaIce']; period = (H10, A10, B10, A10)
# explist = ['ens-Z', 'CRU', 'ens-B', 'ens-A', 'GPCC', 'ens-C']; dom = (1,)
# explist = ['ctrl-1', 'ctrl-1-2000','ctrl-1-2050','ctrl-2-2050']; period = (H10, H10, A10, A10)
# explist = ['ctrl-1', 'CRU', 'NARR', 'CESM']
# explist = ['max', 'PRISM', 'grell', 'ctrl-1']
# explist = ['ctrl-1', 'PRISM', 'GPCC', 'NARR']
# explist = ['ctrl-1']
# explist = ['modis']
# explist = ['PRISM']; period = None
## select variables and seasons
# vars = ['rainnc', 'rainc', 'T2']
# vars = ['snowh']; seasons = [8]
# vars = ['rain']
# vars = ['evap']
# vars = ['snow']
# vars = ['rain', 'T2', 'p-et','evap']
# vars = ['p-et','rain','snow']
# vars = ['GLW','OLR','qtfx']
# vars = ['SWDOWN','GLW','OLR']
# vars = ['hfx','lhfx']
# vars = ['qtfx','lhfr']
vars = ['rain','T2']
# vars = ['T2']
# vars = ['seaice']; seasons = [8] # September seaice
# vars = ['rain','T2','snow']
# vars = ['snow', 'snowh']
# vars = ['SST','T2','rain','snow','snowh']
# seasons = [ [i] for i in xrange(12) ] # monthly
# seasons = ['annual']
# seasons = ['summer']
# seasons = ['winter']
seasons = ['winter', 'summer', 'annual']
# vars = ['snow']; seasons = ['fall','winter','spring']
# vars = ['rain']; seasons = ['annual']
# vars = ['zs']; seasons = ['hidef']
# vars = ['stns']; seasons = ['annual']
# vars = ['lndcls']; seasons = [''] # static
## load data
if not isinstance(period,(tuple,list)): period = (period,)*len(explist)
exps = []; axtitles = []
for exp,prd in zip(explist,period):
ext = exp; axt = ''
if isinstance(exp,str):
if exp[0].isupper():
if exp == 'GPCC': ext = (openGPCCclim(resolution=resolution,period=prd),); axt = 'GPCC Observations' # ,period=prd
elif exp == 'CRU': ext = (openCRUclim(period=prd),); axt = 'CRU Observations'
elif exp[0:5] == 'PRISM': # all PRISM derivatives
if exp == 'PRISM': prismfile = 'prism_clim.nc'
elif exp == 'PRISM-10km': prismfile = 'prism_10km.nc'
if len(vars) ==1 and vars[0] == 'rain':
ext = (openGPCCclim(resolution='0.25'), openPRISMclim(filename=prismfile)); axt = 'PRISM (and GPCC)'
else: ext = (openCRUclim(period='1979-2009'), openPRISMclim(filename=prismfile)); axt = 'PRISM (and CRU)'
# ext = (openPRISMclim(),)
elif exp == 'CFSR': ext = (openCFSRclim(period=prd),); axt = 'CFSR Reanalysis'
elif exp == 'NARR': ext = (openNARRclim(),); axt = 'NARR Reanalysis'
else: # all other uppercase names are CESM runs
ext = (openCESMclim(exp=exp, period=prd),)
axt = CESMtitle.get(exp,exp)
else: # WRF runs are all in lower case
ext = openWRFclim(exp=exp, period=prd, domains=dom)
axt = WRFtitle.get(exp,exp)
exps.append(ext); axtitles.append(axt)
print(exps[-1][-1])
# count experiment tuples (layers per panel)
nexps = []; nlen = len(exps)
for n in range(nlen):
if not isinstance(exps[n],(tuple,list)): # should not be necessary
exps[n] = (exps[n],)
nexps.append(len(exps[n])) # layer counter for each panel
# get figure settings
sf, figformat, folder, margins, caxpos, subplot, figsize, cbo = getFigureSettings(nexp=nlen, cbo=cbo)
# get projections settings
projection, grid, res = getProjectionSettings(projtype=projtype)
## loop over vars and seasons
maps = []; x = []; y = [] # projection objects and coordinate fields (only computed once)
# start loop
for var in vars:
oldvar = var
for season in seasons:
## settings
# plot variable and averaging
cbl = None; clim = None
lmskocn = False; lmsklnd = False # mask ocean or land?
# color maps and scale (contour levels)
cmap = mpl.cm.gist_ncar; cmap.set_over('white'); cmap.set_under('black')
if var == 'snow': # snow (liquid water equivalent)
lmskocn = True; clbl = '%2.0f' # kg/m^2
clevs = np.linspace(0,200,41)
elif var == 'snowh': # snow (depth/height)
lmskocn = True; clbl = '%2.1f' # m
clevs = np.linspace(0,2,41)
elif var=='hfx' or var=='lhfx' or var=='qtfx': # heat fluxes (W / m^2)
clevs = np.linspace(-20,100,41); clbl = '%03.0f'
if var == 'qtfx': clevs = clevs * 2
if season == 'winter': clevs = clevs - 30
elif season == 'summer': clevs = clevs + 30
elif var=='GLW': # heat fluxes (W / m^2)
clevs = np.linspace(200,320,41); clbl = '%03.0f'
if season == 'winter': clevs = clevs - 40
elif season == 'summer': clevs = clevs + 40
elif var=='OLR': # heat fluxes (W / m^2)
clevs = np.linspace(190,240,31); clbl = '%03.0f'
if season == 'winter': clevs = clevs - 20
elif season == 'summer': clevs = clevs + 30
elif var=='rfx': # heat fluxes (W / m^2)
clevs = np.linspace(320,470,51); clbl = '%03.0f'
if season == 'winter': clevs = clevs - 100
elif season == 'summer': clevs = clevs + 80
elif var=='SWDOWN' or var=='SWNORM': # heat fluxes (W / m^2)
clevs = np.linspace(80,220,51); clbl = '%03.0f'
if season == 'winter': clevs = clevs - 80
elif season == 'summer': clevs = clevs + 120
elif var == 'lhfr': # relative latent heat flux (fraction)
clevs = np.linspace(0,1,26); clbl = '%2.1f' # fraction
elif var == 'evap': # moisture fluxes (kg /(m^2 s))
clevs = np.linspace(-4,4,25); clbl = '%02.1f'
cmap = mpl.cm.PuOr
elif var == 'p-et': # moisture fluxes (kg /(m^2 s))
# clevs = np.linspace(-3,22,51); clbl = '%02.1f'
clevs = np.linspace(-2,2,25); cmap = mpl.cm.PuOr; clbl = '%02.1f'
elif var == 'rain' or var == 'rainnc': # total precipitation
clevs = np.linspace(0,20,41); clbl = '%02.1f' # mm/day
elif var == 'rainc': # convective precipitation
clevs = np.linspace(0,5,26); clbl = '%02.1f' # mm/day
elif oldvar=='SST' or var=='SST': # skin temperature (SST)
clevs = np.linspace(240,300,61); clbl = '%03.0f' # K
var = 'Ts'; lmsklnd = True # mask land
elif var=='T2' or var=='Ts': # 2m or skin temperature (SST)
clevs = np.linspace(255,290,36); clbl = '%03.0f' # K
if season == 'winter': clevs = clevs - 10
elif season == 'summer': clevs = clevs + 10
elif var == 'seaice': # sea ice fraction
lmsklnd = True # mask land
clevs = np.linspace(0.04,1,25); clbl = '%2.1f' # fraction
cmap.set_under('white')
elif var == 'zs': # surface elevation / topography
if season == 'topo':
lmskocn = True; clim = (-1.,2.5); # nice geographic map feel
clevs = np.hstack((np.array((-1.5,)), np.linspace(0,2.5,26))); clbl = '%02.1f' # km
cmap = mpl.cm.gist_earth; cmap.set_over('white'); cmap.set_under('blue') # topography
elif season == 'hidef':
lmskocn = True; clim = (-0.5,2.5); # good contrast for high elevation
clevs = np.hstack((np.array((-.5,)), np.linspace(0,2.5,26))); clbl = '%02.1f' # km
cmap = mpl.cm.gist_ncar; cmap.set_over('white'); cmap.set_under('blue')
cbl = np.linspace(0,clim[-1],6)
elif var=='stns': # station density
clevs = np.linspace(0,5,6); clbl = '%2i' # stations per grid points
cmap.set_over('purple'); cmap.set_under('white')
elif var=='lndcls': # land use classes (works best with contour plot)
clevs = np.linspace(0.5,24.5,25); cbl = np.linspace(4,24,6)
clbl = '%2i'; cmap.set_over('purple'); cmap.set_under('white')
# time frame / season
if isinstance(season,str):
if season == 'annual': # all month
month = list(range(1,13)); plottype = 'Annual Average'
elif season == 'winter':# DJF
month = [12, 1, 2]; plottype = 'Winter Average'
elif season == 'spring': # MAM
month = [3, 4, 5]; plottype = 'Spring Average'
elif season == 'summer': # JJA
month = [6, 7, 8]; plottype = 'Summer Average'
elif season == 'fall': # SON
month = [9, 10, 11]; plottype = 'Fall Average'
else:
plottype = '' # for static fields
month = [1]
else:
month = season
if len(season) == 1 and isinstance(season[0],int):
plottype = '%s Average'%monthnames[season[0]].strip()
season = '%02i'%(season[0]+1) # number of month, used for file name
else: plottype = 'Average'
# assemble plot title
filename = '%s_%s_%s.%s'%(var,season,case,figformat)
plat = exps[0][0].vardict[var].plotatts
if plat['plotunits']: figtitle = '%s %s [%s]'%(plottype,plat['plottitle'],plat['plotunits'])
else: figtitle = '%s %s'%(plottype,plat['plottitle'])
# feedback
print(('\n\n *** %s %s (%s) *** \n'%(plottype,plat['plottitle'],var)))
## compute data
data = []; lons = []; lats=[] # list of data and coordinate fields to be plotted
# compute average WRF precip
wrfmon = np.array([31.,28.,31.,30.,31.,30.,31.,31.,30.,31.,30.,31.])
stdmon = np.array([31.,28.25,31.,30.,31.,30.,31.,31.,30.,31.,30.,31.])
print(' - loading data\n')
for exptpl in exps:
lontpl = []; lattpl = []; datatpl = []
for exp in exptpl:
# handle dimensions
assert (exp.lon.naxes == 2) and (exp.lat.naxes == 2), '\nWARNING: no coordinate fields found!'
lon = exp.lon.get(); lat = exp.lat.get()
lontpl.append(lon); lattpl.append(lat) # append to data list
# figure out calendar
if 'WRF' in exp.atts.get('description',''): mon = wrfmon
else: mon = stdmon
# extract data field
vardata = np.zeros((exp.y.size,exp.x.size)) # allocate array
# compute average over seasonal range
days = 0
expvar = exp.vardict[var]
if expvar.hasaxis('time'):
for m in month:
n = m-1
vardata += expvar(time=exp.time.values[n]).get().squeeze() * mon[n]
days += mon[n]
vardata /= days # normalize
else:
vardata = expvar.get().squeeze()
vardata = vardata * expvar.plotatts.get('scalefactor',1) # apply unit conversion
if lmskocn:
if 'lnd' in exp.vardict: # CESM and CFSR
vardata[exp.lnd.get()<0.5] = -2. # use land fraction
elif 'lndidx' in exp.vardict:
mask = exp.lndidx.get()
vardata[mask==16] = -2. # use land use index (ocean)
vardata[mask==24] = -2. # use land use index (lake)
else : vardata = maskoceans(lon,lat,vardata,resolution=res,grid=grid)
if lmsklnd:
if 'lnd' in exp.vardict: # CESM and CFSR
vardata[exp.lnd.get()>0.5] = 0 # use land fraction
elif 'lndidx' in exp.vardict: # use land use index (ocean and lake)
mask = exp.lndidx.get(); tmp = vardata.copy(); vardata[:] = 0.
vardata[mask==16] = tmp[mask==16]; vardata[mask==24] = tmp[mask==24]
datatpl.append(vardata) # append to data list
# add tuples to master list
lons.append(lontpl); lats.append(lattpl); data.append(datatpl)
## setup projection
#print(' - setting up figure\n')
nax = subplot[0]*subplot[1] # number of panels
# make figure and axes
f = pyl.figure(facecolor='white', figsize=figsize)
ax = []
for n in range(nax):
ax.append(f.add_subplot(subplot[0],subplot[1],n+1))
f.subplots_adjust(**margins) # hspace, wspace
# lat_1 is first standard parallel.
# lat_2 is second standard parallel (defaults to lat_1).
# lon_0,lat_0 is central point.
# rsphere=(6378137.00,6356752.3142) specifies WGS4 ellipsoid
# area_thresh=1000 means don't plot coastline features less
# than 1000 km^2 in area.
if not maps:
print(' - setting up map projection\n')
mastermap = Basemap(ax=ax[n],**projection)
for axi in ax:
tmp = copy(mastermap)
tmp.ax = axi
maps.append(tmp) # one map for each panel!!
else:
print(' - resetting map projection\n')
for n in range(nax):
maps[n].ax=ax[n] # assign new axes to old projection
# transform coordinates (on per-map basis)
if not (x and y):
print(' - transforming coordinate fields\n')
for n in range(nax):
xtpl = []; ytpl = []
for m in range(nexps[n]):
xx, yy = maps[n](lons[n][m],lats[n][m]) # convert to map-native coordinates
xtpl.append(xx); ytpl.append(yy)
x.append(xtpl); y.append(ytpl)
## Plot data
# draw boundaries of inner domain
if lframe:
print(' - drawing data frames\n')
for n in range(nax):
for m in range(nexps[n]):
bdy = np.ones_like(x[n][m]); bdy[0,:]=0; bdy[-1,:]=0; bdy[:,0]=0; bdy[:,-1]=0
maps[n].contour(x[n][m],y[n][m],bdy,[0],ax=ax[n], colors='k') # draw boundary of inner domain
# draw data
norm = mpl.colors.Normalize(vmin=min(clevs),vmax=max(clevs),clip=True) # for colormap
cd = []
# print(' - creating plots\n')
for n in range(nax):
for m in range(nexps[n]):
print(('panel %i: min %f / max %f / mean %f'%(n,data[n][m].min(),data[n][m].max(),data[n][m].mean())))
if lcontour: cd.append(maps[n].contourf(x[n][m],y[n][m],data[n][m],clevs,ax=ax[n],cmap=cmap, norm=norm,extend='both'))
else: cd.append(maps[n].pcolormesh(x[n][m],y[n][m],data[n][m],cmap=cmap,shading='gouraud'))
# add colorbar
cax = f.add_axes(caxpos)
for cn in cd: # [c1d1, c1d2, c2d2]:
if clim: cn.set_clim(vmin=clim[0],vmax=clim[1])
else: cn.set_clim(vmin=min(clevs),vmax=max(clevs))
cbar = f.colorbar(cax=cax,mappable=cd[0],orientation=cbo,extend='both') # ,size='3%',pad='2%'
if cbl is None: cbl = np.linspace(min(clevs),max(clevs),6)
cbar.set_ticks(cbl); cbar.set_ticklabels([clbl%(lev) for lev in cbl])
## Annotation
#print('\n - annotating plots\n')
# add labels
if ltitle: f.suptitle(figtitle,fontsize=12)
# ax.set_xlabel('Longitude'); ax.set_ylabel('Latitude')
msn = len(maps)/2 # place scale
if projtype == 'lcc-new':
maps[msn].drawmapscale(-128, 48, -120, 55, 400, barstyle='fancy',
fontsize=8, yoffset=0.01*(maps[n].ymax-maps[n].ymin))
elif projtype == 'lcc-small':
maps[msn].drawmapscale(-136, 49, -137, 57, 800, barstyle='fancy', yoffset=0.01*(maps[n].ymax-maps[n].ymin))
elif projtype == 'lcc-large':
maps[msn].drawmapscale(-171, 21, -137, 57, 2000, barstyle='fancy', yoffset=0.01*(maps[n].ymax-maps[n].ymin))
n = -1 # axes counter
for i in range(subplot[0]):
for j in range(subplot[1]):
n += 1 # count up
ax[n].set_title(axtitles[n],fontsize=11) # axes title
if j == 0 : Left = True
else: Left = False
if i == subplot[0]-1: Bottom = True
else: Bottom = False
# land/sea mask
maps[n].drawlsmask(ocean_color='blue', land_color='green',resolution=res,grid=grid)
# black-out continents, if we have no proper land mask
if lmsklnd and not ('lnd' in exps[n][0].vardict or 'lndidx' in exps[n][0].vardict):
maps[n].fillcontinents(color='black',lake_color='black')
# add maps stuff
maps[n].drawcoastlines(linewidth=0.5)
maps[n].drawcountries(linewidth=0.5)
maps[n].drawmapboundary(fill_color='k',linewidth=2)
# labels = [left,right,top,bottom]
if projtype=='lcc-new':
maps[n].drawparallels([40,50,60,70],linewidth=1, labels=[Left,False,False,False])
maps[n].drawparallels([45,55,65],linewidth=0.5, labels=[Left,False,False,False])
maps[n].drawmeridians([-180,-160,-140,-120,-100],linewidth=1, labels=[False,False,False,Bottom])
maps[n].drawmeridians([-170,-150,-130,-110],linewidth=0.5, labels=[False,False,False,Bottom])
elif projtype=='lcc-fine' or projtype=='lcc-small' or projtype=='lcc-intermed':
maps[n].drawparallels([45,65],linewidth=1, labels=[Left,False,False,False])
maps[n].drawparallels([55,75],linewidth=0.5, labels=[Left,False,False,False])
maps[n].drawmeridians([-180,-160,-140,-120,-100],linewidth=1, labels=[False,False,False,Bottom])
maps[n].drawmeridians([-170,-150,-130,-110],linewidth=0.5, labels=[False,False,False,Bottom])
elif projtype == 'lcc-large':
maps[n].drawparallels(list(range(0,90,30)),linewidth=1, labels=[Left,False,False,False])
maps[n].drawparallels(list(range(15,90,30)),linewidth=0.5, labels=[Left,False,False,False])
maps[n].drawmeridians(list(range(-180,180,30)),linewidth=1, labels=[False,False,False,Bottom])
maps[n].drawmeridians(list(range(-165,180,30)),linewidth=0.5, labels=[False,False,False,Bottom])
elif projtype == 'ortho':
maps[n].drawparallels(list(range(-90,90,30)),linewidth=1)
maps[n].drawmeridians(list(range(-180,180,30)),linewidth=1)
# mark stations
# ST_LINA, WESTLOCK_LITKE, JASPER, FORT_MCMURRAY, SHINING_BANK
sn = ['SL', 'WL', 'J', 'FM', 'SB']
slon = [-111.45,-113.85,-118.07,-111.22,-115.97]
slat = [54.3,54.15,52.88,56.65,53.85]
for (axn,mapt) in zip(ax,maps):
for (name,lon,lat) in zip(sn,slon,slat):
xx,yy = mapt(lon, lat)
mapt.plot(xx,yy,'ko',markersize=3)
axn.text(xx+1.5e4,yy-1.5e4,name,ha='left',va='top',fontsize=8)
# save figure to disk
if lprint:
print(('\nSaving figure in '+filename))
f.savefig(folder+filename, **sf) # save figure to pdf
print(folder)
## show plots after all iterations
pyl.show()
|
gpl-3.0
|
olologin/scikit-learn
|
sklearn/decomposition/tests/test_pca.py
|
21
|
18046
|
import numpy as np
from itertools import product
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_no_warnings
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_less
from sklearn import datasets
from sklearn.decomposition import PCA
from sklearn.decomposition import RandomizedPCA
from sklearn.decomposition.pca import _assess_dimension_
from sklearn.decomposition.pca import _infer_dimension_
# Shared module-level fixtures: the iris dataset used by most tests, and
# every svd_solver value the tests exercise.
iris = datasets.load_iris()
solver_list = ['full', 'arpack', 'randomized', 'auto']
def test_pca():
    """PCA with the 'full' solver on a dense array, for every component count."""
    data = iris.data
    n_features = data.shape[1]
    for k in range(n_features):
        estimator = PCA(n_components=k, svd_solver='full')
        projected = estimator.fit(data).transform(data)
        np.testing.assert_equal(projected.shape[1], k)

        # fit(...).transform(...) and fit_transform(...) must agree
        projected_ft = estimator.fit_transform(data)
        assert_array_almost_equal(projected, projected_ft)

        projected = estimator.transform(data)
        projected_ft = estimator.fit_transform(data)
        assert_array_almost_equal(projected, projected_ft)

        # get_covariance and get_precision must be (pseudo-)inverses
        covariance = estimator.get_covariance()
        precision = estimator.get_precision()
        assert_array_almost_equal(np.dot(covariance, precision),
                                  np.eye(n_features), 12)

    # with all components kept, the explained variance ratios sum to one
    estimator = PCA(svd_solver='full')
    estimator.fit(data)
    assert_almost_equal(estimator.explained_variance_ratio_.sum(), 1.0, 3)
def test_pca_arpack_solver():
    """PCA with the 'arpack' solver: valid range, and rejection of extremes."""
    X = iris.data
    d = X.shape[1]

    # Loop excluding the extremes, which are invalid inputs for arpack
    for n_comp in np.arange(1, d):
        pca = PCA(n_components=n_comp, svd_solver='arpack', random_state=0)
        X_r = pca.fit(X).transform(X)
        np.testing.assert_equal(X_r.shape[1], n_comp)

        X_r2 = pca.fit_transform(X)
        assert_array_almost_equal(X_r, X_r2)

        X_r = pca.transform(X)
        assert_array_almost_equal(X_r, X_r2)

        # get_covariance and get_precision must be (pseudo-)inverses
        cov = pca.get_covariance()
        precision = pca.get_precision()
        assert_array_almost_equal(np.dot(cov, precision),
                                  np.eye(d), 12)

    # n_components=0 is invalid for arpack ...
    pca = PCA(n_components=0, svd_solver='arpack', random_state=0)
    assert_raises(ValueError, pca.fit, X)
    # ... and the failed fit must not mutate the constructor parameters
    assert_equal(pca.n_components,
                 PCA(n_components=0,
                     svd_solver='arpack', random_state=0).n_components)
    assert_equal(pca.svd_solver,
                 PCA(n_components=0,
                     svd_solver='arpack', random_state=0).svd_solver)

    # n_components=d (all features) is likewise invalid for arpack
    pca = PCA(n_components=d, svd_solver='arpack', random_state=0)
    assert_raises(ValueError, pca.fit, X)
    assert_equal(pca.n_components,
                 PCA(n_components=d,
                     svd_solver='arpack', random_state=0).n_components)
    # FIX: the reference estimator now uses n_components=d to mirror the
    # estimator under test (was a copy-paste of the n_components=0 case;
    # svd_solver is identical either way, but the check should be symmetric).
    assert_equal(pca.svd_solver,
                 PCA(n_components=d,
                     svd_solver='arpack', random_state=0).svd_solver)
def test_pca_randomized_solver():
    """PCA with the 'randomized' solver: valid range, and rejection of 0."""
    X = iris.data

    # Loop excluding 0, which is invalid for randomized
    for n_comp in np.arange(1, X.shape[1]):
        pca = PCA(n_components=n_comp, svd_solver='randomized', random_state=0)
        X_r = pca.fit(X).transform(X)
        np.testing.assert_equal(X_r.shape[1], n_comp)

        X_r2 = pca.fit_transform(X)
        assert_array_almost_equal(X_r, X_r2)

        X_r = pca.transform(X)
        assert_array_almost_equal(X_r, X_r2)

        # get_covariance and get_precision must be (pseudo-)inverses
        cov = pca.get_covariance()
        precision = pca.get_precision()
        assert_array_almost_equal(np.dot(cov, precision),
                                  np.eye(X.shape[1]), 12)

    # FIX: the construct-and-assert_raises pair was duplicated verbatim;
    # the redundant second copy has been removed.
    pca = PCA(n_components=0, svd_solver='randomized', random_state=0)
    assert_raises(ValueError, pca.fit, X)
    # the failed fit must not mutate the constructor parameters
    assert_equal(pca.n_components,
                 PCA(n_components=0,
                     svd_solver='randomized', random_state=0).n_components)
    assert_equal(pca.svd_solver,
                 PCA(n_components=0,
                     svd_solver='randomized', random_state=0).svd_solver)
def test_no_empty_slice_warning():
    """Fitting with n_samples == n_components must not emit numpy warnings."""
    n_components = 10
    # anything > n_components triggered the empty-slice warning in 0.16
    n_features = n_components + 2
    data = np.random.uniform(-1, 1, size=(n_components, n_features))
    assert_no_warnings(PCA(n_components=n_components).fit, data)
def test_whitening():
    """Whitened PCA output must have unit variance per retained component."""
    rng = np.random.RandomState(0)
    n_samples, n_features = 100, 80
    n_components, rank = 30, 50

    # low-rank data with correlated features
    X = np.dot(rng.randn(n_samples, rank),
               np.dot(np.diag(np.linspace(10.0, 1.0, rank)),
                      rng.randn(rank, n_features)))
    # the component-wise variance of the first 50 features is 3 times the
    # mean component-wise variance of the remaining 30 features
    X[:, :50] *= 3
    assert_equal(X.shape, (n_samples, n_features))
    # the component-wise variance is thus highly varying:
    assert_greater(X.std(axis=0).std(), 43.8)

    for solver, copy in product(solver_list, (True, False)):
        # keep an untouched original of X across iterations
        X_ = X.copy()
        pca = PCA(n_components=n_components, whiten=True, copy=copy,
                  svd_solver=solver, random_state=0, iterated_power=7)
        # fit_transform and a subsequent transform must agree
        X_whitened = pca.fit_transform(X_.copy())
        assert_equal(X_whitened.shape, (n_samples, n_components))
        X_whitened2 = pca.transform(X_)
        assert_array_almost_equal(X_whitened, X_whitened2)

        # whitened output: unit variance and zero mean per component
        assert_almost_equal(X_whitened.std(axis=0), np.ones(n_components),
                            decimal=6)
        assert_almost_equal(X_whitened.mean(axis=0), np.zeros(n_components))

        # without whitening, the component variances stay highly uneven
        X_ = X.copy()
        pca = PCA(n_components=n_components, whiten=False, copy=copy,
                  svd_solver=solver).fit(X_)
        X_unwhitened = pca.transform(X_)
        assert_equal(X_unwhitened.shape, (n_samples, n_components))
        assert_almost_equal(X_unwhitened.std(axis=0).std(), 74.1, 1)
        # we always center, so no test for non-centering.
# Ignore warnings from switching to more power iterations in randomized_svd
@ignore_warnings
def test_explained_variance():
    """Solvers must agree on explained variance, which must also match the
    empirical variance of the projected data."""
    rng = np.random.RandomState(0)
    n_samples, n_features = 100, 80
    X = rng.randn(n_samples, n_features)

    pca = PCA(n_components=2, svd_solver='full').fit(X)
    apca = PCA(n_components=2, svd_solver='arpack', random_state=0).fit(X)
    assert_array_almost_equal(pca.explained_variance_,
                              apca.explained_variance_, 1)
    assert_array_almost_equal(pca.explained_variance_ratio_,
                              apca.explained_variance_ratio_, 3)

    rpca = PCA(n_components=2, svd_solver='randomized', random_state=42).fit(X)
    assert_array_almost_equal(pca.explained_variance_,
                              rpca.explained_variance_, 1)
    assert_array_almost_equal(pca.explained_variance_ratio_,
                              rpca.explained_variance_ratio_, 1)

    # compare the reported values to the empirical projection variances
    assert_array_almost_equal(pca.explained_variance_,
                              np.var(pca.transform(X), axis=0))
    assert_array_almost_equal(apca.explained_variance_,
                              np.var(apca.transform(X), axis=0))
    assert_array_almost_equal(rpca.explained_variance_,
                              np.var(rpca.transform(X), axis=0), decimal=1)

    # same check with correlated data
    X = datasets.make_classification(n_samples, n_features,
                                     n_informative=n_features - 2,
                                     random_state=rng)[0]
    pca = PCA(n_components=2).fit(X)
    rpca = PCA(n_components=2, svd_solver='randomized',
               random_state=rng).fit(X)
    assert_array_almost_equal(pca.explained_variance_ratio_,
                              rpca.explained_variance_ratio_, 5)
def test_pca_check_projection():
    """Every solver must project a shifted test point onto the first axis."""
    rng = np.random.RandomState(0)
    n, p = 100, 3
    samples = rng.randn(n, p) * .1
    samples[:10] += np.array([3, 4, 5])
    probe = 0.1 * rng.randn(1, p) + np.array([3, 4, 5])

    for solver in solver_list:
        proj = PCA(n_components=2, svd_solver=solver).fit(samples).transform(probe)
        proj /= np.sqrt((proj ** 2).sum())  # normalize the projection
        assert_almost_equal(np.abs(proj[0][0]), 1., 1)
def test_pca_inverse():
    """inverse_transform must recover data that is almost rank n_components."""
    rng = np.random.RandomState(0)
    n, p = 50, 3
    X = rng.randn(n, p)    # spherical data
    X[:, 1] *= .00001      # make the middle component relatively small
    X += [5, 4, 3]         # add a large mean

    # since the data is almost of rank n_components, the original signal
    # can be recovered from the transformed one
    pca = PCA(n_components=2, svd_solver='full').fit(X)
    recovered = pca.inverse_transform(pca.transform(X))
    assert_almost_equal(X, recovered, decimal=3)

    # same as above with whitening (approximate reconstruction)
    for solver in solver_list:
        pca = PCA(n_components=2, whiten=True, svd_solver=solver)
        pca.fit(X)
        recovered = pca.inverse_transform(pca.transform(X))
        assert_almost_equal(X, recovered, decimal=3)
def test_pca_validation():
    """An out-of-range n_components must raise ValueError for every solver."""
    data = [[0, 1], [1, 0]]
    for solver in solver_list:
        for bad_n_components in (-1, 3):
            assert_raises(ValueError,
                          PCA(bad_n_components, svd_solver=solver).fit, data)
def test_randomized_pca_check_projection():
    """Randomized PCA must project a shifted test point onto the first axis."""
    rng = np.random.RandomState(0)
    n, p = 100, 3
    samples = rng.randn(n, p) * .1
    samples[:10] += np.array([3, 4, 5])
    probe = 0.1 * rng.randn(1, p) + np.array([3, 4, 5])

    proj = PCA(n_components=2, svd_solver='randomized',
               random_state=0).fit(samples).transform(probe)
    proj /= np.sqrt((proj ** 2).sum())  # normalize the projection
    assert_almost_equal(np.abs(proj[0][0]), 1., 1)
def test_randomized_pca_check_list():
    """Randomized PCA must accept plain Python lists as input."""
    data = [[1.0, 0.0], [0.0, 1.0]]
    transformed = PCA(n_components=1, svd_solver='randomized',
                      random_state=0).fit(data).transform(data)
    assert_equal(transformed.shape, (2, 1))
    assert_almost_equal(transformed.mean(), 0.00, 2)
    assert_almost_equal(transformed.std(), 0.71, 2)
def test_randomized_pca_inverse():
    """Randomized PCA must be (approximately) invertible on dense data."""
    rng = np.random.RandomState(0)
    n, p = 50, 3
    X = rng.randn(n, p)    # spherical data
    X[:, 1] *= .00001      # make the middle component relatively small
    X += [5, 4, 3]         # add a large mean

    # the data is almost of rank n_components, so the original signal
    # can be recovered from the transformed one
    pca = PCA(n_components=2, svd_solver='randomized', random_state=0).fit(X)
    recovered = pca.inverse_transform(pca.transform(X))
    assert_almost_equal(X, recovered, decimal=2)

    # same as above with whitening (approximate reconstruction)
    pca = PCA(n_components=2, whiten=True, svd_solver='randomized',
              random_state=0).fit(X)
    recovered = pca.inverse_transform(pca.transform(X))
    relative_max_delta = (np.abs(X - recovered) / np.abs(X).mean()).max()
    assert_less(relative_max_delta, 1e-5)
def test_pca_dim():
    """n_components='mle' must recover the single informative dimension."""
    rng = np.random.RandomState(0)
    n_samples, n_feat = 100, 5
    X = rng.randn(n_samples, n_feat) * .1
    X[:10] += np.array([3, 4, 5, 1, 2])
    pca = PCA(n_components='mle', svd_solver='full').fit(X)
    # the constructor parameter is preserved, the fitted count is inferred
    assert_equal(pca.n_components, 'mle')
    assert_equal(pca.n_components_, 1)
def test_infer_dim_1():
    """_assess_dimension_ should peak (within tolerance) at the true
    one-dimensional latent structure of the data."""
    n, p = 1000, 5
    rng = np.random.RandomState(0)
    X = (rng.randn(n, p) * .1 + rng.randn(n, 1) * np.array([3, 4, 5, 1, 2]) +
         np.array([1, 0, 7, 4, 6]))
    pca = PCA(n_components=p, svd_solver='full')
    pca.fit(X)
    spect = pca.explained_variance_
    ll = np.array([_assess_dimension_(spect, k, n, p) for k in range(p)])
    # the likelihood of one latent dimension must be near the maximum
    assert_greater(ll[1], ll.max() - .01 * n)
def test_infer_dim_2():
    """With two shifted subgroups, _infer_dimension_ must report > 1 dims."""
    n, p = 1000, 5
    rng = np.random.RandomState(0)
    X = rng.randn(n, p) * .1
    X[:10] += np.array([3, 4, 5, 1, 2])
    X[10:20] += np.array([6, 0, 7, 2, -1])
    spect = PCA(n_components=p, svd_solver='full').fit(X).explained_variance_
    assert_greater(_infer_dimension_(spect, n, p), 1)
def test_infer_dim_3():
    """With three shifted subgroups, _infer_dimension_ must report > 2 dims."""
    n, p = 100, 5
    rng = np.random.RandomState(0)
    X = rng.randn(n, p) * .1
    X[:10] += np.array([3, 4, 5, 1, 2])
    X[10:20] += np.array([6, 0, 7, 2, -1])
    X[30:40] += 2 * np.array([-1, 1, -1, 1, -1])
    spect = PCA(n_components=p, svd_solver='full').fit(X).explained_variance_
    assert_greater(_infer_dimension_(spect, n, p), 2)
def test_infer_dim_by_explained_variance():
    """A fractional n_components selects the smallest count of components
    whose cumulative explained-variance ratio reaches that fraction."""
    X = iris.data
    pca = PCA(n_components=0.95, svd_solver='full').fit(X)
    assert_equal(pca.n_components, 0.95)
    assert_equal(pca.n_components_, 2)

    pca = PCA(n_components=0.01, svd_solver='full').fit(X)
    assert_equal(pca.n_components, 0.01)
    assert_equal(pca.n_components_, 1)

    # also works with more features than samples
    rng = np.random.RandomState(0)
    pca = PCA(n_components=.5, svd_solver='full').fit(rng.rand(5, 20))
    assert_equal(pca.n_components, 0.5)
    assert_equal(pca.n_components_, 2)
def test_pca_score():
    """Probabilistic PCA log-likelihood should match the analytic entropy
    of the generating isotropic Gaussian."""
    n, p = 1000, 3
    rng = np.random.RandomState(0)
    X = rng.randn(n, p) * .1 + np.array([3, 4, 5])
    # differential entropy of an isotropic Gaussian with sigma = 0.1
    h = -0.5 * np.log(2 * np.pi * np.exp(1) * 0.1 ** 2) * p
    for solver in solver_list:
        ll1 = PCA(n_components=2, svd_solver=solver).fit(X).score(X)
        np.testing.assert_almost_equal(ll1 / h, 1, 0)
def test_pca_score2():
    """Probabilistic PCA must correctly separate different datasets."""
    n, p = 100, 3
    rng = np.random.RandomState(0)
    X = rng.randn(n, p) * .1 + np.array([3, 4, 5])
    for solver in solver_list:
        pca = PCA(n_components=2, svd_solver=solver)
        pca.fit(X)
        # held-in data must score higher than data drawn from a broader
        # distribution
        ll1 = pca.score(X)
        ll2 = pca.score(rng.randn(n, p) * .2 + np.array([3, 4, 5]))
        assert_greater(ll1, ll2)

        # whiten=True must change the score
        pca = PCA(n_components=2, whiten=True, svd_solver=solver)
        pca.fit(X)
        ll2 = pca.score(X)
        # FIX: use assert_greater (as above) instead of assert_true(ll1 > ll2)
        # so a failure reports both values instead of a bare False.
        assert_greater(ll1, ll2)
def test_pca_score3():
    """Probabilistic PCA model selection must pick one latent dimension."""
    n, p = 200, 3
    rng = np.random.RandomState(0)
    # train and test sets, both with one latent dimension plus noise
    Xl = (rng.randn(n, p) + rng.randn(n, 1) * np.array([3, 4, 5]) +
          np.array([1, 0, 7]))
    Xt = (rng.randn(n, p) + rng.randn(n, 1) * np.array([3, 4, 5]) +
          np.array([1, 0, 7]))
    ll = np.zeros(p)
    for k in range(p):
        pca = PCA(n_components=k, svd_solver='full')
        pca.fit(Xl)
        ll[k] = pca.score(Xt)
    # FIX: assert_equal reports the actual argmax on failure; the previous
    # assert_true(ll.argmax() == 1) only reported a bare False.
    assert_equal(ll.argmax(), 1)
def test_svd_solver_auto():
    """svd_solver='auto' must dispatch to the solver the heuristic predicts."""
    rng = np.random.RandomState(0)
    X = rng.uniform(size=(1000, 50))

    # case: n_components in (0,1) => 'full'
    auto = PCA(n_components=.5).fit(X)
    explicit = PCA(n_components=.5, svd_solver='full').fit(X)
    assert_array_almost_equal(auto.components_, explicit.components_)

    # case: max(X.shape) <= 500 => 'full'
    small = X[:10, :]
    auto = PCA(n_components=5, random_state=0).fit(small)
    explicit = PCA(n_components=5, svd_solver='full', random_state=0).fit(small)
    assert_array_almost_equal(auto.components_, explicit.components_)

    # case: n_components >= .8 * min(X.shape) => 'full'
    auto = PCA(n_components=50).fit(X)
    explicit = PCA(n_components=50, svd_solver='full').fit(X)
    assert_array_almost_equal(auto.components_, explicit.components_)

    # case: 1 <= n_components < .8 * min(X.shape) => 'randomized'
    auto = PCA(n_components=10, random_state=0).fit(X)
    explicit = PCA(n_components=10, svd_solver='randomized',
                   random_state=0).fit(X)
    assert_array_almost_equal(auto.components_, explicit.components_)
def test_deprecation_randomized_pca():
    # RandomizedPCA must emit the exact DeprecationWarning below, and its
    # output must match the replacement PCA(svd_solver='randomized') on the
    # same data and seed.
    rng = np.random.RandomState(0)
    X = rng.random_sample((5, 4))
    depr_message = ("Class RandomizedPCA is deprecated; RandomizedPCA will be "
                    "removed in 0.20. Use PCA(svd_solver='randomized') "
                    "instead. The new implementation DOES NOT store "
                    "whiten components_. Apply transform to get them.")

    def fit_deprecated(X):
        # assert_warns_message only forwards the callable's warning, not its
        # return value, so the transform result is passed out through the
        # module-level global Y (read again below).
        global Y
        rpca = RandomizedPCA(random_state=0)
        Y = rpca.fit_transform(X)

    assert_warns_message(DeprecationWarning, depr_message, fit_deprecated, X)
    Y_pca = PCA(svd_solver='randomized', random_state=0).fit_transform(X)
    assert_array_almost_equal(Y, Y_pca)
|
bsd-3-clause
|
pcmagic/stokes_flow
|
head_Force/do_calculate.py
|
1
|
10156
|
import sys
import matplotlib
import petsc4py
matplotlib.use('agg')
petsc4py.init(sys.argv)
import numpy as np
import pickle
from time import time
from codeStore import support_fun_table as spf_tb
from petsc4py import PETSc
from datetime import datetime
from tqdm import tqdm
# get kwargs: assemble run parameters from PETSc command-line options
def get_problem_kwargs(**main_kwargs):
    """Collect run parameters from the PETSc options database.

    Keyword arguments passed in override anything read from the command
    line.  The 'calculate_fun' option name is resolved to the matching
    solver routine in ``support_fun_table``.
    """
    # every supported solver routine lives in support_fun_table under the
    # same name as its command-line key
    _fun_names = ('do_calculate_helix_Petsc4n',
                  'do_calculate_helix_AvrPetsc4n',
                  'do_calculate_ellipse_Petsc4n',
                  'do_calculate_ellipse_AvrPetsc4n',
                  'do_calculate_ecoli_Petsc4n',
                  'do_calculate_ecoli_Petsc4nPsi',
                  'do_ShearFlowPetsc4nPsiObj',
                  'do_ShearFlowPetsc4nPsiObj_dbg',
                  'do_calculate_ecoli_AvrPetsc4n',
                  'do_calculate_ecoli_passive_Petsc4n',
                  'do_calculate_ecoli_passive_AvrPetsc4n')
    calculate_fun_dict = {name: getattr(spf_tb, name) for name in _fun_names}

    OptDB = PETSc.Options()
    problem_kwargs = {
        'ini_theta':     OptDB.getReal('ini_theta', 0),
        'ini_phi':       OptDB.getReal('ini_phi', 0),
        'ini_psi':       OptDB.getReal('ini_psi', 0),
        'ini_t':         OptDB.getReal('ini_t', 0),
        'max_t':         OptDB.getReal('max_t', 1),
        'update_fun':    OptDB.getString('update_fun', '5bs'),
        'calculate_fun': calculate_fun_dict[
            OptDB.getString('calculate_fun', 'do_calculate_helix_Petsc4n')],
        'rtol':          OptDB.getReal('rtol', 1e-3),
        'atol':          OptDB.getReal('atol', 1e-6),
        'eval_dt':       OptDB.getReal('eval_dt', 0.01),
        'table_name':    OptDB.getString('table_name', 'hlxB01_tau1a'),
        'fileHandle':    OptDB.getString('f', ''),
        'omega_tail':    OptDB.getReal('omega_tail', 0),
        'flow_strength': OptDB.getReal('flow_strength', 0),
    }
    # explicit keyword arguments take precedence over command-line options
    problem_kwargs.update(main_kwargs)
    return problem_kwargs
def do_pickel(Table_t, Table_dt, Table_X, Table_P, Table_P2, Table_theta, Table_phi, Table_psi,
              Table_eta, simulate_t, **problem_kwargs):
    """Pickle the result tables, render the summary figure, and return a
    human-readable summary string of the run.

    NOTE(review): the save loop below reads each name in ``save_list`` out of
    ``locals()``, so every unpacked local below must keep exactly the name
    listed in ``save_list`` -- do not rename these variables.
    """
    # unpack the run parameters (names must match save_list, see note above)
    ini_theta = problem_kwargs['ini_theta']
    ini_phi = problem_kwargs['ini_phi']
    ini_psi = problem_kwargs['ini_psi']
    ini_t = problem_kwargs['ini_t']
    max_t = problem_kwargs['max_t']
    update_fun = problem_kwargs['update_fun']
    calculate_fun = problem_kwargs['calculate_fun']
    rtol = problem_kwargs['rtol']
    atol = problem_kwargs['atol']
    eval_dt = problem_kwargs['eval_dt']
    table_name = problem_kwargs['table_name']
    omega_tail = problem_kwargs['omega_tail']
    flow_strength = problem_kwargs['flow_strength']
    save_every = problem_kwargs['save_every']
    t_name = problem_kwargs['t_name']
    # assemble the human-readable run summary
    expt_str = ''
    expt_str = expt_str + 'table_name: %s \n' % table_name
    expt_str = expt_str + 'omega_tail: %f \n' % omega_tail
    expt_str = expt_str + 'flow_strength: %f \n' % flow_strength
    expt_str = expt_str + 'init normal angle: %f, %f, %f \n' % \
               (ini_theta, ini_phi, ini_psi)
    expt_str = expt_str + 'last normal angle: %f, %f, %f \n' % \
               (Table_theta[-1], Table_phi[-1], Table_psi[-1])
    expt_str = expt_str + '%s: ini_t=%f, max_t=%f, n_t=%d \n' % \
               (calculate_fun, ini_t, max_t, Table_t.size)
    expt_str = expt_str + '%s rt%.0e, at%.0e, %.1fs \n' % \
               (update_fun, rtol, atol, simulate_t)

    # collect everything worth persisting by name from the local namespace
    save_list = ('ini_theta', 'ini_phi', 'ini_psi', 'ini_t', 'max_t', 'eval_dt', 'update_fun',
                 'rtol', 'atol', 'table_name', 'omega_tail', 'flow_strength', 't_name',
                 'save_every', 'simulate_t', 'Table_t', 'Table_dt',
                 'Table_X', 'Table_P', 'Table_P2',
                 'Table_theta', 'Table_phi', 'Table_psi', 'Table_eta')
    t_pick = {}
    for var_name in save_list:
        t_pick[var_name] = locals()[var_name]
    t_pick['problem_kwargs'] = problem_kwargs
    with open('%s.pickle' % t_name, 'wb') as handle:
        pickle.dump(t_pick, handle, protocol=pickle.HIGHEST_PROTOCOL)
    expt_str = expt_str + 'save to %s \n' % t_name

    # render the trajectory summary figure next to the pickle
    # spf_tb.save_table_result('%s.jpg' % t_name, Table_t, Table_dt, Table_X, Table_P, Table_P2,
    #                          Table_theta, Table_phi, Table_psi, Table_eta, save_every)
    spf_tb.save_table_result_v2('%s.jpg' % t_name, Table_t, Table_dt, Table_X, Table_P, Table_P2,
                                Table_theta, Table_phi, Table_psi, Table_eta, save_every, dpi=200)
    return expt_str
def main_fun(**main_kwargs):
    """Run one trajectory simulation and persist tables, figure and summary."""
    problem_kwargs = get_problem_kwargs(**main_kwargs)
    problem_kwargs['save_every'] = 1

    ini_theta = problem_kwargs['ini_theta']
    ini_phi = problem_kwargs['ini_phi']
    ini_psi = problem_kwargs['ini_psi']

    # output name: file handle + initial angles + timestamp
    idx_time = datetime.today().strftime('D%Y%m%d_T%H%M%S')
    fileHandle = problem_kwargs['fileHandle']
    if len(fileHandle) > 0:
        fileHandle = fileHandle + '_'
    t_name = '%sth%5.3f_ph%5.3f_ps%5.3f_%s' % (fileHandle, ini_theta, ini_phi,
                                               ini_psi, idx_time)
    problem_kwargs['t_name'] = t_name

    # initial orientation vector from the spherical angles
    tnorm = np.array((np.sin(ini_theta) * np.cos(ini_phi),
                      np.sin(ini_theta) * np.sin(ini_phi),
                      np.cos(ini_theta)))

    calculate_fun = problem_kwargs['calculate_fun']
    t0 = time()
    Table_t, Table_dt, Table_X, Table_P, Table_P2, \
        Table_theta, Table_phi, Table_psi, Table_eta = calculate_fun(
            tnorm, ini_psi, problem_kwargs['max_t'],
            update_fun=problem_kwargs['update_fun'],
            rtol=problem_kwargs['rtol'], atol=problem_kwargs['atol'],
            eval_dt=problem_kwargs['eval_dt'], ini_t=problem_kwargs['ini_t'],
            table_name=problem_kwargs['table_name'],
            save_every=problem_kwargs['save_every'],
            tqdm_fun=tqdm, omega_tail=problem_kwargs['omega_tail'])
    simulate_t = time() - t0

    # pickle + figure + text summary
    expt_str = do_pickel(Table_t, Table_dt, Table_X, Table_P, Table_P2,
                         Table_theta, Table_phi, Table_psi, Table_eta,
                         simulate_t, **problem_kwargs)
    print(expt_str)
    with open('%s.txt' % t_name, 'w') as text_file:
        text_file.write(expt_str)
    return True
def main_fun_base_flow(**main_kwargs):
    """Run the shear-flow (base flow) variant of the orientation simulation.

    Identical to ``main_fun`` except that the shear-flow ``calculate_fun``
    takes ``flow_strength`` and ``return_psi_body`` arguments; the chosen
    calculate function is asserted to be one of the shear-flow drivers.
    """
    OptDB = PETSc.Options()
    # this driver only supports the shear-flow calculate functions
    assert OptDB.getString('calculate_fun') in ('do_ShearFlowPetsc4nPsiObj',
                                                'do_ShearFlowPetsc4nPsiObj_dbg',)
    problem_kwargs = get_problem_kwargs(**main_kwargs)
    # force an archive write on every accepted step
    problem_kwargs['save_every'] = 1
    save_every = problem_kwargs['save_every']

    ini_theta = problem_kwargs['ini_theta']
    ini_phi = problem_kwargs['ini_phi']
    ini_psi = problem_kwargs['ini_psi']

    # unique, human-readable case name: file handle + initial angles + timestamp
    idx_time = datetime.today().strftime('D%Y%m%d_T%H%M%S')
    fileHandle = problem_kwargs['fileHandle']
    if len(fileHandle) > 0:
        fileHandle = fileHandle + '_'
    t_name = '%sth%5.3f_ph%5.3f_ps%5.3f_%s' % (fileHandle, ini_theta, ini_phi,
                                               ini_psi, idx_time)
    problem_kwargs['t_name'] = t_name
    calculate_fun = problem_kwargs['calculate_fun']

    t0 = time()
    # initial orientation unit vector from the spherical angles
    tnorm = np.array((np.sin(ini_theta) * np.cos(ini_phi),
                      np.sin(ini_theta) * np.sin(ini_phi),
                      np.cos(ini_theta)))
    (Table_t, Table_dt, Table_X, Table_P, Table_P2,
     Table_theta, Table_phi, Table_psi, Table_eta) = calculate_fun(
        tnorm, ini_psi, problem_kwargs['max_t'],
        update_fun=problem_kwargs['update_fun'],
        rtol=problem_kwargs['rtol'], atol=problem_kwargs['atol'],
        eval_dt=problem_kwargs['eval_dt'], ini_t=problem_kwargs['ini_t'],
        table_name=problem_kwargs['table_name'], save_every=save_every,
        tqdm_fun=tqdm, omega_tail=problem_kwargs['omega_tail'],
        flow_strength=problem_kwargs['flow_strength'],
        return_psi_body=False)
    simulate_t = time() - t0

    expt_str = do_pickel(Table_t, Table_dt, Table_X, Table_P, Table_P2,
                         Table_theta, Table_phi, Table_psi, Table_eta,
                         simulate_t, **problem_kwargs)
    print(expt_str)
    with open('%s.txt' % t_name, 'w') as text_file:
        text_file.write(expt_str)
    return True
if __name__ == '__main__':
    OptDB = PETSc.Options()
    # The shear-flow calculate functions need the dedicated driver (it passes
    # flow_strength); disable the generic driver so the case is not run twice.
    # NOTE(review): OptDB.getString('calculate_fun') with no default assumes the
    # option is always supplied on the command line — confirm.
    if OptDB.getString('calculate_fun') in ('do_ShearFlowPetsc4nPsiObj',
                                            'do_ShearFlowPetsc4nPsiObj_dbg',):
        OptDB.setValue('main_fun', False)
        main_fun_base_flow()
    if OptDB.getBool('main_fun', True):
        main_fun()
|
mit
|
liangz0707/scikit-learn
|
benchmarks/bench_plot_approximate_neighbors.py
|
244
|
6011
|
"""
Benchmark for approximate nearest neighbor search using
locality sensitive hashing forest.
There are two types of benchmarks.
First, accuracy of LSHForest queries are measured for various
hyper-parameters and index sizes.
Second, speed up of LSHForest queries compared to brute force
method in exact nearest neighbors is measures for the
aforementioned settings. In general, speed up is increasing as
the index size grows.
"""
from __future__ import division
import numpy as np
from tempfile import gettempdir
from time import time
from sklearn.neighbors import NearestNeighbors
from sklearn.neighbors.approximate import LSHForest
from sklearn.datasets import make_blobs
from sklearn.externals.joblib import Memory
# Cache the (expensive) dataset generation in the system temp directory.
m = Memory(cachedir=gettempdir())


@m.cache()
def make_data(n_samples, n_features, n_queries, random_state=0):
    """Create index and query data."""
    print('Generating random blob-ish data')
    n_total = n_samples + n_queries
    # shuffle=True guarantees the index and query vectors are samples from the
    # same distribution (a mixture of 100 gaussians)
    X, _ = make_blobs(n_samples=n_total, n_features=n_features,
                      centers=100, shuffle=True, random_state=random_state)
    # the trailing n_queries rows are held out as query vectors
    return X[:n_samples], X[n_samples:]
def calc_exact_neighbors(X, queries, n_queries, n_neighbors):
    """Measures average times for exact neighbor queries.

    Fits a brute-force cosine ``NearestNeighbors`` index on ``X`` and times a
    single batched k-NN query over ``queries``.

    Returns ``(neighbors, average_time)`` where ``neighbors`` is the
    ``(len(queries), n_neighbors)`` index array and ``average_time`` is the
    per-query wall time in seconds.
    """
    print ('Building NearestNeighbors for %d samples in %d dimensions' %
           (X.shape[0], X.shape[1]))
    nbrs = NearestNeighbors(algorithm='brute', metric='cosine').fit(X)
    # fix: dropped the dead `average_time = 0` initialisation — the value was
    # unconditionally overwritten after the timed query below
    t0 = time()
    neighbors = nbrs.kneighbors(queries, n_neighbors=n_neighbors,
                                return_distance=False)
    average_time = (time() - t0) / n_queries
    return neighbors, average_time
def calc_accuracy(X, queries, n_queries, n_neighbors, exact_neighbors,
                  average_time_exact, **lshf_params):
    """Calculates accuracy and the speed up of LSHForest."""
    print('Building LSHForest for %d samples in %d dimensions' %
          (X.shape[0], X.shape[1]))
    lshf = LSHForest(**lshf_params)
    t0 = time()
    lshf.fit(X)
    lshf_build_time = time() - t0
    print('Done in %0.3fs' % lshf_build_time)

    # time one batched approximate query over all query vectors
    t0 = time()
    approx_neighbors = lshf.kneighbors(queries, n_neighbors=n_neighbors,
                                       return_distance=False)
    average_time_approx = (time() - t0) / n_queries

    # fraction of exact neighbors recovered, averaged over the queries
    per_query = [np.in1d(approx, exact).mean()
                 for approx, exact in zip(approx_neighbors, exact_neighbors)]
    accuracy = sum(per_query) / n_queries
    speed_up = average_time_exact / average_time_approx

    print('Average time for lshf neighbor queries: %0.3fs' %
          average_time_approx)
    print ('Average time for exact neighbor queries: %0.3fs' %
           average_time_exact)
    print ('Average Accuracy : %0.2f' % accuracy)
    print ('Speed up: %0.1fx' % speed_up)
    return speed_up, accuracy
if __name__ == '__main__':
    import matplotlib.pyplot as plt

    # Initialize index sizes
    n_samples = [int(1e3), int(1e4), int(1e5), int(1e6)]
    n_features = int(1e2)
    n_queries = 100
    n_neighbors = 10

    # generate once for the largest index size; smaller runs reuse a prefix
    X_index, X_query = make_data(np.max(n_samples), n_features, n_queries,
                                 random_state=0)

    # LSHForest hyper-parameter grid to benchmark
    params_list = [{'n_estimators': 3, 'n_candidates': 50},
                   {'n_estimators': 5, 'n_candidates': 70},
                   {'n_estimators': 10, 'n_candidates': 100}]

    accuracies = np.zeros((len(n_samples), len(params_list)), dtype=float)
    speed_ups = np.zeros((len(n_samples), len(params_list)), dtype=float)

    for i, sample_size in enumerate(n_samples):
        print ('==========================================================')
        print ('Sample size: %i' % sample_size)
        print ('------------------------')
        # brute-force baseline: ground-truth neighbors and exact query time
        exact_neighbors, average_time_exact = calc_exact_neighbors(
            X_index[:sample_size], X_query, n_queries, n_neighbors)
        for j, params in enumerate(params_list):
            print ('LSHF parameters: n_estimators = %i, n_candidates = %i' %
                   (params['n_estimators'], params['n_candidates']))
            speed_ups[i, j], accuracies[i, j] = calc_accuracy(
                X_index[:sample_size], X_query, n_queries, n_neighbors,
                exact_neighbors, average_time_exact, random_state=0, **params)
        print ('')
        print ('==========================================================')

    # Set labels for LSHForest parameters
    colors = ['c', 'm', 'y']
    legend_rects = [plt.Rectangle((0, 0), 0.1, 0.1, fc=color)
                    for color in colors]
    legend_labels = ['n_estimators={n_estimators}, '
                     'n_candidates={n_candidates}'.format(**p)
                     for p in params_list]

    # Plot precision
    plt.figure()
    plt.legend(legend_rects, legend_labels,
               loc='upper left')
    for i in range(len(params_list)):
        plt.scatter(n_samples, accuracies[:, i], c=colors[i])
        plt.plot(n_samples, accuracies[:, i], c=colors[i])
    plt.ylim([0, 1.3])
    plt.xlim(np.min(n_samples), np.max(n_samples))
    plt.semilogx()
    plt.ylabel("Precision@10")
    plt.xlabel("Index size")
    plt.grid(which='both')
    plt.title("Precision of first 10 neighbors with index size")

    # Plot speed up
    plt.figure()
    plt.legend(legend_rects, legend_labels,
               loc='upper left')
    for i in range(len(params_list)):
        plt.scatter(n_samples, speed_ups[:, i], c=colors[i])
        plt.plot(n_samples, speed_ups[:, i], c=colors[i])
    plt.ylim(0, np.max(speed_ups))
    plt.xlim(np.min(n_samples), np.max(n_samples))
    plt.semilogx()
    plt.ylabel("Speed up")
    plt.xlabel("Index size")
    plt.grid(which='both')
    plt.title("Relationship between Speed up and index size")

    plt.show()
|
bsd-3-clause
|
IndraVikas/scikit-learn
|
sklearn/linear_model/setup.py
|
169
|
1567
|
import os
from os.path import join
import numpy
from sklearn._build_utils import get_blas_info
def configuration(parent_package='', top_path=None):
    """numpy.distutils configuration for the ``sklearn.linear_model`` package.

    Declares the C extensions ``cd_fast`` and ``sgd_fast`` (both linked
    against CBLAS) and the ``tests`` subpackage.
    """
    from numpy.distutils.misc_util import Configuration

    config = Configuration('linear_model', parent_package, top_path)

    cblas_libs, blas_info = get_blas_info()

    if os.name == 'posix':
        # libm is needed explicitly on POSIX linkers
        cblas_libs.append('m')

    # NOTE(review): blas_info.pop(...) mutates blas_info, so 'include_dirs'
    # and 'extra_compile_args' are consumed by this first add_extension call;
    # the pops in the second call below return the defaults. Also the popped
    # list becomes a nested element of include_dirs — presumably numpy's
    # distutils flattens this; confirm both behaviors are intended.
    config.add_extension('cd_fast', sources=['cd_fast.c'],
                         libraries=cblas_libs,
                         include_dirs=[join('..', 'src', 'cblas'),
                                       numpy.get_include(),
                                       blas_info.pop('include_dirs', [])],
                         extra_compile_args=blas_info.pop('extra_compile_args',
                                                          []), **blas_info)

    config.add_extension('sgd_fast',
                         sources=['sgd_fast.c'],
                         include_dirs=[join('..', 'src', 'cblas'),
                                       numpy.get_include(),
                                       blas_info.pop('include_dirs', [])],
                         libraries=cblas_libs,
                         extra_compile_args=blas_info.pop('extra_compile_args',
                                                          []),
                         **blas_info)

    # add other directories
    config.add_subpackage('tests')

    return config
if __name__ == '__main__':
    # standalone build entry point: delegate to numpy.distutils
    from numpy.distutils.core import setup
    setup(**configuration(top_path='').todict())
|
bsd-3-clause
|
huobaowangxi/scikit-learn
|
examples/ensemble/plot_voting_probas.py
|
316
|
2824
|
"""
===========================================================
Plot class probabilities calculated by the VotingClassifier
===========================================================
Plot the class probabilities of the first sample in a toy dataset
predicted by three different classifiers and averaged by the
`VotingClassifier`.
First, three examplary classifiers are initialized (`LogisticRegression`,
`GaussianNB`, and `RandomForestClassifier`) and used to initialize a
soft-voting `VotingClassifier` with weights `[1, 1, 5]`, which means that
the predicted probabilities of the `RandomForestClassifier` count 5 times
as much as the weights of the other classifiers when the averaged probability
is calculated.
To visualize the probability weighting, we fit each classifier on the training
set and plot the predicted class probabilities for the first sample in this
example dataset.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import GaussianNB
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import VotingClassifier
# Three heterogeneous base classifiers on a tiny 2-feature toy set.
clf1 = LogisticRegression(random_state=123)
clf2 = RandomForestClassifier(random_state=123)
clf3 = GaussianNB()
X = np.array([[-1.0, -1.0], [-1.2, -1.4], [-3.4, -2.2], [1.1, 1.2]])
y = np.array([1, 1, 2, 2])

# Soft-voting ensemble: the random forest's probabilities get weight 5,
# the other two classifiers weight 1 each.
eclf = VotingClassifier(estimators=[('lr', clf1), ('rf', clf2), ('gnb', clf3)],
                        voting='soft',
                        weights=[1, 1, 5])

# predict class probabilities for all classifiers
probas = [c.fit(X, y).predict_proba(X) for c in (clf1, clf2, clf3, eclf)]

# get class probabilities for the first sample in the dataset
class1_1 = [pr[0, 0] for pr in probas]
class2_1 = [pr[0, 1] for pr in probas]

# plotting
N = 4  # number of groups
ind = np.arange(N)  # group positions
width = 0.35  # bar width

fig, ax = plt.subplots()

# bars for classifier 1-3 (last group left empty for the ensemble)
p1 = ax.bar(ind, np.hstack(([class1_1[:-1], [0]])), width, color='green')
p2 = ax.bar(ind + width, np.hstack(([class2_1[:-1], [0]])), width, color='lightgreen')

# bars for VotingClassifier (only the fourth group is non-zero)
p3 = ax.bar(ind, [0, 0, 0, class1_1[-1]], width, color='blue')
p4 = ax.bar(ind + width, [0, 0, 0, class2_1[-1]], width, color='steelblue')

# plot annotations: dashed line separates base classifiers from the ensemble
plt.axvline(2.8, color='k', linestyle='dashed')
ax.set_xticks(ind + width)
ax.set_xticklabels(['LogisticRegression\nweight 1',
                    'GaussianNB\nweight 1',
                    'RandomForestClassifier\nweight 5',
                    'VotingClassifier\n(average probabilities)'],
                   rotation=40,
                   ha='right')
plt.ylim([0, 1])
plt.title('Class probabilities for sample 1 by different classifiers')
plt.legend([p1[0], p2[0]], ['class 1', 'class 2'], loc='upper left')
plt.show()
|
bsd-3-clause
|
bloyl/mne-python
|
mne/viz/backends/_utils.py
|
8
|
2928
|
# -*- coding: utf-8 -*-
#
# Authors: Alexandre Gramfort <[email protected]>
# Eric Larson <[email protected]>
# Joan Massich <[email protected]>
# Guillaume Favelier <[email protected]>
#
# License: Simplified BSD
from contextlib import contextmanager
import numpy as np
import collections.abc
from ...externals.decorator import decorator
# Names of the supported 3d rendering backends; presumably consumed by the
# backend-selection machinery elsewhere in mne.viz — confirm against callers.
VALID_3D_BACKENDS = (
    'pyvista',  # default 3d backend
    'mayavi',
    'notebook',
)
# Glyph shapes accepted for quiver (vector field) plots in the 3d backends.
ALLOWED_QUIVER_MODES = ('2darrow', 'arrow', 'cone', 'cylinder', 'sphere',
                        'oct')
def _get_colormap_from_array(colormap=None, normalized_colormap=False,
                             default_colormap='coolwarm'):
    """Coerce *colormap* into a matplotlib colormap instance.

    ``None`` selects *default_colormap*; a string is looked up by name; an
    array is wrapped in a ``ListedColormap`` (divided by 255 unless
    *normalized_colormap* says the values are already in [0, 1]).
    """
    from matplotlib import cm
    from matplotlib.colors import ListedColormap
    if colormap is None:
        return cm.get_cmap(default_colormap)
    if isinstance(colormap, str):
        return cm.get_cmap(colormap)
    if normalized_colormap:
        return ListedColormap(colormap)
    return ListedColormap(np.array(colormap) / 255.0)
def _check_color(color):
from matplotlib.colors import colorConverter
if isinstance(color, str):
color = colorConverter.to_rgb(color)
elif isinstance(color, collections.abc.Iterable):
np_color = np.array(color)
if np_color.size % 3 != 0 and np_color.size % 4 != 0:
raise ValueError("The expected valid format is RGB or RGBA.")
if np_color.dtype in (np.int64, np.int32):
if (np_color < 0).any() or (np_color > 255).any():
raise ValueError("Values out of range [0, 255].")
elif np_color.dtype == np.float64:
if (np_color < 0.0).any() or (np_color > 1.0).any():
raise ValueError("Values out of range [0.0, 1.0].")
else:
raise TypeError("Expected data type is `np.int64`, `np.int32`, or "
"`np.float64` but {} was given."
.format(np_color.dtype))
else:
raise TypeError("Expected type is `str` or iterable but "
"{} was given.".format(type(color)))
return color
def _alpha_blend_background(ctable, background_color):
alphas = ctable[:, -1][:, np.newaxis] / 255.
use_table = ctable.copy()
use_table[:, -1] = 255.
return (use_table * alphas) + background_color * (1 - alphas)
@decorator
def run_once(fun, *args, **kwargs):
    """Run the function only once."""
    # Subsequent calls return None; the guard flag is stored on the function
    # object itself so the state survives across call sites.
    if not hasattr(fun, "_has_run"):
        fun._has_run = True
        return fun(*args, **kwargs)
@run_once
def _init_qt_resources():
    """Register the bundled Qt icon resources (at most once per process)."""
    from ...icons import resources
    resources.qInitResources()
@contextmanager
def _qt_disable_paint(widget):
paintEvent = widget.paintEvent
widget.paintEvent = lambda *args, **kwargs: None
try:
yield
finally:
widget.paintEvent = paintEvent
|
bsd-3-clause
|
raman-sharma/pyAudioAnalysis
|
analyzeMovieSound.py
|
3
|
6014
|
import os, sys, shutil, glob, numpy, csv, cPickle
import scipy.io.wavfile as wavfile
import audioBasicIO
import audioTrainTest as aT
import audioSegmentation as aS
import matplotlib.pyplot as plt
import scipy.spatial.distance
minDuration = 7;
def classifyFolderWrapper(inputFolder, modelType, modelName, outputMode=False):
    """Classify every WAV file in *inputFolder* with a pre-trained model.

    Python 2 code. modelType is 'svm' or 'knn'; modelName is the path to the
    stored model. Returns (classNames, PsAll) where PsAll is the
    duration-weighted, normalized per-class probability over all files.
    When outputMode is True, per-file labels and a class histogram are
    printed and a bar chart of PsAll is shown.
    """
    if not os.path.isfile(modelName):
        raise Exception("Input modelName not found!")

    # load the stored classifier together with its normalization statistics
    # and the analysis window parameters it was trained with
    if modelType=='svm':
        [Classifier, MEAN, STD, classNames, mtWin, mtStep, stWin, stStep, computeBEAT] = aT.loadSVModel(modelName)
    elif modelType=='knn':
        [Classifier, MEAN, STD, classNames, mtWin, mtStep, stWin, stStep, computeBEAT] = aT.loadKNNModel(modelName)

    PsAll = numpy.zeros((len(classNames), ))

    # collect all WAV files (inputFolder may be a directory or a path prefix)
    files = "*.wav"
    if os.path.isdir(inputFolder):
        strFilePattern = os.path.join(inputFolder, files)
    else:
        strFilePattern = inputFolder + files

    wavFilesList = []
    wavFilesList.extend(glob.glob(strFilePattern))
    wavFilesList = sorted(wavFilesList)
    if len(wavFilesList)==0:
        print "No WAV files found!"
        return

    Results = []
    for wavFile in wavFilesList:
        [Fs, x] = audioBasicIO.readAudioFile(wavFile)
        signalLength = x.shape[0] / float(Fs)
        [Result, P, classNames] = aT.fileClassification(wavFile, modelName, modelType)
        # weight the class probabilities by the file duration
        PsAll += (numpy.array(P) * signalLength)
        Result = int(Result)
        Results.append(Result)
        if outputMode:
            print "{0:s}\t{1:s}".format(wavFile,classNames[Result])
    Results = numpy.array(Results)

    # print distribution of classes:
    [Histogram, _] = numpy.histogram(Results, bins=numpy.arange(len(classNames)+1))
    if outputMode:
        for i,h in enumerate(Histogram):
            print "{0:20s}\t\t{1:d}".format(classNames[i], h)
    PsAll = PsAll / numpy.sum(PsAll)

    if outputMode:
        fig = plt.figure()
        ax = fig.add_subplot(111)
        plt.title("Classes percentage " + inputFolder.replace('Segments',''))
        ax.axis((0, len(classNames)+1, 0, 1))
        ax.set_xticks(numpy.array(range(len(classNames)+1)))
        ax.set_xticklabels([" "] + classNames)
        ax.bar(numpy.array(range(len(classNames)))+0.5, PsAll)
        plt.show()
    return classNames, PsAll
def getMusicSegmentsFromFile(inputFile):
    """Extract "Music" segments from *inputFile* into <file>_musicSegments/.

    Python 2 code. Runs the fixed SVM movie-class model over the file,
    converts the frame-level decisions to segments, and writes every Music
    segment longer than the module-level minDuration as its own WAV file
    named <start>-<end>.wav.
    """
    modelType = "svm"
    modelName = "data/svmMovies8classes"

    # fresh output directory next to the input file
    dirOutput = inputFile[0:-4] + "_musicSegments"
    if os.path.exists(dirOutput) and dirOutput!=".":
        shutil.rmtree(dirOutput)
    os.makedirs(dirOutput)

    [Fs, x] = audioBasicIO.readAudioFile(inputFile)

    # NOTE(review): the model is loaded here only for its mtStep window
    # parameter (used by flags2segs below) — confirm.
    if modelType=='svm':
        [Classifier, MEAN, STD, classNames, mtWin, mtStep, stWin, stStep, computeBEAT] = aT.loadSVModel(modelName)
    elif modelType=='knn':
        [Classifier, MEAN, STD, classNames, mtWin, mtStep, stWin, stStep, computeBEAT] = aT.loadKNNModel(modelName)

    flagsInd, classNames, acc = aS.mtFileClassification(inputFile, modelName, modelType, plotResults = False, gtFile = "")
    segs, classes = aS.flags2segs(flagsInd, mtStep)

    for i, s in enumerate(segs):
        # keep only sufficiently long Music segments
        if (classNames[int(classes[i])] == "Music") and (s[1] - s[0] >= minDuration):
            strOut = "{0:s}{1:.3f}-{2:.3f}.wav".format(dirOutput+os.sep, s[0], s[1])
            wavfile.write( strOut, Fs, x[int(Fs*s[0]):int(Fs*s[1])])
def analyzeDir(dirPath):
    """Genre-classify the music segments of every WAV file in *dirPath*.

    Python 2 code. For each WAV file: extract its Music segments, then run
    the genre model over them and print one tab-separated row of per-genre
    probabilities (a header row of genre names is printed before the first
    file).
    """
    for i,f in enumerate(glob.glob(dirPath + os.sep + '*.wav')): # for each WAV file
        getMusicSegmentsFromFile(f)
        [c, P]= classifyFolderWrapper(f[0:-4] + "_musicSegments", "svm", "data/svmMusicGenre8", False)
        if i==0:
            # header row: genre names
            print "".ljust(100)+"\t",
            for C in c:
                print C.ljust(12)+"\t",
            print
        print f.ljust(100)+"\t",
        for p in P:
            print "{0:.2f}".format(p).ljust(12)+"\t",
        print
def main(argv):
    """Command-line dispatcher (Python 2).

    Modes:
      --file <wav>        extract + genre-classify the music segments of one file
      --dir <path>        same, for every WAV file in a directory
      --sim <csv>         build and pickle a cosine-distance similarity matrix
                          from per-file feature rows in a tab-separated CSV
      --loadsim <pkl>     reload a pickled similarity matrix and plot it
      --audio-event-dir <path>  per-file class histograms for a directory
    """
    if argv[1]=="--file":
        getMusicSegmentsFromFile(argv[2])
        classifyFolderWrapper(argv[2][0:-4] + "_musicSegments", "svm", "data/svmMusicGenre8", True)
    elif argv[1]=="--dir":
        analyzeDir(argv[2])
    elif argv[1]=="--sim":
        csvFile = argv[2]
        f = []
        fileNames = []
        with open(csvFile, 'rb') as csvfile:
            spamreader = csv.reader(csvfile, delimiter='\t', quotechar='|')
            for j,row in enumerate(spamreader):
                if j>0:
                    # columns 1..8 are the feature values; column 0 holds the
                    # file name (trimmed at the ".wav" extension)
                    ftemp = []
                    for i in range(1,9):
                        ftemp.append(float(row[i]))
                    f.append(ftemp)
                    R = row[0]
                    II = R.find(".wav");
                    fileNames.append(row[0][0:II])
        f = numpy.array(f)
        # pairwise cosine distances between all feature rows
        Sim = numpy.zeros((f.shape[0], f.shape[0]))
        for i in range(f.shape[0]):
            for j in range(f.shape[0]):
                Sim[i,j] = scipy.spatial.distance.cdist(numpy.reshape(f[i,:], (f.shape[1],1)).T, numpy.reshape(f[j,:], (f.shape[1],1)).T, 'cosine')
        Sim1 = numpy.reshape(Sim, (Sim.shape[0]*Sim.shape[1], 1))
        plt.hist(Sim1)
        plt.show()
        # pickle names, features and the similarity matrix side by side
        fo = open(csvFile + "_simMatrix", "wb")
        cPickle.dump(fileNames, fo, protocol = cPickle.HIGHEST_PROTOCOL)
        cPickle.dump(f, fo, protocol = cPickle.HIGHEST_PROTOCOL)
        cPickle.dump(Sim, fo, protocol = cPickle.HIGHEST_PROTOCOL)
        fo.close()
    elif argv[1]=="--loadsim":
        try:
            fo = open(argv[2], "rb")
        except IOError:
            print "didn't find file"
            return
        # NOTE(review): on unpickling failure the handle is closed in the
        # except block and then closed again below (double close), and
        # fileNames may be undefined afterwards — confirm intended.
        try:
            fileNames = cPickle.load(fo)
            f = cPickle.load(fo)
            Sim = cPickle.load(fo)
        except:
            fo.close()
        fo.close()
        print fileNames
        Sim1 = numpy.reshape(Sim, (Sim.shape[0]*Sim.shape[1], 1))
        plt.hist(Sim1)
        plt.show()
    elif argv[1]=="--audio-event-dir":
        # per-file histogram of mid-term class decisions over a directory
        files = "*.wav"
        inputFolder = argv[2]
        if os.path.isdir(inputFolder):
            strFilePattern = os.path.join(inputFolder, files)
        else:
            strFilePattern = inputFolder + files

        wavFilesList = []
        wavFilesList.extend(glob.glob(strFilePattern))
        wavFilesList = sorted(wavFilesList)
        for i,w in enumerate(wavFilesList):
            [flagsInd, classesAll, acc] = aS.mtFileClassification(w, "data/svmMovies8classes", "svm", False, '')
            histTemp = numpy.zeros( (len(classesAll), ) )
            for f in flagsInd:
                histTemp[int(f)] += 1.0
            histTemp /= histTemp.sum()

            if i==0:
                # header row: class names
                print "".ljust(100)+"\t",
                for C in classesAll:
                    print C.ljust(12)+"\t",
                print
            print w.ljust(100)+"\t",
            for h in histTemp:
                print "{0:.2f}".format(h).ljust(12)+"\t",
            print
    return 0
if __name__ == '__main__':
    # dispatch on the command-line mode flag (see main's docstring)
    main(sys.argv)
|
apache-2.0
|
OpenDA-Association/OpenDA
|
course/exercise_black_box_enkf_polution/plot_movie_seq.py
|
1
|
1544
|
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Plot movie of model results of the original model
@author: Nils van Velzen
"""
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.animation import FuncAnimation
from time import sleep
#import polution_utils as util
import sequentialSimulation_results as sim
#offsets for chunks in state vector
#offsets for chunks in state vector
ngrid = 61  # grid points per concentration field
# the state vector ends with the two concentration fields (2*ngrid values);
# everything before them is the source-term block
no_sources_sim = len(sim.x_a[0])-2*ngrid

# create initial plot
plt.close("all")
fig,ax = plt.subplots(2,1)
xdata, ydata = [], []
ln, = plt.plot([], [], 'ro')

# NOTE(review): superseded manual animation loop, kept for reference —
# consider deleting.
#for i in range(len(c1)):
#    ax[0].clear();
#    ax[1].clear();
#    ax[0].plot(c1[i],'k')
#    ax[1].plot(c2[i],'k')
#    ax[0].set_ylabel("c_1")
#    ax[1].set_ylabel("c_2")
#    ax[0].set_ylim([0, 200])
#    ax[1].set_ylim([0, 250])
#    ax[0].set_title("t="+str(60*i))
#    plt.draw()
#    plt.pause(0.02)
def init():
    """Initialize the animation: fix the y-range of both concentration panels."""
    for axis, ymax in zip(ax, (200, 250)):
        axis.set_ylim([0, ymax])
    return ln,
def update(frame):
    """Redraw both concentration panels for animation step *frame*."""
    ax[0].clear();
    ax[1].clear();
    time = sim.analysis_time[frame]
    # slice the two concentration fields out of the analysis state vector
    c1_sim = sim.x_a[frame,(no_sources_sim):(no_sources_sim+ngrid)]
    c2_sim = sim.x_a[frame,(no_sources_sim+ngrid):(no_sources_sim+2*ngrid)]
    ax[0].plot(c1_sim,'k')
    ax[1].plot(c2_sim,'k')
    ax[0].set_ylabel("c_1")
    ax[1].set_ylabel("c_2")
    # re-apply the fixed y-ranges (clear() above resets them)
    ax[0].set_ylim([0, 200])
    ax[1].set_ylim([0, 250])
    ax[0].set_title("t="+str(int(time)))
    return ln,
# one animation frame per analysis time; blit=True redraws only the returned
# artists for speed
ani = FuncAnimation(fig, update, frames=range(len(sim.analysis_time)),
                    init_func=init, repeat=False, interval=20,blit=True)
plt.show()
|
lgpl-3.0
|
jay-johnson/redten-python
|
redten/redten_client.py
|
1
|
44960
|
import sys, os, json, logging, datetime, uuid, time
import requests
import pandas as pd
from redten.shellprinting import lg, good, boom, info, anmt
def ppj(json_data):
    """Return *json_data* rendered as sorted, 4-space-indented JSON text."""
    rendered = json.dumps(json_data, sort_keys=True, indent=4,
                          separators=(',', ': '))
    return str(rendered)
# end of ppj
################################################################
#
# Python client methods for Red10
#
class RedTenClient(object):
def __init__(self, name="rt", user="", password="", url=""):
    """Create a client and immediately attempt a login.

    NOTE(review): the name/user/password/url parameters are ignored —
    credentials and endpoint always come from the ENV_REDTEN_* environment
    variables; confirm whether the arguments should override them.
    """
    self.rt_user = str(os.getenv("ENV_REDTEN_USER", "USER")).strip().lstrip()
    self.rt_pass = str(os.getenv("ENV_REDTEN_PASS", "PASSWORD")).strip().lstrip()
    self.rt_email = str(os.getenv("ENV_REDTEN_EMAIL", "[email protected]")).strip().lstrip()
    self.rt_url = str(os.getenv("ENV_REDTEN_URL", "https://api.redten.io")).strip().lstrip()

    self.rt_user_id = 0
    self.csv_file = ""
    self.api_urls = self.build_api_urls()

    # log in:
    self.last_login = {}
    self.user_token = ""
    try:
        self.last_login = self.rest_full_login()
        self.rt_user_id = int(self.last_login["user"]["user_id"])
        self.user_token = self.last_login["token"]
    except Exception as e:
        # best-effort: fall back to a logged-out client instead of raising
        print "Failed to login with user=" + str(self.rt_user) + " url=" + str(self.rt_url) + " with exception=" + str(e)
        self.last_login = {}
        self.rt_user_id = 0
        self.user_token = ""
    # end of try/ex
# end of __init__
def lg(self, msg, level=6):
    """Convenience wrapper around the module-level ``lg`` logger."""
    lg(msg, level)
# end of lg
def get_uid(self):
    """Return the numeric user id from the last successful login (0 if none)."""
    return self.rt_user_id
# end of get_uid
def uni_key(self, length=-1):
    """Return a random hex key truncated to *length* characters.

    The default of -1 drops the final character of the 32-char uuid4 hex
    string, yielding a 31-character key.
    """
    hex_chars = str(uuid.uuid4()).replace("-", "")
    return str(hex_chars[:length])
# end of uni_key
def download_url_to_disk(self, image_url, path_to_file):
    """Download *image_url* and write the raw response bytes to *path_to_file*.

    :return: *path_to_file* on success
    """
    # 'with' guarantees the handle is closed even if the GET raises
    # (the original leaked the descriptor on a request failure)
    with open(path_to_file, 'wb') as f:
        f.write(requests.get(image_url).content)
    return path_to_file
# end of download_url_to_disk
def download_to_uni_file(self, image_url, file_prefix="tfile", storage_loc="/tmp", extension=""):
    """Download *image_url* to a uniquely named file under *storage_loc*.

    The file name is ``<file_prefix>_<uuid4-hex>[.extension]``.

    :return: path of the written file
    """
    path_to_file = str(storage_loc) + "/" + file_prefix + "_" + str(uuid.uuid4()).replace("-", "")
    if extension != "":
        path_to_file += "." + str(extension)
    # fix: open in binary mode — requests' .content is bytes, and the
    # explicit close() inside the with-block was redundant
    with open(path_to_file, "wb") as output_file:
        output_file.write(requests.get(image_url).content)
    return path_to_file
# end of download_to_uni_file
def rest_login_as_user(self, username, password, api_url="", debug=False):
    """POST to ``/login/`` and return the JWT token string.

    :param username: account name
    :param password: account password
    :param api_url: optional base URL override (defaults to ``self.rt_url``)
    :param debug: when True, log request/response details
    :return: token string, or "" when the HTTP status is not 200
    """
    url = self.rt_url
    if api_url != "":
        url = api_url

    auth_headers = {
        "Content-Type" : "application/json",
        "Accept" : "application/json"
    }
    data = {
        "username" : username,
        "password" : password
    }
    if debug:
        lg("Sending Post Request", 5)
    use_url = url + "/login/"
    response = requests.post(use_url, headers=auth_headers, data=json.dumps(data))
    if response.status_code != 200:
        # 400 is the API's wrong-credentials response; everything else is
        # reported generically
        if response.status_code == 400:
            lg("Login url=" + str(url) + " Failed - Wrong Username or Password for User=" + str(username) + " with Status=" + str(response.status_code) + " Reason=" + str(response.reason), 0)
        else:
            lg("Login url=" + str(url) + " Failed for User=" + str(username) + " with Status=" + str(response.status_code) + " Reason=" + str(response.reason), 0)
        lg("Response Test:\n" + str(response.text) + "\n", 0)
        return ""
    else:
        if debug:
            lg("Post Response Status=" + str(response.status_code) + " Reason=" + str(response.reason), 5)
        res = {}
        try:
            res = json.loads(str(response.text))
        except Exception as e:
            lg("ERROR: Failed to Login=" + str(use_url), 0)
        # NOTE(review): if the JSON parse above failed, res is {} and this
        # raises KeyError — confirm that surfacing the error here is intended.
        user_token = str(res["token"])
        if debug:
            lg("Response:", 6)
            lg(user_token, 6)
            lg("", 6)
        return user_token
# end of rest_login_as_user
def rest_full_login(self, org_username="", org_password="", api_url="", debug=False):
    """POST to ``/login/`` and cache the full login response on the client.

    Empty ``org_username``/``org_password``/``api_url`` fall back to the
    credentials and endpoint stored on the client.

    :param debug: when True, log request/response details
    :return: parsed login dict (also stored as ``self.last_login``);
             the previous ``self.last_login`` is returned unchanged when the
             HTTP status is not 200
    """
    username = str(self.rt_user)
    password = str(self.rt_pass)
    url = str(self.rt_url)
    if org_username != "":
        username = str(org_username).strip().lstrip()
    if org_password != "":
        password = str(org_password).strip().lstrip()
    if api_url != "":
        url = api_url

    auth_headers = {
        "Content-Type" : "application/json",
        "Accept" : "application/json"
    }
    data = {
        "username" : username,
        "password" : password
    }
    if debug:
        lg("Sending Post Request", 5)
    use_url = url + "/login/"
    response = requests.post(use_url, headers=auth_headers, data=json.dumps(data))
    if response.status_code != 200:
        # 400 is the API's wrong-credentials response; everything else is
        # reported generically
        if response.status_code == 400:
            lg("Login url=" + str(url) + " Failed - Wrong Username or Password for User=" + str(username) + " with Status=" + str(response.status_code) + " Reason=" + str(response.reason), 0)
        else:
            lg("Login url=" + str(url) + " Failed for User=" + str(username) + " with Status=" + str(response.status_code) + " Reason=" + str(response.reason), 0)
        lg("Response Test:\n" + str(response.text) + "\n", 0)
    else:
        if debug:
            lg("Post Response Status=" + str(response.status_code) + " Reason=" + str(response.reason), 5)
        self.last_login = {}
        try:
            self.last_login = json.loads(str(response.text))
        except Exception as e:
            lg("ERROR: Failed to Login=" + str(use_url), 0)
        if debug:
            lg("Response:", 6)
            # bug fix: the original logged `res`, a name never defined in this
            # method (NameError on the debug path); log the parsed payload
            lg(self.last_login, 6)
            lg("", 6)
    return self.last_login
# end of rest_full_login
def wait_on_job(self, job_id, debug=False):
    """Poll ``GET /ml/<job_id>/`` until the ML job reaches a terminal state.

    Re-authenticates on every poll (long-running jobs can outlive a token)
    and tolerates up to ``max_retries`` consecutive HTTP failures. Status
    transitions are logged once each (see ``last_status``).

    :param job_id: server-side ML job identifier
    :param debug: when True, log each successful poll
    :return: dict with keys ``status`` ("SUCCESS"/"Failed"/"Exception"),
             ``error`` (message, empty on success) and ``record`` (last
             parsed job JSON, possibly empty)
    """
    status = "Failed"
    err_msg = ""
    record = {}
    results = {}
    start_time = datetime.datetime.now()
    end_time = datetime.datetime.now()
    total_steps = 10
    # NOTE(review): progress/label/box (and `every` below) are assigned but
    # never used — presumably leftovers from a notebook progress widget.
    progress = None
    label = None
    box = None
    try:
        every = 1
        if total_steps <= 200:
            every = 1
        else:
            every = int(total_steps / 200) # every 0.5%
        query_params = {}
        post_data = {}
        resource_url = self.rt_url + "/ml/" + str(job_id) + "/"
        lg("Waiting on job=" + str(job_id) + " url=" + str(resource_url), 5)
        last_status = ""
        job_status = "active"
        max_retries = 10
        retry = 0
        sleep_interval = 1.0  # seconds between polls
        not_done = True
        while not_done:
            # log in again for log running jobs
            user_token = self.rest_login_as_user(self.rt_user, self.rt_pass, self.rt_url)
            auth_headers = {
                "Authorization" : "JWT " + str(user_token)
            }
            get_response = requests.get(resource_url, params=query_params, data=post_data, headers=auth_headers)
            if get_response.status_code != 201 and get_response.status_code != 200:
                # transient failure: count it and retry until max_retries
                err_msg = "Failed with GET Response Status=" + str(get_response.status_code) + " Reason=" + str(get_response.reason)
                lg(err_msg, 0)
                lg("Details:\n" + str(get_response.text) + "\n", 0)
                status = "Failed"
                retry += 1
                if retry > max_retries:
                    not_done = False
                    lg("Failed to get job=" + str(job_id) + " status", 0)
                    break
                else:
                    lg("Failed to get job=" + str(job_id) + " status retry=" + str(retry) + "/" + str(max_retries), 0)
                # end of if/else
            else:
                if debug:
                    lg("SUCCESS - GET Response Status=" + str(get_response.status_code) + " Reason=" + str(get_response.reason), 5)
                # a good poll resets the retry budget
                retry = 0
                status = "SUCCESS"
                err_msg = ""
                job_status = "unknown"
                as_json = True
                record = {}
                if as_json:
                    record = json.loads(get_response.text)
                    job_status = str(record["job"]["status"]).strip().lstrip().lower()
                # end of as_json
                # pipeline states 0..10; each transition is logged once
                if job_status == "requested":
                    if last_status != job_status:
                        lg("Job=" + str(job_id) + " is requested - Step: 0/10", 5)
                elif job_status == "initial":
                    if last_status != job_status:
                        lg("Job=" + str(job_id) + " is initial - Step 1/10", 5)
                elif job_status == "active":
                    if last_status != job_status:
                        lg("Job=" + str(job_id) + " is active - Step 2/10", 5)
                elif job_status == "training":
                    if last_status != job_status:
                        lg("Job=" + str(job_id) + " is training - Step 3/10", 5)
                elif job_status == "predicting":
                    if last_status != job_status:
                        lg("Job=" + str(job_id) + " is predicting - Step 4/10", 5)
                elif job_status == "analyzing":
                    if last_status != job_status:
                        lg("Job=" + str(job_id) + " is analyzing - Step 5/10", 5)
                elif job_status == "caching":
                    if last_status != job_status:
                        lg("Job=" + str(job_id) + " is caching - Step 6/10", 5)
                elif job_status == "plotting":
                    if last_status != job_status:
                        lg("Job=" + str(job_id) + " is plotting - Step 7/10", 5)
                elif job_status == "emailing":
                    if last_status != job_status:
                        lg("Job=" + str(job_id) + " is emailing - Step 8/10", 5)
                elif job_status == "uploading":
                    if last_status != job_status:
                        lg("Job=" + str(job_id) + " is uploading - Step 9/10", 5)
                elif job_status == "archiving":
                    if last_status != job_status:
                        lg("Job=" + str(job_id) + " is archiving - Step 10/10", 5)
                elif job_status == "completed":
                    not_done = False
                    lg("Job=" + str(job_id) + " completed", 5)
                    end_time = datetime.datetime.now()
                    # NOTE(review): prefix_label is built twice and never
                    # used afterwards — presumably leftover display code.
                    prefix_label = "Done waiting on job=" + str(job_id) + " status=" + str(job_status) + " after waiting " + str((end_time - start_time).total_seconds())[0:5] + "s"
                    prefix_label = str((end_time - start_time).total_seconds())[0:5] + "s - Done waiting on job=" + str(job_id) + " status=" + str(job_status)
                    break
                elif job_status == "cancelled":
                    not_done = False
                    lg("Job=" + str(job_id) + " cancelled", 5)
                    end_time = datetime.datetime.now()
                    break
                elif job_status == "error":
                    not_done = False
                    lg("Job=" + str(job_id) + " error", 5)
                    end_time = datetime.datetime.now()
                    break
                else:
                    not_done = False
                    lg("Job=" + str(job_id) + " in unexpected status=" + str(job_status) + "", 0)
                    end_time = datetime.datetime.now()
                    break
                # end of if/else
            last_status = job_status
            # end of post for running an ML Job
            end_time = datetime.datetime.now()
            if not_done:
                time.sleep(sleep_interval)
        # end of while not_done
    except Exception as w:
        status = "Exception"
        err_msg = "Failed waiting on job=" + str(job_id) + " with Ex=" + str(w)
        lg(err_msg, 0)
        end_time = datetime.datetime.now()
    # end of try/ex
    results = {
        "status" : status,
        "error" : err_msg,
        "record" : record
    }
    return results
# end of wait_on_job
def wait_for_job_to_finish(self, job_id):
    """Poll ``GET /ml/<job_id>/`` until the ML job finishes (verbose variant).

    Like ``wait_on_job`` but logs every poll (no debug flag, no transition
    de-duplication) and tracks a numeric ``progress`` step.

    :param job_id: server-side ML job identifier
    :return: dict with keys ``status`` ("SUCCESS"/"Failed"/"Exception"),
             ``error`` (message, empty on success) and ``record`` (last
             parsed job JSON, possibly empty)
    """
    status = "Failed"
    err_msg = ""
    record = {}
    results = {}
    try:
        query_params = {}
        post_data = {}
        resource_url = self.rt_url + "/ml/" + str(job_id) + "/"
        lg("Waiting on job=" + str(job_id) + " url=" + str(resource_url), 5)
        job_status = "active"
        max_retries = 10
        retry = 0
        sleep_interval = 1.0  # seconds between polls
        not_done = True
        while not_done:
            # re-authenticate on every poll so long jobs outlive the token
            user_token = self.rest_login_as_user(self.rt_user, self.rt_pass, self.rt_url)
            auth_headers = {
                "Authorization" : "JWT " + str(user_token)
            }
            get_response = requests.get(resource_url, params=query_params, data=post_data, headers=auth_headers)
            if get_response.status_code != 201 and get_response.status_code != 200:
                # transient failure: count it and retry until max_retries
                err_msg = "Failed with GET Response Status=" + str(get_response.status_code) + " Reason=" + str(get_response.reason)
                lg(err_msg, 0)
                lg("Details:\n" + str(get_response.text) + "\n", 0)
                status = "Failed"
                retry += 1
                if retry > max_retries:
                    not_done = False
                    lg("Failed to get job=" + str(job_id) + " status", 0)
                    break
                else:
                    lg("Failed to get job=" + str(job_id) + " status retry=" + str(retry) + "/" + str(max_retries), 0)
                # end of if/else
            else:
                lg("SUCCESS - GET Response Status=" + str(get_response.status_code) + " Reason=" + str(get_response.reason), 5)
                # a good poll resets the retry budget
                retry = 0
                status = "SUCCESS"
                err_msg = ""
                job_status = "unknown"
                as_json = True
                record = {}
                if as_json:
                    record = json.loads(get_response.text)
                    job_status = str(record["job"]["status"]).strip().lstrip().lower()
                # end of as_json
                # NOTE(review): `progress` is assigned per step but never
                # read — presumably leftover progress-bar code.
                if job_status == "requested":
                    lg("Job=" + str(job_id) + " is requested - Step: 0/10", 5)
                    progress = 0
                elif job_status == "initial":
                    lg("Job=" + str(job_id) + " is initial - Step 1/10", 5)
                    progress = 1
                elif job_status == "active":
                    lg("Job=" + str(job_id) + " is active - Step 2/10", 5)
                    progress = 2
                elif job_status == "training":
                    lg("Job=" + str(job_id) + " is training - Step 3/10", 5)
                    progress = 3
                elif job_status == "predicting":
                    lg("Job=" + str(job_id) + " is predicting - Step 4/10", 5)
                    progress = 4
                elif job_status == "analyzing":
                    lg("Job=" + str(job_id) + " is analyzing - Step 5/10", 5)
                    progress = 5
                elif job_status == "caching":
                    lg("Job=" + str(job_id) + " is caching - Step 6/10", 5)
                    progress = 6
                elif job_status == "plotting":
                    lg("Job=" + str(job_id) + " is plotting - Step 7/10", 5)
                    progress = 7
                elif job_status == "emailing":
                    lg("Job=" + str(job_id) + " is emailing - Step 8/10", 5)
                    progress = 8
                elif job_status == "uploading":
                    lg("Job=" + str(job_id) + " is uploading - Step 9/10", 5)
                    progress = 9
                elif job_status == "archiving":
                    lg("Job=" + str(job_id) + " is archiving - Step 10/10", 5)
                    progress = 10
                elif job_status == "completed":
                    progress = 10
                    not_done = False
                    lg("Job=" + str(job_id) + " completed", 5)
                    break
                elif job_status == "cancelled":
                    not_done = False
                    lg("Job=" + str(job_id) + " cancelled", 5)
                    break
                elif job_status == "error":
                    not_done = False
                    lg("Job=" + str(job_id) + " error", 5)
                    break
                else:
                    # NOTE(review): unknown statuses are logged as
                    # "completed" — confirm this message is intended.
                    not_done = False
                    lg("Job=" + str(job_id) + " completed", 5)
                    break
                # end of if/else
            # end of post for running an ML Job
            if not_done:
                time.sleep(sleep_interval)
        # end of while not_done
    except Exception as w:
        status = "Exception"
        err_msg = "Failed waiting for job=" + str(job_id) + " with Ex=" + str(w)
        lg(err_msg, 0)
    # end of try/ex
    results = {
        "status" : status,
        "error" : err_msg,
        "record" : record
    }
    return results
# end of wait_for_job_to_finish
def helper_get_job_analysis(self, job_id):
    """Poll the REST API for the analysis record attached to an ML job.

    Re-authenticates on each attempt and transparently logs back in when
    the JWT signature has expired, retrying up to ``max_retries`` times
    between ``sleep_interval``-second pauses before giving up.

    :param job_id: identifier of the job whose analysis is requested
    :return: dict with keys ``status`` ("SUCCESS"/"Failed"/"Exception"),
             ``error`` (message, empty on success) and ``record``
             (parsed JSON analysis payload, empty dict on failure)
    """
    status = "Failed"
    err_msg = ""
    record = {}
    results = {}
    try:
        query_params = {}
        post_data = {}
        resource_url = self.rt_url + "/ml/analysis/" + str(job_id) + "/"
        lg("Getting analysis for job=" + str(job_id) + " url=" + str(resource_url), 5)
        max_retries = 10
        retry = 0
        sleep_interval = 1.0
        not_done = True
        while not_done:
            # re-authenticate for every attempt so a stale token cannot
            # wedge the polling loop
            user_token = self.rest_login_as_user(self.rt_user, self.rt_pass, self.rt_url)
            auth_headers = {
                "Authorization" : "JWT " + str(user_token)
            }
            get_response = requests.get(resource_url, params=query_params, data=post_data, headers=auth_headers)
            if get_response.status_code != 201 and get_response.status_code != 200:
                # if logged out while waiting, just log back in and retry
                if "Signature has expired." in str(get_response.text):
                    # was: self.user_login(self.rt_user, self.self.rt_pass, ...)
                    # the 'self.self' typo raised AttributeError instead of
                    # refreshing the token
                    user_token = self.user_login(self.rt_user, self.rt_pass, self.rt_url)
                    auth_headers = {
                        "Authorization" : "JWT " + str(user_token)
                    }
                else:
                    err_msg = "Failed with GET Analysis Response Status=" + str(get_response.status_code) + " Reason=" + str(get_response.reason)
                    lg(err_msg, 0)
                    lg("Details:\n" + str(get_response.text) + "\n", 0)
                    status = "Failed"
                    retry += 1
                    if retry > max_retries:
                        not_done = False
                        lg("Failed to get analysis for job=" + str(job_id) + " status", 0)
                        break
                    else:
                        lg("Failed to get analysis for job=" + str(job_id) + " status retry=" + str(retry) + "/" + str(max_retries), 0)
                # end of if/else
            else:
                lg("SUCCESS - GET Analysis Response Status=" + str(get_response.status_code) + " Reason=" + str(get_response.reason), 5)
                retry = 0
                status = "SUCCESS"
                err_msg = ""
                record = {}
                record = json.loads(get_response.text)
                not_done = False
                lg("Found Job=" + str(job_id) + " analysis", 5)
                break
            # end of if/else
            if not_done:
                time.sleep(sleep_interval)
        # end of while not_done
    except Exception as w:
        status = "Exception"
        err_msg = "Failed waiting on get analysis for job=" + str(job_id) + " with Ex=" + str(w)
        lg(err_msg, 0)
    # end of try/ex
    results = {
        "status" : status,
        "error" : err_msg,
        "record" : record
    }
    return results
# end of helper_get_job_analysis
def search_ml_jobs(self, search_req, debug=False):
    """Search the REST API for ML jobs matching *search_req*.

    :param search_req: dict with keys ``title``, ``dsname``, ``desc``,
        ``features`` and ``target_column``; each is sent as a URL query
        parameter
    :param debug: unused here; kept for signature symmetry with siblings
    :return: dict with ``status`` ("SUCCESS"/"Failed"/"Exception"),
        ``error`` message and ``record`` (parsed JSON search results,
        empty dict on failure)
    """
    status = "Failed"
    err_msg = ""
    record = {}
    results = {}
    try:
        query_params = {
            "title" : search_req["title"],
            "dsname" : search_req["dsname"],
            "desc" : search_req["desc"],
            "features" : search_req["features"],
            "target_column" : search_req["target_column"]
        }
        post_data = {}
        # Get the ML Job
        resource_url = self.rt_url + "/ml/search/"
        lg("Searching ML Jobs url=" + str(resource_url), 6)
        # job_status is written below but never read back
        job_status = "active"
        max_retries = 10
        retry = 0
        sleep_interval = 1.0
        not_done = True
        while not_done:
            # re-authenticate on every attempt so an expired JWT cannot
            # permanently wedge the retry loop
            user_token = self.rest_login_as_user(self.rt_user, self.rt_pass, self.rt_url)
            auth_headers = {
                "Authorization" : "JWT " + str(user_token)
            }
            get_response = requests.get(resource_url, params=query_params, data=post_data, headers=auth_headers)
            if get_response.status_code != 201 and get_response.status_code != 200:
                err_msg = "Failed with SEARCH Response Status=" + str(get_response.status_code) + " Reason=" + str(get_response.reason)
                lg(err_msg, 0)
                lg("Details:\n" + str(get_response.text) + "\n", 0)
                status = "Failed"
                retry += 1
                if retry > max_retries:
                    not_done = False
                    lg("Failed to search=" + str(search_req) + " for jobs status", 0)
                    break
                else:
                    lg("Failed to search=" + str(search_req) + " for jobs status retry=" + str(retry) + "/" + str(max_retries), 0)
                # end of if/else
            else:
                lg("SUCCESS - Job Search Response Status=" + str(get_response.status_code) + " Reason=" + str(get_response.reason), 5)
                retry = 0
                status = "SUCCESS"
                err_msg = ""
                job_status = "unknown"
                as_json = True
                record = {}
                if as_json:
                    record = json.loads(get_response.text)
                not_done = False
                lg("Found Job=" + str(search_req) + " results", 5)
                break
            # end of if/else
            # pause between failed attempts (success and exhaustion break out)
            if not_done:
                time.sleep(sleep_interval)
        # end of while not_done
    except Exception as w:
        status = "Exception"
        err_msg = "Failed waiting on search=" + str(search_req) + " for jobs with Ex=" + str(w)
        lg(err_msg, 0)
    # end of try/ex
    results = {
        "status" : status,
        "error" : err_msg,
        "record" : record
    }
    return results
# end of search_ml_jobs
def build_api_urls(self, use_base_url=""):
base_url = str(os.getenv("ENV_REDTEN_URL", "https://api.redten.io"))
if use_base_url != "":
base_url = use_base_url
self.api_urls = {
"create-job" : base_url + "/ml/run/",
"get-job" : base_url + "/ml/%s/",
"get-analysis" : base_url + "/ml/analysis/%s/",
"import" : base_url + "/ml/import/"
}
return self.api_urls
# end of build_api_urls
def run_job(self, query_params=None, post_data=None, retry=False, debug=False):
    """Submit a new ML job to the REST API.

    Exactly one of ``csv_file``, ``sloc`` or ``rloc`` must be set in
    *post_data* to point at the training data.

    :param query_params: extra URL query parameters (default empty)
    :param post_data: JSON body describing the job (default empty)
    :param retry: internal flag - True when this call is the single
        automatic re-login retry after a JWT expiry
    :param debug: log request/response details when True
    :return: dict with ``status`` ("valid"/"invalid"/error text) and
        ``data`` (parsed API response, empty dict on failure)
    """
    # avoid mutable default arguments: post_data is mutated below and a
    # shared {} default would leak state between calls
    if query_params is None:
        query_params = {}
    if post_data is None:
        post_data = {}
    result = {
        "status" : "invalid",
        "data" : {}
    }
    try:
        if post_data["csv_file"] == "" and post_data["sloc"] == "" and post_data["rloc"] == "":
            boom("")
            boom("Please provide one of these to start a machine learning job a 'csv_file' or 'sloc' or 'rloc' in the post_data to run a job")
            boom(" csv_file - locally deployed csv file on all worker nodes")
            boom(" sloc - S3 location formatted: <bucket:key> for the csv file")
            boom(" rloc - Redis location formatted: <server name:key> for the csv data in Redis")
            boom("")
            result["status"] = "invalid"
            result["data"] = {}
            return result
        # provide an error message
        use_url = str(self.api_urls["create-job"])
        if debug:
            lg("Running ML Job url=" + str(use_url), 5)
        if "user_id" not in post_data:
            post_data["user_id"] = self.rt_user_id
        # add in my user
        if "ds_name" in post_data:
            ds_name = str(post_data["ds_name"])
        # end of changing the csv file by the data set name
        user_token = self.user_login(self.rt_user, self.rt_pass, self.rt_url)
        auth_headers = {
            "Content-type": "application/json",
            "Authorization" : "JWT " + str(user_token)
        }
        post_response = requests.post(use_url, params=query_params, data=json.dumps(post_data), headers=auth_headers)
        if post_response.status_code != 201 and post_response.status_code != 200:
            if not retry:
                if "Signature has expired." in str(post_response.text):
                    # token expired mid-request: retry exactly once; the
                    # recursive call performs its own login.
                    # was: self.run_job(..., headers=auth_headers, ...) which
                    # raised TypeError (run_job has no 'headers' parameter)
                    # and also discarded the retry's result
                    result = self.run_job(query_params=query_params, post_data=post_data, retry=True, debug=debug)
                else:
                    boom("Failed with Post Response Status=" + str(post_response.status_code) + " Reason=" + str(post_response.reason))
                    boom("Details:\n" + str(post_response.text) + "\n")
            else:
                boom("Failed with Post Response Status=" + str(post_response.status_code) + " Reason=" + str(post_response.reason))
                boom("Details:\n" + str(post_response.text) + "\n")
        else:
            if debug:
                lg("SUCCESS - Post Response Status=" + str(post_response.status_code) + " Reason=" + str(post_response.reason), 5)
            result["data"] = json.loads(post_response.text)
            result["status"] = "valid"
        # end of valid response from the api
    except Exception as e:
        result["status"] = "Failed to Run ML Job with exception='" + str(e) + "'"
        result["data"] = {}
        boom(result["status"])
    # end of try/ex
    return result
# end of run_job
def get_job_analysis(self, job_id, show_plots=True, debug=False):
    """Fetch the analysis record for a job and optionally list its plots.

    :param job_id: job identifier (None is rejected with an error message)
    :param show_plots: when True, announce each image title and URL found
        in the report
    :param debug: unused; kept for signature symmetry with siblings
    :return: the analysis record dict ({} when the job failed or has none)
    """
    job_report = {}
    if job_id == None:
        boom("Failed to start a new job")
    else:
        job_res = self.helper_get_job_analysis(job_id)
        if job_res["status"] == "SUCCESS":
            job_report = job_res["record"]
        else:
            boom("Job=" + str(job_id) + " failed with status=" + str(job_res["status"]) + " err=" + str(job_res["error"]))
    # end of get job analysis
    if show_plots:
        if "images" not in job_report:
            boom("Job=" + str(job_id) + " does not have any images yet")
        else:
            for img in job_report["images"]:
                anmt(img["title"])
                lg("URL: " + str(img["image"]))
                lg("---------------------------------------------------------------------------------------")
        # end of if images exist
    # end of downloading job plots
    return job_report
# end of get_job_analysis
def get_job_results(self, job_report, cell=0, debug=False):
    """Extract the ``job_results`` manifest from one analysis cell.

    :param job_report: full analysis report (as returned by
        get_job_analysis)
    :param cell: index of the analysis cell to read
    :param debug: unused; kept for signature symmetry with siblings
    :return: the job_results dict, or {} on any failure
    """
    try:
        analyses = job_report["analyses"]
        if len(analyses) < cell:
            boom("Failed to get accuracy because job does not have a cell=" + str(cell) + " length=" + str(len(job_report["analyses"])))
            return {}
        return analyses[cell]["analysis_json"]["manifest"]["job_results"]
    except Exception as e:
        boom("Invalid get_job_results - please pass in the full analysis report to this method - exception=" + str(e))
        return {}
# end of get_job_results
def get_job_cache_manifest(self, job_report, cell=0, debug=False):
    """Extract the cache ``manifest`` from one analysis cell.

    :param job_report: full analysis report (as returned by
        get_job_analysis)
    :param cell: index of the analysis cell to read
    :param debug: unused; kept for signature symmetry with siblings
    :return: the cache manifest dict, or {} on any failure
    """
    try:
        if len(job_report["analyses"]) < cell:
            boom("Failed to get cache manifest because job does not have a cell=" + str(cell) + " length=" + str(len(job_report["analyses"])))
            return {}
        else:
            return job_report["analyses"][cell]["analysis_json"]["manifest"]["manifest"]
    except Exception as e:
        boom("Invalid get_job_cache_manifest - please pass in the full analysis report to this method - exception=" + str(e))
        # previously the handler fell off the end and returned None;
        # return {} to keep the dict contract consistent with
        # get_job_results
        return {}
# end of get_job_cache_manifest
def get_analysis_manifest(self, job_report, cell=0, debug=False):
    """Extract the full ``manifest`` from one analysis cell.

    :param job_report: full analysis report (as returned by
        get_job_analysis)
    :param cell: index of the analysis cell to read
    :param debug: unused; kept for signature symmetry with siblings
    :return: the manifest dict, or {} on any failure
    """
    try:
        if len(job_report["analyses"]) < cell:
            boom("Failed to get manifest because job does not have a cell=" + str(cell) + " length=" + str(len(job_report["analyses"])))
            return {}
        else:
            return job_report["analyses"][cell]["analysis_json"]["manifest"]
    except Exception as e:
        boom("Invalid get_analysis_manifest - please pass in the full analysis report to this method - exception=" + str(e))
        # previously the handler fell off the end and returned None;
        # return {} to keep the dict contract consistent with
        # get_job_results
        return {}
# end of get_analysis_manifest
def build_prediction_results(self, job_report, cell=0, debug=False):
    """Convert one analysis cell's accuracy JSON into per-column results.

    For each target column in ``accuracy_json`` this collects the MSE,
    the training accuracy, and the predictions as a pandas DataFrame.

    :param job_report: full analysis report (as returned by
        get_job_analysis)
    :param cell: index of the analysis cell to read
    :param debug: passed through to get_job_results
    :return: dict keyed by column name with sub-keys ``mse``,
        ``accuracy`` and ``predictions_df`` (empty dict on failure)
    """
    res = {}
    try:
        if len(job_report["analyses"]) < cell:
            boom("Failed to build_prediction_results because job does not have a cell=" + str(cell) + " length=" + str(len(job_report["analyses"])))
            return {}
        else:
            job_results = self.get_job_results(job_report, cell, debug)
            if len(job_results) == 0:
                boom("Failed to find job_results for building prediction results")
                return res
            else:
                for col_name in job_results["accuracy_json"]:
                    res[col_name] = {}
                    # each metric is parsed defensively: a missing or
                    # malformed value degrades to None instead of aborting
                    # the whole build
                    mse = None
                    try:
                        mse = float(job_results["accuracy_json"][col_name]["MSE"])
                    except Exception as f:
                        mse = None
                    accuracy = None
                    try:
                        accuracy = float(job_results["accuracy_json"][col_name]["TrainAccuracy"])
                    except Exception as f:
                        accuracy = None
                    predictions_df = None
                    try:
                        predictions_df = pd.DataFrame(job_results["accuracy_json"][col_name]["Predictions"])
                    except Exception as f:
                        boom("Failed Converting column=" + str(col_name) + " Predictions to pd.DataFrame with exception=" + str(f))
                        predictions_df = None
                    res[col_name]["mse"] = mse
                    res[col_name]["accuracy"] = accuracy
                    res[col_name]["predictions_df"] = predictions_df
                # for all columns in the accuracy payload
        # if/else valid report to analyze
    except Exception as e:
        boom("Invalid build_prediction_results - please pass in the full analysis report to this method - exception=" + str(e))
    # end of try/ex
    return res
# end of build_prediction_results
def build_forecast_results(self, job_report, cell=0, debug=False):
    """Convert one analysis cell's accuracy JSON into per-column forecast
    results.

    Like build_prediction_results, but additionally extracts the forecast
    MSE, the raw training predictions, the training predictions as a
    DataFrame, and a date-indexed prediction DataFrame.

    :param job_report: full analysis report (as returned by
        get_job_analysis)
    :param cell: index of the analysis cell to read
    :param debug: passed through to get_job_results
    :return: dict keyed by column name with sub-keys ``accuracy``,
        ``mse``, ``predictions_df``, ``train_mse``, ``train_accuracy``,
        ``train_predictions``, ``train_predictions_df`` and
        ``date_predictions_df`` (empty dict on failure)
    """
    res = {}
    try:
        if len(job_report["analyses"]) < cell:
            boom("Failed to build_forecast_results because job does not have a cell=" + str(cell) + " length=" + str(len(job_report["analyses"])))
            return {}
        else:
            job_results = self.get_job_results(job_report, cell, debug)
            if len(job_results) == 0:
                boom("Failed to find job_results for building prediction results")
                return res
            else:
                for col_name in job_results["accuracy_json"]:
                    res[col_name] = {}
                    # each metric is parsed defensively: a missing or
                    # malformed value degrades to None instead of aborting
                    # the whole build
                    mse = None
                    try:
                        mse = float(job_results["accuracy_json"][col_name]["MSE"])
                    except Exception as f:
                        mse = None
                    accuracy = None
                    try:
                        accuracy = float(job_results["accuracy_json"][col_name]["TrainAccuracy"])
                    except Exception as f:
                        accuracy = None
                    predictions_df = None
                    try:
                        predictions_df = pd.DataFrame(job_results["accuracy_json"][col_name]["Predictions"])
                    except Exception as f:
                        predictions_df = None
                    forecast_mse = None
                    try:
                        forecast_mse = float(job_results["accuracy_json"][col_name]["ForecastMSE"])
                    except Exception as f:
                        forecast_mse = None
                    train_accuracy = None
                    try:
                        train_accuracy = float(job_results["accuracy_json"][col_name]["TrainAccuracy"])
                    except Exception as f:
                        train_accuracy = None
                    train_predictions = None
                    try:
                        train_predictions = job_results["accuracy_json"][col_name]["TrainPredictions"]
                    except Exception as f:
                        train_predictions = None
                    train_predictions_df = None
                    try:
                        # TrainPredictionsDF arrives as a JSON string, not
                        # a parsed object
                        train_predictions_df = pd.DataFrame(json.loads(job_results["accuracy_json"][col_name]["TrainPredictionsDF"]))
                    except Exception as f:
                        boom("Failed Converting column=" + str(col_name) + " TrainPredictionsDF to pd.DataFrame with exception=" + str(f))
                        train_predictions_df = None
                    # end of TrainPredictionsDF
                    date_predictions_df = None
                    try:
                        # "Date" values are epoch milliseconds (unit='ms')
                        date_predictions_df = pd.DataFrame(json.loads(job_results["accuracy_json"][col_name]["DatePredictionsDF"])).sort_values(by="Date", ascending=True)
                        date_predictions_df["Date"] = pd.to_datetime(date_predictions_df["Date"], unit='ms')
                        if "Invalid" in date_predictions_df.columns:
                            del date_predictions_df["Invalid"]
                    except Exception as f:
                        boom("Failed Converting column=" + str(col_name) + " DatePredictionsDF to pd.DataFrame with exception=" + str(f))
                        date_predictions_df = None
                    # end of DatePredictionsDF
                    # NOTE(review): here "mse" holds the *forecast* MSE and
                    # "train_mse" the in-sample MSE - key meanings differ
                    # from build_prediction_results; confirm consumers
                    # expect this
                    res[col_name]["accuracy"] = accuracy
                    res[col_name]["mse"] = forecast_mse
                    res[col_name]["predictions_df"] = predictions_df
                    res[col_name]["train_mse"] = mse
                    res[col_name]["train_accuracy"] = train_accuracy
                    res[col_name]["train_predictions"] = train_predictions
                    res[col_name]["train_predictions_df"] = train_predictions_df
                    res[col_name]["date_predictions_df"] = date_predictions_df
                # for all columns in the accuracy payload
        # if/else valid report to analyze
    except Exception as e:
        boom("Invalid build_forecast_results - please pass in the full analysis report to this method - exception=" + str(e))
    # end of try/ex
    return res
# end of build_forecast_results
def generic_search(self, url, term):
    """Run a simple Elasticsearch match query against *url*.

    :param url: full Elasticsearch ``_search`` endpoint URL
    :param term: value matched against the ``message`` field
    :return: parsed JSON response dict
    """
    query = json.dumps({
        "query": {
            "match": {
                "message": term
            }
        }
    })
    # was: requests.get(uri, data=query) -- 'uri' is undefined here
    # (NameError); the parameter is named 'url'
    response = requests.get(url, data=query)
    results = json.loads(response.text)
    return results
# end of search
def get_latest(self, url, limit=50):
    """Fetch the newest *limit* documents from an Elasticsearch endpoint.

    :param url: full Elasticsearch ``_search`` endpoint URL
    :param limit: maximum number of documents to return
    :return: parsed JSON response dict
    """
    # match everything, newest first, capped at *limit* hits
    body = {
        "query": {"match_all": {}},
        "size": limit,
        "sort": [{"@timestamp": {"order": "desc"}}],
    }
    response = requests.get(url, data=json.dumps(body))
    return json.loads(response.text)
# end of get_latest
def get_latest_errors_in_es(self, url, limit=50):
    """Fetch the newest *limit* ERROR-level documents from Elasticsearch.

    :param url: full Elasticsearch ``_search`` endpoint URL
    :param limit: maximum number of documents to return
    :return: parsed JSON response dict
    """
    # boost exact ERROR-level matches so they dominate the ranking
    error_clause = {
        "match": {
            "level": {
                "query": "ERROR",
                "boost": 50
            }
        }
    }
    body = {
        "query": {"bool": {"should": [error_clause]}},
        "size": limit,
        "sort": [{"@timestamp": {"order": "desc"}}],
    }
    response = requests.get(url, data=json.dumps(body))
    return json.loads(response.text)
# end of get_latest
def convert_date_string_to_date(self, date_str, optional_format="%Y-%m-%dT%H:%M:%S.%fZ"):
    """Parse *date_str* into a datetime using *optional_format*.

    :param date_str: date string to parse (coerced with str())
    :param optional_format: strptime format, defaulting to the
        Elasticsearch ``@timestamp`` layout
    :return: datetime.datetime on success, None on failure (the error is
        logged, not raised)
    """
    date_to_return = None
    try:
        import datetime
        date_to_return = datetime.datetime.strptime(str(date_str), optional_format)
    except Exception as f:
        # was 'except Exception,f:' -- Python 2-only syntax that is a
        # SyntaxError on Python 3
        self.lg("ERROR: Failed Converting Date(" + str(date_str) + ") with Format(" + str(optional_format) + ")", 0)
    # end of tries to read this string as a valid date...
    return date_to_return
# end of convert_date_string_to_date
def format_results(self, results, debug=False):
    """Flatten an Elasticsearch response into simple log dicts.

    Entries are returned oldest-first; any document whose message
    contains "/login/" is dropped, and malformed documents are logged
    and skipped.

    :param results: raw Elasticsearch search response dict
    :param debug: print "date - level - msg" for each entry when True
    :return: list of dicts with keys date/level/msg/host/logger/tags
    """
    data = [doc for doc in results['hits']['hits']]
    logs = []
    for doc in reversed(data):
        try:
            new_log = {
                # was: convert_date_string_to_date(...) -- unqualified name
                # raised NameError; it is a method on this client
                "date" : str(self.convert_date_string_to_date(doc["_source"]["@timestamp"]).strftime("%Y-%m-%d %H:%M:%S")),
                "level" : str(doc["_source"]["level"]).strip().lstrip(),
                "msg" : str(doc["_source"]["message"]).strip().lstrip(),
                "host" : str(doc["_source"]["host"]).strip().lstrip(),
                # NOTE(review): "logger" reads the "host" field, same as
                # "host" above - possibly meant a "logger" field; confirm
                # against the log schema
                "logger" : str(doc["_source"]["host"]).strip().lstrip(),
                "tags" : doc["_source"]["tags"]
            }
            if debug:
                print("%s - %s - %s" % (new_log["date"], new_log["level"], new_log["msg"]))
            if "/login/" not in new_log["msg"]:
                logs.append(new_log)
            # ignore the login messages...
        except Exception as e:
            lg("Failed to process log=" + str(doc) + " with ex=" + str(e), 0)
        # end try/ex
    # end of for all docs
    return logs
# end of format_results
def get_latest_logs(self, es_endpoint="https://api.redten.io:9201/", index="logstash-*", doc_type="restlogs", num_logs=20):
    """Fetch and format the newest *num_logs* log entries.

    :param es_endpoint: Elasticsearch base URL (must end with '/')
    :param index: index pattern to search
    :param doc_type: document type appended to the search path
    :param num_logs: maximum number of entries to return
    :return: list of formatted log dicts (see format_results)
    """
    search_url = str(es_endpoint) + str(index) + "/" + str(doc_type) + "/_search"
    results = self.get_latest(search_url, num_logs)
    # was: format_results(results) -- unqualified name raised NameError;
    # it is a method on this client
    return self.format_results(results)
# end of get_latest_logs
def get_latest_errors(self, es_endpoint="https://api.redten.io:9201/", index="logstash-*", doc_type="restlogs", num_logs=20):
    """Fetch and format the newest *num_logs* ERROR-level log entries.

    :param es_endpoint: Elasticsearch base URL (must end with '/')
    :param index: index pattern to search
    :param doc_type: document type appended to the search path
    :param num_logs: maximum number of entries to return
    :return: list of formatted log dicts (see format_results)
    """
    search_url = str(es_endpoint) + str(index) + "/" + str(doc_type) + "/_search"
    results = self.get_latest_errors_in_es(search_url, num_logs)
    # was: format_results(results) -- unqualified name raised NameError;
    # it is a method on this client
    return self.format_results(results)
# end of get_latest_errors
def display_logs(self, log_data=None, debug=False):
    """Print each log entry as "date - level - msg" via lg.

    :param log_data: list of formatted log dicts (see format_results);
        defaults to an empty list
    :param debug: unused; kept for signature symmetry with siblings
    :return: None
    """
    # avoid the mutable default argument anti-pattern (was log_data=[])
    if log_data is None:
        log_data = []
    for cur_log in log_data:
        lg(str(cur_log["date"]) + " - " + str(cur_log["level"]) + " - " + str(cur_log["msg"]))
    # end for for all logs to show
# end of display_logs
def show_logs(self, limit=20, debug=False):
    """Print the newest *limit* log entries.

    :param limit: maximum number of entries to fetch and display
    :param debug: unused; kept for signature symmetry with siblings
    :return: None
    """
    # was: display_logs(get_latest_logs(...)) -- unqualified names raised
    # NameError; both are methods on this client
    self.display_logs(self.get_latest_logs(num_logs=limit))
    return None
# end of show_logs
def show_errors(self, limit=20, debug=False):
    """Print the newest *limit* ERROR-level log entries.

    :param limit: maximum number of entries to fetch and display
    :param debug: unused; kept for signature symmetry with siblings
    :return: None
    """
    # was: display_logs(get_latest_errors(...)) -- unqualified names
    # raised NameError; both are methods on this client
    self.display_logs(self.get_latest_errors(num_logs=limit))
    return None
# end of show_errors
# returns a user token (jwt) for now
def user_login(self, rt_user, rt_pass, rt_url):
    """Authenticate against the REST API.

    :param rt_user: account user name
    :param rt_pass: account password
    :param rt_url: API base URL
    :return: the JWT token string, or "" when login failed (an error is
        reported via boom)
    """
    token = self.rest_login_as_user(rt_user, rt_pass, rt_url)
    if token == "":
        boom("Failed logging in - Stopping")
        return ""
    # end of if logged in work
    return token
# end of user_login
#
#
################################################################
|
apache-2.0
|
haribharadwaj/PySurfer
|
surfer/viz.py
|
3
|
110246
|
from math import floor
import os
from os.path import join as pjoin
from tempfile import mkdtemp
from warnings import warn
import numpy as np
from scipy import stats, ndimage, misc
from scipy.interpolate import interp1d
from matplotlib.colors import colorConverter
import nibabel as nib
from mayavi import mlab
from mayavi.tools.mlab_scene_model import MlabSceneModel
from mayavi.core import lut_manager
from mayavi.core.ui.api import SceneEditor
from mayavi.core.ui.mayavi_scene import MayaviScene
from traits.api import (HasTraits, Range, Int, Float,
Bool, Enum, on_trait_change, Instance)
from . import utils, io
from .utils import (Surface, verbose, create_color_lut, _get_subjects_dir,
string_types, assert_ffmpeg_is_available, ffmpeg)
import logging
logger = logging.getLogger('surfer')
# Camera presets per hemisphere and canonical view name: 'v' holds the
# (azimuth, elevation) pair passed to mlab.view() and 'r' the camera roll.
lh_viewdict = {'lateral': {'v': (180., 90.), 'r': 90.},
               'medial': {'v': (0., 90.), 'r': -90.},
               'rostral': {'v': (90., 90.), 'r': -180.},
               'caudal': {'v': (270., 90.), 'r': 0.},
               'dorsal': {'v': (180., 0.), 'r': 90.},
               'ventral': {'v': (180., 180.), 'r': 90.},
               'frontal': {'v': (120., 80.), 'r': 106.739},
               'parietal': {'v': (-120., 60.), 'r': 49.106}}
# right-hemisphere presets mirror the left about the midline
rh_viewdict = {'lateral': {'v': (180., -90.), 'r': -90.},
               'medial': {'v': (0., -90.), 'r': 90.},
               'rostral': {'v': (-90., -90.), 'r': 180.},
               'caudal': {'v': (90., -90.), 'r': 0.},
               'dorsal': {'v': (180., 0.), 'r': 90.},
               'ventral': {'v': (180., 180.), 'r': 90.},
               'frontal': {'v': (60., 80.), 'r': -106.739},
               'parietal': {'v': (-60., 60.), 'r': -49.106}}
# lookup table keyed by hemisphere id ('lh'/'rh')
viewdicts = dict(lh=lh_viewdict, rh=rh_viewdict)
def make_montage(filename, fnames, orientation='h', colorbar=None,
                 border_size=15):
    """Save montage of current figure

    Parameters
    ----------
    filename : str
        The name of the file, e.g, 'montage.png'. If None, the image
        will not be saved.
    fnames : list of str | list of array
        The images to make the montage of. Can be a list of filenames
        or a list of image data arrays.
    orientation : 'h' | 'v' | list
        The orientation of the montage: horizontal, vertical, or a nested
        list of int (indexes into fnames).
    colorbar : None | list of int
        If None remove colorbars, else keep the ones whose index
        is present.
    border_size : int
        The size of the border to keep.

    Returns
    -------
    out : array
        The montage image data array.
    """
    import Image
    # NOTE(review): 'import Image' is PIL's Python 2 namespace; Pillow /
    # Python 3 would need 'from PIL import Image' -- confirm supported
    # Python versions
    # This line is only necessary to overcome a PIL bug, see:
    # http://stackoverflow.com/questions/10854903/what-is-causing-
    # dimension-dependent-attributeerror-in-pil-fromarray-function
    fnames = [f if isinstance(f, string_types) else f.copy() for f in fnames]
    if isinstance(fnames[0], string_types):
        images = map(Image.open, fnames)
    else:
        images = map(Image.fromarray, fnames)
    # NOTE(review): 'images' is consumed by len()/enumerate and then again
    # by zip below; on Python 3 map() is a one-shot iterator, so this
    # relies on Python 2's list-returning map -- verify before porting
    # get bounding box for cropping
    boxes = []
    for ix, im in enumerate(images):
        # sum the RGB dimension so we do not miss G or B-only pieces
        gray = np.sum(np.array(im), axis=-1)
        gray[gray == gray[0, 0]] = 0  # hack for find_objects that wants 0
        if np.all(gray == 0):
            raise ValueError("Empty image (all pixels have the same color).")
        labels, n_labels = ndimage.label(gray.astype(np.float))
        slices = ndimage.find_objects(labels, n_labels)  # slice roi
        if colorbar is not None and ix in colorbar:
            # we need all pieces so let's compose them into single min/max
            slices_a = np.array([[[xy.start, xy.stop] for xy in s]
                                 for s in slices])
            # TODO: ideally gaps could be deduced and cut out with
            # consideration of border_size
            # so we need mins on 0th and maxs on 1th of 1-nd dimension
            mins = np.min(slices_a[:, :, 0], axis=0)
            maxs = np.max(slices_a[:, :, 1], axis=0)
            s = (slice(mins[0], maxs[0]), slice(mins[1], maxs[1]))
        else:
            # we need just the first piece
            s = slices[0]
        # box = (left, top, width, height)
        boxes.append([s[1].start - border_size, s[0].start - border_size,
                      s[1].stop + border_size, s[0].stop + border_size])
    # convert orientation to nested list of int
    if orientation == 'h':
        orientation = [range(len(images))]
    elif orientation == 'v':
        orientation = [[i] for i in range(len(images))]
    # find bounding box
    n_rows = len(orientation)
    n_cols = max(len(row) for row in orientation)
    # align boxes across rows/columns so montage tiles line up exactly
    if n_rows > 1:
        min_left = min(box[0] for box in boxes)
        max_width = max(box[2] for box in boxes)
        for box in boxes:
            box[0] = min_left
            box[2] = max_width
    if n_cols > 1:
        min_top = min(box[1] for box in boxes)
        max_height = max(box[3] for box in boxes)
        for box in boxes:
            box[1] = min_top
            box[3] = max_height
    # crop images
    cropped_images = []
    for im, box in zip(images, boxes):
        cropped_images.append(im.crop(box))
    images = cropped_images
    # Get full image size
    row_w = [sum(images[i].size[0] for i in row) for row in orientation]
    row_h = [max(images[i].size[1] for i in row) for row in orientation]
    out_w = max(row_w)
    out_h = sum(row_h)
    # compose image: paste each tile left-to-right within a row, rows
    # stacked top-to-bottom
    new = Image.new("RGBA", (out_w, out_h))
    y = 0
    for row, h in zip(orientation, row_h):
        x = 0
        for i in row:
            im = images[i]
            pos = (x, y)
            new.paste(im, pos)
            x += im.size[0]
        y += h
    if filename is not None:
        try:
            new.save(filename)
        except Exception:
            # best-effort save: the montage array is still returned below
            print("Error saving %s" % filename)
    return np.array(new)
def _prepare_data(data):
"""Ensure data is float64 and has proper endianness.
Note: this is largely aimed at working around a Mayavi bug.
"""
data = data.copy()
data = data.astype(np.float64)
if data.dtype.byteorder == '>':
data.byteswap(True)
return data
def _force_render(figures, backend):
    """Ensure plots are updated before properties are used"""
    # accept a single figure, or a nested row/column list of figures
    if not isinstance(figures, list):
        figures = [[figures]]
    for ff in figures:
        for f in ff:
            f.render()
            mlab.draw(figure=f)
    if backend == 'TraitsUI':
        # pump the GUI event loop so TraitsUI windows actually redraw;
        # the busy flag is restored to its previous value afterwards
        from pyface.api import GUI
        _gui = GUI()
        orig_val = _gui.busy
        _gui.set_busy(busy=True)
        _gui.process_events()
        _gui.set_busy(busy=orig_val)
        _gui.process_events()
def _make_viewer(figure, n_row, n_col, title, scene_size, offscreen):
    """Triage viewer creation

    If n_row == n_col == 1, then we can use a Mayavi figure, which
    generally guarantees that things will be drawn before control
    is returned to the command line. With the multi-view, TraitsUI
    unfortunately has no such support, so we only use it if needed.
    """
    if figure is None:
        # spawn scenes; scene_size is stored as (height, width)
        h, w = scene_size
        if offscreen is True:
            # temporarily flip the global offscreen flag while the
            # figures are created, then restore it
            orig_val = mlab.options.offscreen
            mlab.options.offscreen = True
            # NOTE(review): the per-figure size divides height by n_row
            # but passes it as the first (width) element of size= --
            # looks transposed relative to the size=(w, h) call below;
            # confirm against mlab.figure's (width, height) convention
            figures = [[mlab.figure(size=(h / n_row, w / n_col))
                        for _ in range(n_col)] for __ in range(n_row)]
            mlab.options.offscreen = orig_val
            _v = None
        else:
            # Triage: don't make TraitsUI if we don't have to
            if n_row == 1 and n_col == 1:
                figure = mlab.figure(title, size=(w, h))
                mlab.clf(figure)
                figures = [[figure]]
                _v = None
            else:
                window = _MlabGenerator(n_row, n_col, w, h, title)
                figures, _v = window._get_figs_view()
    else:
        # caller supplied figure(s): validate the count and reshape the
        # flat sequence into an n_row x n_col nested list
        if not isinstance(figure, (list, tuple)):
            figure = [figure]
        if not len(figure) == n_row * n_col:
            raise ValueError('For the requested view, figure must be a '
                             'list or tuple with exactly %i elements, '
                             'not %i' % (n_row * n_col, len(figure)))
        _v = None
        figures = [figure[slice(ri * n_col, (ri + 1) * n_col)]
                   for ri in range(n_row)]
    return figures, _v
class _MlabGenerator(HasTraits):
    """TraitsUI mlab figure generator"""
    from traitsui.api import View
    # the composed TraitsUI view holding the scene grid
    view = Instance(View)

    def __init__(self, n_row, n_col, width, height, title, **traits):
        # Build an n_row x n_col grid of MlabSceneModel traits (each owns
        # one mayavi scene), compose them into a TraitsUI view, and open
        # the window.
        HasTraits.__init__(self, **traits)
        self.mlab_names = []
        self.n_row = n_row
        self.n_col = n_col
        self.width = width
        self.height = height
        for fi in range(n_row * n_col):
            # traits are added dynamically because the grid size varies
            name = 'mlab_view%03g' % fi
            self.mlab_names.append(name)
            self.add_trait(name, Instance(MlabSceneModel, ()))
        self.view = self._get_gen_view()
        self._v = self.edit_traits(view=self.view)
        self._v.title = title

    def _get_figs_view(self):
        # Return the grid of mayavi scenes (row-major) plus the open
        # TraitsUI view object.
        figures = []
        ind = 0
        for ri in range(self.n_row):
            rfigs = []
            for ci in range(self.n_col):
                x = getattr(self, self.mlab_names[ind])
                rfigs.append(x.mayavi_scene)
                ind += 1
            figures.append(rfigs)
        return figures, self._v

    def _get_gen_view(self):
        # Compose the TraitsUI layout: one SceneEditor item per grid
        # cell, cells grouped horizontally per row, rows stacked
        # vertically.
        from traitsui.api import (View, Item, VGroup, HGroup)
        ind = 0
        va = []
        for ri in range(self.n_row):
            ha = []
            for ci in range(self.n_col):
                ha += [Item(name=self.mlab_names[ind], style='custom',
                            resizable=True, show_label=False,
                            editor=SceneEditor(scene_class=MayaviScene))]
                ind += 1
            va += [HGroup(*ha)]
        view = View(VGroup(*va), resizable=True,
                    height=self.height, width=self.width)
        return view
class Brain(object):
"""Class for visualizing a brain using multiple views in mlab
Parameters
----------
subject_id : str
subject name in Freesurfer subjects dir
hemi : str
hemisphere id (ie 'lh', 'rh', 'both', or 'split'). In the case
of 'both', both hemispheres are shown in the same window.
In the case of 'split' hemispheres are displayed side-by-side
in different viewing panes.
surf : geometry name
freesurfer surface mesh name (ie 'white', 'inflated', etc.)
curv : boolean
if true, loads curv file and displays binary curvature
(default: True)
title : str
title for the window
cortex : str or tuple
specifies how binarized curvature values are rendered.
either the name of a preset PySurfer cortex colorscheme (one of
'classic', 'bone', 'low_contrast', or 'high_contrast'), or the
name of mayavi colormap, or a tuple with values (colormap, min,
max, reverse) to fully specify the curvature colors.
size : float or pair of floats
the size of the window, in pixels. can be one number to specify
a square window, or the (width, height) of a rectangular window.
background, foreground : matplotlib colors
color of the background and foreground of the display window
figure : list of instances of mayavi.core.scene.Scene | None
If None, a new window will be created with the appropriate
views.
subjects_dir : str | None
If not None, this directory will be used as the subjects directory
instead of the value set using the SUBJECTS_DIR environment
variable.
views : list | str
views to use
show_toolbar : bool
If True, toolbars will be shown for each view.
offscreen : bool
If True, rendering will be done offscreen (not shown). Useful
mostly for generating images or screenshots, but can be buggy.
Use at your own risk.
Attributes
----------
brains : list
List of the underlying brain instances.
"""
def __init__(self, subject_id, hemi, surf, curv=True, title=None,
             cortex="classic", size=800, background="black",
             foreground="white", figure=None, subjects_dir=None,
             views=['lat'], show_toolbar=False, offscreen=False,
             config_opts=None):
    """Load geometry, create figures, and fill them with hemispheres.

    See the class docstring for parameter descriptions.
    """
    # Keep backwards compatability
    if config_opts is not None:
        msg = ("The `config_opts` dict has been deprecated and will "
               "be removed in future versions. You should update your "
               "code and pass these options directly to the `Brain` "
               "constructor.")
        warn(msg)
        cortex = config_opts.get("cortex", cortex)
        background = config_opts.get("background", background)
        foreground = config_opts.get("foreground", foreground)
        size = config_opts.get("size", size)
        width = config_opts.get("width", size)
        height = config_opts.get("height", size)
        size = (width, height)
    col_dict = dict(lh=1, rh=1, both=1, split=2)
    # validate *before* the lookup: previously `n_col = col_dict[hemi]`
    # ran first, so an invalid hemi raised a bare KeyError instead of
    # this informative ValueError
    if hemi not in col_dict.keys():
        raise ValueError('hemi must be one of [%s], not %s'
                         % (', '.join(col_dict.keys()), hemi))
    n_col = col_dict[hemi]
    # Get the subjects directory from parameter or env. var
    subjects_dir = _get_subjects_dir(subjects_dir=subjects_dir)
    self._hemi = hemi
    if title is None:
        title = subject_id
    self.subject_id = subject_id
    if not isinstance(views, list):
        views = [views]
    n_row = len(views)
    # load geometry for one or both hemispheres as necessary;
    # 'both' shifts neither hemisphere (offset 0.0)
    offset = None if hemi != 'both' else 0.0
    self.geo = dict()
    if hemi in ['split', 'both']:
        geo_hemis = ['lh', 'rh']
    elif hemi == 'lh':
        geo_hemis = ['lh']
    elif hemi == 'rh':
        geo_hemis = ['rh']
    else:
        raise ValueError('bad hemi value')
    for h in geo_hemis:
        # Initialize a Surface object as the geometry
        geo = Surface(subject_id, h, surf, subjects_dir, offset)
        # Load in the geometry and (maybe) curvature
        geo.load_geometry()
        if curv:
            geo.load_curvature()
        self.geo[h] = geo
    # deal with making figures
    self._set_window_properties(size, background, foreground)
    figures, _v = _make_viewer(figure, n_row, n_col, title,
                               self._scene_size, offscreen)
    self._figures = figures
    self._v = _v
    self._window_backend = 'Mayavi' if self._v is None else 'TraitsUI'
    for ff in self._figures:
        for f in ff:
            if f.scene is not None:
                f.scene.background = self._bg_color
                f.scene.foreground = self._fg_color
    # force rendering so scene.lights exists
    _force_render(self._figures, self._window_backend)
    self.toggle_toolbars(show_toolbar)
    _force_render(self._figures, self._window_backend)
    # disable rendering while the hemispheres are populated (re-enabled
    # below) to avoid flicker and speed up construction
    self._toggle_render(False)
    # fill figures with brains
    kwargs = dict(surf=surf, curv=curv, title=None,
                  cortex=cortex, subjects_dir=subjects_dir,
                  bg_color=self._bg_color, offset=offset)
    brains = []
    brain_matrix = []
    for ri, view in enumerate(views):
        brain_row = []
        for hi, h in enumerate(['lh', 'rh']):
            if not (hemi in ['lh', 'rh'] and h != hemi):
                # only 'split' uses a second column
                ci = hi if hemi == 'split' else 0
                kwargs['hemi'] = h
                kwargs['geo'] = self.geo[h]
                kwargs['figure'] = figures[ri][ci]
                kwargs['backend'] = self._window_backend
                brain = _Hemisphere(subject_id, **kwargs)
                brain.show_view(view)
                brains += [dict(row=ri, col=ci, brain=brain, hemi=h)]
                brain_row += [brain]
        brain_matrix += [brain_row]
    self._toggle_render(True)
    self._original_views = views
    self._brain_list = brains
    for brain in self._brain_list:
        brain['brain']._orient_lights()
    self.brains = [b['brain'] for b in brains]
    self.brain_matrix = np.array(brain_matrix)
    self.subjects_dir = subjects_dir
    # Initialize the overlay and label dictionaries
    self.foci_dict = dict()
    self.labels_dict = dict()
    self.overlays_dict = dict()
    self.contour_list = []
    self.morphometry_list = []
    self.annot_list = []
    self.data_dict = dict(lh=None, rh=None)
    # note that texts gets treated differently
    self.texts_dict = dict()
    self.n_times = None
###########################################################################
# HELPERS
def _toggle_render(self, state, views=None):
    """Turn rendering on (True) or off (False)"""
    # flatten the row/column figure grid
    figs = []
    [figs.extend(f) for f in self._figures]
    if views is None:
        views = [None] * len(figs)
    for vi, (_f, view) in enumerate(zip(figs, views)):
        # remember each camera view when disabling so it can be restored
        # when rendering is turned back on
        if state is False and view is None:
            views[vi] = mlab.view(figure=_f)
        # Testing backend doesn't have this option
        if mlab.options.backend != 'test':
            _f.scene.disable_render = not state
        if state is True and view is not None:
            mlab.draw(figure=_f)
            mlab.view(*view, figure=_f)
    # let's do the ugly force draw
    if state is True:
        _force_render(self._figures, self._window_backend)
    return views
def _set_window_properties(self, size, background, foreground):
    """Store the scene size and window colors for later figure creation.

    ``size`` may be a single number (square window) or a
    (width, height) pair; colors are any matplotlib color spec.
    """
    # old option "size" sets both width and height
    try:
        w, h = size
    except TypeError:
        w = h = size
    # stored as (height, width) for _make_viewer
    self._scene_size = h, w
    self._bg_color = colorConverter.to_rgb(background)
    self._fg_color = colorConverter.to_rgb(foreground)
def get_data_properties(self):
""" Get properties of the data shown
Returns
-------
props : dict
Dictionary with data properties
props["fmin"] : minimum colormap
props["fmid"] : midpoint colormap
props["fmax"] : maximum colormap
props["transparent"] : lower part of colormap transparent?
props["time"] : time points
props["time_idx"] : current time index
props["smoothing_steps"] : number of smoothing steps
"""
props = dict()
keys = ['fmin', 'fmid', 'fmax', 'transparent', 'time', 'time_idx',
'smoothing_steps']
try:
if self.data_dict['lh'] is not None:
hemi = 'lh'
else:
hemi = 'rh'
for key in keys:
props[key] = self.data_dict[hemi][key]
except KeyError:
# The user has not added any data
for key in keys:
props[key] = 0
return props
    def toggle_toolbars(self, show=None):
        """Toggle toolbar display

        Parameters
        ----------
        show : bool | None
            If None, the state is toggled. If True, the toolbar will
            be shown, if False, hidden.
        """
        # don't do anything if testing is on
        if self._figures[0][0].scene is not None:
            # this may not work if QT is not the backend (?), or in testing
            if hasattr(self._figures[0][0].scene, 'scene_editor'):
                # Within TraitsUI
                bars = [f.scene.scene_editor._tool_bar
                        for ff in self._figures for f in ff]
            else:
                # Mayavi figure
                bars = [f.scene._tool_bar for ff in self._figures for f in ff]
            # probe the first bar to learn which GUI toolkit we are under
            if show is None:
                if hasattr(bars[0], 'isVisible'):
                    # QT4
                    show = not bars[0].isVisible()
                elif hasattr(bars[0], 'Shown'):
                    # WX
                    show = not bars[0].Shown()
            for bar in bars:
                # dispatch per-toolkit: QT uses setVisible, WX uses Show
                if hasattr(bar, 'setVisible'):
                    bar.setVisible(show)
                elif hasattr(bar, 'Show'):
                    bar.Show(show)
def _get_one_brain(self, d, name):
"""Helper for various properties"""
if len(self.brains) > 1:
raise ValueError('Cannot access brain.%s when more than '
'one view is plotted. Use brain.brain_matrix '
'or brain.brains.' % name)
if isinstance(d, dict):
out = dict()
for key, value in d.iteritems():
out[key] = value[0]
else:
out = d[0]
return out
    @property
    def overlays(self):
        """Wrap to overlays (valid only when a single view is plotted)."""
        return self._get_one_brain(self.overlays_dict, 'overlays')
    @property
    def foci(self):
        """Wrap to foci (valid only when a single view is plotted)."""
        return self._get_one_brain(self.foci_dict, 'foci')
    @property
    def labels(self):
        """Wrap to labels (valid only when a single view is plotted)."""
        return self._get_one_brain(self.labels_dict, 'labels')
    @property
    def contour(self):
        """Wrap to contour (valid only when a single view is plotted)."""
        return self._get_one_brain(self.contour_list, 'contour')
@property
def annot(self):
"""Wrap to annot"""
return self._get_one_brain(self.annot_list, 'contour')
@property
def texts(self):
"""Wrap to texts"""
self._get_one_brain([[]], 'texts')
out = dict()
for key, val in self.texts_dict.iteritems():
out[key] = val['text']
return out
@property
def _geo(self):
"""Wrap to _geo"""
self._get_one_brain([[]], '_geo')
if ('lh' in self.geo) and ['lh'] is not None:
return self.geo['lh']
else:
return self.geo['rh']
@property
def data(self):
"""Wrap to data"""
self._get_one_brain([[]], 'data')
if self.data_dict['lh'] is not None:
data = self.data_dict['lh'].copy()
else:
data = self.data_dict['rh'].copy()
if 'colorbars' in data:
data['colorbar'] = data['colorbars'][0]
return data
def _check_hemi(self, hemi):
"""Check for safe single-hemi input, returns str"""
if hemi is None:
if self._hemi not in ['lh', 'rh']:
raise ValueError('hemi must not be None when both '
'hemispheres are displayed')
else:
hemi = self._hemi
elif hemi not in ['lh', 'rh']:
extra = ' or None' if self._hemi in ['lh', 'rh'] else ''
raise ValueError('hemi must be either "lh" or "rh"' + extra)
return hemi
def _check_hemis(self, hemi):
"""Check for safe dual or single-hemi input, returns list"""
if hemi is None:
if self._hemi not in ['lh', 'rh']:
hemi = ['lh', 'rh']
else:
hemi = [self._hemi]
elif hemi not in ['lh', 'rh']:
extra = ' or None' if self._hemi in ['lh', 'rh'] else ''
raise ValueError('hemi must be either "lh" or "rh"' + extra)
else:
hemi = [hemi]
return hemi
def _read_scalar_data(self, source, hemi, name=None, cast=True):
"""Load in scalar data from an image stored in a file or an array
Parameters
----------
source : str or numpy array
path to scalar data file or a numpy array
name : str or None, optional
name for the overlay in the internal dictionary
cast : bool, optional
either to cast float data into 64bit datatype as a
workaround. cast=True can fix a rendering problem with
certain versions of Mayavi
Returns
-------
scalar_data : numpy array
flat numpy array of scalar data
name : str
if no name was provided, deduces the name if filename was given
as a source
"""
# If source is a string, try to load a file
if isinstance(source, string_types):
if name is None:
basename = os.path.basename(source)
if basename.endswith(".gz"):
basename = basename[:-3]
if basename.startswith("%s." % hemi):
basename = basename[3:]
name = os.path.splitext(basename)[0]
scalar_data = io.read_scalar_data(source)
else:
# Can't think of a good way to check that this will work nicely
scalar_data = source
if cast:
if (scalar_data.dtype.char == 'f' and
scalar_data.dtype.itemsize < 8):
scalar_data = scalar_data.astype(np.float)
return scalar_data, name
def _get_display_range(self, scalar_data, min, max, sign):
if scalar_data.min() >= 0:
sign = "pos"
elif scalar_data.max() <= 0:
sign = "neg"
# Get data with a range that will make sense for automatic thresholding
if sign == "neg":
range_data = np.abs(scalar_data[np.where(scalar_data < 0)])
elif sign == "pos":
range_data = scalar_data[np.where(scalar_data > 0)]
else:
range_data = np.abs(scalar_data)
# Get a numeric value for the scalar minimum
if min is None:
min = "robust_min"
if min == "robust_min":
min = stats.scoreatpercentile(range_data, 2)
elif min == "actual_min":
min = range_data.min()
# Get a numeric value for the scalar maximum
if max is None:
max = "robust_max"
if max == "robust_max":
max = stats.scoreatpercentile(scalar_data, 98)
elif max == "actual_max":
max = range_data.max()
return min, max
###########################################################################
# ADDING DATA PLOTS
def add_overlay(self, source, min=2, max="robust_max", sign="abs",
name=None, hemi=None):
"""Add an overlay to the overlay dict from a file or array.
Parameters
----------
source : str or numpy array
path to the overlay file or numpy array with data
min : float
threshold for overlay display
max : float
saturation point for overlay display
sign : {'abs' | 'pos' | 'neg'}
whether positive, negative, or both values should be displayed
name : str
name for the overlay in the internal dictionary
hemi : str | None
If None, it is assumed to belong to the hemipshere being
shown. If two hemispheres are being shown, an error will
be thrown.
"""
hemi = self._check_hemi(hemi)
# load data here
scalar_data, name = self._read_scalar_data(source, hemi, name=name)
min, max = self._get_display_range(scalar_data, min, max, sign)
if sign not in ["abs", "pos", "neg"]:
raise ValueError("Overlay sign must be 'abs', 'pos', or 'neg'")
old = OverlayData(scalar_data, self.geo[hemi], min, max, sign)
ol = []
views = self._toggle_render(False)
for brain in self._brain_list:
if brain['hemi'] == hemi:
ol.append(brain['brain'].add_overlay(old))
if name in self.overlays_dict:
name = "%s%d" % (name, len(self.overlays_dict) + 1)
self.overlays_dict[name] = ol
self._toggle_render(True, views)
    def add_data(self, array, min=None, max=None, thresh=None,
                 colormap="RdBu_r", alpha=1,
                 vertices=None, smoothing_steps=20, time=None,
                 time_label="time index=%d", colorbar=True,
                 hemi=None, remove_existing=False, time_label_size=14):
        """Display data from a numpy array on the surface.

        This provides a similar interface to add_overlay, but it displays
        it with a single colormap. It offers more flexibility over the
        colormap, and provides a way to display four dimensional data
        (i.e. a timecourse).

        Note that min sets the low end of the colormap, and is separate
        from thresh (this is a different convention from add_overlay)

        Note: If the data is defined for a subset of vertices (specified
        by the "vertices" parameter), a smoothing method is used to interpolate
        the data onto the high resolution surface. If the data is defined for
        subsampled version of the surface, smoothing_steps can be set to None,
        in which case only as many smoothing steps are applied until the whole
        surface is filled with non-zeros.

        Parameters
        ----------
        array : numpy array
            data array (nvtx vector)
        min : float
            min value in colormap (uses real min if None)
        max : float
            max value in colormap (uses real max if None)
        thresh : None or float
            if not None, values below thresh will not be visible
        colormap : string, list of colors, or array
            name of matplotlib colormap to use, a list of matplotlib colors,
            or a custom look up table (an n x 4 array coded with RBGA values
            between 0 and 255).
        alpha : float in [0, 1]
            alpha level to control opacity
        vertices : numpy array
            vertices for which the data is defined (needed if len(data) < nvtx)
        smoothing_steps : int or None
            number of smoothing steps (smooting is used if len(data) < nvtx)
            Default : 20
        time : numpy array
            time points in the data array (if data is 2D)
        time_label : str | callable | None
            format of the time label (a format string, a function that maps
            floating point time values to strings, or None for no label)
        colorbar : bool
            whether to add a colorbar to the figure
        hemi : str | None
            If None, it is assumed to belong to the hemipshere being
            shown. If two hemispheres are being shown, an error will
            be thrown.
        remove_existing : bool
            Remove surface added by previous "add_data" call. Useful for
            conserving memory when displaying different data in a loop.
        time_label_size : int
            Font size of the time label (default 14)
        """
        hemi = self._check_hemi(hemi)
        if min is None:
            min = array.min()
        if max is None:
            max = array.max()
        # Create smoothing matrix if necessary
        if len(array) < self.geo[hemi].x.shape[0]:
            if vertices is None:
                raise ValueError("len(data) < nvtx: need vertices")
            adj_mat = utils.mesh_edges(self.geo[hemi].faces)
            smooth_mat = utils.smoothing_matrix(vertices, adj_mat,
                                                smoothing_steps)
        else:
            smooth_mat = None
        # Calculate initial data to plot
        if array.ndim == 1:
            array_plot = array
        elif array.ndim == 2:
            # for a timecourse, start by showing the first time point
            array_plot = array[:, 0]
        else:
            raise ValueError("data has to be 1D or 2D")
        if smooth_mat is not None:
            array_plot = smooth_mat * array_plot
        # Copy and byteswap to deal with Mayavi bug
        mlab_plot = _prepare_data(array_plot)
        # Process colormap argument into a lut
        lut = create_color_lut(colormap)
        # the LUT above already encodes the requested colormap; the string
        # passed downstream is only a placeholder name
        colormap = "Greys"
        data = dict(array=array, smoothing_steps=smoothing_steps,
                    fmin=min, fmid=(min + max) / 2, fmax=max,
                    transparent=False, time=0, time_idx=0,
                    vertices=vertices, smooth_mat=smooth_mat)
        # Create time array and add label if 2D
        if array.ndim == 2:
            if time is None:
                time = np.arange(array.shape[1])
            self._times = time
            self.n_times = array.shape[1]
            if not self.n_times == len(time):
                raise ValueError('time is not the same length as '
                                 'array.shape[1]')
            # a plain format string is wrapped into a callable
            if isinstance(time_label, basestring):
                time_label_fmt = time_label
                time_label = lambda x: time_label_fmt % x
            data["time_label"] = time_label
            data["time"] = time
            data["time_idx"] = 0
            y_txt = 0.05 + 0.05 * bool(colorbar)
        else:
            self._times = None
            self.n_times = None
        surfs = []
        bars = []
        views = self._toggle_render(False)
        for bi, brain in enumerate(self._brain_list):
            if brain['hemi'] == hemi:
                out = brain['brain'].add_data(array, mlab_plot, vertices,
                                              smooth_mat, min, max, thresh,
                                              lut, colormap, alpha, time,
                                              time_label, colorbar)
                s, ct, bar = out
                surfs.append(s)
                bars.append(bar)
                row, col = np.unravel_index(bi, self.brain_matrix.shape)
                if array.ndim == 2 and time_label is not None:
                    self.add_text(0.95, y_txt, time_label(time[0]),
                                  name="time_label", row=row, col=col,
                                  font_size=time_label_size,
                                  justification='right')
        self._toggle_render(True, views)
        data['surfaces'] = surfs
        data['colorbars'] = bars
        # NOTE(review): `ct` is only bound when at least one brain matched
        # `hemi` above -- presumably guaranteed by construction; confirm
        data['orig_ctable'] = ct
        if remove_existing and self.data_dict[hemi] is not None:
            for surf in self.data_dict[hemi]['surfaces']:
                surf.parent.parent.remove()
        self.data_dict[hemi] = data
def add_annotation(self, annot, borders=True, alpha=1, hemi=None,
remove_existing=True):
"""Add an annotation file.
Parameters
----------
annot : str
Either path to annotation file or annotation name
borders : bool | int
Show only label borders. If int, specify the number of steps
(away from the true border) along the cortical mesh to include
as part of the border definition.
alpha : float in [0, 1]
Alpha level to control opacity
hemi : str | None
If None, it is assumed to belong to the hemipshere being
shown. If two hemispheres are being shown, data must exist
for both hemispheres.
remove_existing : bool
If True (default), remove old annotations.
"""
hemis = self._check_hemis(hemi)
# Figure out where the data is coming from
if os.path.isfile(annot):
filepath = annot
path = os.path.split(filepath)[0]
file_hemi, annot = os.path.basename(filepath).split('.')[:2]
if len(hemis) > 1:
if annot[:2] == 'lh.':
filepaths = [filepath, pjoin(path, 'rh' + annot[2:])]
elif annot[:2] == 'rh.':
filepaths = [pjoin(path, 'lh' + annot[2:], filepath)]
else:
raise RuntimeError('To add both hemispheres '
'simultaneously, filename must '
'begin with "lh." or "rh."')
else:
filepaths = [filepath]
else:
filepaths = []
for hemi in hemis:
filepath = pjoin(self.subjects_dir,
self.subject_id,
'label',
".".join([hemi, annot, 'annot']))
if not os.path.exists(filepath):
raise ValueError('Annotation file %s does not exist'
% filepath)
filepaths += [filepath]
views = self._toggle_render(False)
if remove_existing is True:
# Get rid of any old annots
for a in self.annot_list:
a['surface'].remove()
self.annot_list = []
al = self.annot_list
for hemi, filepath in zip(hemis, filepaths):
# Read in the data
labels, cmap, _ = nib.freesurfer.read_annot(filepath,
orig_ids=True)
# Maybe zero-out the non-border vertices
self._to_borders(labels, hemi, borders)
# Handle null labels properly
# (tksurfer doesn't use the alpha channel, so sometimes this
# is set weirdly. For our purposes, it should always be 0.
# Unless this sometimes causes problems?
cmap[np.where(cmap[:, 4] == 0), 3] = 0
if np.any(labels == 0) and not np.any(cmap[:, -1] == 0):
cmap = np.vstack((cmap, np.zeros(5, int)))
# Set label ids sensibly
ord = np.argsort(cmap[:, -1])
ids = ord[np.searchsorted(cmap[ord, -1], labels)]
cmap = cmap[:, :4]
# Set the alpha level
alpha_vec = cmap[:, 3]
alpha_vec[alpha_vec > 0] = alpha * 255
for brain in self._brain_list:
if brain['hemi'] == hemi:
al.append(brain['brain'].add_annotation(annot, ids, cmap))
self.annot_list = al
self._toggle_render(True, views)
    def add_label(self, label, color=None, alpha=1, scalar_thresh=None,
                  borders=False, hemi=None, subdir=None):
        """Add an ROI label to the image.

        Parameters
        ----------
        label : str | instance of Label
            label filepath or name. Can also be an instance of
            an object with attributes "hemi", "vertices", "name", and
            optionally "color" and "values" (if scalar_thresh is not None).
        color : matplotlib-style color | None
            anything matplotlib accepts: string, RGB, hex, etc. (default
            "crimson")
        alpha : float in [0, 1]
            alpha level to control opacity
        scalar_thresh : None or number
            threshold the label ids using this value in the label
            file's scalar field (i.e. label only vertices with
            scalar >= thresh)
        borders : bool | int
            Show only label borders. If int, specify the number of steps
            (away from the true border) along the cortical mesh to include
            as part of the border definition.
        hemi : str | None
            If None, it is assumed to belong to the hemipshere being
            shown. If two hemispheres are being shown, an error will
            be thrown.
        subdir : None | str
            If a label is specified as name, subdir can be used to indicate
            that the label file is in a sub-directory of the subject's
            label directory rather than in the label directory itself (e.g.
            for ``$SUBJECTS_DIR/$SUBJECT/label/aparc/lh.cuneus.label``
            ``brain.add_label('cuneus', subdir='aparc')``).

        Notes
        -----
        To remove previously added labels, run Brain.remove_labels().
        """
        # string input: either an explicit file path or a label name to be
        # resolved inside the subject's label directory
        if isinstance(label, string_types):
            hemi = self._check_hemi(hemi)
            if color is None:
                color = "crimson"
            if os.path.isfile(label):
                filepath = label
                label_name = os.path.basename(filepath).split('.')[1]
            else:
                label_name = label
                label_fname = ".".join([hemi, label_name, 'label'])
                if subdir is None:
                    filepath = pjoin(self.subjects_dir, self.subject_id,
                                     'label', label_fname)
                else:
                    filepath = pjoin(self.subjects_dir, self.subject_id,
                                     'label', subdir, label_fname)
                if not os.path.exists(filepath):
                    raise ValueError('Label file %s does not exist'
                                     % filepath)
            # Load the label data and create binary overlay
            if scalar_thresh is None:
                ids = nib.freesurfer.read_label(filepath)
            else:
                ids, scalars = nib.freesurfer.read_label(filepath,
                                                         read_scalars=True)
                ids = ids[scalars >= scalar_thresh]
        else:
            # try to extract parameters from label instance
            try:
                hemi = label.hemi
                ids = label.vertices
                if label.name is None:
                    label_name = 'unnamed'
                else:
                    label_name = str(label.name)
                if color is None:
                    if hasattr(label, 'color') and label.color is not None:
                        color = label.color
                    else:
                        color = "crimson"
                if scalar_thresh is not None:
                    scalars = label.values
            except Exception:
                raise ValueError('Label was not a filename (str), and could '
                                 'not be understood as a class. The class '
                                 'must have attributes "hemi", "vertices", '
                                 '"name", and (if scalar_thresh is not None)'
                                 '"values"')
            hemi = self._check_hemi(hemi)
            if scalar_thresh is not None:
                ids = ids[scalars >= scalar_thresh]
        # build a per-vertex binary mask from the selected vertex ids
        label = np.zeros(self.geo[hemi].coords.shape[0])
        label[ids] = 1
        # make sure we have a unique name
        if label_name in self.labels_dict:
            i = 2
            name = label_name + '_%i'
            while name % i in self.labels_dict:
                i += 1
            label_name = name % i
        self._to_borders(label, hemi, borders, restrict_idx=ids)
        # make a list of all the plotted labels
        ll = []
        views = self._toggle_render(False)
        for brain in self._brain_list:
            if brain['hemi'] == hemi:
                ll.append(brain['brain'].add_label(label, label_name,
                                                   color, alpha))
        self.labels_dict[label_name] = ll
        self._toggle_render(True, views)
def _to_borders(self, label, hemi, borders, restrict_idx=None):
"""Helper to potentially convert a label/parc to borders"""
if not isinstance(borders, (bool, int)) or borders < 0:
raise ValueError('borders must be a bool or positive integer')
if borders:
n_vertices = label.size
edges = utils.mesh_edges(self.geo[hemi].faces)
border_edges = label[edges.row] != label[edges.col]
show = np.zeros(n_vertices, dtype=np.int)
keep_idx = np.unique(edges.row[border_edges])
if isinstance(borders, int):
for _ in range(borders):
keep_idx = np.in1d(self.geo[hemi].faces.ravel(), keep_idx)
keep_idx.shape = self.geo[hemi].faces.shape
keep_idx = self.geo[hemi].faces[np.any(keep_idx, axis=1)]
keep_idx = np.unique(keep_idx)
if restrict_idx is not None:
keep_idx = keep_idx[np.in1d(keep_idx, restrict_idx)]
show[keep_idx] = 1
label *= show
def remove_labels(self, labels=None, hemi=None):
"""Remove one or more previously added labels from the image.
Parameters
----------
labels : None | str | list of str
Labels to remove. Can be a string naming a single label, or None to
remove all labels. Possible names can be found in the Brain.labels
attribute.
hemi : str | None
If None, it is assumed to belong to the hemipshere being
shown. If two hemispheres are being shown, an error will
be thrown.
"""
hemi = self._check_hemi(hemi)
if labels is None:
labels = self.labels_dict.keys()
elif isinstance(labels, str):
labels = [labels]
for key in labels:
label = self.labels_dict.pop(key)
for ll in label:
ll.remove()
    def add_morphometry(self, measure, grayscale=False, hemi=None,
                        remove_existing=True, colormap=None,
                        min=None, max=None, colorbar=True):
        """Add a morphometry overlay to the image.

        Parameters
        ----------
        measure : {'area' | 'curv' | 'jacobian_white' | 'sulc' | 'thickness'}
            which measure to load
        grayscale : bool
            whether to load the overlay with a grayscale colormap
        hemi : str | None
            If None, it is assumed to belong to the hemipshere being
            shown. If two hemispheres are being shown, data must exist
            for both hemispheres.
        remove_existing : bool
            If True (default), remove old annotations.
        colormap : str
            Mayavi colormap name, or None to use a sensible default.
        min, max : floats
            Endpoints for the colormap; if not provided the robust range
            of the data is used.
        colorbar : bool
            If True, show a colorbar corresponding to the overlay data.
        """
        hemis = self._check_hemis(hemi)
        morph_files = []
        for hemi in hemis:
            # Find the source data
            surf_dir = pjoin(self.subjects_dir, self.subject_id, 'surf')
            morph_file = pjoin(surf_dir, '.'.join([hemi, measure]))
            if not os.path.exists(morph_file):
                raise ValueError(
                    'Could not find %s in subject directory' % morph_file)
            morph_files += [morph_file]
        views = self._toggle_render(False)
        if remove_existing is True:
            # Get rid of any old overlays
            for m in self.morphometry_list:
                m['surface'].remove()
                if m["colorbar"] is not None:
                    m['colorbar'].visible = False
            self.morphometry_list = []
        ml = self.morphometry_list
        for hemi, morph_file in zip(hemis, morph_files):
            # colormap resolved on the first pass is reused for the second
            # hemisphere (it is no longer None)
            if colormap is None:
                # Preset colormaps
                if grayscale:
                    colormap = "gray"
                else:
                    colormap = dict(area="pink",
                                    curv="RdBu",
                                    jacobian_white="pink",
                                    sulc="RdBu",
                                    thickness="pink")[measure]
            # Read in the morphometric data
            morph_data = nib.freesurfer.read_morph_data(morph_file)
            # Get a cortex mask for robust range
            self.geo[hemi].load_label("cortex")
            ctx_idx = self.geo[hemi].labels["cortex"]
            # Get the display range
            min_default, max_default = np.percentile(morph_data[ctx_idx],
                                                     [2, 98])
            if min is None:
                min = min_default
            if max is None:
                max = max_default
            # Use appropriate values for bivariate measures
            if measure in ["curv", "sulc"]:
                # symmetric range around zero for signed measures
                lim = np.max([abs(min), abs(max)])
                min, max = -lim, lim
            # Set up the Mayavi pipeline
            morph_data = _prepare_data(morph_data)
            for brain in self._brain_list:
                if brain['hemi'] == hemi:
                    ml.append(brain['brain'].add_morphometry(morph_data,
                                                             colormap, measure,
                                                             min, max,
                                                             colorbar))
        self.morphometry_list = ml
        self._toggle_render(True, views)
    def add_foci(self, coords, coords_as_verts=False, map_surface=None,
                 scale_factor=1, color="white", alpha=1, name=None,
                 hemi=None):
        """Add spherical foci, possibly mapping to displayed surf.

        The foci spheres can be displayed at the coordinates given, or
        mapped through a surface geometry. In other words, coordinates
        from a volume-based analysis in MNI space can be displayed on an
        inflated average surface by finding the closest vertex on the
        white surface and mapping to that vertex on the inflated mesh.

        Parameters
        ----------
        coords : numpy array
            x, y, z coordinates in stereotaxic space or array of vertex ids
        coords_as_verts : bool
            whether the coords parameter should be interpreted as vertex ids
        map_surface : Freesurfer surf or None
            surface to map coordinates through, or None to use raw coords
        scale_factor : int
            controls the size of the foci spheres
        color : matplotlib color code
            HTML name, RBG tuple, or hex code
        alpha : float in [0, 1]
            opacity of focus gylphs
        name : str
            internal name to use
        hemi : str | None
            If None, it is assumed to belong to the hemipshere being
            shown. If two hemispheres are being shown, an error will
            be thrown.
        """
        hemi = self._check_hemi(hemi)
        # Figure out how to interpret the first parameter
        if coords_as_verts:
            coords = self.geo[hemi].coords[coords]
            map_surface = None
        # Possibly map the foci coords through a surface
        if map_surface is None:
            foci_coords = np.atleast_2d(coords)
        else:
            # find the nearest vertices on map_surface, then take those
            # vertices' positions on the displayed geometry
            foci_surf = Surface(self.subject_id, hemi, map_surface,
                                subjects_dir=self.subjects_dir)
            foci_surf.load_geometry()
            foci_vtxs = utils.find_closest_vertices(foci_surf.coords, coords)
            foci_coords = self.geo[hemi].coords[foci_vtxs]
        # Get a unique name (maybe should take this approach elsewhere)
        if name is None:
            name = "foci_%d" % (len(self.foci_dict) + 1)
        # Convert the color code
        if not isinstance(color, tuple):
            color = colorConverter.to_rgb(color)
        views = self._toggle_render(False)
        fl = []
        for brain in self._brain_list:
            if brain['hemi'] == hemi:
                fl.append(brain['brain'].add_foci(foci_coords, scale_factor,
                                                  color, alpha, name))
        self.foci_dict[name] = fl
        self._toggle_render(True, views)
    def add_contour_overlay(self, source, min=None, max=None,
                            n_contours=7, line_width=1.5, colormap="YlOrRd_r",
                            hemi=None, remove_existing=True, colorbar=True):
        """Add a topographic contour overlay of the positive data.

        Note: This visualization will look best when using the "low_contrast"
        cortical curvature colorscheme.

        Parameters
        ----------
        source : str or array
            path to the overlay file or numpy array
        min : float
            threshold for overlay display
        max : float
            saturation point for overlay display
        n_contours : int
            number of contours to use in the display
        line_width : float
            width of contour lines
        colormap : string, list of colors, or array
            name of matplotlib colormap to use, a list of matplotlib colors,
            or a custom look up table (an n x 4 array coded with RBGA values
            between 0 and 255).
        hemi : str | None
            If None, it is assumed to belong to the hemipshere being
            shown. If two hemispheres are being shown, an error will
            be thrown.
        remove_existing : bool
            If there is an existing contour overlay, remove it before plotting.
        colorbar : bool
            If True, show the colorbar for the scalar value.
        """
        hemi = self._check_hemi(hemi)
        # Read the scalar data
        scalar_data, _ = self._read_scalar_data(source, hemi)
        # only the positive side of the data is contoured
        min, max = self._get_display_range(scalar_data, min, max, "pos")
        # Deal with Mayavi bug
        scalar_data = _prepare_data(scalar_data)
        # Maybe get rid of an old overlay
        if hasattr(self, "contour") and remove_existing:
            for c in self.contour_list:
                c['surface'].remove()
                if c['colorbar'] is not None:
                    c['colorbar'].visible = False
        # Process colormap argument into a lut
        lut = create_color_lut(colormap)
        views = self._toggle_render(False)
        cl = []
        for brain in self._brain_list:
            if brain['hemi'] == hemi:
                cl.append(brain['brain'].add_contour_overlay(scalar_data,
                                                             min, max,
                                                             n_contours,
                                                             line_width, lut,
                                                             colorbar))
        self.contour_list = cl
        self._toggle_render(True, views)
    def add_text(self, x, y, text, name, color=None, opacity=1.0,
                 row=-1, col=-1, font_size=None, justification=None):
        """ Add a text to the visualization

        Parameters
        ----------
        x : Float
            x coordinate
        y : Float
            y coordinate
        text : str
            Text to add
        name : str
            Name of the text (text label can be updated using update_text())
        color : Tuple
            Color of the text. Default: (1, 1, 1)
        opacity : Float
            Opacity of the text. Default: 1.0
        row : int
            Row index of which brain to use
        col : int
            Column index of which brain to use
        font_size : int | None
            If not None, set the text font size (viewport-scaled).
        justification : str | None
            If not None, set the text justification property.
        """
        # an existing text with the same name is replaced
        if name in self.texts_dict:
            self.texts_dict[name]['text'].remove()
        text = self.brain_matrix[row, col].add_text(x, y, text,
                                                    name, color, opacity)
        self.texts_dict[name] = dict(row=row, col=col, text=text)
        if font_size is not None:
            text.property.font_size = font_size
            text.actor.text_scale_mode = 'viewport'
        if justification is not None:
            text.property.justification = justification
def update_text(self, text, name, row=-1, col=-1):
"""Update text label
Parameters
----------
text : str
New text for label
name : str
Name of text label
"""
if name not in self.texts_dict:
raise KeyError('text name "%s" unknown' % name)
self.texts_dict[name]['text'].text = text
###########################################################################
# DATA SCALING / DISPLAY
def reset_view(self):
"""Orient camera to display original view
"""
for view, brain in zip(self._original_views, self._brain_list):
brain['brain'].show_view(view)
    def show_view(self, view=None, roll=None, distance=None, row=-1, col=-1):
        """Orient camera to display view

        Parameters
        ----------
        view : {'lateral' | 'medial' | 'rostral' | 'caudal' |
                'dorsal' | 'ventral' | 'frontal' | 'parietal' |
                dict}
            brain surface to view or kwargs to pass to mlab.view()
        roll : float
            camera roll
        distance : float | 'auto' | None
            distance from the origin
        row : int
            Row index of which brain to use
        col : int
            Column index of which brain to use

        Returns
        -------
        view : tuple
            tuple returned from mlab.view
        """
        # delegate to the selected brain in the view grid
        return self.brain_matrix[row][col].show_view(view, roll, distance)
    def set_distance(self, distance=None):
        """Set view distances for all brain plots to the same value

        Parameters
        ----------
        distance : float | None
            Distance to use. If None, brains are set to the farthest
            "best fit" distance across all current views; note that
            the underlying "best fit" function can be buggy.

        Returns
        -------
        distance : float
            The distance used.
        """
        if distance is None:
            # ask each figure for its auto-fit distance and take the largest
            distance = []
            for ff in self._figures:
                for f in ff:
                    mlab.view(figure=f, distance='auto')
                    v = mlab.view(figure=f)
                    # This should only happen for the test backend
                    if v is None:
                        v = [0, 0, 100]
                    distance += [v[2]]
            distance = max(distance)
        # apply the chosen distance uniformly to every figure
        for ff in self._figures:
            for f in ff:
                mlab.view(distance=distance, figure=f)
        return distance
@verbose
def scale_data_colormap(self, fmin, fmid, fmax, transparent, verbose=None):
"""Scale the data colormap.
Parameters
----------
fmin : float
minimum value of colormap
fmid : float
value corresponding to color midpoint
fmax : float
maximum value for colormap
transparent : boolean
if True: use a linear transparency between fmin and fmid
verbose : bool, str, int, or None
If not None, override default verbose level (see surfer.verbose).
"""
if not (fmin < fmid) and (fmid < fmax):
raise ValueError("Invalid colormap, we need fmin<fmid<fmax")
# Cast inputs to float to prevent integer division
fmin = float(fmin)
fmid = float(fmid)
fmax = float(fmax)
logger.info("colormap: fmin=%0.2e fmid=%0.2e fmax=%0.2e "
"transparent=%d" % (fmin, fmid, fmax, transparent))
# Get the original colormap
for h in ['lh', 'rh']:
data = self.data_dict[h]
if data is not None:
table = data["orig_ctable"].copy()
# Add transparency if needed
if transparent:
n_colors = table.shape[0]
n_colors2 = int(n_colors / 2)
table[:n_colors2, -1] = np.linspace(0, 255, n_colors2)
table[n_colors2:, -1] = 255 * np.ones(n_colors - n_colors2)
# Scale the colormap
table_new = table.copy()
n_colors = table.shape[0]
n_colors2 = int(n_colors / 2)
# Index of fmid in new colorbar
fmid_idx = int(np.round(n_colors * ((fmid - fmin) /
(fmax - fmin))) - 1)
# Go through channels
for i in range(4):
part1 = np.interp(np.linspace(0, n_colors2 - 1, fmid_idx + 1),
np.arange(n_colors),
table[:, i])
table_new[:fmid_idx + 1, i] = part1
part2 = np.interp(np.linspace(n_colors2, n_colors - 1,
n_colors - fmid_idx - 1),
np.arange(n_colors),
table[:, i])
table_new[fmid_idx + 1:, i] = part2
views = self._toggle_render(False)
# Use the new colormap
for hemi in ['lh', 'rh']:
data = self.data_dict[hemi]
if data is not None:
for surf in data['surfaces']:
cmap = surf.module_manager.scalar_lut_manager
cmap.lut.table = table_new
cmap.data_range = np.array([fmin, fmax])
# Update the data properties
data["fmin"], data['fmid'], data['fmax'] = fmin, fmid, fmax
data["transparent"] = transparent
self._toggle_render(True, views)
    def set_data_time_index(self, time_idx, interpolation='quadratic'):
        """Set the data time index to show

        Parameters
        ----------
        time_idx : int | float
            Time index. Non-integer values will be displayed using
            interpolation between samples.
        interpolation : str
            Interpolation method (``scipy.interpolate.interp1d`` parameter,
            one of 'linear' | 'nearest' | 'zero' | 'slinear' | 'quadratic' |
            'cubic', default 'quadratic'). Interpolation is only used for
            non-integer indexes.
        """
        if self.n_times is None:
            raise RuntimeError('cannot set time index with no time data')
        if time_idx < 0 or time_idx >= self.n_times:
            raise ValueError("time index out of range")
        views = self._toggle_render(False)
        for hemi in ['lh', 'rh']:
            data = self.data_dict[hemi]
            if data is not None:
                # interpolation
                if isinstance(time_idx, float):
                    times = np.arange(self.n_times)
                    ifunc = interp1d(times, data['array'], interpolation, 1)
                    plot_data = ifunc(time_idx)
                else:
                    plot_data = data["array"][:, time_idx]
                if data["smooth_mat"] is not None:
                    plot_data = data["smooth_mat"] * plot_data
                for surf in data["surfaces"]:
                    surf.mlab_source.scalars = plot_data
                data["time_idx"] = time_idx
                # Update time label
                if data["time_label"]:
                    # `times` was bound above, in the matching float branch
                    if isinstance(time_idx, float):
                        ifunc = interp1d(times, data['time'])
                        time = ifunc(time_idx)
                    else:
                        time = data["time"][time_idx]
                    self.update_text(data["time_label"](time), "time_label")
        self._toggle_render(True, views)
@property
def data_time_index(self):
    """The time index currently shown for the data overlay.

    Returns
    -------
    time_idx : int
        Current time index, taken from the first hemisphere that has a
        data overlay attached.

    Raises
    ------
    RuntimeError
        If neither hemisphere has a data overlay.
    """
    for hemisphere in ('lh', 'rh'):
        hemi_data = self.data_dict[hemisphere]
        if hemi_data is None:
            continue
        return hemi_data["time_idx"]
    raise RuntimeError("Brain instance has no data overlay")
@verbose
def set_data_smoothing_steps(self, smoothing_steps, verbose=None):
    """Set the number of smoothing steps

    Rebuilds the vertex smoothing matrix for every hemisphere with a
    data overlay and redraws the currently displayed sample.

    Parameters
    ----------
    smoothing_steps : int
        Number of smoothing steps
    verbose : bool, str, int, or None
        If not None, override default verbose level (see surfer.verbose).
    """
    # Suspend rendering while both hemispheres are updated.
    views = self._toggle_render(False)
    for hemi in ['lh', 'rh']:
        data = self.data_dict[hemi]
        if data is not None:
            # Rebuild the smoothing matrix from the surface mesh edges.
            adj_mat = utils.mesh_edges(self.geo[hemi].faces)
            smooth_mat = utils.smoothing_matrix(data["vertices"],
                                                adj_mat, smoothing_steps)
            data["smooth_mat"] = smooth_mat

            # Redraw: 1D arrays are static; 2D arrays show the current
            # time sample.
            if data["array"].ndim == 1:
                plot_data = data["array"]
            else:
                plot_data = data["array"][:, data["time_idx"]]
            plot_data = data["smooth_mat"] * plot_data
            for surf in data["surfaces"]:
                surf.mlab_source.scalars = plot_data

            # Update data properties
            data["smoothing_steps"] = smoothing_steps
    self._toggle_render(True, views)
def index_for_time(self, time, rounding='closest'):
    """Map a time value onto the nearest data time index.

    Parameters
    ----------
    time : scalar
        Time point to look up.
    rounding : 'closest' | 'up' | 'down'
        How to round when the exact time point is not a sample.

    Returns
    -------
    index : int
        Data time index corresponding to ``time``.

    Raises
    ------
    RuntimeError
        If the Brain has no time axis.
    ValueError
        If ``time`` lies outside the time axis (with half a sample step
        of slack at each end) or ``rounding`` is not recognized.
    """
    if self.n_times is None:
        raise RuntimeError("Brain has no time axis")
    times = self._times

    # Permit up to half a sample period of slack beyond either end.
    tmin, tmax = np.min(times), np.max(times)
    max_diff = (tmax - tmin) / (len(times) - 1) / 2
    if not (tmin - max_diff <= time <= tmax + max_diff):
        raise ValueError("time = %s lies outside of the time axis "
                         "[%s, %s]" % (time, tmin, tmax))

    if rounding == 'closest':
        return np.argmin(np.abs(times - time))
    if rounding == 'up':
        return np.nonzero(times >= time)[0][0]
    if rounding == 'down':
        return np.nonzero(times <= time)[0][-1]
    raise ValueError("Invalid rounding parameter: %s" % repr(rounding))
def set_time(self, time):
    """Jump the data display to the sample closest to ``time``.

    Parameters
    ----------
    time : scalar
        Time point to display.
    """
    self.set_data_time_index(self.index_for_time(time))
def _get_colorbars(self, row, col):
    """Collect every colorbar object attached to the brain at (row, col).

    Gathers colorbars from the data overlay, morphometry, contour, and
    generic overlay containers; entries may be None.
    """
    shape = self.brain_matrix.shape
    # Support negative (wrap-around) indices, as the public API defaults
    # row/col to -1.
    row = row % shape[0]
    col = col % shape[1]
    # Flat index into the brain matrix, used for the per-brain lists.
    ind = np.ravel_multi_index((row, col), self.brain_matrix.shape)
    colorbars = []
    h = self._brain_list[ind]['hemi']
    if self.data_dict[h] is not None and 'colorbars' in self.data_dict[h]:
        colorbars.append(self.data_dict[h]['colorbars'][row])
    if len(self.morphometry_list) > 0:
        colorbars.append(self.morphometry_list[ind]['colorbar'])
    if len(self.contour_list) > 0:
        colorbars.append(self.contour_list[ind]['colorbar'])
    if len(self.overlays_dict) > 0:
        for name, obj in self.overlays_dict.items():
            for bar in ["pos_bar", "neg_bar"]:
                try:  # deal with positive overlays
                    # An overlay may exist for fewer panels than ind;
                    # clamp to the last available one.
                    this_ind = min(len(obj) - 1, ind)
                    colorbars.append(getattr(obj[this_ind], bar))
                except AttributeError:
                    # Overlay has no bar of this sign; skip it.
                    pass
    return colorbars
def _colorbar_visibility(self, visible, row, col):
    """Show or hide every colorbar belonging to the plot at (row, col)."""
    for colorbar in self._get_colorbars(row, col):
        if colorbar is None:
            continue
        colorbar.visible = visible
def show_colorbar(self, row=-1, col=-1):
    """Make the colorbar(s) of the selected plot visible.

    Parameters
    ----------
    row : int
        Row index of which brain to use
    col : int
        Column index of which brain to use
    """
    self._colorbar_visibility(True, row, col)
def hide_colorbar(self, row=-1, col=-1):
    """Make the colorbar(s) of the selected plot invisible.

    Parameters
    ----------
    row : int
        Row index of which brain to use
    col : int
        Column index of which brain to use
    """
    self._colorbar_visibility(False, row, col)
def close(self):
    """Close every open figure and release the viewer, if any."""
    for row_idx, figure_row in enumerate(self._figures):
        for col_idx, figure in enumerate(figure_row):
            if figure is None:
                continue
            mlab.close(figure)
            self._figures[row_idx][col_idx] = None
    # should we tear down other variables?
    if self._v is not None:
        self._v.dispose()
        self._v = None
def __del__(self):
    """Dispose of the TraitsUI viewer (if any) on garbage collection."""
    viewer = getattr(self, '_v', None)
    if viewer is not None:
        viewer.dispose()
        self._v = None
###########################################################################
# SAVING OUTPUT
def save_single_image(self, filename, row=-1, col=-1):
    """Save the view from one panel to an image file on disk.

    Only mayavi image types are supported:
    (png jpg bmp tiff ps eps pdf rib oogl iv vrml obj)

    Parameters
    ----------
    filename: string
        path to new image file
    row : int
        row index of the brain to use
    col : int
        column index of the brain to use

    Due to limitations in TraitsUI, if multiple views or hemi='split'
    is used, there is no guarantee painting of the windows will
    complete before control is returned to the command line. Thus
    we strongly recommend using only one figure window (which uses
    a Mayavi figure to plot instead of TraitsUI) if you intend to
    script plotting commands.
    """
    brain = self.brain_matrix[row, col]
    # Validate the extension before touching the rendering backend.
    good_ftypes = ['png', 'jpg', 'bmp', 'tiff', 'ps',
                   'eps', 'pdf', 'rib', 'oogl', 'iv', 'vrml', 'obj']
    ext = filename[filename.rfind('.') + 1:]
    if ext not in good_ftypes:
        raise ValueError("Supported image types are %s"
                         % " ".join(good_ftypes))
    mlab.draw(brain._f)
    mlab.savefig(filename, figure=brain._f)
def save_image(self, filename):
    """Save the combined view from all panels to disk.

    Only mayavi image types are supported:
    (png jpg bmp tiff ps eps pdf rib oogl iv vrml obj)

    Parameters
    ----------
    filename: string
        path to new image file

    Due to limitations in TraitsUI, if multiple views or hemi='split'
    is used, there is no guarantee painting of the windows will
    complete before control is returned to the command line. Thus
    we strongly recommend using only one figure window (which uses
    a Mayavi figure to plot instead of TraitsUI) if you intend to
    script plotting commands.
    """
    montage = self.screenshot()
    misc.imsave(filename, montage)
def screenshot(self, mode='rgb', antialiased=False):
    """Capture the current view of every panel as one image array.

    Wraps to mlab.screenshot for ease of use.

    Parameters
    ----------
    mode: string
        Either 'rgb' or 'rgba' for values to return
    antialiased: bool
        Antialias the image (see mlab.screenshot() for details)

    Returns
    -------
    screenshot: array
        Image pixel values; panels are concatenated horizontally per
        row and the rows stacked vertically.

    Notes
    -----
    Due to limitations in TraitsUI, if multiple views or hemi='split'
    is used, there is no guarantee painting of the windows will
    complete before control is returned to the command line. Thus
    we strongly recommend using only one figure window (which uses
    a Mayavi figure to plot instead of TraitsUI) if you intend to
    script plotting commands.
    """
    n_col = 2 if self._hemi == 'split' else 1
    row_images = []
    for ri in range(self.brain_matrix.shape[0]):
        panels = [self.screenshot_single(mode, antialiased, ri, ci)
                  for ci in range(n_col)]
        row_images.append(np.concatenate(panels, axis=1))
    return np.concatenate(row_images, axis=0)
def screenshot_single(self, mode='rgb', antialiased=False, row=-1, col=-1):
    """Capture the current view of one panel as an image array.

    Wraps to mlab.screenshot for ease of use.

    Parameters
    ----------
    mode: string
        Either 'rgb' or 'rgba' for values to return
    antialiased: bool
        Antialias the image (see mlab.screenshot() for details)
    row : int
        row index of the brain to use
    col : int
        column index of the brain to use

    Returns
    -------
    screenshot: array
        Image pixel values

    Notes
    -----
    Due to limitations in TraitsUI, if multiple views or hemi='split'
    is used, there is no guarantee painting of the windows will
    complete before control is returned to the command line. Thus
    we strongly recommend using only one figure window (which uses
    a Mayavi figure to plot instead of TraitsUI) if you intend to
    script plotting commands.
    """
    panel = self.brain_matrix[row, col]
    return mlab.screenshot(panel._f, mode, antialiased)
def save_imageset(self, prefix, views, filetype='png', colorbar='auto',
                  row=-1, col=-1):
    """Convenience wrapper for save_image

    Files created are prefix+'_$view'+filetype

    Parameters
    ----------
    prefix: string | None
        filename prefix for image to be created. If None, a list of
        arrays representing images is returned (not saved to disk).
    views: list
        desired views for images
    filetype: string
        image type
    colorbar: 'auto' | int | list of int | None
        For 'auto', the colorbar is shown in the middle view (default).
        For int or list of int, the colorbar is shown in the specified
        views. For ``None``, no colorbar is shown.
    row : int
        row index of the brain to use
    col : int
        column index of the brain to use

    Returns
    -------
    images_written: list
        all filenames written
    """
    if isinstance(views, string_types):
        raise ValueError("Views must be a non-string sequence"
                         "Use show_view & save_image for a single view")
    # Normalize colorbar to a list of view indices (or None).
    if colorbar == 'auto':
        colorbar = [len(views) // 2]
    elif isinstance(colorbar, int):
        colorbar = [colorbar]
    images_written = []
    for iview, view in enumerate(views):
        try:
            if colorbar is not None and iview in colorbar:
                self.show_colorbar(row, col)
            else:
                self.hide_colorbar(row, col)
            self.show_view(view, row=row, col=col)
            if prefix is not None:
                fname = "%s_%s.%s" % (prefix, view, filetype)
                images_written.append(fname)
                self.save_single_image(fname, row, col)
            else:
                # No prefix: return image arrays instead of writing files.
                images_written.append(self.screenshot_single(row=row,
                                                             col=col))
        except ValueError:
            # show_view raises ValueError for unknown view names; skip
            # those views instead of aborting the whole set.
            print("Skipping %s: not in view dict" % view)
    return images_written
def save_image_sequence(self, time_idx, fname_pattern, use_abs_idx=True,
                        row=-1, col=-1, montage='single', border_size=15,
                        colorbar='auto', interpolation='quadratic'):
    """Save a temporal image sequence

    The files saved are named "fname_pattern % (pos)" where "pos" is a
    relative or absolute index (controlled by "use_abs_idx")

    Parameters
    ----------
    time_idx : array-like
        Time indices to save. Non-integer values will be displayed using
        interpolation between samples.
    fname_pattern : str
        Filename pattern, e.g. 'movie-frame_%0.4d.png'.
    use_abs_idx : boolean
        If True the indices given by "time_idx" are used in the filename
        if False the index in the filename starts at zero and is
        incremented by one for each image (Default: True).
    row : int
        Row index of the brain to use.
    col : int
        Column index of the brain to use.
    montage: 'current' | 'single' | list
        Views to include in the images: 'current' uses the currently
        displayed image; 'single' (default) uses a single view, specified
        by the ``row`` and ``col`` parameters; a 1 or 2 dimensional list
        can be used to specify a complete montage. Examples:
        ``['lat', 'med']`` lateral and ventral views ordered horizontally;
        ``[['fro'], ['ven']]`` frontal and ventral views ordered
        vertically.
    border_size: int
        Size of image border (more or less space between images).
    colorbar: 'auto' | int | list of int | None
        For 'auto', the colorbar is shown in the middle view (default).
        For int or list of int, the colorbar is shown in the specified
        views. For ``None``, no colorbar is shown.
    interpolation : str
        Interpolation method (``scipy.interpolate.interp1d`` parameter,
        one of 'linear' | 'nearest' | 'zero' | 'slinear' | 'quadratic' |
        'cubic', default 'quadratic'). Interpolation is only used for
        non-integer indexes.

    Returns
    -------
    images_written: list
        all filenames written
    """
    # Remember the displayed time point so it can be restored afterwards.
    current_time_idx = self.data_time_index
    images_written = list()
    rel_pos = 0
    for idx in time_idx:
        self.set_data_time_index(idx, interpolation)
        fname = fname_pattern % (idx if use_abs_idx else rel_pos)
        if montage == 'single':
            self.save_single_image(fname, row, col)
        elif montage == 'current':
            self.save_image(fname)
        else:
            # montage is a (possibly nested) list of views.
            self.save_montage(fname, montage, 'h', border_size, colorbar,
                              row, col)
        images_written.append(fname)
        rel_pos += 1
    # Restore original time index
    self.set_data_time_index(current_time_idx)
    return images_written
def save_montage(self, filename, order=['lat', 'ven', 'med'],
                 orientation='h', border_size=15, colorbar='auto',
                 row=-1, col=-1):
    """Create a montage from a given order of images

    Parameters
    ----------
    filename: string | None
        path to final image. If None, the image will not be saved.
    order: list
        list of views: order of views to build montage (default ['lat',
        'ven', 'med']; nested list of views to specify views in a
        2-dimensional grid (e.g, [['lat', 'ven'], ['med', 'fro']])
    orientation: {'h' | 'v'}
        montage image orientation (horizontal of vertical alignment; only
        applies if ``order`` is a flat list)
    border_size: int
        Size of image border (more or less space between images)
    colorbar: 'auto' | int | list of int | None
        For 'auto', the colorbar is shown in the middle view (default).
        For int or list of int, the colorbar is shown in the specified
        views. For ``None``, no colorbar is shown.
    row : int
        row index of the brain to use
    col : int
        column index of the brain to use

    Returns
    -------
    out : array
        The montage image, useable with matplotlib.imshow().
    """
    # find flat list of views and nested list of view indexes
    assert orientation in ['h', 'v']
    if isinstance(order, (str, dict)):
        views = [order]
    elif all(isinstance(x, (str, dict)) for x in order):
        views = order
    else:
        # Nested list: flatten the views and record each row's indexes
        # into ``orientation`` (reused as the grid layout spec).
        views = []
        orientation = []
        for row_order in order:
            if isinstance(row_order, (str, dict)):
                orientation.append([len(views)])
                views.append(row_order)
            else:
                orientation.append([])
                for view in row_order:
                    orientation[-1].append(len(views))
                    views.append(view)
    if colorbar == 'auto':
        colorbar = [len(views) // 2]
    elif isinstance(colorbar, int):
        colorbar = [colorbar]
    brain = self.brain_matrix[row, col]
    # store current view + colorbar visibility
    current_view = mlab.view(figure=brain._f)
    colorbars = self._get_colorbars(row, col)
    colorbars_visibility = dict()
    for cb in colorbars:
        if cb is not None:
            colorbars_visibility[cb] = cb.visible
    # Render each view as an in-memory image (prefix=None), then tile.
    images = self.save_imageset(None, views, colorbar=colorbar, row=row,
                                col=col)
    out = make_montage(filename, images, orientation, colorbar,
                       border_size)
    # get back original view and colorbars
    mlab.view(*current_view, figure=brain._f)
    for cb in colorbars:
        if cb is not None:
            cb.visible = colorbars_visibility[cb]
    return out
def save_movie(self, fname, time_dilation=4., tmin=None, tmax=None,
               framerate=24, interpolation='quadratic', codec='mpeg4',
               bitrate='1M'):
    """Save a movie (for data with a time axis)

    .. Warning::
        This method assumes that time is specified in seconds when adding
        data. If time is specified in milliseconds this will result in
        movies 1000 times longer than expected.

    Parameters
    ----------
    fname : str
        Path at which to save the movie.
    time_dilation : float
        Factor by which to stretch time (default 4). For example, an epoch
        from -100 to 600 ms lasts 700 ms. With ``time_dilation=4`` this
        would result in a 2.8 s long movie.
    tmin : float
        First time point to include (default: all data).
    tmax : float
        Last time point to include (default: all data).
    framerate : float
        Framerate of the movie (frames per second, default 24).
    interpolation : str
        Interpolation method (``scipy.interpolate.interp1d`` parameter,
        one of 'linear' | 'nearest' | 'zero' | 'slinear' | 'quadratic' |
        'cubic', default 'quadratic').
    codec : str
        Codec to use with ffmpeg (default 'mpeg4').
    bitrate : str | float
        Bitrate to use to encode movie. Can be specified as number (e.g.
        64000) or string (e.g. '64k'). Default value is 1M

    Notes
    -----
    This method requires FFmpeg to be installed in the system PATH. FFmpeg
    is free and can be obtained from `here
    <http://ffmpeg.org/download.html>`_.
    """
    assert_ffmpeg_is_available()
    if tmin is None:
        tmin = self._times[0]
    elif tmin < self._times[0]:
        raise ValueError("tmin=%r is smaller than the first time point "
                         "(%r)" % (tmin, self._times[0]))
    # find indexes at which to create frames
    if tmax is None:
        tmax = self._times[-1]
    elif tmax > self._times[-1]:
        raise ValueError("tmax=%r is greater than the latest time point "
                         "(%r)" % (tmax, self._times[-1]))
    # Frame times are spaced by the (dilated) frame period, then mapped
    # to fractional sample indexes for interpolated display.
    n_frames = floor((tmax - tmin) * time_dilation * framerate)
    times = np.arange(n_frames)
    times /= framerate * time_dilation
    times += tmin
    interp_func = interp1d(self._times, np.arange(self.n_times))
    time_idx = interp_func(times)
    n_times = len(time_idx)
    if n_times == 0:
        raise ValueError("No time points selected")
    logger.debug("Save movie for time points/samples\n%s\n%s"
                 % (times, time_idx))
    # Frames are written to a temporary directory with enough digits in
    # the pattern to keep them lexically ordered, then encoded by ffmpeg.
    tempdir = mkdtemp()
    frame_pattern = 'frame%%0%id.png' % (np.floor(np.log10(n_times)) + 1)
    fname_pattern = os.path.join(tempdir, frame_pattern)
    self.save_image_sequence(time_idx, fname_pattern, False, -1, -1,
                             'current', interpolation=interpolation)
    ffmpeg(fname, fname_pattern, framerate, codec=codec, bitrate=bitrate)
def animate(self, views, n_steps=180., fname=None, use_cache=False,
            row=-1, col=-1):
    """Animate a rotation.

    Currently only rotations through the axial plane are allowed.

    Parameters
    ----------
    views: sequence
        views to animate through
    n_steps: float
        number of steps to take in between
    fname: string
        If not None, it saves the animation as a movie.
        fname should end in '.avi' as only the AVI format is supported
    use_cache: bool
        Use previously generated images in ./.tmp/
    row : int
        Row index of the brain to use
    col : int
        Column index of the brain to use
    """
    brain = self.brain_matrix[row, col]
    # list() is required: on Python 3 map() returns a one-shot iterator,
    # but gviews is len()-checked, indexed (gviews[i + 1]), and iterated
    # more than once below.
    gviews = list(map(brain._xfm_view, views))
    allowed = ('lateral', 'caudal', 'medial', 'rostral')
    if not len([v for v in gviews if v in allowed]) == len(gviews):
        raise ValueError('Animate through %s views.' % ' '.join(allowed))
    if fname is not None:
        if not fname.endswith('.avi'):
            raise ValueError('Can only output to AVI currently.')
        tmp_dir = './.tmp'
        tmp_fname = pjoin(tmp_dir, '%05d.png')
        if not os.path.isdir(tmp_dir):
            os.mkdir(tmp_dir)
    for i, beg in enumerate(gviews):
        try:
            # The final view raises IndexError here, ending the sweep.
            end = gviews[i + 1]
            dv, dr = brain._min_diff(beg, end)
            dv /= np.array((n_steps))
            dr /= np.array((n_steps))
            brain.show_view(beg)
            # 'step' (not 'i') so the outer enumerate index is not
            # clobbered by the inner loop.
            for step in range(int(n_steps)):
                brain._f.scene.camera.orthogonalize_view_up()
                brain._f.scene.camera.azimuth(dv[0])
                brain._f.scene.camera.elevation(dv[1])
                brain._f.scene.renderer.reset_camera_clipping_range()
                _force_render([[brain._f]], self._window_backend)
                if fname is not None:
                    if not (os.path.isfile(tmp_fname % step) and use_cache):
                        self.save_single_image(tmp_fname % step, row, col)
        except IndexError:
            pass
    if fname is not None:
        fps = 10
        # we'll probably want some config options here
        enc_cmd = " ".join(["mencoder",
                            "-ovc lavc",
                            "-mf fps=%d" % fps,
                            "mf://%s" % tmp_fname,
                            "-of avi",
                            "-lavcopts vcodec=mjpeg",
                            "-ofps %d" % fps,
                            "-noskip",
                            "-o %s" % fname])
        ret = os.system(enc_cmd)
        if ret:
            print("\n\nError occured when exporting movie\n\n")
class _Hemisphere(object):
    """Object for visualizing one hemisphere with mlab

    Owns the mayavi figure, the geometry mesh/surface, and provides the
    low-level add_* primitives used by the Brain front end.
    """
    def __init__(self, subject_id, hemi, surf, figure, geo, curv, title,
                 cortex, subjects_dir, bg_color, offset, backend):
        # NOTE(review): 'title' and 'offset' are accepted but not used in
        # this constructor — presumably consumed by the caller; confirm.
        if hemi not in ['lh', 'rh']:
            raise ValueError('hemi must be either "lh" or "rh"')
        # Set the identifying info
        self.subject_id = subject_id
        self.hemi = hemi
        self.subjects_dir = subjects_dir
        self.viewdict = viewdicts[hemi]
        self.surf = surf
        self._f = figure
        self._bg_color = bg_color
        self._backend = backend
        # mlab pipeline mesh and surface for geometry
        self._geo = geo
        if curv:
            # Color the surface by binarized curvature.
            curv_data = self._geo.bin_curv
            meshargs = dict(scalars=curv_data)
            colormap, vmin, vmax, reverse = self._get_geo_colors(cortex)
            kwargs = dict(colormap=colormap, vmin=vmin, vmax=vmax)
        else:
            # Flat gray surface when curvature is not shown.
            curv_data = None
            meshargs = dict()
            kwargs = dict(color=(.5, .5, .5))
        meshargs['figure'] = self._f
        x, y, z, f = self._geo.x, self._geo.y, self._geo.z, self._geo.faces
        self._geo_mesh = mlab.pipeline.triangular_mesh_source(x, y, z, f,
                                                              **meshargs)
        # add surface normals
        self._geo_mesh.data.point_data.normals = self._geo.nn
        self._geo_mesh.data.cell_data.normals = None
        self._geo_surf = mlab.pipeline.surface(self._geo_mesh,
                                               figure=self._f,
                                               reset_zoom=True, **kwargs)
        if curv and reverse:
            # Reversing the LUT requires a (hidden) scalar bar handle.
            curv_bar = mlab.scalarbar(self._geo_surf)
            curv_bar.reverse_lut = True
            curv_bar.visible = False

    def show_view(self, view=None, roll=None, distance=None):
        """Orient camera to display view

        Returns the resulting (view, roll) as reported by mlab.
        """
        if isinstance(view, string_types):
            try:
                # Resolve a view-name abbreviation to azimuth/elevation.
                vd = self._xfm_view(view, 'd')
                view = dict(azimuth=vd['v'][0], elevation=vd['v'][1])
                roll = vd['r']
            except ValueError as v:
                print(v)
                raise
        _force_render(self._f, self._backend)
        if view is not None:
            view['reset_roll'] = True
            view['figure'] = self._f
            view['distance'] = distance
            # DO NOT set focal point, can screw up non-centered brains
            # view['focalpoint'] = (0.0, 0.0, 0.0)
            mlab.view(**view)
        if roll is not None:
            mlab.roll(roll=roll, figure=self._f)
        _force_render(self._f, self._backend)
        view = mlab.view(figure=self._f)
        roll = mlab.roll(figure=self._f)
        return view, roll

    def _xfm_view(self, view, out='s'):
        """Normalize a given string to available view

        Parameters
        ----------
        view: string
            view which may match leading substring of available views
        out: {'s' | 'd'}
            's' to return string, 'd' to return dict

        Returns
        -------
        good: string
            matching view string
        """
        if view not in self.viewdict:
            # Accept unambiguous prefixes, e.g. 'lat' -> 'lateral'.
            good_view = [k for k in self.viewdict if view == k[:len(view)]]
            if len(good_view) == 0:
                raise ValueError('No views exist with this substring')
            if len(good_view) > 1:
                raise ValueError("Multiple views exist with this substring."
                                 "Try a longer substring")
            view = good_view[0]
        if out == 'd':
            return self.viewdict[view]
        else:
            return view

    def _min_diff(self, beg, end):
        """Determine minimum "camera distance" between two views.

        Parameters
        ----------
        beg: string
            origin anatomical view
        end: string
            destination anatomical view

        Returns
        -------
        diffs: tuple
            (min view "distance", min roll "distance")
        """
        beg = self._xfm_view(beg)
        end = self._xfm_view(end)
        if beg == end:
            # Same view: animate a full revolution.
            dv = [360., 0.]
            dr = 0
        else:
            end_d = self._xfm_view(end, 'd')
            beg_d = self._xfm_view(beg, 'd')
            dv = []
            for b, e in zip(beg_d['v'], end_d['v']):
                diff = e - b
                # to minimize the rotation we need -180 <= diff <= 180
                if diff > 180:
                    dv.append(diff - 360)
                elif diff < -180:
                    dv.append(diff + 360)
                else:
                    dv.append(diff)
            dr = np.array(end_d['r']) - np.array(beg_d['r'])
        return (np.array(dv), dr)

    def add_overlay(self, old):
        """Add an overlay to the overlay dict from a file or array"""
        surf = OverlayDisplay(old, figure=self._f)
        for bar in ["pos_bar", "neg_bar"]:
            try:
                self._format_cbar_text(getattr(surf, bar))
            except AttributeError:
                # Overlay has no bar of this sign; nothing to format.
                pass
        return surf

    @verbose
    def add_data(self, array, mlab_plot, vertices, smooth_mat, min, max,
                 thresh, lut, colormap, alpha, time, time_label, colorbar):
        """Add data to the brain

        Returns (surface, original color table, colorbar-or-None).
        NOTE(review): vertices/smooth_mat/time/time_label are not used in
        this method body — presumably stored by the caller; confirm.
        """
        # Calculate initial data to plot
        if array.ndim == 1:
            array_plot = array
        elif array.ndim == 2:
            array_plot = array[:, 0]
        else:
            raise ValueError("data has to be 1D or 2D")
        # Set up the visualization pipeline
        mesh = mlab.pipeline.triangular_mesh_source(self._geo.x,
                                                    self._geo.y,
                                                    self._geo.z,
                                                    self._geo.faces,
                                                    scalars=mlab_plot,
                                                    figure=self._f)
        mesh.data.point_data.normals = self._geo.nn
        mesh.data.cell_data.normals = None
        if thresh is not None:
            if array_plot.min() >= thresh:
                # Thresholding would remove nothing; warn instead.
                warn("Data min is greater than threshold.")
            else:
                mesh = mlab.pipeline.threshold(mesh, low=thresh)
        surf = mlab.pipeline.surface(mesh, colormap=colormap,
                                     vmin=min, vmax=max,
                                     opacity=float(alpha), figure=self._f)
        # apply look up table if given
        if lut is not None:
            surf.module_manager.scalar_lut_manager.lut.table = lut
        # Get the original colormap table (copied so later rescaling can
        # restore it)
        orig_ctable = \
            surf.module_manager.scalar_lut_manager.lut.table.to_array().copy()
        # Get the colorbar
        if colorbar:
            bar = mlab.scalarbar(surf)
            self._format_cbar_text(bar)
            bar.scalar_bar_representation.position2 = .8, 0.09
        else:
            bar = None
        return surf, orig_ctable, bar

    def add_annotation(self, annot, ids, cmap):
        """Add an annotation file

        Returns a dict with the surface, name, and colormap.
        """
        # Create an mlab surface to visualize the annot
        mesh = mlab.pipeline.triangular_mesh_source(self._geo.x,
                                                    self._geo.y,
                                                    self._geo.z,
                                                    self._geo.faces,
                                                    scalars=ids,
                                                    figure=self._f)
        mesh.data.point_data.normals = self._geo.nn
        mesh.data.cell_data.normals = None
        surf = mlab.pipeline.surface(mesh, name=annot, figure=self._f)
        # Set the color table
        surf.module_manager.scalar_lut_manager.lut.table = cmap
        # Set the brain attributes
        annot = dict(surface=surf, name=annot, colormap=cmap)
        return annot

    def add_label(self, label, label_name, color, alpha):
        """Add an ROI label to the image"""
        mesh = mlab.pipeline.triangular_mesh_source(self._geo.x,
                                                    self._geo.y,
                                                    self._geo.z,
                                                    self._geo.faces,
                                                    scalars=label,
                                                    figure=self._f)
        mesh.data.point_data.normals = self._geo.nn
        mesh.data.cell_data.normals = None
        surf = mlab.pipeline.surface(mesh, name=label_name, figure=self._f)
        # Two-entry LUT: transparent background, label color.
        color = colorConverter.to_rgba(color, alpha)
        cmap = np.array([(0, 0, 0, 0,), color]) * 255
        surf.module_manager.scalar_lut_manager.lut.table = cmap
        return surf

    def add_morphometry(self, morph_data, colormap, measure,
                        min, max, colorbar):
        """Add a morphometry overlay to the image"""
        mesh = mlab.pipeline.triangular_mesh_source(self._geo.x,
                                                    self._geo.y,
                                                    self._geo.z,
                                                    self._geo.faces,
                                                    scalars=morph_data,
                                                    figure=self._f)
        mesh.data.point_data.normals = self._geo.nn
        mesh.data.cell_data.normals = None
        surf = mlab.pipeline.surface(mesh, colormap=colormap,
                                     vmin=min, vmax=max,
                                     name=measure, figure=self._f)
        # Get the colorbar
        if colorbar:
            bar = mlab.scalarbar(surf)
            self._format_cbar_text(bar)
            bar.scalar_bar_representation.position2 = .8, 0.09
        else:
            bar = None
        # Fill in the morphometry dict
        return dict(surface=surf, colorbar=bar, measure=measure)

    def add_foci(self, foci_coords, scale_factor, color, alpha, name):
        """Add spherical foci, possibly mapping to displayed surf"""
        # Create the visualization
        points = mlab.points3d(foci_coords[:, 0],
                               foci_coords[:, 1],
                               foci_coords[:, 2],
                               np.ones(foci_coords.shape[0]),
                               scale_factor=(10. * scale_factor),
                               color=color, opacity=alpha, name=name,
                               figure=self._f)
        return points

    def add_contour_overlay(self, scalar_data, min=None, max=None,
                            n_contours=7, line_width=1.5, lut=None,
                            colorbar=True):
        """Add a topographic contour overlay of the positive data"""
        # Set up the pipeline
        mesh = mlab.pipeline.triangular_mesh_source(self._geo.x, self._geo.y,
                                                    self._geo.z,
                                                    self._geo.faces,
                                                    scalars=scalar_data,
                                                    figure=self._f)
        mesh.data.point_data.normals = self._geo.nn
        mesh.data.cell_data.normals = None
        thresh = mlab.pipeline.threshold(mesh, low=min)
        surf = mlab.pipeline.contour_surface(thresh, contours=n_contours,
                                             line_width=line_width)
        if lut is not None:
            surf.module_manager.scalar_lut_manager.lut.table = lut
        # Set the colorbar and range correctly
        bar = mlab.scalarbar(surf,
                             nb_colors=n_contours,
                             nb_labels=n_contours + 1)
        bar.data_range = min, max
        self._format_cbar_text(bar)
        bar.scalar_bar_representation.position2 = .8, 0.09
        if not colorbar:
            bar.visible = False
        # Set up a dict attribute with pointers at important things
        return dict(surface=surf, colorbar=bar)

    def add_text(self, x, y, text, name, color=None, opacity=1.0):
        """ Add a text to the visualization"""
        return mlab.text(x, y, text, name=name, color=color,
                         opacity=opacity, figure=self._f)

    def _orient_lights(self):
        """Set lights to come from same direction relative to brain."""
        if self.hemi == "rh":
            # Mirror light azimuths so both hemis are lit symmetrically.
            if self._f.scene is not None and \
                    self._f.scene.light_manager is not None:
                for light in self._f.scene.light_manager.lights:
                    light.azimuth *= -1

    def _get_geo_colors(self, cortex):
        """Return an mlab colormap name, vmin, and vmax for binary curvature.

        Parameters
        ----------
        cortex : {classic, high_contrast, low_contrast, bone, tuple}
            The name of one of the preset cortex styles, or a tuple
            with four entries as described in the return vales.

        Returns
        -------
        colormap : string
            mlab colormap name
        vmin : float
            curv colormap minimum
        vmax : float
            curv colormap maximum
        reverse : boolean
            boolean indicating whether the colormap should be reversed
        """
        colormap_map = dict(classic=("Greys", -1, 2, False),
                            high_contrast=("Greys", -.1, 1.3, False),
                            low_contrast=("Greys", -5, 5, False),
                            bone=("bone", -.2, 2, True))
        if cortex in colormap_map:
            color_data = colormap_map[cortex]
        elif cortex in lut_manager.lut_mode_list():
            # Any valid mlab colormap name with default limits.
            color_data = cortex, -1, 2, False
        else:
            # Assume the caller passed a (colormap, vmin, vmax, reverse)
            # tuple directly.
            color_data = cortex
        return color_data

    def _format_cbar_text(self, cbar):
        """Pick a colorbar label color that contrasts with the background."""
        bg_color = self._bg_color
        if bg_color is None or sum(bg_color) < 2:
            text_color = (1., 1., 1.)
        else:
            text_color = (0., 0., 0.)
        cbar.label_text_property.color = text_color
class OverlayData(object):
    """Encapsulation of statistical neuroimaging overlay viz data"""

    def __init__(self, scalar_data, geo, min, max, sign):
        # Force the sign when the data are entirely one-sided.
        if scalar_data.min() >= 0:
            sign = "pos"
        elif scalar_data.max() <= 0:
            sign = "neg"
        self.geo = geo

        if sign in ("abs", "pos"):
            # Clamp the low threshold to the data maximum so the mayavi
            # threshold filter never gets an empty range (TraitError).
            data_top = np.max((0.0, np.max(scalar_data)))
            low = data_top if data_top < min else min
            self.pos_lims = [low, min, max]
        else:
            self.pos_lims = None

        if sign in ("abs", "neg"):
            # Mirror of the positive case on the negative side.
            data_bottom = np.min((0.0, np.min(scalar_data)))
            high = data_bottom if data_bottom > -min else -min
            self.neg_lims = [high, -max, -min]
        else:
            self.neg_lims = None

        # Byte swap copy; due to mayavi bug
        self.mlab_data = _prepare_data(scalar_data)
class OverlayDisplay():
    """Encapsulation of overlay viz plotting

    Builds up to two mlab surfaces (positive and negative halves of an
    OverlayData instance) with their scalar bars.
    """
    def __init__(self, ol, figure):
        args = [ol.geo.x, ol.geo.y, ol.geo.z, ol.geo.faces]
        kwargs = dict(scalars=ol.mlab_data, figure=figure)
        if ol.pos_lims is not None:
            # Positive half: threshold away values below the low limit.
            pos_mesh = mlab.pipeline.triangular_mesh_source(*args, **kwargs)
            pos_mesh.data.point_data.normals = ol.geo.nn
            pos_mesh.data.cell_data.normals = None
            pos_thresh = mlab.pipeline.threshold(pos_mesh, low=ol.pos_lims[0])
            self.pos = mlab.pipeline.surface(pos_thresh, colormap="YlOrRd",
                                             vmin=ol.pos_lims[1],
                                             vmax=ol.pos_lims[2],
                                             figure=figure)
            self.pos_bar = mlab.scalarbar(self.pos, nb_labels=5)
            self.pos_bar.reverse_lut = True
        else:
            self.pos = None
        if ol.neg_lims is not None:
            # Negative half: threshold away values above the high limit.
            neg_mesh = mlab.pipeline.triangular_mesh_source(*args, **kwargs)
            neg_mesh.data.point_data.normals = ol.geo.nn
            neg_mesh.data.cell_data.normals = None
            neg_thresh = mlab.pipeline.threshold(neg_mesh,
                                                 up=ol.neg_lims[0])
            self.neg = mlab.pipeline.surface(neg_thresh, colormap="PuBu",
                                             vmin=ol.neg_lims[1],
                                             vmax=ol.neg_lims[2],
                                             figure=figure)
            self.neg_bar = mlab.scalarbar(self.neg, nb_labels=5)
        else:
            self.neg = None
        self._format_colorbar()

    def remove(self):
        # Remove surfaces and hide their bars (bars are not destroyed).
        if self.pos is not None:
            self.pos.remove()
            self.pos_bar.visible = False
        if self.neg is not None:
            self.neg.remove()
            self.neg_bar.visible = False

    def _format_colorbar(self):
        # Place the positive bar on the right, negative on the left.
        if self.pos is not None:
            self.pos_bar.scalar_bar_representation.position = (0.53, 0.01)
            self.pos_bar.scalar_bar_representation.position2 = (0.42, 0.09)
        if self.neg is not None:
            self.neg_bar.scalar_bar_representation.position = (0.05, 0.01)
            self.neg_bar.scalar_bar_representation.position2 = (0.42, 0.09)
class TimeViewer(HasTraits):
    """TimeViewer object providing a GUI for visualizing time series

    Useful for visualizing M/EEG inverse solutions on Brain object(s).

    Parameters
    ----------
    brain : Brain (or list of Brain)
        brain(s) to control
    """
    # Nested import of traisui for setup.py without X server
    from traitsui.api import (View, Item, VSplit, HSplit, Group)
    min_time = Int(0)
    max_time = Int(1E9)
    current_time = Range(low="min_time", high="max_time", value=0)
    # colormap: only update when user presses Enter
    fmax = Float(enter_set=True, auto_set=False)
    fmid = Float(enter_set=True, auto_set=False)
    fmin = Float(enter_set=True, auto_set=False)
    transparent = Bool(True)
    smoothing_steps = Int(20, enter_set=True, auto_set=False,
                          desc="number of smoothing steps. Use -1 for"
                               "automatic number of steps")
    orientation = Enum("lateral", "medial", "rostral", "caudal",
                       "dorsal", "ventral", "frontal", "parietal")

    # GUI layout
    view = View(VSplit(Item(name="current_time"),
                       Group(HSplit(Item(name="fmin"),
                                    Item(name="fmid"),
                                    Item(name="fmax"),
                                    Item(name="transparent")
                                    ),
                             label="Color scale",
                             show_border=True),
                       Item(name="smoothing_steps"),
                       Item(name="orientation")
                       )
                )

    def __init__(self, brain):
        super(TimeViewer, self).__init__()

        if isinstance(brain, (list, tuple)):
            self.brains = brain
        else:
            self.brains = [brain]

        # Initialize GUI with values from first brain; updates are
        # disabled so trait notifications do not fire during setup.
        props = self.brains[0].get_data_properties()

        self._disable_updates = True
        self.max_time = len(props["time"]) - 1
        self.current_time = props["time_idx"]
        self.fmin = props["fmin"]
        self.fmid = props["fmid"]
        self.fmax = props["fmax"]
        self.transparent = props["transparent"]
        if props["smoothing_steps"] is None:
            # None (automatic) is represented as -1 in the GUI.
            self.smoothing_steps = -1
        else:
            self.smoothing_steps = props["smoothing_steps"]
        self._disable_updates = False

        # Make sure all brains have the same time points
        for brain in self.brains[1:]:
            this_props = brain.get_data_properties()
            if not np.all(props["time"] == this_props["time"]):
                raise ValueError("all brains must have the same time"
                                 "points")

        # Show GUI
        self.configure_traits()

    @on_trait_change("smoothing_steps")
    def set_smoothing_steps(self):
        """Change the number of smoothing steps on all controlled brains."""
        if self._disable_updates:
            return

        smoothing_steps = self.smoothing_steps
        if smoothing_steps < 0:
            # -1 in the GUI means "automatic"; the Brain API expects None.
            smoothing_steps = None

        for brain in self.brains:
            # Pass the converted local value; previously this passed
            # self.smoothing_steps, silently discarding the -1 -> None
            # conversion above.
            brain.set_data_smoothing_steps(smoothing_steps)

    @on_trait_change("orientation")
    def set_orientation(self):
        """Point every controlled brain at the selected view."""
        if self._disable_updates:
            return

        for brain in self.brains:
            brain.show_view(view=self.orientation)

    @on_trait_change("current_time")
    def set_time_point(self):
        """Show the selected time point on every controlled brain."""
        if self._disable_updates:
            return

        for brain in self.brains:
            brain.set_data_time_index(self.current_time)

    @on_trait_change("fmin, fmid, fmax, transparent")
    def scale_colormap(self):
        """Rescale the data colormap on every controlled brain."""
        if self._disable_updates:
            return

        for brain in self.brains:
            brain.scale_data_colormap(self.fmin, self.fmid, self.fmax,
                                      self.transparent)
|
bsd-3-clause
|
nesterione/scikit-learn
|
sklearn/datasets/mlcomp.py
|
289
|
3855
|
# Copyright (c) 2010 Olivier Grisel <[email protected]>
# License: BSD 3 clause
"""Glue code to load http://mlcomp.org data as a scikit.learn dataset"""
import os
import numbers
from sklearn.datasets.base import load_files
def _load_document_classification(dataset_path, metadata, set_=None, **kwargs):
    """Load a DocumentClassification dataset rooted at *dataset_path*.

    When *set_* is given ('train'/'test'/'raw'), the matching subfolder is
    loaded; the metadata description is attached to the returned Bunch.
    """
    target_path = dataset_path if set_ is None else os.path.join(dataset_path,
                                                                 set_)
    return load_files(target_path, metadata.get('description'), **kwargs)
# Dispatch table mapping an MLComp domain format (the 'format' metadata key)
# to the loader function implementing it.
LOADERS = {
    'DocumentClassification': _load_document_classification,
    # TODO: implement the remaining domain formats
}
def load_mlcomp(name_or_id, set_="raw", mlcomp_root=None, **kwargs):
    """Load a datasets as downloaded from http://mlcomp.org

    Parameters
    ----------
    name_or_id : the integer id or the string name metadata of the MLComp
                 dataset to load

    set_ : select the portion to load: 'train', 'test' or 'raw'

    mlcomp_root : the filesystem path to the root folder where MLComp datasets
                  are stored, if mlcomp_root is None, the MLCOMP_DATASETS_HOME
                  environment variable is looked up instead.

    **kwargs : domain specific kwargs to be passed to the dataset loader.

    Read more in the :ref:`User Guide <datasets>`.

    Returns
    -------

    data : Bunch
        Dictionary-like object, the interesting attributes are:
        'filenames', the files holding the raw to learn, 'target', the
        classification labels (integer index), 'target_names',
        the meaning of the labels, and 'DESCR', the full description of the
        dataset.

    Note on the lookup process: depending on the type of name_or_id,
    will choose between integer id lookup or metadata name lookup by
    looking at the unzipped archives and metadata file.

    Raises ValueError when the root folder cannot be resolved, the dataset
    cannot be found, or no loader exists for its format.

    TODO: implement zip dataset loading too
    """
    if mlcomp_root is None:
        try:
            mlcomp_root = os.environ['MLCOMP_DATASETS_HOME']
        except KeyError:
            raise ValueError("MLCOMP_DATASETS_HOME env variable is undefined")

    mlcomp_root = os.path.expanduser(mlcomp_root)
    mlcomp_root = os.path.abspath(mlcomp_root)
    mlcomp_root = os.path.normpath(mlcomp_root)

    if not os.path.exists(mlcomp_root):
        raise ValueError("Could not find folder: " + mlcomp_root)

    # dataset lookup
    if isinstance(name_or_id, numbers.Integral):
        # id lookup
        dataset_path = os.path.join(mlcomp_root, str(name_or_id))
    else:
        # assume name based lookup
        dataset_path = None
        expected_name_line = "name: " + name_or_id
        for dataset in os.listdir(mlcomp_root):
            metadata_file = os.path.join(mlcomp_root, dataset, 'metadata')
            if not os.path.exists(metadata_file):
                continue
            with open(metadata_file) as f:
                for line in f:
                    if line.strip() == expected_name_line:
                        dataset_path = os.path.join(mlcomp_root, dataset)
                        break
            # BUG FIX: stop scanning once a dataset matched; previously only
            # the inner loop was broken, so a later dataset with the same
            # name line silently overwrote the first match.
            if dataset_path is not None:
                break
        if dataset_path is None:
            raise ValueError("Could not find dataset with metadata line: " +
                             expected_name_line)

    # loading the dataset metadata: 'key: value' lines, first ':' splits
    metadata = dict()
    metadata_file = os.path.join(dataset_path, 'metadata')
    if not os.path.exists(metadata_file):
        raise ValueError(dataset_path + ' is not a valid MLComp dataset')
    with open(metadata_file) as f:
        for line in f:
            if ":" in line:
                key, value = line.split(":", 1)
                metadata[key.strip()] = value.strip()

    # Renamed from 'format' to avoid shadowing the builtin; also fixes the
    # 'unknow' typo that leaked into the error message below.
    format_name = metadata.get('format', 'unknown')
    loader = LOADERS.get(format_name)
    if loader is None:
        raise ValueError("No loader implemented for format: " + format_name)
    return loader(dataset_path, metadata, set_=set_, **kwargs)
|
bsd-3-clause
|
barentsen/dave
|
plot/compare_lightcurves.py
|
1
|
4011
|
"""Compare multiple lightcurves by plotting them one below the other.
Example use
===========
from compare_lightcurves import plot_multiple_lightcurves
fig = plot_multiple_lightcurves(time=time,
fluxes=(flux1, flux2, flux3),
labels=('Smith', 'Jones', 'Williams'),
title='Comparison of lightcurves')
fig.savefig('lightcurves.png')
pl.close(fig)
"""
import matplotlib.pyplot as pl
import numpy as np
def plot_multiple_lightcurves(time, fluxes=(), labels=None,
                              offset_sigma=7., title='', markersize=2,
                              epoch=None, period=None):
    """Returns a figure showing multiple lightcurves separated by an offset.

    Parameters
    ----------
    time : 1D array of size N.
        Times (i.e. x axis).

    fluxes: 1D array of size N, or sequence of 1D arrays of size N.
        Fluxes (i.e. y axis).

    labels : str, or sequence of str
        Labels corresponding to the fluxes.

    offset_sigma: float
        The lightcurves will be shown with an offset of
        offset_sigma times the overall standard deviation.

    title : str
        Title to show at the top of the figure.

    markersize : float
        Size of the symbols in the plot.

    epoch : float
        If `epoch` and `period` are set, the lightcurves will be folded.

    period : float
        If `epoch` and `period` are set, the lightcurves will be folded.
    """
    fig, ax = pl.subplots()
    # Input validation: accept a single flux array as well as a list or
    # tuple of arrays.  BUG FIX: a *list* of fluxes used to be wrapped a
    # second time (only tuples were recognized), which made the per-curve
    # median normalization below operate on a 2D array.
    if not isinstance(fluxes, (list, tuple)):
        fluxes = [fluxes]
    if labels is None or len(labels) != len(fluxes):
        labels = [None] * len(fluxes)
    # Normalize each lightcurve by its own median flux
    normalized_fluxes = [f / np.median(f) for f in fluxes]
    # Did the user request a folded plot?
    if epoch is not None and period is not None:
        time = np.fmod(time - epoch + .25*period, period)
    # How big should the spacing be between lightcurves?
    sampling_size = 20  # subsample the time axis for speed
    # BUG FIX: subsample along the time axis (axis 1); the previous
    # [::sampling_size] sliced across the small set of lightcurves (axis 0)
    # and effectively computed the std from the first curve only.
    std = np.std(np.asarray(normalized_fluxes)[:, ::sampling_size])
    margin = offset_sigma * std
    # Plot all the lightcurves, each shifted upward by idx*margin
    for idx, curve in enumerate(normalized_fluxes):
        ax.plot(time, curve + idx*margin,
                label=labels[idx],
                marker='o',
                markersize=markersize,
                linestyle='None')
    # Aesthetics
    ax.legend(bbox_to_anchor=(0., 0.99, 1., 0),
              loc=9,
              ncol=len(fluxes),
              borderaxespad=0.,
              handlelength=0.8,
              frameon=True)
    ax.set_title(title)
    ax.set_xlim(np.min(time), np.max(time))
    ax.set_ylim([1 - margin, 1 + len(fluxes)*margin])
    return fig
def save_multiple_lightcurves_plot(output_fn='plot.png', **kwargs):
    """Render plot_multiple_lightcurves(**kwargs), save it to *output_fn*,
    and close the figure to free its memory."""
    figure = plot_multiple_lightcurves(**kwargs)
    figure.savefig(output_fn)
    pl.close(figure)
# Demo: generate three flat, noisy lightcurves and write both an unfolded
# and a folded comparison plot.
if __name__ == '__main__':
    size = 3840
    time = np.arange(0, size)
    # Three independent white-noise curves around a unit baseline
    flux1 = np.ones(size) + np.random.normal(0, scale=0.1, size=size)
    flux2 = np.ones(size) + np.random.normal(0, scale=0.1, size=size)
    flux3 = np.ones(size) + np.random.normal(0, scale=0.1, size=size)
    fig = plot_multiple_lightcurves(time=time,
                                    fluxes=(flux1, flux2, flux3),
                                    labels=('Smith', 'Jones', 'Williams'),
                                    title='Comparison of lightcurves')
    fig.savefig('lightcurves.png')
    pl.close(fig)
    # Same data folded on an arbitrary epoch/period for demonstration
    fig = plot_multiple_lightcurves(time=time,
                                    epoch=20.,
                                    period=100.,
                                    fluxes=(flux1, flux2, flux3),
                                    labels=('Smith', 'Jones', 'Williams'),
                                    title='Comparison of lightcurves')
    fig.savefig('lightcurves-folded.png')
    pl.close(fig)
|
mit
|
xzh86/scikit-learn
|
examples/gaussian_process/plot_gp_probabilistic_classification_after_regression.py
|
252
|
3490
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
==============================================================================
Gaussian Processes classification example: exploiting the probabilistic output
==============================================================================
A two-dimensional regression exercise with a post-processing allowing for
probabilistic classification thanks to the Gaussian property of the prediction.
The figure illustrates the probability that the prediction is negative with
respect to the remaining uncertainty in the prediction. The red and blue lines
corresponds to the 95% confidence interval on the prediction of the zero level
set.
"""
print(__doc__)
# Author: Vincent Dubourg <[email protected]>
# Licence: BSD 3 clause
import numpy as np
from scipy import stats
from sklearn.gaussian_process import GaussianProcess
from matplotlib import pyplot as pl
from matplotlib import cm
# Standard normal distribution functions
phi = stats.distributions.norm().pdf     # density
PHI = stats.distributions.norm().cdf     # cumulative distribution
PHIinv = stats.distributions.norm().ppf  # quantile (inverse CDF)

# A few constants
lim = 8  # half-width of the square evaluation/plotting domain
def g(x):
    """The function to predict (classification will then consist in predicting
    whether g(x) <= 0 or not).

    *x* is an (n, 2) array; returns a length-n array of 5 - x2 - x1**2 / 2.
    """
    quadratic_term = 0.5 * np.square(x[:, 0])
    return 5.0 - x[:, 1] - quadratic_term
# Design of experiments: 8 fixed training points in [-lim, lim]^2
X = np.array([[-4.61611719, -6.00099547],
              [4.10469096, 5.32782448],
              [0.00000000, -0.50000000],
              [-6.17289014, -4.6984743],
              [1.3109306, -6.93271427],
              [-5.03823144, 3.10584743],
              [-2.87600388, 6.74310541],
              [5.21301203, 4.26386883]])
# Observations
y = g(X)
# Instanciate and fit Gaussian Process Model
gp = GaussianProcess(theta0=5e-1)
# Don't perform MLE or you'll get a perfect prediction for this simple example!
gp.fit(X, y)
# Evaluate real function, the prediction and its MSE on a res x res grid
res = 50
x1, x2 = np.meshgrid(np.linspace(- lim, lim, res),
                     np.linspace(- lim, lim, res))
xx = np.vstack([x1.reshape(x1.size), x2.reshape(x2.size)]).T
y_true = g(xx)
y_pred, MSE = gp.predict(xx, eval_MSE=True)
sigma = np.sqrt(MSE)  # predictive standard deviation
y_true = y_true.reshape((res, res))
y_pred = y_pred.reshape((res, res))
sigma = sigma.reshape((res, res))
# 97.5% normal quantile
# NOTE(review): k is computed but never used below — candidate for removal.
k = PHIinv(.975)
# Plot the probabilistic classification iso-values using the Gaussian property
# of the prediction: PHI(-y_pred/sigma) = P[g(x) <= 0]
fig = pl.figure(1)
ax = fig.add_subplot(111)
ax.axes.set_aspect('equal')
pl.xticks([])
pl.yticks([])
ax.set_xticklabels([])
ax.set_yticklabels([])
pl.xlabel('$x_1$')
pl.ylabel('$x_2$')
cax = pl.imshow(np.flipud(PHI(- y_pred / sigma)), cmap=cm.gray_r, alpha=0.8,
                extent=(- lim, lim, - lim, lim))
norm = pl.matplotlib.colors.Normalize(vmin=0., vmax=0.9)
cb = pl.colorbar(cax, ticks=[0., 0.2, 0.4, 0.6, 0.8, 1.], norm=norm)
cb.set_label('${\\rm \mathbb{P}}\left[\widehat{G}(\mathbf{x}) \leq 0\\right]$')
# Training points, colored by their class (g <= 0 red, g > 0 blue)
pl.plot(X[y <= 0, 0], X[y <= 0, 1], 'r.', markersize=12)
pl.plot(X[y > 0, 0], X[y > 0, 1], 'b.', markersize=12)
# True zero level set (dash-dot) and the 2.5% / 50% / 97.5% contours of the
# predicted probability (blue / black dashed / red)
cs = pl.contour(x1, x2, y_true, [0.], colors='k', linestyles='dashdot')
cs = pl.contour(x1, x2, PHI(- y_pred / sigma), [0.025], colors='b',
                linestyles='solid')
pl.clabel(cs, fontsize=11)
cs = pl.contour(x1, x2, PHI(- y_pred / sigma), [0.5], colors='k',
                linestyles='dashed')
pl.clabel(cs, fontsize=11)
cs = pl.contour(x1, x2, PHI(- y_pred / sigma), [0.975], colors='r',
                linestyles='solid')
pl.clabel(cs, fontsize=11)
pl.show()
|
bsd-3-clause
|
selective-inference/selective-inference
|
doc/learning_examples/riboflavin/CV.py
|
3
|
10083
|
import functools
import hashlib

import numpy as np
import pandas as pd
from scipy.stats import norm as normal_dbn

import regreg.api as rr
from selection.algorithms.debiased_lasso import pseudoinverse_debiasing_matrix
# load in the X matrix
import rpy2.robjects as rpy
from rpy2.robjects import numpy2ri
rpy.r('library(hdi); data(riboflavin); X = riboflavin$x')
numpy2ri.activate()
X_full = np.asarray(rpy.r('X'))
numpy2ri.deactivate()
from selection.learning.utils import full_model_inference, liu_inference, pivot_plot
from selection.learning.core import split_sampler, keras_fit, repeat_selection, infer_set_target
from selection.learning.Rutils import lasso_glmnet, cv_glmnet_lam
from selection.learning.learners import mixture_learner
def highdim_model_inference(X,
                            y,
                            truth,
                            selection_algorithm,
                            sampler,
                            lam_min,
                            dispersion,
                            success_params=(1, 1),
                            fit_probability=keras_fit,
                            fit_args={'epochs':10, 'sizes':[100]*5, 'dropout':0., 'activation':'relu'},
                            alpha=0.1,
                            B=2000,
                            naive=True,
                            learner_klass=mixture_learner,
                            how_many=None):
    """Run selective inference for a high-dimensional LASSO model.

    Repeats `selection_algorithm` via `repeat_selection`, forms a debiased
    LASSO estimate of the selected coefficients, then computes selective
    pivots/p-values/intervals with `infer_set_target`.  When `naive` is
    True, plain normal-theory (non-selective) intervals are merged in as
    `naive_*` columns.

    Returns a pandas DataFrame with one row per selected variable, or None
    when nothing is selected (the function falls through without a return).

    NOTE(review): uses the module-level name `pd`; pandas must be imported
    at module scope, not only inside the __main__ block.
    NOTE(review): `fit_args` is a mutable default argument — it is only
    passed through here, but mutating it downstream would leak across calls.
    """
    n, p = X.shape
    XTX = X.T.dot(X)
    # Hash the problem instance so repeated runs can be grouped in the CSV.
    instance_hash = hashlib.md5()
    instance_hash.update(X.tobytes())
    instance_hash.update(y.tobytes())
    instance_hash.update(truth.tobytes())
    instance_id = instance_hash.hexdigest()
    # run selection algorithm
    observed_set = repeat_selection(selection_algorithm, sampler, *success_params)
    observed_list = sorted(observed_set)
    # observed debiased LASSO estimate
    loss = rr.squared_error(X, y)
    pen = rr.l1norm(p, lagrange=lam_min)
    problem = rr.simple_problem(loss, pen)
    soln = problem.solve()
    grad = X.T.dot(X.dot(soln) - y) # gradient at beta_hat
    M = pseudoinverse_debiasing_matrix(X,
                                       observed_list)
    observed_target = soln[observed_list] - M.dot(grad)
    tmp = X.dot(M.T)
    target_cov = tmp.T.dot(tmp) * dispersion
    cross_cov = np.identity(p)[:,observed_list] * dispersion
    if len(observed_list) > 0:
        if how_many is None:
            how_many = len(observed_list)
        # NOTE(review): only the index list is truncated here;
        # observed_target/target_cov/cross_cov keep the full selection —
        # confirm infer_set_target expects this asymmetry.
        observed_list = observed_list[:how_many]
        # find the target, based on the observed outcome
        (pivots,
         covered,
         lengths,
         pvalues,
         lower,
         upper) = [], [], [], [], [], []
        targets = []
        true_target = truth[observed_list]
        results = infer_set_target(selection_algorithm,
                                   observed_set,
                                   observed_list,
                                   sampler,
                                   observed_target,
                                   target_cov,
                                   cross_cov,
                                   hypothesis=true_target,
                                   fit_probability=fit_probability,
                                   fit_args=fit_args,
                                   success_params=success_params,
                                   alpha=alpha,
                                   B=B,
                                   learner_klass=learner_klass)
        # Collect per-variable pivot, interval, p-value from the generator.
        for i, result in enumerate(results):
            (pivot,
             interval,
             pvalue,
             _) = result
            pvalues.append(pvalue)
            pivots.append(pivot)
            covered.append((interval[0] < true_target[i]) * (interval[1] > true_target[i]))
            lengths.append(interval[1] - interval[0])
            lower.append(interval[0])
            upper.append(interval[1])
        if len(pvalues) > 0:
            df = pd.DataFrame({'pivot':pivots,
                               'pvalue':pvalues,
                               'coverage':covered,
                               'length':lengths,
                               'upper':upper,
                               'lower':lower,
                               'id':[instance_id]*len(pvalues),
                               'target':true_target,
                               'variable':observed_list,
                               'B':[B]*len(pvalues)})
            if naive:
                # Non-selective baseline: symmetric normal intervals per
                # coordinate, two-sided pivots/p-values.
                (naive_pvalues,
                 naive_pivots,
                 naive_covered,
                 naive_lengths,
                 naive_upper,
                 naive_lower) = [], [], [], [], [], []
                for j, idx in enumerate(observed_list):
                    true_target = truth[idx]
                    target_sd = np.sqrt(target_cov[j, j])
                    observed_target_j = observed_target[j]
                    quantile = normal_dbn.ppf(1 - 0.5 * alpha)
                    naive_interval = (observed_target_j - quantile * target_sd,
                                      observed_target_j + quantile * target_sd)
                    naive_upper.append(naive_interval[1])
                    naive_lower.append(naive_interval[0])
                    naive_pivot = (1 - normal_dbn.cdf((observed_target_j - true_target) / target_sd))
                    naive_pivot = 2 * min(naive_pivot, 1 - naive_pivot)
                    naive_pivots.append(naive_pivot)
                    naive_pvalue = (1 - normal_dbn.cdf(observed_target_j / target_sd))
                    naive_pvalue = 2 * min(naive_pvalue, 1 - naive_pvalue)
                    naive_pvalues.append(naive_pvalue)
                    naive_covered.append((naive_interval[0] < true_target) * (naive_interval[1] > true_target))
                    naive_lengths.append(naive_interval[1] - naive_interval[0])
                naive_df = pd.DataFrame({'naive_pivot':naive_pivots,
                                         'naive_pvalue':naive_pvalues,
                                         'naive_coverage':naive_covered,
                                         'naive_length':naive_lengths,
                                         'naive_upper':naive_upper,
                                         'naive_lower':naive_lower,
                                         'variable':observed_list,
                                         })
                df = pd.merge(df, naive_df, on='variable')
            return df
# When True, the design matrix is bootstrapped (and jittered) so rows are an
# IID sample instead of being conditioned on throughout.
boot_design = False
def simulate(s=10, signal=(0.5, 1), sigma=2, alpha=0.1, B=3000, seed=0):
    """Simulate one riboflavin-design selective-inference run.

    Builds a sparse truth with *s* nonzero coefficients, draws a Gaussian
    response, selects lambda by cross-validated glmnet, runs
    highdim_model_inference, and merges in Liu et al. inference results.
    Returns a DataFrame, or None when nothing was selected.
    """
    # description of statistical problem
    n, p = X_full.shape
    if boot_design:
        idx = np.random.choice(np.arange(n), n, replace=True)
        X = X_full[idx] # bootstrap X to make it really an IID sample, i.e. don't condition on X throughout
        X += 0.1 * np.std(X) * np.random.standard_normal(X.shape) # to make non-degenerate
    else:
        X = X_full.copy()
    # Standardize columns to mean 0, sd 1
    X = X - np.mean(X, 0)[None, :]
    X = X / np.std(X, 0)[None, :]
    # Sparse truth: s signals linearly spaced in [signal[0], signal[1]],
    # randomly placed, scaled to a sigma/sqrt(n) magnitude
    truth = np.zeros(p)
    truth[:s] = np.linspace(signal[0], signal[1], s)
    np.random.shuffle(truth)
    truth *= sigma / np.sqrt(n)
    y = X.dot(truth) + sigma * np.random.standard_normal(n)
    lam_min, lam_1se = cv_glmnet_lam(X.copy(), y.copy(), seed=seed)
    lam_min, lam_1se = n * lam_min, n * lam_1se
    XTX = X.T.dot(X)
    XTXi = np.linalg.inv(XTX)
    resid = y - X.dot(XTXi.dot(X.T.dot(y)))
    dispersion = sigma**2
    S = X.T.dot(y)
    covS = dispersion * X.T.dot(X)
    splitting_sampler = split_sampler(X * y[:, None], covS)
    def meta_algorithm(X, XTXi, resid, sampler):
        # Selection algorithm: glmnet LASSO on a resampled response
        S = sampler.center.copy()
        ynew = X.dot(XTXi).dot(S) + resid # will be ok for n>p and non-degen X
        G = lasso_glmnet(X, ynew, *[None]*4)
        select = G.select(seed=seed)
        return set(list(select[0]))
    selection_algorithm = functools.partial(meta_algorithm, X, XTXi, resid)
    # run selection algorithm
    df = highdim_model_inference(X,
                                 y,
                                 truth,
                                 selection_algorithm,
                                 splitting_sampler,
                                 lam_min,
                                 sigma**2, # dispersion assumed known for now
                                 success_params=(1, 1),
                                 B=B,
                                 fit_probability=keras_fit,
                                 fit_args={'epochs':10, 'sizes':[100]*5, 'dropout':0., 'activation':'relu'})
    if df is not None:
        liu_df = liu_inference(X,
                               y,
                               1.00001 * lam_min,
                               dispersion,
                               truth,
                               alpha=alpha,
                               approximate_inverse='BN')
        return pd.merge(df, liu_df, on='variable')
if __name__ == "__main__":

    import statsmodels.api as sm
    import matplotlib.pyplot as plt
    import pandas as pd

    # Grid used for plotting empirical CDFs of pivots
    U = np.linspace(0, 1, 101)
    plt.clf()

    # Random but fixed base seed so repeated runs get distinct simulations
    init_seed = np.fabs(np.random.standard_normal() * 500)
    for i in range(500):
        df = simulate(seed=init_seed+i)
        csvfile = 'riboflavin_CV.csv'
        outbase = csvfile[:-4]

        if df is not None and i > 0:

            # Accumulate results across iterations in one CSV
            try:
                df = pd.concat([df, pd.read_csv(csvfile)])
            except FileNotFoundError:
                pass
            df.to_csv(csvfile, index=False)

            if len(df['pivot']) > 0:
                # Refresh the pivot ECDF plot, overlaying the Liu CV pivots
                pivot_ax, lengths_ax = pivot_plot(df, outbase)
                liu_pivot = df['liu_pivot']
                liu_pivot = liu_pivot[~np.isnan(liu_pivot)]
                pivot_ax.plot(U, sm.distributions.ECDF(liu_pivot)(U), 'gray', label='Liu CV',
                              linewidth=3)
                pivot_ax.legend()
                fig = pivot_ax.figure
                fig.savefig(csvfile[:-4] + '.pdf')
|
bsd-3-clause
|
elenbert/allsky
|
src/webdatagen/system-sensors.py
|
1
|
2514
|
#!/usr/bin/python
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import MySQLdb
import sys
import config
def plot_cpu_temperature(sensor_data, output_file):
    """Plot CPU temperature over time and save the graph to *output_file*.

    *sensor_data* is a sequence of DB rows whose first two columns are
    (timestamp, temperature); the title shows the most recent reading.
    """
    xdata = []
    ydata = []
    print 'Plotting cpu temperature graph using ' + str(len(sensor_data)) + ' db records'
    for row in sensor_data:
        xdata.append(row[0])
        ydata.append(row[1])
    temper = np.array(ydata)
    plt.title('CPU temperature: ' + str(ydata[-1]) + ' C\n')
    plt.plot(xdata, temper, label = "Temperature", color="red")
    # Show only hour:minute on the time axis
    plt.gca().xaxis.set_major_formatter(mdates.DateFormatter('%H:%M'))
    plt.legend()
    plt.ylabel('Temperature C')
    plt.grid(True)
    plt.tight_layout()
    plt.savefig(output_file, dpi=120)
    print 'Graph saved as ' + output_file
    # Clear the shared pyplot figure so later plots start clean
    plt.gcf().clear()
def plot_internal_climate(sensor_data, output_file):
    """Plot box temperature and humidity as two stacked subplots and save
    the figure to *output_file*.

    *sensor_data* rows are (timestamp, temperature, humidity); the title
    shows the latest readings and the x label the covered date range.
    """
    xdata = []
    ydata_temper = []
    ydata_humidity = []
    print 'Plotting internal temperature/humidity graph using ' + str(len(sensor_data)) + ' db records'
    for row in sensor_data:
        xdata.append(row[0])
        ydata_temper.append(row[1])
        ydata_humidity.append(row[2])
    temper = np.array(ydata_temper)
    humid = np.array(ydata_humidity)
    # Upper subplot: temperature
    plt.subplot(211)
    plt.title('Box air temperature and humidity\nCurrent temperature: '
              + str(ydata_temper[-1]) + ' C\nCurrent humidity: ' + str(ydata_humidity[-1]) + ' %\n')
    plt.plot(xdata, temper, label = "Temperature")
    plt.gca().xaxis.set_major_formatter(mdates.DateFormatter('%H:%M'))
    plt.legend()
    plt.ylabel('Temperature C')
    plt.grid(True)
    plt.tight_layout()
    # Lower subplot: humidity, x label spans first to last sample date
    plt.subplot(212)
    plt.plot(xdata, humid, label = "Humidity", color='green')
    plt.xlabel('Time period: ' + str(xdata[0].date()) \
               + ' - ' + str((xdata[len(xdata)-1]).date()))
    plt.gca().xaxis.set_major_formatter(mdates.DateFormatter('%H:%M'))
    plt.grid(True)
    plt.legend()
    plt.ylabel('Humidity %')
    plt.tight_layout()
    plt.savefig(output_file, dpi=120)
    print 'Graph saved as ' + output_file
    # Clear the shared pyplot figure so later plots start clean
    plt.gcf().clear()
# Connect to the monitoring database; credentials come from the local config
# module, and connect_timeout guards against a hung server.
db = MySQLdb.connect(host=config.MYSQL_HOST, user=config.MYSQL_USER, \
        passwd=config.MYSQL_PASSWORD, db=config.MYSQL_DB, connect_timeout=90)
cur = db.cursor()
print 'Selecting data from db'
# Last 24 hours of CPU temperature readings -> daily CPU graph
cur.execute("SELECT * from cpu_sensor WHERE time >= NOW() - INTERVAL 1 DAY")
plot_cpu_temperature(cur.fetchall(), output_file=config.PLOT_CPU_TEMPERATURE_DAY)
# Last 24 hours from the internal_dh22 table -> daily climate graph
cur.execute("SELECT * from internal_dh22 WHERE time >= NOW() - INTERVAL 1 DAY")
plot_internal_climate(cur.fetchall(), output_file=config.PLOT_INTERNAL_DH22_DAY)
db.close()
print 'Done\n'
|
gpl-2.0
|
Titan-C/scikit-learn
|
examples/datasets/plot_iris_dataset.py
|
36
|
1929
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
The Iris Dataset
=========================================================
This data sets consists of 3 different types of irises'
(Setosa, Versicolour, and Virginica) petal and sepal
length, stored in a 150x4 numpy.ndarray
The rows being the samples and the columns being:
Sepal Length, Sepal Width, Petal Length and Petal Width.
The below plot uses the first two features.
See `here <https://en.wikipedia.org/wiki/Iris_flower_data_set>`_ for more
information on this dataset.
"""
print(__doc__)

# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause

import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from sklearn import datasets
from sklearn.decomposition import PCA

# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2]  # we only take the first two features.
y = iris.target

# Axis limits with a small margin around the data
x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5

plt.figure(2, figsize=(8, 6))
plt.clf()

# Plot the training points, colored by class label
plt.scatter(X[:, 0], X[:, 1], c=y, cmap=plt.cm.Paired)
plt.xlabel('Sepal length')
plt.ylabel('Sepal width')

plt.xlim(x_min, x_max)
plt.ylim(y_min, y_max)
plt.xticks(())
plt.yticks(())

# To getter a better understanding of interaction of the dimensions
# plot the first three PCA dimensions
fig = plt.figure(1, figsize=(8, 6))
ax = Axes3D(fig, elev=-150, azim=110)
X_reduced = PCA(n_components=3).fit_transform(iris.data)
ax.scatter(X_reduced[:, 0], X_reduced[:, 1], X_reduced[:, 2], c=y,
           cmap=plt.cm.Paired)
ax.set_title("First three PCA directions")
ax.set_xlabel("1st eigenvector")
ax.w_xaxis.set_ticklabels([])
ax.set_ylabel("2nd eigenvector")
ax.w_yaxis.set_ticklabels([])
ax.set_zlabel("3rd eigenvector")
ax.w_zaxis.set_ticklabels([])

plt.show()
|
bsd-3-clause
|
bt3gl/MLNet-Classifying-Complex-Networks
|
MLNet-2.0/classifiers/log_reg/src/main.py
|
1
|
7266
|
#!/usr/bin/env python
__author__ = "Mari Wahl"
__email__ = "[email protected]"
''' Performs logistic regression to our complex networks '''
import os
import numpy as np
from sklearn import linear_model
from constants import PERCENTAGE, INPUT_FOLDER, OUTPUT_FOLDER, NORM, NUM_SETS, FEATURE_NAME
def write_accuracies(acc_train, acc_test, output_file, norm_type, set_number, bol_with_outlier):
    '''Append one result row (normalization, set, outlier flag, train/test
    accuracy rounded to 3 decimals) to *output_file*.'''
    row = "{0}, {1}, {2}, {3}, {4}\n".format(
        norm_type[:4], set_number, bol_with_outlier,
        round(acc_train, 3), round(acc_test, 3))
    with open(output_file, "a") as f:
        f.write(row)
def write_features_weights(score, output_file_feature, norm_type, set_number, bol_with_outlier):
    '''Append each feature's weight (rounded to 1 decimal) to
    *output_file_feature*, and record weights above 0.9 with their feature
    name in the companion '<file>good_feature' file.'''
    prefix = norm_type[:4] + ', ' + str(set_number)
    strong_entries = []
    with open(output_file_feature, "a") as out:
        out.write(prefix + ' , ' + str(bol_with_outlier)[:1] + ', ')
        for index, weight in enumerate(score):
            out.write(str(round(weight, 1)) + ' ')
            if weight > 0.9:
                strong_entries.append(prefix + ', ' + str(bol_with_outlier)[:1] +
                                      ', ' + str(weight) + ', ' +
                                      str(FEATURE_NAME[index]) + ', ')
        out.write("\n")
    with open(output_file_feature + 'good_feature', "a") as out:
        for entry in strong_entries:
            out.write(entry + '\n')
        out.write("\n")
def find_better_features(data, truth, regularization=1e5, number_renor_models=200):
    '''Resample the train data and compute a Logistic Regression on each resampling
       http://scikit-learn.org/stable/modules/generated/sklearn.linear_model.RandomizedLogisticRegression.html'''
    selector = linear_model.RandomizedLogisticRegression(
        C=regularization, n_resampling=number_renor_models)
    return selector.fit(data, truth)
def fit_model(data, truth, regularization=1e5, pen='l1'):
    """ Logistic Regression where the training algorithm uses a one-vs.-all (OvA) scheme
        http://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LogisticRegression.html """
    classifier = linear_model.LogisticRegression(penalty=pen, C=regularization)
    return classifier.fit(data, truth)
def classify_data(model, data, truth):
    """ Returns the accuracy score. Same as the method 'score()'"""
    n_correct = np.sum(model.predict(data) == truth)
    return float(n_correct) / truth.shape[0]
def load_data(datafile_name):
    '''Load a comma-separated data file and split it into a feature matrix
    (all columns but the last) and a label vector (last column).'''
    table = np.loadtxt(datafile_name, delimiter=',')
    features, labels = table[:, :-1], table[:, -1]
    return features, labels
def get_input_data(sett, set_number, norm_type, bol_with_outlier):
    """ Return the name of the file for the parameters """
    suffix = '_with_outlier' if bol_with_outlier else ''
    return (INPUT_FOLDER + 'together' + str(set_number) + "_" + sett + "_" +
            str(PERCENTAGE) + '_' + norm_type + suffix + ".data")
def main():
    '''Run logistic regression for every normalization type and data-set
    split, both with and without outliers, writing accuracies, feature
    weights and a sorted summary of the best classifications.'''
    folder = OUTPUT_FOLDER + 'log_reg/'
    if not os.path.exists(folder):
        os.makedirs(folder)
    output_file = folder + 'results.out'
    # (Re)create the accuracy results file with a header
    with open(output_file, "w") as f:
        f.write("# LOG REG RESULTS, TRAIN/TEST FRACTION:  " + str(PERCENTAGE) + "\n")
        f. write("# norm_typealization - Set set_number - Include Outliers? - Accu. Train - Accu Test\n")
    output_file_feature = folder + 'results_features.out'
    # (Re)create the feature-weight results file with a header
    with open(output_file_feature, "w") as f:
        f.write("# LOG REG RESULTS, TRAIN/TEST FRACTION:  " + str(PERCENTAGE) + "\n")
        f.write('# Nor  Set OL?  Siz Ord Ass Tra Deg Cor NTr NCl Cnu Clu Eco Ecc Dia Bet Den Rad Scl Com Pag Cen \n')
    ''' Loop to each case file:
        - normalization type
        - set order number
        - with outlier or without outlier '''
    classification = []
    for norm_type in NORM:
        for set_number in range(1, NUM_SETS+1):
            ''' with outlier '''
            bol_with_outlier = True
            # get input and output file
            input_train = get_input_data('train', set_number, norm_type, bol_with_outlier)
            input_test = get_input_data('test', set_number, norm_type, bol_with_outlier)
            # get data
            X_train, Y_train = load_data(input_train)
            X_test, Y_test = load_data(input_test)
            # classify and save
            model = fit_model(X_train, Y_train)
            acc_train = classify_data(model, X_train, Y_train)
            acc_test = classify_data(model, X_test, Y_test)
            classification.append(str(round(acc_test,3)) +', ' + str(norm_type) + ', ' + str(set_number)  + ', ' + str(bol_with_outlier)[0] + '\n')
            write_accuracies(acc_train, acc_test, output_file, norm_type, set_number, bol_with_outlier)
            # best feature and save
            model = find_better_features(X_train, Y_train)
            write_features_weights(model.scores_, output_file_feature, norm_type, set_number, bol_with_outlier)
            ''' without outlier '''
            bol_with_outlier = False
            # get input and output file paths
            input_train = get_input_data('train', set_number, norm_type, bol_with_outlier)
            input_test = get_input_data('test', set_number, norm_type, bol_with_outlier)
            # get data
            X_train, Y_train = load_data(input_train)
            X_test, Y_test = load_data(input_test)
            # classifier and save
            model = fit_model(X_train, Y_train)
            acc_train = classify_data(model, X_train, Y_train)
            acc_test = classify_data(model, X_test, Y_test)
            write_accuracies(acc_train, acc_test, output_file, norm_type, set_number, bol_with_outlier)
            classification.append(str(round(acc_test,3)) +', ' + str(norm_type) + ', ' + str(set_number)  + ', ' + str(bol_with_outlier)[0] + '\n')
            # best feature and save
            model = find_better_features(X_train, Y_train)
            write_features_weights(model.scores_, output_file_feature, norm_type, set_number, bol_with_outlier)
    ''' Order the best classifications and save in the end of the file, to help analysis'''
    classification.sort()
    with open(output_file_feature + 'good_feature', "a") as f:
        f.write("\n\n\nClassification\n\n")
        for feat in classification:
            f.write(feat + '\n')
        f.write("\n")
    print 'Results saved at ' + folder
    print 'Done!!!'
if __name__ == '__main__':
    main()
|
mit
|
j-silver/quantum_dots
|
multiple.py
|
1
|
2207
|
#!/usr/bin/env python
#
# multiple.py
#
# Three stacked subplots: (1) entropy and entropy production for the Redfield
# and completely-positive dynamics, (2) Redfield state evolution, (3) CP state
# evolution.  All data are read from *.dat files in the working directory.
# Import matplotlib and numpy modules
import matplotlib.pyplot as plt
import numpy as np
fig=plt.figure(1)
# Create the first subplot (first of 3 rows)
fig1=plt.subplot(311)
# Axes labels (vertical labels on the left)
plt.xlabel( '$t$')
plt.ylabel('$S(t)$', rotation='horizontal')
# Load the data as arrays. We take the transpose because each array must be homogeneous
# The first (index 0) is always the time
redent = (np.loadtxt('RED-ENTROPY.dat')).T
cpent  = (np.loadtxt( 'CP-ENTROPY.dat')).T
# Plot the the graphs
rfig = plt.plot(redent[0], redent[1], color='red',
                label='Redfield dynamics entropy')
cfig = plt.plot(cpent[0], cpent[1], color='blue',
                label='Completely positive dynamics entropy')
# Draw the grid
fig1.grid(True)
# Write the legend
plt.legend(('Redfield dynamics entropy',
            'Completely positive dynamics entropy'), loc='upper left',
           bbox_to_anchor=(0.2, 0.95))
# Create another label for the right vertical axis, leaving unaltered the horizontal one
ax = plt.twinx()
plt.ylabel('$\sigma(t)$', rotation='horizontal')
# Load the entropy production data into arrays
redprod = (np.loadtxt('RED-ENTROPY-PROD.dat')).T
cpprod = (np.loadtxt('CP-ENTROPY-PROD.dat')).T
# Plot the graphs, draw the grid and write the legend
rpfig = plt.plot(redprod[0], redprod[1], 'y-', label='Redfield entropy production')
cpfig = plt.plot(cpprod[0], cpprod[1], 'c-', label='Completely positive entropy prod.')
plt.grid(True)
plt.legend(('Redfield entropy prod.', 'Completely posit. entropy prod.'),
           loc='upper left', bbox_to_anchor=(0.2, 0.6))
# Second subplot: time-evolution of the states in the Redfield dynamics
fig2 = plt.subplot(312)
redstat = (np.loadtxt('RED-EVOLUTION.dat')).T
# Every component of the Bloch vector must be plotted separetely
plt.plot(redstat[0],redstat[1])
plt.plot(redstat[0],redstat[2])
plt.plot(redstat[0],redstat[3])
fig2.grid(True)
# Third subplot: time-evolution of the states in the CP dynamics
fig3=plt.subplot(313)
cpstat = (np.loadtxt('CP-EVOLUTION.dat')).T
plt.plot(cpstat[0],cpstat[1])
plt.plot(cpstat[0],cpstat[2])
plt.plot(cpstat[0],cpstat[3])
fig3.grid(True)
plt.show()
|
bsd-2-clause
|
alephu5/Soundbyte
|
environment/lib/python3.3/site-packages/pandas/sparse/tests/test_libsparse.py
|
1
|
11260
|
from pandas import Series
import nose
from numpy import nan
import numpy as np
import operator
from numpy.testing import assert_almost_equal, assert_equal
import pandas.util.testing as tm
from pandas.core.sparse import SparseSeries
from pandas import DataFrame
from pandas._sparse import IntIndex, BlockIndex
import pandas._sparse as splib
TEST_LENGTH = 20
plain_case = dict(xloc=[0, 7, 15],
xlen=[3, 5, 5],
yloc=[2, 9, 14],
ylen=[2, 3, 5],
intersect_loc=[2, 9, 15],
intersect_len=[1, 3, 4])
delete_blocks = dict(xloc=[0, 5],
xlen=[4, 4],
yloc=[1],
ylen=[4],
intersect_loc=[1],
intersect_len=[3])
split_blocks = dict(xloc=[0],
xlen=[10],
yloc=[0, 5],
ylen=[3, 7],
intersect_loc=[0, 5],
intersect_len=[3, 5])
skip_block = dict(xloc=[10],
xlen=[5],
yloc=[0, 12],
ylen=[5, 3],
intersect_loc=[12],
intersect_len=[3])
no_intersect = dict(xloc=[0, 10],
xlen=[4, 6],
yloc=[5, 17],
ylen=[4, 2],
intersect_loc=[],
intersect_len=[])
def check_cases(_check_case):
    """Run *_check_case* against every predefined fixture plus the
    degenerate empty-index cases."""
    def run(case):
        _check_case(case['xloc'], case['xlen'], case['yloc'], case['ylen'],
                    case['intersect_loc'], case['intersect_len'])

    for fixture in (plain_case, delete_blocks, split_blocks,
                    skip_block, no_intersect):
        run(fixture)

    # one or both is empty
    _check_case([0], [5], [], [], [], [])
    _check_case([], [], [], [], [], [])
def test_index_make_union():
    """Union of two sparse indexes must agree between the block and the
    dense (int) representations and match the expected blocks."""
    def _check_case(xloc, xlen, yloc, ylen, eloc, elen):
        xindex = BlockIndex(TEST_LENGTH, xloc, xlen)
        yindex = BlockIndex(TEST_LENGTH, yloc, ylen)

        bresult = xindex.make_union(yindex)
        assert(isinstance(bresult, BlockIndex))
        assert_equal(bresult.blocs, eloc)
        assert_equal(bresult.blengths, elen)

        ixindex = xindex.to_int_index()
        iyindex = yindex.to_int_index()
        iresult = ixindex.make_union(iyindex)
        assert(isinstance(iresult, IntIndex))
        assert_equal(iresult.indices, bresult.to_int_index().indices)

    # Each tuple: (xloc, xlen, yloc, ylen, expected_loc, expected_len).
    cases = [
        # x: ----
        # y:     ----
        # r: --------
        ([0], [5], [5], [4], [0], [9]),
        # x: -----     -----
        # y:   -----          --
        ([0, 10], [5, 5], [2, 17], [5, 2], [0, 10, 17], [7, 5, 2]),
        # x:  ------
        # y:    -------
        # r:  ----------
        ([1], [5], [3], [5], [1], [7]),
        # x:  ------  -----
        # y:    -------
        # r:  -------------
        ([2, 10], [4, 4], [4], [8], [2], [12]),
        # x:  ---     -----
        # y: -------
        # r: ----------
        ([0, 5], [3, 5], [0], [7], [0], [10]),
        # x:  ------  -----
        # y:    -------  ---
        # r:  -------------
        ([2, 10], [4, 4], [4, 13], [8, 4], [2], [15]),
        # x: ----------------------
        # y:   ----  ----  ---
        # r: ----------------------
        ([2], [15], [4, 9, 14], [3, 2, 2], [2], [15]),
        # x: ---        ---
        # y:      ---        ---
        ([0, 10], [3, 3], [5, 15], [2, 2], [0, 5, 10, 15], [3, 2, 3, 2]),
    ]
    for xloc, xlen, yloc, ylen, eloc, elen in cases:
        _check_case(xloc, xlen, yloc, ylen, eloc, elen)
# TODO: different-length index objects
def test_lookup():
    """lookup() maps an absolute position to its rank among the stored
    points, or -1 when the position is not covered."""
    def _check(index):
        # (position, expected result); blocks cover [5, 8) and [12, 18).
        expectations = [(0, -1), (5, 0), (7, 2), (8, -1), (9, -1),
                        (10, -1), (11, -1), (12, 3), (17, 8), (18, -1)]
        for pos, expected in expectations:
            assert(index.lookup(pos) == expected)

    bindex = BlockIndex(20, [5, 12], [3, 6])
    iindex = bindex.to_int_index()
    _check(bindex)
    _check(iindex)
# corner cases
def test_intersect():
    """Intersection must match expectations in both representations and
    reject indexes of different total length."""
    def _assert_intersection(a, b, expected):
        assert(a.intersect(b).equals(expected))

    def _assert_length_mismatch(a, longer):
        nose.tools.assert_raises(Exception, a.intersect, longer)

    def _check_case(xloc, xlen, yloc, ylen, eloc, elen):
        xindex = BlockIndex(TEST_LENGTH, xloc, xlen)
        yindex = BlockIndex(TEST_LENGTH, yloc, ylen)
        expected = BlockIndex(TEST_LENGTH, eloc, elen)
        longer_index = BlockIndex(TEST_LENGTH + 1, yloc, ylen)

        _assert_intersection(xindex, yindex, expected)
        _assert_intersection(xindex.to_int_index(), yindex.to_int_index(),
                             expected.to_int_index())

        _assert_length_mismatch(xindex, longer_index)
        _assert_length_mismatch(xindex.to_int_index(),
                                longer_index.to_int_index())

    check_cases(_check_case)
class TestBlockIndex(tm.TestCase):
    """Unit tests for the block-compressed sparse index."""

    def test_equals(self):
        idx = BlockIndex(10, [0, 4], [2, 5])
        self.assert_(idx.equals(idx))
        # Same block locations but a different length -> not equal.
        self.assert_(not idx.equals(BlockIndex(10, [0, 4], [2, 6])))

    def test_check_integrity(self):
        empty_locs = []
        empty_lengths = []
        # A zero-length index with no blocks is valid.
        index = BlockIndex(0, empty_locs, empty_lengths)
        # So is a non-empty index with no blocks.
        index = BlockIndex(1, empty_locs, empty_lengths)
        # A block extending beyond the end must be rejected...
        self.assertRaises(Exception, BlockIndex, 10, [5], [10])
        # ...as must overlapping blocks.
        self.assertRaises(Exception, BlockIndex, 10, [2, 5], [5, 3])

    def test_to_int_index(self):
        dense = BlockIndex(20, [0, 10], [4, 6]).to_int_index()
        assert_equal(dense.indices, [0, 1, 2, 3, 10, 11, 12, 13, 14, 15])

    def test_to_block_index(self):
        idx = BlockIndex(10, [0, 5], [4, 5])
        # Converting a block index to block form is the identity.
        self.assert_(idx.to_block_index() is idx)
class TestIntIndex(tm.TestCase):
    """Unit tests for the dense (integer) sparse index."""

    def test_equals(self):
        idx = IntIndex(10, [0, 1, 2, 3, 4])
        self.assert_(idx.equals(idx))
        self.assert_(not idx.equals(IntIndex(10, [0, 1, 2, 3])))

    def test_to_block_index(self):
        def _check_case(xloc, xlen, yloc, ylen, eloc, elen):
            xindex = BlockIndex(TEST_LENGTH, xloc, xlen)
            yindex = BlockIndex(TEST_LENGTH, yloc, ylen)
            # The round trip through the dense form must be lossless.
            xbindex = xindex.to_int_index().to_block_index()
            ybindex = yindex.to_int_index().to_block_index()
            tm.assert_isinstance(xbindex, BlockIndex)
            self.assert_(xbindex.equals(xindex))
            self.assert_(ybindex.equals(yindex))

        check_cases(_check_case)

    def test_to_int_index(self):
        idx = IntIndex(10, [2, 3, 4, 5, 6])
        # Converting an int index to int form is the identity.
        self.assert_(idx.to_int_index() is idx)
class TestSparseOperators(tm.TestCase):
    """Check the C sparse arithmetic kernels against dense Series results.

    Concrete test methods (test_add, test_nanadd, ...) are attached to this
    class dynamically after its definition.
    """

    def _nan_op_tests(self, sparse_op, python_op):
        # sparse_op is a splib.sparse_nan<op> kernel; python_op the matching
        # operator.<op> used to compute the dense reference result.
        def _check_case(xloc, xlen, yloc, ylen, eloc, elen):
            xindex = BlockIndex(TEST_LENGTH, xloc, xlen)
            yindex = BlockIndex(TEST_LENGTH, yloc, ylen)
            xdindex = xindex.to_int_index()
            ydindex = yindex.to_int_index()
            # Distinct magnitudes (10s vs 100s) make mismatches obvious.
            x = np.arange(xindex.npoints) * 10. + 1
            y = np.arange(yindex.npoints) * 100. + 1
            # The block-index and int-index kernels must agree.
            result_block_vals, rb_index = sparse_op(x, xindex, y, yindex)
            result_int_vals, ri_index = sparse_op(x, xdindex, y, ydindex)
            self.assert_(rb_index.to_int_index().equals(ri_index))
            assert_equal(result_block_vals, result_int_vals)
            # check versus Series...
            xseries = Series(x, xdindex.indices)
            yseries = Series(y, ydindex.indices)
            series_result = python_op(xseries, yseries).valid()
            assert_equal(result_block_vals, series_result.values)
            assert_equal(result_int_vals, series_result.values)
        check_cases(_check_case)

    def _op_tests(self, sparse_op, python_op):
        # Same as _nan_op_tests, but positions missing from an index are
        # treated as holding an explicit fill value instead of NaN.
        def _check_case(xloc, xlen, yloc, ylen, eloc, elen):
            xindex = BlockIndex(TEST_LENGTH, xloc, xlen)
            yindex = BlockIndex(TEST_LENGTH, yloc, ylen)
            xdindex = xindex.to_int_index()
            ydindex = yindex.to_int_index()
            x = np.arange(xindex.npoints) * 10. + 1
            y = np.arange(yindex.npoints) * 100. + 1
            xfill = 0
            yfill = 2
            result_block_vals, rb_index = sparse_op(
                x, xindex, xfill, y, yindex, yfill)
            result_int_vals, ri_index = sparse_op(x, xdindex, xfill,
                                                  y, ydindex, yfill)
            self.assert_(rb_index.to_int_index().equals(ri_index))
            assert_equal(result_block_vals, result_int_vals)
            # check versus Series...
            # Reindex to full length with the fill values so the dense op
            # sees exactly what the sparse kernel assumes.
            xseries = Series(x, xdindex.indices)
            xseries = xseries.reindex(np.arange(TEST_LENGTH)).fillna(xfill)
            yseries = Series(y, ydindex.indices)
            yseries = yseries.reindex(np.arange(TEST_LENGTH)).fillna(yfill)
            series_result = python_op(xseries, yseries)
            series_result = series_result.reindex(ri_index.indices)
            assert_equal(result_block_vals, series_result.values)
            assert_equal(result_int_vals, series_result.values)
        check_cases(_check_case)
# too cute? oh but how I abhor code duplication
# Dynamically generate one test method per arithmetic operator, in two
# flavors (NaN-fill and explicit-fill), and attach them to
# TestSparseOperators so nose discovers them individually.
check_ops = ['add', 'sub', 'mul', 'truediv', 'floordiv']


def make_nanoptestf(op):
    # Build a test method exercising the sparse_nan<op> kernel.
    def f(self):
        sparse_op = getattr(splib, 'sparse_nan%s' % op)
        python_op = getattr(operator, op)
        self._nan_op_tests(sparse_op, python_op)
    # Rename so nose reports a distinct test per operator.
    f.__name__ = 'test_nan%s' % op
    return f


def make_optestf(op):
    # Build a test method exercising the sparse_<op> kernel.
    def f(self):
        sparse_op = getattr(splib, 'sparse_%s' % op)
        python_op = getattr(operator, op)
        self._op_tests(sparse_op, python_op)
    f.__name__ = 'test_%s' % op
    return f


for op in check_ops:
    f = make_nanoptestf(op)
    g = make_optestf(op)
    setattr(TestSparseOperators, f.__name__, f)
    setattr(TestSparseOperators, g.__name__, g)
# Keep the module namespace clean after the loop.
del f
del g
if __name__ == '__main__':
    import nose
    # -x stops at the first failure; --pdb/--pdb-failure drop into the
    # debugger on errors and assertion failures respectively.
    nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
                   exit=False)
|
gpl-3.0
|
arjoly/scikit-learn
|
examples/cross_decomposition/plot_compare_cross_decomposition.py
|
128
|
4761
|
"""
===================================
Compare cross decomposition methods
===================================
Simple usage of various cross decomposition algorithms:
- PLSCanonical
- PLSRegression, with multivariate response, a.k.a. PLS2
- PLSRegression, with univariate response, a.k.a. PLS1
- CCA
Given 2 multivariate covarying two-dimensional datasets, X, and Y,
PLS extracts the 'directions of covariance', i.e. the components of each
datasets that explain the most shared variance between both datasets.
This is apparent on the **scatterplot matrix** display: components 1 in
dataset X and dataset Y are maximally correlated (points lie around the
first diagonal). This is also true for components 2 in both dataset,
however, the correlation across datasets for different components is
weak: the point cloud is very spherical.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.cross_decomposition import PLSCanonical, PLSRegression, CCA
###############################################################################
# Dataset based latent variables model
n = 500
# 2 latents vars:
l1 = np.random.normal(size=n)
l2 = np.random.normal(size=n)
latents = np.array([l1, l1, l2, l2]).T
X = latents + np.random.normal(size=4 * n).reshape((n, 4))
Y = latents + np.random.normal(size=4 * n).reshape((n, 4))
X_train = X[:n / 2]
Y_train = Y[:n / 2]
X_test = X[n / 2:]
Y_test = Y[n / 2:]
print("Corr(X)")
print(np.round(np.corrcoef(X.T), 2))
print("Corr(Y)")
print(np.round(np.corrcoef(Y.T), 2))
###############################################################################
# Canonical (symmetric) PLS
# Transform data
# ~~~~~~~~~~~~~~
plsca = PLSCanonical(n_components=2)
plsca.fit(X_train, Y_train)
X_train_r, Y_train_r = plsca.transform(X_train, Y_train)
X_test_r, Y_test_r = plsca.transform(X_test, Y_test)
# Scatter plot of scores
# ~~~~~~~~~~~~~~~~~~~~~~
# 1) On diagonal plot X vs Y scores on each components
plt.figure(figsize=(12, 8))
plt.subplot(221)
plt.plot(X_train_r[:, 0], Y_train_r[:, 0], "ob", label="train")
plt.plot(X_test_r[:, 0], Y_test_r[:, 0], "or", label="test")
plt.xlabel("x scores")
plt.ylabel("y scores")
plt.title('Comp. 1: X vs Y (test corr = %.2f)' %
np.corrcoef(X_test_r[:, 0], Y_test_r[:, 0])[0, 1])
plt.xticks(())
plt.yticks(())
plt.legend(loc="best")
plt.subplot(224)
plt.plot(X_train_r[:, 1], Y_train_r[:, 1], "ob", label="train")
plt.plot(X_test_r[:, 1], Y_test_r[:, 1], "or", label="test")
plt.xlabel("x scores")
plt.ylabel("y scores")
plt.title('Comp. 2: X vs Y (test corr = %.2f)' %
np.corrcoef(X_test_r[:, 1], Y_test_r[:, 1])[0, 1])
plt.xticks(())
plt.yticks(())
plt.legend(loc="best")
# 2) Off diagonal plot components 1 vs 2 for X and Y
plt.subplot(222)
plt.plot(X_train_r[:, 0], X_train_r[:, 1], "*b", label="train")
plt.plot(X_test_r[:, 0], X_test_r[:, 1], "*r", label="test")
plt.xlabel("X comp. 1")
plt.ylabel("X comp. 2")
plt.title('X comp. 1 vs X comp. 2 (test corr = %.2f)'
% np.corrcoef(X_test_r[:, 0], X_test_r[:, 1])[0, 1])
plt.legend(loc="best")
plt.xticks(())
plt.yticks(())
plt.subplot(223)
plt.plot(Y_train_r[:, 0], Y_train_r[:, 1], "*b", label="train")
plt.plot(Y_test_r[:, 0], Y_test_r[:, 1], "*r", label="test")
plt.xlabel("Y comp. 1")
plt.ylabel("Y comp. 2")
plt.title('Y comp. 1 vs Y comp. 2 , (test corr = %.2f)'
% np.corrcoef(Y_test_r[:, 0], Y_test_r[:, 1])[0, 1])
plt.legend(loc="best")
plt.xticks(())
plt.yticks(())
plt.show()
###############################################################################
# PLS regression, with multivariate response, a.k.a. PLS2
n = 1000
q = 3
p = 10
X = np.random.normal(size=n * p).reshape((n, p))
B = np.array([[1, 2] + [0] * (p - 2)] * q).T
# each Yj = 1*X1 + 2*X2 + noize
Y = np.dot(X, B) + np.random.normal(size=n * q).reshape((n, q)) + 5
pls2 = PLSRegression(n_components=3)
pls2.fit(X, Y)
print("True B (such that: Y = XB + Err)")
print(B)
# compare pls2.coef_ with B
print("Estimated B")
print(np.round(pls2.coef_, 1))
pls2.predict(X)
###############################################################################
# PLS regression, with univariate response, a.k.a. PLS1
n = 1000
p = 10
X = np.random.normal(size=n * p).reshape((n, p))
y = X[:, 0] + 2 * X[:, 1] + np.random.normal(size=n * 1) + 5
pls1 = PLSRegression(n_components=3)
pls1.fit(X, y)
# note that the number of compements exceeds 1 (the dimension of y)
print("Estimated betas")
print(np.round(pls1.coef_, 1))
###############################################################################
# CCA (PLS mode B with symmetric deflation)
cca = CCA(n_components=2)
cca.fit(X_train, Y_train)
X_train_r, Y_train_r = plsca.transform(X_train, Y_train)
X_test_r, Y_test_r = plsca.transform(X_test, Y_test)
|
bsd-3-clause
|
RecipeML/Recipe
|
recipe/preprocessors/fag.py
|
1
|
1645
|
# -*- coding: utf-8 -*-
"""
Copyright 2016 Walter José and Alex de Sá
This file is part of the RECIPE Algorithm.
The RECIPE is free software: you can redistribute it and/or
modify it under the terms of the GNU General Public License as published by the
Free Software Foundation, either version 3 of the License, or (at your option)
any later version.
RECIPE is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. See http://www.gnu.org/licenses/.
"""
from sklearn.cluster import FeatureAgglomeration
def fag(args):
    """Build scikit-learn's FeatureAgglomeration from string-encoded
    hyper-parameters.

    Parameters
    ----------
    args : sequence of str
        args[1] : affinity metric ("euclidean", "l1", "l2", "manhattan",
            "cosine" or "precomputed"; "ward" linkage accepts only
            "euclidean").
        args[2] : linkage criterion ("ward", "complete" or "average"),
            determining which distance to use between sets of features.
        args[3] : compute_full_tree flag; True iff the string contains
            "True".
        args[4] : number of clusters to find (parsed as int).
    """
    affinity = args[1]
    linkage = args[2]
    compute_full_tree = "True" in args[3]
    n_clusters = int(args[4])
    return FeatureAgglomeration(n_clusters=n_clusters, affinity=affinity,
                                connectivity=None,
                                compute_full_tree=compute_full_tree,
                                linkage=linkage)
|
gpl-3.0
|
rlowrance/re-avm
|
chart-03.py
|
1
|
8395
|
'''create charts showing results of rfval.py
INVOCATION
python chart-03.py [--data] [--test]
INPUT FILES
INPUT/rfval/YYYYMM.pickle
OUTPUT FILES
WORKING/chart-03/[test-]data.pickle
WORKING/chart-03/[test-]VAR-YYYY[-MM].pdf
where
VAR in {max_depth | max_features}
YYYY in {2004 | 2005 | 2006 | 2007 | 2008 | 2009}
MM in {02 | 05 | 08 | 11}
'''
from __future__ import division
import cPickle as pickle
import glob
import matplotlib.pyplot as plt
import numpy as np
import os
import pandas as pd
import pdb
from pprint import pprint
import random
import sys
from AVM import AVM
from Bunch import Bunch
from columns_contain import columns_contain
from Logger import Logger
from ParseCommandLine import ParseCommandLine
from Path import Path
from rfval import ResultKey, ResultValue
cc = columns_contain
def usage(msg=None):
    # Print the module docstring (plus an optional error message) and exit
    # with status 1.  NOTE: Python 2 print statements throughout this file.
    print __doc__
    if msg is not None:
        print msg
    sys.exit(1)
def make_control(argv):
    # return a Bunch
    # Parse the command line (recognized flags: --data, --test), seed the
    # RNG, and assemble all run-time paths/settings into one Bunch.
    # NOTE(review): assumes Path().dir_working() ends with a path
    # separator -- confirm against the Path helper.
    print argv
    if len(argv) not in (1, 2, 3):
        usage('invalid number of arguments')
    pcl = ParseCommandLine(argv)
    arg = Bunch(
        base_name='chart-03',
        data=pcl.has_arg('--data'),
        test=pcl.has_arg('--test'),
    )
    random_seed = 123
    random.seed(random_seed)
    dir_working = Path().dir_working()
    debug = False
    # Test runs read/write a separate reduction file.
    reduced_file_name = ('test-' if arg.test else '') + 'data.pickle'
    # assure output directory exists
    dir_path = dir_working + arg.base_name + '/'
    if not os.path.exists(dir_path):
        os.makedirs(dir_path)
    return Bunch(
        arg=arg,
        debug=debug,
        path_in_ege=dir_working + 'rfval/*.pickle',
        path_reduction=dir_path + reduced_file_name,
        path_chart_base=dir_path,
        random_seed=random_seed,
        test=arg.test,
    )
def make_chart(df, hp, control, ege_control):
    'write one txt file for each n_months_back'
    # hp names the hyperparameter being charted: 'max_depth' or
    # 'max_features'.  One PDF per (year, month) and one per whole year is
    # written under control.path_chart_base.
    def make_subplot(test_period, n_months_back, loss_metric):
        'mutate the default axes'
        # One curve per n_estimators value; x axis is the hp value.
        for i, n_estimators in enumerate(sorted(set(df.n_estimators))):
            mask = (
                (df.test_period == test_period) &
                (df.n_months_back == n_months_back) &
                (df.n_estimators == n_estimators) &
                (~df.max_depth.isnull() if hp == 'max_depth' else ~df.max_features.isnull())
            )
            subset = df.loc[mask]
            if hp == 'max_depth':
                x_values = sorted(set(subset.max_depth))
                assert len(x_values) == len(subset)
                x = np.empty(len(x_values), dtype=int)
                y = np.empty(len(x_values), dtype=float)
                for ii, max_depth_value in enumerate(x_values):
                    # select one row
                    mask2 = subset.max_depth == max_depth_value
                    subset2 = subset.loc[mask2]
                    assert len(subset2) == 1
                    row = subset2.iloc[0]
                    x[ii] = row['max_depth']
                    y[ii] = row[loss_metric]
            else:
                assert hp == 'max_features'
                # max_features mixes ints, floats and strings, so x must be
                # dtype=object; the ordering below is fixed by convention.
                x_values = (1, 'sqrt', 'log2', 0.1, 0.3, 'auto')
                if len(x_values) != len(subset):
                    pdb.set_trace()
                assert len(x_values) == len(subset)
                x = np.empty(len(x_values), dtype=object)
                y = np.empty(len(x_values), dtype=float)
                for ii, max_features_value in enumerate(x_values):
                    # select one row
                    mask2 = subset.max_features == max_features_value
                    subset2 = subset.loc[mask2]
                    assert len(subset2) == 1
                    row = subset2.iloc[0]
                    x[ii] = row['max_features']
                    y[ii] = row[loss_metric]
            # losses are displayed in units of $1000
            plt.plot(y / 1000.0,
                     label=('n_estimators: %d' % n_estimators),
                     linestyle=[':', '-.', '--', '-'][i % 4],
                     color='bgrcmykw'[i % 8],
                     )
        plt.xticks(range(len(y)), x, size='xx-small', rotation='vertical')
        plt.yticks(size='xx-small')
        plt.title('yr-mo %s-%s bk %d' % (test_period[:4], test_period[4:], n_months_back),
                  loc='left',
                  fontdict={'fontsize': 'xx-small', 'style': 'italic'},
                  )
        return

    def make_figure(year, months):
        # Build one grid of subplots (rows: test periods, cols:
        # n_months_back) and save it as a PDF.
        print 'make_figure', hp, year, months
        test_periods_typical = [str(year * 100 + month)
                                for month in months
                                ]
        # Only February data exists for 2009.
        test_periods = ('200902',) if year == 2009 else test_periods_typical
        plt.figure()  # new figure
        # plt.suptitle('Loss by Test Period, Tree Max Depth, N Trees')  # overlays the subplots
        loss_metric = 'rmse'
        loss_metric = 'mae'
        axes_number = 0
        n_months_backs = range(1, 7, 1)
        last_test_period_index = len(test_periods) - 1
        last_n_months_back_index = len(n_months_backs) - 1
        for test_period_index, test_period in enumerate(test_periods):
            for n_months_back_index, n_months_back in enumerate(n_months_backs):
                axes_number += 1  # count across rows
                plt.subplot(len(test_periods), len(n_months_backs), axes_number)
                make_subplot(test_period, n_months_back, loss_metric)
                if test_period_index == last_test_period_index:
                    # annotate the bottom row only
                    if n_months_back_index == 0:
                        plt.xlabel(hp)
                        plt.ylabel('%s x $1000' % loss_metric)
                    if n_months_back_index == last_n_months_back_index:
                        plt.legend(loc='best', fontsize=5)
        plt.tight_layout(pad=0.4, w_pad=0.5, h_pad=1.0)
        # NOTE: '%02d' % months relies on months being a 1-tuple here.
        out_suffix = '-%02d' % months if len(months) == 1 else ''
        plt.savefig(control.path_chart_base + hp + '-' + str(year) + out_suffix + '.pdf')
        plt.close()

    for year in (2004, 2005, 2006, 2007, 2008, 2009):
        months = (2,) if year == 2009 else (2, 5, 8, 11)
        # One chart per individual month, then one for the whole year.
        for month in months:
            make_figure(year, (month,))
        make_figure(year, months)
        if control.test:
            break
def make_data(control):
'return data frame, ege_control'
def process_file(path, rows_list):
'mutate rows_list to include gscv object info at path'
print 'reducing', path
with open(path, 'rb') as f:
rfval_result, ege_control = pickle.load(f)
for k, v in rfval_result.iteritems():
actuals = v.actuals.values
predictions = v.predictions
errors = actuals - predictions
rmse = np.sqrt(np.sum(errors * errors) / (1.0 * len(errors)))
median_absolute_error = np.median(np.abs(errors))
row = {
'n_months_back': k.n_months_back,
'n_estimators': k.n_estimators,
'max_depth': k.max_depth,
'max_features': k.max_features,
'test_period': str(k.yyyymm),
'rmse': rmse,
'mae': median_absolute_error,
}
rows_list.append(row)
rows_list = []
for file in glob.glob(control.path_in_ege):
ege_control = process_file(file, rows_list)
df = pd.DataFrame(rows_list)
return df, ege_control # return last ege_control, not all
def main(argv):
    # Entry point: either rebuild the reduction (--data) and pickle it, or
    # load a previous reduction and draw both chart families.
    control = make_control(argv)
    sys.stdout = Logger(base_name=control.arg.base_name)  # tee stdout to a log file
    print control
    if control.arg.data:
        df, ege_control = make_data(control)
        with open(control.path_reduction, 'wb') as f:
            pickle.dump((df, ege_control, control), f)
    else:
        with open(control.path_reduction, 'rb') as f:
            df, ege_control, data_control = pickle.load(f)
        make_chart(df, 'max_depth', control, ege_control)
        make_chart(df, 'max_features', control, ege_control)
    print control
    if control.test:
        print 'DISCARD OUTPUT: test'
    print 'done'
    return
if __name__ == '__main__':
    if False:
        # avoid pyflakes warnings (reference otherwise-"unused" imports)
        pdb.set_trace()
        pprint()
        pd.DataFrame()
        np.array()
        AVM()
        ResultKey
        ResultValue
    main(sys.argv)
|
bsd-3-clause
|
gyanderson/SimColumn
|
simulation.py
|
1
|
16850
|
#!/usr/bin/python -tt
""" Simulate a distillation """
import sys
from scipy.integrate import odeint
import numpy as np
import matplotlib.pyplot as plt
eps = sys.float_info.epsilon
def Antoine(T, species):
    """Vapor pressure (bar) of a pure species at temperature T (K).

    Uses the Antoine equation log10(P) = A - B / (C + T) with tabulated
    per-species coefficients.
    """
    coefficients = {
        'water': [5.0768, 1659.793, -45.854],
        'ethanol': [5.24677, 1598.673, -46.424],
        'propanoic acid': [4.74558, 1679.869, -59.832],
        'butanoic acid': [4.90904, 1793.898, -70.564],
        'pentanoic acid': [3.2075, 879.771, -172.237],
        'hexanoic acid': [4.34853, 1512.718, -129.255],
        'heptanoic acid': [4.30691, 1536.114, -137.446],
        'octanoic acid': [4.25235, 1530.446, -150.12],
        'nonanoic acid': [2.54659, 733.594, -235.239],
        'decanoic acid': [2.4645, 733.581, -256.708],
        'isopropanol': [4.8610, 1357.427, -75.814],
        '1-butanol': [4.50393, 1313.878, -98.789],
        'isobutanol': [4.43126, 1236.911, -101.528],
        '2-pentanol': [4.42349, 1291.212, -100.017],
        '1-pentanol': [4.68277, 1492.549, -91.621],
        'hexanol': [4.41271, 1422.031, -107.706],
        'heptanol': [3.9794, 1256.783, -133.487],
        'octanol': [3.74844, 1196.639, -149.043],
        'nonanol': [3.96157, 1373.417, -139.182],
        'decanol': [3.51869, 1180.306, -168.829],
    }
    coeff_a, coeff_b, coeff_c = coefficients[species]
    return 10**(coeff_a - coeff_b/(coeff_c + T))  # bar
def emperical_vapor_composition(abv):
    """Empirical vapor-phase ABV above a boiling ethanol:water mixture.

    Eighth-order polynomial fit of vapor composition versus liquid
    composition, both as ethanol volume fractions.
    """
    vapor_abv = (-94.7613*abv**8.0 + 450.932*abv**7.0 - 901.175*abv**6.0
                 + 985.803*abv**5.0 - 644.997*abv**4.0 + 259.985*abv**3.0
                 - 64.5050*abv**2.0 + 9.71706*abv)
    return vapor_abv
def calculated_vapor_composition(abv):  # Doesn't take into account deviations from Raoult
    """Vapor-phase ethanol fraction from Antoine vapor pressures plus
    Raoult's law (ideal-solution assumption)."""
    T = temp(abv)
    # Pure-component vapor pressures at the boiling temperature, in bar.
    ethanol_vapor_pressure = Antoine(T, 'ethanol')
    water_vapor_pressure = Antoine(T, 'water')
    # Convert volume fractions to mole fractions via density (g/mL) and
    # molecular weight (g/mol).
    ethanol_density = 0.789
    ethanol_MW = 46.06844
    water_density = 1.0
    water_MW = 18.01528
    ethanol_moles = abv*ethanol_density/ethanol_MW
    water_moles = (1-abv)*water_density/water_MW
    total_moles = ethanol_moles + water_moles
    ethanol_molar_fraction = ethanol_moles / total_moles
    water_molar_fraction = water_moles / total_moles
    ethanol_partial_pressure = ethanol_vapor_pressure*ethanol_molar_fraction  # bar
    water_partial_pressure = water_vapor_pressure*water_molar_fraction  # bar
    return ethanol_partial_pressure / (ethanol_partial_pressure + water_partial_pressure)
def temp(abv):
    """Boiling temperature (K) of an ethanol:water mixture at the given
    liquid ABV; degenerate pure-component inputs collapse to a flat 300 K."""
    tiny = sys.float_info.epsilon
    if abv < tiny or 1.0 - abv < tiny:
        return 300.0
    # Quartic empirical fit in Celsius, shifted to Kelvin.
    return 60.526*abv**4.0 - 163.16*abv**3.0 + 163.96*abv**2.0 - 83.438*abv + 100.0 + 273.15  # K
def vaporization_enthalpy(T, species):
    """Heat of vaporization (J/mol) at temperature T (K) via a Watson-style
    correlation with tabulated per-species constants."""
    constants = {'ethanol': [50.43, -0.4475, 0.4989, 413.9]}
    A, alpha, beta, Tc = constants[species]
    reduced = T/Tc  # reduced temperature
    return A*np.e**(-alpha*reduced)*(1.0 - reduced)**beta*1000.0  # J/mol
def binary_vaporization_rate(abv, power, T):
    """mL/s of liquid vaporized by `power` watts, treating the mixture's
    latent heat as a molar blend of the pure components.

    Latent heats are fixed at their normal-boiling-point values, so `T`
    is currently unused (kept for interface compatibility).
    """
    ETHANOL_DENSITY = 0.789      # g/mL
    ETHANOL_MW = 46.06844        # g/mol
    ETHANOL_ENTHALPY = 38600.0   # J/mol
    WATER_DENSITY = 1.0          # g/mL
    WATER_MW = 18.01528          # g/mol
    WATER_ENTHALPY = 40650.0     # J/mol
    # Energy needed to vaporize one mL of the blend, J/mL.
    heat_per_ml = (abv*ETHANOL_DENSITY/ETHANOL_MW*ETHANOL_ENTHALPY
                   + (1.0-abv)*WATER_DENSITY/WATER_MW*WATER_ENTHALPY)
    return power/heat_per_ml  # mL of liquid / s
def reflux(ethanol, water):
    """Reflux rate (mL/s) draining through a plate hole, driven by the
    liquid head on the plate (Torricelli: Q = C * sqrt(h))."""
    tiny = sys.float_info.epsilon
    plate_hole_radius = 0.0005  # m
    column_radius = .0779/2.0   # column radius in m
    # C has units m^(3/2)/s; sqrt(2g)*hole_area is a good starting value
    # for this tunable constant.
    C = ((2*9.81)**(0.5))*(np.pi*plate_hole_radius**2.0)
    total = ethanol + water  # mL
    if total < tiny:
        height = tiny
    else:
        height = (total/1000000.0)/(np.pi*column_radius**2.0)  # m
    return 1000000.0*C*height**(0.5)  # mL/s
def congener_mole_fraction_float(congener, ethanol, water):
    """Mole fraction of the congener in a congener/ethanol/water mixture.

    `congener` is in mol; `ethanol` and `water` are volumes in mL that are
    converted to moles via density/MW. Returns 0.0 for an (effectively)
    empty mixture to avoid dividing by zero.
    """
    ethanol_density = 0.789  # g/mL
    ethanol_MW = 46.06844    # g/mol
    water_density = 1.0      # g/mL
    water_MW = 18.01528      # g/mol
    total_moles = congener + ethanol*ethanol_density/ethanol_MW + water*water_density/water_MW
    if total_moles < sys.float_info.epsilon:
        return 0.0
    return congener / total_moles
def congener_molarity_float(congener, ethanol, water):
    """Congener concentration (mol per mL of liquid); 0.0 when the plate
    holds essentially no liquid."""
    volume = ethanol + water  # mL
    if volume < sys.float_info.epsilon:
        # Could change this to the actual molarity of the pure congener?
        return 0.0
    return congener / volume
def abv_float(ethanol, water):
    """Ethanol volume fraction of the mixture; degenerate inputs (either
    component essentially absent) collapse to machine epsilon."""
    tiny = sys.float_info.epsilon
    if ethanol < tiny or water < tiny or (ethanol + water) < tiny:
        return tiny
    return ethanol / (ethanol + water)
def initialize(ethanol, water, plates, congener):
    """Initial state vector for the column ODE: one [ethanol, water,
    congener] triple per plate, with the entire charge in the pot
    (plate 0) and every other plate empty."""
    state = [0.0] * (plates * 3)
    state[0:3] = [ethanol, water, congener]  # initialize the pot
    return state
def col(y,t,plates,congener_identity,reflux_knob,power):
    """
    Col is a vector of the form [e1, h1, c1, e2, h2, c2...en, hn, cn] where e and h are mL, and cn is molarity of a congener (takes up 0 volume)

    ODE right-hand side for odeint. Plate 0 is the pot, the second-to-last
    triple is the reverse liquid management head, and the final triple
    accumulates takeoff; intermediate plates exchange vapor upward and
    reflux downward. `t` is unused (autonomous system).
    NOTE(review): the code below treats y[i+2] as mol (see the `# mol`
    comment) while this docstring says molarity -- reconcile.
    """
    #ethanol_tot =
    #water_tot =
    #congener_tot =
    """
    for i in range(len(y)):
        if y[i] < eps:
            y[i] = 0.0
    """
    ethanol_density = 0.789 # g/mL
    ethanol_MW = 46.06844 # g/mol
    water_density = 1.0 # g/mL
    water_MW = 18.01528 # g/mol
    tube_area = np.pi*(.007899/2.0)**2 # m^2, cross sectional area of liquid management tubing
    Vdead = 0.330*tube_area # m^3 This volume must be occupied before fluid can go to reflux 330 mm is just an estimate
    h2max = 0.011 # m, a design parameter, could also measure actual h2
    Vtakeoff = Vdead + (h2max*tube_area)*2.0 # m^3 Once this volume is occupied, all excess goes to takeoff
    #print('Vdead = %2.10f' %Vdead) #16.17 mL
    #print('Vtakeoff = %2.10f' %Vtakeoff) #17.25 mL
    # One [d_ethanol, d_water, d_congener] derivative slot per plate.
    dydt = []
    for i in range(plates*3):
        dydt.append(0)
    # Walk every plate except the takeoff triple; each plate adds its
    # contributions to itself and its neighbours.
    for i in range(0,(len(dydt)-3),3):
        # Clamp tiny/negative volumes to a small positive amount so abv and
        # reflux stay well-defined.
        if y[i] > eps:
            ethanol = y[i] # mL
        else:
            ethanol = 0.001
        if y[i+1] > eps:
            water = y[i+1] # mL
        else:
            water = 0.001
        abv = abv_float(ethanol,water)
        vapor_abv = emperical_vapor_composition(abv)
        T = temp(abv) # K
        #vapor_abv = calculated_vapor_composition(abv)
        reflux_rate = reflux(ethanol,water) # mL/s
        vap_rate = binary_vaporization_rate(abv,power,T) # mL of liquid / s
        congener = y[i+2] # mol
        congener_molarity = congener_molarity_float(congener,ethanol,water)
        congener_mole_fraction = congener_mole_fraction_float(congener,ethanol,water)
        congener_vapor_pressure = Antoine(T,congener_identity) # bar
        congener_partial_pressure = congener_vapor_pressure*congener_mole_fraction # bar
        #print 'i = ' + str(i) + ' water = ' + str(water) + ' ethanol = ' + str(ethanol) + ' abv = ' + str(abv) + ' vapor rate ' + str(vap_rate) + ' t = ' + str(t)
        if i == 0: # Pot
            # Vaporization only: liquid leaves the pot and condenses on
            # plate 1; congener transport scales with its partial pressure
            # (divided by 1.01325 bar/atm).
            if y[i] > eps:
                dydt[i] += - vapor_abv*vap_rate # current plate ethanol
            if y[i+1] > eps:
                dydt[i+1] += -(1.0-vapor_abv)*vap_rate # current plate water
            if y[i+2] > eps:
                dydt[i+2] += - congener_molarity*congener_partial_pressure/1.01325*vap_rate# current plate congener
            if y[i] > eps:
                dydt[i+3] += vapor_abv*vap_rate # upper plate ethanol
            if y[i+1] > eps:
                dydt[i+4] += (1.0-vapor_abv)*vap_rate # upper plate water
            if y[i+2] > eps:
                dydt[i+5] += congener_molarity*congener_partial_pressure/1.01325*vap_rate # upper plate congener
            #print abv
        elif i == (len(y)-6): # Reverse Liquid Management Head
            # Liquid above the dead volume builds a head h2 that drives
            # flow through the reflux needle valve.
            if ((ethanol + water)/1000000 - Vdead) > eps:
                h2 = ((ethanol+water)/1000000 - Vdead)/(2*tube_area) # m, height driving flow through needle valve
            else:
                h2 = eps
            if h2 < eps:
                h2 = eps
            """
            if (h2 - h2max) > eps:
                takeoff = (2*9.81*(h2-h2max))**(0.5)*tube_area*1000000
                #h2_reflux = h2max
            elif h2 < eps:
                h2 = eps
                h2reflux = eps
                takeoff = eps # formerly not here
            else:
                takeoff = eps # formerly not here
            #print h2
            if takeoff < eps:
                takeoff = eps
            """
            # Needle-valve flow model: Q = Kv * sqrt(DP[bar]/SG).
            Cv = 0.43*reflux_knob # flow coefficient
            Kv = 0.865*Cv # flow factor (metric)
            #rho = (0.5*ethanol_density + 0.5*water_density)
            rho = abv*ethanol_density + (1.0-abv)*water_density # g/mL Including abv here creates instability.
            if rho < eps:
                rho = eps
            elif 1-rho < eps:
                rho = 1-eps
            SG = rho/water_density
            DP = h2*rho*1000.0*9.81 # change in pressure, Pa = m*kg/m^3 *m/s^2 = kg*m/s^2 * 1/m^2 = N/m^2
            Q = Kv*((DP/100000.0)/SG)**(0.5) # m^3/h
            head_reflux = Q*1000000.0/3600.0 # mL/s
            if head_reflux < eps:
                head_reflux = eps
            #head_reflux = 0.2
            #takeoff = 0.0
            #if h2 > h2max:
            #    takeoff = vap_rate-head_reflux
            # Overflow above h2max that the valve cannot return goes to
            # the takeoff (product) container.
            if ((h2-h2max) > eps) & ((vap_rate - head_reflux) > eps):
                takeoff = vap_rate - head_reflux
            else:
                takeoff = 0.0
            if takeoff < eps:
                takeoff = eps
            #takeoff = 0.0
            #head_reflux = 0.0
            #print('takeoff = %2.2f' %takeoff)
            #eps1 = 0.0000000000000001
            if y[i] > eps:
                dydt[i-3] += abv*head_reflux # lower plate ethanol
            if y[i+1] > eps:
                dydt[i-2] += (1.0-abv)*head_reflux # lower plate water
            if y[i+2] > eps:
                dydt[i-1] += congener_molarity*head_reflux # lower plate congener
            if y[i] > eps:
                dydt[i] += -abv*head_reflux - abv*takeoff # current plate ethanol
            if y[i+1] > eps:
                dydt[i+1] += -(1.0-abv)*head_reflux - (1.0-abv)*takeoff # current plate water
            if y[i+2] > eps:
                dydt[i+2] += -congener_molarity*head_reflux - congener_molarity*takeoff # current plate congener
            if y[i] > eps:
                dydt[i+3] += abv*takeoff # takeoff ethanol
            if y[i+1] > eps:
                dydt[i+4] += (1.0-abv)*takeoff # takeoff plate water
            if y[i+2] > eps:
                dydt[i+5] += congener_molarity*takeoff # takeoff plate congener
            #print abv
            """
            else if i = len(y) - 6: # liquid management head
            """
        else: # any other plate
            # Interior plate: reflux drains downward, vapor rises upward.
            if y[i] > eps:
                dydt[i-3] += abv*reflux_rate # lower plate ethanol
            if y[i+1] > eps:
                dydt[i-2] += (1.0-abv)*reflux_rate # lower plate water
            if y[i+2] > eps:
                dydt[i-1] += congener_molarity*reflux_rate # lower plate congener
            if y[i] > eps:
                dydt[i] += -abv*reflux_rate - vapor_abv*vap_rate # current plate ethanol
            if y[i+1] > eps:
                dydt[i+1] += -(1.0-abv)*reflux_rate -(1.0-vapor_abv)*vap_rate # current plate water
            if y[i+2] > eps:
                dydt[i+2] += -congener_molarity*reflux_rate - congener_molarity*congener_partial_pressure/1.01325*vap_rate # current plate congener
            if y[i] > eps:
                dydt[i+3] += vapor_abv*vap_rate # upper plate ethanol
            if y[i+1] > eps:
                dydt[i+4] += (1.0-vapor_abv)*vap_rate # upper plate water
            if y[i+2] > eps:
                dydt[i+5] += congener_molarity*congener_partial_pressure/1.01325*vap_rate # upper plate congener
    #print dydt
    return dydt
def distill(ethanol,water,plates,congener,congener_identity,reflux_knob,power,timestep,tend):
    """
    Simulate a batch distillation run and plot ABV and volume per stage.

    Units:
      ethanol            initial ethanol charge, mL
      water              initial water charge, mL
      plates             stage count (1 for the pot, 1 for the head, 1 for the takeoff)
      congener           initial congener, mol
      congener_identity  congener name (text)
      reflux_knob        reflux valve setting, 0-1
      power              heater power, W
      timestep           passed to np.linspace as the number of samples
                         on [0, tend] (despite the name)
      tend               end time, s
    """
    state0 = initialize(ethanol, water, plates, congener)
    t = np.linspace(0, tend, timestep)
    sol = odeint(col, state0, t,
                 args=(plates, congener_identity, reflux_knob, power),
                 full_output=0, rtol=1.0, atol=1.0)

    # Solution columns come in (ethanol, water, congener) triples per stage.
    # Draw one ABV subplot and one volume subplot for each of the five
    # hard-coded stages (pot, two plates, head, takeoff), as before.
    stages = ['pot', 'plate 1', 'plate 2', 'head', 'takeoff']
    plt.figure(1)
    for idx, stage in enumerate(stages):
        eth = sol[:, 3 * idx]
        wat = sol[:, 3 * idx + 1]
        plt.subplot(10, 1, 2 * idx + 1)
        plt.plot(t, eth / (eth + wat), 'b', label='abv|{}'.format(stage))
        plt.legend(loc='best')
        plt.xlabel('t')
        plt.grid()
        plt.subplot(10, 1, 2 * idx + 2)
        plt.plot(t, eth + wat, 'g', label='Volume (mL)|{}'.format(stage))
        plt.legend(loc='best')
        plt.xlabel('t')
        plt.grid()
    plt.show()
if __name__ == "__main__":
    # CLI order: ethanol water plates congener congener_identity
    #            reflux_knob power timestep tend
    argv = sys.argv
    distill(float(argv[1]), float(argv[2]), int(argv[3]), float(argv[4]),
            argv[5], float(argv[6]), float(argv[7]), int(argv[8]), int(argv[9]))
|
gpl-3.0
|
hunse/deepnet
|
deepnet/image_tools/plotting.py
|
1
|
7841
|
import os
import numpy as np
import numpy.random as npr
import matplotlib.pyplot as plt
def display_available():
    """Return True when an X display is reachable (DISPLAY env var is set)."""
    return os.environ.get('DISPLAY') is not None
# figsize = (13,8.5)
# def figure(fignum=None, figsize=(8, 6)):
# plt.ion()
# if fignum is None:
# f = plt.figure(figsize=figsize)
# else:
# f = plt.figure(fignum, figsize=figsize)
# plt.clf()
# return f
def show(image, ax=None, vlims=None, invert=False):
    """
    Display *image* on *ax* (current axis by default) and return the axis.

    2-D images are shown in grayscale (inverted colormap when *invert*),
    with *vlims* forwarded as vmin/vmax.  3-D images must be (m, n, 3) RGB;
    *vlims* then clips and rescales the values into [0, 1].
    """
    imshow_opts = {'interpolation': 'none'}
    ndim = image.ndim
    if ndim == 2:
        imshow_opts['cmap'] = 'gist_yarg' if invert else 'gray'
        if vlims is not None:
            imshow_opts['vmin'], imshow_opts['vmax'] = vlims
    elif ndim == 3:
        assert image.shape[2] == 3
        if vlims is not None:
            lo, hi = vlims
            image = (image.clip(lo, hi) - lo) / (hi - lo)
    else:
        raise ValueError("Wrong number of image dimensions")

    target = ax if ax is not None else plt.gca()
    target.imshow(image, **imshow_opts)
    return target
def tile(images, ax=None, rows=16, cols=24, random=False,
         grid=False, gridwidth=1, gridcolor='r', **show_params):
    """
    Render up to rows*cols entries of *images* as one tiled mosaic.

    :images  array whose first axis indexes images; each images[k] is an
             (m, n) or (m, n, channels) image
    :random  if True, tile a shuffled selection instead of the first images
    :grid    if True, draw separator lines between tiles
    Remaining keyword args are forwarded to `show`.
    """
    count = images.shape[0]
    tile_shape = images.shape[1:]
    m, n = tile_shape[:2]
    channels = tile_shape[2] if len(tile_shape) > 2 else 1

    order = np.arange(count)
    if random:
        npr.shuffle(order)

    mosaic_shape = (m * rows, n * cols)
    if channels > 1:
        mosaic_shape = mosaic_shape + (channels,)
    mosaic = np.zeros(mosaic_shape, dtype=images.dtype)

    for k in xrange(min(rows * cols, count)):
        r, c = divmod(k, cols)
        mosaic[r*m:(r+1)*m, c*n:(c+1)*n] = images[order[k]]

    ax = show(mosaic, ax=ax, **show_params)
    ax.xaxis.set_visible(False)
    ax.yaxis.set_visible(False)

    if grid:
        # Separator lines sit on the half-pixel boundaries between tiles.
        for r in xrange(1, rows):
            ax.plot([-0.5, mosaic.shape[1]-0.5], [r*m-0.5, r*m-0.5], '-',
                    color=gridcolor, linewidth=gridwidth)
        for c in xrange(1, cols):
            ax.plot([c*n-0.5, c*n-0.5], [-0.5, mosaic.shape[0]-0.5], '-',
                    color=gridcolor, linewidth=gridwidth)

    ax.set_xlim([-0.5, mosaic.shape[1]-0.5])
    ax.set_ylim([-0.5, mosaic.shape[0]-0.5])
    ax.invert_yaxis()
def compare(imagesetlist,
            ax=None, rows=5, cols=20, vlims=None, grid=True, random=False):
    """
    Tile several image sets for side-by-side comparison.

    Tile (i, j) of the output stacks the j-th image from every set in
    *imagesetlist* vertically, so corresponding images sit next to each other.
    """
    n_sets = len(imagesetlist)
    count = imagesetlist[0].shape[0]
    im_shape = imagesetlist[0].shape[1:]
    m, n = im_shape[:2]
    channels = im_shape[2] if len(im_shape) > 2 else 1

    order = np.arange(count)
    if random:
        npr.shuffle(order)

    canvas_shape = (n_sets * m * rows, n * cols)
    if channels > 1:
        canvas_shape = canvas_shape + (channels,)
    canvas = np.zeros(canvas_shape, dtype=imagesetlist[0].dtype)

    for idx in range(min(rows * cols, count)):
        r, c = divmod(idx, cols)
        for k in xrange(n_sets):
            canvas[(n_sets*r + k)*m:(n_sets*r + k + 1)*m, c*n:(c+1)*n] = \
                imagesetlist[k][order[idx], :].reshape(im_shape)

    ax = show(canvas, ax=ax, vlims=vlims)
    if grid:
        for r in xrange(1, rows):
            ax.plot([-0.5, canvas.shape[1]-0.5], (n_sets*r*m - 0.5) * np.ones(2), 'r-')
        for c in xrange(1, cols):
            ax.plot([c*n-0.5, c*n-0.5], [-0.5, canvas.shape[0]-0.5], 'r-')
    ax.set_xlim([-0.5, canvas.shape[1]-0.5])
    ax.set_ylim([-0.5, canvas.shape[0]-0.5])
    ax.invert_yaxis()
def activations(acts, func, ax=None):
    """
    Plot a normalized histogram of the activations in *acts* over the fixed
    range [-2, 2], with the activation function *func* overlaid.

    :acts  array of unit activations (any shape; flattened for the histogram)
    :func  vectorized callable evaluated on a dense grid for the overlay
    :ax    target axis (defaults to the current axis)
    """
    if ax is None:
        ax = plt.gca()

    N = acts.size
    # BUG FIX: np.sqrt returns a float, and matplotlib's hist requires an
    # integer bin count (newer versions reject a float outright).
    nbins = max(int(np.sqrt(N)), 10)

    minact, maxact = (-2, 2)
    ax.hist(acts.ravel(), bins=nbins, range=(minact, maxact), normed=True)

    x = np.linspace(minact, maxact, 101)
    ax.plot(x, func(x))
    ax.set_xlim([minact, maxact])
def filters(filters, ax=None, **kwargs):
    """Tile filter weights with symmetric value limits of +/- 2 std devs."""
    spread = 2 * filters.std()
    tile(filters, ax=ax, vlims=(-spread, spread), grid=True, **kwargs)
# class Imageset(object):
# """
# A container for a set of images, that facilitates common functions
# including batches and visualization.
# """
# # batchlen = 100
# figsize = (12,6)
# def __init__(self, images, shape, batchlen=100, vlims=(0,1)):
# """
# :images[nparray] Images, where each row is one (flattened) image
# :shape[tuple] Shape of an image (for unflattening)
# :vlims[tuple] Limits of image values (for display)
# """
# self.images = images
# self.shape = tuple(shape)
# self.batchlen = batchlen
# self.vlims = vlims
# self.num_examples = images.shape[0]
# self.npixels = images.shape[1]
# if self.npixels != shape[0]*shape[1]:
# raise Exception('Shape must match number of pixels in images')
# @property
# def nbatches(self):
# if self.num_examples % self.batchlen == 0:
# return self.num_examples/self.batchlen
# else:
# return None
# def todict(self):
# return {'images': self.images, 'shape': self.shape, 'vlims': self.vlims}
# @staticmethod
# def fromdict(d):
# return Imageset(**d)
# def tofile(self, filename):
# np.savez(filename, self.todict())
# @staticmethod
# def fromfile(filename):
# d = np.load(filename)['arr_0'].item()
# return Imageset.fromdict(d)
# def imageset_like(self, images, shape=None):
# return type(self)(images,
# shape=self.shape if shape is None else shape,
# vlims=self.vlims)
# def image(self, i):
# if i < 0 or i >= self.num_examples:
# raise Exception('Invalid image index')
# else:
# return self.images[i,:]
# def subset(self, imin, imax=None):
# if imax is None:
# return Imageset(self.images[0:imin], self.shape, vlims=self.vlims)
# else:
# return Imageset(self.images[imin:imax], self.shape, vlims=self.vlims)
# @property
# def batchshape(self):
# if self.nbatches is None:
# raise Exception('Examples cannot be evenly divided into batches')
# else:
# return (self.batchlen, self.npixels)
# def batch(self, i):
# if self.nbatches is None:
# raise Exception('Examples cannot be evenly divided into batches')
# elif i < 0 or i >= self.nbatches:
# raise Exception('Invalid batch index')
# else:
# return self.images[i*self.batchlen:(i+1)*self.batchlen,:]
# def show(self, ind, fignum=None):
# figure(fignum=fignum, figsize=self.figsize)
# show(plt.gca(), self.image(ind).reshape(self.shape), vlims=self.vlims)
# plt.tight_layout()
# plt.draw()
# def tile(self, fignum=None, rows=16, cols=24, grid=False, random=False):
# figure(fignum=fignum, figsize=self.figsize)
# tile(plt.gca(), self.images, imshape=self.shape, vlims=self.vlims,
# rows=rows, cols=cols, grid=grid, random=random)
# plt.tight_layout()
# plt.draw()
# def compare(self, compim, fignum=None, **kwargs):
# figure(fignum=fignum, figsize=self.figsize)
# compare(plt.gca(), [self.images, compim.images], imshape=self.shape,
# vlims=self.vlims, **kwargs)
# plt.tight_layout()
# plt.draw()
# def rmse(self, comim):
# return np.mean(np.sqrt(np.mean((self.images - comim.images)**2, axis=1)))
|
mit
|
andyraib/data-storage
|
python_scripts/env/lib/python3.6/site-packages/pandas/tests/types/test_dtypes.py
|
7
|
13569
|
# -*- coding: utf-8 -*-
from itertools import product
import nose
import numpy as np
import pandas as pd
from pandas import Series, Categorical, date_range
from pandas.types.dtypes import DatetimeTZDtype, PeriodDtype, CategoricalDtype
from pandas.types.common import (is_categorical_dtype, is_categorical,
is_datetime64tz_dtype, is_datetimetz,
is_period_dtype, is_period,
is_dtype_equal, is_datetime64_ns_dtype,
is_datetime64_dtype, is_string_dtype,
_coerce_to_dtype)
import pandas.util.testing as tm
# Tell nose it is safe to split this module's tests across parallel workers.
_multiprocess_can_split_ = True
class Base(object):
    """Shared dtype checks mixed into each concrete dtype TestCase below."""

    def test_hash(self):
        # The dtype must be hashable (it is used as a cache/dict key).
        hash(self.dtype)

    def test_equality_invalid(self):
        # BUG FIX: the original called self.assertRaises(self.dtype == 'foo'),
        # passing a bool where an exception class belongs; the returned
        # context manager was never entered, so nothing was asserted.
        # Comparing against an arbitrary string must simply be unequal.
        self.assertFalse(self.dtype == 'foo')
        self.assertFalse(is_dtype_equal(self.dtype, np.int64))

    def test_numpy_informed(self):
        # np.dtype doesn't know about our new dtype
        def f():
            np.dtype(self.dtype)

        self.assertRaises(TypeError, f)
        self.assertNotEqual(self.dtype, np.str_)
        self.assertNotEqual(np.str_, self.dtype)

    def test_pickle(self):
        # Round-tripping through pickle must preserve equality.
        result = self.round_trip_pickle(self.dtype)
        self.assertEqual(result, self.dtype)
class TestCategoricalDtype(Base, tm.TestCase):
    """Checks for CategoricalDtype: identity caching, equality, construction."""

    def setUp(self):
        self.dtype = CategoricalDtype()

    def test_hash_vs_equality(self):
        # make sure that we satisfy is semantics: equal CategoricalDtype
        # instances are also the *same* cached object
        dtype = self.dtype
        dtype2 = CategoricalDtype()
        self.assertTrue(dtype == dtype2)
        self.assertTrue(dtype2 == dtype)
        self.assertTrue(dtype is dtype2)
        self.assertTrue(dtype2 is dtype)
        self.assertTrue(hash(dtype) == hash(dtype2))

    def test_equality(self):
        # equal to its string alias and to a fresh instance, not to junk
        self.assertTrue(is_dtype_equal(self.dtype, 'category'))
        self.assertTrue(is_dtype_equal(self.dtype, CategoricalDtype()))
        self.assertFalse(is_dtype_equal(self.dtype, 'foo'))

    def test_construction_from_string(self):
        # only the exact alias 'category' parses; anything else raises
        result = CategoricalDtype.construct_from_string('category')
        self.assertTrue(is_dtype_equal(self.dtype, result))
        self.assertRaises(
            TypeError, lambda: CategoricalDtype.construct_from_string('foo'))

    def test_is_dtype(self):
        self.assertTrue(CategoricalDtype.is_dtype(self.dtype))
        self.assertTrue(CategoricalDtype.is_dtype('category'))
        self.assertTrue(CategoricalDtype.is_dtype(CategoricalDtype()))
        self.assertFalse(CategoricalDtype.is_dtype('foo'))
        self.assertFalse(CategoricalDtype.is_dtype(np.float64))

    def test_basic(self):
        # introspection helpers recognize both the dtype and Series of it
        self.assertTrue(is_categorical_dtype(self.dtype))
        factor = Categorical(['a', 'b', 'b', 'a', 'a', 'c', 'c', 'c'])
        s = Series(factor, name='A')
        # dtypes
        self.assertTrue(is_categorical_dtype(s.dtype))
        self.assertTrue(is_categorical_dtype(s))
        self.assertFalse(is_categorical_dtype(np.dtype('float64')))
        self.assertTrue(is_categorical(s.dtype))
        self.assertTrue(is_categorical(s))
        self.assertFalse(is_categorical(np.dtype('float64')))
        self.assertFalse(is_categorical(1.0))
class TestDatetimeTZDtype(Base, tm.TestCase):
    """Checks for DatetimeTZDtype: caching, parsing, tz-aware introspection."""

    def setUp(self):
        self.dtype = DatetimeTZDtype('ns', 'US/Eastern')

    def test_hash_vs_equality(self):
        # make sure that we satisfy is semantics: equal (unit, tz) pairs
        # resolve to the same cached instance
        dtype = self.dtype
        dtype2 = DatetimeTZDtype('ns', 'US/Eastern')
        dtype3 = DatetimeTZDtype(dtype2)
        self.assertTrue(dtype == dtype2)
        self.assertTrue(dtype2 == dtype)
        self.assertTrue(dtype3 == dtype)
        self.assertTrue(dtype is dtype2)
        self.assertTrue(dtype2 is dtype)
        self.assertTrue(dtype3 is dtype)
        self.assertTrue(hash(dtype) == hash(dtype2))
        self.assertTrue(hash(dtype) == hash(dtype3))

    def test_construction(self):
        # only nanosecond resolution is supported
        self.assertRaises(ValueError,
                          lambda: DatetimeTZDtype('ms', 'US/Eastern'))

    def test_subclass(self):
        a = DatetimeTZDtype('datetime64[ns, US/Eastern]')
        b = DatetimeTZDtype('datetime64[ns, CET]')
        self.assertTrue(issubclass(type(a), type(a)))
        self.assertTrue(issubclass(type(a), type(b)))

    def test_coerce_to_dtype(self):
        self.assertEqual(_coerce_to_dtype('datetime64[ns, US/Eastern]'),
                         DatetimeTZDtype('ns', 'US/Eastern'))
        self.assertEqual(_coerce_to_dtype('datetime64[ns, Asia/Tokyo]'),
                         DatetimeTZDtype('ns', 'Asia/Tokyo'))

    def test_compat(self):
        # tz-aware dtype is NOT plain datetime64[ns]
        self.assertFalse(is_datetime64_ns_dtype(self.dtype))
        self.assertFalse(is_datetime64_ns_dtype('datetime64[ns, US/Eastern]'))
        self.assertFalse(is_datetime64_dtype(self.dtype))
        self.assertFalse(is_datetime64_dtype('datetime64[ns, US/Eastern]'))

    def test_construction_from_string(self):
        result = DatetimeTZDtype('datetime64[ns, US/Eastern]')
        self.assertTrue(is_dtype_equal(self.dtype, result))
        result = DatetimeTZDtype.construct_from_string(
            'datetime64[ns, US/Eastern]')
        self.assertTrue(is_dtype_equal(self.dtype, result))
        self.assertRaises(TypeError,
                          lambda: DatetimeTZDtype.construct_from_string('foo'))

    def test_is_dtype(self):
        self.assertTrue(DatetimeTZDtype.is_dtype(self.dtype))
        self.assertTrue(DatetimeTZDtype.is_dtype('datetime64[ns, US/Eastern]'))
        self.assertFalse(DatetimeTZDtype.is_dtype('foo'))
        self.assertTrue(DatetimeTZDtype.is_dtype(DatetimeTZDtype(
            'ns', 'US/Pacific')))
        self.assertFalse(DatetimeTZDtype.is_dtype(np.float64))

    def test_equality(self):
        # equality is sensitive to the timezone, not just the unit
        self.assertTrue(is_dtype_equal(self.dtype,
                                       'datetime64[ns, US/Eastern]'))
        self.assertTrue(is_dtype_equal(self.dtype, DatetimeTZDtype(
            'ns', 'US/Eastern')))
        self.assertFalse(is_dtype_equal(self.dtype, 'foo'))
        self.assertFalse(is_dtype_equal(self.dtype, DatetimeTZDtype('ns',
                                                                    'CET')))
        self.assertFalse(is_dtype_equal(
            DatetimeTZDtype('ns', 'US/Eastern'), DatetimeTZDtype(
                'ns', 'US/Pacific')))
        # numpy compat
        self.assertTrue(is_dtype_equal(np.dtype("M8[ns]"), "datetime64[ns]"))

    def test_basic(self):
        self.assertTrue(is_datetime64tz_dtype(self.dtype))
        dr = date_range('20130101', periods=3, tz='US/Eastern')
        s = Series(dr, name='A')
        # dtypes
        self.assertTrue(is_datetime64tz_dtype(s.dtype))
        self.assertTrue(is_datetime64tz_dtype(s))
        self.assertFalse(is_datetime64tz_dtype(np.dtype('float64')))
        self.assertFalse(is_datetime64tz_dtype(1.0))
        self.assertTrue(is_datetimetz(s))
        self.assertTrue(is_datetimetz(s.dtype))
        self.assertFalse(is_datetimetz(np.dtype('float64')))
        self.assertFalse(is_datetimetz(1.0))

    def test_dst(self):
        # DST transition must not change the dtype of the Series
        dr1 = date_range('2013-01-01', periods=3, tz='US/Eastern')
        s1 = Series(dr1, name='A')
        self.assertTrue(is_datetimetz(s1))
        dr2 = date_range('2013-08-01', periods=3, tz='US/Eastern')
        s2 = Series(dr2, name='A')
        self.assertTrue(is_datetimetz(s2))
        self.assertEqual(s1.dtype, s2.dtype)

    def test_parser(self):
        # pr #11245: both 'M8' and 'datetime64' spellings must parse
        for tz, constructor in product(('UTC', 'US/Eastern'),
                                       ('M8', 'datetime64')):
            self.assertEqual(
                DatetimeTZDtype('%s[ns, %s]' % (constructor, tz)),
                DatetimeTZDtype('ns', tz),
            )

    def test_empty(self):
        # a bare DatetimeTZDtype() has no unit/tz and cannot be stringified
        dt = DatetimeTZDtype()
        with tm.assertRaises(AttributeError):
            str(dt)
class TestPeriodDtype(Base, tm.TestCase):
    """Checks for PeriodDtype: freq parsing, caching, strict string forms."""

    def setUp(self):
        self.dtype = PeriodDtype('D')

    def test_construction(self):
        with tm.assertRaises(ValueError):
            PeriodDtype('xx')
        # every accepted spelling of a daily freq resolves to Day()
        for s in ['period[D]', 'Period[D]', 'D']:
            dt = PeriodDtype(s)
            self.assertEqual(dt.freq, pd.tseries.offsets.Day())
            self.assertTrue(is_period_dtype(dt))
        for s in ['period[3D]', 'Period[3D]', '3D']:
            dt = PeriodDtype(s)
            self.assertEqual(dt.freq, pd.tseries.offsets.Day(3))
            self.assertTrue(is_period_dtype(dt))
        # compound specs normalize (1D2H == 26H)
        for s in ['period[26H]', 'Period[26H]', '26H',
                  'period[1D2H]', 'Period[1D2H]', '1D2H']:
            dt = PeriodDtype(s)
            self.assertEqual(dt.freq, pd.tseries.offsets.Hour(26))
            self.assertTrue(is_period_dtype(dt))

    def test_subclass(self):
        a = PeriodDtype('period[D]')
        b = PeriodDtype('period[3D]')
        self.assertTrue(issubclass(type(a), type(a)))
        self.assertTrue(issubclass(type(a), type(b)))

    def test_identity(self):
        # equal freq specs resolve to the same cached instance,
        # even across equivalent spellings (1S1U == 1000001U)
        self.assertEqual(PeriodDtype('period[D]'),
                         PeriodDtype('period[D]'))
        self.assertIs(PeriodDtype('period[D]'),
                      PeriodDtype('period[D]'))
        self.assertEqual(PeriodDtype('period[3D]'),
                         PeriodDtype('period[3D]'))
        self.assertIs(PeriodDtype('period[3D]'),
                      PeriodDtype('period[3D]'))
        self.assertEqual(PeriodDtype('period[1S1U]'),
                         PeriodDtype('period[1000001U]'))
        self.assertIs(PeriodDtype('period[1S1U]'),
                      PeriodDtype('period[1000001U]'))

    def test_coerce_to_dtype(self):
        self.assertEqual(_coerce_to_dtype('period[D]'),
                         PeriodDtype('period[D]'))
        self.assertEqual(_coerce_to_dtype('period[3M]'),
                         PeriodDtype('period[3M]'))

    def test_compat(self):
        # period dtype is not any flavor of datetime64
        self.assertFalse(is_datetime64_ns_dtype(self.dtype))
        self.assertFalse(is_datetime64_ns_dtype('period[D]'))
        self.assertFalse(is_datetime64_dtype(self.dtype))
        self.assertFalse(is_datetime64_dtype('period[D]'))

    def test_construction_from_string(self):
        result = PeriodDtype('period[D]')
        self.assertTrue(is_dtype_equal(self.dtype, result))
        result = PeriodDtype.construct_from_string('period[D]')
        self.assertTrue(is_dtype_equal(self.dtype, result))
        # construct_from_string is strict: bad freqs and other dtypes raise
        with tm.assertRaises(TypeError):
            PeriodDtype.construct_from_string('foo')
        with tm.assertRaises(TypeError):
            PeriodDtype.construct_from_string('period[foo]')
        with tm.assertRaises(TypeError):
            PeriodDtype.construct_from_string('foo[D]')
        with tm.assertRaises(TypeError):
            PeriodDtype.construct_from_string('datetime64[ns]')
        with tm.assertRaises(TypeError):
            PeriodDtype.construct_from_string('datetime64[ns, US/Eastern]')

    def test_is_dtype(self):
        self.assertTrue(PeriodDtype.is_dtype(self.dtype))
        self.assertTrue(PeriodDtype.is_dtype('period[D]'))
        self.assertTrue(PeriodDtype.is_dtype('period[3D]'))
        self.assertTrue(PeriodDtype.is_dtype(PeriodDtype('3D')))
        self.assertTrue(PeriodDtype.is_dtype('period[U]'))
        self.assertTrue(PeriodDtype.is_dtype('period[S]'))
        self.assertTrue(PeriodDtype.is_dtype(PeriodDtype('U')))
        self.assertTrue(PeriodDtype.is_dtype(PeriodDtype('S')))
        # bare freq strings are NOT period dtype strings
        self.assertFalse(PeriodDtype.is_dtype('D'))
        self.assertFalse(PeriodDtype.is_dtype('3D'))
        self.assertFalse(PeriodDtype.is_dtype('U'))
        self.assertFalse(PeriodDtype.is_dtype('S'))
        self.assertFalse(PeriodDtype.is_dtype('foo'))
        self.assertFalse(PeriodDtype.is_dtype(np.object_))
        self.assertFalse(PeriodDtype.is_dtype(np.int64))
        self.assertFalse(PeriodDtype.is_dtype(np.float64))

    def test_equality(self):
        self.assertTrue(is_dtype_equal(self.dtype, 'period[D]'))
        self.assertTrue(is_dtype_equal(self.dtype, PeriodDtype('D')))
        self.assertTrue(is_dtype_equal(self.dtype, PeriodDtype('D')))
        self.assertTrue(is_dtype_equal(PeriodDtype('D'), PeriodDtype('D')))
        self.assertFalse(is_dtype_equal(self.dtype, 'D'))
        self.assertFalse(is_dtype_equal(PeriodDtype('D'), PeriodDtype('2D')))

    def test_basic(self):
        self.assertTrue(is_period_dtype(self.dtype))
        pidx = pd.period_range('2013-01-01 09:00', periods=5, freq='H')
        self.assertTrue(is_period_dtype(pidx.dtype))
        self.assertTrue(is_period_dtype(pidx))
        self.assertTrue(is_period(pidx))
        s = Series(pidx, name='A')
        # dtypes
        # series results in object dtype currently,
        # is_period checks period_arraylike
        self.assertFalse(is_period_dtype(s.dtype))
        self.assertFalse(is_period_dtype(s))
        self.assertTrue(is_period(s))
        self.assertFalse(is_period_dtype(np.dtype('float64')))
        self.assertFalse(is_period_dtype(1.0))
        self.assertFalse(is_period(np.dtype('float64')))
        self.assertFalse(is_period(1.0))

    def test_empty(self):
        # a bare PeriodDtype() has no freq and cannot be stringified
        dt = PeriodDtype()
        with tm.assertRaises(AttributeError):
            str(dt)

    def test_not_string(self):
        # though PeriodDtype has object kind, it cannot be string
        self.assertFalse(is_string_dtype(PeriodDtype('D')))
# Allow running this test module directly under nose; -x stops on the first
# failure and --pdb drops into the debugger on errors/failures.
if __name__ == '__main__':
    nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
                   exit=False)
|
apache-2.0
|
msincenselee/vnpy
|
vnpy/gateway/gj/gj_gateway.py
|
1
|
49000
|
# 国金交易客户端 + easytrader 接口
# 华富资产 李来佳 28888502
import os
import sys
import copy
import csv
import dbf
import traceback
import pandas as pd
from typing import Any, Dict, List
from datetime import datetime, timedelta
from time import sleep
from functools import lru_cache
from collections import OrderedDict
from multiprocessing.dummy import Pool
from threading import Thread
from vnpy.event import EventEngine
from vnpy.trader.event import EVENT_TIMER
from vnpy.trader.constant import (
Exchange,
Product,
Direction,
OrderType,
Status,
Offset,
Interval
)
from vnpy.trader.gateway import BaseGateway, LocalOrderManager
from vnpy.trader.object import (
BarData,
CancelRequest,
OrderRequest,
SubscribeRequest,
TickData,
ContractData,
OrderData,
TradeData,
PositionData,
AccountData,
HistoryRequest
)
from vnpy.trader.utility import get_folder_path, print_dict, extract_vt_symbol, get_stock_exchange, append_data
from vnpy.data.tdx.tdx_common import get_stock_type_sz, get_stock_type_sh
from vnpy.api.easytrader.remoteclient import use as easytrader_use
# symbol => Chinese display name
symbol_name_map: Dict[str, str] = {}
# symbol => exchange
symbol_exchange_map: Dict[str, Exchange] = {}
# Timestamp alignment offset: 8 hours expressed in nanoseconds
# (presumably the UTC <-> China Standard Time shift — TODO confirm at call sites)
TIME_GAP = 8 * 60 * 60 * 1000000000
# vn.py bar interval => Tianqin (TQ) interval, in seconds
INTERVAL_VT2TQ = {
    Interval.MINUTE: 60,
    Interval.HOUR: 60 * 60,
    Interval.DAILY: 60 * 60 * 24,
}
# Broker exchange display name (Chinese) => vn.py Exchange
EXCHANGE_NAME2VT: Dict[str, Exchange] = {
    "上交所A": Exchange.SSE,
    "深交所A": Exchange.SZSE,
    "上A": Exchange.SSE,
    "深A": Exchange.SZSE
}
# Broker trade-direction display name (Chinese) => vn.py Direction
DIRECTION_STOCK_NAME2VT: Dict[str, Any] = {
    "证券卖出": Direction.SHORT,
    "证券买入": Direction.LONG,
    "卖出": Direction.SHORT,
    "买入": Direction.LONG,
    "债券买入": Direction.LONG,
    "债券卖出": Direction.SHORT,
    "申购": Direction.LONG
}
def format_dict(d, dict_define):
    """
    Left-pad values of *d* in place according to the DBF-style field specs in
    *dict_define*, and return *d*.

    Spec forms: 'C<width>' for character fields, 'N<width>' or
    'N<int>.<frac>' for numeric fields.  Keys of *d* without a spec are left
    untouched; keys that appear only in *dict_define* are added with a
    padded empty value.  Values longer than the width are not truncated.
    """
    for key, spec in dict_define.items():
        if spec is None:
            continue
        value = d.get(key, '')
        if 'C' in spec:
            # Character field: left-pad with spaces to the declared width.
            width = int(spec.replace('C', ''))
            d[key] = ' ' * (width - len(value)) + value
        elif 'N' in spec:
            # Numeric field.
            # NOTE(review): for 'N<int>.<frac>' specs only the integer-part
            # width is applied — the fractional width is ignored here;
            # verify against the DBF writer before relying on it.
            digits = spec.replace('N', '')
            width = int(digits.split('.')[0]) if '.' in digits else int(digits)
            text = str(value)
            d[key] = ' ' * (width - len(text)) + text
    return d
# Broker order-type display name (Chinese) => vn.py OrderType
ORDERTYPE_NAME2VT: Dict[str, OrderType] = {
    "五档即成剩撤": OrderType.MARKET,
    "五档即成剩转": OrderType.MARKET,
    "即成剩撤": OrderType.MARKET,
    "对手方最优": OrderType.MARKET,
    "本方最优": OrderType.MARKET,
    "限价单": OrderType.LIMIT,
}
# Broker order-status display name (Chinese) => vn.py Status
STATUS_NAME2VT: Dict[str, Status] = {
    "未报": Status.SUBMITTING,
    "待报": Status.SUBMITTING,
    "正报": Status.SUBMITTING,
    "已报": Status.NOTTRADED,
    "废单": Status.REJECTED,
    "部成": Status.PARTTRADED,
    "已成": Status.ALLTRADED,
    "部撤": Status.CANCELLED,
    "已撤": Status.CANCELLED,
    "待撤": Status.CANCELLING,
    "已报待撤": Status.CANCELLING,
    "未审批": Status.UNKNOWN,
    "审批拒绝": Status.UNKNOWN,
    "未审批即撤销": Status.UNKNOWN,
}
# Local cache file holding TDX stock metadata
STOCK_CONFIG_FILE = 'tdx_stock_config.pkb2'
from pytdx.hq import TdxHq_API
# 通达信股票行情
from vnpy.data.tdx.tdx_common import get_cache_config, get_tdx_market_code
from pytdx.config.hosts import hq_hosts
from pytdx.params import TDXParams
class GjGateway(BaseGateway):
    """Guojin Securities gateway: TDX/TQ market data + easytrader trading."""

    # Connection settings shown in the UI.  Keys are Chinese display labels:
    # fund account id, login password, then the easytrader RPC host/port.
    default_setting: Dict[str, Any] = {
        "资金账号": "",
        "登录密码": "",
        "RPC IP": "localhost",
        "RPC Port": 1430
    }

    # Exchanges supported by this gateway
    exchanges: List[Exchange] = [Exchange.SSE, Exchange.SZSE]

    def __init__(self, event_engine: EventEngine, gateway_name='GJ'):
        """Constructor: wire up the sub-APIs (no network activity here)."""
        super().__init__(event_engine, gateway_name=gateway_name)
        # TDX: basic stock metadata + quotes
        self.md_api = TdxMdApi(self)
        # easytrader-based trading interface
        self.td_api = GjTdApi(self)
        # Tianqin (TQ) market data, created in connect()
        self.tq_api = None
        # Whether the TDX quote API connected successfully.
        # NOTE(review): TdxMdApi sets `gateway.tdxConnected` (camelCase), so
        # this snake_case flag appears never to be updated — confirm which
        # name downstream readers actually use.
        self.tdx_connected = False  # TDX quote API connection state

    def connect(self, setting: dict) -> None:
        """Connect all sub-APIs using the UI-provided settings."""
        userid = setting["资金账号"]
        password = setting["登录密码"]
        # IP address and port of the machine running the easytrader RESTful server
        host = setting["RPC IP"]
        port = setting["RPC Port"]
        self.md_api.connect()
        self.td_api.connect(user_id=userid,
                            user_pwd=password,
                            host=host,
                            port=port)
        self.tq_api = TqMdApi(self)
        self.tq_api.connect()
        self.init_query()

    def close(self) -> None:
        """Shut down the market-data and trading APIs."""
        self.md_api.close()
        self.td_api.close()

    def subscribe(self, req: SubscribeRequest) -> None:
        """Subscribe to quotes, preferring the TQ feed when it is connected."""
        if self.tq_api and self.tq_api.is_connected:
            self.tq_api.subscribe(req)
        else:
            self.md_api.subscribe(req)

    def send_order(self, req: OrderRequest) -> str:
        """Forward an order request to the trading API and return its order id."""
        return self.td_api.send_order(req)

    def cancel_order(self, req: CancelRequest) -> None:
        """Forward a cancel request to the trading API."""
        return self.td_api.cancel_order(req)

    def query_account(self) -> None:
        """Query account funds via the trading API."""
        self.td_api.query_account()

    def query_position(self) -> None:
        """Query positions via the trading API."""
        self.td_api.query_position()

    def query_orders(self) -> None:
        # Query orders via the trading API.
        self.td_api.query_orders()

    def query_trades(self) -> None:
        # Query trade fills via the trading API.
        self.td_api.query_trades()

    def process_timer_event(self, event) -> None:
        """Timer callback: run one queued query in round-robin order."""
        self.count += 1
        # Throttle to one query per 8 timer ticks — don't poll too fast.
        if self.count < 8:
            return
        self.count = 0
        func = self.query_functions.pop(0)
        func()
        self.query_functions.append(func)

    def init_query(self) -> None:
        """Initialize the round-robin query list and hook the timer event."""
        self.count = 0
        self.query_functions = [self.query_account, self.query_position, self.query_orders, self.query_trades]
        self.event_engine.register(EVENT_TIMER, self.process_timer_event)

    def reset_query(self) -> None:
        """Re-prioritize queries (after sending/cancelling): orders and trades first."""
        self.count = 0
        self.query_functions = [self.query_orders, self.query_trades, self.query_account, self.query_position]
class TdxMdApi(object):
"""通达信行情和基础数据"""
    def __init__(self, gateway: GjGateway):
        """Initialize all session state; no network activity until connect()."""
        super().__init__()
        self.gateway: GjGateway = gateway
        self.gateway_name: str = gateway.gateway_name
        self.connect_status: bool = False
        self.login_status: bool = False
        self.req_interval = 0.5  # request throttle interval: 500 ms
        self.req_id = 0  # request sequence number
        self.connection_status = False  # connection state
        self.symbol_exchange_dict = {}  # tdx symbol => vn.py exchange
        self.symbol_market_dict = {}  # tdx symbol => tdx market id
        self.symbol_vn_dict = {}  # tdx symbol => vt symbol
        self.symbol_tick_dict = {}  # tdx symbol => last received tick
        self.registed_symbol_set = set()
        # Local cache of stock metadata (symbol dict + its snapshot time).
        self.config = get_cache_config(STOCK_CONFIG_FILE)
        self.symbol_dict = self.config.get('symbol_dict', {})
        self.cache_time = self.config.get('cache_time', datetime.now() - timedelta(days=7))
        self.commission_dict = {}
        self.contract_dict = {}
        # self.queue = Queue()  # request queue
        self.pool = None  # thread pool
        # self.req_thread = None  # timer thread
        # copy.copy(hq_hosts)
        # Hard-coded candidate TDX quote servers; the fastest one is picked
        # by select_best_ip().
        self.ip_list = [{'ip': "180.153.18.170", 'port': 7709},
                        {'ip': "180.153.18.171", 'port': 7709},
                        {'ip': "180.153.18.172", 'port': 80},
                        {'ip': "202.108.253.130", 'port': 7709},
                        {'ip': "202.108.253.131", 'port': 7709},
                        {'ip': "202.108.253.139", 'port': 80},
                        {'ip': "60.191.117.167", 'port': 7709},
                        {'ip': "115.238.56.198", 'port': 7709},
                        {'ip': "218.75.126.9", 'port': 7709},
                        {'ip': "115.238.90.165", 'port': 7709},
                        {'ip': "124.160.88.183", 'port': 7709},
                        {'ip': "60.12.136.250", 'port': 7709},
                        {'ip': "218.108.98.244", 'port': 7709},
                        # {'ip': "218.108.47.69", 'port': 7709},
                        {'ip': "114.80.63.12", 'port': 7709},
                        {'ip': "114.80.63.35", 'port': 7709},
                        {'ip': "180.153.39.51", 'port': 7709},
                        # {'ip': '14.215.128.18', 'port': 7709},
                        # {'ip': '59.173.18.140', 'port': 7709}
                        ]
        self.best_ip = {'ip': None, 'port': None}
        self.api_dict = {}  # session index => API connection object
        self.last_tick_dt = {}  # session index => time of its last tick
        self.security_count = 50000
        # full stock code/name list (lazily populated)
        self.stock_codelist = None
def ping(self, ip, port=7709):
"""
ping行情服务器
:param ip:
:param port:
:param type_:
:return:
"""
apix = TdxHq_API()
__time1 = datetime.now()
try:
with apix.connect(ip, port):
if apix.get_security_count(TDXParams.MARKET_SZ) > 9000: # 0:深市 股票数量 = 9260
_timestamp = datetime.now() - __time1
self.gateway.write_log('服务器{}:{},耗时:{}'.format(ip, port, _timestamp))
return _timestamp
else:
self.gateway.write_log(u'该服务器IP {}无响应'.format(ip))
return timedelta(9, 9, 0)
except:
self.gateway.write_error(u'tdx ping服务器,异常的响应{}'.format(ip))
return timedelta(9, 9, 0)
def select_best_ip(self):
"""
选择行情服务器
:return:
"""
self.gateway.write_log(u'选择通达信股票行情服务器')
data_future = [self.ping(x.get('ip'), x.get('port')) for x in self.ip_list]
best_future_ip = self.ip_list[data_future.index(min(data_future))]
self.gateway.write_log(u'选取 {}:{}'.format(
best_future_ip['ip'], best_future_ip['port']))
return best_future_ip
def connect(self, n=3):
"""
连接通达讯行情服务器
:param n:
:return:
"""
if self.connection_status:
for api in self.api_dict:
if api is not None or getattr(api, "client", None) is not None:
self.gateway.write_log(u'当前已经连接,不需要重新连接')
return
self.gateway.write_log(u'开始通达信行情服务器')
if len(self.symbol_dict) == 0:
self.gateway.write_error(f'本地没有股票信息的缓存配置文件')
else:
self.cov_contracts()
# 选取最佳服务器
if self.best_ip['ip'] is None and self.best_ip['port'] is None:
self.best_ip = self.select_best_ip()
# 创建n个api连接对象实例
for i in range(n):
try:
api = TdxHq_API(heartbeat=True, auto_retry=True, raise_exception=True)
api.connect(self.best_ip['ip'], self.best_ip['port'])
# 尝试获取市场合约统计
c = api.get_security_count(TDXParams.MARKET_SZ)
if c is None or c < 10:
err_msg = u'该服务器IP {}/{}无响应'.format(self.best_ip['ip'], self.best_ip['port'])
self.gateway.write_error(err_msg)
else:
self.gateway.write_log(u'创建第{}个tdx连接'.format(i + 1))
self.api_dict[i] = api
self.last_tick_dt[i] = datetime.now()
self.connection_status = True
self.security_count = c
# if len(symbol_name_map) == 0:
# self.get_stock_list()
except Exception as ex:
self.gateway.write_error(u'连接服务器tdx[{}]异常:{},{}'.format(i, str(ex), traceback.format_exc()))
return
# 创建连接池,每个连接都调用run方法
self.pool = Pool(n)
self.pool.map_async(self.run, range(n))
# 设置上层的连接状态
self.gateway.tdxConnected = True
def reconnect(self, i):
"""
重连
:param i:
:return:
"""
try:
self.best_ip = self.select_best_ip()
api = TdxHq_API(heartbeat=True, auto_retry=True)
api.connect(self.best_ip['ip'], self.best_ip['port'])
# 尝试获取市场合约统计
c = api.get_security_count(TDXParams.MARKET_SZ)
if c is None or c < 10:
err_msg = u'该服务器IP {}/{}无响应'.format(self.best_ip['ip'], self.best_ip['port'])
self.gateway.write_error(err_msg)
else:
self.gateway.write_log(u'重新创建第{}个tdx连接'.format(i + 1))
self.api_dict[i] = api
sleep(1)
except Exception as ex:
self.gateway.write_error(u'重新连接服务器tdx[{}]异常:{},{}'.format(i, str(ex), traceback.format_exc()))
return
def close(self):
"""退出API"""
self.connection_status = False
# 设置上层的连接状态
self.gateway.tdxConnected = False
if self.pool is not None:
self.pool.close()
self.pool.join()
def subscribe(self, req):
"""订阅合约"""
# 这里的设计是,如果尚未登录就调用了订阅方法
# 则先保存订阅请求,登录完成后会自动订阅
vn_symbol = str(req.symbol)
if '.' in vn_symbol:
vn_symbol = vn_symbol.split('.')[0]
self.gateway.write_log(u'通达信行情订阅 {}'.format(str(vn_symbol)))
tdx_symbol = vn_symbol # [0:-2] + 'L9'
tdx_symbol = tdx_symbol.upper()
self.gateway.write_log(u'{}=>{}'.format(vn_symbol, tdx_symbol))
self.symbol_vn_dict[tdx_symbol] = vn_symbol
if tdx_symbol not in self.registed_symbol_set:
self.registed_symbol_set.add(tdx_symbol)
# 查询股票信息
self.qry_instrument(vn_symbol)
self.check_status()
def check_status(self):
# self.gateway.write_log(u'检查tdx接口状态')
if len(self.registed_symbol_set) == 0:
return True
# 若还没有启动连接,就启动连接
over_time = [((datetime.now() - dt).total_seconds() > 60) for dt in self.last_tick_dt.values()]
if not self.connection_status or len(self.api_dict) == 0 or any(over_time):
self.gateway.write_log(u'tdx还没有启动连接,就启动连接')
self.close()
self.pool = None
self.api_dict = {}
pool_cout = getattr(self.gateway, 'tdx_pool_count', 3)
self.connect(pool_cout)
# self.gateway.write_log(u'tdx接口状态正常')
def qry_instrument(self, symbol):
"""
查询/更新股票信息
:return:
"""
if not self.connection_status:
return
api = self.api_dict.get(0)
if api is None:
self.gateway.write_log(u'取不到api连接,更新合约信息失败')
return
# TODO: 取得股票的中文名
market_code = get_tdx_market_code(symbol)
api.to_df(api.get_finance_info(market_code, symbol))
# 如果有预定的订阅合约,提前订阅
# if len(all_contacts) > 0:
# cur_folder = os.path.dirname(__file__)
# export_file = os.path.join(cur_folder,'contracts.csv')
# if not os.path.exists(export_file):
# df = pd.DataFrame(all_contacts)
# df.to_csv(export_file)
def cov_contracts(self):
    """Convert the locally cached symbol table into ContractData pushes.

    Iterates ``self.symbol_dict`` (keyed ``"<symbol>_<market_id>"``),
    drops entries whose cached exchange disagrees with the exchange
    derived from the code (these are TDX index codes), and emits one
    ContractData per remaining entry via ``gateway.on_contract``.
    """
    for symbol_marketid, info in self.symbol_dict.items():
        symbol, market_id = symbol_marketid.split('_')
        exchange = info.get('exchange', '')
        if len(exchange) == 0:
            continue
        vn_exchange_str = get_stock_exchange(symbol)
        # Exclude TDX index codes: their cached exchange differs from the
        # exchange derived from the symbol itself.
        if exchange != vn_exchange_str:
            continue
        exchange = Exchange(exchange)
        # Map the TDX stock_type string onto vn.py Product categories.
        if info['stock_type'] == 'stock_cn':
            product = Product.EQUITY
        elif info['stock_type'] in ['bond_cn', 'cb_cn']:
            product = Product.BOND
        elif info['stock_type'] == 'index_cn':
            product = Product.INDEX
        elif info['stock_type'] == 'etf_cn':
            product = Product.ETF
        else:
            product = Product.EQUITY
        volume_tick = info['volunit']
        # STAR-market codes (688xxx) trade in lots of 200.
        if symbol.startswith('688'):
            volume_tick = 200
        contract = ContractData(
            gateway_name=self.gateway_name,
            symbol=symbol,
            exchange=exchange,
            name=info['name'],
            product=product,
            # decimal_point is the number of price decimals, e.g. 2 -> 0.01.
            pricetick=round(0.1 ** info['decimal_point'], info['decimal_point']),
            size=1,
            min_volume=volume_tick,
            margin_rate=1
        )
        if product != Product.INDEX:
            # Cache contract symbol => Chinese name.
            symbol_name_map.update({contract.symbol: contract.name})
            # Cache symbol => exchange mapping.
            symbol_exchange_map[contract.symbol] = contract.exchange
            self.contract_dict.update({contract.symbol: contract})
            self.contract_dict.update({contract.vt_symbol: contract})
            # Push to the gateway.
            self.gateway.on_contract(contract)
def get_stock_list(self):
    """Download the full security list (code & name) from the tdx server.

    Pages through both markets (0 = Shenzhen, 1 = Shanghai) in chunks of
    1000, keeps only stocks / ETFs / bonds / convertible bonds, pushes a
    ContractData for each, and returns a DataFrame of ``code``/``name``
    indexed by code — or None when no API connection is available.
    """
    api = self.api_dict.get(0)
    if api is None:
        self.gateway.write_log(u'取不到api连接,更新合约信息失败')
        return None
    self.gateway.write_log(f'查询所有的股票信息')
    # Page through both markets (j=0 sz, j=1 sh), 1000 rows per request,
    # tagging each frame with its market and indexing by (code, sse).
    data = pd.concat(
        [pd.concat([api.to_df(api.get_security_list(j, i * 1000)).assign(sse='sz' if j == 0 else 'sh').set_index(
            ['code', 'sse'], drop=False) for i in range(int(api.get_security_count(j) / 1000) + 1)], axis=0) for j
         in range(2)], axis=0)
    sz = data.query('sse=="sz"')
    sh = data.query('sse=="sh"')
    # Classify each code into a security type (stock/etf/bond/index/...).
    sz = sz.assign(sec=sz.code.apply(get_stock_type_sz))
    sh = sh.assign(sec=sh.code.apply(get_stock_type_sh))
    # Keep tradeable types only; names are truncated to 6 characters.
    temp_df = pd.concat([sz, sh]).query('sec in ["stock_cn","etf_cn","bond_cn","cb_cn"]').sort_index().assign(
        name=data['name'].apply(lambda x: str(x)[0:6]))
    hq_codelist = temp_df.loc[:, ['code', 'name']].set_index(['code'], drop=False)
    for i in range(0, len(temp_df)):
        row = temp_df.iloc[i]
        if row['sec'] == 'etf_cn':
            product = Product.ETF
        elif row['sec'] in ['bond_cn', 'cb_cn']:
            product = Product.BOND
        else:
            product = Product.EQUITY
        # Bonds trade in lots of 10, stocks/ETFs in 100, STAR (688xxx) in 200.
        volume_tick = 100 if product != Product.BOND else 10
        if row['code'].startswith('688'):
            volume_tick = 200
        contract = ContractData(
            gateway_name=self.gateway_name,
            symbol=row['code'],
            exchange=Exchange.SSE if row['sse'] == 'sh' else Exchange.SZSE,
            name=row['name'],
            product=product,
            pricetick=round(0.1 ** row['decimal_point'], row['decimal_point']),
            size=1,
            min_volume=volume_tick,
            margin_rate=1
        )
        # Cache contract symbol => Chinese name.
        symbol_name_map.update({contract.symbol: contract.name})
        # Cache symbol => exchange mapping.
        symbol_exchange_map[contract.symbol] = contract.exchange
        self.contract_dict.update({contract.symbol: contract})
        self.contract_dict.update({contract.vt_symbol: contract})
        # Push to the gateway.
        self.gateway.on_contract(contract)
    return hq_codelist
def run(self, i):
    """
    Worker loop for pooled tdx connection *i*.

    Version 1 (not used): pool threads pull requests from a shared queue.
    Version 2 (implemented here): each thread polls the subscribed symbols
    whose enumeration index satisfies ``idx % api_count == i``, sleeping
    ``self.req_interval`` seconds between full scans.

    :param i: index of this thread's connection in ``self.api_dict``.
    """
    # Version 2:
    try:
        api_count = len(self.api_dict)
        last_dt = datetime.now()
        self.gateway.write_log(u'开始运行tdx[{}],{}'.format(i, last_dt))
        while self.connection_status:
            symbols = set()
            for idx, tdx_symbol in enumerate(list(self.registed_symbol_set)):
                if idx % api_count == i:
                    try:
                        symbols.add(tdx_symbol)
                        self.processReq(tdx_symbol, i)
                    except BrokenPipeError as bex:
                        # Transport died: reconnect, back off, restart scan.
                        self.gateway.write_error(u'BrokenPipeError{},重试重连tdx[{}]'.format(str(bex), i))
                        self.reconnect(i)
                        sleep(5)
                        break
                    except Exception as ex:
                        self.gateway.write_error(
                            u'tdx[{}] exception:{},{}'.format(i, str(ex), traceback.format_exc()))
                        # Any other failure also triggers a reconnect of
                        # this slot; the scan then continues.
                        self.gateway.write_error(u'重试重连tdx[{}]'.format(i))
                        print(u'重试重连tdx[{}]'.format(i), file=sys.stderr)
                        self.reconnect(i)
            sleep(self.req_interval)
            dt = datetime.now()
            # Heartbeat log once per minute with the symbols processed.
            if last_dt.minute != dt.minute:
                self.gateway.write_log('tdx[{}] check point. {}, process symbols:{}'.format(i, dt, symbols))
                last_dt = dt
    except Exception as ex:
        self.gateway.write_error(u'tdx[{}] pool.run exception:{},{}'.format(i, str(ex), traceback.format_exc()))
    self.gateway.write_error(u'tdx[{}] {}退出'.format(i, datetime.now()))
def processReq(self, req, i):
    """
    Poll one symbol's live quote on connection *i* and push it as a TickData.

    :param req: tdx symbol string, optionally suffixed ".SZSE"/".SSE".
    :param i: index of the pooled API connection to use.
    :raises Exception: when connection *i* has no API handle (caller's
        ``run`` loop catches this and reconnects).
    """
    symbol = req
    if '.' in symbol:
        # Explicit exchange suffix decides the tdx market code directly.
        symbol, exchange = symbol.split('.')
        if exchange == 'SZSE':
            market_code = 0
        else:
            market_code = 1
    else:
        # Otherwise derive market and exchange from the code itself.
        market_code = get_tdx_market_code(symbol)
        exchange = get_stock_exchange(symbol)
    exchange = Exchange(exchange)
    api = self.api_dict.get(i, None)
    if api is None:
        self.gateway.write_log(u'tdx[{}] Api is None'.format(i))
        raise Exception(u'tdx[{}] Api is None'.format(i))
    # decimal_point tells how many price decimals this instrument uses;
    # values > 2 mean the server reports scaled integers (see below).
    symbol_config = self.symbol_dict.get('{}_{}'.format(symbol, market_code), {})
    decimal_point = symbol_config.get('decimal_point', 2)
    rt_list = api.get_security_quotes([(market_code, symbol)])
    if rt_list is None or len(rt_list) == 0:
        self.gateway.write_log(u'tdx[{}]: rt_list为空'.format(i))
        return
    # Record that this connection produced data (used by check_status).
    if i in self.last_tick_dt:
        self.last_tick_dt[i] = datetime.now()
    # Each quote is an OrderedDict like:
    #   [('market', 0), ('code', '000001'), ('price', 13.79),
    #    ('last_close', 13.69), ('open', 13.65), ('high', 13.81),
    #    ('low', 13.56), ('vol', 193996), ('cur_vol', 96),
    #    ('amount', 264540864.0),
    #    ('bid1', 13.79), ('ask1', 13.8), ('bid_vol1', 877), ('ask_vol1', 196),
    #    ... bid2..bid5 / ask2..ask5 with volumes ...]
    dt = datetime.now()
    for d in list(rt_list):
        # Skip quotes with no traded volume — stale/invalid snapshots.
        if d.get('cur_vol', 0) <= 0:
            continue
        code = d.get('code', None)
        # Skip quotes whose code does not match the requested symbol.
        if symbol != code and code is not None:
            self.gateway.write_log(u'忽略合约{} {} 不一致的tick数据:{}'.format(symbol, d.get('code'), rt_list))
            continue
        tick = TickData(
            gateway_name=self.gateway_name,
            symbol=symbol,
            exchange=exchange,
            datetime=dt,
            date=dt.strftime('%Y-%m-%d'),
            time=dt.strftime('%H:%M:%S')
        )
        if decimal_point > 2:
            # Server prices are scaled by 10**(decimal_point-2); rescale
            # every price field back to real values.
            tick.pre_close = round(d.get('last_close') / (10 ** (decimal_point - 2)), decimal_point)
            tick.high_price = round(d.get('high') / (10 ** (decimal_point - 2)), decimal_point)
            tick.open_price = round(d.get('open') / (10 ** (decimal_point - 2)), decimal_point)
            tick.low_price = round(d.get('low') / (10 ** (decimal_point - 2)), decimal_point)
            tick.last_price = round(d.get('price') / (10 ** (decimal_point - 2)), decimal_point)
            tick.bid_price_1 = round(d.get('bid1') / (10 ** (decimal_point - 2)), decimal_point)
            tick.bid_volume_1 = d.get('bid_vol1')
            tick.ask_price_1 = round(d.get('ask1') / (10 ** (decimal_point - 2)), decimal_point)
            tick.ask_volume_1 = d.get('ask_vol1')
            # Depth levels 2-5 are only present when bid5 is non-zero.
            if d.get('bid5'):
                tick.bid_price_2 = round(d.get('bid2') / (10 ** (decimal_point - 2)), decimal_point)
                tick.bid_volume_2 = d.get('bid_vol2')
                tick.ask_price_2 = round(d.get('ask2') / (10 ** (decimal_point - 2)), decimal_point)
                tick.ask_volume_2 = d.get('ask_vol2')
                tick.bid_price_3 = round(d.get('bid3') / (10 ** (decimal_point - 2)), decimal_point)
                tick.bid_volume_3 = d.get('bid_vol3')
                tick.ask_price_3 = round(d.get('ask3') / (10 ** (decimal_point - 2)), decimal_point)
                tick.ask_volume_3 = d.get('ask_vol3')
                tick.bid_price_4 = round(d.get('bid4') / (10 ** (decimal_point - 2)), decimal_point)
                tick.bid_volume_4 = d.get('bid_vol4')
                tick.ask_price_4 = round(d.get('ask4') / (10 ** (decimal_point - 2)), decimal_point)
                tick.ask_volume_4 = d.get('ask_vol4')
                tick.bid_price_5 = round(d.get('bid5') / (10 ** (decimal_point - 2)), decimal_point)
                tick.bid_volume_5 = d.get('bid_vol5')
                tick.ask_price_5 = round(d.get('ask5') / (10 ** (decimal_point - 2)), decimal_point)
                tick.ask_volume_5 = d.get('ask_vol5')
        else:
            # Standard 2-decimal instruments: prices are already real values.
            tick.pre_close = d.get('last_close')
            tick.high_price = d.get('high')
            tick.open_price = d.get('open')
            tick.low_price = d.get('low')
            tick.last_price = d.get('price')
            tick.bid_price_1 = d.get('bid1')
            tick.bid_volume_1 = d.get('bid_vol1')
            tick.ask_price_1 = d.get('ask1')
            tick.ask_volume_1 = d.get('ask_vol1')
            if d.get('bid5'):
                tick.bid_price_2 = d.get('bid2')
                tick.bid_volume_2 = d.get('bid_vol2')
                tick.ask_price_2 = d.get('ask2')
                tick.ask_volume_2 = d.get('ask_vol2')
                tick.bid_price_3 = d.get('bid3')
                tick.bid_volume_3 = d.get('bid_vol3')
                tick.ask_price_3 = d.get('ask3')
                tick.ask_volume_3 = d.get('ask_vol3')
                tick.bid_price_4 = d.get('bid4')
                tick.bid_volume_4 = d.get('bid_vol4')
                tick.ask_price_4 = d.get('ask4')
                tick.ask_volume_4 = d.get('ask_vol4')
                tick.bid_price_5 = d.get('bid5')
                tick.bid_volume_5 = d.get('bid_vol5')
                tick.ask_price_5 = d.get('ask5')
                tick.ask_volume_5 = d.get('ask_vol5')
        tick.volume = d.get('vol', 0)
        # NOTE(review): 'amount' (turnover) is stored in open_interest,
        # apparently repurposing the field for stocks — confirm downstream use.
        tick.open_interest = d.get('amount', 0)
        # Millisecond fix-up: two ticks in the same wall-clock second get
        # 0ms and 500ms so their timestamps stay distinct and ordered.
        last_tick = self.symbol_tick_dict.get(symbol, None)
        if (last_tick is not None) and tick.datetime.replace(microsecond=0) == last_tick.datetime:
            tick.datetime = tick.datetime.replace(microsecond=500)
            tick.time = tick.datetime.strftime('%H:%M:%S.%f')[0:12]
        else:
            tick.datetime = tick.datetime.replace(microsecond=0)
            tick.time = tick.datetime.strftime('%H:%M:%S.%f')[0:12]
        tick.date = tick.datetime.strftime('%Y-%m-%d')
        tick.trading_day = tick.datetime.strftime('%Y-%m-%d')
        # Stocks/indexes: approximate limits as +/-10% of previous close.
        tick.limit_up = tick.pre_close * 1.1
        tick.limit_down = tick.pre_close * 0.9
        # Discard ticks outside trading hours.
        # NOTE(review): these use `return` (not `continue`), aborting the
        # remaining quotes in rt_list as well; and the `>= 0` test drops
        # the entire 15:xx hour despite 15 being whitelisted above —
        # looks intentional (market closes at 15:00) but confirm.
        if tick.datetime.hour not in [9, 10, 11, 13, 14, 15]:
            return
        elif tick.datetime.hour == 9 and tick.datetime.minute <= 25:
            return
        elif tick.datetime.hour == 15 and tick.datetime.minute >= 0:
            return
        self.symbol_tick_dict[symbol] = tick
        self.gateway.on_tick(tick)
class GjTdApi(object):
    """Guojin Securities trading API backed by an easytrader RPC client.

    Queries account / positions / orders / trades and sends or cancels
    orders, translating easytrader's Chinese-keyed dicts into vn.py
    objects pushed through the owning gateway.
    """

    def __init__(self, gateway: GjGateway):
        """Bind to the owning gateway; no connection is made yet."""
        super().__init__()

        self.gateway: GjGateway = gateway
        self.gateway_name: str = gateway.gateway_name

        self.userid: str = ""        # fund account id (资金账号)
        self.password: str = ""      # login password (登录密码)
        self.host: str = "127.0.0.1"
        self.port: int = 1430

        # easytrader client instance (created in connect()).
        self.api = None

        # Cached current trading day.
        self.trading_day = datetime.now().strftime('%Y-%m-%d')   # e.g. 2020-01-13
        self.trading_date = self.trading_day.replace('-', '')    # e.g. 20200113

        self.connect_status: bool = False
        self.login_status: bool = False

        # All trades seen this session, keyed by tradeid.
        self.trades = {}
        # This gateway's orders, keyed by sys_orderid.
        self.orders = {}

    def close(self):
        """Drop the easytrader client reference."""
        self.api = None

    def connect(self, user_id, user_pwd, host, port):
        """Create the easytrader RPC client and log in.

        :param user_id: fund account id.
        :param user_pwd: login password.
        :param host: RPC server host.
        :param port: RPC server port.
        """
        self.userid = user_id
        self.password = user_pwd
        self.rpc_host = host
        self.rpc_port = port

        # Create the easytrader client against the gj_client broker profile.
        self.api = easytrader_use(broker='gj_client', host=self.rpc_host, port=self.rpc_port)
        # Supply credentials (account id, password).
        self.api.prepare(user=self.userid, password=self.password)
        self.login_status = True

    def query_account(self):
        """Query the fund account balance and push an AccountData."""
        if not self.api:
            return
        data = self.api.balance
        if not isinstance(data, dict):
            return
        if '总资产' not in data:
            return
        account = AccountData(
            gateway_name=self.gateway_name,
            accountid=self.userid,
            balance=float(data["总资产"]),
            # frozen = total assets minus available cash balance.
            frozen=float(data["总资产"]) - float(data["资金余额"]),
            currency="人民币",
            trading_day=self.trading_day
        )
        self.gateway.on_account(account)

    def query_position(self):
        """Query current holdings and push a PositionData per security."""
        if not self.api:
            return
        for data in self.api.position:
            if not isinstance(data, dict):
                continue
            symbol = data.get("证券代码", None)
            if not symbol:
                continue
            # Resolve the exchange from cache, else derive from the code.
            exchange = symbol_exchange_map.get(symbol, None)
            if not exchange:
                exchange_str = get_stock_exchange(code=symbol)
                if len(exchange_str) > 0:
                    exchange = Exchange(exchange_str)
                    symbol_exchange_map.update({symbol: exchange})
            # Resolve the Chinese name from cache, else from the response.
            name = symbol_name_map.get(symbol, None)
            if not name:
                name = data["证券名称"]
                symbol_name_map.update({symbol: name})

            position = PositionData(
                gateway_name=self.gateway_name,
                accountid=self.userid,
                symbol=symbol,
                exchange=exchange,
                direction=Direction.NET,
                name=name,
                volume=int(data["股票余额"]),
                yd_volume=int(data["可用余额"]),
                price=float(data["参考成本价"]),
                cur_price=float(data["市价"]),
                pnl=float(data["参考盈亏"]),
                holder_id=data["股东帐户"]
            )
            self.gateway.on_position(position)

    def query_orders(self):
        """Query today's entrusts; push new or changed orders via on_order."""
        if not self.api:
            return
        for data in self.api.today_entrusts:
            if not isinstance(data, dict):
                continue
            sys_orderid = str(data.get("合同编号", ''))
            if not sys_orderid:
                continue

            # Look up the local cache first.
            order = self.orders.get(sys_orderid, None)
            order_date = data["委托日期"]   # e.g. 20170313
            order_time = data["委托时间"]   # e.g. '09:40:30'
            order_status = STATUS_NAME2VT.get(data["备注"])
            if order:
                # Known order: skip when neither status nor fill changed.
                if order_status == order.status and order.traded == float(data["成交数量"]):
                    continue
                order.status = order_status
                order.traded = float(data["成交数量"])
            # Order not yet in the local map: build and register it.
            else:
                # (Deliberately kept from the original: terminal states are
                # NOT filtered out here.)
                # if order_status in [Status.SUBMITTING, Status.REJECTED, Status.CANCELLED, Status.CANCELLING]:
                #    continue
                order_dt = datetime.strptime(f'{order_date} {order_time}', "%Y%m%d %H:%M:%S")
                direction = DIRECTION_STOCK_NAME2VT.get(data["操作"])
                symbol = data.get("证券代码")
                if not symbol:
                    continue
                exchange = Exchange(get_stock_exchange(symbol))
                if not exchange:
                    continue
                if direction is None:
                    direction = Direction.NET
                order = OrderData(
                    gateway_name=self.gateway_name,
                    symbol=symbol,
                    exchange=exchange,
                    orderid=sys_orderid,
                    sys_orderid=sys_orderid,
                    accountid=self.userid,
                    type=ORDERTYPE_NAME2VT.get(data.get("价格类型"), OrderType.LIMIT),
                    direction=direction,
                    offset=Offset.NONE,
                    price=float(data["委托价格"]),
                    volume=float(data["委托数量"]),
                    traded=float(data["成交数量"]),
                    status=order_status,
                    datetime=order_dt,
                    time=order_dt.strftime('%H:%M:%S')
                )
                # Emit the order-update event immediately.
                self.gateway.write_log(f'账号订单查询,新增:{order.__dict__}')
                self.orders[order.orderid] = order
                self.gateway.on_order(copy.deepcopy(order))
                continue

    def query_trades(self):
        """Query today's fills; push previously unseen trades via on_trade."""
        if not self.api:
            return
        for data in self.api.today_trades:
            if not isinstance(data, dict):
                continue
            sys_orderid = str(data.get("合同编号", ""))
            sys_tradeid = str(data.get("成交编号", ""))
            if not sys_orderid:
                continue

            # Check local caches for both the trade and its parent order.
            trade = self.trades.get(sys_tradeid, None)
            order = self.orders.get(sys_orderid, None)
            # Only push fills that are completely new to this session.
            if trade is None and order is None:
                trade_date = self.trading_day
                trade_time = data["成交时间"]
                trade_dt = datetime.strptime(f'{trade_date} {trade_time}', "%Y-%m-%d %H:%M:%S")
                symbol = data.get('证券代码')
                exchange = Exchange(get_stock_exchange(symbol))
                trade = TradeData(
                    gateway_name=self.gateway_name,
                    symbol=symbol,
                    exchange=exchange,
                    orderid=sys_tradeid,
                    tradeid=sys_tradeid,
                    sys_orderid=sys_orderid,
                    accountid=self.userid,
                    direction=DIRECTION_STOCK_NAME2VT.get(data["操作"]),
                    offset=Offset.NONE,
                    price=float(data["成交均价"]),
                    volume=float(data["成交数量"]),
                    datetime=trade_dt,
                    time=trade_dt.strftime('%H:%M:%S'),
                    trade_amount=float(data["成交金额"]),
                    commission=0
                )
                self.trades[sys_tradeid] = trade
                self.gateway.on_trade(copy.copy(trade))
                continue

    def send_order(self, req: OrderRequest):
        """Submit an order; return its vt_orderid, or "" on failure."""
        self.gateway.write_log(f'委托发单:{req.__dict__}')
        if req.direction == Direction.LONG:
            ret = self.api.buy(req.symbol, price=req.price, amount=req.volume)
        else:
            ret = self.api.sell(req.symbol, price=req.price, amount=req.volume)
        if isinstance(ret, dict) and 'entrust_no' in ret:
            sys_orderid = str(ret['entrust_no'])
            # req => order
            order = req.create_order_data(orderid=sys_orderid, gateway_name=self.gateway_name)
            order.offset = Offset.NONE
            order.sys_orderid = sys_orderid
            order.accountid = self.userid
            # Initial status: submitting.
            order.status = Status.SUBMITTING
            # Re-arm the gateway's query cycle so the fill shows up quickly.
            self.gateway.reset_query()
            # Register locally and emit the on_order event.
            self.gateway.write_log(f'send_order,提交easytrader委托:{order.__dict__}')
            self.orders[sys_orderid] = order
            self.gateway.on_order(order)
            return order.vt_orderid
        else:
            # BUGFIX: original lacked the f-prefix, so the literal text
            # '{ret}' was logged instead of the actual response payload.
            self.gateway.write_error(f'返回异常:{ret}')
            return ""

    def cancel_order(self, req: CancelRequest):
        """Cancel one order.

        :param req: cancel request carrying the local orderid.
        :return: True when the broker confirms the cancel, else False.
        """
        self.gateway.write_log(f'委托撤单:{req.__dict__}')
        if not self.api:
            return False
        order = self.orders.get(req.orderid, None)
        # Unknown order: nothing to cancel.
        if order is None:
            self.gateway.write_log(f'订单{req.orderid}不存在, 撤单失败')
            return False
        # Already filled / rejected / cancelling / cancelled: not cancellable.
        if order.status in [Status.ALLTRADED, Status.REJECTED, Status.CANCELLING,
                            Status.CANCELLED]:
            self.gateway.write_log(f'订单{req.orderid}存在, 状态为:{order.status}, 不能再撤单')
            return False

        ret = self.api.cancel_entrust(order.sys_orderid)
        if '已成功' in ret.get('message', ''):
            # Re-arm the gateway's query cycle to pick up the cancel.
            self.gateway.reset_query()
            return True
        else:
            self.gateway.write_error('委托撤单失败:{}'.format(ret.get('message')))
            return False

    def cancel_all(self):
        """Cancel every locally-known order that is still active."""
        self.gateway.write_log(f'全撤单')
        if not self.api:
            return
        for order in self.orders.values():
            if order.status in [Status.SUBMITTING, Status.NOTTRADED, Status.PARTTRADED, Status.UNKNOWN] \
                    and order.sys_orderid:
                ret = self.api.cancel_entrust(order.sys_orderid)
class TqMdApi():
    """TianQin (tqsdk) market-data API.

    Maintains a set of quote subscriptions, converts tqsdk quote dicts
    into vn.py TickData in a background thread, and can download
    historical bars.
    """

    def __init__(self, gateway):
        """Bind to the owning gateway; no connection is made yet."""
        super().__init__()
        self.gateway = gateway
        self.gateway_name = gateway.gateway_name

        self.api = None
        self.is_connected = False
        # vt_symbols already subscribed.
        self.subscribe_array = []
        # List of (vt_symbol, quote-object) pairs being watched.
        self.quote_objs = []

        # Background thread that pumps api.wait_update().
        self.update_thread = None
        # All known instruments.
        self.all_instruments = []

        self.ticks = {}

    def connect(self, setting=None):
        """Create the TqApi client and start the update thread.

        :param setting: optional config dict; currently unused.
            (BUGFIX: was a mutable default argument ``{}``.)
        """
        if self.api and self.is_connected:
            self.gateway.write_log(f'天勤行情已经接入,无需重新连接')
            return
        try:
            from tqsdk import TqApi
            self.api = TqApi(_stock=True, url="wss://api.shinnytech.com/t/nfmd/front/mobile")
        except Exception as e:
            # BUGFIX: original used f'...'.format(e), which dropped the
            # exception text from the log message entirely.
            self.gateway.write_log('天勤股票行情API接入异常:{}'.format(str(e)))
            self.gateway.write_log(traceback.format_exc())
        if self.api:
            self.is_connected = True
            self.gateway.write_log(f'天勤股票行情API已连接')
            self.update_thread = Thread(target=self.update)
            self.update_thread.start()

    def generate_tick_from_quote(self, vt_symbol, quote) -> TickData:
        """
        Build a TickData from a tqsdk quote dict.

        :param vt_symbol: "symbol.EXCHANGE" identifier.
        :param quote: tqsdk quote mapping; NaN values are replaced by 0.
        """
        # Scrub NaN: NaN != NaN, so such values are replaced with 0.
        quote = {k: 0 if v != v else v for k, v in quote.items()}
        symbol, exchange = extract_vt_symbol(vt_symbol)
        return TickData(
            symbol=symbol,
            exchange=exchange,
            datetime=datetime.strptime(quote["datetime"], "%Y-%m-%d %H:%M:%S.%f"),
            name=symbol,
            volume=quote["volume"],
            open_interest=quote["open_interest"],
            last_price=quote["last_price"],
            limit_up=quote["upper_limit"],
            limit_down=quote["lower_limit"],
            open_price=quote["open"],
            high_price=quote["highest"],
            low_price=quote["lowest"],
            pre_close=quote["pre_close"],
            bid_price_1=quote["bid_price1"],
            bid_price_2=quote["bid_price2"],
            bid_price_3=quote["bid_price3"],
            bid_price_4=quote["bid_price4"],
            bid_price_5=quote["bid_price5"],
            ask_price_1=quote["ask_price1"],
            ask_price_2=quote["ask_price2"],
            ask_price_3=quote["ask_price3"],
            ask_price_4=quote["ask_price4"],
            ask_price_5=quote["ask_price5"],
            bid_volume_1=quote["bid_volume1"],
            bid_volume_2=quote["bid_volume2"],
            bid_volume_3=quote["bid_volume3"],
            bid_volume_4=quote["bid_volume4"],
            bid_volume_5=quote["bid_volume5"],
            ask_volume_1=quote["ask_volume1"],
            ask_volume_2=quote["ask_volume2"],
            ask_volume_3=quote["ask_volume3"],
            ask_volume_4=quote["ask_volume4"],
            ask_volume_5=quote["ask_volume5"],
            gateway_name=self.gateway_name
        )

    def update(self) -> None:
        """
        Pump tqsdk updates and push ticks for every changed quote.
        Runs in the background thread started by connect().
        """
        while self.api.wait_update():
            for vt_symbol, quote in self.quote_objs:
                if self.api.is_changing(quote):
                    tick = self.generate_tick_from_quote(vt_symbol, quote)
                    # BUGFIX: the original chained these with `and`
                    # (tick and on_tick(tick) and on_custom_tick(tick));
                    # on_tick returns None, so on_custom_tick was never
                    # reached. Call both explicitly.
                    if tick:
                        self.gateway.on_tick(tick)
                        self.gateway.on_custom_tick(tick)

    def subscribe(self, req: SubscribeRequest) -> None:
        """Subscribe to one vt_symbol's quote stream (idempotent)."""
        if req.vt_symbol not in self.subscribe_array:
            symbol, exchange = extract_vt_symbol(req.vt_symbol)
            try:
                quote = self.api.get_quote(f'{exchange.value}.{symbol}')
                self.quote_objs.append((req.vt_symbol, quote))
                self.subscribe_array.append(req.vt_symbol)
            except Exception as ex:
                self.gateway.write_log('订阅天勤行情异常:{}'.format(str(ex)))

    def query_history(self, req: HistoryRequest) -> List[BarData]:
        """
        Download historical bars for the requested symbol and interval.

        :return: list of BarData between req.start and req.end.
        """
        symbol = req.symbol
        exchange = req.exchange
        interval = req.interval
        start = req.start
        end = req.end
        # Identifier and interval in tqsdk's own conventions.
        tq_symbol = f'{exchange.value}.{symbol}'
        tq_interval = INTERVAL_VT2TQ.get(interval)
        end += timedelta(1)
        total_days = end - start
        # tqsdk serves at most 8964 bars per request.
        min_length = min(8964, total_days.days * 500)
        df = self.api.get_kline_serial(tq_symbol, tq_interval, min_length).sort_values(
            by=["datetime"]
        )

        # Align timestamps (tqsdk epoch offset).
        df["datetime"] = pd.to_datetime(df["datetime"] + TIME_GAP)

        # Clip to the requested window.
        df = df[(df["datetime"] >= start - timedelta(days=1)) & (df["datetime"] < end)]
        data: List[BarData] = []
        if df is not None:
            for ix, row in df.iterrows():
                bar = BarData(
                    symbol=symbol,
                    exchange=exchange,
                    interval=interval,
                    datetime=row["datetime"].to_pydatetime(),
                    open_price=row["open"],
                    high_price=row["high"],
                    low_price=row["low"],
                    close_price=row["close"],
                    volume=row["volume"],
                    open_interest=row.get("close_oi", 0),
                    gateway_name=self.gateway_name,
                )
                data.append(bar)
        return data

    def close(self) -> None:
        """Shut down the tqsdk client and join the update thread."""
        try:
            if self.api and self.api.wait_update():
                self.api.close()
                self.is_connected = False
                if self.update_thread:
                    self.update_thread.join()
        except Exception as e:
            self.gateway.write_log('退出天勤行情api异常:{}'.format(str(e)))
|
mit
|
mesowx/MesoPy
|
examples/Examples Source Code/Map_Plot.py
|
3
|
2717
|
# -*- coding: utf-8 -*-
"""
Created on Fri May 29 09:54:26 2015
@author: joshclark
This script demonstrates using cartopy to view data obtained from MesoPy on a map
It uses some boilerplate from the cartopy example project for mapquest tiles
"""
import matplotlib.pyplot as plt
from matplotlib.transforms import offset_copy
import cartopy.crs as ccrs
import cartopy.io.img_tiles as cimgt
from MesoPy import Meso
# import pprint
def main():
    """Fetch the latest Colorado station temperatures via MesoPy and plot
    them on a MapQuest aerial basemap with cartopy."""
    # This is just useful for formatting returned dict
    # pp = pprint.PrettyPrinter(indent=2)

    # Create instance of Meso object, pass in YOUR token
    m = Meso(token='YOUR TOKEN')

    # Use to lookup stations, could specify counties or whatever here
    # findstationids = m.station_list(state='CO')
    # print(findstationids)

    # Grab most recent temp (F) ob in last 90 min at each of the below stations.
    # NOTE(review): this is a one-element list holding a single
    # comma-separated string — MesoPy appears to accept that form; confirm.
    stations = ['kgxy, kccu, kcos, kden, kgjt, kbdu, kpub, klhx, kspd, kdro, ksbs, keeo, kguc, klic, '
                'kstk, kals, ktad']
    latest = m.latest(stid=stations, within='90', vars='air_temp', units='temp|F')

    # Collect (lat, lon, temp, station-id) tuples for each returned station.
    data = []
    [data.append((float(ob['LATITUDE']), float(ob['LONGITUDE']), float(ob['OBSERVATIONS']['air_temp_value_1']['value']),
                  ob['STID'])) for ob in latest['STATION']]
    print(data)

    # Create a MapQuest open aerial instance.
    map_quest_aerial = cimgt.MapQuestOpenAerial()

    # Create a GeoAxes in the tile's projection.
    ax = plt.axes(projection=map_quest_aerial.crs)

    # Limit the extent of the map to Colorado's borders
    ax.set_extent([-102.03, -109.03, 37, 41])

    # Add the MapQuest data at zoom level 8.
    ax.add_image(map_quest_aerial, 8)

    # Plot lat/long pts with below params
    for lat, lon, temp, stid in data:
        plt.plot(lon, lat, marker='o', color='y', markersize=1,
                 alpha=0.7, transform=ccrs.Geodetic())

    # Transforms for the text func we're about to call
    geodetic_transform = ccrs.Geodetic()._as_mpl_transform(ax)
    text_transform = offset_copy(geodetic_transform, units='dots', x=0, y=0)

    # Plot temp and station id for each of the markers
    for lat, lon, temp, stid in data:
        plt.text(lon, lat, stid + '\n' + str(round(temp, 1)) + u' \N{DEGREE SIGN}' + 'F',
                 verticalalignment='center', horizontalalignment='center',
                 transform=text_transform, fontsize=9,
                 bbox=dict(facecolor='wheat', alpha=0.5, boxstyle='round'))
    plt.title('Current Weather Around Colorado')
    plt.show()


if __name__ == '__main__':
    main()
|
mit
|
mauimuc/gptt
|
src/fig_kernel_pri.py
|
1
|
2456
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
__author__ = "Stefan Mauerberger"
__copyright__ = "Copyright (C) 2017 Stefan Mauerberger"
__license__ = "GPLv3"
''' Save a plot of the prior covariance kernel as PGF file '''

# NOTE: Python-2-era script (ConfigParser import, readfp).

import numpy as np
from matplotlib import pyplot as plt
from gptt import dt_latlon, gauss_kernel
from plotting import rcParams, prepare_map, cmap_sd
from ConfigParser import ConfigParser
from reference import dt_obs
from gptt import read_station_file, ListPairs

plt.rcParams.update(rcParams)

# Read parameter file
config = ConfigParser()
with open('../par/example.ini') as fh:
    config.readfp(fh)

# Kernel Parameters
tau = config.getfloat('Prior', 'tau')  # A priori uncertainty; standard deviation
ell = config.getfloat('Prior', 'ell')  # Characteristic length

# Read station coordinates
station_file = config.get('Observations', 'station_file')
all_stations = read_station_file(station_file)
# Read pseudo data
data_file = config.get('Observations', 'data')
pseudo_data = np.genfromtxt(data_file, dtype=dt_obs)
# Observations
pairs = ListPairs(pseudo_data, all_stations)
# Only those stations occurring in the data
stations = pairs.stations

# Prepare map
fig = plt.figure()
ax_map = fig.add_subplot(111)
m = prepare_map(ax=ax_map)
# Add axes for the colorbar, positioned just below the map axes.
bbox = ax_map.get_position()
ax_cbr = fig.add_axes((bbox.x0, bbox.y0 - 0.06, bbox.width, 0.04))
# Plot station locations
m.scatter(stations['lon'], stations['lat'], lw=0, color='g', latlon=True, zorder=10)

# Find the station pair that is furthest apart (largest central angle).
pair = max(pairs, key=lambda pair: pair.central_angle)
# Middle point of the great circle path
# NOTE(review): index 12 presumably picks the midpoint of a fixed-length
# discretized path — confirm against gptt's path resolution.
p = pair.great_circle_path[12]

# Make a lat, lon grid around the middle point (N is an imaginary mgrid
# step count: 100 samples per axis).
N = 100j
lllat = p['lat'] - 1
urlat = p['lat'] + 1
lllon = p['lon'] - 1.5
urlon = p['lon'] + 1.5
grid = np.rec.fromarrays(np.mgrid[lllat:urlat:N, lllon:urlon:N], dtype=dt_latlon)

# Evaluate the Gaussian kernel centred at the middle point and mask
# values below 1/50 of the maximum so the plot stays readable.
K = gauss_kernel(p, grid, tau=tau, ell=ell)
K = np.ma.masked_less(K, K.max() / 50)

# Plot correlation kernel; pcolormesh needs the cell-corner grid, hence
# one extra sample per axis (N+1j == 101 samples).
lat, lon = np.mgrid[lllat:urlat:N + 1j, lllon:urlon:N + 1j]
pcol = m.pcolormesh(lon, lat, K, latlon=True, cmap=cmap_sd, rasterized=True, \
                    vmin=0, vmax=K.max(), zorder=1)

# Make colorbar
cbar = plt.colorbar(pcol, cax=ax_cbr, orientation='horizontal')
cbar.set_label(r'$m^2 s^{-2}$')
cbar.solids.set_edgecolor("face")

plt.savefig('../fig_kernel_pri.pgf')
|
gpl-3.0
|
DSLituiev/scikit-learn
|
examples/linear_model/plot_ols.py
|
104
|
1936
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Linear Regression Example
=========================================================
This example uses only the first feature of the `diabetes` dataset, in
order to illustrate a two-dimensional plot of this regression technique. The
straight line can be seen in the plot, showing how linear regression attempts
to draw a straight line that will best minimize the residual sum of squares
between the observed responses in the dataset, and the responses predicted by
the linear approximation.

The coefficients, the residual sum of squares and the variance score are also
calculated.

"""
# BUGFIX: the printed module docstring read "uses the only the first
# feature"; corrected to "uses only the first feature".
print(__doc__)


# Code source: Jaques Grobler
# License: BSD 3 clause

import matplotlib.pyplot as plt
import numpy as np
from sklearn import datasets, linear_model

# Load the diabetes dataset
diabetes = datasets.load_diabetes()


# Use only one feature (keep a 2-D column array for scikit-learn)
diabetes_X = diabetes.data[:, np.newaxis, 2]

# Split the data into training/testing sets (hold out the last 20 samples)
diabetes_X_train = diabetes_X[:-20]
diabetes_X_test = diabetes_X[-20:]

# Split the targets into training/testing sets
diabetes_y_train = diabetes.target[:-20]
diabetes_y_test = diabetes.target[-20:]

# Create linear regression object
regr = linear_model.LinearRegression()

# Train the model using the training sets
regr.fit(diabetes_X_train, diabetes_y_train)

# The coefficients
print('Coefficients: \n', regr.coef_)
# The mean squared error
print("Mean squared error: %.2f"
      % np.mean((regr.predict(diabetes_X_test) - diabetes_y_test) ** 2))
# Explained variance score: 1 is perfect prediction
print('Variance score: %.2f' % regr.score(diabetes_X_test, diabetes_y_test))

# Plot outputs: held-out points and the fitted regression line
plt.scatter(diabetes_X_test, diabetes_y_test, color='black')
plt.plot(diabetes_X_test, regr.predict(diabetes_X_test), color='blue',
         linewidth=3)

plt.xticks(())
plt.yticks(())

plt.show()
|
bsd-3-clause
|
chrisburr/scikit-learn
|
examples/cluster/plot_kmeans_assumptions.py
|
270
|
2040
|
"""
====================================
Demonstration of k-means assumptions
====================================
This example is meant to illustrate situations where k-means will produce
unintuitive and possibly unexpected clusters. In the first three plots, the
input data does not conform to some implicit assumption that k-means makes and
undesirable clusters are produced as a result. In the last plot, k-means
returns intuitive clusters despite unevenly sized blobs.
"""
print(__doc__)
# Author: Phil Roth <[email protected]>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans
from sklearn.datasets import make_blobs
plt.figure(figsize=(12, 12))
n_samples = 1500
random_state = 170
X, y = make_blobs(n_samples=n_samples, random_state=random_state)
# Incorrect number of clusters
y_pred = KMeans(n_clusters=2, random_state=random_state).fit_predict(X)
plt.subplot(221)
plt.scatter(X[:, 0], X[:, 1], c=y_pred)
plt.title("Incorrect Number of Blobs")
# Anisotropicly distributed data
transformation = [[ 0.60834549, -0.63667341], [-0.40887718, 0.85253229]]
X_aniso = np.dot(X, transformation)
y_pred = KMeans(n_clusters=3, random_state=random_state).fit_predict(X_aniso)
plt.subplot(222)
plt.scatter(X_aniso[:, 0], X_aniso[:, 1], c=y_pred)
plt.title("Anisotropicly Distributed Blobs")
# Different variance
X_varied, y_varied = make_blobs(n_samples=n_samples,
cluster_std=[1.0, 2.5, 0.5],
random_state=random_state)
y_pred = KMeans(n_clusters=3, random_state=random_state).fit_predict(X_varied)
plt.subplot(223)
plt.scatter(X_varied[:, 0], X_varied[:, 1], c=y_pred)
plt.title("Unequal Variance")
# Unevenly sized blobs
X_filtered = np.vstack((X[y == 0][:500], X[y == 1][:100], X[y == 2][:10]))
y_pred = KMeans(n_clusters=3, random_state=random_state).fit_predict(X_filtered)
plt.subplot(224)
plt.scatter(X_filtered[:, 0], X_filtered[:, 1], c=y_pred)
plt.title("Unevenly Sized Blobs")
plt.show()
|
bsd-3-clause
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.