repo_name | path | copies | size | content | license
---|---|---|---|---|---|
tarthy6/dozer-thesis | py/plot.py | 3 | 43753 | # encoding: utf-8
# 2008 © Václav Šmilauer <[email protected]>
"""
Module containing utility functions for plotting inside woo. Most functionality is exposed through :obj:`woo.core.Plot`, however.
"""
## all exported names
__all__=['live','liveInterval','autozoom','legendAlpha','scientific','scatterMarkerKw']
import sys
PY3K=sys.version_info[0]==3
pilOk=False
try:
from PIL import Image
pilOk=True
except ImportError: pass
try:
import Image
pilOk=True
except ImportError: pass
if not pilOk: print 'WARN: PIL/Image module (python-imaging) not importable, embedding images into plots will give errors.'
# PY3K
if PY3K:
def _bytes(s): return bytes(s,'ascii')
else:
def _bytes(s): return s
import matplotlib,os,time,math,itertools,sys
# running in batch
#
# If GtkAgg is the default, X must be working, which is not the case
# with batches (DISPLAY is unset in such a case), and importing pylab then fails.
#
# Agg does not require the GUI part and works without any DISPLAY active
# just fine.
#
# see http://www.mail-archive.com/[email protected]/msg04320.html
# and https://lists.launchpad.net/woo-users/msg03289.html
#
# IMPORTANT: this sets woo.runtime.hasDisplay
try: import woo.qt
except ImportError: pass
import woo.runtime, wooMain, woo.config
if wooMain.options.fakeDisplay: woo.runtime.hasDisplay=False
if 'qt4' not in woo.config.features: woo.runtime.hasDisplay=False
if woo.runtime.hasDisplay==None: # not yet set
raise RuntimeError('woo.plot imported before woo.runtime.hasDisplay is set. This should not really happen, please report.')
if not woo.runtime.hasDisplay:
#from matplotlib.backends.backend_agg import FigureCanvasAgg as WooFigureCanvas
matplotlib.use('Agg') ## pylab API
else:
matplotlib.use('Qt4Agg') # pylab API
#from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as WooFigureCanvas
from matplotlib.backends.backend_agg import FigureCanvasAgg as _HeadlessFigureCanvas
from minieigen import *
matplotlib.rc('axes',grid=True) # put grid in all figures
import pylab
# simulation-specific bits moved to woo.core.Plot
# so that they are saved and reloaded with Scene automatically
#
# those remain module-global objects
#
live=True if woo.runtime.hasDisplay else False
"Enable/disable live plot updating. Disabled without display (useless)."
liveInterval=.5
"Interval for the live plot updating, in seconds."
autozoom=True
"Enable/disable automatic plot rezooming after data update."
legendAlpha=.6
'Transparency of legend frames in plots'
scientific=True if hasattr(matplotlib.axes.Axes,'ticklabel_format') else False ## safe default for older matplotlib versions
"Use scientific notation for axes ticks."
current=-1
"Point that is being tracked with a scatter point. -1 is for the last point, set to *nan* to disable."
afterCurrentAlpha=.2
"Color alpha value for part of lines after :obj:`woo.plot.current`, between 0 (invisible) to 1 (full color)"
scatterMarkerKw=dict(verts=[(0.,0.),(-30.,10.),(-25,0),(-30.,-10.)],marker=None)
"Parameters for the current position marker"
annotateKw=dict(horizontalalignment='left',verticalalignment='top',fontsize=9)
"Parameters for annotation (current value) display"
lineKw=dict(linewidth=1.5,alpha=.8)
"Parameters for the normal line plot"
componentSeparator='_'
componentSuffixes={Vector2:{-1:'norm',0:'x',1:'y'},Vector3:{-1:'norm',0:'x',1:'y',2:'z'},Vector2i:{0:'x',1:'y'},Vector3i:{0:'x',1:'y',2:'z'},Vector6:{-1:'norm',0:'xx',1:'yy',2:'zz',3:'yz',4:'zx',5:'xy'},Matrix3:{(0,0):'xx',(1,1):'yy',(2,2):'zz',(0,1):'xy',(1,0):'yx',(0,2):'xz',(2,0):'zx',(1,2):'yz',(2,1):'zy'}}
# if a type with entry in componentSuffixes is given in addData, columns for individual components are synthesized using indices and suffixes given for each type; negative index means the norm, which is computed using the 'norm()' method (must be defined by the type)
# e.g. foo=Vector3r(1,2,3) will result in columns foo_x=1,foo_y=2,foo_z=3,foo_norm=3.741657...
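# A minimal sketch of that synthesis (illustrative only; Vector3 comes from the
# minieigen import above -- the real expansion lives in Scene_plot_addData below):
#
#	v=Vector3(1,2,3)
#	sfx=componentSuffixes[Vector3]
#	cols=dict((('foo'+componentSeparator+sfx[ix]),(v[ix] if ix>=0 else v.norm())) for ix in sfx)
#	# cols == {'foo_x':1.0,'foo_y':2.0,'foo_z':3.0,'foo_norm':3.7416573867739413}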
def Scene_plot_reset(P):
"Reset all plot-related variables (data, plots, labels)"
P.data,P.plots,P.imgData={},{},{}
pylab.close('all')
def Scene_plot_resetData(P):
"Reset all plot data; keep plots and labels intact."
P.data={}
def Scene_plot_splitData(P):
"Make all plots discontinuous at this point (adds nan's to all data fields)"
P.addData({})
def Scene_plot_reverseData(P):
"""Reverse woo.core.Plot.data order.
Useful for a tension-compression test, where the initial (zero) state is loaded first and, to make the data continuous, the last part must *end* in the zero state.
"""
for k in P.data: P.data[k].reverse()
def addDataColumns(data,dd):
'''Add new columns with NaN data, without adding anything to other columns. Does nothing for columns that already exist'''
numSamples=len(data[data.keys()[0]]) if len(data)>0 else 0
for d in dd:
if d in data.keys(): continue
data[d]=[nan for i in range(numSamples)]
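# Worked example: with data=={'a':[1,2]}, addDataColumns(data,['a','b']) leaves
# 'a' alone and adds data['b']==[nan,nan], padded to the current sample count.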
def Scene_plot_autoData(P,**kw):
"""Add data by evaluating contents of :obj:`woo.core.Plot.plots`. Expressions rasing exceptions will be handled gracefully, but warning is printed for each.
>>> from woo import plot; from woo.dem import *; from woo.core import *
>>> from pprint import pprint
>>> S=Scene(fields=[DemField(gravity=(0,0,-10))])
>>> S.plot.plots={'S.step':('S.time',None,'numParticles=len(S.dem.par)')}
>>> S.plot.autoData()
>>> pprint(S.plot.data)
{'S.step': [0], 'S.time': [0.0], 'numParticles': [0]}
Note that each item in :obj:`woo.core.Plot.plots` can be
* an expression to be evaluated (using the ``eval`` builtin);
* ``name=expression`` string, where ``name`` will appear as label in plots, and expression will be evaluated each time;
* a dictionary-like object -- current keys are labels of plots and current values are added to :obj:`woo.core.Plot.data`. The contents of the dictionary can change over time, in which case new lines will be created as necessary.
A simple simulation with plot can be written in the following way; note how the energy plot is specified.
>>> from woo import plot, utils
>>> S=Scene(fields=[DemField(gravity=(0,0,-10))])
>>> S.plot.plots={'i=S.step':('**S.energy','total energy=S.energy.total()',None,'rel. error=S.energy.relErr()')}
>>> # we create a simple simulation with one ball falling down
>>> S.dem.par.add(Sphere.make((0,0,0),1,mat=utils.defaultMaterial()))
0
>>> S.engines=[Leapfrog(damping=.4,reset=True),
... # get data required by plots at every step
... PyRunner(1,'S.plot.autoData()')
... ]
>>> S.trackEnergy=True
>>> S.run(3,True)
>>> pprint(S.plot.data) #doctest: +ELLIPSIS
{'grav': [0.0, 0.0, -20.357...],
'i': [0, 1, 2],
'kinetic': [0.0, 1.526..., 13.741...],
'nonviscDamp': [nan, nan, 8.143...],
'rel. error': [0.0, 1.0, 0.0361...],
'total energy': [0.0, 1.526..., 1.526...]}
.. plot::
import woo, woo.plot, woo.utils
from woo.dem import *
from woo.core import *
S=Scene(fields=[DemField(gravity=(0,0,-10))])
S.dem.par.add(Sphere.make((0,0,0),1));
S.engines=[Leapfrog(damping=.4,reset=True),PyRunner('S.plot.autoData()')]
S.plot.plots={'i=S.step':('**S.energy','total energy=S.energy.total()',None,'rel. error=S.energy.relErr()')}
S.trackEnergy=True
S.run(500,True)
S.plot.legendLoc=('lower left','upper right')
S.plot.plot()
"""
def colDictUpdate(col,dic,kw):
'update *dic* with the value from *col*, which is an "expr" or "name=expr" string; all exceptions from ``eval`` are caught and a warning is printed without adding any data.'
name,expr=col.split('=',1) if '=' in col else (col,col)
try:
val=eval(expr,kw)
dic.update({name:val})
except:
import traceback
traceback.print_exc()
print 'WARN: ignoring exception raised while evaluating auto-column `'+expr+"'%s."%('' if name==expr else ' ('+name+')')
cols={}
S=P.scene
# data,imgData,plots=P.data,P.imgData,P.plots
kw.update(S=S)
kw.update(woo=woo)
for p in P.plots:
pp=P.plots[p]
colDictUpdate(p.strip(),cols,kw)
for y in tuplifyYAxis(P.plots[p]):
# imgplot specifier
if y==None: continue
yy=addPointTypeSpecifier(y,noSplit=True)[0]
yy1=yy.split('=')[-1]
# dict-like object
# if hasattr(yy,'keys'): cols.update(dict(yy))
# callable returning list sequence of expressions to evaluate
if yy1.startswith('**'):
try:
dd=eval(yy1[2:],{'S':S})
except:
import traceback
traceback.print_exc()
print 'WARN: ignoring exception raised while evaluating dictionary-returning expression "'+yy1[2:]+'".'
dd={}
for k,v in dd.items(): cols[k]=v
elif yy1.startswith('*'):
ee=eval(yy1[1:],{'S':S})
for e in ee: colDictUpdate(e,cols,{'S':S})
else: colDictUpdate(yy,cols,kw)
P.addData(cols)
def Scene_plot_addData(P,*d_in,**kw):
"""Add data from arguments name1=value1,name2=value2 to woo.plot.data.
(the old {'name1':value1,'name2':value2} is deprecated, but still supported)
New data will be padded with NaNs; unspecified data will be NaN (NaNs do not appear in graphs).
This way, equal length of all data is assured so that they can be plotted one against any other.
>>> S=woo.master.scene
>>> from pprint import pprint
>>> S.plot.resetData()
>>> S.plot.addData(a=1)
>>> S.plot.addData(b=2)
>>> S.plot.addData(a=3,b=4)
>>> pprint(S.plot.data)
{'a': [1, nan, 3], 'b': [nan, 2, 4]}
Some sequence types can be given to addData; they will be saved in synthesized columns for individual components.
>>> S.plot.resetData()
>>> S.plot.addData(c=Vector3(5,6,7),d=Matrix3(8,9,10, 11,12,13, 14,15,16))
>>> pprint(S.plot.data) #doctest: +ELLIPSIS
{'c_norm': [10.488...],
'c_x': [5.0],
'c_y': [6.0],
'c_z': [7.0],
'd_xx': [8.0],
'd_xy': [9.0],
'd_xz': [10.0],
'd_yx': [11.0],
'd_yy': [12.0],
'd_yz': [13.0],
'd_zx': [14.0],
'd_zy': [15.0],
'd_zz': [16.0]}
"""
data,imgData=P.data,P.imgData
import numpy
if len(data)>0: numSamples=len(data[data.keys()[0]])
else: numSamples=0
# align with imgData, if there is more of them than data
if len(imgData)>0 and numSamples==0: numSamples=max(numSamples,len(imgData[imgData.keys()[0]]))
d=(d_in[0] if len(d_in)>0 else {})
d.update(**kw)
# handle types composed of multiple values (vectors, matrices)
dNames=d.keys()[:] # make copy, since dict cannot change size if iterated over directly
for name in dNames:
if type(d[name]) in componentSuffixes:
val=d[name]
suffixes=componentSuffixes[type(d[name])]
for ix in suffixes:
d[name+componentSeparator+suffixes[ix]]=(d[name][ix] if ix>=0 else d[name].norm())
del d[name]
elif hasattr(d[name],'__len__'):
raise ValueError('plot.addData given unhandled sequence type (is a '+type(d[name]).__name__+', must be number or '+'/'.join([k.__name__ for k in componentSuffixes])+')')
for name in d:
if not name in data.keys(): data[name]=[]
for name in data:
data[name]+=(numSamples-len(data[name]))*[nan]
data[name].append(d[name] if name in d else nan)
#print [(k,len(data[k])) for k in data.keys()]
#numpy.array([nan for i in range(numSamples)])
#numpy.append(data[name],[d[name]],1)
def Scene_plot_addImgData(P,**kw):
data,imgData=P.data,P.imgData
for k in kw:
if k not in imgData: imgData[k]=[]
# align imgData with data
if len(data.keys())>0 and len(imgData.keys())>0:
nData,nImgData=len(data[data.keys()[0]]),len(imgData[imgData.keys()[0]])
#if nImgData>nData-1: raise RuntimeError("imgData is already the same length as data?")
if nImgData<nData-1: # repeat last value
for k in imgData.keys():
lastValue=imgData[k][-1] if len(imgData[k])>0 else None
imgData[k]+=(nData-len(imgData[k])-1)*[lastValue]
elif nData<nImgData:
for k in data.keys():
lastValue=data[k][-1] if len(data[k])>0 else nan
data[k]+=(nImgData-nData)*[lastValue] # add one more, because we will append to imgData below
# add values from kw
newLen=(len(imgData[imgData.keys()[0]]) if imgData else 0)+1 # current length plus 1
for k in kw:
if k in imgData and len(imgData[k])>0: imgData[k]+=(newLen-len(imgData[k])-1)*[imgData[k][-1]]+[kw[k]] # repeat last element as necessary
else: imgData[k]=(newLen-1)*[None]+[kw[k]] # repeat None if no previous value
# align values which were not in kw by repeating the last value
for k in imgData:
if len(imgData[k])<newLen: imgData[k]+=(newLen-len(imgData[k]))*[imgData[k][-1]]
assert len(set([len(i) for i in imgData.values()]))<=1 # no data or all having the same value
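# Worked example of the alignment above: with data=={'t':[0,1,2]} and
# imgData=={'snap':['a.png']}, a call addImgData(snap='b.png') first pads
# 'snap' with its last value to length 2, then appends 'b.png', so that both
# dicts again describe the same three samples.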
# not public functions
def addPointTypeSpecifier(o,noSplit=False):
"""Add point type specifier to simple variable name; optionally take only the part before '=' from the first item."""
if type(o) in [tuple,list]:
if noSplit or not type(o[0])==str: return o
else: return (o[0].split('=',1)[0],)+tuple(o[1:])
else: return (o if (noSplit or not type(o)==str) else (o.split('=',1)[0]),'')
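# e.g. addPointTypeSpecifier('foo=S.step') -> ('foo',''), while
# addPointTypeSpecifier(('foo','r-')) -> ('foo','r-') keeps the line style.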
def tuplifyYAxis(pp):
"""convert one variable to a 1-tuple"""
if type(pp) in [tuple,list]: return pp
else: return (pp,)
def xlateLabel(l,labels):
"Return translated label; return l itself if not in the labels dict."
if l in labels.keys(): return labels[l]
else: return l
class LineRef:
"""Holds reference to plot line and to original data arrays (which change during the simulation),
and updates the actual line using those data upon request."""
def __init__(self,line,scatter,annotation,line2,xdata,ydata,imgData=None,dataName=None):
self.line,self.scatter,self.annotation,self.line2,self.xdata,self.ydata,self.imgData,self.dataName=line,scatter,annotation,line2,xdata,ydata,imgData,dataName
def update(self):
if isinstance(self.line,matplotlib.image.AxesImage):
# image name
try:
if len(self.xdata)==0 and self.dataName: self.xdata=self.imgData[self.dataName] # an empty list references an empty singleton, not the list we want; adjust here
import Image
if self.xdata[current]==None: img=Image.new('RGBA',(1,1),(0,0,0,0))
else: img=Image.open(self.xdata[current])
self.line.set_data(img)
except IndexError: pass
else:
# regular data
import numpy
# current==-1 avoids copying/slicing the data in the else branch
if current==None or current==-1 or afterCurrentAlpha==1:
self.line.set_xdata(self.xdata); self.line.set_ydata(self.ydata)
self.line2.set_xdata([]); self.line2.set_ydata([])
else:
# extend the pre-current part by one point, if possible, so that the two line segments stay connected
preCurrEnd=current+(1 if len(self.xdata)>current else 0)
self.line.set_xdata(self.xdata[:preCurrEnd]); self.line.set_ydata(self.ydata[:preCurrEnd])
self.line2.set_xdata(self.xdata[current:]); self.line2.set_ydata(self.ydata[current:])
try:
x,y=self.xdata[current],self.ydata[current]
except IndexError: x,y=0,0
# this could be written in a nicer way, very likely
try:
pt=numpy.ndarray((2,),buffer=numpy.array([float(x),float(y)]))
if self.scatter:
self.scatter.set_offsets(pt)
# change rotation of the marker (possibly incorrect)
try:
dx,dy=self.xdata[current]-self.xdata[current-1],self.ydata[current]-self.ydata[current-1]
# smoothing from last n values, if possible
# FIXME: does not show arrow at all if less than window values
#try:
# window=10
# dx,dy=[numpy.average(numpy.diff(dta[current-window:current])) for dta in self.xdata,self.ydata]
#except IndexError: pass
# there must be an easier way to find on-screen derivative angle, ask on the matplotlib mailing list
axes=self.line.get_axes()
p=axes.patch; xx,yy=p.get_verts()[:,0],p.get_verts()[:,1]; size=max(xx)-min(xx),max(yy)-min(yy)
aspect=(size[1]/size[0])*(1./axes.get_data_ratio())
angle=math.atan(aspect*dy/dx)
if dx<0: angle-=math.pi
self.scatter.set_transform(matplotlib.transforms.Affine2D().rotate(angle))
except IndexError: pass
if self.annotation:
if math.isnan(x) or math.isnan(y):
if hasattr(self.annotation,'xyann'): self.annotation.xyann=(x,y)
else: self.annotation.xytext=(0,0)
self.annotation.set_text('') # make invisible, place anywhere
else:
if hasattr(self.annotation,'xyann'): self.annotation.xyann=(x,y) # newer MPL versions (>=1.4)
else: self.annotation.xytext=(x,y) # older MPL versions
self.annotation.set_text(self.annotation.annotateFmt.format(xy=(float(x),float(y))))
except TypeError: pass # this happens at i386 with empty data, saying TypeError: buffer is too small for requested array
liveTimeStamp=0 # timestamp when live update was started, so that the old thread knows to stop if that changes
nan=float('nan')
def createPlots(P,subPlots=True,noShow=False,replace=True,scatterSize=60,wider=False):
'''Create plots based on current data;
:param subPlots: show all plots in one figure as subplots; otherwise, create multiple figures
:param noShow: use headless backend for plots, and do not show plots on the screen
:param replace: close existing figures and replace P.currLineRefs with the new lines; if ``False``, leave existing figures and line references untouched
'''
import logging
data,imgData,plots,labels,xylabels,legendLoc,axesWd,annotateFmt=P.data,P.imgData,P.plots,P.labels,P.xylabels,P.legendLoc,P.axesWd,P.annotateFmt
if replace:
if P.currLineRefs:
logging.info('Closing existing figures')
ff=set([l.line.get_axes().get_figure() for l in P.currLineRefs]) # get all current figures
for f in ff: pylab.close(f) # close those
P.currLineRefs=[]
figs=[]
if len(plots)==0: return # nothing to plot
if subPlots:
# compute number of rows and columns for plots we have
subCols=int(round(math.sqrt(len(plots)))); subRows=int(math.ceil(len(plots)*1./subCols))
if wider: subRows,subCols=subCols,subRows
# create a new figure; called once with subPlots, for each subplot without subPlots
def _newFig():
## pylab API
if not noShow: return pylab.figure() # this will go onto the screen; the pylab call sets up the windows as well
else: # with noShow
fig=matplotlib.figure.Figure()
canvas=_HeadlessFigureCanvas(fig) #
return fig
if subPlots: figs=[_newFig()]
for nPlot,p in enumerate(plots.keys()):
pStrip=p.strip().split('=',1)[0]
if not subPlots:
figs.append(_newFig())
axes=figs[-1].add_subplot(1,1,1)
else: axes=figs[-1].add_subplot(subRows,subCols,nPlot+1) # nPlot is 1-based in mpl, for matlab compatibility
axes.grid(True)
if plots[p]==None: # image plot
if not pStrip in imgData.keys(): imgData[pStrip]=[]
# fake (empty) image if no data yet
import Image
if len(imgData[pStrip])==0 or imgData[pStrip][-1]==None: img=Image.new('RGBA',(1,1),(0,0,0,0))
else: img=Image.open(imgData[pStrip][-1])
img=axes.imshow(img,origin='upper')
if replace: P.currLineRefs.append(LineRef(line=img,scatter=None,annotation=None,line2=None,xdata=imgData[pStrip],ydata=None,imgData=imgData,dataName=pStrip))
axes.set_axis_off()
continue
plots_p=[addPointTypeSpecifier(o) for o in tuplifyYAxis(plots[p])]
plots_p_y1,plots_p_y2=[],[]; y1=True
missing=set() # missing data columns
if pStrip not in data.keys(): missing.add(pStrip.decode('utf-8','ignore'))
for d in plots_p:
if d[0]==None:
y1=False; continue
if not isinstance(d[0],(str,unicode)): raise ValueError('Plots specifiers must be strings (not %s)'%(type(d[0]).__name__))
if y1: plots_p_y1.append(d)
else: plots_p_y2.append(d)
try:
if (
d[0] not in data.keys()
# and not callable(d[0])
and not (isinstance(d[0],(str,unicode)) and (d[0].startswith('**') or d[0].startswith('*'))) # hack for callable as strings
# and not hasattr(d[0],'keys')
):
missing.add(d[0])
except UnicodeEncodeError:
import warnings
warnings.warn('UnicodeEncodeError when processing data set '+repr(d[0]))
if missing:
if len(data.keys())==0 or len(data[data.keys()[0]])==0: # no data at all yet, do not add garbage NaNs
for m in missing: data[m]=[]
else:
addDataColumns(data,missing)
try:
print 'Missing columns in Scene.plot.data, added NaNs:',', '.join([m.encode('utf-8') for m in missing])
except UnicodeDecodeError:
import warnings
warnings.warn('UnicodeDecodeError reporting missing data columns -- harmless, just wondering...')
def createLines(pStrip,ySpecs,axes,isY1=True,y2Exists=False):
'''Create data lines from specifications; this code is common for y1 and y2 axes;
it handles y-data specified as callables/dicts passed as string (starting with '*'/'**'), which might create additional lines when updated with liveUpdate.
'''
# save the original specifications; they will be smuggled into the axes object
# the live updater will run yNameFuncs to see if there are new lines to be added
# and will add them if necessary
yNameFuncs=set()
yNames=set()
ySpecs2=[]
for ys in ySpecs:
if not isinstance(ys[0],(str,unicode)): raise ValueError('Plot specifications must be strings (not a %s).'%type(ys[0]))
if ys[0].startswith('**') or ys[0].startswith('*'):
evEx=eval(ys[0][(2 if ys[0].startswith('**') else 1):],{'S':P.scene})
yNameFuncs.add(evEx) # add callable or dictionary
# XXX: what is ys[1]? Previously, there was no line specifier there for dicts at least
# print evEx,type(evEx), evEx.__iter__(),type(evEx.__iter__())
ySpecs2+=[(ret,ys[1]) for ret in evEx] # traverse list or dict keys
else: ySpecs2.append(ys)
if len(ySpecs2)==0:
print 'woo.plot: creating fake plot, since there are no y-data yet'
line,=axes.plot([nan],[nan])
line2,=axes.plot([nan],[nan])
if replace: P.currLineRefs.append(LineRef(line=line,scatter=None,annotation=None,line2=line2,xdata=[nan],ydata=[nan]))
# set different color series for y1 and y2 so that they are recognizable
if matplotlib.rcParams.has_key('axes.color_cycle'): matplotlib.rcParams['axes.color_cycle']='b,g,r,c,m,y,k' if not isY1 else 'm,y,k,b,g,r,c'
for d in ySpecs2:
yNames.add(d)
# should have been handled above already
#if pStrip not in data:
# print 'Missing column %s in Scene.plot.data, added NaN.'%pString
# addDataColumns(data,[pStrip])
if d[0] not in data:
print 'Missing column %s in Scene.plot.data, added NaN.'%d[0]
addDataColumns(data,[d[0]])
line,=axes.plot(data[pStrip],data[d[0]],d[1],label=xlateLabel(d[0],P.labels),**lineKw)
lineKwWithoutAlpha=dict([(k,v) for k,v in lineKw.items() if k!='alpha'])
line2,=axes.plot([],[],d[1],color=line.get_color(),alpha=afterCurrentAlpha,**lineKwWithoutAlpha)
# use (0,0) if there are no data yet
scatterPt=[0,0] if len(data[pStrip])==0 else (data[pStrip][current],data[d[0]][current])
scatterPtPos=[scatterPt[0] if not math.isnan(scatterPt[0]) else 0,scatterPt[1] if not math.isnan(scatterPt[1]) else 0]
# if current value is NaN, use zero instead
scatter=axes.scatter(scatterPtPos[0],scatterPtPos[1],s=scatterSize,color=line.get_color(),**scatterMarkerKw)
if annotateFmt:
if math.isnan(scatterPtPos[0]) or math.isnan(scatterPtPos[1]): text=''
else: text=annotateFmt.format(xy=scatterPt)
annotation=axes.annotate(text,xy=scatterPtPos,color=line.get_color(),**annotateKw)
annotation.annotateFmt=annotateFmt
else: annotation=None
if replace: P.currLineRefs.append(LineRef(line=line,scatter=scatter,annotation=annotation,line2=line2,xdata=data[pStrip],ydata=data[d[0]]))
axes=line.get_axes()
labelLoc=(legendLoc[0 if isY1 else 1] if y2Exists>0 else 'best')
l=axes.legend(loc=labelLoc)
if l:
l.get_frame().set_alpha(legendAlpha)
if hasattr(l,'draggable'): l.draggable(True)
if scientific:
axes.ticklabel_format(style='sci',scilimits=(0,0),axis='both')
# fixes scientific exponent placement for y2: https://sourceforge.net/mailarchive/forum.php?thread_name=20101223174750.GD28779%40ykcyc&forum_name=matplotlib-users
if not isY1: axes.yaxis.set_offset_position('right')
if isY1:
axes.set_ylabel((', '.join([xlateLabel(_p[0],P.labels) for _p in ySpecs2])) if p not in xylabels or not xylabels[p][1] else xylabels[p][1])
axes.set_xlabel(xlateLabel(pStrip,P.labels) if (p not in xylabels or not xylabels[p][0]) else xylabels[p][0])
else:
axes.set_ylabel((', '.join([xlateLabel(_p[0],P.labels) for _p in ySpecs2])) if (p not in xylabels or len(xylabels[p])<3 or not xylabels[p][2]) else xylabels[p][2])
# if there are callable/dict ySpecs, save them inside the axes object, so that the live updater can use those
if yNameFuncs:
axes.wooYNames,axes.wooYFuncs,axes.wooXName,axes.wooLabelLoc=yNames,yNameFuncs,pStrip,labelLoc # prepend woo to avoid clashes
if 0:
# fix missing 'show' method; this has been fixed in matplotlib already, but we need to backport that
# see https://github.com/matplotlib/matplotlib/commit/15fd0ae587a57cb1d7b69546eb359085315148c8
# don't do that for headless backend, error there is fine
fig=axes.get_figure()
if not hasattr(fig,'show'):
mgr=getattr(fig.canvas,'manager')
if mgr: fig.show=lambda *args: mgr.window.show()
createLines(pStrip,plots_p_y1,axes=axes,isY1=True,y2Exists=len(plots_p_y2)>0)
if axesWd>0:
axes.axhline(linewidth=axesWd,color='k')
axes.axvline(linewidth=axesWd,color='k')
# create y2 lines, if any
if len(plots_p_y2)>0:
axes=axes.twinx() # create the y2 axis
createLines(pStrip,plots_p_y2,axes,isY1=False,y2Exists=True)
### scene is not directly accessible from here, do it like this:
S=woo.master.scene
if S.plot==P:
if 'title' in S.tags: axes.set_title(S.tags['title'])
return figs
def liveUpdate(P,timestamp):
global liveTimeStamp
liveTimeStamp=timestamp
import sys
while True:
if not live or liveTimeStamp!=timestamp:
return
figs,axes,linesData=set(),set(),set()
data=P.data
for l in P.currLineRefs:
l.update()
figs.add(l.line.get_figure())
axes.add(l.line.get_axes())
linesData.add(id(l.ydata))
# find callables in y specifiers, create new lines if necessary
for ax in axes:
if not hasattr(ax,'wooYFuncs') or not ax.wooYFuncs: continue # not defined or empty
yy=set();
for f in ax.wooYFuncs:
if callable(f): yy.update(f())
elif hasattr(f,'keys'):
yy.update(f.keys())
else: raise ValueError("Internal error: ax.wooYFuncs items must be callables or dictionary-like objects and nothing else.")
#print 'callables y names:',yy
news=yy-ax.wooYNames
if not news: continue
for new in news:
ax.wooYNames.add(new)
if new in data.keys() and id(data[new]) in linesData: continue # do not add when reloaded and the old lines are already there
print 'woo.plot: creating new line for',new
if not new in data.keys(): data[new]=len(data[ax.wooXName])*[nan] # create data entry if necessary
#print 'data',len(data[ax.wooXName]),len(data[new]),data[ax.wooXName],data[new]
line,=ax.plot(data[ax.wooXName],data[new],label=xlateLabel(new,P.labels)) # no line specifier
line2,=ax.plot([],[],color=line.get_color(),alpha=afterCurrentAlpha)
scatterPt=(0 if len(data[ax.wooXName])==0 or math.isnan(data[ax.wooXName][current]) else data[ax.wooXName][current]),(0 if len(data[new])==0 or math.isnan(data[new][current]) else data[new][current])
scatter=ax.scatter(scatterPt[0],scatterPt[1],s=60,color=line.get_color(),**scatterMarkerKw)
if P.annotateFmt:
annotation=ax.annotate(P.annotateFmt.format(xy=scatterPt),xy=scatterPt,color=line.get_color(),**annotateKw)
annotation.annotateFmt=P.annotateFmt
else: annotation=None
P.currLineRefs.append(LineRef(line=line,scatter=scatter,annotation=annotation,line2=line2,xdata=data[ax.wooXName],ydata=data[new]))
ax.set_ylabel(ax.get_ylabel()+(', ' if ax.get_ylabel() else '')+xlateLabel(new,P.labels))
# it is possible that the legend has not yet been created
l=ax.legend(loc=ax.wooLabelLoc)
if l:
l.get_frame().set_alpha(legendAlpha)
if hasattr(l,'draggable'): l.draggable(True)
if autozoom:
for ax in axes:
try:
ax.relim() # recompute axes limits
ax.autoscale_view()
except RuntimeError: pass # happens if data are being updated and do not have the same dimension at that very moment
for fig in figs:
#sys.stderr.write('*')
try:
fig.canvas.draw()
except RuntimeError: pass # happens here too
#sys.stderr.write('(')
time.sleep(liveInterval)
#sys.stderr.write(')')
def savePlotSequence(P,fileBase,stride=1,imgRatio=(5,7),title=None,titleFrames=20,lastFrames=30):
'''Save sequence of plots, each plot corresponding to one line in history. It is especially meant to be used for :obj:`woo.utils.makeVideo`.
:param stride: only consider every stride-th line of history (default creates one frame per each line)
:param title: Create title frame, where lines of title are separated with newlines (``\\n``) and optional subtitle is separated from title by double newline.
:param int titleFrames: Create this number of frames with title (by repeating its filename), determines how long the title will stand in the movie.
:param int lastFrames: Repeat the last frame this number of times, so that the movie does not end abruptly.
:return: List of filenames with consecutive frames.
'''
data,imgData,plots=P.data,P.imgData,P.plots
fig=createPlots(P,noShow=True,replace=True,subPlots=True,scatterSize=60,wider=True)[0]
sqrtFigs=math.sqrt(len(plots))
fig.set_size_inches(8*sqrtFigs,5*sqrtFigs) # better readable
fig.subplots_adjust(left=.05,right=.95,bottom=.05,top=.95) # make it more compact
if len(plots)==1 and plots[plots.keys()[0]]==None: # only pure snapshot is there
fig.set_size_inches(5,5)
fig.subplots_adjust(left=0,right=1,bottom=0,top=1)
#if not data.keys(): raise ValueError("plot.data is empty.")
pltLen=max(len(data[data.keys()[0]]) if data else 0,len(imgData[imgData.keys()[0]]) if imgData else 0)
if pltLen==0: raise ValueError("Both plot.data and plot.imgData are empty.")
global current
ret=[]
print 'Saving %d plot frames, it can take a while...'%(pltLen)
for i,n in enumerate(range(0,pltLen,stride)):
current=n
for l in P.currLineRefs: l.update()
out=fileBase+'-%03d.png'%i
fig.savefig(out)
ret.append(out)
sys.stderr.write('[%d]'%i)
if len(ret)==0: raise RuntimeError("No images created?!")
if title:
import Image
titleImgName=fileBase+'-title.png'
createTitleFrame(titleImgName,Image.open(ret[-1]).size,title)
ret=titleFrames*[titleImgName]+ret
if lastFrames>1: ret+=(lastFrames-1)*[ret[-1]]
return ret
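# Hypothetical usage sketch (S is an existing Scene with plot data; the
# makeVideo call follows the docstring above and its signature is assumed here):
#
#	frames=S.plot.savePlotSequence('/tmp/run',stride=5,title='My run\n\nwith a subtitle')
#	woo.utils.makeVideo(frames,'/tmp/run.avi')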
def createTitleFrame(out,size,title,bgColor=(.8,.6,.8),fgColor='#405090',logo=None,logoPos=(20,20)):
'''Create figure with title and save to file.
:param out: file to save the result to; format is anything supported by matplotlib.
:param size: figure size (for pixel output formats), tuple of (width,height)
:param str title: title and subtitle; lines are separated by single newlines (``\n``) and subtitle (if any) is separated from the title by two consecutive newlines (``\n\n``). Oversize lines are scaled to fit the width, line spacing fits all lines.
:param color fgColor: Font color, any `color format that Matplotlib understands <http://matplotlib.org/api/colors_api.html>`__.
:param color bgColor: Background color.
:param logo: filename or file-like object to be read via `matplotlib.pyplot.imread <http://matplotlib.org/api/pyplot_api.html#matplotlib.pyplot.imread>`__.
:param logoPos: position where to place the logo.
'''
import matplotlib, matplotlib.figure, matplotlib.mathtext
# http://stackoverflow.com/a/13714720/761090
dpi=100 # does not matter as font is specified in inches
fig=matplotlib.figure.Figure(figsize=(size[0]/dpi,size[1]/dpi),dpi=dpi,facecolor=bgColor)
canvas=_HeadlessFigureCanvas(fig)
#fig.set_facecolor('blue'); fig.patch.set_color('blue'); fig.patch.set_facecolor('blue'); fig.patch.set_alpha(None)
titSub=title.split('\n\n')
if len(titSub)==1: subtitle=''
elif len(titSub)==2: title,subtitle=titSub
else: title,subtitle=titSub[0],'\n'.join(titSub[1:])
lines=[(t,True) for t in title.split('\n')]+([(t,False) for t in subtitle.split('\n')] if subtitle else [])
nLines=len(lines); fontSizes=size[1]/10.,size[1]/16.
def writeLine(text,vertPos,fontsize):
rgba,depth=matplotlib.mathtext.MathTextParser('Bitmap').to_rgba(text,fontsize=fontsize,dpi=fig.get_dpi(),color=fgColor)
textsize=rgba.shape[1],rgba.shape[0]
if textsize[0]>size[0]:
rgba,depth=matplotlib.mathtext.MathTextParser('Bitmap').to_rgba(text,fontsize=fontsize*size[0]/textsize[0],dpi=fig.get_dpi(),color=fgColor)
textsize=rgba.shape[1],rgba.shape[0]
fig.figimage(rgba.astype(float)/255.,xo=(size[0]-textsize[0])/2.,yo=vertPos-depth)
nTitle,nSubtitle=len(title.split('\n')),len(subtitle.split('\n')) if subtitle else 0
nLines=nTitle+nSubtitle
ht=size[1]; y0=ht-2*fontSizes[0]; yStep=(ht-2.5*fontSizes[0])/(nTitle+.6*nSubtitle+(.5 if nSubtitle else 0))
def lineYOffset(lineno):
# .5*yStep is the extra gap between title and subtitle
return nTitle*yStep+.5*yStep+(lineno-nTitle)*.6*yStep if lineno>=nTitle else lineno*yStep
if logo:
logoData=pylab.imread(logo)
fig.figimage(logoData,xo=logoPos[0],yo=logoPos[1],origin='upper')
for i,(l,isTitle) in enumerate(lines):
writeLine(l,y0-lineYOffset(i),fontSizes[0 if isTitle else 1])
# http://stackoverflow.com/a/4805178/761090 - savefig default overrides facecolor set previously
fig.savefig(out,facecolor=fig.get_facecolor())
def Scene_plot_plot(P,noShow=False,subPlots=True):
"""Do the actual plot, which is either shown on screen (and nothing is returned: if *noShow* is ``False``) or, if *noShow* is ``True``, returned list of matplotlib's Figure objects.
You can use
>>> import woo,woo.core,os
>>> S=woo.core.Scene()
>>> S.plot.plots={'foo':('bar',)}
>>> S.plot.addData(foo=1,bar=2)
>>> somePdf=woo.master.tmpFilename()+'.pdf'
>>> S.plot.plot(noShow=True)[0].savefig(somePdf)
>>> os.path.exists(somePdf)
True
to save the figure to file automatically.
"""
figs=createPlots(P,subPlots=subPlots,noShow=noShow,replace=(False if noShow else True))
# figs=set([l.line.get_axes().get_figure() for l in P.currLineRefs])
if not figs:
import warnings
warnings.warn('Nothing to plot.')
return
if not hasattr(list(figs)[0],'show') and not noShow:
import warnings
warnings.warn('plot.plot not showing figure (matplotlib using headless backend?)')
noShow=True
if not noShow:
if not woo.runtime.hasDisplay: return # would error out with some backends, such as Agg used in batches
if 1:
if live:
import threading
t=threading.Thread(target=liveUpdate,args=(P,time.time()))
t.daemon=True
t.start()
# pylab.show() # this blocks for some reason; call show on figures directly
for f in figs:
f.show()
# should have fixed https://bugs.launchpad.net/woo/+bug/606220, but does not work apparently
if 0:
import matplotlib.backend_bases
if 'CloseEvent' in dir(matplotlib.backend_bases):
def closeFigureCallback(event):
ff=event.canvas.figure
# remove closed axes from our update list
P.currLineRefs=[l for l in P.currLineRefs if l.line.get_axes().get_figure()!=ff]
f.canvas.mpl_connect('close_event',closeFigureCallback)
# else:
# figs=list(set([l.line.get_axes().get_figure() for l in P.currLineRefs]))
return figs
def Scene_plot_saveDataTxt(P,fileName,vars=None):
"""Save plot data into a (optionally compressed) text file. The first line contains a comment (starting with ``#``) giving variable name for each of the columns. This format is suitable for being loaded for further processing (outside woo) with ``numpy.genfromtxt`` function, which recognizes those variable names (creating numpy array with named entries) and handles decompression transparently.
>>> import woo, woo.core
>>> from pprint import pprint
>>> S=woo.core.Scene()
>>> S.plot.addData(a=1,b=11,c=21,d=31) # add some data here
>>> S.plot.addData(a=2,b=12,c=22,d=32)
>>> pprint(S.plot.data)
{'a': [1, 2], 'b': [11, 12], 'c': [21, 22], 'd': [31, 32]}
>>> txt=woo.master.tmpFilename()+'.txt.bz2'
>>> S.plot.saveDataTxt(txt,vars=('a','b','c'))
>>> import numpy
>>> d=numpy.genfromtxt(txt,dtype=None,names=True)
>>> d['a']
array([1, 2])
>>> d['b']
array([11, 12])
:param fileName: file to save data to; if it ends with ``.bz2`` / ``.gz``, the file will be compressed using bzip2 / gzip.
:param vars: Sequence (tuple/list/set) of variable names to be saved. If ``None`` (default), all variables in :obj:`woo.core.Plot` are saved.
"""
import bz2,gzip
data=P.data
if not vars:
vars=data.keys(); vars.sort()
fileName=P.scene.expandTags(fileName)
if fileName.endswith('.bz2'): f=bz2.BZ2File(fileName,'wb')
elif fileName.endswith('.gz'): f=gzip.GzipFile(fileName,'wb')
else: f=open(fileName,'wb')
f.write(_bytes("# "+"\t".join(vars)+"\n"))
for i in range(len(data[vars[0]])):
f.write(_bytes("\t".join([str(data[var][i]) for var in vars])+"\n"))
f.close()
def savePylab(baseName,timestamp=False,title=None):
'''This function is not finished, do not use it.'''
import time
if len(data.keys())==0: raise RuntimeError("No data for plotting were saved.")
if timestamp: baseName+=_mkTimestamp()
baseNameNoPath=baseName.split('/')[-1]
saveDataTxt(fileName=baseName+'.data.bz2')
if len(plots)==0: raise RuntimeError("No plots to save, only data saved.")
py=file(baseName+'.py','w')
py.write('#!/usr/bin/env python\n# encoding: utf-8\n# created '+time.asctime()+' ('+time.strftime('%Y%m%d_%H:%M')+')\n#\nimport pylab, numpy\n')
py.write("data=numpy.genfromtxt('%s.data.bz2',dtype=None,names=True)\n"%baseName)
subCols=int(round(math.sqrt(len(plots)))); subRows=int(math.ceil(len(plots)*1./subCols))
for nPlot,p in enumerate(plots.keys()):
pStrip=p.strip().split('=',1)[0]
if plots[p]==None: continue # image plot, which is not exported
if len(plots)==1: py.write('pylab.figure()\n')
else: py.write('pylab.subplot(%d,%d,%d)\n'%(subRows,subCols,nPlot+1))
def _mkTimestamp():
import time
return time.strftime('_%Y%m%d_%H:%M')
def Scene_plot_saveGnuplot(P,baseName,term='wxt',extension=None,timestamp=False,comment=None,title=None,varData=False,timeStamp=True):
"""Save data added with :obj:`woo.plot.addData` into (compressed) file and create .gnuplot file that attempts to mimick plots specified with :obj:`woo.plot.plots`.
:param baseName: used for creating baseName.gnuplot (command file for gnuplot), associated ``baseName.data.bz2`` (data) and output files (if applicable) in the form ``baseName.[plot number].extension``
:param term: specify the gnuplot terminal; defaults to ``wxt``, in which case gnuplot will draw persistent windows to screen and terminate; other useful terminals are ``png``, ``cairopdf`` and so on
:param extension: extension for ``baseName`` defaults to terminal name; fine for png for example; if you use ``cairopdf``, you should also say ``extension='pdf'`` however
:param bool timestamp: append numeric time to the basename
:param bool varData: whether the data file will be assigned to a gnuplot variable or referenced in-place in the plot expression
:param comment: a user comment (may be multiline) that will be embedded in the control file
:return: name of the gnuplot file created.
"""
data,imgData,plots,labels,xylabels=P.data,P.imgData,P.plots,P.labels,P.xylabels
if len(data.keys())==0: raise RuntimeError("No data for plotting were saved.")
if timestamp: baseName+=_mkTimestamp()
baseNameNoPath=baseName.split('/')[-1]
vars=data.keys(); vars.sort()
P.saveDataTxt(fileName=baseName+'.data.bz2',vars=vars)
fPlot=file(baseName+".gnuplot",'w')
fPlot.write('#!/usr/bin/env gnuplot\n#\n')
if timeStamp: fPlot.write('# created '+time.asctime()+' ('+time.strftime('%Y%m%d_%H:%M')+')\n#\n')
if comment: fPlot.write('# '+comment.replace('\n','\n# ')+'#\n')
dataFile='"< bzcat %s.data.bz2"'%(baseNameNoPath)
if varData:
fPlot.write('dataFile=%s'%dataFile); dataFile='dataFile'
if not extension: extension=term
i=0
for p in plots:
pStrip=p.strip().split('=',1)[0]
if plots[p]==None: continue ## this plot is image plot, which is not applicable to gnuplot
plots_p=[addPointTypeSpecifier(o) for o in tuplifyYAxis(plots[p])]
if term in ['wxt','x11']: fPlot.write("set term %s %d persist\n"%(term,i))
else: fPlot.write("set term %s; set output '%s.%d.%s'\n"%(term,baseNameNoPath,i,extension))
fPlot.write("set xlabel '%s'\n"%xlateLabel(p,labels))
fPlot.write("set grid\n")
fPlot.write("set datafile missing 'nan'\n")
if title: fPlot.write("set title '%s'\n"%title)
y1=True; plots_y1,plots_y2=[],[]
# replace callable/dict-like data specifiers by the results, if that particular data exists
plots_p2=[]
for pp in plots_p:
if pp[0]==None: plots_p2.append((pp[0],pp[1]))
elif pp[0].startswith('**'):
try:
dd=eval(pp[0][2:],{'S':P.scene})
plots_p2+=[(ppp,'') for ppp in dd.keys() if ppp in data.keys()]
except:
import traceback
traceback.print_exc()
print 'WARN: ignoring exception raised while evaluating expression "'+pp[0][2:]+'".'
elif pp[0].startswith('*'):
plots_p2+=[(e,'') for e in eval(pp[0][1:],{'S':P.scene}) if e in data.keys()]
else: plots_p2.append((pp[0],pp[1]))
plots_p=plots_p2
#plots_p=sum([([(pp,'') for pp in p[0]() if pp in data.keys()] if callable(p[0]) else [(p[0],p[1])] ) for p in plots_p],[])
for d in plots_p:
if d[0]==None:
y1=False; continue
if y1: plots_y1.append(d)
else: plots_y2.append(d)
fPlot.write("set ylabel '%s'\n"%(','.join([xlateLabel(_p[0],labels) for _p in plots_y1])))
if len(plots_y2)>0:
fPlot.write("set y2label '%s'\n"%(','.join([xlateLabel(_p[0],labels) for _p in plots_y2])))
fPlot.write("set y2tics\n")
ppp=[]
def _mkLine(varX,varY,i):
return " %s using %d:%d title '%s%s(%s)%s' with lines%s"%(dataFile,vars.index(varX)+1,vars.index(varY)+1,'← ' if i==0 else'',xlateLabel(varY,labels),xlateLabel(varX,labels),' →' if i==1 else '',' axes x1y2' if i==1 else '')
for pp in plots_y1: ppp.append(_mkLine(pStrip,pp[0],0))
for pp in plots_y2: ppp.append(_mkLine(pStrip,pp[0],1))
fPlot.write("plot "+",".join(ppp)+"\n")
i+=1
fPlot.close()
return baseName+'.gnuplot'
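# For illustration, a control file generated with term='png' for
# plots={'S.step':('kinetic',)} might look like this (paths depend on baseName):
#
#	set term png; set output 'run.0.png'
#	set xlabel 'S.step'
#	set grid
#	set datafile missing 'nan'
#	plot "< bzcat run.data.bz2" using 1:2 title '← kinetic(S.step)' with lines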
def _deprecPlotFunc(old,func,new=None,takesScene=False,*args,**kw):
"Wrapper for deprecated functions, example below."
import warnings
if not new: new=old
warnings.warn('Function plot.%s is deprecated, use %s.%s instead.'%(old,('Scene' if takesScene else 'Scene.plot'),new),stacklevel=3,category=DeprecationWarning)
S=woo.master.scene
if takesScene: return func(S,*args,**kw)
else: return func(S.plot,*args,**kw)
#
# DEPRECATED functions, will be removed at some point!
#
def reset(): _deprecPlotFunc('reset',Scene_plot_reset)
def resetData(): _deprecPlotFunc('resetData',Scene_plot_resetData)
def splitData(): _deprecPlotFunc('splitData',Scene_plot_splitData)
def reverseData(): _deprecPlotFunc('reverseData',Scene_plot_reverseData)
def addAutoData(): _deprecPlotFunc('addAutoData',Scene_plot_autoData,new='autoData')
def addData(): _deprecPlotFunc('addData',Scene_plot_addData)
def addImgData(): _deprecPlotFunc('addImgData',Scene_plot_addImgData)
def saveGnuplot(): _deprecPlotFunc('saveGnuplot',Scene_plot_saveGnuplot)
def saveDataTxt(): _deprecPlotFunc('saveDataTxt',Scene_plot_saveDataTxt)
def plot(): _deprecPlotFunc('plot',Scene_plot_plot)
# called at startup from woo._monkey.plot
def defMonkeyMethods():
import woo.core
woo.core.Plot.reset=Scene_plot_reset
woo.core.Plot.resetData=Scene_plot_resetData
woo.core.Plot.splitData=Scene_plot_splitData
woo.core.Plot.reverseData=Scene_plot_reverseData
woo.core.Plot.autoData=Scene_plot_autoData
woo.core.Plot.addData=Scene_plot_addData
woo.core.Plot.addImgData=Scene_plot_addImgData
woo.core.Plot.saveGnuplot=Scene_plot_saveGnuplot
woo.core.Plot.saveDataTxt=Scene_plot_saveDataTxt
woo.core.Plot.plot=Scene_plot_plot
defMonkeyMethods()
| gpl-2.0 |
crichardson17/starburst_atlas | Low_resolution_sims/DustFree_LowRes/Geneva_noRot_inst/Geneva_noRot_inst_age6/Optical1.py | 33 | 7366 | import csv
import matplotlib.pyplot as plt
from numpy import *
import scipy.interpolate
import math
from pylab import *
from matplotlib.ticker import MultipleLocator, FormatStrFormatter
import matplotlib.patches as patches
from matplotlib.path import Path
import os
# ------------------------------------------------------------------------------------------------------
#inputs
for file in os.listdir('.'):
if file.endswith(".grd"):
inputfile = file
for file in os.listdir('.'):
if file.endswith(".txt"):
inputfile2 = file
# ------------------------------------------------------------------------------------------------------
#Patches data
#for the Kewley and Levesque data
verts = [
(1., 7.97712125471966000000), # left, bottom
(1., 9.57712125471966000000), # left, top
(2., 10.57712125471970000000), # right, top
(2., 8.97712125471966000000), # right, bottom
(0., 0.), # ignored
]
codes = [Path.MOVETO,
Path.LINETO,
Path.LINETO,
Path.LINETO,
Path.CLOSEPOLY,
]
path = Path(verts, codes)
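# Path pairs each vertex above with a drawing code; CLOSEPOLY ignores its
# (0,0) vertex and simply closes the polygon back to the MOVETO point.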
# ------------------------
#for the Kewley 01 data
verts2 = [
(2.4, 9.243038049), # left, bottom
(2.4, 11.0211893), # left, top
(2.6, 11.0211893), # right, top
(2.6, 9.243038049), # right, bottom
(0, 0.), # ignored
]
path = Path(verts, codes)
path2 = Path(verts2, codes)
# -------------------------
#for the Moy et al data
verts3 = [
(1., 6.86712125471966000000), # left, bottom
(1., 10.18712125471970000000), # left, top
(3., 12.18712125471970000000), # right, top
(3., 8.86712125471966000000), # right, bottom
(0., 0.), # ignored
]
path = Path(verts, codes)
path3 = Path(verts3, codes)
# ------------------------------------------------------------------------------------------------------
#the routine to add patches for others peoples' data onto our plots.
def add_patches(ax):
patch3 = patches.PathPatch(path3, facecolor='yellow', lw=0)
patch2 = patches.PathPatch(path2, facecolor='green', lw=0)
patch = patches.PathPatch(path, facecolor='red', lw=0)
ax1.add_patch(patch3)
ax1.add_patch(patch2)
ax1.add_patch(patch)
# ------------------------------------------------------------------------------------------------------
#the subplot routine
def add_sub_plot(sub_num):
numplots = 16
plt.subplot(numplots/4.,4,sub_num)
rbf = scipy.interpolate.Rbf(x, y, z[:,sub_num-1], function='linear')
zi = rbf(xi, yi)
contour = plt.contour(xi,yi,zi, levels, colors='c', linestyles = 'dashed')
contour2 = plt.contour(xi,yi,zi, levels2, colors='k', linewidths=1.5)
plt.scatter(max_values[line[sub_num-1],2], max_values[line[sub_num-1],3], c ='k',marker = '*')
plt.annotate(headers[line[sub_num-1]], xy=(8,11), xytext=(6,8.5), fontsize = 10)
plt.annotate(max_values[line[sub_num-1],0], xy= (max_values[line[sub_num-1],2], max_values[line[sub_num-1],3]), xytext = (0, -10), textcoords = 'offset points', ha = 'right', va = 'bottom', fontsize=10)
if sub_num == numplots / 2.:
print "half the plots are complete"
#axis limits
yt_min = 8
yt_max = 23
xt_min = 0
xt_max = 12
plt.ylim(yt_min,yt_max)
plt.xlim(xt_min,xt_max)
plt.yticks(arange(yt_min+1,yt_max,1),fontsize=10)
plt.xticks(arange(xt_min+1,xt_max,1), fontsize = 10)
if sub_num in [2,3,4,6,7,8,10,11,12,14,15,16]:
plt.tick_params(labelleft = 'off')
else:
plt.tick_params(labelleft = 'on')
plt.ylabel('Log ($ \phi _{\mathrm{H}} $)')
if sub_num in [1,2,3,4,5,6,7,8,9,10,11,12]:
plt.tick_params(labelbottom = 'off')
else:
plt.tick_params(labelbottom = 'on')
plt.xlabel('Log($n _{\mathrm{H}} $)')
if sub_num == 1:
plt.yticks(arange(yt_min+1,yt_max+1,1),fontsize=10)
if sub_num == 13:
plt.yticks(arange(yt_min,yt_max,1),fontsize=10)
plt.xticks(arange(xt_min,xt_max,1), fontsize = 10)
if sub_num == 16 :
plt.xticks(arange(xt_min+1,xt_max+1,1), fontsize = 10)
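# Note on the interpolation used in add_sub_plot above: scipy's Rbf builds a
# radial-basis interpolant z=f(hdens,phi) from the scattered grid points, and
# evaluating it on the dense meshgrid (xi,yi) below yields a surface that
# plt.contour can draw.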
# ---------------------------------------------------
#this is where the grid information (phi and hdens) is read in and saved to grid.
grid = [];
with open(inputfile, 'rb') as f:
csvReader = csv.reader(f,delimiter='\t')
for row in csvReader:
grid.append(row);
grid = asarray(grid)
#here is where the data for each line is read in and saved to dataEmissionlines
dataEmissionlines = [];
with open(inputfile2, 'rb') as f:
csvReader = csv.reader(f,delimiter='\t')
headers = csvReader.next()
for row in csvReader:
dataEmissionlines.append(row);
dataEmissionlines = asarray(dataEmissionlines)
print "import files complete"
# ---------------------------------------------------
#for grid
phi_values = grid[1:len(dataEmissionlines)+1,6]
hdens_values = grid[1:len(dataEmissionlines)+1,7]
#for lines
headers = headers[1:]
Emissionlines = dataEmissionlines[:, 1:]
concatenated_data = zeros((len(Emissionlines),len(Emissionlines[0])))
max_values = zeros((len(Emissionlines[0]),4))
#select the scaling factor
#for 1215
#incident = Emissionlines[1:,4]
#for 4860
incident = Emissionlines[:,57]
#take the ratio of incident and all the lines and put it all in an array concatenated_data
for i in range(len(Emissionlines)):
for j in range(len(Emissionlines[0])):
if math.log(4860.*(float(Emissionlines[i,j])/float(Emissionlines[i,57])), 10) > 0:
concatenated_data[i,j] = math.log(4860.*(float(Emissionlines[i,j])/float(Emissionlines[i,57])), 10)
else:
concatenated_data[i,j] = 0
# for 1215
#for i in range(len(Emissionlines)):
# for j in range(len(Emissionlines[0])):
# if math.log(1215.*(float(Emissionlines[i,j])/float(Emissionlines[i,4])), 10) > 0:
# concatenated_data[i,j] = math.log(1215.*(float(Emissionlines[i,j])/float(Emissionlines[i,4])), 10)
# else:
# concatenated_data[i,j] == 0
#find the maxima to plot onto the contour plots
for j in range(len(concatenated_data[0])):
max_values[j,0] = max(concatenated_data[:,j])
max_values[j,1] = argmax(concatenated_data[:,j], axis = 0)
max_values[j,2] = hdens_values[max_values[j,1]]
max_values[j,3] = phi_values[max_values[j,1]]
#to round off the maxima
max_values[:,0] = [ '%.1f' % elem for elem in max_values[:,0] ]
print "data arranged"
# ---------------------------------------------------
#Creating the grid to interpolate with for contours.
gridarray = zeros((len(Emissionlines),2))
gridarray[:,0] = hdens_values
gridarray[:,1] = phi_values
x = gridarray[:,0]
y = gridarray[:,1]
#change desired lines here!
line = [36, #NE 3 3343A
38, #BA C
39, #3646
40, #3726
41, #3727
42, #3729
43, #3869
44, #3889
45, #3933
46, #4026
47, #4070
48, #4074
49, #4078
50, #4102
51, #4340
52] #4363
#create z array for this plot
z = concatenated_data[:,line[:]]
# ---------------------------------------------------
# Interpolate
print "starting interpolation"
xi, yi = linspace(x.min(), x.max(), 10), linspace(y.min(), y.max(), 10)
xi, yi = meshgrid(xi, yi)
# ---------------------------------------------------
print "interpolatation complete; now plotting"
#plot
plt.subplots_adjust(wspace=0, hspace=0) #remove space between plots
levels = arange(10**-1,10, .2)
levels2 = arange(10**-2,10**2, 1)
plt.suptitle("Optical Lines", fontsize=14)
# ---------------------------------------------------
for i in range(16):
add_sub_plot(i)
ax1 = plt.subplot(4,4,1)
add_patches(ax1)
print "complete"
plt.savefig('optical_lines.pdf')
plt.clf()
| gpl-2.0 |
rkmaddox/mne-python | examples/visualization/channel_epochs_image.py | 10 | 2661 | """
=========================================
Visualize channel over epochs as an image
=========================================
This will produce what is sometimes called an event related
potential / field (ERP/ERF) image.
Two images are produced, one with a good channel and one with a channel
that does not show any evoked field.
It is also demonstrated how to reorder the epochs using a 1D spectral
embedding as described in :footcite:`GramfortEtAl2010`.
"""
# Authors: Alexandre Gramfort <[email protected]>
#
# License: BSD (3-clause)
import numpy as np
import matplotlib.pyplot as plt
import mne
from mne import io
from mne.datasets import sample
print(__doc__)
data_path = sample.data_path()
###############################################################################
# Set parameters
raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
event_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif'
event_id, tmin, tmax = 1, -0.2, 0.4
# Setup for reading the raw data
raw = io.read_raw_fif(raw_fname)
events = mne.read_events(event_fname)
# Set up pick list: EEG + MEG - bad channels (modify to your needs)
raw.info['bads'] = ['MEG 2443', 'EEG 053']
# Create epochs, here for gradiometers + EOG only for simplicity
epochs = mne.Epochs(raw, events, event_id, tmin, tmax, proj=True,
picks=('grad', 'eog'), baseline=(None, 0), preload=True,
reject=dict(grad=4000e-13, eog=150e-6))
###############################################################################
# Show event-related fields images
# and order with spectral reordering
# If you don't have scikit-learn installed set order_func to None
from sklearn.manifold import spectral_embedding # noqa
from sklearn.metrics.pairwise import rbf_kernel # noqa
def order_func(times, data):
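    # Restrict each epoch to the 0-350 ms window, L2-normalize its time course,
    # then order epochs by their coordinate on a 1D spectral embedding of the
    # RBF-kernel similarity matrix, so epochs with similar responses end up adjacent.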
this_data = data[:, (times > 0.0) & (times < 0.350)]
this_data /= np.sqrt(np.sum(this_data ** 2, axis=1))[:, np.newaxis]
return np.argsort(spectral_embedding(rbf_kernel(this_data, gamma=1.),
n_components=1, random_state=0).ravel())
good_pick = 97 # channel with a clear evoked response
bad_pick = 98 # channel with no evoked response
# We'll also plot a sample time onset for each trial
plt_times = np.linspace(0, .2, len(epochs))
plt.close('all')
mne.viz.plot_epochs_image(epochs, [good_pick, bad_pick], sigma=.5,
order=order_func, vmin=-250, vmax=250,
overlay_times=plt_times, show=True)
###############################################################################
# References
# ----------
# .. footbibliography::
| bsd-3-clause |
Clyde-fare/scikit-learn | examples/applications/plot_stock_market.py | 227 | 8284 | """
=======================================
Visualizing the stock market structure
=======================================
This example employs several unsupervised learning techniques to extract
the stock market structure from variations in historical quotes.
The quantity that we use is the daily variation in quote price: quotes
that are linked tend to cofluctuate during a day.
.. _stock_market:
Learning a graph structure
--------------------------
We use sparse inverse covariance estimation to find which quotes are
correlated conditionally on the others. Specifically, sparse inverse
covariance gives us a graph, that is a list of connection. For each
symbol, the symbols that it is connected too are those useful to explain
its fluctuations.
Clustering
----------
We use clustering to group together quotes that behave similarly. Here,
amongst the :ref:`various clustering techniques <clustering>` available
in the scikit-learn, we use :ref:`affinity_propagation` as it does
not enforce equal-size clusters, and it can choose automatically the
number of clusters from the data.
Note that this gives us a different indication than the graph, as the
graph reflects conditional relations between variables, while the
clustering reflects marginal properties: variables clustered together can
be considered as having a similar impact at the level of the full stock
market.
Embedding in 2D space
---------------------
For visualization purposes, we need to lay out the different symbols on a
2D canvas. For this we use :ref:`manifold` techniques to retrieve 2D
embedding.
Visualization
-------------
The output of the 3 models are combined in a 2D graph where nodes
represents the stocks and edges the:
- cluster labels are used to define the color of the nodes
- the sparse covariance model is used to display the strength of the edges
- the 2D embedding is used to position the nodes in the plan
This example has a fair amount of visualization-related code, as
visualization is crucial here to display the graph. One of the challenge
is to position the labels minimizing overlap. For this we use an
heuristic based on the direction of the nearest neighbor along each
axis.
"""
print(__doc__)
# Author: Gael Varoquaux [email protected]
# License: BSD 3 clause
import datetime
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import finance
from matplotlib.collections import LineCollection
from sklearn import cluster, covariance, manifold
###############################################################################
# Retrieve the data from Internet
# Choose a time period reasonably calm (not too long ago so that we get
# high-tech firms, and before the 2008 crash)
d1 = datetime.datetime(2003, 1, 1)
d2 = datetime.datetime(2008, 1, 1)
# kraft symbol has now changed from KFT to MDLZ in yahoo
symbol_dict = {
'TOT': 'Total',
'XOM': 'Exxon',
'CVX': 'Chevron',
'COP': 'ConocoPhillips',
'VLO': 'Valero Energy',
'MSFT': 'Microsoft',
'IBM': 'IBM',
'TWX': 'Time Warner',
'CMCSA': 'Comcast',
'CVC': 'Cablevision',
'YHOO': 'Yahoo',
'DELL': 'Dell',
'HPQ': 'HP',
'AMZN': 'Amazon',
'TM': 'Toyota',
'CAJ': 'Canon',
'MTU': 'Mitsubishi',
'SNE': 'Sony',
'F': 'Ford',
'HMC': 'Honda',
'NAV': 'Navistar',
'NOC': 'Northrop Grumman',
'BA': 'Boeing',
'KO': 'Coca Cola',
'MMM': '3M',
'MCD': 'Mc Donalds',
'PEP': 'Pepsi',
'MDLZ': 'Kraft Foods',
'K': 'Kellogg',
'UN': 'Unilever',
'MAR': 'Marriott',
'PG': 'Procter Gamble',
'CL': 'Colgate-Palmolive',
'GE': 'General Electrics',
'WFC': 'Wells Fargo',
'JPM': 'JPMorgan Chase',
'AIG': 'AIG',
'AXP': 'American express',
'BAC': 'Bank of America',
'GS': 'Goldman Sachs',
'AAPL': 'Apple',
'SAP': 'SAP',
'CSCO': 'Cisco',
'TXN': 'Texas instruments',
'XRX': 'Xerox',
'LMT': 'Lockheed Martin',
'WMT': 'Wal-Mart',
'WBA': 'Walgreen',
'HD': 'Home Depot',
'GSK': 'GlaxoSmithKline',
'PFE': 'Pfizer',
'SNY': 'Sanofi-Aventis',
'NVS': 'Novartis',
'KMB': 'Kimberly-Clark',
'R': 'Ryder',
'GD': 'General Dynamics',
'RTN': 'Raytheon',
'CVS': 'CVS',
'CAT': 'Caterpillar',
'DD': 'DuPont de Nemours'}
symbols, names = np.array(list(symbol_dict.items())).T
quotes = [finance.quotes_historical_yahoo(symbol, d1, d2, asobject=True)
for symbol in symbols]
open = np.array([q.open for q in quotes]).astype(np.float)
close = np.array([q.close for q in quotes]).astype(np.float)
# The daily variations of the quotes are what carry most information
variation = close - open
###############################################################################
# Learn a graphical structure from the correlations
edge_model = covariance.GraphLassoCV()
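# GraphLassoCV cross-validates the l1 penalty (alpha): a larger alpha yields a
# sparser precision matrix, i.e. fewer edges in the graph drawn below.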
# standardize the time series: using correlations rather than covariance
# is more efficient for structure recovery
X = variation.copy().T
X /= X.std(axis=0)
edge_model.fit(X)
###############################################################################
# Cluster using affinity propagation
_, labels = cluster.affinity_propagation(edge_model.covariance_)
n_labels = labels.max()
for i in range(n_labels + 1):
print('Cluster %i: %s' % ((i + 1), ', '.join(names[labels == i])))
###############################################################################
# Find a low-dimension embedding for visualization: find the best position of
# the nodes (the stocks) on a 2D plane
# We use a dense eigen_solver to achieve reproducibility (arpack is
# initiated with random vectors that we don't control). In addition, we
# use a large number of neighbors to capture the large-scale structure.
node_position_model = manifold.LocallyLinearEmbedding(
n_components=2, eigen_solver='dense', n_neighbors=6)
embedding = node_position_model.fit_transform(X.T).T
###############################################################################
# Visualization
plt.figure(1, facecolor='w', figsize=(10, 8))
plt.clf()
ax = plt.axes([0., 0., 1., 1.])
plt.axis('off')
# Display a graph of the partial correlations
partial_correlations = edge_model.precision_.copy()
d = 1 / np.sqrt(np.diag(partial_correlations))
partial_correlations *= d
partial_correlations *= d[:, np.newaxis]
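# Note (a sketch, not part of the original example): the usual definition of
# the partial correlation from a precision matrix P is
#   rho_ij = -P_ij / sqrt(P_ii * P_jj)
# The minus sign is dropped here because only the magnitudes |rho_ij| are
# used below to select and weight the edges.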
non_zero = (np.abs(np.triu(partial_correlations, k=1)) > 0.02)
# Plot the nodes using the coordinates of our embedding
plt.scatter(embedding[0], embedding[1], s=100 * d ** 2, c=labels,
cmap=plt.cm.spectral)
# Plot the edges
start_idx, end_idx = np.where(non_zero)
#a sequence of (*line0*, *line1*, *line2*), where::
# linen = (x0, y0), (x1, y1), ... (xm, ym)
segments = [[embedding[:, start], embedding[:, stop]]
for start, stop in zip(start_idx, end_idx)]
values = np.abs(partial_correlations[non_zero])
lc = LineCollection(segments,
zorder=0, cmap=plt.cm.hot_r,
norm=plt.Normalize(0, .7 * values.max()))
lc.set_array(values)
lc.set_linewidths(15 * values)
ax.add_collection(lc)
# Add a label to each node. The challenge here is that we want to
# position the labels to avoid overlap with other labels
for index, (name, label, (x, y)) in enumerate(
zip(names, labels, embedding.T)):
dx = x - embedding[0]
dx[index] = 1
dy = y - embedding[1]
dy[index] = 1
this_dx = dx[np.argmin(np.abs(dy))]
this_dy = dy[np.argmin(np.abs(dx))]
if this_dx > 0:
horizontalalignment = 'left'
x = x + .002
else:
horizontalalignment = 'right'
x = x - .002
if this_dy > 0:
verticalalignment = 'bottom'
y = y + .002
else:
verticalalignment = 'top'
y = y - .002
plt.text(x, y, name, size=10,
horizontalalignment=horizontalalignment,
verticalalignment=verticalalignment,
bbox=dict(facecolor='w',
edgecolor=plt.cm.spectral(label / float(n_labels)),
alpha=.6))
plt.xlim(embedding[0].min() - .15 * embedding[0].ptp(),
embedding[0].max() + .10 * embedding[0].ptp(),)
plt.ylim(embedding[1].min() - .03 * embedding[1].ptp(),
embedding[1].max() + .03 * embedding[1].ptp())
plt.show()
| bsd-3-clause |
abelcarreras/DynaPhoPy | dynaphopy/analysis/thermal_properties.py | 1 | 7253 | import numpy as np
import matplotlib.pylab as pl
import warnings
from scipy import integrate
N_a = 6.022140857e23
k_b = 1.38064852e-23 # J / K
h_bar = 6.626070040e-22 # J * ps -- note: numerically this is Planck's h (not h/2pi); the frequencies below are ordinary frequencies in THz, so h_bar*freq is the energy quantum h*nu
warnings.simplefilter("ignore")
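# Sketch of the formulas the helpers below implement (standard quantum
# harmonic-oscillator thermodynamics weighted by a density of states g(f),
# with n(T,f) = 1/(exp(h_bar*f/(k_b*T)) - 1) the Bose-Einstein occupation):
#   E  = Int g(f) * h_bar*f * (1/2 + n(T,f)) df
#   F  = Int g(f) * k_b*T * log(2*sinh(h_bar*f/(2*k_b*T))) df
#   S  = Int g(f) * (h_bar*f/(2*T) * coth(h_bar*f/(2*k_b*T)) - k_b*log(2*sinh(h_bar*f/(2*k_b*T)))) df
#   Cv = Int g(f) * k_b * z**2 * exp(z)/(exp(z)-1)**2 df, with z = h_bar*f/(k_b*T)
# get_dos() builds g(f) from a velocity power spectrum by dividing out the
# per-mode energy and the system size.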
def get_dos(temp, frequency, power_spectrum, n_size, bose_einstein_statistics=False):
conversion_factor = 1.60217662e-19 # eV -> J
def n(temp, freq):
return pow(np.exp(freq*h_bar/(k_b*temp))-1, -1)
if bose_einstein_statistics:
def energy(freq, temp):
return h_bar*freq*(0.5+n(temp, freq))
else:
def energy(freq, temp):
return k_b * temp
dos = np.nan_to_num([2.0*conversion_factor*power_spectrum[i]/(energy(freq, temp)*n_size)
for i, freq in enumerate(frequency)])
return dos
def get_total_energy(temperature, frequency, dos):
def n(temp, freq):
return pow(np.exp(freq*h_bar/(k_b*temp))-1, -1)
total_energy = np.nan_to_num([dos[i] * h_bar * freq * (0.5 + n(temperature, freq))
for i, freq in enumerate(frequency)])
total_energy = integrate.simps(total_energy, frequency) * N_a / 1000 # KJ/K/mol
return total_energy
def get_free_energy(temperature, frequency, dos):
free_energy = np.nan_to_num([dos[i] * k_b * temperature * np.log(2 * np.sinh(h_bar * freq / (2 * k_b * temperature)))
for i, freq in enumerate(frequency)])
free_energy[0] = 0
free_energy = integrate.simps(free_energy, frequency) * N_a / 1000 # KJ/K/mol
return free_energy
def get_free_energy_correction_shift(temperature, frequency, dos, shift):
def n(temp, freq):
return pow(np.exp(freq*h_bar/(k_b*temp))-1, -1)
free_energy_c = np.nan_to_num([dos[i] * -h_bar/2 *shift*(n(temperature, freq) + 1 / 2.)
for i, freq in enumerate(frequency)])
free_energy_c = integrate.simps(free_energy_c, frequency) * N_a / 1000 # KJ/K/mol
return free_energy_c
def get_free_energy_correction_dos(temperature, frequency, dos, dos_r):
def n(temp, freq):
return pow(np.exp(freq*h_bar/(k_b*temp))-1, -1)
free_energy_1 = np.nan_to_num([ dos_r[i] * -h_bar/2 * freq*(n(temperature, freq) + 1 / 2.)
for i, freq in enumerate(frequency)])
free_energy_2 = np.nan_to_num([ dos[i] * -h_bar/2 * freq*(n(temperature, freq) + 1 / 2.)
for i, freq in enumerate(frequency)])
free_energy_c = free_energy_1 - free_energy_2
free_energy_c = integrate.simps(free_energy_c, frequency) * N_a / 1000 # KJ/K/mol
return free_energy_c
def get_entropy(temperature, frequency, dos):
def coth(x):
return np.cosh(x)/np.sinh(x)
entropy = np.nan_to_num([dos[i]*(1.0 / (2. * temperature) * h_bar * freq * coth(h_bar * freq / (2 * k_b * temperature))
- k_b * np.log(2 * np.sinh(h_bar * freq / (2 * k_b * temperature))))
for i, freq in enumerate(frequency)])
entropy = integrate.simps(entropy, frequency) * N_a # J/K/mol
return entropy
# Alternative way to calculate entropy (not used)
def get_entropy2(temperature, frequency, dos):
def n(temp, freq):
return pow(np.exp(freq*h_bar/(k_b*temp))-1, -1)
entropy = np.nan_to_num([dos[i] * k_b * ((n(temperature, freq) + 1) * np.log(n(temperature, freq) + 1)
- n(temperature, freq) * np.log(n(temperature, freq)))
for i, freq in enumerate(frequency)])
entropy = integrate.simps(entropy, frequency) * N_a # J/K/mol
return entropy
def get_cv(temperature, frequency, dos):
def z(temp, freq):
return h_bar*freq/(k_b*temp)
c_v = np.nan_to_num([dos[i] * k_b * pow(z(temperature, freq), 2) * np.exp(z(temperature, freq)) / pow(np.exp(z(temperature, freq)) - 1, 2)
for i, freq in enumerate(frequency)])
c_v = integrate.simps(c_v, frequency) * N_a # J/K/mol
return c_v
if __name__ == "__main__":
shift = 0.05
#temp = 300
#dos_file = open('/Users/abel/TEST_GPU/GaN/total_dos.dat', mode='r')
#dos_r_file = open('/Users/abel/TEST_GPU/GaN/total_dos_o.dat', mode='r')
#power_file = open('/Users/abel/TEST_GPU/GaN/power_spectrum.dat', mode='r')
temp=900
dos_file = open('/home/abel/LAMMPS/Si/total_dos_h.dat', mode='r')
dos_r_file = open('/home/abel/LAMMPS/Si/total_dos_o.dat', mode='r')
power_file = open('/home/abel/LAMMPS/Si/power_spectrum_900_12_fft_vlong.dat', mode='r')
frequency = []
dos = []
for line in dos_file.readlines()[1:]:
frequency.append(float(line.split()[0]))
dos.append(float(line.split()[1]))
frequency_r = []
dos_r = []
for line in dos_r_file.readlines()[1:]:
frequency_r.append(float(line.split()[0]))
dos_r.append(float(line.split()[1]))
frequency_p = []
power_spectrum = []
for line in power_file.readlines():
frequency_p.append(float(line.split()[0]))
power_spectrum.append(float(line.split()[1]))
# power_spectrum = get_dos(temp,frequency_p,power_spectrum, 12*12*6)
power_spectrum = get_dos(temp, frequency_p, power_spectrum, 12*12*12)
pl.plot(frequency_p, power_spectrum, label='power')
pl.plot(frequency, dos,label='dos')
pl.plot(frequency_r, dos_r, label='dos_r')
pl.legend()
pl.show()
# free_energy = get_free_energy(temp,frequency,dos) + get_free_energy_correction(temp, frequency, dos, shift)
print (get_free_energy_correction_shift(temp, frequency, dos, shift),
get_free_energy_correction_dos(temp, frequency, dos, dos_r))
free_energy = get_free_energy(temp, frequency_r, dos_r) + get_free_energy_correction_dos(temp, frequency, dos_r, dos)
entropy = get_entropy(temp, frequency_r, dos_r)
c_v = get_cv(temp, frequency_r, dos_r)
print ('Renormalized')
print ('-------------------------')
print ('Free energy: {0} KJ/K/mol'.format(free_energy))
print ('Entropy: {0} J/K/mol'.format(entropy))
print ('Cv: {0} J/K/mol'.format(c_v))
print (np.trapz(dos_r, x=frequency_r)/(8*3))
print (integrate.simps(dos_r, x=frequency_r)/(8*3))
print ('\nFrom MD')
print ('-------------------------')
free_energy = get_free_energy(temp, frequency_p, power_spectrum)
entropy = get_entropy(temp, frequency_p, power_spectrum)
c_v = get_cv(temp, frequency_p, power_spectrum)
print ('Free energy: {0} KJ/K/mol'.format(free_energy))
print ('Entropy: {0} J/K/mol'.format(entropy))
print ('Cv: {0} J/K/mol'.format(c_v))
print (np.trapz(power_spectrum, x=frequency_p)/(8*3))
print (integrate.simps(power_spectrum, x=frequency_p)/(8*3))
print ('\nHARMONIC')
print ('-------------------------')
free_energy = get_free_energy(temp, frequency, dos)
entropy = get_entropy(temp, frequency, dos)
c_v = get_cv(temp, frequency, dos)
print ('Free energy: {0} KJ/K/mol'.format(free_energy))
print ('Entropy: {0} J/K/mol'.format(entropy))
print ('Cv: {0} J/K/mol'.format(c_v))
print (np.trapz(dos, x=frequency)/(8*3))
print (integrate.simps(dos, x=frequency)/(8*3)) | mit |
Gabriel-p/arXiv | modules/rank.py | 1 | 8355 |
import numpy as np
import textwrap
from itertools import groupby
from operator import itemgetter
from sklearn.pipeline import Pipeline
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.naive_bayes import MultinomialNB
from sklearn.linear_model import SGDClassifier
def probs(clmode, wordsRank, articles):
"""
Rank articles and obtain a probability for each one.
Based on the example: http://scikit-learn.org/stable/tutorial/
text_analytics/working_with_text_data.html
"""
if not wordsRank.empty:
modes = {
'NB': 'Naive Bayes', 'LR': 'Logistic regression',
'MH': 'Modified Huber', 'SVM': 'Support Vector Machines',
'PC': 'Perceptron'}
print("Training classifier ({}).".format(modes[clmode]))
# Extract titles and abstracts.
titles, abstracts = list(zip(*articles))[1], list(zip(*articles))[2]
titlAbs = [_ + ' ' + abstracts[i] for i, _ in enumerate(titles)]
# This block is the same as the Pipeline() below, but done in parts.
# # Tokenizing text
# count_vect = CountVectorizer()
# X_train_counts = count_vect.fit_transform(wordsRank['articles'])
# # From occurrences to frequencies
# tfidf_transformer = TfidfTransformer()
# X_train_tfidf = tfidf_transformer.fit_transform(X_train_counts)
# # Training a classifier
# clf = MultinomialNB().fit(X_train_tfidf, wordsRank['rank'])
# # Predict based on titles and abstract data.
# X_new_counts = count_vect.transform(titlAbs)
# X_new_tfidf = tfidf_transformer.transform(X_new_counts)
# probs = clf.predict(X_new_tfidf)
if clmode == 'NB':
# NaiveBayes
text_clf = Pipeline(
[('vect', CountVectorizer()), ('tfidf', TfidfTransformer()),
('clf', MultinomialNB())])
elif clmode == 'LR':
# Logistic regression
text_clf = Pipeline(
[('vect', CountVectorizer()), ('tfidf', TfidfTransformer()),
('clf', SGDClassifier(loss='log', max_iter=1000, tol=1e-3))])
elif clmode == 'MH':
# Modified Huber
text_clf = Pipeline(
[('vect', CountVectorizer()), ('tfidf', TfidfTransformer()),
('clf', SGDClassifier(
loss='modified_huber', max_iter=1000, tol=1e-3))])
elif clmode == 'SVM':
# Support Vector Machines
text_clf = Pipeline(
[('vect', CountVectorizer()), ('tfidf', TfidfTransformer()),
('clf', SGDClassifier(max_iter=1000, tol=1e-3))])
elif clmode == 'PC':
# Perceptron
text_clf = Pipeline(
[('vect', CountVectorizer()), ('tfidf', TfidfTransformer()),
('clf', SGDClassifier(
loss='perceptron', max_iter=1000, tol=1e-3))])
# Train the model.
text_clf.fit(wordsRank['articles'], wordsRank['rank'])
# Predict classifications.
ranks = text_clf.predict(titlAbs)
# Probability estimates are only available for 'log' and
# 'modified Huber' loss when using the 'SGDClassifier()'.
# (https://scikit-learn.org/stable/modules/generated/
# sklearn.linear_model.SGDClassifier.html#sklearn.linear_model.
# SGDClassifier.predict_proba)
if clmode not in ['SVM', 'PC']:
probs = text_clf.predict_proba(titlAbs).max(axis=1)
else:
print(" WARNING: probability estimates are not available"
"for this method.")
probs = np.ones(len(articles))
else:
ranks, probs = [0 for _ in articles], [0. for _ in articles]
print("No previous classifier file found.")
return ranks, probs
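# Usage sketch (hypothetical shapes, not from the original module): `wordsRank`
# is a pandas DataFrame with 'articles' (title + abstract text) and 'rank'
# (label) columns, and `articles` is a sequence of
# (authors, title, abstract, link) tuples:
#   ranks, probs_ = probs('NB', wordsRank, articles)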
def artSort(gr_ids, articles, dates, ranks, probs):
"""
Sort articles by rank first and by probability second, in reverse
order so that larger probabilities are positioned first.
Group articles by rank.
"""
# Sort by (rank, -prob, article, date); negating the probabilities makes
# larger probabilities sort first within each rank.
ranks, probs, articles, dates = (
list(t) for t in zip(*sorted(zip(
ranks, -np.array(probs), articles, dates))))
# Undo the negation to restore the original probabilities.
probs = -np.array(probs)
# Group by ranks.
data = list(zip(*[ranks, probs, articles, dates]))
gr_data = groupby(data, lambda x: x[0])
grpd_arts = {}
for k, group in gr_data:
grpd_arts[k] = list(group)
# Store number of articles for each group.
gr_len = []
for g_id in gr_ids:
g_id = int(g_id) if g_id != 'n' else 999
try:
gr_len.append(len(grpd_arts[g_id]))
except KeyError:
gr_len.append(0)
# To list and sublists.
grpd_arts = [list(_) for _ in grpd_arts.values()]
# Reverse last group (not_interested) so those that have a *lower*
# probability of belonging here are shown first.
grpd_arts[-1].sort(key=itemgetter(1))
return grpd_arts, gr_len
def manual(groups, gr_ids, grpd_arts, gr_len):
"""
Manually rank articles.
'q', 'quit', 'quit()', 'exit' exit the ranking process and store
whatever was ranked up to that point.
"""
# Total number of articles.
N_arts = sum([len(_) for _ in grpd_arts])
print("\nTotal number of articles to classify: {}".format(N_arts))
print("\nArticles per group defined:")
for i, g in enumerate(groups):
print(" G={} ({}): {}".format(gr_ids[i], g, gr_len[i]))
print("\nInput 'ng' to stop classifying and jump to the next group.")
train = []
# For each defined group.
for articles_gr in grpd_arts:
gr_id = articles_gr[0][0] - 1 if articles_gr[0][0] != 999 else -1
print("\n* Articles classified in group '{}' ({})".format(
groups[gr_id], len(articles_gr)))
# For each article in this group.
for j, data in enumerate(articles_gr):
rank, prob, art, date = data
r = str(rank) if rank != 999 else 'n'
print('\n{}/{}) G={}, P={:.2f}, {} ({})\n'.format(
str(j + 1), len(articles_gr), r, prob, date, art[3]))
# Authors
authors = art[0] if len(art[0].split(',')) < 4 else\
','.join(art[0].split(',')[:3]) + ', et al.'
print(textwrap.fill(authors, 77) + '\n')
# Title
print(textwrap.fill(art[1], 75) + '\n')
# Abstract
print(textwrap.fill(art[2], 80))
answ = artClass(gr_ids, rank)
if answ in gr_ids:
a = int(answ) if answ != 'n' else 999
train.append([date, a, art[1] + ' ' + art[2]])
# Jump to next group.
elif answ == 'next_gr':
break
# Don't classify this article and move forward.
elif answ == '':
pass
# Quit training/classifying.
elif answ in ['q', 'quit', 'quit()', 'exit']:
return train
return train
def artClass(gr_ids, rank):
"""
Manual ranking.
"""
while True:
pn = input("Group (1,..,{},n): ".format(len(gr_ids) - 1))
if pn in gr_ids:
return pn
# Jump to next group.
elif pn == 'ng':
if str(rank + 1) in gr_ids or (rank + 1) == len(gr_ids):
print("\nJump to next group.")
return 'next_gr'
else:
print(" No such group '{}' exists.".format(rank + 1))
# Empty string means don't classify this article and move forward.
# Rest of options mean "Quit training/classifying."
elif pn in ['', 'q', 'quit', 'quit()', 'exit']:
return pn
def zotero(groups, gr_ids, articles, dates):
"""
Assign a rank to Zotero articles.
"""
print("\nGroups defined:")
for i, g in enumerate(groups):
print(" {}: {}".format(gr_ids[i], g))
print("\nAssign a group for Zotero entries read.")
while True:
pn = input("Group (1,..,{},n): ".format(len(gr_ids) - 1))
if pn in gr_ids:
break
train = []
for i, art in enumerate(articles):
train.append([dates[i], int(pn), art[1] + ' ' + art[2]])
return train
| gpl-3.0 |
CallaJun/hackprince | indico/mpl_toolkits/tests/test_axes_grid1.py | 7 | 1871 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import matplotlib.pyplot as plt
from matplotlib.testing.decorators import image_comparison
from mpl_toolkits.axes_grid1 import make_axes_locatable
import numpy as np
@image_comparison(baseline_images=['divider_append_axes'])
def test_divider_append_axes():
# the random data
np.random.seed(0)
x = np.random.randn(1000)
y = np.random.randn(1000)
fig, axScatter = plt.subplots()
# the scatter plot:
axScatter.scatter(x, y)
# create new axes on the right and on the top of the current axes
# The first argument of the new_vertical(new_horizontal) method is
# the height (width) of the axes to be created in inches.
divider = make_axes_locatable(axScatter)
axHistbot = divider.append_axes("bottom", 1.2, pad=0.1, sharex=axScatter)
axHistright = divider.append_axes("right", 1.2, pad=0.1, sharey=axScatter)
axHistleft = divider.append_axes("left", 1.2, pad=0.1, sharey=axScatter)
axHisttop = divider.append_axes("top", 1.2, pad=0.1, sharex=axScatter)
# now determine nice limits by hand:
binwidth = 0.25
xymax = np.max([np.max(np.fabs(x)), np.max(np.fabs(y))])
lim = (int(xymax/binwidth) + 1) * binwidth
bins = np.arange(-lim, lim + binwidth, binwidth)
axHisttop.hist(x, bins=bins)
axHistbot.hist(x, bins=bins)
axHistleft.hist(y, bins=bins, orientation='horizontal')
axHistright.hist(y, bins=bins, orientation='horizontal')
axHistbot.invert_yaxis()
axHistleft.invert_xaxis()
axHisttop.xaxis.set_ticklabels(())
axHistbot.xaxis.set_ticklabels(())
axHistleft.yaxis.set_ticklabels(())
axHistright.yaxis.set_ticklabels(())
if __name__ == '__main__':
import nose
nose.runmodule(argv=['-s', '--with-doctest'], exit=False)
| lgpl-3.0 |
korin-worm-code/LevelJoiner | CompareWormEuler.py | 1 | 4970 | from sqlalchemy import create_engine, func, inspect
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.schema import Table,MetaData
from sqlalchemy import Column, Integer, Float, ForeignKey
from geoalchemy2 import Geometry
from geoalchemy2.functions import GenericFunction
from geoalchemy2.elements import WKTElement, WKBElement
from sqlalchemy.orm import sessionmaker, relationship, backref, aliased
from math import sqrt, atan2, pi, log10, log, sin, cos, radians
from Scientific.Geometry import Vector
from scipy import spatial
from sklearn import neighbors
import numpy as np
import sys
from WormDBStuff.WormDBStuff import WormDBStuffFactory
#Testing things
basename = 'ADKMergedBGA2500'
euler_points = 'adk_bga_euler_new'
WormPoint, WormLevelPoints, WormLevel, tablenames = WormDBStuffFactory(basename,to_max_grad = True)
# sqlalchemy vodoo
Base = declarative_base()
# Hooking things up to the database system
db = 'postgresql://frank:f00bar@localhost:5433/frank'
engine = create_engine('%s'%db, echo=False)
Session = sessionmaker(bind=engine)
session = Session()
connect = engine.connect()
# Unpack the table names returned by the factory
# (assumed order: points, levels, levels_points).
points_name, levels_name, levels_points_name = tablenames
if not engine.dialect.has_table(connect, points_name):
raise AttributeError('The Points table is missing.')
if not engine.dialect.has_table(connect, levels_name):
raise AttributeError('The Levels table is missing.')
if not engine.dialect.has_table(connect, levels_points_name):
raise AttributeError('The Levels_Points table is missing.')
meta = MetaData()
# This is a black magic function, that hooks up an existing database table, but that still allows
# for python object access to the database data.
# We will hook up the earthquake hypocenters (not valid anymore)
class Eulers(Base):
__table__ = Table(euler_points, meta, autoload=True, autoload_with=engine)
# This is an example of the sqlalchemy way to encapsulate a SQL query.
# This particular query builds a database "join" (perhaps not exactly due to the sqlalchemy innards)
# where all entities returned will be the edge "end point" and "edge" data structures that match.
# This is actually the head end of more restrictive filterings of the database tables
point_query = session.query(WormPoint,WormLevelPoints).filter(WormPoint.worm_point_id == WormLevelPoints.point_id)
# Pull all worm data structures from the database;
# returns both WormPoint and WormLevelPoints as a tuple(?) for each item
all_worm_points = point_query.all()
# It's actually simpler to dig the relevant bits out from the data structures returned by the database now
# than trying to deal with the headache of getting all of the indexing correct everywhere else.
# Think of it as a "once and only once" for getting the bloody indexing right...
# Build an array of 3-coords for each worm point to feed into the kd-tree for indexing
worm_pt_coords = np.array([[w[0].x,w[0].y,w[0].z] for w in all_worm_points])
# Creating an array out of the worm levels
worm_sgmt_levels = np.array([w[1].worm_level_id for w in all_worm_points])
# Creating an array out of the worm segments
worm_sgmt_ids = np.array([w[1].worm_seg_id for w in all_worm_points])
# Creating an array out of the sequential worm pieces
worm_sgmt_seq_num = np.array([w[1].seg_sequence_num for w in all_worm_points])
# We are building a numpy record array so that we can sort them with auxiliary sorting order.
worm_rec = np.rec.fromarrays([worm_sgmt_levels, worm_sgmt_ids, worm_sgmt_seq_num])
# Now create the ndarray of the results from the query.
# N.B. Both the end point and the edge are contained in each element.
all_worm_data = np.array(all_worm_points,dtype=[('worm_point',WormPoint),('worm_level_points',WormLevelPoints)])
# Creating SciPy KDTree to speed up earthquake-worm point comparison
#worm_kd = spatial.KDTree(worm_pt_coords,leafsize=50)
# Updating to be runable with mag data
worm_kd = neighbors.KDTree(worm_pt_coords,leaf_size=100)
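# Convention note (a sketch): scipy's KDTree.query returns (distances, indices),
# whereas sklearn's KDTree.query_radius returns (indices, distances), each as an
# array of per-query arrays. For a single query point q:
#   ind, dist = worm_kd.query_radius([q], r=r, return_distance=True, sort_results=True)
#   ind[0], dist[0]  # neighbors of q, sorted by increasing distance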
# Pulling in the Euler points from the database
euler_query = session.query(Eulers)
# This is the distance we are searching within, in meters
r = 10000.
# Let's build something for some quick stats...
min_dist_to_nodes = []
#far_eq = []
for p in euler_query.filter(Eulers.depth <= 15000.):
# We are no longer working with earthquakes, so we don't need to sort them by magnitude
#.filter(ADKMergedEQs._Depth_km_ == 0.).order_by(ADKMergedEQs._Magnitude_):
#print p._latitude_, p._longitude_, p._depth_km_, p._magnitude_
# depth must be in meters!
euler_pt = [p.x_euler,p.y_euler,p.depth]
# SciPy KDTrees
#dq,wq = worm_kd.query(euler_pt,k=20,distance_upper_bound=r)
wq, dq = worm_kd.query_radius([euler_pt], r=r, return_distance=True, sort_results=True)
# New return style
if wq[0].shape[0] == 0:
# print "No Worms within %f meters."%r
continue
# Distance to the closest worm point
min_dist_to_nodes += [dq[0][0]]
sys.stdout.flush()
#session.commit()
| bsd-2-clause |
numenta-archive/htmresearch | htmresearch/frameworks/capybara/unsupervised/plot.py | 6 | 8542 | #!/usr/bin/env python
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2016, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
from collections import OrderedDict
import numpy as np
from matplotlib import colors
from matplotlib import pyplot as plt
from htmresearch.frameworks.capybara.distance import cluster_distance_matrix
from htmresearch.frameworks.capybara.util import find_cluster_assignments
from htmresearch.frameworks.dimensionality_reduction.proj import project_in_2D
def plot_inter_sequence_distances(output_dir,
plot_id,
distance_func,
sdrs,
cluster_ids,
ignore_noise):
cluster_assignments, sdr_slices = find_cluster_assignments(sdrs, cluster_ids,
ignore_noise)
distance_mat = cluster_distance_matrix(sdr_slices, distance_func)
title = 'distance_matrix_%s' % plot_id
output_file = '%s/%s' % (output_dir, '%s.png' % title)
plot_distance_mat(distance_mat, title, output_file)
projections = project_in_2D(distance_mat, method='mds')
title = '2d_projections_%s' % plot_id
output_file = '%s/%s' % (output_dir, '%s.png' % title)
plot_2D_projections(title, output_file, cluster_assignments, projections)
def plot_2D_projections(title, output_file, cluster_assignments, projections):
"""
Visualize SDR cluster projections
"""
color_list = list(colors.cnames.keys())
plt.figure()
color_names = []
for i in range(len(cluster_assignments)):
cluster_id = int(cluster_assignments[i])
if cluster_id not in color_names:
color_names.append(cluster_id)
projection = projections[i]
label = 'Category %s' % cluster_id
if len(color_list) > cluster_id:
color = color_list[cluster_id]
else:
color = 'black'
plt.scatter(projection[0], projection[1], label=label, alpha=0.5,
color=color, marker='o', edgecolor='black')
# Add nicely formatted legend, deduplicating repeated labels via an OrderedDict
handles, labels = plt.gca().get_legend_handles_labels()
by_label = OrderedDict(zip(labels, handles))
plt.legend(by_label.values(), by_label.keys(), scatterpoints=1, loc=2)
plt.title(title)
plt.draw()
plt.savefig(output_file)
print('==> saved: %s' % output_file)
return plt
def plot_distance_mat(distance_mat, title, output_file):
plt.figure()
plt.imshow(distance_mat, interpolation="nearest")
plt.colorbar()
plt.title(title)
plt.xlabel('Sequence category')
plt.ylabel('Sequence category')
plt.savefig(output_file)
print('==> saved: %s' % output_file)
plt.draw()
def plot_accuracy(output_dir,
plot_id,
sensor_values,
categories,
anomaly_scores,
clustering_accuracies,
xlim):
fig, ax = plt.subplots(nrows=3, sharex=True, figsize=(15, 7))
# plot sensor data and categories
t = range(xlim[0], xlim[1])
ax[0].plot(t, sensor_values)
ax[0].set_xlabel('Time step')
ax[0].set_ylabel('Signal amplitude')
ax[0].set_xlim(xmin=xlim[0], xmax=xlim[1])
category_colors = ['grey', 'blue', 'yellow', 'red', 'green', 'orange']
previous_category = categories[0]
start = 0
category_count = 0
num_points = len(categories)
categories_labelled = []
for category in categories:
if previous_category != category or category_count == num_points - 1:
category_color = category_colors[int(previous_category)]
if category_color not in categories_labelled:
labelLegend = 'class=%s' % int(previous_category)
categories_labelled.append(category_color)
else:
labelLegend = None
end = category_count
ax[0].axvspan(start, end, facecolor=category_color, alpha=0.4,
label=labelLegend)
ax[1].axvspan(start, end, facecolor=category_color, alpha=0.4)
ax[2].axvspan(start, end, facecolor=category_color, alpha=0.4)
start = end
previous_category = category
category_count += 1
title = 'Sensor data (%s)' % plot_id.split('|')[0]
ax[0].set_title(title)
ax[0].set_ylim([-1, 11])
ax[0].legend(ncol=10)
# plot anomaly score
title = 'Anomaly score (%s)' % plot_id
ax[1].set_title(title)
ax[1].set_ylim([-0.1, 1.1])
ax[1].plot(anomaly_scores)
ax[1].set_ylabel('Anomaly score')
# clustering accuracy
title = 'Clustering accuracy (%s)' % plot_id
ax[2].plot(clustering_accuracies)
ax[2].set_title(title)
ax[2].set_ylim([-0.1, 1.1])
ax[2].set_xlabel('Time step')
ax[2].set_ylabel('Clustering accuracy')
plt.tight_layout(pad=0.5)
fig_name = 'clustering_accuracy.png'
plt.savefig('%s/%s' % (output_dir, fig_name))
print('==> saved: %s/%s' % (output_dir, fig_name))
plt.draw()
def plot_cluster_assignments(output_dir, clusters, timestep):
fig, ax = plt.subplots(figsize=(15, 7))
# cluster sizes
num_clusters = len(clusters)
if num_clusters > 0:
categories_to_num_points = {}
for i in range(num_clusters):
cluster = clusters[i]
cluster_id = cluster.id
freqs = cluster.label_distribution()
for freq in freqs:
num_points = int(freq['num_points'])
category = int(freq['label'])
if category not in categories_to_num_points:
categories_to_num_points[category] = {}
categories_to_num_points[category][cluster_id] = num_points
cluster_ids = []
for clusters_to_num_points in categories_to_num_points.values():
cluster_ids.extend(clusters_to_num_points.keys())
cluster_ids = list(set(cluster_ids))
# Get some pastel shades for the colors. Note: category index start at 0
num_bars = len(cluster_ids)
num_categories = max(categories_to_num_points.keys()) + 1
colors = plt.cm.BuPu(np.linspace(0, 0.5, num_categories))
bottom = np.array([0 for _ in range(num_bars)])
# Plot bars and create text labels for the table
cell_text = []
categories = []
for category, clusters_to_num_points in categories_to_num_points.items():
categories.append(category)
bars = []
for cid in cluster_ids:
if cid in clusters_to_num_points:
bars.append(clusters_to_num_points[cid])
else:
bars.append(0)
# draw the bars for this category
x = np.array([i for i in range(num_bars)])
ax.bar(x,
bars,
align='center',
bottom=bottom,
color=colors[category])
bottom += np.array(bars)
ax.set_xticks(x)
cell_text.append([x for x in bars])
ax.set_title('Number of points per category by cluster ID (Timestep: %s)'
% timestep)
ax.set_ylabel('Number of points')
# Reverse colors and text labels to display the last value at the top.
# colors = colors[::-1]
# cell_text.reverse()
# Add a table at the bottom of the axes
rowLabels = ['category %s' % c for c in categories]
colLabels = ['c%s' % c for c in cluster_ids]
the_table = plt.table(cellText=cell_text,
cellLoc='center',
rowLabels=rowLabels,
rowColours=colors,
colLabels=colLabels,
loc='bottom')
the_table.auto_set_font_size(False)
the_table.set_fontsize(9)
the_table.scale(1, 2)
ax.set_xticks([])
plt.tight_layout(pad=12)
fig_name = 'cluster_assignments_t=%s.png' % timestep
plt.savefig('%s/%s' % (output_dir, fig_name))
print('==> saved: %s/%s' % (output_dir, fig_name))
plt.draw()
| agpl-3.0 |
paulmueller/bornscat | examples/born_rytov_plot_3d.py | 2 | 1948 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
""" Compare Born and Rytov approximation
This script creates a colorful plot.
"""
from __future__ import division
from __future__ import print_function
from matplotlib import pylab as plt
import numpy as np
import os
import sys
DIR = os.path.dirname(os.path.realpath(__file__))
sys.path.insert(0, DIR+"/../")
import bornscat
rfac = 1
# Set measurement parameters
# Compute scattered field from a sphere
radius = 3 # wavelengths
nmed = 1.333
nsph = 1.343
size = 64*rfac # pixels
res = 4*rfac # px/wavelength (use 23 for a finer sampling)
fft_method = "numpy"
# create refractive index map for Born
n = nmed * np.ones((size,size,size))
n0 = 1*n
rad = radius*res
x=np.linspace(-size/2,size/2,size, endpoint=False)
xv = x.reshape(-1, 1, 1)
yv = x.reshape( 1,-1, 1)
zv = x.reshape( 1, 1,-1)
n[np.where((xv**2+yv**2+zv**2 < rad**2))] = nsph
# Rytov
print("Rytov scattered wave")
rytov_u0 = bornscat.rytov_3d(n0, nmed, res, fft_method=fft_method)
rytov_u = bornscat.rytov_3d(n, nmed, res, fft_method=fft_method)
ro = rytov_u/rytov_u0
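# Sketch: in the Rytov approximation the total field is written as
# u = u0 * exp(phi_R), so the complex exponent follows from ro = u/u0 via
# phi_R = log(ro). Below, np.angle(ro) gives Im(phi_R) (the phase) and
# np.abs(ro) = exp(Re(phi_R)) gives the amplitude.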
rph = np.angle(ro)
ram = np.abs(ro)
phakwargs = {"vmin": rph.min(),
"vmax": rph.max(),
"cmap": "coolwarm"}
ampkwargs = {"vmin": ram.min(),
"vmax": ram.max(),
"cmap": "gray"}
# Plot
fig, axes = plt.subplots(2,3)
axes = axes.transpose().flatten()
axes[0].set_title("Rytov phase z=0")
axes[0].imshow(rph[:,:,size//2], **phakwargs)
axes[1].set_title("Rytov amplitude z=0")
axes[1].imshow(ram[:,:,size//2], **ampkwargs)
axes[2].set_title("Rytov phase y=0")
axes[2].imshow(rph[:,size//2,:], **phakwargs)
axes[3].set_title("Rytov amplitude y=0")
axes[3].imshow(ram[:,size//2,:], **ampkwargs)
axes[4].set_title("Rytov phase x=0")
axes[4].imshow(rph[size//2,:,:], **phakwargs)
axes[5].set_title("Rytov amplitude x=0")
axes[5].imshow(ram[size//2,:,:], **ampkwargs)
plt.tight_layout()
plt.savefig(os.path.join(DIR, "born_rytov_plot_3d.png"))
| bsd-3-clause |
calico/basenji | bin/basenji_bench_phylop_folds.py | 1 | 14867 | #!/usr/bin/env python
# Copyright 2019 Calico LLC
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# https://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =========================================================================
from optparse import OptionParser, OptionGroup
import glob
import h5py
import json
import pdb
import os
import shutil
import sys
import numpy as np
import pandas as pd
import slurm
import util
from basenji_test_folds import stat_tests
"""
basenji_bench_phylop_folds.py
Benchmark Basenji model replicates on BED PhyloP task.
"""
################################################################################
# main
################################################################################
def main():
usage = 'usage: %prog [options] <exp_dir> <params_file> <data_dir> <bed_file>'
parser = OptionParser(usage)
# sat options
sat_options = OptionGroup(parser, 'basenji_sat_bed.py options')
sat_options.add_option('-d', dest='mut_down',
default=0, type='int',
help='Nucleotides downstream of center sequence to mutate [Default: %default]')
sat_options.add_option('-f', dest='genome_fasta',
default=None,
help='Genome FASTA for sequences [Default: %default]')
sat_options.add_option('-l', dest='mut_len',
default=0, type='int',
help='Length of center sequence to mutate [Default: %default]')
sat_options.add_option('-o', dest='out_dir',
default='sat_mut', help='Output directory [Default: %default]')
sat_options.add_option('--plots', dest='plots',
default=False, action='store_true',
help='Make heatmap plots [Default: %default]')
sat_options.add_option('-p', dest='processes',
default=None, type='int',
help='Number of processes, passed by multi script')
sat_options.add_option('--restart', dest='restart',
default=False, action='store_true',
help='Restart a partially completed job [Default: %default]')
sat_options.add_option('--rc', dest='rc',
default=False, action='store_true',
help='Ensemble forward and reverse complement predictions [Default: %default]')
sat_options.add_option('--shifts', dest='shifts',
default='0',
help='Ensemble prediction shifts [Default: %default]')
sat_options.add_option('--stats', dest='sad_stats',
default='sum',
help='Comma-separated list of stats to save. [Default: %default]')
sat_options.add_option('-t', dest='targets_file',
default=None, type='str',
help='File specifying target indexes and labels in table format')
sat_options.add_option('-u', dest='mut_up',
default=0, type='int',
help='Nucleotides upstream of center sequence to mutate [Default: %default]')
parser.add_option_group(sat_options)
phylop_options = OptionGroup(parser, 'basenji_bench_phylop.py options')
# phylop_options.add_option('-e', dest='num_estimators',
# default=100, type='int',
# help='Number of random forest estimators [Default: %default]')
phylop_options.add_option('-g', dest='genome',
default='ce11', help='PhyloP and FASTA genome [Default: %default]')
# phylop_options.add_option('--pca', dest='n_components',
# default=None, type='int',
# help='PCA n_components [Default: %default]')
parser.add_option_group(phylop_options)
fold_options = OptionGroup(parser, 'cross-fold options')
fold_options.add_option('-a', '--alt', dest='alternative',
default='two-sided', help='Statistical test alternative [Default: %default]')
fold_options.add_option('-c', dest='crosses',
default=1, type='int',
help='Number of cross-fold rounds [Default:%default]')
fold_options.add_option('-e', dest='conda_env',
default='tf2.4',
help='Anaconda environment [Default: %default]')
fold_options.add_option('--label_exp', dest='label_exp',
default='Experiment', help='Experiment label [Default: %default]')
fold_options.add_option('--label_ref', dest='label_ref',
default='Reference', help='Reference label [Default: %default]')
fold_options.add_option('--max_proc', dest='max_proc',
default=None, type='int',
help='Maximum concurrent processes [Default: %default]')
fold_options.add_option('--name', dest='name',
default='sat', help='SLURM name prefix [Default: %default]')
fold_options.add_option('-q', dest='queue',
default='gtx1080ti',
help='SLURM queue on which to run the jobs [Default: %default]')
fold_options.add_option('-r', dest='ref_dir',
default=None, help='Reference directory for statistical tests')
parser.add_option_group(fold_options)
(options, args) = parser.parse_args()
if len(args) != 4:
    parser.error('Must provide experiment directory, parameters file, data directory, and BED file')
else:
exp_dir = args[0]
params_file = args[1]
data_dir = args[2]
bed_file = args[3]
# read data parameters
data_stats_file = '%s/statistics.json' % data_dir
with open(data_stats_file) as data_stats_open:
data_stats = json.load(data_stats_open)
# count folds
num_folds = len([dkey for dkey in data_stats if dkey.startswith('fold')])
# genome
genome_path = os.environ[options.genome.upper()]
options.genome_fasta = '%s/assembly/%s.fa' % (genome_path, options.genome)
################################################################
# saturation mutagenesis
################################################################
jobs = []
scores_files = []
for ci in range(options.crosses):
for fi in range(num_folds):
it_dir = '%s/f%d_c%d' % (exp_dir, fi, ci)
name = '%s-f%dc%d' % (options.name, fi, ci)
# update output directory
sat_dir = '%s/%s' % (it_dir, options.out_dir)
# check if done
scores_file = '%s/scores.h5' % sat_dir
scores_files.append(scores_file)
if os.path.isfile(scores_file):
print('%s already generated.' % scores_file)
else:
basenji_cmd = '. /home/drk/anaconda3/etc/profile.d/conda.sh;'
basenji_cmd += ' conda activate %s;' % options.conda_env
basenji_cmd += ' echo $HOSTNAME;'
if options.processes > 1:
basenji_cmd += ' basenji_sat_bed_multi.py'
basenji_cmd += ' --max_proc %d' % (options.max_proc // num_folds)
basenji_cmd += ' -q %s' % options.queue
basenji_cmd += ' -n %s' % name
else:
basenji_cmd += ' basenji_sat_bed.py'
basenji_cmd += ' %s' % options_string(options, sat_options, sat_dir)
basenji_cmd += ' %s' % params_file
basenji_cmd += ' %s/train/model_best.h5' % it_dir
basenji_cmd += ' %s' % bed_file
if options.processes > 1:
jobs.append(basenji_cmd)
else:
basenji_job = slurm.Job(basenji_cmd, name,
out_file='%s.out'%sat_dir,
err_file='%s.err'%sat_dir,
cpu=2, gpu=1,
queue=options.queue,
mem=30000, time='28-0:00:00')
jobs.append(basenji_job)
if options.processes > 1:
util.exec_par(jobs, verbose=True)
else:
slurm.multi_run(jobs, verbose=True)
################################################################
# ensemble
################################################################
ensemble_dir = '%s/ensemble' % exp_dir
if not os.path.isdir(ensemble_dir):
os.mkdir(ensemble_dir)
sat_dir = '%s/%s' % (ensemble_dir, options.out_dir)
if not os.path.isdir(sat_dir):
os.mkdir(sat_dir)
if not os.path.isfile('%s/scores.h5' % sat_dir):
print('Generating ensemble scores.')
ensemble_scores_h5(sat_dir, scores_files)
else:
print('Ensemble scores already generated.')
################################################################
# PhyloP regressors
################################################################
# num_pcs = int(data_stats['num_targets']**0.75)
jobs = []
for ci in range(options.crosses):
for fi in range(num_folds):
it_dir = '%s/f%d_c%d' % (exp_dir, fi, ci)
sat_dir = '%s/%s' % (it_dir, options.out_dir)
if not os.path.isfile('%s/stats.txt' % sat_dir):
phylop_cmd = 'basenji_bench_phylop.py'
phylop_cmd += ' -e 200 -p 4'
# phylop_cmd += ' -d %d' % num_pcs
phylop_cmd += ' -o %s' % sat_dir
phylop_cmd += ' %s/scores.h5' % sat_dir
name = '%s-f%dc%d' % (options.name, fi, ci)
std_pre = '%s/phylop'%sat_dir
j = slurm.Job(phylop_cmd, name,
'%s.out'%std_pre, '%s.err'%std_pre,
queue='standard', cpu=4,
mem=90000, time='1-0:0:0')
jobs.append(j)
# ensemble
sat_dir = '%s/%s' % (ensemble_dir, options.out_dir)
if not os.path.isfile('%s/stats.txt' % sat_dir):
phylop_cmd = 'basenji_bench_phylop.py'
phylop_cmd += ' -e 200 -p 4'
# phylop_cmd += ' -d %d' % num_pcs
phylop_cmd += ' -o %s' % sat_dir
phylop_cmd += ' %s/scores.h5' % sat_dir
name = '%s-ens' % options.name
std_pre = '%s/phylop'%sat_dir
j = slurm.Job(phylop_cmd, name,
'%s.out'%std_pre, '%s.err'%std_pre,
queue='standard', cpu=4,
mem=90000, time='1-0:0:0')
jobs.append(j)
slurm.multi_run(jobs, verbose=True)
################################################################
# compare
################################################################
ref_sat_dirs = []
exp_sat_dirs = []
for ci in range(options.crosses):
for fi in range(num_folds):
exp_sat_dir = '%s/f%d_c%d/%s' % (exp_dir, fi, ci, options.out_dir)
exp_sat_dirs.append(exp_sat_dir)
if options.ref_dir is not None:
ref_sat_dir = '%s/f%d_c%d/%s' % (options.ref_dir, fi, ci, options.out_dir)
ref_sat_dirs.append(ref_sat_dir)
exp_pcor_folds, exp_r2_folds = read_metrics(exp_sat_dirs)
exp_sat_dirs = ['%s/ensemble/%s' % (exp_dir, options.out_dir)]
exp_pcor_ens, exp_r2_ens = read_metrics(exp_sat_dirs)
if options.ref_dir is not None:
ref_pcor_folds, ref_r2_folds = read_metrics(ref_sat_dirs)
ref_sat_dirs = ['%s/ensemble/%s' % (options.ref_dir, options.out_dir)]
ref_pcor_ens, ref_r2_ens = read_metrics(ref_sat_dirs)
print('PearsonR')
exp_mean = exp_pcor_folds.mean()
exp_stdm = exp_pcor_folds.std() / np.sqrt(len(exp_pcor_folds))
expe_mean = exp_pcor_ens.mean()
expe_stdm = exp_pcor_ens.std() / np.sqrt(len(exp_pcor_ens))
print('%12s: %.4f (%.4f)' % (options.label_exp, exp_mean, exp_stdm))
print('%12s (ens): %.4f (%.4f)' % (options.label_exp, expe_mean, expe_stdm))
if options.ref_dir is not None:
ref_mean = ref_pcor_folds.mean()
ref_stdm = ref_pcor_folds.std() / np.sqrt(len(ref_pcor_folds))
refe_mean = ref_pcor_ens.mean()
refe_stdm = ref_pcor_ens.std() / np.sqrt(len(ref_pcor_ens))
print('%12s: %.4f (%.4f)' % (options.label_ref, ref_mean, ref_stdm))
print('%12s (ens): %.4f (%.4f)' % (options.label_ref, refe_mean, refe_stdm))
mwp, tp = stat_tests(exp_pcor_folds, ref_pcor_folds, options.alternative)
print('Mann-Whitney U p-value: %.3g' % mwp)
print('T-test p-value: %.3g' % tp)
print('\nR2')
exp_mean = exp_r2_folds.mean()
exp_stdm = exp_r2_folds.std() / np.sqrt(len(exp_r2_folds))
expe_mean = exp_r2_ens.mean()
expe_stdm = exp_r2_ens.std() / np.sqrt(len(exp_r2_ens))
print('%12s: %.4f (%.4f)' % (options.label_exp, exp_mean, exp_stdm))
print('%12s (ens): %.4f (%.4f)' % (options.label_exp, expe_mean, expe_stdm))
if options.ref_dir is not None:
ref_mean = ref_r2_folds.mean()
ref_stdm = ref_r2_folds.std() / np.sqrt(len(ref_r2_folds))
refe_mean = ref_r2_ens.mean()
refe_stdm = ref_r2_ens.std() / np.sqrt(len(ref_r2_ens))
print('%12s: %.4f (%.4f)' % (options.label_ref, ref_mean, ref_stdm))
print('%12s (ens): %.4f (%.4f)' % (options.label_ref, refe_mean, refe_stdm))
mwp, tp = stat_tests(exp_r2_folds, ref_r2_folds, options.alternative)
print('Mann-Whitney U p-value: %.3g' % mwp)
print('T-test p-value: %.3g' % tp)
def ensemble_scores_h5(ensemble_dir, scores_files):
# open ensemble
ensemble_h5_file = '%s/scores.h5' % ensemble_dir
if os.path.isfile(ensemble_h5_file):
os.remove(ensemble_h5_file)
ensemble_h5 = h5py.File(ensemble_h5_file, 'w')
# transfer base
base_keys = ['seqs','chr','start','end','strand']
sad_stats = []
scores0_h5 = h5py.File(scores_files[0], 'r')
for key in scores0_h5.keys():
if key in base_keys:
ensemble_h5.create_dataset(key, data=scores0_h5[key])
else:
sad_stats.append(key)
sad_shape = scores0_h5[key].shape
scores0_h5.close()
# average sum stats
num_folds = len(scores_files)
for sad_stat in sad_stats:
# initialize ensemble array
sad_values = np.zeros(shape=sad_shape, dtype='float32')
# read and add folds
for scores_file in scores_files:
with h5py.File(scores_file, 'r') as scores_h5:
sad_values += scores_h5[sad_stat][:].astype('float32')
# normalize and downcast
sad_values /= num_folds
sad_values = sad_values.astype('float16')
# save
ensemble_h5.create_dataset(sad_stat, data=sad_values)
ensemble_h5.close()
def options_string(options, group_options, rep_dir):
options_str = ''
for opt in group_options.option_list:
opt_str = opt.get_opt_string()
opt_value = options.__dict__[opt.dest]
    # wrap asterisks in ""
if type(opt_value) == str and opt_value.find('*') != -1:
opt_value = '"%s"' % opt_value
# no value for bools
elif type(opt_value) == bool:
if not opt_value:
opt_str = ''
opt_value = ''
# skip Nones
elif opt_value is None:
opt_str = ''
opt_value = ''
# modify
elif opt.dest == 'out_dir':
opt_value = rep_dir
options_str += ' %s %s' % (opt_str, opt_value)
return options_str
def read_metrics(sat_dirs):
pcor_folds = []
r2_folds = []
for sat_dir in sat_dirs:
pcor_i = np.load('%s/pcor.npy' % sat_dir)
r2_i = np.load('%s/r2.npy' % sat_dir)
pcor_folds.append(pcor_i)
r2_folds.append(r2_i)
pcor_folds = np.concatenate(pcor_folds)
r2_folds = np.concatenate(r2_folds)
return pcor_folds, r2_folds
################################################################################
# __main__
################################################################################
if __name__ == '__main__':
main()
| apache-2.0 |
wbyne/QGIS | python/plugins/processing/algs/qgis/VectorLayerHistogram.py | 4 | 2943 | # -*- coding: utf-8 -*-
"""
***************************************************************************
EquivalentNumField.py
---------------------
Date : January 2013
Copyright : (C) 2013 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'January 2013'
__copyright__ = '(C) 2013, Victor Olaya'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import matplotlib.pyplot as plt
import matplotlib.pylab as lab
from processing.core.GeoAlgorithm import GeoAlgorithm
from processing.core.parameters import ParameterVector
from processing.core.parameters import ParameterTableField
from processing.core.parameters import ParameterNumber
from processing.core.outputs import OutputHTML
from processing.tools import vector, dataobjects
class VectorLayerHistogram(GeoAlgorithm):
INPUT = 'INPUT'
OUTPUT = 'OUTPUT'
FIELD = 'FIELD'
BINS = 'BINS'
def defineCharacteristics(self):
self.name, self.i18n_name = self.trAlgorithm('Vector layer histogram')
self.group, self.i18n_group = self.trAlgorithm('Graphics')
self.addParameter(ParameterVector(self.INPUT,
self.tr('Input layer')))
self.addParameter(ParameterTableField(self.FIELD,
self.tr('Attribute'), self.INPUT,
ParameterTableField.DATA_TYPE_NUMBER))
self.addParameter(ParameterNumber(self.BINS,
self.tr('number of bins'), 2, None, 10))
self.addOutput(OutputHTML(self.OUTPUT, self.tr('Histogram')))
def processAlgorithm(self, progress):
layer = dataobjects.getObjectFromUri(
self.getParameterValue(self.INPUT))
fieldname = self.getParameterValue(self.FIELD)
bins = self.getParameterValue(self.BINS)
output = self.getOutputValue(self.OUTPUT)
values = vector.values(layer, fieldname)
plt.close()
plt.hist(values[fieldname], bins)
plotFilename = output + '.png'
lab.savefig(plotFilename)
f = open(output, 'w')
f.write('<html><img src="' + plotFilename + '"/></html>')
f.close()
| gpl-2.0 |
newville/scikit-image | doc/examples/plot_tinting_grayscale_images.py | 14 | 5336 | """
=========================
Tinting gray-scale images
=========================
It can be useful to artificially tint an image with some color, either to
highlight particular regions of an image or maybe just to liven up a grayscale
image. This example demonstrates image-tinting by scaling RGB values and by
adjusting colors in the HSV color-space.
In 2D, color images are often represented in RGB---3 layers of 2D arrays, where
the 3 layers represent (R)ed, (G)reen and (B)lue channels of the image. The
simplest way of getting a tinted image is to set each RGB channel to the
grayscale image scaled by a different multiplier for each channel. For example,
multiplying the green and blue channels by 0 leaves only the red channel and
produces a bright red image. Similarly, zeroing-out the blue channel leaves
only the red and green channels, which combine to form yellow.
"""
import matplotlib.pyplot as plt
from skimage import data
from skimage import color
from skimage import img_as_float
grayscale_image = img_as_float(data.camera()[::2, ::2])
image = color.gray2rgb(grayscale_image)
red_multiplier = [1, 0, 0]
yellow_multiplier = [1, 1, 0]
fig, (ax1, ax2) = plt.subplots(ncols=2, figsize=(8, 4))
ax1.imshow(red_multiplier * image)
ax2.imshow(yellow_multiplier * image)
"""
.. image:: PLOT2RST.current_figure
In many cases, dealing with RGB values may not be ideal. Because of that, there
are many other `color spaces`_ in which you can represent a color image. One
popular color space is called HSV, which represents hue (~the color),
saturation (~colorfulness), and value (~brightness). For example, a color
(hue) might be green, but its saturation is how intense that green is---where
olive is on the low end and neon on the high end.
In some implementations, the hue in HSV goes from 0 to 360, since hues wrap
around in a circle. In scikit-image, however, hues are float values from 0 to
1, so that hue, saturation, and value all share the same scale.
.. _color spaces:
http://en.wikipedia.org/wiki/List_of_color_spaces_and_their_uses
Below, we plot a linear gradient in the hue, with the saturation and value
turned all the way up:
"""
import numpy as np
hue_gradient = np.linspace(0, 1)
hsv = np.ones(shape=(1, len(hue_gradient), 3), dtype=float)
hsv[:, :, 0] = hue_gradient
all_hues = color.hsv2rgb(hsv)
fig, ax = plt.subplots(figsize=(5, 2))
# Set image extent so hues go from 0 to 1 and the image is a nice aspect ratio.
ax.imshow(all_hues, extent=(0, 1, 0, 0.2))
ax.set_axis_off()
"""
.. image:: PLOT2RST.current_figure
Notice how the colors at the far left and far right are the same. That reflects
the fact that the hues wrap around like the color wheel (see HSV_ for more
info).
.. _HSV: http://en.wikipedia.org/wiki/HSL_and_HSV
Now, let's create a little utility function to take an RGB image and:
1. Transform the RGB image to HSV
2. Set the hue and saturation
3. Transform the HSV image back to RGB
"""
def colorize(image, hue, saturation=1):
""" Add color of the given hue to an RGB image.
By default, set the saturation to 1 so that the colors pop!
"""
hsv = color.rgb2hsv(image)
hsv[:, :, 1] = saturation
hsv[:, :, 0] = hue
return color.hsv2rgb(hsv)
"""
Notice that we need to bump up the saturation; images with zero saturation are
grayscale, so we need a non-zero value to actually see the color we've set.
Using the function above, we plot six images with a linear gradient in the hue
and a non-zero saturation:
"""
hue_rotations = np.linspace(0, 1, 6)
fig, axes = plt.subplots(nrows=2, ncols=3)
for ax, hue in zip(axes.flat, hue_rotations):
# Turn down the saturation to give it that vintage look.
tinted_image = colorize(image, hue, saturation=0.3)
ax.imshow(tinted_image, vmin=0, vmax=1)
ax.set_axis_off()
fig.tight_layout()
"""
.. image:: PLOT2RST.current_figure
You can combine this tinting effect with numpy slicing and fancy-indexing to
selectively tint your images. In the example below, we set the hue of some
rectangles using slicing and scale the RGB values of some pixels found by
thresholding. In practice, you might want to define a region for tinting based
on segmentation results or blob detection methods.
"""
from skimage.filters import rank
# Square regions defined as slices over the first two dimensions.
top_left = (slice(100),) * 2
bottom_right = (slice(-100, None),) * 2
sliced_image = image.copy()
sliced_image[top_left] = colorize(image[top_left], 0.82, saturation=0.5)
sliced_image[bottom_right] = colorize(image[bottom_right], 0.5, saturation=0.5)
# Create a mask selecting regions with interesting texture.
noisy = rank.entropy(grayscale_image, np.ones((9, 9)))
textured_regions = noisy > 4
# Note that using `colorize` here is a bit more difficult, since `rgb2hsv`
# expects an RGB image (height x width x channel), but fancy-indexing returns
# a set of RGB pixels (# pixels x channel).
masked_image = image.copy()
masked_image[textured_regions, :] *= red_multiplier
fig, (ax1, ax2) = plt.subplots(ncols=2, figsize=(8, 4))
ax1.imshow(sliced_image)
ax2.imshow(masked_image)
plt.show()
"""
.. image:: PLOT2RST.current_figure
For coloring multiple regions, you may also be interested in
`skimage.color.label2rgb <http://scikit-image.org/docs/0.9.x/api/skimage.color.html#label2rgb>`_.
"""
| bsd-3-clause |
jseabold/scikit-learn | examples/decomposition/plot_image_denoising.py | 181 | 5819 | """
=========================================
Image denoising using dictionary learning
=========================================
An example comparing the effect of reconstructing noisy fragments
of the Lena image using online :ref:`DictionaryLearning` and
various transform methods.
The dictionary is fitted on the distorted left half of the image, and
subsequently used to reconstruct the right half. Note that even better
performance could be achieved by fitting to an undistorted (i.e.
noiseless) image, but here we start from the assumption that it is not
available.
A common practice for evaluating the results of image denoising is by looking
at the difference between the reconstruction and the original image. If the
reconstruction is perfect this will look like Gaussian noise.
It can be seen from the plots that the results of :ref:`omp` with two
non-zero coefficients are a bit less biased than when keeping only one
(the edges look less prominent). They are, in addition, closer to the ground
truth in Frobenius norm.
The result of :ref:`least_angle_regression` is much more strongly biased: the
difference is reminiscent of the local intensity value of the original image.
Thresholding is clearly not useful for denoising, but it is here to show that
it can produce a suggestive output with very high speed, and thus be useful
for other tasks such as object classification, where performance is not
necessarily related to visualisation.
"""
print(__doc__)
from time import time
import matplotlib.pyplot as plt
import numpy as np
from scipy.misc import lena
from sklearn.decomposition import MiniBatchDictionaryLearning
from sklearn.feature_extraction.image import extract_patches_2d
from sklearn.feature_extraction.image import reconstruct_from_patches_2d
###############################################################################
# Load Lena image and extract patches
lena = lena() / 256.0
# downsample for higher speed
lena = lena[::2, ::2] + lena[1::2, ::2] + lena[::2, 1::2] + lena[1::2, 1::2]
lena /= 4.0
height, width = lena.shape
# Distort the right half of the image
print('Distorting image...')
distorted = lena.copy()
distorted[:, height // 2:] += 0.075 * np.random.randn(width, height // 2)
# Extract all reference patches from the left half of the image
print('Extracting reference patches...')
t0 = time()
patch_size = (7, 7)
data = extract_patches_2d(distorted[:, :height // 2], patch_size)
data = data.reshape(data.shape[0], -1)
data -= np.mean(data, axis=0)
data /= np.std(data, axis=0)
print('done in %.2fs.' % (time() - t0))
###############################################################################
# Learn the dictionary from reference patches
print('Learning the dictionary...')
t0 = time()
dico = MiniBatchDictionaryLearning(n_components=100, alpha=1, n_iter=500)
V = dico.fit(data).components_
dt = time() - t0
print('done in %.2fs.' % dt)
plt.figure(figsize=(4.2, 4))
for i, comp in enumerate(V[:100]):
plt.subplot(10, 10, i + 1)
plt.imshow(comp.reshape(patch_size), cmap=plt.cm.gray_r,
interpolation='nearest')
plt.xticks(())
plt.yticks(())
plt.suptitle('Dictionary learned from Lena patches\n' +
'Train time %.1fs on %d patches' % (dt, len(data)),
fontsize=16)
plt.subplots_adjust(0.08, 0.02, 0.92, 0.85, 0.08, 0.23)
###############################################################################
# Display the distorted image
def show_with_diff(image, reference, title):
"""Helper function to display denoising"""
plt.figure(figsize=(5, 3.3))
plt.subplot(1, 2, 1)
plt.title('Image')
plt.imshow(image, vmin=0, vmax=1, cmap=plt.cm.gray, interpolation='nearest')
plt.xticks(())
plt.yticks(())
plt.subplot(1, 2, 2)
difference = image - reference
plt.title('Difference (norm: %.2f)' % np.sqrt(np.sum(difference ** 2)))
plt.imshow(difference, vmin=-0.5, vmax=0.5, cmap=plt.cm.PuOr,
interpolation='nearest')
plt.xticks(())
plt.yticks(())
plt.suptitle(title, size=16)
plt.subplots_adjust(0.02, 0.02, 0.98, 0.79, 0.02, 0.2)
show_with_diff(distorted, lena, 'Distorted image')
###############################################################################
# Extract noisy patches and reconstruct them using the dictionary
print('Extracting noisy patches... ')
t0 = time()
data = extract_patches_2d(distorted[:, height // 2:], patch_size)
data = data.reshape(data.shape[0], -1)
intercept = np.mean(data, axis=0)
data -= intercept
print('done in %.2fs.' % (time() - t0))
transform_algorithms = [
('Orthogonal Matching Pursuit\n1 atom', 'omp',
{'transform_n_nonzero_coefs': 1}),
('Orthogonal Matching Pursuit\n2 atoms', 'omp',
{'transform_n_nonzero_coefs': 2}),
('Least-angle regression\n5 atoms', 'lars',
{'transform_n_nonzero_coefs': 5}),
('Thresholding\n alpha=0.1', 'threshold', {'transform_alpha': .1})]
reconstructions = {}
for title, transform_algorithm, kwargs in transform_algorithms:
print(title + '...')
reconstructions[title] = lena.copy()
t0 = time()
dico.set_params(transform_algorithm=transform_algorithm, **kwargs)
code = dico.transform(data)
patches = np.dot(code, V)
if transform_algorithm == 'threshold':
patches -= patches.min()
patches /= patches.max()
patches += intercept
patches = patches.reshape(len(data), *patch_size)
if transform_algorithm == 'threshold':
patches -= patches.min()
patches /= patches.max()
reconstructions[title][:, height // 2:] = reconstruct_from_patches_2d(
patches, (width, height // 2))
dt = time() - t0
print('done in %.2fs.' % dt)
show_with_diff(reconstructions[title], lena,
title + ' (time: %.1fs)' % dt)
plt.show()
| bsd-3-clause |
tobegit3hub/deep_cnn | java_predict_client/src/main/proto/tensorflow/contrib/learn/python/learn/dataframe/transforms/in_memory_source.py | 82 | 6157 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Sources for numpy arrays and pandas DataFrames."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.learn.python.learn.dataframe import transform
from tensorflow.contrib.learn.python.learn.dataframe.queues import feeding_functions
class BaseInMemorySource(transform.TensorFlowTransform):
"""Abstract parent class for NumpySource and PandasSource."""
def __init__(self,
data,
num_threads=None,
enqueue_size=None,
batch_size=None,
queue_capacity=None,
shuffle=False,
min_after_dequeue=None,
seed=None,
data_name="in_memory_data"):
super(BaseInMemorySource, self).__init__()
self._data = data
self._num_threads = 1 if num_threads is None else num_threads
self._batch_size = (32 if batch_size is None else batch_size)
self._enqueue_size = max(1, int(self._batch_size / self._num_threads)
) if enqueue_size is None else enqueue_size
self._queue_capacity = (self._batch_size * 10 if queue_capacity is None else
queue_capacity)
self._shuffle = shuffle
self._min_after_dequeue = (batch_size if min_after_dequeue is None else
min_after_dequeue)
self._seed = seed
self._data_name = data_name
@transform.parameter
def data(self):
return self._data
@transform.parameter
def num_threads(self):
return self._num_threads
@transform.parameter
def enqueue_size(self):
return self._enqueue_size
@transform.parameter
def batch_size(self):
return self._batch_size
@transform.parameter
def queue_capacity(self):
return self._queue_capacity
@transform.parameter
def shuffle(self):
return self._shuffle
@transform.parameter
def min_after_dequeue(self):
return self._min_after_dequeue
@transform.parameter
def seed(self):
return self._seed
@transform.parameter
def data_name(self):
return self._data_name
@property
def input_valency(self):
return 0
def _apply_transform(self, transform_input, **kwargs):
queue = feeding_functions.enqueue_data(self.data,
self.queue_capacity,
self.shuffle,
self.min_after_dequeue,
num_threads=self.num_threads,
seed=self.seed,
name=self.data_name,
enqueue_size=self.enqueue_size,
num_epochs=kwargs.get("num_epochs"))
dequeued = queue.dequeue_many(self.batch_size)
# TODO(jamieas): dequeue and dequeue_many will soon return a list regardless
# of the number of enqueued tensors. Remove the following once that change
# is in place.
if not isinstance(dequeued, (tuple, list)):
dequeued = (dequeued,)
# pylint: disable=not-callable
return self.return_type(*dequeued)
class NumpySource(BaseInMemorySource):
"""A zero-input Transform that produces a single column from a numpy array."""
@property
def name(self):
return "NumpySource"
@property
def _output_names(self):
return ("index", "value")
class OrderedDictNumpySource(BaseInMemorySource):
"""A zero-input Transform that produces Series from a dict of numpy arrays."""
def __init__(self,
ordered_dict_of_arrays,
num_threads=None,
enqueue_size=None,
batch_size=None,
queue_capacity=None,
shuffle=False,
min_after_dequeue=None,
seed=None,
data_name="pandas_data"):
if "index" in ordered_dict_of_arrays.keys():
raise ValueError("Column name `index` is reserved.")
super(OrderedDictNumpySource, self).__init__(ordered_dict_of_arrays,
num_threads, enqueue_size,
batch_size, queue_capacity,
shuffle, min_after_dequeue,
seed, data_name)
@property
def name(self):
return "OrderedDictNumpySource"
@property
def _output_names(self):
return tuple(["index"] + list(self._data.keys()))
class PandasSource(BaseInMemorySource):
"""A zero-input Transform that produces Series from a DataFrame."""
def __init__(self,
dataframe,
num_threads=None,
enqueue_size=None,
batch_size=None,
queue_capacity=None,
shuffle=False,
min_after_dequeue=None,
seed=None,
data_name="pandas_data"):
if "index" in dataframe.columns:
raise ValueError("Column name `index` is reserved.")
super(PandasSource, self).__init__(dataframe, num_threads, enqueue_size,
batch_size, queue_capacity, shuffle,
min_after_dequeue, seed, data_name)
@property
def name(self):
return "PandasSource"
@property
def _output_names(self):
return tuple(["index"] + self._data.columns.tolist())
| apache-2.0 |
gurkirt/actNet-inAct | processing/detection-fusion-working.py | 1 | 32723 | '''
Author: Gurkirt Singh
Start date: 15th May 2016
Purpose: read frame-level predictions and process them to produce a label per video
'''
from sklearn.svm import LinearSVC
from sklearn.ensemble import RandomForestClassifier
import numpy as np
import pickle
import os,h5py
import time,json
import scipy.io as sio
import copy
#import pylab as plt
#######baseDir = "/mnt/sun-alpha/actnet/";
baseDir = "/data/shared/solar-machines/actnet/";
baseDir = "/data/shared/solar-machines/actnet/";
########imgDir = "/mnt/sun-alpha/actnet/rgb-images/";
######## imgDir = "/mnt/DATADISK2/ss-workspace/actnet/rgb-images/";
annotPklFile = "../Evaluation/data/actNet200-V1-3.pkl"
def readannos():
with open(annotPklFile,'rb') as f:
actNetDB = pickle.load(f)
actionIDs = actNetDB['actionIDs']; taxonomy=actNetDB['taxonomy']; database = actNetDB['database'];
return actionIDs,taxonomy,database
def getnames():
fname = baseDir+'data/lists/gtnames.list'
with open(fname,'rb') as f:
lines = f.readlines()
names = []
for name in lines:
name = name.rstrip('\n')
names.append(name)
# print names
return names
def gettopklabel(preds,k,classtopk):
scores = np.zeros(200)
topk = min(classtopk,np.shape(preds)[1]);
for i in range(200):
values = preds[i,:];
values = np.sort(values);
values = values[::-1]
scores[i] = np.mean(values[:topk])
# print scores
sortedlabel = np.argsort(scores)[::-1]
# print sortedlabel
sortedscores = scores[sortedlabel]
# print sortedlabel[:k],sortedscores[:k]
return sortedlabel[:k],sortedscores[:k]
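def _demo_gettopklabel():
    # Illustrative sketch (hypothetical data, not called anywhere in this
    # script): gettopklabel expects a (200, numframes) score matrix; per
    # class it averages the top `classtopk` frame scores and returns the k
    # best class indices together with their scores.
    demo_preds = np.random.rand(200, 50)
    labels, scores = gettopklabel(demo_preds, 5, 20)
    print 'top-5 labels', labels, 'scores', scores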
def gettopklabel4mp(scores,k):
scores = scores - np.min(scores);
scores = scores/np.sum(scores);
sortedlabel = np.argsort(scores)[::-1]
# print sortedlabel
sortedscores = scores[sortedlabel]
# print sortedlabel[:k],sortedscores[:k]
ss = sortedscores[:20]
ss = ss/np.sum(ss)
ss = ss[:5]
ss = ss/np.sum(ss)
return sortedlabel[:k],ss[:k]
def sumfuse(mbh,ims,k):
mbh = mbh - np.min(mbh)+1.0;
ims = ims - np.min(ims)+1.0;
# mbh = mbh/np.sum(mbh)
# ims = ims/np.sum(ims)
scores = mbh*ims;
scores = scores/np.sum(scores);
sortedlabel = np.argsort(scores)[::-1]
# print sortedlabel
sortedscores = scores[sortedlabel]
# print sortedlabel[:k],sortedscores[:k]
ss = sortedscores[:5]
ss = ss/np.sum(ss)
return sortedlabel[:k],ss[:k]
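def _demo_sumfuse():
    # Illustrative sketch (hypothetical data, not called anywhere): the
    # fusion helpers shift each 200-dim score vector to be positive,
    # combine them (elementwise product here) and renormalise the top
    # entries before returning the k best labels.
    mbh = np.random.rand(200)
    ims = np.random.rand(200)
    labels, scores = sumfuse(mbh, ims, 5)
    print 'fused top-5', labels, scores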
def wAPfuse(mbh,ims,wmbh,wims,k):
for i in range(200):
mbh[i] = (1+wmbh[i])*mbh[i]
ims[i] = (1+wims[i])*ims[i]
mbh = mbh - np.min(mbh)+1;
ims = ims - np.min(ims)+1;
# mbh = mbh/np.sum(mbh)
# ims = ims/np.sum(ims)
scores = mbh + ims;
# scores = np.mean(wmbh)*mbh+np.mean(wims)*ims;
# scores = np.zeros(200)
# for i in range(200):
# scores[i] = (mbh[i]*wmbh[i]+wims[i]*ims[i])/(wmbh[i]+wims[i]+1);
scores = scores/np.sum(scores);
sortedlabel = np.argsort(scores)[::-1]
# print sortedlabel
sortedscores = scores[sortedlabel]
# print sortedlabel[:k],sortedscores[:k]
ss = sortedscores[:5]
ss = ss/np.sum(ss)
return sortedlabel[:k],ss[:k]
def getsegmentswithcls(preds,alpha=5):
labels,scores = gettopklabel(preds,10,200)
    # NOTE: refineCalssification is not defined in this file; this helper is
    # dead code unless that function is supplied elsewhere.
    labels,scores = refineCalssification(labels,scores)
#(p,D) = dpEM(preds,alpha)
#labels,starts,ends = getLabels(p)
starts,ends = getfullLength(labels,np.shape(preds)[1])
# print 'Number of segments generated are ',np.shape(labels)
#scores = getscores(D,starts,ends,labels)
#labels,scores,starts,ends = removeBackground(labels,scores,starts,ends)
return labels,scores,starts,ends
def getfullLength(labels,length):
starts=[];ends=[]
offset = int(length*0.15)
for i in range(len(labels)):
starts.append(offset);
ends.append(length-offset)
return np.asarray(starts),np.asarray(ends)
def fuseThree(mbh,ims,c3d,numf,k):
mbh = mbh - np.min(mbh)+1;
ims = ims - np.min(ims)+1;
scores = mbh*ims*c3d;
scores = scores/np.sum(scores);
sortedlabel = np.argsort(scores)[::-1]
sortedscores = scores[sortedlabel]
ss = sortedscores
ss = ss/np.sum(ss[:5])
labels,scores = sortedlabel[:k],ss[:k]
starts,ends = getfullLength(labels,numf)
return labels,scores,starts,ends
def dpEM(M,alpha):
(r,c) = np.shape(M);
D = np.zeros((r, c+1)) # add an extra column
D[:,0] = 1# % put the maximum cost
D[:, 1:(c+1)] = M;
# v = np.ones(r)*alpha;
phi = np.zeros((r,c))
# pdb.set_trace()
for j in xrange(1,c):
for i in xrange(r):
# values.index(min(values))
v1 = np.ones(r)*alpha
v1[i] = 0;
values= D[:, j-1]+v1
tb = np.argmin(values)
dmax = min(values)
D[i,j] = D[i,j]+dmax;
phi[i,j] = tb;
# pdb.set_trace()
q = c-1;
values= D[:, c-1]
p = np.argmin(values)
i = p
j = q
ps = np.zeros(c)
ps[q] = p
while j>0:
tb = phi[i,j];
j = int(j-1);
q = j;
ps[q] = tb;
i = int(tb);
D = D[:,1:];
return (ps,D)
def getLabels(p):
starts = np.zeros(500);
ends = np.zeros(500);
labels = np.zeros(500,dtype='int32');
fl = 0
i=0
starts[i]=0
fl = p[0]
labels[i] = p[0]
# print p[0]
# pdb.set_trace()
for ii in range(len(p)):
if abs(p[ii] -fl)>0:
ends[i]=ii-1
fl = p[ii]
i+=1
starts[i]=ii
labels[i] = fl
ends[i] = len(p)-1
# print i, starts[:i+1],ends[:i+1],labels[:i+1]
return labels[:i+1],starts[:i+1],ends[:i+1]
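def _demo_dpEM():
    # Illustrative sketch (hypothetical costs, not called by default): dpEM
    # finds the cheapest frame labelling of a (numlabels, numframes) cost
    # matrix, paying `alpha` each time the label switches, and getLabels
    # turns the resulting path into (label, start, end) segments.
    M = np.array([[0.1, 0.2, 0.9, 0.8, 0.2],   # per-frame cost of label 0
                  [0.9, 0.8, 0.1, 0.2, 0.9]])  # per-frame cost of label 1
    p, D = dpEM(M, 0.5)
    labels, starts, ends = getLabels(p)
    print 'path', p, 'segments', labels, starts, ends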
def getSegment4mAlphaEXT(topklabels,topscors,clscores,predEXT,C3Dfeat,fps,numf,alpha):
labels = []; scores = []; starts = []; ends = [];
clscores = clscores/np.sum(clscores);
norms = np.sum(topscors[:5])
for label in topklabels[:1]:
clScore = clscores[label]/norms;
colScore = predEXT[:,label];
colScore = colScore/np.max(colScore)
M = np.zeros((2,len(colScore)))
M[1,:] = 1-colScore
M[0,:] = colScore
# print M
scs = [.5,.6,.7,.8,.9,.1]
offsetA = 0;
while len(scs)>2:
(p,D) = dpEM(M,alpha+offsetA)
# print p
ls,ss,eds = getLabels(p)
scs,ss,eds = refinelabels(ls,ss,eds,colScore)
offsetA+=1
if len(scs)>0:
for ind in range(len(scs)):
labels.append(label)
scores.append(scs[ind])
starts.append(ss[ind])
ends.append(eds[ind])
else:
labels.append(label)
# scores.append(clScore)
scols = sorted(colScore)
scols = scols[::-1]
scores.append(np.mean(scols[:min(len(colScore),200)]))
starts.append(int(len(colScore)*0.10))
ends.append(int(len(colScore) - len(colScore)*0.10))
return labels,scores,starts,ends
def getSegmentBinaryC3D(topklabels,topscors,clscores,predEXT,C3DfeatbinRF,C3DfeatbinSVM,C3Dfeat,fps,numf,alpha):
# topklabels,topscors,scores,predEXT,C3DfeatbinRF,C3DfeatbinSVM,C3Dfeat,fps,numf,alpha
indexs = np.asarray(C3Dfeat['indexs']);
frameLabels = np.asarray(C3Dfeat['labels']);
preds = np.asarray(C3Dfeat['scores']);
c3numf = np.shape(preds)[0];
preds = preds - np.min(preds);
# predsEXT = predEXT['scores']
# sio.savemat('data.mat',mdict= {'indexs':indexs,'topklabels':topklabels,'topscors':topscors,'clscores':clscores,'preds':preds,'numf':numf,'fps':fps,'frameLabels':frameLabels,'predEXT':predEXT})
for i in range(c3numf):
preds[i,:] = preds[i,:] - np.min(preds[i,:])+1;
preds[i,:] = preds[i,:]/np.sum(preds[i,:]);
# preds[i,:] = preds[i,:]/np.sum(preds[i,:]);
t2f = (c3numf*fps)/numf;
labels = []; scores = []; starts = []; ends = [];
clscores = clscores/np.sum(clscores);
norms = np.sum(topscors[:2])
topscors = topscors/norms;
lcount = 0;
binSVM = smoothit(np.asarray(C3DfeatbinSVM['scores']))
binRFcopy = np.asarray(C3DfeatbinRF['scores'])
for label in topklabels[:15]:
binRF = copy.deepcopy(binRFcopy[:,1])
clScore = topscors[lcount];
colScore = preds[:,label]
lcount +=1
# colScoreSmoothed = smoothColScores(colScore,10)
# binRF = colScoreSmoothed;
# binRF = smoothit(binRF);
#binRF = binRF-np.mean(binRF);
# sortedScores = sorted(binRF)
# offset = int(c3numf*0.06);
# minS = np.mean(sortedScores[:offset])
# sortedScores = sortedScores[:-1];
# print sortedScores
# maxS = np.mean(sortedScores[c3numf-int(1.5*offset):])
# print minS,maxS
# binRF = (binRF-minS)/(maxS-minS)
# extColScore = predEXT[indexs,label]
# binRF[extColScore>0.8] = 0.95;
# binRF[binSVM>0.65] = 0.85;
# binRF[binSVM>0.8] = 0.99;
# binRF[binSVM<-0.4] = 0.1;
# binRF[binSVM<-0.85] = 0.00;
# binRF[colScoreSmoothed>0.6] = 0.9;
# binRF[colScoreSmoothed<0.1] = 0.0;
# else:
# binRF = binRF-minS
# binRF = newScores;
# print 'saving it'
# sio.savemat('colScoreSmoothed.mat',mdict = {'binSVM':binSVM,'binRF':binRF,'frameLabels':frameLabels});
# sio.savemat('colScoreSmoothed.mat',mdict = {'binRF':binRF,'frameLabels':frameLabels});
# colScoreSmoothed = binRF[:,1]
# M = np.transpose(binRF);
# print M
M = np.zeros((2,c3numf))
M[0,:] = 1-binRF
M[1,:] = binRF
# # print M
ls = [1,2,3,4,5,6,7,8,9,10,11]
# talpha = alpha;
# while len(ls)>7:
(p,D) = dpEM(M,alpha)
ls,ss,eds = getLabels(p)
# talpha += 0.2
scs,ss,eds = refinelabels(ls,ss,eds,binRF)
# print scs,ss,eds
if len(scs)>0:
for ind in range(len(scs)):
labels.append(label)
scores.append(clScore)
starts.append(ss[ind])
ends.append(eds[ind])
else:
# error('we have problem')
# else:
labels.append(label)
# scols = sorted(binRF)
# scols = scols[::-1]
# seglen = min(int(len(scols)*0.6),30)
# scores.append(np.mean(scols[:seglen])*clScore)
scores.append(clScore)
# scores.append(clScore)
starts.append(int(c3numf*0.12))
ends.append(int(c3numf - c3numf*0.12))
# st = int(segInit[sInd]*t2f)
# et = int(segEnd[sInd]*t2f)
# bscore = clScore*np.mean(colScore[st:et])*pscores[sInd]
# # print st,et,bscore,np.mean(colScore[st:et]),pscores[sInd],clScore
#
# if bscore>0.01:
# labels.append(label); scores.append(bscore);
# starts.append(segInit[sInd]*fps);
# ends.append(segEnd[sInd]*fps);
return labels,scores,starts,ends,c3numf
def getSegment4mAlphaC3D(topklabels,topscors,clscores,predEXT,C3Dfeat,fps,numf,alpha):
indexs = np.asarray(C3Dfeat['indexs']);
frameLabels = np.asarray(C3Dfeat['labels']);
preds = np.asarray(C3Dfeat['scores']);
c3numf = np.shape(preds)[0];
preds = preds - np.min(preds);
sio.savemat('data.mat',mdict= {'indexs':indexs,'topklabels':topklabels,'topscors':topscors,'clscores':clscores,'preds':preds,'numf':numf,'fps':fps,'frameLabels':frameLabels,'predEXT':predEXT})
for i in range(c3numf):
preds[i,:] = preds[i,:] - np.min(preds[i,:])+1;
preds[i,:] = preds[i,:]/np.sum(preds[i,:]);
# preds[i,:] = preds[i,:]/np.sum(preds[i,:]);
t2f = (c3numf*fps)/numf;
labels = []; scores = []; starts = []; ends = [];
clscores = clscores/np.sum(clscores);
norms = np.sum(topscors[:5])
topscors = topscors/norms;
lcount = 0;
for label in topklabels[:1]:
clScore = topscors[lcount];
lcount +=1
colScore = preds[:,label]/norms;
colScoreSmoothed = smoothColScores(colScore,10)
sio.savemat('colScoreSmoothed.mat',mdict = {'colScoreSmoothed':colScoreSmoothed,'colScore':colScore});
M = np.zeros((2,len(colScoreSmoothed)))
M[1,:] = 1-colScoreSmoothed
M[0,:] = colScoreSmoothed
# print M
(p,D) = dpEM(M,alpha)
# print p
ls,ss,eds = getLabels(p)
# print p
# print ls,ss,eds
scs,ss,eds = refinelabels(ls,ss,eds,colScoreSmoothed)
# print scs,ss,eds
if len(scs)>0:
for ind in range(len(scs)):
labels.append(label)
scores.append(scs[ind]*clScore)
starts.append(ss[ind])
ends.append(eds[ind])
# else:
labels.append(label)
scols = sorted(colScoreSmoothed)
scols = scols[::-1]
seglen = min(int(len(scols)*0.5),30)
scores.append(np.mean(scols[:seglen])*clScore)
# scores.append(clScore)
starts.append(int(c3numf*0.10))
ends.append(int(c3numf - c3numf*0.10))
# st = int(segInit[sInd]*t2f)
# et = int(segEnd[sInd]*t2f)
# bscore = clScore*np.mean(colScore[st:et])*pscores[sInd]
# # print st,et,bscore,np.mean(colScore[st:et]),pscores[sInd],clScore
#
# if bscore>0.01:
# labels.append(label); scores.append(bscore);
# starts.append(segInit[sInd]*fps);
# ends.append(segEnd[sInd]*fps);
return labels,scores,starts,ends,c3numf
def getBinaryAccuracy(topklabels,topscors,clscores,predEXT,C3DfeatbinRF,C3DfeatbinSVM,C3Dfeat,fps,numf,alpha):
# topklabels,topscors,scores,predEXT,C3DfeatbinRF,C3DfeatbinSVM,C3Dfeat,fps,numf,alpha
indexs = np.asarray(C3Dfeat['indexs']);
frameLabels = np.asarray(C3DfeatbinRF['labels']);
preds = np.asarray(C3Dfeat['scores']);
c3numf = np.shape(preds)[0];
preds = preds - np.min(preds);
# predsEXT = predEXT['scores']
# sio.savemat('data.mat',mdict= {'indexs':indexs,'topklabels':topklabels,'topscors':topscors,'clscores':clscores,'preds':preds,'numf':numf,'fps':fps,'frameLabels':frameLabels,'predEXT':predEXT})
for i in range(c3numf):
preds[i,:] = preds[i,:] - np.min(preds[i,:])+1;
preds[i,:] = preds[i,:]/np.sum(preds[i,:]);
# preds[i,:] = preds[i,:]/np.sum(preds[i,:]);
t2f = (c3numf*fps)/numf;
# labels = []; scores = []; starts = []; ends = [];
clscores = clscores/np.sum(clscores);
norms = np.sum(topscors[:2])
topscors = topscors/norms;
lcount = 0;
binSVM = smoothit(np.asarray(C3DfeatbinSVM['scores']),5)
binRF = np.asarray(C3DfeatbinRF['scores'])
# print ' shapes ', np.shape(binRF),np.shape(binSVM)
binRF = smoothit(binRF[:,1],5)
# print ' shapes ', np.shape(binRF)
label =topklabels[0]
clScore = topscors[lcount];
colScore = preds[:,label]
lcount +=1
colScoreSmoothed = smoothColScores(colScore,6)
extColScore = predEXT[indexs,label]
accEXT = 1.0; accbinSVM = 1.0; accbinRF = 1.0; accC3D = 1.0;
extth = 0.8; binSVMth = 0.8; binRFth = 0.8; c3dth = 0.8;
countrf = 1.0; countext = 1.0;
countc3 = 1.0; countBsvm = 1.0;
# binRF[binSVM<-0.1] = 0.05;
# binRF[extColScore>0.6] = 0.85;
for i in range(c3numf):
if extColScore[i]>=extth:
if frameLabels[i]<200:
accEXT+=1
countext = countext+1
if binRF[i]>=binRFth:
if frameLabels[i]<200:
accbinRF+=1
countrf = countrf+1
if binSVM[i]>=binSVMth:
if frameLabels[i]<200:
accbinSVM+=1
countBsvm = countBsvm+1
if colScoreSmoothed[i]>=c3dth:
if frameLabels[i]<200:
accC3D+=1
countc3 = countc3+1
# if (colScoreSmoothed[i]<c3dth and frameLabels[i]==200):
# accC3D+=1
# if (binRF[i]<binRFth and frameLabels[i]==200):
# accbinRF+=1
# if binSVM[i]<binSVMth and frameLabels[i]==200:
# accbinSVM+=1;
# if frameLabels[i]==200:
# count+=1
# for i in range(c3numf):
# if (extColScore[i]>=extth and frameLabels[i]<200) or (extColScore[i]<extth and frameLabels[i]==200):
# accEXT+=1
# if (colScoreSmoothed[i]>=c3dth and frameLabels[i]<200) or (colScoreSmoothed[i]<c3dth and frameLabels[i]==200):
# accC3D+=1
# if (binRF[i]>=binRFth and frameLabels[i]<200) or (binRF[i]<binRFth and frameLabels[i]==200):
# accbinRF+=1
# if (binSVM[i]>=binSVMth and frameLabels[i]<200) or (binSVM[i]<binSVMth and frameLabels[i]==200):
# accbinSVM+=1;
# if frameLabels[i]<201:
# count+=1
print np.asarray([accEXT/countext,accbinRF/countrf,accbinSVM/countBsvm,accC3D/countc3])
return accEXT/countext,accbinRF/countrf,accbinSVM/countBsvm,accC3D/countc3,1
def smoothit(colScore,hws=5):
if len(colScore)<hws:
colScore = colScore/np.max(colScore)
return colScore #hws = int(len(colScore)/2);
newScores = np.zeros_like(colScore)
numelm = len(colScore)
for i in range(numelm):
ts = 0;count = 0;
for k in np.arange(max(i-hws,0),min(numelm,i+hws),1):
count += 1
ts += colScore[k]
if count>0:
newScores[i] = float(ts)/count
else:
newScores[i] = colScore[i]
return newScores
def smoothColScores(colScore,hws=5):
if len(colScore)<hws:
colScore = colScore/np.max(colScore)
return colScore #hws = int(len(colScore)/2);
newScores = np.zeros_like(colScore)
numelm = len(colScore)
for i in range(numelm):
ts = 0;count = 0;
for k in np.arange(max(i-hws,0),min(numelm,i+hws),1):
count += 1
ts += colScore[k]
if count>0:
newScores[i] = float(ts)/count
else:
newScores[i] = colScore[i]
sortedScores = sorted(newScores)[::-1]
minS = np.mean(sortedScores[-5:-2])
# sortedScores = sortedScores
# print sortedScores
maxS = np.mean(sortedScores[1:5])
if maxS>0:
newScores = (newScores-minS)/(maxS-minS)
else:
newScores = newScores-minS
newScores[newScores<0] = 0
# for i in range(len(newScores)):
# if newScores[i]>0.4 and newScores[i]<0.5:
# newScores[i]*=1.5
# newScores[newScores>0.8] = 1.0
return newScores
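def _demo_smoothColScores():
    # Illustrative sketch (hypothetical data, not called anywhere):
    # smoothColScores applies a moving average of half-window `hws` and then
    # rescales the result to roughly [0, 1] using robust min/max estimates,
    # which is what the binary segmentation above thresholds against.
    noisy = np.abs(np.random.randn(40))
    smoothed = smoothColScores(noisy, 5)
    print 'range after rescale', np.min(smoothed), np.max(smoothed)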
def refinelabels(inlabels,instarts,inends,colScore):
scores = []; starts = []; ends = [];
offset = len(colScore)*0.15;
for ind in range(len(inlabels)):
segIndexs = np.asarray(np.arange(instarts[ind],inends[ind],dtype=int))
if inlabels[ind] == 0 and len(segIndexs)>offset:
starts.append(max(offset,instarts[ind]-5));
ends.append(min(inends[ind]+5,len(colScore)-offset))
            scols = sorted(colScore[segIndexs])
            seglen = min(int(len(scols)*0.8),190)
            scols = scols[::-1]
            sc = np.mean(scols[:seglen])
            # rank candidates by segment length so the argmax below keeps the
            # longest segment (the mean score sc is computed but unused)
            scores.append(len(segIndexs))
if len(scores)>0:
ind = np.argmax(scores)
return [scores[ind]],[starts[ind]],[ends[ind]]
else:
return scores,starts,ends
def getSegment4mProp(topklabels,topscors,clscores,C3Dfeat,props,fps,numf):
pscores = props['score'];
segInit = props['segment-init'];
segEnd = props['segment-end'];
indexs = C3Dfeat['indexs'];
frameLabels = C3Dfeat['labels'];
preds = C3Dfeat['scores'];
preds = preds - np.min(preds) + 1;
c3numf = np.shape(preds)[0];
for i in range(c3numf):
preds[i,:] = preds[i,:] - np.min(preds[i,:])+1;
preds[i,:] = preds[i,:]/np.sum(preds[i,:]);
t2f = (c3numf*fps)/numf;
labels = []; scores = []; starts = []; ends = [];
clscores = clscores/np.sum(clscores);
norms = np.sum(topscors)
for label in topklabels[:1]:
clScore = clscores[label]/norms;
colScore = preds[:,label]/norms;
print 'number of props',len(pscores)
for sInd in range(min(len(pscores),5)):
# if pscores[sInd]>0.3:
st = int(segInit[sInd]*t2f)
et = int(segEnd[sInd]*t2f)
bscore = clScore*np.mean(colScore[st:et])*pscores[sInd]
# print st,et,bscore,np.mean(colScore[st:et]),pscores[sInd],clScore
if bscore>0.01:
labels.append(label); scores.append(bscore);
starts.append(segInit[sInd]*fps);
ends.append(segEnd[sInd]*fps);
return labels,scores,starts,ends
def getTOPclasses(mbh,ims,c3d,k):
mbh = mbh - np.min(mbh)+1;
ims = ims - np.min(ims)+1;
scores = mbh*ims*c3d;
scores = scores/np.sum(scores);
sortedlabel = np.argsort(scores)[::-1]
sortedscores = scores[sortedlabel]
return sortedlabel[:k],sortedscores[:k],scores
def getC3dMeanPreds(preds,classtopk=80):
preds = preds - np.min(preds) + 1;
scores = np.zeros(200)
topk = min(classtopk,np.shape(preds)[0]);
# for i in range(np.shape(preds)[0]):
# preds[i,:] = preds[i,:] - np.min(preds[i,:])+1;
# preds[i,:] = preds[i,:]/np.sum(preds[i,:]) ;
for i in range(200):
values = preds[:,i];
values = np.sort(values);
values = values[::-1]
scores[i] = np.mean(values[:topk])
return scores
def readpkl(filename):
with open(filename) as f:
data = pickle.load(f)
return data
def processThreePredictions():
#########################################
#########################################
names = getnames()
gtlabels = readpkl('{}data/labels.pkl'.format(baseDir))
indexs = readpkl('{}data/indexs.pkl'.format(baseDir))
actionIDs,taxonomy,database = readannos()
########################################
########################################
for alpha in [3,]:
K = 15;
subset = 'testing';#,
featType = 'IMS'
savename = '{}data/predictions-{}-{}.pkl'.format(baseDir,subset,featType)
with open(savename,'r') as f:
dataIMS = pickle.load(f)
featType = 'MBH'
savename = '{}data/predictions-{}-{}.pkl'.format(baseDir,subset,featType)
with open(savename,'r') as f:
dataMBH = pickle.load(f)
featType = 'C3D'
savename = '{}data/predictions-{}-{}.hdf5'.format(baseDir,subset,featType)
infileC3D = h5py.File(savename,'r');
featType = 'EXT'
savename = '{}data/predictions-{}-{}.hdf5'.format(baseDir,subset,featType)
infileEXT = h5py.File(savename,'r');
featType = 'C3D'
savename = '{}data/predictions-BWRF-{}-{}.hdf5'.format(baseDir,subset,featType)
infileC3DbinRF = h5py.File(savename,'r');
featType = 'C3D'
savename = '{}data/predictions-BWSVM-{}-{}.hdf5'.format(baseDir,subset,featType)
infileC3DbinSVM = h5py.File(savename,'r');
# savename = '{}data/activitynet_v1-3_proposals.hdf5'.format(baseDir)
# infileProp = h5py.File(savename,'r');
#
outfilename = '{}results/detection/{}-{}-K-{}-alpha-{}.json'.format(baseDir,subset,'C3D-BIN-BOOST-LONG',str(K).zfill(3),str(int(alpha*10)).zfill(3))
if True: #not os.path.isfile(outfilename):
vcount = 0;
vdata = {};
vdata['external_data'] = {'used':True, 'details':"We use ImagenetShuffle features, MBH features and C3D features provided on challenge page."}
vdata['version'] = "VERSION 1.3"
results = {}
for videoId in database.keys():
videoInfo = database[videoId]
numf = videoInfo['numf'];
duration = videoInfo['duration']
#fps = videoInfo['fps'];
fps = numf/duration;
if videoInfo['subset'] == subset:
vcount +=1
if vcount > -1:
vidresults = []
# print videoInfo
# vcount+=1
vidname = 'v_'+videoId
# print 'processing ', vidname, ' vcount ',vcount
ind = dataMBH['vIndexs'][videoId]
predsMBH = dataMBH['scores'][ind,:]
ind = dataIMS['vIndexs'][videoId]
predsIMS = dataIMS['scores'][ind,:]
C3Dfeat = infileC3D[videoId]
C3Dscores = C3Dfeat['scores']
predS3D = getC3dMeanPreds(C3Dscores)
# props = infileProp[vidname]
predEXT = np.transpose(infileEXT[videoId]['scores'])
# predEXT = getC3dMeanPreds(preds,220)
C3DfeatbinRF = infileC3DbinRF[videoId]
C3DfeatbinSVM = infileC3DbinSVM[videoId]
#print 'shape of preds',np.shape(preds)
print 'processing ', vidname, ' vcount ',vcount,' fps ',fps, ' numf ',numf,' alpha ',alpha,
# labels,scores,starts,ends = fuseThree(predsMBH,predsIMS,predS3D,numf,K)
topklabels,topscors,scores= getTOPclasses(predsMBH,predsIMS,predS3D,K)
# labels,scores,starts,ends = getSegment4mProp(topklabels,topscors,scores,C3Dfeat,props,fps,numf)
# labels,scores,starts,ends = getSegment4mAlphaEXT(topklabels,topscors,scores,predEXT,C3Dfeat,fps,numf,alpha)
labels,scores,starts,ends,c3numf = getSegmentBinaryC3D(topklabels,topscors,scores,predEXT,C3DfeatbinRF,C3DfeatbinSVM,C3Dfeat,fps,numf,alpha)
print ' Number of detection are ',len(labels)
# print labels,scores
fps = c3numf/duration;
for idx in range(len(labels)):
score = scores[idx]
label = labels[idx]
name = names[label]
st = float(starts[idx])/fps
et = float(ends[idx])/fps
segment = [];
segment.append(st);segment.append(et)
# print label,score,segment,starts[idx],ends[idx]
tempdict = {'label':name,'score':float(score),'segment':segment}
vidresults.append(tempdict)
results[videoId] = vidresults
vdata['results'] = results
print 'result-saved-in ',outfilename
with open(outfilename,'wb') as f:
json.dump(vdata,f)
def getaccuracy():
#########################################
#########################################
names = getnames()
gtlabels = readpkl('{}data/labels.pkl'.format(baseDir))
indexs = readpkl('{}data/indexs.pkl'.format(baseDir))
actionIDs,taxonomy,database = readannos()
########################################
########################################
for alpha in [0.3,]:
K = 5;
subset = 'validation';#,'testing']:
featType = 'IMS'
savename = '{}data/predictions-{}-{}.pkl'.format(baseDir,subset,featType)
with open(savename,'r') as f:
dataIMS = pickle.load(f)
featType = 'MBH'
savename = '{}data/predictions-{}-{}.pkl'.format(baseDir,subset,featType)
with open(savename,'r') as f:
dataMBH = pickle.load(f)
featType = 'C3D'
savename = '{}data/predictions-{}-{}.hdf5'.format(baseDir,subset,featType)
infileC3D = h5py.File(savename,'r');
featType = 'EXT'
savename = '{}data/predictions-{}-{}.hdf5'.format(baseDir,subset,featType)
infileEXT = h5py.File(savename,'r');
featType = 'C3D'
savename = '{}data/predictions-BWRF-{}-{}.hdf5'.format(baseDir,subset,featType)
infileC3DbinRF = h5py.File(savename,'r');
featType = 'C3D'
savename = '{}data/predictions-BWSVM-{}-{}.hdf5'.format(baseDir,subset,featType)
infileC3DbinSVM = h5py.File(savename,'r');
# savename = '{}data/activitynet_v1-3_proposals.hdf5'.format(baseDir)
# infileProp = h5py.File(savename,'r');
#
outfilename = '{}results/detection/{}-{}-K-{}-alpha-{}.json'.format(baseDir,subset,'C3D-BIN',str(K).zfill(3),str(int(alpha*10)).zfill(3))
accEXT = 0.0; accbinSVM = 0.0; accbinRF = 0.0; accC3D = 0.0;
count = 0;
if True: #not os.path.isfile(outfilename):
vcount = 0;
vdata = {};
vdata['external_data'] = {'used':True, 'details':"We use extraction Net model with its weights pretrained on imageNet dataset and fine tuned on Activitty Net. Plus ImagenetShuffle, MBH features, C3D features privide on challenge page"}
vdata['version'] = "VERSION 1.3"
results = {}
for videoId in database.keys():
videoInfo = database[videoId]
numf = videoInfo['numf'];
duration = videoInfo['duration']
#fps = videoInfo['fps'];
fps = numf/duration;
if videoInfo['subset'] == subset:
vcount +=1
if vcount <2000:
vidresults = []
# print videoInfo
# vcount+=1
vidname = 'v_'+videoId
# print 'processing ', vidname, ' vcount ',vcount
ind = dataMBH['vIndexs'][videoId]
predsMBH = dataMBH['scores'][ind,:]
ind = dataIMS['vIndexs'][videoId]
predsIMS = dataIMS['scores'][ind,:]
C3Dfeat = infileC3D[videoId]
C3Dscores = C3Dfeat['scores']
predS3D = getC3dMeanPreds(C3Dscores)
# props = infileProp[vidname]
predEXT = np.transpose(infileEXT[videoId]['scores'])
# predEXT = getC3dMeanPreds(preds,220)
C3DfeatbinRF = infileC3DbinRF[videoId]
C3DfeatbinSVM = infileC3DbinSVM[videoId]
#print 'shape of preds',np.shape(preds)
print 'processing ', vidname, ' vcount ',vcount,' fps ',fps, ' numf ',numf,' alpha ',alpha,
# labels,scores,starts,ends = fuseThree(predsMBH,predsIMS,predS3D,numf,K)
topklabels,topscors,scores= getTOPclasses(predsMBH,predsIMS,predS3D,K)
# labels,scores,starts,ends = getSegment4mProp(topklabels,topscors,scores,C3Dfeat,props,fps,numf)
# labels,scores,starts,ends = getSegment4mAlphaEXT(topklabels,topscors,scores,predEXT,C3Dfeat,fps,numf,alpha)
aEXT,aSVM,aRF,aC3D,cnf= getBinaryAccuracy(topklabels,topscors,scores,predEXT,C3DfeatbinRF,C3DfeatbinSVM,C3Dfeat,fps,numf,alpha)
accEXT += aEXT; accbinSVM += aSVM; accbinRF += aRF; accC3D += aC3D;
count +=cnf
        print 'Average Accuracy is ', np.asarray([accEXT,accbinRF,accbinSVM,accC3D])/count
if __name__=="__main__":
# processOnePredictions()
# processTwoPredictions()
# fuse2withAP()
processThreePredictions()
# getaccuracy()
| mit |
courtarro/gnuradio | gr-digital/examples/example_costas.py | 49 | 5316 | #!/usr/bin/env python
#
# Copyright 2011-2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr, digital, filter
from gnuradio import blocks
from gnuradio import channels
from gnuradio import eng_notation
from gnuradio.eng_option import eng_option
from optparse import OptionParser
import sys
try:
import scipy
except ImportError:
print "Error: could not import scipy (http://www.scipy.org/)"
sys.exit(1)
try:
import pylab
except ImportError:
print "Error: could not import pylab (http://matplotlib.sourceforge.net/)"
sys.exit(1)
class example_costas(gr.top_block):
def __init__(self, N, sps, rolloff, ntaps, bw, noise, foffset, toffset, poffset):
gr.top_block.__init__(self)
rrc_taps = filter.firdes.root_raised_cosine(
sps, sps, 1.0, rolloff, ntaps)
data = 2.0*scipy.random.randint(0, 2, N) - 1.0
data = scipy.exp(1j*poffset) * data
self.src = blocks.vector_source_c(data.tolist(), False)
self.rrc = filter.interp_fir_filter_ccf(sps, rrc_taps)
self.chn = channels.channel_model(noise, foffset, toffset)
self.cst = digital.costas_loop_cc(bw, 2)
self.vsnk_src = blocks.vector_sink_c()
self.vsnk_cst = blocks.vector_sink_c()
self.vsnk_frq = blocks.vector_sink_f()
self.connect(self.src, self.rrc, self.chn, self.cst, self.vsnk_cst)
self.connect(self.rrc, self.vsnk_src)
self.connect((self.cst,1), self.vsnk_frq)
def main():
parser = OptionParser(option_class=eng_option, conflict_handler="resolve")
parser.add_option("-N", "--nsamples", type="int", default=2000,
help="Set the number of samples to process [default=%default]")
parser.add_option("-S", "--sps", type="int", default=4,
help="Set the samples per symbol [default=%default]")
parser.add_option("-r", "--rolloff", type="eng_float", default=0.35,
help="Set the rolloff factor [default=%default]")
parser.add_option("-W", "--bandwidth", type="eng_float", default=2*scipy.pi/100.0,
help="Set the loop bandwidth [default=%default]")
parser.add_option("-n", "--ntaps", type="int", default=45,
help="Set the number of taps in the filters [default=%default]")
parser.add_option("", "--noise", type="eng_float", default=0.0,
help="Set the simulation noise voltage [default=%default]")
parser.add_option("-f", "--foffset", type="eng_float", default=0.0,
help="Set the simulation's normalized frequency offset (in Hz) [default=%default]")
parser.add_option("-t", "--toffset", type="eng_float", default=1.0,
help="Set the simulation's timing offset [default=%default]")
parser.add_option("-p", "--poffset", type="eng_float", default=0.707,
help="Set the simulation's phase offset [default=%default]")
(options, args) = parser.parse_args ()
# Adjust N for the interpolation by sps
options.nsamples = options.nsamples // options.sps
# Set up the program-under-test
put = example_costas(options.nsamples, options.sps, options.rolloff,
options.ntaps, options.bandwidth, options.noise,
options.foffset, options.toffset, options.poffset)
put.run()
data_src = scipy.array(put.vsnk_src.data())
    # Convert the Costas loop's LO frequency from rads/sec to Hz
data_frq = scipy.array(put.vsnk_frq.data()) / (2.0*scipy.pi)
# adjust this to align with the data.
data_cst = scipy.array(3*[0,]+list(put.vsnk_cst.data()))
# Plot the Costas loop's LO frequency
f1 = pylab.figure(1, figsize=(12,10), facecolor='w')
s1 = f1.add_subplot(2,2,1)
s1.plot(data_frq)
s1.set_title("Costas LO")
s1.set_xlabel("Samples")
s1.set_ylabel("Frequency (normalized Hz)")
# Plot the IQ symbols
s3 = f1.add_subplot(2,2,2)
s3.plot(data_src.real, data_src.imag, "o")
s3.plot(data_cst.real, data_cst.imag, "rx")
s3.set_title("IQ")
s3.set_xlabel("Real part")
s3.set_ylabel("Imag part")
s3.set_xlim([-2, 2])
s3.set_ylim([-2, 2])
# Plot the symbols in time
s4 = f1.add_subplot(2,2,3)
s4.set_position([0.125, 0.05, 0.775, 0.4])
s4.plot(data_src.real, "o-")
s4.plot(data_cst.real, "rx-")
s4.set_title("Symbols")
s4.set_xlabel("Samples")
s4.set_ylabel("Real Part of Signals")
pylab.show()
if __name__ == "__main__":
try:
main()
except KeyboardInterrupt:
pass
| gpl-3.0 |
xiaoxiamii/scikit-learn | benchmarks/bench_multilabel_metrics.py | 276 | 7138 | #!/usr/bin/env python
"""
A comparison of multilabel target formats and metrics over them
"""
from __future__ import division
from __future__ import print_function
from timeit import timeit
from functools import partial
import itertools
import argparse
import sys
import matplotlib.pyplot as plt
import scipy.sparse as sp
import numpy as np
from sklearn.datasets import make_multilabel_classification
from sklearn.metrics import (f1_score, accuracy_score, hamming_loss,
jaccard_similarity_score)
from sklearn.utils.testing import ignore_warnings
METRICS = {
'f1': partial(f1_score, average='micro'),
'f1-by-sample': partial(f1_score, average='samples'),
'accuracy': accuracy_score,
'hamming': hamming_loss,
'jaccard': jaccard_similarity_score,
}
FORMATS = {
'sequences': lambda y: [list(np.flatnonzero(s)) for s in y],
'dense': lambda y: y,
'csr': lambda y: sp.csr_matrix(y),
'csc': lambda y: sp.csc_matrix(y),
}
@ignore_warnings
def benchmark(metrics=tuple(v for k, v in sorted(METRICS.items())),
formats=tuple(v for k, v in sorted(FORMATS.items())),
samples=1000, classes=4, density=.2,
n_times=5):
"""Times metric calculations for a number of inputs
Parameters
----------
metrics : array-like of callables (1d or 0d)
The metric functions to time.
formats : array-like of callables (1d or 0d)
These may transform a dense indicator matrix into multilabel
representation.
samples : array-like of ints (1d or 0d)
The number of samples to generate as input.
classes : array-like of ints (1d or 0d)
The number of classes in the input.
density : array-like of ints (1d or 0d)
The density of positive labels in the input.
n_times : int
Time calling the metric n_times times.
Returns
-------
array of floats shaped like (metrics, formats, samples, classes, density)
Time in seconds.
"""
metrics = np.atleast_1d(metrics)
samples = np.atleast_1d(samples)
classes = np.atleast_1d(classes)
density = np.atleast_1d(density)
formats = np.atleast_1d(formats)
out = np.zeros((len(metrics), len(formats), len(samples), len(classes),
len(density)), dtype=float)
it = itertools.product(samples, classes, density)
for i, (s, c, d) in enumerate(it):
_, y_true = make_multilabel_classification(n_samples=s, n_features=1,
n_classes=c, n_labels=d * c,
random_state=42)
_, y_pred = make_multilabel_classification(n_samples=s, n_features=1,
n_classes=c, n_labels=d * c,
random_state=84)
for j, f in enumerate(formats):
f_true = f(y_true)
f_pred = f(y_pred)
for k, metric in enumerate(metrics):
t = timeit(partial(metric, f_true, f_pred), number=n_times)
out[k, j].flat[i] = t
return out
def _tabulate(results, metrics, formats):
"""Prints results by metric and format
Uses the last ([-1]) value of other fields
"""
column_width = max(max(len(k) for k in formats) + 1, 8)
first_width = max(len(k) for k in metrics)
head_fmt = ('{:<{fw}s}' + '{:>{cw}s}' * len(formats))
row_fmt = ('{:<{fw}s}' + '{:>{cw}.3f}' * len(formats))
print(head_fmt.format('Metric', *formats,
cw=column_width, fw=first_width))
for metric, row in zip(metrics, results[:, :, -1, -1, -1]):
print(row_fmt.format(metric, *row,
cw=column_width, fw=first_width))
def _plot(results, metrics, formats, title, x_ticks, x_label,
format_markers=('x', '|', 'o', '+'),
metric_colors=('c', 'm', 'y', 'k', 'g', 'r', 'b')):
"""
Plot the results by metric, format and some other variable given by
x_label
"""
fig = plt.figure('scikit-learn multilabel metrics benchmarks')
plt.title(title)
ax = fig.add_subplot(111)
for i, metric in enumerate(metrics):
for j, format in enumerate(formats):
ax.plot(x_ticks, results[i, j].flat,
label='{}, {}'.format(metric, format),
marker=format_markers[j],
color=metric_colors[i % len(metric_colors)])
ax.set_xlabel(x_label)
ax.set_ylabel('Time (s)')
ax.legend()
plt.show()
if __name__ == "__main__":
ap = argparse.ArgumentParser()
ap.add_argument('metrics', nargs='*', default=sorted(METRICS),
help='Specifies metrics to benchmark, defaults to all. '
'Choices are: {}'.format(sorted(METRICS)))
ap.add_argument('--formats', nargs='+', choices=sorted(FORMATS),
help='Specifies multilabel formats to benchmark '
'(defaults to all).')
ap.add_argument('--samples', type=int, default=1000,
help='The number of samples to generate')
ap.add_argument('--classes', type=int, default=10,
help='The number of classes')
ap.add_argument('--density', type=float, default=.2,
help='The average density of labels per sample')
ap.add_argument('--plot', choices=['classes', 'density', 'samples'],
default=None,
help='Plot time with respect to this parameter varying '
'up to the specified value')
ap.add_argument('--n-steps', default=10, type=int,
help='Plot this many points for each metric')
ap.add_argument('--n-times',
default=5, type=int,
help="Time performance over n_times trials")
args = ap.parse_args()
if args.plot is not None:
max_val = getattr(args, args.plot)
if args.plot in ('classes', 'samples'):
min_val = 2
else:
min_val = 0
steps = np.linspace(min_val, max_val, num=args.n_steps + 1)[1:]
if args.plot in ('classes', 'samples'):
steps = np.unique(np.round(steps).astype(int))
setattr(args, args.plot, steps)
if args.metrics is None:
args.metrics = sorted(METRICS)
if args.formats is None:
args.formats = sorted(FORMATS)
results = benchmark([METRICS[k] for k in args.metrics],
[FORMATS[k] for k in args.formats],
args.samples, args.classes, args.density,
args.n_times)
_tabulate(results, args.metrics, args.formats)
if args.plot is not None:
print('Displaying plot', file=sys.stderr)
title = ('Multilabel metrics with %s' %
', '.join('{0}={1}'.format(field, getattr(args, field))
for field in ['samples', 'classes', 'density']
if args.plot != field))
_plot(results, args.metrics, args.formats, title, steps, args.plot)
| bsd-3-clause |
wzbozon/scikit-learn | doc/tutorial/text_analytics/solutions/exercise_02_sentiment.py | 254 | 2795 | """Build a sentiment analysis / polarity model
Sentiment analysis can be cast as a binary text classification problem,
that is, fitting a linear classifier on features extracted from the text
of the user messages so as to guess whether the opinion of the author is
positive or negative.
In this example we will use a movie review dataset.
"""
# Author: Olivier Grisel <[email protected]>
# License: Simplified BSD
import sys
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.svm import LinearSVC
from sklearn.pipeline import Pipeline
from sklearn.grid_search import GridSearchCV
from sklearn.datasets import load_files
from sklearn.cross_validation import train_test_split
from sklearn import metrics
if __name__ == "__main__":
# NOTE: we put the following in a 'if __name__ == "__main__"' protected
# block to be able to use a multi-core grid search that also works under
# Windows, see: http://docs.python.org/library/multiprocessing.html#windows
# The multiprocessing module is used as the backend of joblib.Parallel
# that is used when n_jobs != 1 in GridSearchCV
# the training data folder must be passed as first argument
movie_reviews_data_folder = sys.argv[1]
dataset = load_files(movie_reviews_data_folder, shuffle=False)
print("n_samples: %d" % len(dataset.data))
# split the dataset in training and test set:
docs_train, docs_test, y_train, y_test = train_test_split(
dataset.data, dataset.target, test_size=0.25, random_state=None)
# TASK: Build a vectorizer / classifier pipeline that filters out tokens
# that are too rare or too frequent
pipeline = Pipeline([
('vect', TfidfVectorizer(min_df=3, max_df=0.95)),
('clf', LinearSVC(C=1000)),
])
# TASK: Build a grid search to find out whether unigrams or bigrams are
# more useful.
# Fit the pipeline on the training set using grid search for the parameters
parameters = {
'vect__ngram_range': [(1, 1), (1, 2)],
}
grid_search = GridSearchCV(pipeline, parameters, n_jobs=-1)
grid_search.fit(docs_train, y_train)
# TASK: print the cross-validated scores for the each parameters set
# explored by the grid search
print(grid_search.grid_scores_)
# TASK: Predict the outcome on the testing set and store it in a variable
# named y_predicted
y_predicted = grid_search.predict(docs_test)
# Print the classification report
print(metrics.classification_report(y_test, y_predicted,
target_names=dataset.target_names))
# Print and plot the confusion matrix
cm = metrics.confusion_matrix(y_test, y_predicted)
print(cm)
# import matplotlib.pyplot as plt
# plt.matshow(cm)
# plt.show()
| bsd-3-clause |
oxtopus/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/_cm.py | 70 | 375423 | """
Color data and pre-defined cmap objects.
This is a helper for cm.py, originally part of that file.
Separating the data (this file) from cm.py makes both easier
to deal with.
Objects visible in cm.py are the individual cmap objects ('autumn',
etc.) and a dictionary, 'datad', including all of these objects.
"""
import matplotlib as mpl
import matplotlib.colors as colors
LUTSIZE = mpl.rcParams['image.lut']
_binary_data = {
'red' : ((0., 1., 1.), (1., 0., 0.)),
'green': ((0., 1., 1.), (1., 0., 0.)),
'blue' : ((0., 1., 1.), (1., 0., 0.))
}
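# Illustrative note (the variable below is an added example, not part of the
# original data tables): each channel above maps x in [0, 1] to
# (x, y_left, y_right) control points that matplotlib interpolates linearly,
# so a dict in this format feeds the standard constructor directly.
_binary_example = colors.LinearSegmentedColormap('binary', _binary_data, LUTSIZE)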
_autumn_data = {'red': ((0., 1.0, 1.0),(1.0, 1.0, 1.0)),
'green': ((0., 0., 0.),(1.0, 1.0, 1.0)),
'blue': ((0., 0., 0.),(1.0, 0., 0.))}
_bone_data = {'red': ((0., 0., 0.),(0.746032, 0.652778, 0.652778),(1.0, 1.0, 1.0)),
'green': ((0., 0., 0.),(0.365079, 0.319444, 0.319444),
(0.746032, 0.777778, 0.777778),(1.0, 1.0, 1.0)),
'blue': ((0., 0., 0.),(0.365079, 0.444444, 0.444444),(1.0, 1.0, 1.0))}
_cool_data = {'red': ((0., 0., 0.), (1.0, 1.0, 1.0)),
'green': ((0., 1., 1.), (1.0, 0., 0.)),
'blue': ((0., 1., 1.), (1.0, 1., 1.))}
_copper_data = {'red': ((0., 0., 0.),(0.809524, 1.000000, 1.000000),(1.0, 1.0, 1.0)),
'green': ((0., 0., 0.),(1.0, 0.7812, 0.7812)),
'blue': ((0., 0., 0.),(1.0, 0.4975, 0.4975))}
_flag_data = {'red': ((0., 1., 1.),(0.015873, 1.000000, 1.000000),
(0.031746, 0.000000, 0.000000),(0.047619, 0.000000, 0.000000),
(0.063492, 1.000000, 1.000000),(0.079365, 1.000000, 1.000000),
(0.095238, 0.000000, 0.000000),(0.111111, 0.000000, 0.000000),
(0.126984, 1.000000, 1.000000),(0.142857, 1.000000, 1.000000),
(0.158730, 0.000000, 0.000000),(0.174603, 0.000000, 0.000000),
(0.190476, 1.000000, 1.000000),(0.206349, 1.000000, 1.000000),
(0.222222, 0.000000, 0.000000),(0.238095, 0.000000, 0.000000),
(0.253968, 1.000000, 1.000000),(0.269841, 1.000000, 1.000000),
(0.285714, 0.000000, 0.000000),(0.301587, 0.000000, 0.000000),
(0.317460, 1.000000, 1.000000),(0.333333, 1.000000, 1.000000),
(0.349206, 0.000000, 0.000000),(0.365079, 0.000000, 0.000000),
(0.380952, 1.000000, 1.000000),(0.396825, 1.000000, 1.000000),
(0.412698, 0.000000, 0.000000),(0.428571, 0.000000, 0.000000),
(0.444444, 1.000000, 1.000000),(0.460317, 1.000000, 1.000000),
(0.476190, 0.000000, 0.000000),(0.492063, 0.000000, 0.000000),
(0.507937, 1.000000, 1.000000),(0.523810, 1.000000, 1.000000),
(0.539683, 0.000000, 0.000000),(0.555556, 0.000000, 0.000000),
(0.571429, 1.000000, 1.000000),(0.587302, 1.000000, 1.000000),
(0.603175, 0.000000, 0.000000),(0.619048, 0.000000, 0.000000),
(0.634921, 1.000000, 1.000000),(0.650794, 1.000000, 1.000000),
(0.666667, 0.000000, 0.000000),(0.682540, 0.000000, 0.000000),
(0.698413, 1.000000, 1.000000),(0.714286, 1.000000, 1.000000),
(0.730159, 0.000000, 0.000000),(0.746032, 0.000000, 0.000000),
(0.761905, 1.000000, 1.000000),(0.777778, 1.000000, 1.000000),
(0.793651, 0.000000, 0.000000),(0.809524, 0.000000, 0.000000),
(0.825397, 1.000000, 1.000000),(0.841270, 1.000000, 1.000000),
(0.857143, 0.000000, 0.000000),(0.873016, 0.000000, 0.000000),
(0.888889, 1.000000, 1.000000),(0.904762, 1.000000, 1.000000),
(0.920635, 0.000000, 0.000000),(0.936508, 0.000000, 0.000000),
(0.952381, 1.000000, 1.000000),(0.968254, 1.000000, 1.000000),
(0.984127, 0.000000, 0.000000),(1.0, 0., 0.)),
'green': ((0., 0., 0.),(0.015873, 1.000000, 1.000000),
(0.031746, 0.000000, 0.000000),(0.063492, 0.000000, 0.000000),
(0.079365, 1.000000, 1.000000),(0.095238, 0.000000, 0.000000),
(0.126984, 0.000000, 0.000000),(0.142857, 1.000000, 1.000000),
(0.158730, 0.000000, 0.000000),(0.190476, 0.000000, 0.000000),
(0.206349, 1.000000, 1.000000),(0.222222, 0.000000, 0.000000),
(0.253968, 0.000000, 0.000000),(0.269841, 1.000000, 1.000000),
(0.285714, 0.000000, 0.000000),(0.317460, 0.000000, 0.000000),
(0.333333, 1.000000, 1.000000),(0.349206, 0.000000, 0.000000),
(0.380952, 0.000000, 0.000000),(0.396825, 1.000000, 1.000000),
(0.412698, 0.000000, 0.000000),(0.444444, 0.000000, 0.000000),
(0.460317, 1.000000, 1.000000),(0.476190, 0.000000, 0.000000),
(0.507937, 0.000000, 0.000000),(0.523810, 1.000000, 1.000000),
(0.539683, 0.000000, 0.000000),(0.571429, 0.000000, 0.000000),
(0.587302, 1.000000, 1.000000),(0.603175, 0.000000, 0.000000),
(0.634921, 0.000000, 0.000000),(0.650794, 1.000000, 1.000000),
(0.666667, 0.000000, 0.000000),(0.698413, 0.000000, 0.000000),
(0.714286, 1.000000, 1.000000),(0.730159, 0.000000, 0.000000),
(0.761905, 0.000000, 0.000000),(0.777778, 1.000000, 1.000000),
(0.793651, 0.000000, 0.000000),(0.825397, 0.000000, 0.000000),
(0.841270, 1.000000, 1.000000),(0.857143, 0.000000, 0.000000),
(0.888889, 0.000000, 0.000000),(0.904762, 1.000000, 1.000000),
(0.920635, 0.000000, 0.000000),(0.952381, 0.000000, 0.000000),
(0.968254, 1.000000, 1.000000),(0.984127, 0.000000, 0.000000),
(1.0, 0., 0.)),
'blue': ((0., 0., 0.),(0.015873, 1.000000, 1.000000),
(0.031746, 1.000000, 1.000000),(0.047619, 0.000000, 0.000000),
(0.063492, 0.000000, 0.000000),(0.079365, 1.000000, 1.000000),
(0.095238, 1.000000, 1.000000),(0.111111, 0.000000, 0.000000),
(0.126984, 0.000000, 0.000000),(0.142857, 1.000000, 1.000000),
(0.158730, 1.000000, 1.000000),(0.174603, 0.000000, 0.000000),
(0.190476, 0.000000, 0.000000),(0.206349, 1.000000, 1.000000),
(0.222222, 1.000000, 1.000000),(0.238095, 0.000000, 0.000000),
(0.253968, 0.000000, 0.000000),(0.269841, 1.000000, 1.000000),
(0.285714, 1.000000, 1.000000),(0.301587, 0.000000, 0.000000),
(0.317460, 0.000000, 0.000000),(0.333333, 1.000000, 1.000000),
(0.349206, 1.000000, 1.000000),(0.365079, 0.000000, 0.000000),
(0.380952, 0.000000, 0.000000),(0.396825, 1.000000, 1.000000),
(0.412698, 1.000000, 1.000000),(0.428571, 0.000000, 0.000000),
(0.444444, 0.000000, 0.000000),(0.460317, 1.000000, 1.000000),
(0.476190, 1.000000, 1.000000),(0.492063, 0.000000, 0.000000),
(0.507937, 0.000000, 0.000000),(0.523810, 1.000000, 1.000000),
(0.539683, 1.000000, 1.000000),(0.555556, 0.000000, 0.000000),
(0.571429, 0.000000, 0.000000),(0.587302, 1.000000, 1.000000),
(0.603175, 1.000000, 1.000000),(0.619048, 0.000000, 0.000000),
(0.634921, 0.000000, 0.000000),(0.650794, 1.000000, 1.000000),
(0.666667, 1.000000, 1.000000),(0.682540, 0.000000, 0.000000),
(0.698413, 0.000000, 0.000000),(0.714286, 1.000000, 1.000000),
(0.730159, 1.000000, 1.000000),(0.746032, 0.000000, 0.000000),
(0.761905, 0.000000, 0.000000),(0.777778, 1.000000, 1.000000),
(0.793651, 1.000000, 1.000000),(0.809524, 0.000000, 0.000000),
(0.825397, 0.000000, 0.000000),(0.841270, 1.000000, 1.000000),
(0.857143, 1.000000, 1.000000),(0.873016, 0.000000, 0.000000),
(0.888889, 0.000000, 0.000000),(0.904762, 1.000000, 1.000000),
(0.920635, 1.000000, 1.000000),(0.936508, 0.000000, 0.000000),
(0.952381, 0.000000, 0.000000),(0.968254, 1.000000, 1.000000),
(0.984127, 1.000000, 1.000000),(1.0, 0., 0.))}
_gray_data = {'red': ((0., 0, 0), (1., 1, 1)),
'green': ((0., 0, 0), (1., 1, 1)),
'blue': ((0., 0, 0), (1., 1, 1))}
_hot_data = {'red': ((0., 0.0416, 0.0416),(0.365079, 1.000000, 1.000000),(1.0, 1.0, 1.0)),
'green': ((0., 0., 0.),(0.365079, 0.000000, 0.000000),
(0.746032, 1.000000, 1.000000),(1.0, 1.0, 1.0)),
'blue': ((0., 0., 0.),(0.746032, 0.000000, 0.000000),(1.0, 1.0, 1.0))}
_hsv_data = {'red': ((0., 1., 1.),(0.158730, 1.000000, 1.000000),
(0.174603, 0.968750, 0.968750),(0.333333, 0.031250, 0.031250),
(0.349206, 0.000000, 0.000000),(0.666667, 0.000000, 0.000000),
(0.682540, 0.031250, 0.031250),(0.841270, 0.968750, 0.968750),
(0.857143, 1.000000, 1.000000),(1.0, 1.0, 1.0)),
'green': ((0., 0., 0.),(0.158730, 0.937500, 0.937500),
(0.174603, 1.000000, 1.000000),(0.507937, 1.000000, 1.000000),
(0.666667, 0.062500, 0.062500),(0.682540, 0.000000, 0.000000),
(1.0, 0., 0.)),
'blue': ((0., 0., 0.),(0.333333, 0.000000, 0.000000),
(0.349206, 0.062500, 0.062500),(0.507937, 1.000000, 1.000000),
(0.841270, 1.000000, 1.000000),(0.857143, 0.937500, 0.937500),
(1.0, 0.09375, 0.09375))}
_jet_data = {'red': ((0., 0, 0), (0.35, 0, 0), (0.66, 1, 1), (0.89,1, 1),
(1, 0.5, 0.5)),
'green': ((0., 0, 0), (0.125,0, 0), (0.375,1, 1), (0.64,1, 1),
(0.91,0,0), (1, 0, 0)),
'blue': ((0., 0.5, 0.5), (0.11, 1, 1), (0.34, 1, 1), (0.65,0, 0),
(1, 0, 0))}
_pink_data = {'red': ((0., 0.1178, 0.1178),(0.015873, 0.195857, 0.195857),
(0.031746, 0.250661, 0.250661),(0.047619, 0.295468, 0.295468),
(0.063492, 0.334324, 0.334324),(0.079365, 0.369112, 0.369112),
(0.095238, 0.400892, 0.400892),(0.111111, 0.430331, 0.430331),
(0.126984, 0.457882, 0.457882),(0.142857, 0.483867, 0.483867),
(0.158730, 0.508525, 0.508525),(0.174603, 0.532042, 0.532042),
(0.190476, 0.554563, 0.554563),(0.206349, 0.576204, 0.576204),
(0.222222, 0.597061, 0.597061),(0.238095, 0.617213, 0.617213),
(0.253968, 0.636729, 0.636729),(0.269841, 0.655663, 0.655663),
(0.285714, 0.674066, 0.674066),(0.301587, 0.691980, 0.691980),
(0.317460, 0.709441, 0.709441),(0.333333, 0.726483, 0.726483),
(0.349206, 0.743134, 0.743134),(0.365079, 0.759421, 0.759421),
(0.380952, 0.766356, 0.766356),(0.396825, 0.773229, 0.773229),
(0.412698, 0.780042, 0.780042),(0.428571, 0.786796, 0.786796),
(0.444444, 0.793492, 0.793492),(0.460317, 0.800132, 0.800132),
(0.476190, 0.806718, 0.806718),(0.492063, 0.813250, 0.813250),
(0.507937, 0.819730, 0.819730),(0.523810, 0.826160, 0.826160),
(0.539683, 0.832539, 0.832539),(0.555556, 0.838870, 0.838870),
(0.571429, 0.845154, 0.845154),(0.587302, 0.851392, 0.851392),
(0.603175, 0.857584, 0.857584),(0.619048, 0.863731, 0.863731),
(0.634921, 0.869835, 0.869835),(0.650794, 0.875897, 0.875897),
(0.666667, 0.881917, 0.881917),(0.682540, 0.887896, 0.887896),
(0.698413, 0.893835, 0.893835),(0.714286, 0.899735, 0.899735),
(0.730159, 0.905597, 0.905597),(0.746032, 0.911421, 0.911421),
(0.761905, 0.917208, 0.917208),(0.777778, 0.922958, 0.922958),
(0.793651, 0.928673, 0.928673),(0.809524, 0.934353, 0.934353),
(0.825397, 0.939999, 0.939999),(0.841270, 0.945611, 0.945611),
(0.857143, 0.951190, 0.951190),(0.873016, 0.956736, 0.956736),
(0.888889, 0.962250, 0.962250),(0.904762, 0.967733, 0.967733),
(0.920635, 0.973185, 0.973185),(0.936508, 0.978607, 0.978607),
(0.952381, 0.983999, 0.983999),(0.968254, 0.989361, 0.989361),
(0.984127, 0.994695, 0.994695),(1.0, 1.0, 1.0)),
'green': ((0., 0., 0.),(0.015873, 0.102869, 0.102869),
(0.031746, 0.145479, 0.145479),(0.047619, 0.178174, 0.178174),
(0.063492, 0.205738, 0.205738),(0.079365, 0.230022, 0.230022),
(0.095238, 0.251976, 0.251976),(0.111111, 0.272166, 0.272166),
(0.126984, 0.290957, 0.290957),(0.142857, 0.308607, 0.308607),
(0.158730, 0.325300, 0.325300),(0.174603, 0.341178, 0.341178),
(0.190476, 0.356348, 0.356348),(0.206349, 0.370899, 0.370899),
(0.222222, 0.384900, 0.384900),(0.238095, 0.398410, 0.398410),
(0.253968, 0.411476, 0.411476),(0.269841, 0.424139, 0.424139),
(0.285714, 0.436436, 0.436436),(0.301587, 0.448395, 0.448395),
(0.317460, 0.460044, 0.460044),(0.333333, 0.471405, 0.471405),
(0.349206, 0.482498, 0.482498),(0.365079, 0.493342, 0.493342),
(0.380952, 0.517549, 0.517549),(0.396825, 0.540674, 0.540674),
(0.412698, 0.562849, 0.562849),(0.428571, 0.584183, 0.584183),
(0.444444, 0.604765, 0.604765),(0.460317, 0.624669, 0.624669),
(0.476190, 0.643958, 0.643958),(0.492063, 0.662687, 0.662687),
(0.507937, 0.680900, 0.680900),(0.523810, 0.698638, 0.698638),
(0.539683, 0.715937, 0.715937),(0.555556, 0.732828, 0.732828),
(0.571429, 0.749338, 0.749338),(0.587302, 0.765493, 0.765493),
(0.603175, 0.781313, 0.781313),(0.619048, 0.796819, 0.796819),
(0.634921, 0.812029, 0.812029),(0.650794, 0.826960, 0.826960),
(0.666667, 0.841625, 0.841625),(0.682540, 0.856040, 0.856040),
(0.698413, 0.870216, 0.870216),(0.714286, 0.884164, 0.884164),
(0.730159, 0.897896, 0.897896),(0.746032, 0.911421, 0.911421),
(0.761905, 0.917208, 0.917208),(0.777778, 0.922958, 0.922958),
(0.793651, 0.928673, 0.928673),(0.809524, 0.934353, 0.934353),
(0.825397, 0.939999, 0.939999),(0.841270, 0.945611, 0.945611),
(0.857143, 0.951190, 0.951190),(0.873016, 0.956736, 0.956736),
(0.888889, 0.962250, 0.962250),(0.904762, 0.967733, 0.967733),
(0.920635, 0.973185, 0.973185),(0.936508, 0.978607, 0.978607),
(0.952381, 0.983999, 0.983999),(0.968254, 0.989361, 0.989361),
(0.984127, 0.994695, 0.994695),(1.0, 1.0, 1.0)),
'blue': ((0., 0., 0.),(0.015873, 0.102869, 0.102869),
(0.031746, 0.145479, 0.145479),(0.047619, 0.178174, 0.178174),
(0.063492, 0.205738, 0.205738),(0.079365, 0.230022, 0.230022),
(0.095238, 0.251976, 0.251976),(0.111111, 0.272166, 0.272166),
(0.126984, 0.290957, 0.290957),(0.142857, 0.308607, 0.308607),
(0.158730, 0.325300, 0.325300),(0.174603, 0.341178, 0.341178),
(0.190476, 0.356348, 0.356348),(0.206349, 0.370899, 0.370899),
(0.222222, 0.384900, 0.384900),(0.238095, 0.398410, 0.398410),
(0.253968, 0.411476, 0.411476),(0.269841, 0.424139, 0.424139),
(0.285714, 0.436436, 0.436436),(0.301587, 0.448395, 0.448395),
(0.317460, 0.460044, 0.460044),(0.333333, 0.471405, 0.471405),
(0.349206, 0.482498, 0.482498),(0.365079, 0.493342, 0.493342),
(0.380952, 0.503953, 0.503953),(0.396825, 0.514344, 0.514344),
(0.412698, 0.524531, 0.524531),(0.428571, 0.534522, 0.534522),
(0.444444, 0.544331, 0.544331),(0.460317, 0.553966, 0.553966),
(0.476190, 0.563436, 0.563436),(0.492063, 0.572750, 0.572750),
(0.507937, 0.581914, 0.581914),(0.523810, 0.590937, 0.590937),
(0.539683, 0.599824, 0.599824),(0.555556, 0.608581, 0.608581),
(0.571429, 0.617213, 0.617213),(0.587302, 0.625727, 0.625727),
(0.603175, 0.634126, 0.634126),(0.619048, 0.642416, 0.642416),
(0.634921, 0.650600, 0.650600),(0.650794, 0.658682, 0.658682),
(0.666667, 0.666667, 0.666667),(0.682540, 0.674556, 0.674556),
(0.698413, 0.682355, 0.682355),(0.714286, 0.690066, 0.690066),
(0.730159, 0.697691, 0.697691),(0.746032, 0.705234, 0.705234),
(0.761905, 0.727166, 0.727166),(0.777778, 0.748455, 0.748455),
(0.793651, 0.769156, 0.769156),(0.809524, 0.789314, 0.789314),
(0.825397, 0.808969, 0.808969),(0.841270, 0.828159, 0.828159),
(0.857143, 0.846913, 0.846913),(0.873016, 0.865261, 0.865261),
(0.888889, 0.883229, 0.883229),(0.904762, 0.900837, 0.900837),
(0.920635, 0.918109, 0.918109),(0.936508, 0.935061, 0.935061),
(0.952381, 0.951711, 0.951711),(0.968254, 0.968075, 0.968075),
(0.984127, 0.984167, 0.984167),(1.0, 1.0, 1.0))}
_prism_data = {'red': ((0., 1., 1.),(0.031746, 1.000000, 1.000000),
(0.047619, 0.000000, 0.000000),(0.063492, 0.000000, 0.000000),
(0.079365, 0.666667, 0.666667),(0.095238, 1.000000, 1.000000),
(0.126984, 1.000000, 1.000000),(0.142857, 0.000000, 0.000000),
(0.158730, 0.000000, 0.000000),(0.174603, 0.666667, 0.666667),
(0.190476, 1.000000, 1.000000),(0.222222, 1.000000, 1.000000),
(0.238095, 0.000000, 0.000000),(0.253968, 0.000000, 0.000000),
(0.269841, 0.666667, 0.666667),(0.285714, 1.000000, 1.000000),
(0.317460, 1.000000, 1.000000),(0.333333, 0.000000, 0.000000),
(0.349206, 0.000000, 0.000000),(0.365079, 0.666667, 0.666667),
(0.380952, 1.000000, 1.000000),(0.412698, 1.000000, 1.000000),
(0.428571, 0.000000, 0.000000),(0.444444, 0.000000, 0.000000),
(0.460317, 0.666667, 0.666667),(0.476190, 1.000000, 1.000000),
(0.507937, 1.000000, 1.000000),(0.523810, 0.000000, 0.000000),
(0.539683, 0.000000, 0.000000),(0.555556, 0.666667, 0.666667),
(0.571429, 1.000000, 1.000000),(0.603175, 1.000000, 1.000000),
(0.619048, 0.000000, 0.000000),(0.634921, 0.000000, 0.000000),
(0.650794, 0.666667, 0.666667),(0.666667, 1.000000, 1.000000),
(0.698413, 1.000000, 1.000000),(0.714286, 0.000000, 0.000000),
(0.730159, 0.000000, 0.000000),(0.746032, 0.666667, 0.666667),
(0.761905, 1.000000, 1.000000),(0.793651, 1.000000, 1.000000),
(0.809524, 0.000000, 0.000000),(0.825397, 0.000000, 0.000000),
(0.841270, 0.666667, 0.666667),(0.857143, 1.000000, 1.000000),
(0.888889, 1.000000, 1.000000),(0.904762, 0.000000, 0.000000),
(0.920635, 0.000000, 0.000000),(0.936508, 0.666667, 0.666667),
(0.952381, 1.000000, 1.000000),(0.984127, 1.000000, 1.000000),
(1.0, 0.0, 0.0)),
'green': ((0., 0., 0.),(0.031746, 1.000000, 1.000000),
(0.047619, 1.000000, 1.000000),(0.063492, 0.000000, 0.000000),
(0.095238, 0.000000, 0.000000),(0.126984, 1.000000, 1.000000),
(0.142857, 1.000000, 1.000000),(0.158730, 0.000000, 0.000000),
(0.190476, 0.000000, 0.000000),(0.222222, 1.000000, 1.000000),
(0.238095, 1.000000, 1.000000),(0.253968, 0.000000, 0.000000),
(0.285714, 0.000000, 0.000000),(0.317460, 1.000000, 1.000000),
(0.333333, 1.000000, 1.000000),(0.349206, 0.000000, 0.000000),
(0.380952, 0.000000, 0.000000),(0.412698, 1.000000, 1.000000),
(0.428571, 1.000000, 1.000000),(0.444444, 0.000000, 0.000000),
(0.476190, 0.000000, 0.000000),(0.507937, 1.000000, 1.000000),
(0.523810, 1.000000, 1.000000),(0.539683, 0.000000, 0.000000),
(0.571429, 0.000000, 0.000000),(0.603175, 1.000000, 1.000000),
(0.619048, 1.000000, 1.000000),(0.634921, 0.000000, 0.000000),
(0.666667, 0.000000, 0.000000),(0.698413, 1.000000, 1.000000),
(0.714286, 1.000000, 1.000000),(0.730159, 0.000000, 0.000000),
(0.761905, 0.000000, 0.000000),(0.793651, 1.000000, 1.000000),
(0.809524, 1.000000, 1.000000),(0.825397, 0.000000, 0.000000),
(0.857143, 0.000000, 0.000000),(0.888889, 1.000000, 1.000000),
(0.904762, 1.000000, 1.000000),(0.920635, 0.000000, 0.000000),
(0.952381, 0.000000, 0.000000),(0.984127, 1.000000, 1.000000),
(1.0, 1.0, 1.0)),
'blue': ((0., 0., 0.),(0.047619, 0.000000, 0.000000),
(0.063492, 1.000000, 1.000000),(0.079365, 1.000000, 1.000000),
(0.095238, 0.000000, 0.000000),(0.142857, 0.000000, 0.000000),
(0.158730, 1.000000, 1.000000),(0.174603, 1.000000, 1.000000),
(0.190476, 0.000000, 0.000000),(0.238095, 0.000000, 0.000000),
(0.253968, 1.000000, 1.000000),(0.269841, 1.000000, 1.000000),
(0.285714, 0.000000, 0.000000),(0.333333, 0.000000, 0.000000),
(0.349206, 1.000000, 1.000000),(0.365079, 1.000000, 1.000000),
(0.380952, 0.000000, 0.000000),(0.428571, 0.000000, 0.000000),
(0.444444, 1.000000, 1.000000),(0.460317, 1.000000, 1.000000),
(0.476190, 0.000000, 0.000000),(0.523810, 0.000000, 0.000000),
(0.539683, 1.000000, 1.000000),(0.555556, 1.000000, 1.000000),
(0.571429, 0.000000, 0.000000),(0.619048, 0.000000, 0.000000),
(0.634921, 1.000000, 1.000000),(0.650794, 1.000000, 1.000000),
(0.666667, 0.000000, 0.000000),(0.714286, 0.000000, 0.000000),
(0.730159, 1.000000, 1.000000),(0.746032, 1.000000, 1.000000),
(0.761905, 0.000000, 0.000000),(0.809524, 0.000000, 0.000000),
(0.825397, 1.000000, 1.000000),(0.841270, 1.000000, 1.000000),
(0.857143, 0.000000, 0.000000),(0.904762, 0.000000, 0.000000),
(0.920635, 1.000000, 1.000000),(0.936508, 1.000000, 1.000000),
(0.952381, 0.000000, 0.000000),(1.0, 0.0, 0.0))}
_spring_data = {'red': ((0., 1., 1.),(1.0, 1.0, 1.0)),
'green': ((0., 0., 0.),(1.0, 1.0, 1.0)),
'blue': ((0., 1., 1.),(1.0, 0.0, 0.0))}
_summer_data = {'red': ((0., 0., 0.),(1.0, 1.0, 1.0)),
'green': ((0., 0.5, 0.5),(1.0, 1.0, 1.0)),
'blue': ((0., 0.4, 0.4),(1.0, 0.4, 0.4))}
_winter_data = {'red': ((0., 0., 0.),(1.0, 0.0, 0.0)),
'green': ((0., 0., 0.),(1.0, 1.0, 1.0)),
'blue': ((0., 1., 1.),(1.0, 0.5, 0.5))}
_spectral_data = {'red': [(0.0, 0.0, 0.0), (0.05, 0.4667, 0.4667),
(0.10, 0.5333, 0.5333), (0.15, 0.0, 0.0),
(0.20, 0.0, 0.0), (0.25, 0.0, 0.0),
(0.30, 0.0, 0.0), (0.35, 0.0, 0.0),
(0.40, 0.0, 0.0), (0.45, 0.0, 0.0),
(0.50, 0.0, 0.0), (0.55, 0.0, 0.0),
(0.60, 0.0, 0.0), (0.65, 0.7333, 0.7333),
(0.70, 0.9333, 0.9333), (0.75, 1.0, 1.0),
(0.80, 1.0, 1.0), (0.85, 1.0, 1.0),
(0.90, 0.8667, 0.8667), (0.95, 0.80, 0.80),
(1.0, 0.80, 0.80)],
'green': [(0.0, 0.0, 0.0), (0.05, 0.0, 0.0),
(0.10, 0.0, 0.0), (0.15, 0.0, 0.0),
(0.20, 0.0, 0.0), (0.25, 0.4667, 0.4667),
(0.30, 0.6000, 0.6000), (0.35, 0.6667, 0.6667),
(0.40, 0.6667, 0.6667), (0.45, 0.6000, 0.6000),
(0.50, 0.7333, 0.7333), (0.55, 0.8667, 0.8667),
(0.60, 1.0, 1.0), (0.65, 1.0, 1.0),
(0.70, 0.9333, 0.9333), (0.75, 0.8000, 0.8000),
(0.80, 0.6000, 0.6000), (0.85, 0.0, 0.0),
(0.90, 0.0, 0.0), (0.95, 0.0, 0.0),
(1.0, 0.80, 0.80)],
'blue': [(0.0, 0.0, 0.0), (0.05, 0.5333, 0.5333),
(0.10, 0.6000, 0.6000), (0.15, 0.6667, 0.6667),
(0.20, 0.8667, 0.8667), (0.25, 0.8667, 0.8667),
(0.30, 0.8667, 0.8667), (0.35, 0.6667, 0.6667),
(0.40, 0.5333, 0.5333), (0.45, 0.0, 0.0),
(0.5, 0.0, 0.0), (0.55, 0.0, 0.0),
(0.60, 0.0, 0.0), (0.65, 0.0, 0.0),
(0.70, 0.0, 0.0), (0.75, 0.0, 0.0),
(0.80, 0.0, 0.0), (0.85, 0.0, 0.0),
(0.90, 0.0, 0.0), (0.95, 0.0, 0.0),
(1.0, 0.80, 0.80)]}
autumn = colors.LinearSegmentedColormap('autumn', _autumn_data, LUTSIZE)
bone = colors.LinearSegmentedColormap('bone', _bone_data, LUTSIZE)
binary = colors.LinearSegmentedColormap('binary', _binary_data, LUTSIZE)
cool = colors.LinearSegmentedColormap('cool', _cool_data, LUTSIZE)
copper = colors.LinearSegmentedColormap('copper', _copper_data, LUTSIZE)
flag = colors.LinearSegmentedColormap('flag', _flag_data, LUTSIZE)
gray = colors.LinearSegmentedColormap('gray', _gray_data, LUTSIZE)
hot = colors.LinearSegmentedColormap('hot', _hot_data, LUTSIZE)
hsv = colors.LinearSegmentedColormap('hsv', _hsv_data, LUTSIZE)
jet = colors.LinearSegmentedColormap('jet', _jet_data, LUTSIZE)
pink = colors.LinearSegmentedColormap('pink', _pink_data, LUTSIZE)
prism = colors.LinearSegmentedColormap('prism', _prism_data, LUTSIZE)
spring = colors.LinearSegmentedColormap('spring', _spring_data, LUTSIZE)
summer = colors.LinearSegmentedColormap('summer', _summer_data, LUTSIZE)
winter = colors.LinearSegmentedColormap('winter', _winter_data, LUTSIZE)
spectral = colors.LinearSegmentedColormap('spectral', _spectral_data, LUTSIZE)
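# Note: each object above is a callable colormap. Calling it with a scalar or
# an array of values in [0, 1] returns RGBA colors (a usage sketch; the names
# are the module-level colormaps defined just above):
#   jet(0.5)          # -> one RGBA 4-tuple
#   jet([0.0, 1.0])   # -> an array of RGBA rows, one per input value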
datad = {
'autumn': _autumn_data,
'bone': _bone_data,
'binary': _binary_data,
'cool': _cool_data,
'copper': _copper_data,
'flag': _flag_data,
'gray' : _gray_data,
'hot': _hot_data,
'hsv': _hsv_data,
'jet' : _jet_data,
'pink': _pink_data,
'prism': _prism_data,
'spring': _spring_data,
'summer': _summer_data,
'winter': _winter_data,
'spectral': _spectral_data
}
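# Format of the *_data dictionaries above (a minimal illustrative sketch, not
# part of the colormap data itself): for each channel, an entry (x, y0, y1)
# gives the channel value just below (y0) and just above (y1) the anchor
# x in [0, 1]; between anchors the value is interpolated linearly, so y0 != y1
# produces a step. A simple black-to-red ramp could be built the same way
# (the name 'ramp_demo' is hypothetical, used only for illustration):
#   _ramp_demo_data = {'red':   ((0., 0., 0.), (1., 1., 1.)),
#                      'green': ((0., 0., 0.), (1., 0., 0.)),
#                      'blue':  ((0., 0., 0.), (1., 0., 0.))}
#   ramp_demo = colors.LinearSegmentedColormap('ramp_demo', _ramp_demo_data, LUTSIZE)
#   ramp_demo(0.25)   # -> approximately (0.25, 0., 0., 1.)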
# 34 colormaps based on color specifications and designs
# developed by Cynthia Brewer (http://colorbrewer.org).
# The ColorBrewer palettes have been included under the terms
# of an Apache-style license (for details, see the file
# LICENSE_COLORBREWER in the license directory of the matplotlib
# source distribution).
_Accent_data = {'blue': [(0.0, 0.49803921580314636,
0.49803921580314636), (0.14285714285714285, 0.83137255907058716,
0.83137255907058716), (0.2857142857142857, 0.52549022436141968,
0.52549022436141968), (0.42857142857142855, 0.60000002384185791,
0.60000002384185791), (0.5714285714285714, 0.69019609689712524,
0.69019609689712524), (0.7142857142857143, 0.49803921580314636,
0.49803921580314636), (0.8571428571428571, 0.090196080505847931,
0.090196080505847931), (1.0, 0.40000000596046448,
0.40000000596046448)],
'green': [(0.0, 0.78823530673980713, 0.78823530673980713),
(0.14285714285714285, 0.68235296010971069, 0.68235296010971069),
(0.2857142857142857, 0.75294119119644165, 0.75294119119644165),
(0.42857142857142855, 1.0, 1.0), (0.5714285714285714,
0.42352941632270813, 0.42352941632270813), (0.7142857142857143,
0.0078431377187371254, 0.0078431377187371254),
(0.8571428571428571, 0.35686275362968445, 0.35686275362968445),
(1.0, 0.40000000596046448, 0.40000000596046448)],
'red': [(0.0, 0.49803921580314636, 0.49803921580314636),
(0.14285714285714285, 0.7450980544090271, 0.7450980544090271),
(0.2857142857142857, 0.99215686321258545, 0.99215686321258545),
(0.42857142857142855, 1.0, 1.0), (0.5714285714285714,
0.21960784494876862, 0.21960784494876862), (0.7142857142857143,
0.94117647409439087, 0.94117647409439087), (0.8571428571428571,
0.74901962280273438, 0.74901962280273438), (1.0,
0.40000000596046448, 0.40000000596046448)]}
_Blues_data = {'blue': [(0.0, 1.0, 1.0), (0.125, 0.9686274528503418,
0.9686274528503418), (0.25, 0.93725490570068359, 0.93725490570068359),
(0.375, 0.88235294818878174, 0.88235294818878174), (0.5,
0.83921569585800171, 0.83921569585800171), (0.625, 0.7764706015586853,
0.7764706015586853), (0.75, 0.70980393886566162, 0.70980393886566162),
(0.875, 0.61176472902297974, 0.61176472902297974), (1.0,
0.41960784792900085, 0.41960784792900085)],
'green': [(0.0, 0.9843137264251709, 0.9843137264251709), (0.125,
0.92156863212585449, 0.92156863212585449), (0.25,
0.85882353782653809, 0.85882353782653809), (0.375,
0.7921568751335144, 0.7921568751335144), (0.5,
0.68235296010971069, 0.68235296010971069), (0.625,
0.57254904508590698, 0.57254904508590698), (0.75,
0.44313725829124451, 0.44313725829124451), (0.875,
0.31764706969261169, 0.31764706969261169), (1.0,
0.18823529779911041, 0.18823529779911041)],
'red': [(0.0, 0.9686274528503418, 0.9686274528503418), (0.125,
0.87058824300765991, 0.87058824300765991), (0.25,
0.7764706015586853, 0.7764706015586853), (0.375,
0.61960786581039429, 0.61960786581039429), (0.5,
0.41960784792900085, 0.41960784792900085), (0.625,
0.25882354378700256, 0.25882354378700256), (0.75,
0.12941177189350128, 0.12941177189350128), (0.875,
0.031372550874948502, 0.031372550874948502), (1.0,
0.031372550874948502, 0.031372550874948502)]}
_BrBG_data = {'blue': [(0.0, 0.019607843831181526,
0.019607843831181526), (0.10000000000000001, 0.039215687662363052,
0.039215687662363052), (0.20000000000000001, 0.17647059261798859,
0.17647059261798859), (0.29999999999999999, 0.49019607901573181,
0.49019607901573181), (0.40000000000000002, 0.76470589637756348,
0.76470589637756348), (0.5, 0.96078431606292725, 0.96078431606292725),
(0.59999999999999998, 0.89803922176361084, 0.89803922176361084),
(0.69999999999999996, 0.75686275959014893, 0.75686275959014893),
(0.80000000000000004, 0.56078433990478516, 0.56078433990478516),
(0.90000000000000002, 0.36862745881080627, 0.36862745881080627), (1.0,
0.18823529779911041, 0.18823529779911041)],
'green': [(0.0, 0.18823529779911041, 0.18823529779911041),
(0.10000000000000001, 0.31764706969261169, 0.31764706969261169),
(0.20000000000000001, 0.5058823823928833, 0.5058823823928833),
(0.29999999999999999, 0.7607843279838562, 0.7607843279838562),
(0.40000000000000002, 0.90980392694473267, 0.90980392694473267),
(0.5, 0.96078431606292725, 0.96078431606292725),
(0.59999999999999998, 0.91764706373214722, 0.91764706373214722),
(0.69999999999999996, 0.80392158031463623, 0.80392158031463623),
(0.80000000000000004, 0.59215688705444336, 0.59215688705444336),
(0.90000000000000002, 0.40000000596046448, 0.40000000596046448),
(1.0, 0.23529411852359772, 0.23529411852359772)],
'red': [(0.0, 0.32941177487373352, 0.32941177487373352),
(0.10000000000000001, 0.54901963472366333, 0.54901963472366333),
(0.20000000000000001, 0.74901962280273438, 0.74901962280273438),
(0.29999999999999999, 0.87450981140136719, 0.87450981140136719),
(0.40000000000000002, 0.96470588445663452, 0.96470588445663452),
(0.5, 0.96078431606292725, 0.96078431606292725),
(0.59999999999999998, 0.78039216995239258, 0.78039216995239258),
(0.69999999999999996, 0.50196081399917603, 0.50196081399917603),
(0.80000000000000004, 0.20784313976764679, 0.20784313976764679),
(0.90000000000000002, 0.0039215688593685627,
0.0039215688593685627), (1.0, 0.0, 0.0)]}
_BuGn_data = {'blue': [(0.0, 0.99215686321258545,
0.99215686321258545), (0.125, 0.97647058963775635,
0.97647058963775635), (0.25, 0.90196079015731812,
0.90196079015731812), (0.375, 0.78823530673980713,
0.78823530673980713), (0.5, 0.64313727617263794, 0.64313727617263794),
(0.625, 0.46274510025978088, 0.46274510025978088), (0.75,
0.27058824896812439, 0.27058824896812439), (0.875,
0.17254902422428131, 0.17254902422428131), (1.0, 0.10588235408067703,
0.10588235408067703)],
'green': [(0.0, 0.98823529481887817, 0.98823529481887817), (0.125,
0.96078431606292725, 0.96078431606292725), (0.25,
0.92549020051956177, 0.92549020051956177), (0.375,
0.84705883264541626, 0.84705883264541626), (0.5,
0.7607843279838562, 0.7607843279838562), (0.625,
0.68235296010971069, 0.68235296010971069), (0.75,
0.54509806632995605, 0.54509806632995605), (0.875,
0.42745098471641541, 0.42745098471641541), (1.0,
0.26666668057441711, 0.26666668057441711)], 'red': [(0.0,
0.9686274528503418, 0.9686274528503418), (0.125,
0.89803922176361084, 0.89803922176361084), (0.25,
0.80000001192092896, 0.80000001192092896), (0.375,
0.60000002384185791, 0.60000002384185791), (0.5,
0.40000000596046448, 0.40000000596046448), (0.625,
0.25490197539329529, 0.25490197539329529), (0.75,
0.13725490868091583, 0.13725490868091583), (0.875, 0.0, 0.0),
(1.0, 0.0, 0.0)]}
_BuPu_data = {'blue': [(0.0, 0.99215686321258545,
0.99215686321258545), (0.125, 0.95686274766921997,
0.95686274766921997), (0.25, 0.90196079015731812,
0.90196079015731812), (0.375, 0.85490196943283081,
0.85490196943283081), (0.5, 0.7764706015586853, 0.7764706015586853),
(0.625, 0.69411766529083252, 0.69411766529083252), (0.75,
0.61568629741668701, 0.61568629741668701), (0.875,
0.48627451062202454, 0.48627451062202454), (1.0, 0.29411765933036804,
0.29411765933036804)],
'green': [(0.0, 0.98823529481887817, 0.98823529481887817), (0.125,
0.92549020051956177, 0.92549020051956177), (0.25,
0.82745099067687988, 0.82745099067687988), (0.375,
0.73725491762161255, 0.73725491762161255), (0.5,
0.58823531866073608, 0.58823531866073608), (0.625,
0.41960784792900085, 0.41960784792900085), (0.75,
0.25490197539329529, 0.25490197539329529), (0.875,
0.058823529630899429, 0.058823529630899429), (1.0, 0.0, 0.0)],
'red': [(0.0, 0.9686274528503418, 0.9686274528503418), (0.125,
0.87843137979507446, 0.87843137979507446), (0.25,
0.74901962280273438, 0.74901962280273438), (0.375,
0.61960786581039429, 0.61960786581039429), (0.5,
0.54901963472366333, 0.54901963472366333), (0.625,
0.54901963472366333, 0.54901963472366333), (0.75,
0.53333336114883423, 0.53333336114883423), (0.875,
0.5058823823928833, 0.5058823823928833), (1.0,
0.30196079611778259, 0.30196079611778259)]}
_Dark2_data = {'blue': [(0.0, 0.46666666865348816,
0.46666666865348816), (0.14285714285714285, 0.0078431377187371254,
0.0078431377187371254), (0.2857142857142857, 0.70196080207824707,
0.70196080207824707), (0.42857142857142855, 0.54117649793624878,
0.54117649793624878), (0.5714285714285714, 0.11764705926179886,
0.11764705926179886), (0.7142857142857143, 0.0078431377187371254,
0.0078431377187371254), (0.8571428571428571, 0.11372549086809158,
0.11372549086809158), (1.0, 0.40000000596046448,
0.40000000596046448)],
'green': [(0.0, 0.61960786581039429, 0.61960786581039429),
(0.14285714285714285, 0.37254902720451355, 0.37254902720451355),
(0.2857142857142857, 0.43921568989753723, 0.43921568989753723),
(0.42857142857142855, 0.16078431904315948, 0.16078431904315948),
(0.5714285714285714, 0.65098041296005249, 0.65098041296005249),
(0.7142857142857143, 0.67058825492858887, 0.67058825492858887),
(0.8571428571428571, 0.46274510025978088, 0.46274510025978088),
(1.0, 0.40000000596046448, 0.40000000596046448)],
'red': [(0.0, 0.10588235408067703, 0.10588235408067703),
(0.14285714285714285, 0.85098040103912354, 0.85098040103912354),
(0.2857142857142857, 0.45882353186607361, 0.45882353186607361),
(0.42857142857142855, 0.90588235855102539, 0.90588235855102539),
(0.5714285714285714, 0.40000000596046448, 0.40000000596046448),
(0.7142857142857143, 0.90196079015731812, 0.90196079015731812),
(0.8571428571428571, 0.65098041296005249, 0.65098041296005249),
(1.0, 0.40000000596046448, 0.40000000596046448)]}
_GnBu_data = {'blue': [(0.0, 0.94117647409439087,
0.94117647409439087), (0.125, 0.85882353782653809,
0.85882353782653809), (0.25, 0.77254903316497803,
0.77254903316497803), (0.375, 0.70980393886566162,
0.70980393886566162), (0.5, 0.76862746477127075, 0.76862746477127075),
(0.625, 0.82745099067687988, 0.82745099067687988), (0.75,
0.7450980544090271, 0.7450980544090271), (0.875, 0.67450982332229614,
0.67450982332229614), (1.0, 0.5058823823928833, 0.5058823823928833)],
'green': [(0.0, 0.98823529481887817, 0.98823529481887817), (0.125,
0.9529411792755127, 0.9529411792755127), (0.25,
0.92156863212585449, 0.92156863212585449), (0.375,
0.86666667461395264, 0.86666667461395264), (0.5,
0.80000001192092896, 0.80000001192092896), (0.625,
0.70196080207824707, 0.70196080207824707), (0.75,
0.54901963472366333, 0.54901963472366333), (0.875,
0.40784314274787903, 0.40784314274787903), (1.0,
0.25098040699958801, 0.25098040699958801)],
'red': [(0.0, 0.9686274528503418, 0.9686274528503418), (0.125,
0.87843137979507446, 0.87843137979507446), (0.25,
0.80000001192092896, 0.80000001192092896), (0.375,
0.65882354974746704, 0.65882354974746704), (0.5,
0.48235294222831726, 0.48235294222831726), (0.625,
0.30588236451148987, 0.30588236451148987), (0.75,
0.16862745583057404, 0.16862745583057404), (0.875,
0.031372550874948502, 0.031372550874948502), (1.0,
0.031372550874948502, 0.031372550874948502)]}
_Greens_data = {'blue': [(0.0, 0.96078431606292725,
0.96078431606292725), (0.125, 0.87843137979507446,
0.87843137979507446), (0.25, 0.75294119119644165,
0.75294119119644165), (0.375, 0.60784316062927246,
0.60784316062927246), (0.5, 0.46274510025978088, 0.46274510025978088),
(0.625, 0.364705890417099, 0.364705890417099), (0.75,
0.27058824896812439, 0.27058824896812439), (0.875,
0.17254902422428131, 0.17254902422428131), (1.0, 0.10588235408067703,
0.10588235408067703)],
'green': [(0.0, 0.98823529481887817, 0.98823529481887817), (0.125,
0.96078431606292725, 0.96078431606292725), (0.25,
0.91372549533843994, 0.91372549533843994), (0.375,
0.85098040103912354, 0.85098040103912354), (0.5,
0.76862746477127075, 0.76862746477127075), (0.625,
0.67058825492858887, 0.67058825492858887), (0.75,
0.54509806632995605, 0.54509806632995605), (0.875,
0.42745098471641541, 0.42745098471641541), (1.0,
0.26666668057441711, 0.26666668057441711)],
'red': [(0.0, 0.9686274528503418, 0.9686274528503418), (0.125,
0.89803922176361084, 0.89803922176361084), (0.25,
0.78039216995239258, 0.78039216995239258), (0.375,
0.63137257099151611, 0.63137257099151611), (0.5,
0.45490196347236633, 0.45490196347236633), (0.625,
0.25490197539329529, 0.25490197539329529), (0.75,
0.13725490868091583, 0.13725490868091583), (0.875, 0.0, 0.0),
(1.0, 0.0, 0.0)]}
_Greys_data = {'blue': [(0.0, 1.0, 1.0), (0.125, 0.94117647409439087,
0.94117647409439087), (0.25, 0.85098040103912354,
0.85098040103912354), (0.375, 0.74117648601531982,
0.74117648601531982), (0.5, 0.58823531866073608, 0.58823531866073608),
(0.625, 0.45098039507865906, 0.45098039507865906), (0.75,
0.32156863808631897, 0.32156863808631897), (0.875,
0.14509804546833038, 0.14509804546833038), (1.0, 0.0, 0.0)],
'green': [(0.0, 1.0, 1.0), (0.125, 0.94117647409439087,
0.94117647409439087), (0.25, 0.85098040103912354,
0.85098040103912354), (0.375, 0.74117648601531982,
0.74117648601531982), (0.5, 0.58823531866073608,
0.58823531866073608), (0.625, 0.45098039507865906,
0.45098039507865906), (0.75, 0.32156863808631897,
0.32156863808631897), (0.875, 0.14509804546833038,
0.14509804546833038), (1.0, 0.0, 0.0)],
'red': [(0.0, 1.0, 1.0), (0.125, 0.94117647409439087,
0.94117647409439087), (0.25, 0.85098040103912354,
0.85098040103912354), (0.375, 0.74117648601531982,
0.74117648601531982), (0.5, 0.58823531866073608,
0.58823531866073608), (0.625, 0.45098039507865906,
0.45098039507865906), (0.75, 0.32156863808631897,
0.32156863808631897), (0.875, 0.14509804546833038,
0.14509804546833038), (1.0, 0.0, 0.0)]}
_Oranges_data = {'blue': [(0.0, 0.92156863212585449,
0.92156863212585449), (0.125, 0.80784314870834351,
0.80784314870834351), (0.25, 0.63529413938522339,
0.63529413938522339), (0.375, 0.41960784792900085,
0.41960784792900085), (0.5, 0.23529411852359772, 0.23529411852359772),
(0.625, 0.074509806931018829, 0.074509806931018829), (0.75,
0.0039215688593685627, 0.0039215688593685627), (0.875,
0.011764706112444401, 0.011764706112444401), (1.0,
0.015686275437474251, 0.015686275437474251)],
'green': [(0.0, 0.96078431606292725, 0.96078431606292725), (0.125,
0.90196079015731812, 0.90196079015731812), (0.25,
0.81568628549575806, 0.81568628549575806), (0.375,
0.68235296010971069, 0.68235296010971069), (0.5,
0.55294120311737061, 0.55294120311737061), (0.625,
0.4117647111415863, 0.4117647111415863), (0.75,
0.28235295414924622, 0.28235295414924622), (0.875,
0.21176470816135406, 0.21176470816135406), (1.0,
0.15294118225574493, 0.15294118225574493)],
'red': [(0.0, 1.0, 1.0), (0.125, 0.99607843160629272,
0.99607843160629272), (0.25, 0.99215686321258545,
0.99215686321258545), (0.375, 0.99215686321258545,
0.99215686321258545), (0.5, 0.99215686321258545,
0.99215686321258545), (0.625, 0.94509804248809814,
0.94509804248809814), (0.75, 0.85098040103912354,
0.85098040103912354), (0.875, 0.65098041296005249,
0.65098041296005249), (1.0, 0.49803921580314636,
0.49803921580314636)]}
_OrRd_data = {'blue': [(0.0, 0.92549020051956177,
0.92549020051956177), (0.125, 0.78431373834609985,
0.78431373834609985), (0.25, 0.61960786581039429,
0.61960786581039429), (0.375, 0.51764708757400513,
0.51764708757400513), (0.5, 0.3490196168422699, 0.3490196168422699),
(0.625, 0.28235295414924622, 0.28235295414924622), (0.75,
0.12156862765550613, 0.12156862765550613), (0.875, 0.0, 0.0), (1.0,
0.0, 0.0)],
'green': [(0.0, 0.9686274528503418, 0.9686274528503418), (0.125,
0.90980392694473267, 0.90980392694473267), (0.25,
0.83137255907058716, 0.83137255907058716), (0.375,
0.73333334922790527, 0.73333334922790527), (0.5,
0.55294120311737061, 0.55294120311737061), (0.625,
0.3960784375667572, 0.3960784375667572), (0.75,
0.18823529779911041, 0.18823529779911041), (0.875, 0.0, 0.0),
(1.0, 0.0, 0.0)],
'red': [(0.0, 1.0, 1.0), (0.125, 0.99607843160629272,
0.99607843160629272), (0.25, 0.99215686321258545,
0.99215686321258545), (0.375, 0.99215686321258545,
0.99215686321258545), (0.5, 0.98823529481887817,
0.98823529481887817), (0.625, 0.93725490570068359,
0.93725490570068359), (0.75, 0.84313726425170898,
0.84313726425170898), (0.875, 0.70196080207824707,
0.70196080207824707), (1.0, 0.49803921580314636,
0.49803921580314636)]}
_Paired_data = {'blue': [(0.0, 0.89019608497619629,
0.89019608497619629), (0.090909090909090912, 0.70588237047195435,
0.70588237047195435), (0.18181818181818182, 0.54117649793624878,
0.54117649793624878), (0.27272727272727271, 0.17254902422428131,
0.17254902422428131), (0.36363636363636365, 0.60000002384185791,
0.60000002384185791), (0.45454545454545453, 0.10980392247438431,
0.10980392247438431), (0.54545454545454541, 0.43529412150382996,
0.43529412150382996), (0.63636363636363635, 0.0, 0.0),
(0.72727272727272729, 0.83921569585800171, 0.83921569585800171),
(0.81818181818181823, 0.60392159223556519, 0.60392159223556519),
(0.90909090909090906, 0.60000002384185791, 0.60000002384185791), (1.0,
0.15686275064945221, 0.15686275064945221)],
'green': [(0.0, 0.80784314870834351, 0.80784314870834351),
(0.090909090909090912, 0.47058823704719543, 0.47058823704719543),
(0.18181818181818182, 0.87450981140136719, 0.87450981140136719),
(0.27272727272727271, 0.62745100259780884, 0.62745100259780884),
(0.36363636363636365, 0.60392159223556519, 0.60392159223556519),
(0.45454545454545453, 0.10196078568696976, 0.10196078568696976),
(0.54545454545454541, 0.74901962280273438, 0.74901962280273438),
(0.63636363636363635, 0.49803921580314636, 0.49803921580314636),
(0.72727272727272729, 0.69803923368453979, 0.69803923368453979),
(0.81818181818181823, 0.23921568691730499, 0.23921568691730499),
(0.90909090909090906, 1.0, 1.0), (1.0, 0.3490196168422699,
0.3490196168422699)],
'red': [(0.0, 0.65098041296005249, 0.65098041296005249),
(0.090909090909090912, 0.12156862765550613, 0.12156862765550613),
(0.18181818181818182, 0.69803923368453979, 0.69803923368453979),
(0.27272727272727271, 0.20000000298023224, 0.20000000298023224),
(0.36363636363636365, 0.9843137264251709, 0.9843137264251709),
(0.45454545454545453, 0.89019608497619629, 0.89019608497619629),
(0.54545454545454541, 0.99215686321258545, 0.99215686321258545),
(0.63636363636363635, 1.0, 1.0), (0.72727272727272729,
0.7921568751335144, 0.7921568751335144), (0.81818181818181823,
0.41568627953529358, 0.41568627953529358), (0.90909090909090906,
1.0, 1.0), (1.0, 0.69411766529083252, 0.69411766529083252)]}
_Pastel1_data = {'blue': [(0.0, 0.68235296010971069,
0.68235296010971069), (0.125, 0.89019608497619629,
0.89019608497619629), (0.25, 0.77254903316497803,
0.77254903316497803), (0.375, 0.89411765336990356,
0.89411765336990356), (0.5, 0.65098041296005249, 0.65098041296005249),
(0.625, 0.80000001192092896, 0.80000001192092896), (0.75,
0.74117648601531982, 0.74117648601531982), (0.875,
0.92549020051956177, 0.92549020051956177), (1.0, 0.94901961088180542,
0.94901961088180542)],
'green': [(0.0, 0.70588237047195435, 0.70588237047195435), (0.125,
0.80392158031463623, 0.80392158031463623), (0.25,
0.92156863212585449, 0.92156863212585449), (0.375,
0.79607844352722168, 0.79607844352722168), (0.5,
0.85098040103912354, 0.85098040103912354), (0.625, 1.0, 1.0),
(0.75, 0.84705883264541626, 0.84705883264541626), (0.875,
0.85490196943283081, 0.85490196943283081), (1.0,
0.94901961088180542, 0.94901961088180542)],
'red': [(0.0, 0.9843137264251709, 0.9843137264251709), (0.125,
0.70196080207824707, 0.70196080207824707), (0.25,
0.80000001192092896, 0.80000001192092896), (0.375,
0.87058824300765991, 0.87058824300765991), (0.5,
0.99607843160629272, 0.99607843160629272), (0.625, 1.0, 1.0),
(0.75, 0.89803922176361084, 0.89803922176361084), (0.875,
0.99215686321258545, 0.99215686321258545), (1.0,
0.94901961088180542, 0.94901961088180542)]}
_Pastel2_data = {'blue': [(0.0, 0.80392158031463623,
0.80392158031463623), (0.14285714285714285, 0.67450982332229614,
0.67450982332229614), (0.2857142857142857, 0.90980392694473267,
0.90980392694473267), (0.42857142857142855, 0.89411765336990356,
0.89411765336990356), (0.5714285714285714, 0.78823530673980713,
0.78823530673980713), (0.7142857142857143, 0.68235296010971069,
0.68235296010971069), (0.8571428571428571, 0.80000001192092896,
0.80000001192092896), (1.0, 0.80000001192092896,
0.80000001192092896)],
'green': [(0.0, 0.88627451658248901, 0.88627451658248901),
(0.14285714285714285, 0.80392158031463623, 0.80392158031463623),
(0.2857142857142857, 0.83529412746429443, 0.83529412746429443),
(0.42857142857142855, 0.7921568751335144, 0.7921568751335144),
(0.5714285714285714, 0.96078431606292725, 0.96078431606292725),
(0.7142857142857143, 0.94901961088180542, 0.94901961088180542),
(0.8571428571428571, 0.88627451658248901, 0.88627451658248901),
(1.0, 0.80000001192092896, 0.80000001192092896)],
'red': [(0.0, 0.70196080207824707, 0.70196080207824707),
(0.14285714285714285, 0.99215686321258545, 0.99215686321258545),
(0.2857142857142857, 0.79607844352722168, 0.79607844352722168),
(0.42857142857142855, 0.95686274766921997, 0.95686274766921997),
(0.5714285714285714, 0.90196079015731812, 0.90196079015731812),
(0.7142857142857143, 1.0, 1.0), (0.8571428571428571,
0.94509804248809814, 0.94509804248809814), (1.0,
0.80000001192092896, 0.80000001192092896)]}
_PiYG_data = {'blue': [(0.0, 0.32156863808631897,
0.32156863808631897), (0.10000000000000001, 0.49019607901573181,
0.49019607901573181), (0.20000000000000001, 0.68235296010971069,
0.68235296010971069), (0.29999999999999999, 0.85490196943283081,
0.85490196943283081), (0.40000000000000002, 0.93725490570068359,
0.93725490570068359), (0.5, 0.9686274528503418, 0.9686274528503418),
(0.59999999999999998, 0.81568628549575806, 0.81568628549575806),
(0.69999999999999996, 0.52549022436141968, 0.52549022436141968),
(0.80000000000000004, 0.25490197539329529, 0.25490197539329529),
(0.90000000000000002, 0.12941177189350128, 0.12941177189350128), (1.0,
0.098039217293262482, 0.098039217293262482)],
'green': [(0.0, 0.0039215688593685627, 0.0039215688593685627),
(0.10000000000000001, 0.10588235408067703, 0.10588235408067703),
(0.20000000000000001, 0.46666666865348816, 0.46666666865348816),
(0.29999999999999999, 0.7137255072593689, 0.7137255072593689),
(0.40000000000000002, 0.87843137979507446, 0.87843137979507446),
(0.5, 0.9686274528503418, 0.9686274528503418),
(0.59999999999999998, 0.96078431606292725, 0.96078431606292725),
(0.69999999999999996, 0.88235294818878174, 0.88235294818878174),
(0.80000000000000004, 0.73725491762161255, 0.73725491762161255),
(0.90000000000000002, 0.57254904508590698, 0.57254904508590698),
(1.0, 0.39215686917304993, 0.39215686917304993)],
'red': [(0.0, 0.55686277151107788, 0.55686277151107788),
(0.10000000000000001, 0.77254903316497803, 0.77254903316497803),
(0.20000000000000001, 0.87058824300765991, 0.87058824300765991),
(0.29999999999999999, 0.94509804248809814, 0.94509804248809814),
(0.40000000000000002, 0.99215686321258545, 0.99215686321258545),
(0.5, 0.9686274528503418, 0.9686274528503418),
(0.59999999999999998, 0.90196079015731812, 0.90196079015731812),
(0.69999999999999996, 0.72156864404678345, 0.72156864404678345),
(0.80000000000000004, 0.49803921580314636, 0.49803921580314636),
(0.90000000000000002, 0.30196079611778259, 0.30196079611778259),
(1.0, 0.15294118225574493, 0.15294118225574493)]}
_PRGn_data = {'blue': [(0.0, 0.29411765933036804,
0.29411765933036804), (0.10000000000000001, 0.51372551918029785,
0.51372551918029785), (0.20000000000000001, 0.67058825492858887,
0.67058825492858887), (0.29999999999999999, 0.81176471710205078,
0.81176471710205078), (0.40000000000000002, 0.90980392694473267,
0.90980392694473267), (0.5, 0.9686274528503418, 0.9686274528503418),
(0.59999999999999998, 0.82745099067687988, 0.82745099067687988),
(0.69999999999999996, 0.62745100259780884, 0.62745100259780884),
(0.80000000000000004, 0.3803921639919281, 0.3803921639919281),
(0.90000000000000002, 0.21568627655506134, 0.21568627655506134), (1.0,
0.10588235408067703, 0.10588235408067703)],
'green': [(0.0, 0.0, 0.0), (0.10000000000000001,
0.16470588743686676, 0.16470588743686676), (0.20000000000000001,
0.43921568989753723, 0.43921568989753723), (0.29999999999999999,
0.64705884456634521, 0.64705884456634521), (0.40000000000000002,
0.83137255907058716, 0.83137255907058716), (0.5,
0.9686274528503418, 0.9686274528503418), (0.59999999999999998,
0.94117647409439087, 0.94117647409439087), (0.69999999999999996,
0.85882353782653809, 0.85882353782653809), (0.80000000000000004,
0.68235296010971069, 0.68235296010971069), (0.90000000000000002,
0.47058823704719543, 0.47058823704719543), (1.0,
0.26666668057441711, 0.26666668057441711)],
'red': [(0.0, 0.25098040699958801, 0.25098040699958801),
(0.10000000000000001, 0.46274510025978088, 0.46274510025978088),
(0.20000000000000001, 0.60000002384185791, 0.60000002384185791),
(0.29999999999999999, 0.7607843279838562, 0.7607843279838562),
(0.40000000000000002, 0.90588235855102539, 0.90588235855102539),
(0.5, 0.9686274528503418, 0.9686274528503418),
(0.59999999999999998, 0.85098040103912354, 0.85098040103912354),
(0.69999999999999996, 0.65098041296005249, 0.65098041296005249),
(0.80000000000000004, 0.35294118523597717, 0.35294118523597717),
(0.90000000000000002, 0.10588235408067703, 0.10588235408067703),
(1.0, 0.0, 0.0)]}
_PuBu_data = {'blue': [(0.0, 0.9843137264251709, 0.9843137264251709),
(0.125, 0.94901961088180542, 0.94901961088180542), (0.25,
0.90196079015731812, 0.90196079015731812), (0.375,
0.85882353782653809, 0.85882353782653809), (0.5, 0.81176471710205078,
0.81176471710205078), (0.625, 0.75294119119644165,
0.75294119119644165), (0.75, 0.69019609689712524,
0.69019609689712524), (0.875, 0.55294120311737061,
0.55294120311737061), (1.0, 0.34509804844856262,
0.34509804844856262)],
'green': [(0.0, 0.9686274528503418, 0.9686274528503418), (0.125,
0.90588235855102539, 0.90588235855102539), (0.25,
0.81960785388946533, 0.81960785388946533), (0.375,
0.74117648601531982, 0.74117648601531982), (0.5,
0.66274511814117432, 0.66274511814117432), (0.625,
0.56470590829849243, 0.56470590829849243), (0.75,
0.43921568989753723, 0.43921568989753723), (0.875,
0.35294118523597717, 0.35294118523597717), (1.0,
0.21960784494876862, 0.21960784494876862)],
'red': [(0.0, 1.0, 1.0), (0.125, 0.92549020051956177,
0.92549020051956177), (0.25, 0.81568628549575806,
0.81568628549575806), (0.375, 0.65098041296005249,
0.65098041296005249), (0.5, 0.45490196347236633,
0.45490196347236633), (0.625, 0.21176470816135406,
0.21176470816135406), (0.75, 0.019607843831181526,
0.019607843831181526), (0.875, 0.015686275437474251,
0.015686275437474251), (1.0, 0.0078431377187371254,
0.0078431377187371254)]}
_PuBuGn_data = {'blue': [(0.0, 0.9843137264251709,
0.9843137264251709), (0.125, 0.94117647409439087,
0.94117647409439087), (0.25, 0.90196079015731812,
0.90196079015731812), (0.375, 0.85882353782653809,
0.85882353782653809), (0.5, 0.81176471710205078, 0.81176471710205078),
(0.625, 0.75294119119644165, 0.75294119119644165), (0.75,
0.54117649793624878, 0.54117649793624878), (0.875, 0.3490196168422699,
0.3490196168422699), (1.0, 0.21176470816135406, 0.21176470816135406)],
'green': [(0.0, 0.9686274528503418, 0.9686274528503418), (0.125,
0.88627451658248901, 0.88627451658248901), (0.25,
0.81960785388946533, 0.81960785388946533), (0.375,
0.74117648601531982, 0.74117648601531982), (0.5,
0.66274511814117432, 0.66274511814117432), (0.625,
0.56470590829849243, 0.56470590829849243), (0.75,
0.5058823823928833, 0.5058823823928833), (0.875,
0.42352941632270813, 0.42352941632270813), (1.0,
0.27450981736183167, 0.27450981736183167)],
'red': [(0.0, 1.0, 1.0), (0.125, 0.92549020051956177,
0.92549020051956177), (0.25, 0.81568628549575806,
0.81568628549575806), (0.375, 0.65098041296005249,
0.65098041296005249), (0.5, 0.40392157435417175,
0.40392157435417175), (0.625, 0.21176470816135406,
0.21176470816135406), (0.75, 0.0078431377187371254,
0.0078431377187371254), (0.875, 0.0039215688593685627,
0.0039215688593685627), (1.0, 0.0039215688593685627,
0.0039215688593685627)]}
_PuOr_data = {'blue': [(0.0, 0.031372550874948502,
0.031372550874948502), (0.10000000000000001, 0.023529412224888802,
0.023529412224888802), (0.20000000000000001, 0.078431375324726105,
0.078431375324726105), (0.29999999999999999, 0.38823530077934265,
0.38823530077934265), (0.40000000000000002, 0.7137255072593689,
0.7137255072593689), (0.5, 0.9686274528503418, 0.9686274528503418),
(0.59999999999999998, 0.92156863212585449, 0.92156863212585449),
(0.69999999999999996, 0.82352942228317261, 0.82352942228317261),
(0.80000000000000004, 0.67450982332229614, 0.67450982332229614),
(0.90000000000000002, 0.53333336114883423, 0.53333336114883423), (1.0,
0.29411765933036804, 0.29411765933036804)],
'green': [(0.0, 0.23137255012989044, 0.23137255012989044),
(0.10000000000000001, 0.34509804844856262, 0.34509804844856262),
(0.20000000000000001, 0.50980395078659058, 0.50980395078659058),
(0.29999999999999999, 0.72156864404678345, 0.72156864404678345),
(0.40000000000000002, 0.87843137979507446, 0.87843137979507446),
(0.5, 0.9686274528503418, 0.9686274528503418),
(0.59999999999999998, 0.85490196943283081, 0.85490196943283081),
(0.69999999999999996, 0.67058825492858887, 0.67058825492858887),
(0.80000000000000004, 0.45098039507865906, 0.45098039507865906),
(0.90000000000000002, 0.15294118225574493, 0.15294118225574493),
(1.0, 0.0, 0.0)],
'red': [(0.0, 0.49803921580314636, 0.49803921580314636),
(0.10000000000000001, 0.70196080207824707, 0.70196080207824707),
(0.20000000000000001, 0.87843137979507446, 0.87843137979507446),
(0.29999999999999999, 0.99215686321258545, 0.99215686321258545),
(0.40000000000000002, 0.99607843160629272, 0.99607843160629272),
(0.5, 0.9686274528503418, 0.9686274528503418),
(0.59999999999999998, 0.84705883264541626, 0.84705883264541626),
(0.69999999999999996, 0.69803923368453979, 0.69803923368453979),
(0.80000000000000004, 0.50196081399917603, 0.50196081399917603),
(0.90000000000000002, 0.32941177487373352, 0.32941177487373352),
(1.0, 0.17647059261798859, 0.17647059261798859)]}
_PuRd_data = {'blue': [(0.0, 0.97647058963775635,
0.97647058963775635), (0.125, 0.93725490570068359,
0.93725490570068359), (0.25, 0.85490196943283081,
0.85490196943283081), (0.375, 0.78039216995239258,
0.78039216995239258), (0.5, 0.69019609689712524, 0.69019609689712524),
(0.625, 0.54117649793624878, 0.54117649793624878), (0.75,
0.33725491166114807, 0.33725491166114807), (0.875,
0.26274511218070984, 0.26274511218070984), (1.0, 0.12156862765550613,
0.12156862765550613)],
'green': [(0.0, 0.95686274766921997, 0.95686274766921997), (0.125,
0.88235294818878174, 0.88235294818878174), (0.25,
0.72549021244049072, 0.72549021244049072), (0.375,
0.58039218187332153, 0.58039218187332153), (0.5,
0.3960784375667572, 0.3960784375667572), (0.625,
0.16078431904315948, 0.16078431904315948), (0.75,
0.070588238537311554, 0.070588238537311554), (0.875, 0.0, 0.0),
(1.0, 0.0, 0.0)],
'red': [(0.0, 0.9686274528503418, 0.9686274528503418), (0.125,
0.90588235855102539, 0.90588235855102539), (0.25,
0.83137255907058716, 0.83137255907058716), (0.375,
0.78823530673980713, 0.78823530673980713), (0.5,
0.87450981140136719, 0.87450981140136719), (0.625,
0.90588235855102539, 0.90588235855102539), (0.75,
0.80784314870834351, 0.80784314870834351), (0.875,
0.59607845544815063, 0.59607845544815063), (1.0,
0.40392157435417175, 0.40392157435417175)]}
_Purples_data = {'blue': [(0.0, 0.99215686321258545,
0.99215686321258545), (0.125, 0.96078431606292725,
0.96078431606292725), (0.25, 0.92156863212585449,
0.92156863212585449), (0.375, 0.86274510622024536,
0.86274510622024536), (0.5, 0.78431373834609985, 0.78431373834609985),
(0.625, 0.729411780834198, 0.729411780834198), (0.75,
0.63921570777893066, 0.63921570777893066), (0.875,
0.56078433990478516, 0.56078433990478516), (1.0, 0.49019607901573181,
0.49019607901573181)],
'green': [(0.0, 0.9843137264251709, 0.9843137264251709), (0.125,
0.92941176891326904, 0.92941176891326904), (0.25,
0.85490196943283081, 0.85490196943283081), (0.375,
0.74117648601531982, 0.74117648601531982), (0.5,
0.60392159223556519, 0.60392159223556519), (0.625,
0.49019607901573181, 0.49019607901573181), (0.75,
0.31764706969261169, 0.31764706969261169), (0.875,
0.15294118225574493, 0.15294118225574493), (1.0, 0.0, 0.0)],
'red': [(0.0, 0.98823529481887817, 0.98823529481887817), (0.125,
0.93725490570068359, 0.93725490570068359), (0.25,
0.85490196943283081, 0.85490196943283081), (0.375,
0.73725491762161255, 0.73725491762161255), (0.5,
0.61960786581039429, 0.61960786581039429), (0.625,
0.50196081399917603, 0.50196081399917603), (0.75,
0.41568627953529358, 0.41568627953529358), (0.875,
0.32941177487373352, 0.32941177487373352), (1.0,
0.24705882370471954, 0.24705882370471954)]}
_RdBu_data = {'blue': [(0.0, 0.12156862765550613,
0.12156862765550613), (0.10000000000000001, 0.16862745583057404,
0.16862745583057404), (0.20000000000000001, 0.30196079611778259,
0.30196079611778259), (0.29999999999999999, 0.50980395078659058,
0.50980395078659058), (0.40000000000000002, 0.78039216995239258,
0.78039216995239258), (0.5, 0.9686274528503418, 0.9686274528503418),
(0.59999999999999998, 0.94117647409439087, 0.94117647409439087),
(0.69999999999999996, 0.87058824300765991, 0.87058824300765991),
(0.80000000000000004, 0.76470589637756348, 0.76470589637756348),
(0.90000000000000002, 0.67450982332229614, 0.67450982332229614), (1.0,
0.3803921639919281, 0.3803921639919281)],
'green': [(0.0, 0.0, 0.0), (0.10000000000000001,
0.094117648899555206, 0.094117648899555206), (0.20000000000000001,
0.37647059559822083, 0.37647059559822083), (0.29999999999999999,
0.64705884456634521, 0.64705884456634521), (0.40000000000000002,
0.85882353782653809, 0.85882353782653809), (0.5,
0.9686274528503418, 0.9686274528503418), (0.59999999999999998,
0.89803922176361084, 0.89803922176361084), (0.69999999999999996,
0.77254903316497803, 0.77254903316497803), (0.80000000000000004,
0.57647061347961426, 0.57647061347961426), (0.90000000000000002,
0.40000000596046448, 0.40000000596046448), (1.0,
0.18823529779911041, 0.18823529779911041)],
'red': [(0.0, 0.40392157435417175, 0.40392157435417175),
(0.10000000000000001, 0.69803923368453979, 0.69803923368453979),
(0.20000000000000001, 0.83921569585800171, 0.83921569585800171),
(0.29999999999999999, 0.95686274766921997, 0.95686274766921997),
(0.40000000000000002, 0.99215686321258545, 0.99215686321258545),
(0.5, 0.9686274528503418, 0.9686274528503418),
(0.59999999999999998, 0.81960785388946533, 0.81960785388946533),
(0.69999999999999996, 0.57254904508590698, 0.57254904508590698),
(0.80000000000000004, 0.26274511218070984, 0.26274511218070984),
(0.90000000000000002, 0.12941177189350128, 0.12941177189350128),
(1.0, 0.019607843831181526, 0.019607843831181526)]}
_RdGy_data = {'blue': [(0.0, 0.12156862765550613,
0.12156862765550613), (0.10000000000000001, 0.16862745583057404,
0.16862745583057404), (0.20000000000000001, 0.30196079611778259,
0.30196079611778259), (0.29999999999999999, 0.50980395078659058,
0.50980395078659058), (0.40000000000000002, 0.78039216995239258,
0.78039216995239258), (0.5, 1.0, 1.0), (0.59999999999999998,
0.87843137979507446, 0.87843137979507446), (0.69999999999999996,
0.729411780834198, 0.729411780834198), (0.80000000000000004,
0.52941179275512695, 0.52941179275512695), (0.90000000000000002,
0.30196079611778259, 0.30196079611778259), (1.0, 0.10196078568696976,
0.10196078568696976)],
'green': [(0.0, 0.0, 0.0), (0.10000000000000001,
0.094117648899555206, 0.094117648899555206), (0.20000000000000001,
0.37647059559822083, 0.37647059559822083), (0.29999999999999999,
0.64705884456634521, 0.64705884456634521), (0.40000000000000002,
0.85882353782653809, 0.85882353782653809), (0.5, 1.0, 1.0),
(0.59999999999999998, 0.87843137979507446, 0.87843137979507446),
(0.69999999999999996, 0.729411780834198, 0.729411780834198),
(0.80000000000000004, 0.52941179275512695, 0.52941179275512695),
(0.90000000000000002, 0.30196079611778259, 0.30196079611778259),
(1.0, 0.10196078568696976, 0.10196078568696976)],
'red': [(0.0, 0.40392157435417175, 0.40392157435417175),
(0.10000000000000001, 0.69803923368453979, 0.69803923368453979),
(0.20000000000000001, 0.83921569585800171, 0.83921569585800171),
(0.29999999999999999, 0.95686274766921997, 0.95686274766921997),
(0.40000000000000002, 0.99215686321258545, 0.99215686321258545),
(0.5, 1.0, 1.0), (0.59999999999999998, 0.87843137979507446,
0.87843137979507446), (0.69999999999999996, 0.729411780834198,
0.729411780834198), (0.80000000000000004, 0.52941179275512695,
0.52941179275512695), (0.90000000000000002, 0.30196079611778259,
0.30196079611778259), (1.0, 0.10196078568696976,
0.10196078568696976)]}
_RdPu_data = {'blue': [(0.0, 0.9529411792755127, 0.9529411792755127),
(0.125, 0.86666667461395264, 0.86666667461395264), (0.25,
0.75294119119644165, 0.75294119119644165), (0.375,
0.70980393886566162, 0.70980393886566162), (0.5, 0.63137257099151611,
0.63137257099151611), (0.625, 0.59215688705444336,
0.59215688705444336), (0.75, 0.49411764740943909,
0.49411764740943909), (0.875, 0.46666666865348816,
0.46666666865348816), (1.0, 0.41568627953529358,
0.41568627953529358)],
'green': [(0.0, 0.9686274528503418, 0.9686274528503418), (0.125,
0.87843137979507446, 0.87843137979507446), (0.25,
0.77254903316497803, 0.77254903316497803), (0.375,
0.62352943420410156, 0.62352943420410156), (0.5,
0.40784314274787903, 0.40784314274787903), (0.625,
0.20392157137393951, 0.20392157137393951), (0.75,
0.0039215688593685627, 0.0039215688593685627), (0.875,
0.0039215688593685627, 0.0039215688593685627), (1.0, 0.0, 0.0)],
'red': [(0.0, 1.0, 1.0), (0.125, 0.99215686321258545,
0.99215686321258545), (0.25, 0.98823529481887817,
0.98823529481887817), (0.375, 0.98039215803146362,
0.98039215803146362), (0.5, 0.9686274528503418,
0.9686274528503418), (0.625, 0.86666667461395264,
0.86666667461395264), (0.75, 0.68235296010971069,
0.68235296010971069), (0.875, 0.47843137383460999,
0.47843137383460999), (1.0, 0.28627452254295349,
0.28627452254295349)]}
_RdYlBu_data = {'blue': [(0.0, 0.14901961386203766,
0.14901961386203766), (0.10000000149011612,
0.15294118225574493, 0.15294118225574493),
(0.20000000298023224, 0.26274511218070984,
0.26274511218070984), (0.30000001192092896,
0.3803921639919281, 0.3803921639919281),
(0.40000000596046448, 0.56470590829849243,
0.56470590829849243), (0.5, 0.74901962280273438,
0.74901962280273438), (0.60000002384185791,
0.97254902124404907, 0.97254902124404907),
(0.69999998807907104, 0.91372549533843994,
0.91372549533843994), (0.80000001192092896,
0.81960785388946533, 0.81960785388946533),
(0.89999997615814209, 0.70588237047195435,
0.70588237047195435), (1.0, 0.58431375026702881,
0.58431375026702881)], 'green': [(0.0, 0.0, 0.0),
(0.10000000149011612, 0.18823529779911041,
0.18823529779911041), (0.20000000298023224,
0.42745098471641541, 0.42745098471641541),
(0.30000001192092896, 0.68235296010971069,
0.68235296010971069), (0.40000000596046448,
0.87843137979507446, 0.87843137979507446), (0.5, 1.0,
1.0), (0.60000002384185791, 0.9529411792755127,
0.9529411792755127), (0.69999998807907104,
0.85098040103912354, 0.85098040103912354),
(0.80000001192092896, 0.67843139171600342,
0.67843139171600342), (0.89999997615814209,
0.45882353186607361, 0.45882353186607361), (1.0,
0.21176470816135406, 0.21176470816135406)], 'red':
[(0.0, 0.64705884456634521, 0.64705884456634521),
(0.10000000149011612, 0.84313726425170898,
0.84313726425170898), (0.20000000298023224,
0.95686274766921997, 0.95686274766921997),
(0.30000001192092896, 0.99215686321258545,
0.99215686321258545), (0.40000000596046448,
0.99607843160629272, 0.99607843160629272), (0.5, 1.0,
1.0), (0.60000002384185791, 0.87843137979507446,
0.87843137979507446), (0.69999998807907104,
0.67058825492858887, 0.67058825492858887),
(0.80000001192092896, 0.45490196347236633,
0.45490196347236633), (0.89999997615814209,
0.27058824896812439, 0.27058824896812439), (1.0,
0.19215686619281769, 0.19215686619281769)]}
_RdYlGn_data = {'blue': [(0.0, 0.14901961386203766,
0.14901961386203766), (0.10000000000000001, 0.15294118225574493,
0.15294118225574493), (0.20000000000000001, 0.26274511218070984,
0.26274511218070984), (0.29999999999999999, 0.3803921639919281,
0.3803921639919281), (0.40000000000000002, 0.54509806632995605,
0.54509806632995605), (0.5, 0.74901962280273438, 0.74901962280273438),
(0.59999999999999998, 0.54509806632995605, 0.54509806632995605),
(0.69999999999999996, 0.41568627953529358, 0.41568627953529358),
(0.80000000000000004, 0.38823530077934265, 0.38823530077934265),
(0.90000000000000002, 0.31372550129890442, 0.31372550129890442), (1.0,
0.21568627655506134, 0.21568627655506134)],
'green': [(0.0, 0.0, 0.0), (0.10000000000000001,
0.18823529779911041, 0.18823529779911041), (0.20000000000000001,
0.42745098471641541, 0.42745098471641541), (0.29999999999999999,
0.68235296010971069, 0.68235296010971069), (0.40000000000000002,
0.87843137979507446, 0.87843137979507446), (0.5, 1.0, 1.0),
(0.59999999999999998, 0.93725490570068359, 0.93725490570068359),
(0.69999999999999996, 0.85098040103912354, 0.85098040103912354),
(0.80000000000000004, 0.74117648601531982, 0.74117648601531982),
(0.90000000000000002, 0.59607845544815063, 0.59607845544815063),
(1.0, 0.40784314274787903, 0.40784314274787903)],
'red': [(0.0, 0.64705884456634521, 0.64705884456634521),
(0.10000000000000001, 0.84313726425170898, 0.84313726425170898),
(0.20000000000000001, 0.95686274766921997, 0.95686274766921997),
(0.29999999999999999, 0.99215686321258545, 0.99215686321258545),
(0.40000000000000002, 0.99607843160629272, 0.99607843160629272),
(0.5, 1.0, 1.0), (0.59999999999999998, 0.85098040103912354,
0.85098040103912354), (0.69999999999999996, 0.65098041296005249,
0.65098041296005249), (0.80000000000000004, 0.40000000596046448,
0.40000000596046448), (0.90000000000000002, 0.10196078568696976,
0.10196078568696976), (1.0, 0.0, 0.0)]}
_Reds_data = {'blue': [(0.0, 0.94117647409439087,
0.94117647409439087), (0.125, 0.82352942228317261,
0.82352942228317261), (0.25, 0.63137257099151611,
0.63137257099151611), (0.375, 0.44705882668495178,
0.44705882668495178), (0.5, 0.29019609093666077, 0.29019609093666077),
(0.625, 0.17254902422428131, 0.17254902422428131), (0.75,
0.11372549086809158, 0.11372549086809158), (0.875,
0.08235294371843338, 0.08235294371843338), (1.0, 0.050980392843484879,
0.050980392843484879)],
'green': [(0.0, 0.96078431606292725, 0.96078431606292725), (0.125,
0.87843137979507446, 0.87843137979507446), (0.25,
0.73333334922790527, 0.73333334922790527), (0.375,
0.57254904508590698, 0.57254904508590698), (0.5,
0.41568627953529358, 0.41568627953529358), (0.625,
0.23137255012989044, 0.23137255012989044), (0.75,
0.094117648899555206, 0.094117648899555206), (0.875,
0.058823529630899429, 0.058823529630899429), (1.0, 0.0, 0.0)],
'red': [(0.0, 1.0, 1.0), (0.125, 0.99607843160629272,
0.99607843160629272), (0.25, 0.98823529481887817,
0.98823529481887817), (0.375, 0.98823529481887817,
0.98823529481887817), (0.5, 0.9843137264251709,
0.9843137264251709), (0.625, 0.93725490570068359,
0.93725490570068359), (0.75, 0.79607844352722168,
0.79607844352722168), (0.875, 0.64705884456634521,
0.64705884456634521), (1.0, 0.40392157435417175,
0.40392157435417175)]}
_Set1_data = {'blue': [(0.0, 0.10980392247438431,
0.10980392247438431), (0.125, 0.72156864404678345,
0.72156864404678345), (0.25, 0.29019609093666077,
0.29019609093666077), (0.375, 0.63921570777893066,
0.63921570777893066), (0.5, 0.0, 0.0), (0.625, 0.20000000298023224,
0.20000000298023224), (0.75, 0.15686275064945221,
0.15686275064945221), (0.875, 0.74901962280273438,
0.74901962280273438), (1.0, 0.60000002384185791,
0.60000002384185791)],
'green': [(0.0, 0.10196078568696976, 0.10196078568696976), (0.125,
0.49411764740943909, 0.49411764740943909), (0.25,
0.68627452850341797, 0.68627452850341797), (0.375,
0.30588236451148987, 0.30588236451148987), (0.5,
0.49803921580314636, 0.49803921580314636), (0.625, 1.0, 1.0),
(0.75, 0.33725491166114807, 0.33725491166114807), (0.875,
0.5058823823928833, 0.5058823823928833), (1.0,
0.60000002384185791, 0.60000002384185791)],
'red': [(0.0, 0.89411765336990356, 0.89411765336990356), (0.125,
0.21568627655506134, 0.21568627655506134), (0.25,
0.30196079611778259, 0.30196079611778259), (0.375,
0.59607845544815063, 0.59607845544815063), (0.5, 1.0, 1.0),
(0.625, 1.0, 1.0), (0.75, 0.65098041296005249,
0.65098041296005249), (0.875, 0.9686274528503418,
0.9686274528503418), (1.0, 0.60000002384185791,
0.60000002384185791)]}
_Set2_data = {'blue': [(0.0, 0.64705884456634521,
0.64705884456634521), (0.14285714285714285, 0.38431373238563538,
0.38431373238563538), (0.2857142857142857, 0.79607844352722168,
0.79607844352722168), (0.42857142857142855, 0.76470589637756348,
0.76470589637756348), (0.5714285714285714, 0.32941177487373352,
0.32941177487373352), (0.7142857142857143, 0.18431372940540314,
0.18431372940540314), (0.8571428571428571, 0.58039218187332153,
0.58039218187332153), (1.0, 0.70196080207824707,
0.70196080207824707)],
'green': [(0.0, 0.7607843279838562, 0.7607843279838562),
(0.14285714285714285, 0.55294120311737061, 0.55294120311737061),
(0.2857142857142857, 0.62745100259780884, 0.62745100259780884),
(0.42857142857142855, 0.54117649793624878, 0.54117649793624878),
(0.5714285714285714, 0.84705883264541626, 0.84705883264541626),
(0.7142857142857143, 0.85098040103912354, 0.85098040103912354),
(0.8571428571428571, 0.76862746477127075, 0.76862746477127075),
(1.0, 0.70196080207824707, 0.70196080207824707)],
'red': [(0.0, 0.40000000596046448, 0.40000000596046448),
(0.14285714285714285, 0.98823529481887817, 0.98823529481887817),
(0.2857142857142857, 0.55294120311737061, 0.55294120311737061),
(0.42857142857142855, 0.90588235855102539, 0.90588235855102539),
(0.5714285714285714, 0.65098041296005249, 0.65098041296005249),
(0.7142857142857143, 1.0, 1.0), (0.8571428571428571,
0.89803922176361084, 0.89803922176361084), (1.0,
0.70196080207824707, 0.70196080207824707)]}
_Set3_data = {'blue': [(0.0, 0.78039216995239258,
0.78039216995239258), (0.090909090909090912, 0.70196080207824707,
0.70196080207824707), (0.18181818181818182, 0.85490196943283081,
0.85490196943283081), (0.27272727272727271, 0.44705882668495178,
0.44705882668495178), (0.36363636363636365, 0.82745099067687988,
0.82745099067687988), (0.45454545454545453, 0.38431373238563538,
0.38431373238563538), (0.54545454545454541, 0.4117647111415863,
0.4117647111415863), (0.63636363636363635, 0.89803922176361084,
0.89803922176361084), (0.72727272727272729, 0.85098040103912354,
0.85098040103912354), (0.81818181818181823, 0.74117648601531982,
0.74117648601531982), (0.90909090909090906, 0.77254903316497803,
0.77254903316497803), (1.0, 0.43529412150382996,
0.43529412150382996)],
'green': [(0.0, 0.82745099067687988, 0.82745099067687988),
(0.090909090909090912, 1.0, 1.0), (0.18181818181818182,
0.729411780834198, 0.729411780834198), (0.27272727272727271,
0.50196081399917603, 0.50196081399917603), (0.36363636363636365,
0.69411766529083252, 0.69411766529083252), (0.45454545454545453,
0.70588237047195435, 0.70588237047195435), (0.54545454545454541,
0.87058824300765991, 0.87058824300765991), (0.63636363636363635,
0.80392158031463623, 0.80392158031463623), (0.72727272727272729,
0.85098040103912354, 0.85098040103912354), (0.81818181818181823,
0.50196081399917603, 0.50196081399917603), (0.90909090909090906,
0.92156863212585449, 0.92156863212585449), (1.0,
0.92941176891326904, 0.92941176891326904)],
'red': [(0.0, 0.55294120311737061, 0.55294120311737061),
(0.090909090909090912, 1.0, 1.0), (0.18181818181818182,
0.7450980544090271, 0.7450980544090271), (0.27272727272727271,
0.9843137264251709, 0.9843137264251709), (0.36363636363636365,
0.50196081399917603, 0.50196081399917603), (0.45454545454545453,
0.99215686321258545, 0.99215686321258545), (0.54545454545454541,
0.70196080207824707, 0.70196080207824707), (0.63636363636363635,
0.98823529481887817, 0.98823529481887817), (0.72727272727272729,
0.85098040103912354, 0.85098040103912354), (0.81818181818181823,
0.73725491762161255, 0.73725491762161255), (0.90909090909090906,
0.80000001192092896, 0.80000001192092896), (1.0, 1.0, 1.0)]}
_Spectral_data = {'blue': [(0.0, 0.25882354378700256,
0.25882354378700256), (0.10000000000000001, 0.30980393290519714,
0.30980393290519714), (0.20000000000000001, 0.26274511218070984,
0.26274511218070984), (0.29999999999999999, 0.3803921639919281,
0.3803921639919281), (0.40000000000000002, 0.54509806632995605,
0.54509806632995605), (0.5, 0.74901962280273438, 0.74901962280273438),
(0.59999999999999998, 0.59607845544815063, 0.59607845544815063),
(0.69999999999999996, 0.64313727617263794, 0.64313727617263794),
(0.80000000000000004, 0.64705884456634521, 0.64705884456634521),
(0.90000000000000002, 0.74117648601531982, 0.74117648601531982), (1.0,
0.63529413938522339, 0.63529413938522339)],
'green': [(0.0, 0.0039215688593685627, 0.0039215688593685627),
(0.10000000000000001, 0.24313725531101227, 0.24313725531101227),
(0.20000000000000001, 0.42745098471641541, 0.42745098471641541),
(0.29999999999999999, 0.68235296010971069, 0.68235296010971069),
(0.40000000000000002, 0.87843137979507446, 0.87843137979507446),
(0.5, 1.0, 1.0), (0.59999999999999998, 0.96078431606292725,
0.96078431606292725), (0.69999999999999996, 0.86666667461395264,
0.86666667461395264), (0.80000000000000004, 0.7607843279838562,
0.7607843279838562), (0.90000000000000002, 0.53333336114883423,
0.53333336114883423), (1.0, 0.30980393290519714,
0.30980393290519714)],
'red': [(0.0, 0.61960786581039429, 0.61960786581039429),
(0.10000000000000001, 0.83529412746429443, 0.83529412746429443),
(0.20000000000000001, 0.95686274766921997, 0.95686274766921997),
(0.29999999999999999, 0.99215686321258545, 0.99215686321258545),
(0.40000000000000002, 0.99607843160629272, 0.99607843160629272),
(0.5, 1.0, 1.0), (0.59999999999999998, 0.90196079015731812,
0.90196079015731812), (0.69999999999999996, 0.67058825492858887,
0.67058825492858887), (0.80000000000000004, 0.40000000596046448,
0.40000000596046448), (0.90000000000000002, 0.19607843458652496,
0.19607843458652496), (1.0, 0.36862745881080627,
0.36862745881080627)]}
_YlGn_data = {'blue': [(0.0, 0.89803922176361084,
0.89803922176361084), (0.125, 0.72549021244049072,
0.72549021244049072), (0.25, 0.63921570777893066,
0.63921570777893066), (0.375, 0.55686277151107788,
0.55686277151107788), (0.5, 0.47450980544090271, 0.47450980544090271),
(0.625, 0.364705890417099, 0.364705890417099), (0.75,
0.26274511218070984, 0.26274511218070984), (0.875,
0.21568627655506134, 0.21568627655506134), (1.0, 0.16078431904315948,
0.16078431904315948)],
'green': [(0.0, 1.0, 1.0), (0.125, 0.98823529481887817,
0.98823529481887817), (0.25, 0.94117647409439087,
0.94117647409439087), (0.375, 0.86666667461395264,
0.86666667461395264), (0.5, 0.7764706015586853,
0.7764706015586853), (0.625, 0.67058825492858887,
0.67058825492858887), (0.75, 0.51764708757400513,
0.51764708757400513), (0.875, 0.40784314274787903,
0.40784314274787903), (1.0, 0.27058824896812439,
0.27058824896812439)],
'red': [(0.0, 1.0, 1.0), (0.125, 0.9686274528503418,
0.9686274528503418), (0.25, 0.85098040103912354,
0.85098040103912354), (0.375, 0.67843139171600342,
0.67843139171600342), (0.5, 0.47058823704719543,
0.47058823704719543), (0.625, 0.25490197539329529,
0.25490197539329529), (0.75, 0.13725490868091583,
0.13725490868091583), (0.875, 0.0, 0.0), (1.0, 0.0, 0.0)]}
_YlGnBu_data = {'blue': [(0.0, 0.85098040103912354,
0.85098040103912354), (0.125, 0.69411766529083252,
0.69411766529083252), (0.25, 0.70588237047195435,
0.70588237047195435), (0.375, 0.73333334922790527,
0.73333334922790527), (0.5, 0.76862746477127075, 0.76862746477127075),
(0.625, 0.75294119119644165, 0.75294119119644165), (0.75,
0.65882354974746704, 0.65882354974746704), (0.875,
0.58039218187332153, 0.58039218187332153), (1.0, 0.34509804844856262,
0.34509804844856262)],
'green': [(0.0, 1.0, 1.0), (0.125, 0.97254902124404907,
0.97254902124404907), (0.25, 0.91372549533843994,
0.91372549533843994), (0.375, 0.80392158031463623,
0.80392158031463623), (0.5, 0.7137255072593689,
0.7137255072593689), (0.625, 0.56862747669219971,
0.56862747669219971), (0.75, 0.36862745881080627,
0.36862745881080627), (0.875, 0.20392157137393951,
0.20392157137393951), (1.0, 0.11372549086809158,
0.11372549086809158)],
'red': [(0.0, 1.0, 1.0), (0.125, 0.92941176891326904,
0.92941176891326904), (0.25, 0.78039216995239258,
0.78039216995239258), (0.375, 0.49803921580314636,
0.49803921580314636), (0.5, 0.25490197539329529,
0.25490197539329529), (0.625, 0.11372549086809158,
0.11372549086809158), (0.75, 0.13333334028720856,
0.13333334028720856), (0.875, 0.14509804546833038,
0.14509804546833038), (1.0, 0.031372550874948502,
0.031372550874948502)]}
_YlOrBr_data = {'blue': [(0.0, 0.89803922176361084,
0.89803922176361084), (0.125, 0.73725491762161255,
0.73725491762161255), (0.25, 0.56862747669219971,
0.56862747669219971), (0.375, 0.30980393290519714,
0.30980393290519714), (0.5, 0.16078431904315948, 0.16078431904315948),
(0.625, 0.078431375324726105, 0.078431375324726105), (0.75,
0.0078431377187371254, 0.0078431377187371254), (0.875,
0.015686275437474251, 0.015686275437474251), (1.0,
0.023529412224888802, 0.023529412224888802)],
'green': [(0.0, 1.0, 1.0), (0.125, 0.9686274528503418,
0.9686274528503418), (0.25, 0.89019608497619629,
0.89019608497619629), (0.375, 0.76862746477127075,
0.76862746477127075), (0.5, 0.60000002384185791,
0.60000002384185791), (0.625, 0.43921568989753723,
0.43921568989753723), (0.75, 0.29803922772407532,
0.29803922772407532), (0.875, 0.20392157137393951,
0.20392157137393951), (1.0, 0.14509804546833038,
0.14509804546833038)],
'red': [(0.0, 1.0, 1.0), (0.125, 1.0, 1.0), (0.25,
0.99607843160629272, 0.99607843160629272), (0.375,
0.99607843160629272, 0.99607843160629272), (0.5,
0.99607843160629272, 0.99607843160629272), (0.625,
0.92549020051956177, 0.92549020051956177), (0.75,
0.80000001192092896, 0.80000001192092896), (0.875,
0.60000002384185791, 0.60000002384185791), (1.0,
0.40000000596046448, 0.40000000596046448)]}
_YlOrRd_data = {'blue': [(0.0, 0.80000001192092896,
0.80000001192092896), (0.125, 0.62745100259780884,
0.62745100259780884), (0.25, 0.46274510025978088,
0.46274510025978088), (0.375, 0.29803922772407532,
0.29803922772407532), (0.5, 0.23529411852359772, 0.23529411852359772),
(0.625, 0.16470588743686676, 0.16470588743686676), (0.75,
0.10980392247438431, 0.10980392247438431), (0.875,
0.14901961386203766, 0.14901961386203766), (1.0, 0.14901961386203766,
0.14901961386203766)],
'green': [(0.0, 1.0, 1.0), (0.125, 0.92941176891326904,
0.92941176891326904), (0.25, 0.85098040103912354,
0.85098040103912354), (0.375, 0.69803923368453979,
0.69803923368453979), (0.5, 0.55294120311737061,
0.55294120311737061), (0.625, 0.30588236451148987,
0.30588236451148987), (0.75, 0.10196078568696976,
0.10196078568696976), (0.875, 0.0, 0.0), (1.0, 0.0, 0.0)],
'red': [(0.0, 1.0, 1.0), (0.125, 1.0, 1.0), (0.25,
0.99607843160629272, 0.99607843160629272), (0.375,
0.99607843160629272, 0.99607843160629272), (0.5,
0.99215686321258545, 0.99215686321258545), (0.625,
0.98823529481887817, 0.98823529481887817), (0.75,
0.89019608497619629, 0.89019608497619629), (0.875,
0.74117648601531982, 0.74117648601531982), (1.0,
0.50196081399917603, 0.50196081399917603)]}
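# A minimal sketch of how one channel of the segmentdata tables above is
# evaluated: piecewise-linear interpolation from y1 of the anchor below to y0
# of the anchor above. _channelValue is a hypothetical helper shown only to
# document the (x,y0,y1) semantics; matplotlib's own makeMappingArray does the
# real (vectorized) work.
def _channelValue(anchors,x):
	'Illustrative only: channel value at float *x* in [0,1], for a list of (x,y0,y1) anchors.'
	for i in range(len(anchors)-1):
		x0,y0lo,y1lo=anchors[i]; x1,y0hi,y1hi=anchors[i+1]
		if x0<=x<=x1:
			if x1==x0: return y0hi # coincident anchors: take the value from above
			return y1lo+(x-x0)/(x1-x0)*(y0hi-y1lo)
	return anchors[-1][2] # x beyond the last anchor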
# The next 7 palettes are from the Yorick scientific visualisation package,
# an evolution of the GIST package, both by David H. Munro.
# They are released under a BSD-like license (see LICENSE_YORICK in
# the license directory of the matplotlib source distribution).
_gist_earth_data = {'blue': [(0.0, 0.0, 0.0), (0.0042016808874905109,
0.18039216101169586, 0.18039216101169586), (0.0084033617749810219,
0.22745098173618317, 0.22745098173618317), (0.012605042196810246,
0.27058824896812439, 0.27058824896812439), (0.016806723549962044,
0.31764706969261169, 0.31764706969261169), (0.021008403971791267,
0.36078432202339172, 0.36078432202339172), (0.025210084393620491,
0.40784314274787903, 0.40784314274787903), (0.029411764815449715,
0.45490196347236633, 0.45490196347236633), (0.033613447099924088,
0.45490196347236633, 0.45490196347236633), (0.037815127521753311,
0.45490196347236633, 0.45490196347236633), (0.042016807943582535,
0.45490196347236633, 0.45490196347236633), (0.046218488365411758,
0.45490196347236633, 0.45490196347236633), (0.050420168787240982,
0.45882353186607361, 0.45882353186607361), (0.054621849209070206,
0.45882353186607361, 0.45882353186607361), (0.058823529630899429,
0.45882353186607361, 0.45882353186607361), (0.063025213778018951,
0.45882353186607361, 0.45882353186607361), (0.067226894199848175,
0.45882353186607361, 0.45882353186607361), (0.071428574621677399,
0.46274510025978088, 0.46274510025978088), (0.075630255043506622,
0.46274510025978088, 0.46274510025978088), (0.079831935465335846,
0.46274510025978088, 0.46274510025978088), (0.08403361588716507,
0.46274510025978088, 0.46274510025978088), (0.088235296308994293,
0.46274510025978088, 0.46274510025978088), (0.092436976730823517,
0.46666666865348816, 0.46666666865348816), (0.09663865715265274,
0.46666666865348816, 0.46666666865348816), (0.10084033757448196,
0.46666666865348816, 0.46666666865348816), (0.10504201799631119,
0.46666666865348816, 0.46666666865348816), (0.10924369841814041,
0.46666666865348816, 0.46666666865348816), (0.11344537883996964,
0.47058823704719543, 0.47058823704719543), (0.11764705926179886,
0.47058823704719543, 0.47058823704719543), (0.12184873968362808,
0.47058823704719543, 0.47058823704719543), (0.1260504275560379,
0.47058823704719543, 0.47058823704719543), (0.13025210797786713,
0.47058823704719543, 0.47058823704719543), (0.13445378839969635,
0.47450980544090271, 0.47450980544090271), (0.13865546882152557,
0.47450980544090271, 0.47450980544090271), (0.1428571492433548,
0.47450980544090271, 0.47450980544090271), (0.14705882966518402,
0.47450980544090271, 0.47450980544090271), (0.15126051008701324,
0.47450980544090271, 0.47450980544090271), (0.15546219050884247,
0.47843137383460999, 0.47843137383460999), (0.15966387093067169,
0.47843137383460999, 0.47843137383460999), (0.16386555135250092,
0.47843137383460999, 0.47843137383460999), (0.16806723177433014,
0.47843137383460999, 0.47843137383460999), (0.17226891219615936,
0.47843137383460999, 0.47843137383460999), (0.17647059261798859,
0.48235294222831726, 0.48235294222831726), (0.18067227303981781,
0.48235294222831726, 0.48235294222831726), (0.18487395346164703,
0.48235294222831726, 0.48235294222831726), (0.18907563388347626,
0.48235294222831726, 0.48235294222831726), (0.19327731430530548,
0.48235294222831726, 0.48235294222831726), (0.1974789947271347,
0.48627451062202454, 0.48627451062202454), (0.20168067514896393,
0.48627451062202454, 0.48627451062202454), (0.20588235557079315,
0.48627451062202454, 0.48627451062202454), (0.21008403599262238,
0.48627451062202454, 0.48627451062202454), (0.2142857164144516,
0.48627451062202454, 0.48627451062202454), (0.21848739683628082,
0.49019607901573181, 0.49019607901573181), (0.22268907725811005,
0.49019607901573181, 0.49019607901573181), (0.22689075767993927,
0.49019607901573181, 0.49019607901573181), (0.23109243810176849,
0.49019607901573181, 0.49019607901573181), (0.23529411852359772,
0.49019607901573181, 0.49019607901573181), (0.23949579894542694,
0.49411764740943909, 0.49411764740943909), (0.24369747936725616,
0.49411764740943909, 0.49411764740943909), (0.24789915978908539,
0.49411764740943909, 0.49411764740943909), (0.25210085511207581,
0.49411764740943909, 0.49411764740943909), (0.25630253553390503,
0.49411764740943909, 0.49411764740943909), (0.26050421595573425,
0.49803921580314636, 0.49803921580314636), (0.26470589637756348,
0.49803921580314636, 0.49803921580314636), (0.2689075767993927,
0.49803921580314636, 0.49803921580314636), (0.27310925722122192,
0.49803921580314636, 0.49803921580314636), (0.27731093764305115,
0.49803921580314636, 0.49803921580314636), (0.28151261806488037,
0.50196081399917603, 0.50196081399917603), (0.28571429848670959,
0.49411764740943909, 0.49411764740943909), (0.28991597890853882,
0.49019607901573181, 0.49019607901573181), (0.29411765933036804,
0.48627451062202454, 0.48627451062202454), (0.29831933975219727,
0.48235294222831726, 0.48235294222831726), (0.30252102017402649,
0.47843137383460999, 0.47843137383460999), (0.30672270059585571,
0.47058823704719543, 0.47058823704719543), (0.31092438101768494,
0.46666666865348816, 0.46666666865348816), (0.31512606143951416,
0.46274510025978088, 0.46274510025978088), (0.31932774186134338,
0.45882353186607361, 0.45882353186607361), (0.32352942228317261,
0.45098039507865906, 0.45098039507865906), (0.32773110270500183,
0.44705882668495178, 0.44705882668495178), (0.33193278312683105,
0.44313725829124451, 0.44313725829124451), (0.33613446354866028,
0.43529412150382996, 0.43529412150382996), (0.3403361439704895,
0.43137255311012268, 0.43137255311012268), (0.34453782439231873,
0.42745098471641541, 0.42745098471641541), (0.34873950481414795,
0.42352941632270813, 0.42352941632270813), (0.35294118523597717,
0.41568627953529358, 0.41568627953529358), (0.3571428656578064,
0.4117647111415863, 0.4117647111415863), (0.36134454607963562,
0.40784314274787903, 0.40784314274787903), (0.36554622650146484,
0.40000000596046448, 0.40000000596046448), (0.36974790692329407,
0.3960784375667572, 0.3960784375667572), (0.37394958734512329,
0.39215686917304993, 0.39215686917304993), (0.37815126776695251,
0.38431373238563538, 0.38431373238563538), (0.38235294818878174,
0.3803921639919281, 0.3803921639919281), (0.38655462861061096,
0.37647059559822083, 0.37647059559822083), (0.39075630903244019,
0.36862745881080627, 0.36862745881080627), (0.39495798945426941,
0.364705890417099, 0.364705890417099), (0.39915966987609863,
0.36078432202339172, 0.36078432202339172), (0.40336135029792786,
0.35294118523597717, 0.35294118523597717), (0.40756303071975708,
0.3490196168422699, 0.3490196168422699), (0.4117647111415863,
0.34509804844856262, 0.34509804844856262), (0.41596639156341553,
0.33725491166114807, 0.33725491166114807), (0.42016807198524475,
0.3333333432674408, 0.3333333432674408), (0.42436975240707397,
0.32941177487373352, 0.32941177487373352), (0.4285714328289032,
0.32156863808631897, 0.32156863808631897), (0.43277311325073242,
0.31764706969261169, 0.31764706969261169), (0.43697479367256165,
0.31372550129890442, 0.31372550129890442), (0.44117647409439087,
0.30588236451148987, 0.30588236451148987), (0.44537815451622009,
0.30196079611778259, 0.30196079611778259), (0.44957983493804932,
0.29803922772407532, 0.29803922772407532), (0.45378151535987854,
0.29019609093666077, 0.29019609093666077), (0.45798319578170776,
0.28627452254295349, 0.28627452254295349), (0.46218487620353699,
0.27843138575553894, 0.27843138575553894), (0.46638655662536621,
0.27450981736183167, 0.27450981736183167), (0.47058823704719543,
0.27843138575553894, 0.27843138575553894), (0.47478991746902466,
0.28235295414924622, 0.28235295414924622), (0.47899159789085388,
0.28235295414924622, 0.28235295414924622), (0.48319327831268311,
0.28627452254295349, 0.28627452254295349), (0.48739495873451233,
0.28627452254295349, 0.28627452254295349), (0.49159663915634155,
0.29019609093666077, 0.29019609093666077), (0.49579831957817078,
0.29411765933036804, 0.29411765933036804), (0.5, 0.29411765933036804,
0.29411765933036804), (0.50420171022415161, 0.29803922772407532,
0.29803922772407532), (0.50840336084365845, 0.29803922772407532,
0.29803922772407532), (0.51260507106781006, 0.30196079611778259,
0.30196079611778259), (0.51680672168731689, 0.30196079611778259,
0.30196079611778259), (0.52100843191146851, 0.30588236451148987,
0.30588236451148987), (0.52521008253097534, 0.30980393290519714,
0.30980393290519714), (0.52941179275512695, 0.30980393290519714,
0.30980393290519714), (0.53361344337463379, 0.31372550129890442,
0.31372550129890442), (0.5378151535987854, 0.31372550129890442,
0.31372550129890442), (0.54201680421829224, 0.31764706969261169,
0.31764706969261169), (0.54621851444244385, 0.32156863808631897,
0.32156863808631897), (0.55042016506195068, 0.32156863808631897,
0.32156863808631897), (0.55462187528610229, 0.32156863808631897,
0.32156863808631897), (0.55882352590560913, 0.32549020648002625,
0.32549020648002625), (0.56302523612976074, 0.32549020648002625,
0.32549020648002625), (0.56722688674926758, 0.32549020648002625,
0.32549020648002625), (0.57142859697341919, 0.32941177487373352,
0.32941177487373352), (0.57563024759292603, 0.32941177487373352,
0.32941177487373352), (0.57983195781707764, 0.32941177487373352,
0.32941177487373352), (0.58403360843658447, 0.3333333432674408,
0.3333333432674408), (0.58823531866073608, 0.3333333432674408,
0.3333333432674408), (0.59243696928024292, 0.3333333432674408,
0.3333333432674408), (0.59663867950439453, 0.33725491166114807,
0.33725491166114807), (0.60084033012390137, 0.33725491166114807,
0.33725491166114807), (0.60504204034805298, 0.33725491166114807,
0.33725491166114807), (0.60924369096755981, 0.34117648005485535,
0.34117648005485535), (0.61344540119171143, 0.34117648005485535,
0.34117648005485535), (0.61764705181121826, 0.34117648005485535,
0.34117648005485535), (0.62184876203536987, 0.34509804844856262,
0.34509804844856262), (0.62605041265487671, 0.34509804844856262,
0.34509804844856262), (0.63025212287902832, 0.34509804844856262,
0.34509804844856262), (0.63445377349853516, 0.3490196168422699,
0.3490196168422699), (0.63865548372268677, 0.3490196168422699,
0.3490196168422699), (0.6428571343421936, 0.3490196168422699,
0.3490196168422699), (0.64705884456634521, 0.35294118523597717,
0.35294118523597717), (0.65126049518585205, 0.35294118523597717,
0.35294118523597717), (0.65546220541000366, 0.35294118523597717,
0.35294118523597717), (0.6596638560295105, 0.35686275362968445,
0.35686275362968445), (0.66386556625366211, 0.35686275362968445,
0.35686275362968445), (0.66806721687316895, 0.35686275362968445,
0.35686275362968445), (0.67226892709732056, 0.36078432202339172,
0.36078432202339172), (0.67647057771682739, 0.36078432202339172,
0.36078432202339172), (0.680672287940979, 0.36078432202339172,
0.36078432202339172), (0.68487393856048584, 0.364705890417099,
0.364705890417099), (0.68907564878463745, 0.364705890417099,
0.364705890417099), (0.69327729940414429, 0.364705890417099,
0.364705890417099), (0.6974790096282959, 0.36862745881080627,
0.36862745881080627), (0.70168066024780273, 0.36862745881080627,
0.36862745881080627), (0.70588237047195435, 0.36862745881080627,
0.36862745881080627), (0.71008402109146118, 0.37254902720451355,
0.37254902720451355), (0.71428573131561279, 0.37254902720451355,
0.37254902720451355), (0.71848738193511963, 0.37254902720451355,
0.37254902720451355), (0.72268909215927124, 0.37647059559822083,
0.37647059559822083), (0.72689074277877808, 0.37647059559822083,
0.37647059559822083), (0.73109245300292969, 0.3803921639919281,
0.3803921639919281), (0.73529410362243652, 0.3803921639919281,
0.3803921639919281), (0.73949581384658813, 0.3803921639919281,
0.3803921639919281), (0.74369746446609497, 0.38431373238563538,
0.38431373238563538), (0.74789917469024658, 0.38431373238563538,
0.38431373238563538), (0.75210082530975342, 0.38431373238563538,
0.38431373238563538), (0.75630253553390503, 0.38823530077934265,
0.38823530077934265), (0.76050418615341187, 0.38823530077934265,
0.38823530077934265), (0.76470589637756348, 0.38823530077934265,
0.38823530077934265), (0.76890754699707031, 0.39215686917304993,
0.39215686917304993), (0.77310925722122192, 0.39215686917304993,
0.39215686917304993), (0.77731090784072876, 0.39215686917304993,
0.39215686917304993), (0.78151261806488037, 0.3960784375667572,
0.3960784375667572), (0.78571426868438721, 0.3960784375667572,
0.3960784375667572), (0.78991597890853882, 0.40784314274787903,
0.40784314274787903), (0.79411762952804565, 0.41568627953529358,
0.41568627953529358), (0.79831933975219727, 0.42352941632270813,
0.42352941632270813), (0.8025209903717041, 0.43529412150382996,
0.43529412150382996), (0.80672270059585571, 0.44313725829124451,
0.44313725829124451), (0.81092435121536255, 0.45490196347236633,
0.45490196347236633), (0.81512606143951416, 0.46274510025978088,
0.46274510025978088), (0.819327712059021, 0.47450980544090271,
0.47450980544090271), (0.82352942228317261, 0.48235294222831726,
0.48235294222831726), (0.82773107290267944, 0.49411764740943909,
0.49411764740943909), (0.83193278312683105, 0.5058823823928833,
0.5058823823928833), (0.83613443374633789, 0.51372551918029785,
0.51372551918029785), (0.8403361439704895, 0.52549022436141968,
0.52549022436141968), (0.84453779458999634, 0.5372549295425415,
0.5372549295425415), (0.84873950481414795, 0.54509806632995605,
0.54509806632995605), (0.85294115543365479, 0.55686277151107788,
0.55686277151107788), (0.8571428656578064, 0.56862747669219971,
0.56862747669219971), (0.86134451627731323, 0.58039218187332153,
0.58039218187332153), (0.86554622650146484, 0.58823531866073608,
0.58823531866073608), (0.86974787712097168, 0.60000002384185791,
0.60000002384185791), (0.87394958734512329, 0.61176472902297974,
0.61176472902297974), (0.87815123796463013, 0.62352943420410156,
0.62352943420410156), (0.88235294818878174, 0.63529413938522339,
0.63529413938522339), (0.88655459880828857, 0.64705884456634521,
0.64705884456634521), (0.89075630903244019, 0.65882354974746704,
0.65882354974746704), (0.89495795965194702, 0.66666668653488159,
0.66666668653488159), (0.89915966987609863, 0.67843139171600342,
0.67843139171600342), (0.90336132049560547, 0.69019609689712524,
0.69019609689712524), (0.90756303071975708, 0.70196080207824707,
0.70196080207824707), (0.91176468133926392, 0.7137255072593689,
0.7137255072593689), (0.91596639156341553, 0.72549021244049072,
0.72549021244049072), (0.92016804218292236, 0.74117648601531982,
0.74117648601531982), (0.92436975240707397, 0.75294119119644165,
0.75294119119644165), (0.92857140302658081, 0.76470589637756348,
0.76470589637756348), (0.93277311325073242, 0.7764706015586853,
0.7764706015586853), (0.93697476387023926, 0.78823530673980713,
0.78823530673980713), (0.94117647409439087, 0.80000001192092896,
0.80000001192092896), (0.94537812471389771, 0.81176471710205078,
0.81176471710205078), (0.94957983493804932, 0.82745099067687988,
0.82745099067687988), (0.95378148555755615, 0.83921569585800171,
0.83921569585800171), (0.95798319578170776, 0.85098040103912354,
0.85098040103912354), (0.9621848464012146, 0.86274510622024536,
0.86274510622024536), (0.96638655662536621, 0.87843137979507446,
0.87843137979507446), (0.97058820724487305, 0.89019608497619629,
0.89019608497619629), (0.97478991746902466, 0.90196079015731812,
0.90196079015731812), (0.97899156808853149, 0.91764706373214722,
0.91764706373214722), (0.98319327831268311, 0.92941176891326904,
0.92941176891326904), (0.98739492893218994, 0.94509804248809814,
0.94509804248809814), (0.99159663915634155, 0.95686274766921997,
0.95686274766921997), (0.99579828977584839, 0.97254902124404907,
0.97254902124404907), (1.0, 0.9843137264251709, 0.9843137264251709)],
'green': [(0.0, 0.0, 0.0), (0.0042016808874905109, 0.0, 0.0),
(0.0084033617749810219, 0.0, 0.0), (0.012605042196810246, 0.0, 0.0),
(0.016806723549962044, 0.0, 0.0), (0.021008403971791267, 0.0, 0.0),
(0.025210084393620491, 0.0, 0.0), (0.029411764815449715, 0.0, 0.0),
(0.033613447099924088, 0.011764706112444401, 0.011764706112444401),
(0.037815127521753311, 0.023529412224888802, 0.023529412224888802),
(0.042016807943582535, 0.031372550874948502, 0.031372550874948502),
(0.046218488365411758, 0.043137256056070328, 0.043137256056070328),
(0.050420168787240982, 0.050980392843484879, 0.050980392843484879),
(0.054621849209070206, 0.062745101749897003, 0.062745101749897003),
(0.058823529630899429, 0.070588238537311554, 0.070588238537311554),
(0.063025213778018951, 0.08235294371843338, 0.08235294371843338),
(0.067226894199848175, 0.090196080505847931, 0.090196080505847931),
(0.071428574621677399, 0.10196078568696976, 0.10196078568696976),
(0.075630255043506622, 0.10980392247438431, 0.10980392247438431),
(0.079831935465335846, 0.12156862765550613, 0.12156862765550613),
(0.08403361588716507, 0.12941177189350128, 0.12941177189350128),
(0.088235296308994293, 0.14117647707462311, 0.14117647707462311),
(0.092436976730823517, 0.14901961386203766, 0.14901961386203766),
(0.09663865715265274, 0.16078431904315948, 0.16078431904315948),
(0.10084033757448196, 0.16862745583057404, 0.16862745583057404),
(0.10504201799631119, 0.17647059261798859, 0.17647059261798859),
(0.10924369841814041, 0.18823529779911041, 0.18823529779911041),
(0.11344537883996964, 0.19607843458652496, 0.19607843458652496),
(0.11764705926179886, 0.20392157137393951, 0.20392157137393951),
(0.12184873968362808, 0.21568627655506134, 0.21568627655506134),
(0.1260504275560379, 0.22352941334247589, 0.22352941334247589),
(0.13025210797786713, 0.23137255012989044, 0.23137255012989044),
(0.13445378839969635, 0.23921568691730499, 0.23921568691730499),
(0.13865546882152557, 0.25098040699958801, 0.25098040699958801),
(0.1428571492433548, 0.25882354378700256, 0.25882354378700256),
(0.14705882966518402, 0.26666668057441711, 0.26666668057441711),
(0.15126051008701324, 0.27450981736183167, 0.27450981736183167),
(0.15546219050884247, 0.28235295414924622, 0.28235295414924622),
(0.15966387093067169, 0.29019609093666077, 0.29019609093666077),
(0.16386555135250092, 0.30196079611778259, 0.30196079611778259),
(0.16806723177433014, 0.30980393290519714, 0.30980393290519714),
(0.17226891219615936, 0.31764706969261169, 0.31764706969261169),
(0.17647059261798859, 0.32549020648002625, 0.32549020648002625),
(0.18067227303981781, 0.3333333432674408, 0.3333333432674408),
(0.18487395346164703, 0.34117648005485535, 0.34117648005485535),
(0.18907563388347626, 0.3490196168422699, 0.3490196168422699),
(0.19327731430530548, 0.35686275362968445, 0.35686275362968445),
(0.1974789947271347, 0.364705890417099, 0.364705890417099),
(0.20168067514896393, 0.37254902720451355, 0.37254902720451355),
(0.20588235557079315, 0.3803921639919281, 0.3803921639919281),
(0.21008403599262238, 0.38823530077934265, 0.38823530077934265),
(0.2142857164144516, 0.39215686917304993, 0.39215686917304993),
(0.21848739683628082, 0.40000000596046448, 0.40000000596046448),
(0.22268907725811005, 0.40784314274787903, 0.40784314274787903),
(0.22689075767993927, 0.41568627953529358, 0.41568627953529358),
(0.23109243810176849, 0.42352941632270813, 0.42352941632270813),
(0.23529411852359772, 0.42745098471641541, 0.42745098471641541),
(0.23949579894542694, 0.43529412150382996, 0.43529412150382996),
(0.24369747936725616, 0.44313725829124451, 0.44313725829124451),
(0.24789915978908539, 0.45098039507865906, 0.45098039507865906),
(0.25210085511207581, 0.45490196347236633, 0.45490196347236633),
(0.25630253553390503, 0.46274510025978088, 0.46274510025978088),
(0.26050421595573425, 0.47058823704719543, 0.47058823704719543),
(0.26470589637756348, 0.47450980544090271, 0.47450980544090271),
(0.2689075767993927, 0.48235294222831726, 0.48235294222831726),
(0.27310925722122192, 0.49019607901573181, 0.49019607901573181),
(0.27731093764305115, 0.49411764740943909, 0.49411764740943909),
(0.28151261806488037, 0.50196081399917603, 0.50196081399917603),
(0.28571429848670959, 0.50196081399917603, 0.50196081399917603),
(0.28991597890853882, 0.5058823823928833, 0.5058823823928833),
(0.29411765933036804, 0.5058823823928833, 0.5058823823928833),
(0.29831933975219727, 0.50980395078659058, 0.50980395078659058),
(0.30252102017402649, 0.51372551918029785, 0.51372551918029785),
(0.30672270059585571, 0.51372551918029785, 0.51372551918029785),
(0.31092438101768494, 0.51764708757400513, 0.51764708757400513),
(0.31512606143951416, 0.5215686559677124, 0.5215686559677124),
(0.31932774186134338, 0.5215686559677124, 0.5215686559677124),
(0.32352942228317261, 0.52549022436141968, 0.52549022436141968),
(0.32773110270500183, 0.52549022436141968, 0.52549022436141968),
(0.33193278312683105, 0.52941179275512695, 0.52941179275512695),
(0.33613446354866028, 0.53333336114883423, 0.53333336114883423),
(0.3403361439704895, 0.53333336114883423, 0.53333336114883423),
(0.34453782439231873, 0.5372549295425415, 0.5372549295425415),
(0.34873950481414795, 0.54117649793624878, 0.54117649793624878),
(0.35294118523597717, 0.54117649793624878, 0.54117649793624878),
(0.3571428656578064, 0.54509806632995605, 0.54509806632995605),
(0.36134454607963562, 0.54901963472366333, 0.54901963472366333),
(0.36554622650146484, 0.54901963472366333, 0.54901963472366333),
(0.36974790692329407, 0.55294120311737061, 0.55294120311737061),
(0.37394958734512329, 0.55294120311737061, 0.55294120311737061),
(0.37815126776695251, 0.55686277151107788, 0.55686277151107788),
(0.38235294818878174, 0.56078433990478516, 0.56078433990478516),
(0.38655462861061096, 0.56078433990478516, 0.56078433990478516),
(0.39075630903244019, 0.56470590829849243, 0.56470590829849243),
(0.39495798945426941, 0.56862747669219971, 0.56862747669219971),
(0.39915966987609863, 0.56862747669219971, 0.56862747669219971),
(0.40336135029792786, 0.57254904508590698, 0.57254904508590698),
(0.40756303071975708, 0.57254904508590698, 0.57254904508590698),
(0.4117647111415863, 0.57647061347961426, 0.57647061347961426),
(0.41596639156341553, 0.58039218187332153, 0.58039218187332153),
(0.42016807198524475, 0.58039218187332153, 0.58039218187332153),
(0.42436975240707397, 0.58431375026702881, 0.58431375026702881),
(0.4285714328289032, 0.58823531866073608, 0.58823531866073608),
(0.43277311325073242, 0.58823531866073608, 0.58823531866073608),
(0.43697479367256165, 0.59215688705444336, 0.59215688705444336),
(0.44117647409439087, 0.59215688705444336, 0.59215688705444336),
(0.44537815451622009, 0.59607845544815063, 0.59607845544815063),
(0.44957983493804932, 0.60000002384185791, 0.60000002384185791),
(0.45378151535987854, 0.60000002384185791, 0.60000002384185791),
(0.45798319578170776, 0.60392159223556519, 0.60392159223556519),
(0.46218487620353699, 0.60784316062927246, 0.60784316062927246),
(0.46638655662536621, 0.60784316062927246, 0.60784316062927246),
(0.47058823704719543, 0.61176472902297974, 0.61176472902297974),
(0.47478991746902466, 0.61176472902297974, 0.61176472902297974),
(0.47899159789085388, 0.61568629741668701, 0.61568629741668701),
(0.48319327831268311, 0.61960786581039429, 0.61960786581039429),
(0.48739495873451233, 0.61960786581039429, 0.61960786581039429),
(0.49159663915634155, 0.62352943420410156, 0.62352943420410156),
(0.49579831957817078, 0.62745100259780884, 0.62745100259780884), (0.5,
0.62745100259780884, 0.62745100259780884), (0.50420171022415161,
0.63137257099151611, 0.63137257099151611), (0.50840336084365845,
0.63137257099151611, 0.63137257099151611), (0.51260507106781006,
0.63529413938522339, 0.63529413938522339), (0.51680672168731689,
0.63921570777893066, 0.63921570777893066), (0.52100843191146851,
0.63921570777893066, 0.63921570777893066), (0.52521008253097534,
0.64313727617263794, 0.64313727617263794), (0.52941179275512695,
0.64705884456634521, 0.64705884456634521), (0.53361344337463379,
0.64705884456634521, 0.64705884456634521), (0.5378151535987854,
0.65098041296005249, 0.65098041296005249), (0.54201680421829224,
0.65098041296005249, 0.65098041296005249), (0.54621851444244385,
0.65490198135375977, 0.65490198135375977), (0.55042016506195068,
0.65882354974746704, 0.65882354974746704), (0.55462187528610229,
0.65882354974746704, 0.65882354974746704), (0.55882352590560913,
0.65882354974746704, 0.65882354974746704), (0.56302523612976074,
0.66274511814117432, 0.66274511814117432), (0.56722688674926758,
0.66274511814117432, 0.66274511814117432), (0.57142859697341919,
0.66666668653488159, 0.66666668653488159), (0.57563024759292603,
0.66666668653488159, 0.66666668653488159), (0.57983195781707764,
0.67058825492858887, 0.67058825492858887), (0.58403360843658447,
0.67058825492858887, 0.67058825492858887), (0.58823531866073608,
0.67450982332229614, 0.67450982332229614), (0.59243696928024292,
0.67450982332229614, 0.67450982332229614), (0.59663867950439453,
0.67450982332229614, 0.67450982332229614), (0.60084033012390137,
0.67843139171600342, 0.67843139171600342), (0.60504204034805298,
0.67843139171600342, 0.67843139171600342), (0.60924369096755981,
0.68235296010971069, 0.68235296010971069), (0.61344540119171143,
0.68235296010971069, 0.68235296010971069), (0.61764705181121826,
0.68627452850341797, 0.68627452850341797), (0.62184876203536987,
0.68627452850341797, 0.68627452850341797), (0.62605041265487671,
0.68627452850341797, 0.68627452850341797), (0.63025212287902832,
0.69019609689712524, 0.69019609689712524), (0.63445377349853516,
0.69019609689712524, 0.69019609689712524), (0.63865548372268677,
0.69411766529083252, 0.69411766529083252), (0.6428571343421936,
0.69411766529083252, 0.69411766529083252), (0.64705884456634521,
0.69803923368453979, 0.69803923368453979), (0.65126049518585205,
0.69803923368453979, 0.69803923368453979), (0.65546220541000366,
0.70196080207824707, 0.70196080207824707), (0.6596638560295105,
0.70196080207824707, 0.70196080207824707), (0.66386556625366211,
0.70196080207824707, 0.70196080207824707), (0.66806721687316895,
0.70588237047195435, 0.70588237047195435), (0.67226892709732056,
0.70588237047195435, 0.70588237047195435), (0.67647057771682739,
0.70980393886566162, 0.70980393886566162), (0.680672287940979,
0.70980393886566162, 0.70980393886566162), (0.68487393856048584,
0.7137255072593689, 0.7137255072593689), (0.68907564878463745,
0.7137255072593689, 0.7137255072593689), (0.69327729940414429,
0.71764707565307617, 0.71764707565307617), (0.6974790096282959,
0.71764707565307617, 0.71764707565307617), (0.70168066024780273,
0.7137255072593689, 0.7137255072593689), (0.70588237047195435,
0.70980393886566162, 0.70980393886566162), (0.71008402109146118,
0.70980393886566162, 0.70980393886566162), (0.71428573131561279,
0.70588237047195435, 0.70588237047195435), (0.71848738193511963,
0.70196080207824707, 0.70196080207824707), (0.72268909215927124,
0.69803923368453979, 0.69803923368453979), (0.72689074277877808,
0.69411766529083252, 0.69411766529083252), (0.73109245300292969,
0.69019609689712524, 0.69019609689712524), (0.73529410362243652,
0.68627452850341797, 0.68627452850341797), (0.73949581384658813,
0.68235296010971069, 0.68235296010971069), (0.74369746446609497,
0.67843139171600342, 0.67843139171600342), (0.74789917469024658,
0.67450982332229614, 0.67450982332229614), (0.75210082530975342,
0.67058825492858887, 0.67058825492858887), (0.75630253553390503,
0.66666668653488159, 0.66666668653488159), (0.76050418615341187,
0.66274511814117432, 0.66274511814117432), (0.76470589637756348,
0.65882354974746704, 0.65882354974746704), (0.76890754699707031,
0.65490198135375977, 0.65490198135375977), (0.77310925722122192,
0.65098041296005249, 0.65098041296005249), (0.77731090784072876,
0.64705884456634521, 0.64705884456634521), (0.78151261806488037,
0.64313727617263794, 0.64313727617263794), (0.78571426868438721,
0.63921570777893066, 0.63921570777893066), (0.78991597890853882,
0.63921570777893066, 0.63921570777893066), (0.79411762952804565,
0.64313727617263794, 0.64313727617263794), (0.79831933975219727,
0.64313727617263794, 0.64313727617263794), (0.8025209903717041,
0.64705884456634521, 0.64705884456634521), (0.80672270059585571,
0.64705884456634521, 0.64705884456634521), (0.81092435121536255,
0.65098041296005249, 0.65098041296005249), (0.81512606143951416,
0.65490198135375977, 0.65490198135375977), (0.819327712059021,
0.65490198135375977, 0.65490198135375977), (0.82352942228317261,
0.65882354974746704, 0.65882354974746704), (0.82773107290267944,
0.66274511814117432, 0.66274511814117432), (0.83193278312683105,
0.66666668653488159, 0.66666668653488159), (0.83613443374633789,
0.67058825492858887, 0.67058825492858887), (0.8403361439704895,
0.67450982332229614, 0.67450982332229614), (0.84453779458999634,
0.67843139171600342, 0.67843139171600342), (0.84873950481414795,
0.68235296010971069, 0.68235296010971069), (0.85294115543365479,
0.68627452850341797, 0.68627452850341797), (0.8571428656578064,
0.69019609689712524, 0.69019609689712524), (0.86134451627731323,
0.69411766529083252, 0.69411766529083252), (0.86554622650146484,
0.69803923368453979, 0.69803923368453979), (0.86974787712097168,
0.70196080207824707, 0.70196080207824707), (0.87394958734512329,
0.70980393886566162, 0.70980393886566162), (0.87815123796463013,
0.7137255072593689, 0.7137255072593689), (0.88235294818878174,
0.72156864404678345, 0.72156864404678345), (0.88655459880828857,
0.72549021244049072, 0.72549021244049072), (0.89075630903244019,
0.73333334922790527, 0.73333334922790527), (0.89495795965194702,
0.73725491762161255, 0.73725491762161255), (0.89915966987609863,
0.7450980544090271, 0.7450980544090271), (0.90336132049560547,
0.75294119119644165, 0.75294119119644165), (0.90756303071975708,
0.7607843279838562, 0.7607843279838562), (0.91176468133926392,
0.76862746477127075, 0.76862746477127075), (0.91596639156341553,
0.7764706015586853, 0.7764706015586853), (0.92016804218292236,
0.78431373834609985, 0.78431373834609985), (0.92436975240707397,
0.7921568751335144, 0.7921568751335144), (0.92857140302658081,
0.80000001192092896, 0.80000001192092896), (0.93277311325073242,
0.80784314870834351, 0.80784314870834351), (0.93697476387023926,
0.81568628549575806, 0.81568628549575806), (0.94117647409439087,
0.82745099067687988, 0.82745099067687988), (0.94537812471389771,
0.83529412746429443, 0.83529412746429443), (0.94957983493804932,
0.84313726425170898, 0.84313726425170898), (0.95378148555755615,
0.85490196943283081, 0.85490196943283081), (0.95798319578170776,
0.86666667461395264, 0.86666667461395264), (0.9621848464012146,
0.87450981140136719, 0.87450981140136719), (0.96638655662536621,
0.88627451658248901, 0.88627451658248901), (0.97058820724487305,
0.89803922176361084, 0.89803922176361084), (0.97478991746902466,
0.90980392694473267, 0.90980392694473267), (0.97899156808853149,
0.92156863212585449, 0.92156863212585449), (0.98319327831268311,
0.93333333730697632, 0.93333333730697632), (0.98739492893218994,
0.94509804248809814, 0.94509804248809814), (0.99159663915634155,
0.95686274766921997, 0.95686274766921997), (0.99579828977584839,
0.97254902124404907, 0.97254902124404907), (1.0, 0.9843137264251709,
0.9843137264251709)], 'red': [(0.0, 0.0, 0.0), (0.0042016808874905109,
0.0, 0.0), (0.0084033617749810219, 0.0, 0.0), (0.012605042196810246, 0.0,
0.0), (0.016806723549962044, 0.0, 0.0), (0.021008403971791267, 0.0, 0.0),
(0.025210084393620491, 0.0, 0.0), (0.029411764815449715, 0.0, 0.0),
(0.033613447099924088, 0.0, 0.0), (0.037815127521753311,
0.0039215688593685627, 0.0039215688593685627), (0.042016807943582535,
0.0078431377187371254, 0.0078431377187371254), (0.046218488365411758,
0.0078431377187371254, 0.0078431377187371254), (0.050420168787240982,
0.011764706112444401, 0.011764706112444401), (0.054621849209070206,
0.015686275437474251, 0.015686275437474251), (0.058823529630899429,
0.019607843831181526, 0.019607843831181526), (0.063025213778018951,
0.019607843831181526, 0.019607843831181526), (0.067226894199848175,
0.023529412224888802, 0.023529412224888802), (0.071428574621677399,
0.027450980618596077, 0.027450980618596077), (0.075630255043506622,
0.031372550874948502, 0.031372550874948502), (0.079831935465335846,
0.031372550874948502, 0.031372550874948502), (0.08403361588716507,
0.035294119268655777, 0.035294119268655777), (0.088235296308994293,
0.039215687662363052, 0.039215687662363052), (0.092436976730823517,
0.043137256056070328, 0.043137256056070328), (0.09663865715265274,
0.043137256056070328, 0.043137256056070328), (0.10084033757448196,
0.047058824449777603, 0.047058824449777603), (0.10504201799631119,
0.050980392843484879, 0.050980392843484879), (0.10924369841814041,
0.054901961237192154, 0.054901961237192154), (0.11344537883996964,
0.058823529630899429, 0.058823529630899429), (0.11764705926179886,
0.058823529630899429, 0.058823529630899429), (0.12184873968362808,
0.062745101749897003, 0.062745101749897003), (0.1260504275560379,
0.066666670143604279, 0.066666670143604279), (0.13025210797786713,
0.070588238537311554, 0.070588238537311554), (0.13445378839969635,
0.070588238537311554, 0.070588238537311554), (0.13865546882152557,
0.074509806931018829, 0.074509806931018829), (0.1428571492433548,
0.078431375324726105, 0.078431375324726105), (0.14705882966518402,
0.08235294371843338, 0.08235294371843338), (0.15126051008701324,
0.086274512112140656, 0.086274512112140656), (0.15546219050884247,
0.086274512112140656, 0.086274512112140656), (0.15966387093067169,
0.090196080505847931, 0.090196080505847931), (0.16386555135250092,
0.094117648899555206, 0.094117648899555206), (0.16806723177433014,
0.098039217293262482, 0.098039217293262482), (0.17226891219615936,
0.10196078568696976, 0.10196078568696976), (0.17647059261798859,
0.10196078568696976, 0.10196078568696976), (0.18067227303981781,
0.10588235408067703, 0.10588235408067703), (0.18487395346164703,
0.10980392247438431, 0.10980392247438431), (0.18907563388347626,
0.11372549086809158, 0.11372549086809158), (0.19327731430530548,
0.11764705926179886, 0.11764705926179886), (0.1974789947271347,
0.12156862765550613, 0.12156862765550613), (0.20168067514896393,
0.12156862765550613, 0.12156862765550613), (0.20588235557079315,
0.12549020349979401, 0.12549020349979401), (0.21008403599262238,
0.12941177189350128, 0.12941177189350128), (0.2142857164144516,
0.13333334028720856, 0.13333334028720856), (0.21848739683628082,
0.13725490868091583, 0.13725490868091583), (0.22268907725811005,
0.14117647707462311, 0.14117647707462311), (0.22689075767993927,
0.14117647707462311, 0.14117647707462311), (0.23109243810176849,
0.14509804546833038, 0.14509804546833038), (0.23529411852359772,
0.14901961386203766, 0.14901961386203766), (0.23949579894542694,
0.15294118225574493, 0.15294118225574493), (0.24369747936725616,
0.15686275064945221, 0.15686275064945221), (0.24789915978908539,
0.16078431904315948, 0.16078431904315948), (0.25210085511207581,
0.16078431904315948, 0.16078431904315948), (0.25630253553390503,
0.16470588743686676, 0.16470588743686676), (0.26050421595573425,
0.16862745583057404, 0.16862745583057404), (0.26470589637756348,
0.17254902422428131, 0.17254902422428131), (0.2689075767993927,
0.17647059261798859, 0.17647059261798859), (0.27310925722122192,
0.18039216101169586, 0.18039216101169586), (0.27731093764305115,
0.18431372940540314, 0.18431372940540314), (0.28151261806488037,
0.18823529779911041, 0.18823529779911041), (0.28571429848670959,
0.18823529779911041, 0.18823529779911041), (0.28991597890853882,
0.18823529779911041, 0.18823529779911041), (0.29411765933036804,
0.19215686619281769, 0.19215686619281769), (0.29831933975219727,
0.19215686619281769, 0.19215686619281769), (0.30252102017402649,
0.19607843458652496, 0.19607843458652496), (0.30672270059585571,
0.19607843458652496, 0.19607843458652496), (0.31092438101768494,
0.20000000298023224, 0.20000000298023224), (0.31512606143951416,
0.20000000298023224, 0.20000000298023224), (0.31932774186134338,
0.20392157137393951, 0.20392157137393951), (0.32352942228317261,
0.20392157137393951, 0.20392157137393951), (0.32773110270500183,
0.20784313976764679, 0.20784313976764679), (0.33193278312683105,
0.20784313976764679, 0.20784313976764679), (0.33613446354866028,
0.21176470816135406, 0.21176470816135406), (0.3403361439704895,
0.21176470816135406, 0.21176470816135406), (0.34453782439231873,
0.21568627655506134, 0.21568627655506134), (0.34873950481414795,
0.21568627655506134, 0.21568627655506134), (0.35294118523597717,
0.21960784494876862, 0.21960784494876862), (0.3571428656578064,
0.21960784494876862, 0.21960784494876862), (0.36134454607963562,
0.22352941334247589, 0.22352941334247589), (0.36554622650146484,
0.22352941334247589, 0.22352941334247589), (0.36974790692329407,
0.22745098173618317, 0.22745098173618317), (0.37394958734512329,
0.22745098173618317, 0.22745098173618317), (0.37815126776695251,
0.23137255012989044, 0.23137255012989044), (0.38235294818878174,
0.23137255012989044, 0.23137255012989044), (0.38655462861061096,
0.23529411852359772, 0.23529411852359772), (0.39075630903244019,
0.23921568691730499, 0.23921568691730499), (0.39495798945426941,
0.23921568691730499, 0.23921568691730499), (0.39915966987609863,
0.24313725531101227, 0.24313725531101227), (0.40336135029792786,
0.24313725531101227, 0.24313725531101227), (0.40756303071975708,
0.24705882370471954, 0.24705882370471954), (0.4117647111415863,
0.24705882370471954, 0.24705882370471954), (0.41596639156341553,
0.25098040699958801, 0.25098040699958801), (0.42016807198524475,
0.25098040699958801, 0.25098040699958801), (0.42436975240707397,
0.25490197539329529, 0.25490197539329529), (0.4285714328289032,
0.25490197539329529, 0.25490197539329529), (0.43277311325073242,
0.25882354378700256, 0.25882354378700256), (0.43697479367256165,
0.26274511218070984, 0.26274511218070984), (0.44117647409439087,
0.26274511218070984, 0.26274511218070984), (0.44537815451622009,
0.26666668057441711, 0.26666668057441711), (0.44957983493804932,
0.26666668057441711, 0.26666668057441711), (0.45378151535987854,
0.27058824896812439, 0.27058824896812439), (0.45798319578170776,
0.27058824896812439, 0.27058824896812439), (0.46218487620353699,
0.27450981736183167, 0.27450981736183167), (0.46638655662536621,
0.27843138575553894, 0.27843138575553894), (0.47058823704719543,
0.28627452254295349, 0.28627452254295349), (0.47478991746902466,
0.29803922772407532, 0.29803922772407532), (0.47899159789085388,
0.30588236451148987, 0.30588236451148987), (0.48319327831268311,
0.31764706969261169, 0.31764706969261169), (0.48739495873451233,
0.32549020648002625, 0.32549020648002625), (0.49159663915634155,
0.33725491166114807, 0.33725491166114807), (0.49579831957817078,
0.34509804844856262, 0.34509804844856262), (0.5, 0.35686275362968445,
0.35686275362968445), (0.50420171022415161, 0.36862745881080627,
0.36862745881080627), (0.50840336084365845, 0.37647059559822083,
0.37647059559822083), (0.51260507106781006, 0.38823530077934265,
0.38823530077934265), (0.51680672168731689, 0.3960784375667572,
0.3960784375667572), (0.52100843191146851, 0.40784314274787903,
0.40784314274787903), (0.52521008253097534, 0.41568627953529358,
0.41568627953529358), (0.52941179275512695, 0.42745098471641541,
0.42745098471641541), (0.53361344337463379, 0.43529412150382996,
0.43529412150382996), (0.5378151535987854, 0.44705882668495178,
0.44705882668495178), (0.54201680421829224, 0.45882353186607361,
0.45882353186607361), (0.54621851444244385, 0.46666666865348816,
0.46666666865348816), (0.55042016506195068, 0.47450980544090271,
0.47450980544090271), (0.55462187528610229, 0.47843137383460999,
0.47843137383460999), (0.55882352590560913, 0.48627451062202454,
0.48627451062202454), (0.56302523612976074, 0.49411764740943909,
0.49411764740943909), (0.56722688674926758, 0.50196081399917603,
0.50196081399917603), (0.57142859697341919, 0.5058823823928833,
0.5058823823928833), (0.57563024759292603, 0.51372551918029785,
0.51372551918029785), (0.57983195781707764, 0.5215686559677124,
0.5215686559677124), (0.58403360843658447, 0.52941179275512695,
0.52941179275512695), (0.58823531866073608, 0.53333336114883423,
0.53333336114883423), (0.59243696928024292, 0.54117649793624878,
0.54117649793624878), (0.59663867950439453, 0.54901963472366333,
0.54901963472366333), (0.60084033012390137, 0.55294120311737061,
0.55294120311737061), (0.60504204034805298, 0.56078433990478516,
0.56078433990478516), (0.60924369096755981, 0.56862747669219971,
0.56862747669219971), (0.61344540119171143, 0.57647061347961426,
0.57647061347961426), (0.61764705181121826, 0.58431375026702881,
0.58431375026702881), (0.62184876203536987, 0.58823531866073608,
0.58823531866073608), (0.62605041265487671, 0.59607845544815063,
0.59607845544815063), (0.63025212287902832, 0.60392159223556519,
0.60392159223556519), (0.63445377349853516, 0.61176472902297974,
0.61176472902297974), (0.63865548372268677, 0.61568629741668701,
0.61568629741668701), (0.6428571343421936, 0.62352943420410156,
0.62352943420410156), (0.64705884456634521, 0.63137257099151611,
0.63137257099151611), (0.65126049518585205, 0.63921570777893066,
0.63921570777893066), (0.65546220541000366, 0.64705884456634521,
0.64705884456634521), (0.6596638560295105, 0.65098041296005249,
0.65098041296005249), (0.66386556625366211, 0.65882354974746704,
0.65882354974746704), (0.66806721687316895, 0.66666668653488159,
0.66666668653488159), (0.67226892709732056, 0.67450982332229614,
0.67450982332229614), (0.67647057771682739, 0.68235296010971069,
0.68235296010971069), (0.680672287940979, 0.68627452850341797,
0.68627452850341797), (0.68487393856048584, 0.69411766529083252,
0.69411766529083252), (0.68907564878463745, 0.70196080207824707,
0.70196080207824707), (0.69327729940414429, 0.70980393886566162,
0.70980393886566162), (0.6974790096282959, 0.71764707565307617,
0.71764707565307617), (0.70168066024780273, 0.71764707565307617,
0.71764707565307617), (0.70588237047195435, 0.72156864404678345,
0.72156864404678345), (0.71008402109146118, 0.72156864404678345,
0.72156864404678345), (0.71428573131561279, 0.72549021244049072,
0.72549021244049072), (0.71848738193511963, 0.72549021244049072,
0.72549021244049072), (0.72268909215927124, 0.729411780834198,
0.729411780834198), (0.72689074277877808, 0.729411780834198,
0.729411780834198), (0.73109245300292969, 0.73333334922790527,
0.73333334922790527), (0.73529410362243652, 0.73333334922790527,
0.73333334922790527), (0.73949581384658813, 0.73333334922790527,
0.73333334922790527), (0.74369746446609497, 0.73725491762161255,
0.73725491762161255), (0.74789917469024658, 0.73725491762161255,
0.73725491762161255), (0.75210082530975342, 0.74117648601531982,
0.74117648601531982), (0.75630253553390503, 0.74117648601531982,
0.74117648601531982), (0.76050418615341187, 0.7450980544090271,
0.7450980544090271), (0.76470589637756348, 0.7450980544090271,
0.7450980544090271), (0.76890754699707031, 0.7450980544090271,
0.7450980544090271), (0.77310925722122192, 0.74901962280273438,
0.74901962280273438), (0.77731090784072876, 0.74901962280273438,
0.74901962280273438), (0.78151261806488037, 0.75294119119644165,
0.75294119119644165), (0.78571426868438721, 0.75294119119644165,
0.75294119119644165), (0.78991597890853882, 0.75686275959014893,
0.75686275959014893), (0.79411762952804565, 0.76470589637756348,
0.76470589637756348), (0.79831933975219727, 0.76862746477127075,
0.76862746477127075), (0.8025209903717041, 0.77254903316497803,
0.77254903316497803), (0.80672270059585571, 0.7764706015586853,
0.7764706015586853), (0.81092435121536255, 0.78039216995239258,
0.78039216995239258), (0.81512606143951416, 0.78823530673980713,
0.78823530673980713), (0.819327712059021, 0.7921568751335144,
0.7921568751335144), (0.82352942228317261, 0.79607844352722168,
0.79607844352722168), (0.82773107290267944, 0.80000001192092896,
0.80000001192092896), (0.83193278312683105, 0.80392158031463623,
0.80392158031463623), (0.83613443374633789, 0.81176471710205078,
0.81176471710205078), (0.8403361439704895, 0.81568628549575806,
0.81568628549575806), (0.84453779458999634, 0.81960785388946533,
0.81960785388946533), (0.84873950481414795, 0.82352942228317261,
0.82352942228317261), (0.85294115543365479, 0.82745099067687988,
0.82745099067687988), (0.8571428656578064, 0.83529412746429443,
0.83529412746429443), (0.86134451627731323, 0.83921569585800171,
0.83921569585800171), (0.86554622650146484, 0.84313726425170898,
0.84313726425170898), (0.86974787712097168, 0.84705883264541626,
0.84705883264541626), (0.87394958734512329, 0.85098040103912354,
0.85098040103912354), (0.87815123796463013, 0.85882353782653809,
0.85882353782653809), (0.88235294818878174, 0.86274510622024536,
0.86274510622024536), (0.88655459880828857, 0.86666667461395264,
0.86666667461395264), (0.89075630903244019, 0.87058824300765991,
0.87058824300765991), (0.89495795965194702, 0.87450981140136719,
0.87450981140136719), (0.89915966987609863, 0.88235294818878174,
0.88235294818878174), (0.90336132049560547, 0.88627451658248901,
0.88627451658248901), (0.90756303071975708, 0.89019608497619629,
0.89019608497619629), (0.91176468133926392, 0.89411765336990356,
0.89411765336990356), (0.91596639156341553, 0.89803922176361084,
0.89803922176361084), (0.92016804218292236, 0.90588235855102539,
0.90588235855102539), (0.92436975240707397, 0.90980392694473267,
0.90980392694473267), (0.92857140302658081, 0.91372549533843994,
0.91372549533843994), (0.93277311325073242, 0.91764706373214722,
0.91764706373214722), (0.93697476387023926, 0.92156863212585449,
0.92156863212585449), (0.94117647409439087, 0.92941176891326904,
0.92941176891326904), (0.94537812471389771, 0.93333333730697632,
0.93333333730697632), (0.94957983493804932, 0.93725490570068359,
0.93725490570068359), (0.95378148555755615, 0.94117647409439087,
0.94117647409439087), (0.95798319578170776, 0.94509804248809814,
0.94509804248809814), (0.9621848464012146, 0.9529411792755127,
0.9529411792755127), (0.96638655662536621, 0.95686274766921997,
0.95686274766921997), (0.97058820724487305, 0.96078431606292725,
0.96078431606292725), (0.97478991746902466, 0.96470588445663452,
0.96470588445663452), (0.97899156808853149, 0.9686274528503418,
0.9686274528503418), (0.98319327831268311, 0.97647058963775635,
0.97647058963775635), (0.98739492893218994, 0.98039215803146362,
0.98039215803146362), (0.99159663915634155, 0.9843137264251709,
0.9843137264251709), (0.99579828977584839, 0.98823529481887817,
0.98823529481887817), (1.0, 0.99215686321258545, 0.99215686321258545)]}
_gist_gray_data = {'blue': [(0.0, 0.0, 0.0), (0.0042016808874905109,
0.0039215688593685627, 0.0039215688593685627), (0.0084033617749810219,
0.0078431377187371254, 0.0078431377187371254), (0.012605042196810246,
0.011764706112444401, 0.011764706112444401), (0.016806723549962044,
0.015686275437474251, 0.015686275437474251), (0.021008403971791267,
0.019607843831181526, 0.019607843831181526), (0.025210084393620491,
0.023529412224888802, 0.023529412224888802), (0.029411764815449715,
0.027450980618596077, 0.027450980618596077), (0.033613447099924088,
0.035294119268655777, 0.035294119268655777), (0.037815127521753311,
0.039215687662363052, 0.039215687662363052), (0.042016807943582535,
0.043137256056070328, 0.043137256056070328), (0.046218488365411758,
0.047058824449777603, 0.047058824449777603), (0.050420168787240982,
0.050980392843484879, 0.050980392843484879), (0.054621849209070206,
0.054901961237192154, 0.054901961237192154), (0.058823529630899429,
0.058823529630899429, 0.058823529630899429), (0.063025213778018951,
0.062745101749897003, 0.062745101749897003), (0.067226894199848175,
0.066666670143604279, 0.066666670143604279), (0.071428574621677399,
0.070588238537311554, 0.070588238537311554), (0.075630255043506622,
0.074509806931018829, 0.074509806931018829), (0.079831935465335846,
0.078431375324726105, 0.078431375324726105), (0.08403361588716507,
0.08235294371843338, 0.08235294371843338), (0.088235296308994293,
0.086274512112140656, 0.086274512112140656), (0.092436976730823517,
0.090196080505847931, 0.090196080505847931), (0.09663865715265274,
0.098039217293262482, 0.098039217293262482), (0.10084033757448196,
0.10196078568696976, 0.10196078568696976), (0.10504201799631119,
0.10588235408067703, 0.10588235408067703), (0.10924369841814041,
0.10980392247438431, 0.10980392247438431), (0.11344537883996964,
0.11372549086809158, 0.11372549086809158), (0.11764705926179886,
0.11764705926179886, 0.11764705926179886), (0.12184873968362808,
0.12156862765550613, 0.12156862765550613), (0.1260504275560379,
0.12549020349979401, 0.12549020349979401), (0.13025210797786713,
0.12941177189350128, 0.12941177189350128), (0.13445378839969635,
0.13333334028720856, 0.13333334028720856), (0.13865546882152557,
0.13725490868091583, 0.13725490868091583), (0.1428571492433548,
0.14117647707462311, 0.14117647707462311), (0.14705882966518402,
0.14509804546833038, 0.14509804546833038), (0.15126051008701324,
0.14901961386203766, 0.14901961386203766), (0.15546219050884247,
0.15294118225574493, 0.15294118225574493), (0.15966387093067169,
0.16078431904315948, 0.16078431904315948), (0.16386555135250092,
0.16470588743686676, 0.16470588743686676), (0.16806723177433014,
0.16862745583057404, 0.16862745583057404), (0.17226891219615936,
0.17254902422428131, 0.17254902422428131), (0.17647059261798859,
0.17647059261798859, 0.17647059261798859), (0.18067227303981781,
0.18039216101169586, 0.18039216101169586), (0.18487395346164703,
0.18431372940540314, 0.18431372940540314), (0.18907563388347626,
0.18823529779911041, 0.18823529779911041), (0.19327731430530548,
0.19215686619281769, 0.19215686619281769), (0.1974789947271347,
0.19607843458652496, 0.19607843458652496), (0.20168067514896393,
0.20000000298023224, 0.20000000298023224), (0.20588235557079315,
0.20392157137393951, 0.20392157137393951), (0.21008403599262238,
0.20784313976764679, 0.20784313976764679), (0.2142857164144516,
0.21176470816135406, 0.21176470816135406), (0.21848739683628082,
0.21568627655506134, 0.21568627655506134), (0.22268907725811005,
0.22352941334247589, 0.22352941334247589), (0.22689075767993927,
0.22745098173618317, 0.22745098173618317), (0.23109243810176849,
0.23137255012989044, 0.23137255012989044), (0.23529411852359772,
0.23529411852359772, 0.23529411852359772), (0.23949579894542694,
0.23921568691730499, 0.23921568691730499), (0.24369747936725616,
0.24313725531101227, 0.24313725531101227), (0.24789915978908539,
0.24705882370471954, 0.24705882370471954), (0.25210085511207581,
0.25098040699958801, 0.25098040699958801), (0.25630253553390503,
0.25490197539329529, 0.25490197539329529), (0.26050421595573425,
0.25882354378700256, 0.25882354378700256), (0.26470589637756348,
0.26274511218070984, 0.26274511218070984), (0.2689075767993927,
0.26666668057441711, 0.26666668057441711), (0.27310925722122192,
0.27058824896812439, 0.27058824896812439), (0.27731093764305115,
0.27450981736183167, 0.27450981736183167), (0.28151261806488037,
0.27843138575553894, 0.27843138575553894), (0.28571429848670959,
0.28627452254295349, 0.28627452254295349), (0.28991597890853882,
0.29019609093666077, 0.29019609093666077), (0.29411765933036804,
0.29411765933036804, 0.29411765933036804), (0.29831933975219727,
0.29803922772407532, 0.29803922772407532), (0.30252102017402649,
0.30196079611778259, 0.30196079611778259), (0.30672270059585571,
0.30588236451148987, 0.30588236451148987), (0.31092438101768494,
0.30980393290519714, 0.30980393290519714), (0.31512606143951416,
0.31372550129890442, 0.31372550129890442), (0.31932774186134338,
0.31764706969261169, 0.31764706969261169), (0.32352942228317261,
0.32156863808631897, 0.32156863808631897), (0.32773110270500183,
0.32549020648002625, 0.32549020648002625), (0.33193278312683105,
0.32941177487373352, 0.32941177487373352), (0.33613446354866028,
0.3333333432674408, 0.3333333432674408), (0.3403361439704895,
0.33725491166114807, 0.33725491166114807), (0.34453782439231873,
0.34117648005485535, 0.34117648005485535), (0.34873950481414795,
0.3490196168422699, 0.3490196168422699), (0.35294118523597717,
0.35294118523597717, 0.35294118523597717), (0.3571428656578064,
0.35686275362968445, 0.35686275362968445), (0.36134454607963562,
0.36078432202339172, 0.36078432202339172), (0.36554622650146484,
0.364705890417099, 0.364705890417099), (0.36974790692329407,
0.36862745881080627, 0.36862745881080627), (0.37394958734512329,
0.37254902720451355, 0.37254902720451355), (0.37815126776695251,
0.37647059559822083, 0.37647059559822083), (0.38235294818878174,
0.3803921639919281, 0.3803921639919281), (0.38655462861061096,
0.38431373238563538, 0.38431373238563538), (0.39075630903244019,
0.38823530077934265, 0.38823530077934265), (0.39495798945426941,
0.39215686917304993, 0.39215686917304993), (0.39915966987609863,
0.3960784375667572, 0.3960784375667572), (0.40336135029792786,
0.40000000596046448, 0.40000000596046448), (0.40756303071975708,
0.40392157435417175, 0.40392157435417175), (0.4117647111415863,
0.4117647111415863, 0.4117647111415863), (0.41596639156341553,
0.41568627953529358, 0.41568627953529358), (0.42016807198524475,
0.41960784792900085, 0.41960784792900085), (0.42436975240707397,
0.42352941632270813, 0.42352941632270813), (0.4285714328289032,
0.42745098471641541, 0.42745098471641541), (0.43277311325073242,
0.43137255311012268, 0.43137255311012268), (0.43697479367256165,
0.43529412150382996, 0.43529412150382996), (0.44117647409439087,
0.43921568989753723, 0.43921568989753723), (0.44537815451622009,
0.44313725829124451, 0.44313725829124451), (0.44957983493804932,
0.44705882668495178, 0.44705882668495178), (0.45378151535987854,
0.45098039507865906, 0.45098039507865906), (0.45798319578170776,
0.45490196347236633, 0.45490196347236633), (0.46218487620353699,
0.45882353186607361, 0.45882353186607361), (0.46638655662536621,
0.46274510025978088, 0.46274510025978088), (0.47058823704719543,
0.46666666865348816, 0.46666666865348816), (0.47478991746902466,
0.47450980544090271, 0.47450980544090271), (0.47899159789085388,
0.47843137383460999, 0.47843137383460999), (0.48319327831268311,
0.48235294222831726, 0.48235294222831726), (0.48739495873451233,
0.48627451062202454, 0.48627451062202454), (0.49159663915634155,
0.49019607901573181, 0.49019607901573181), (0.49579831957817078,
0.49411764740943909, 0.49411764740943909), (0.5, 0.49803921580314636,
0.49803921580314636), (0.50420171022415161, 0.50196081399917603,
0.50196081399917603), (0.50840336084365845, 0.5058823823928833,
0.5058823823928833), (0.51260507106781006, 0.50980395078659058,
0.50980395078659058), (0.51680672168731689, 0.51372551918029785,
0.51372551918029785), (0.52100843191146851, 0.51764708757400513,
0.51764708757400513), (0.52521008253097534, 0.5215686559677124,
0.5215686559677124), (0.52941179275512695, 0.52549022436141968,
0.52549022436141968), (0.53361344337463379, 0.52941179275512695,
0.52941179275512695), (0.5378151535987854, 0.5372549295425415,
0.5372549295425415), (0.54201680421829224, 0.54117649793624878,
0.54117649793624878), (0.54621851444244385, 0.54509806632995605,
0.54509806632995605), (0.55042016506195068, 0.54901963472366333,
0.54901963472366333), (0.55462187528610229, 0.55294120311737061,
0.55294120311737061), (0.55882352590560913, 0.55686277151107788,
0.55686277151107788), (0.56302523612976074, 0.56078433990478516,
0.56078433990478516), (0.56722688674926758, 0.56470590829849243,
0.56470590829849243), (0.57142859697341919, 0.56862747669219971,
0.56862747669219971), (0.57563024759292603, 0.57254904508590698,
0.57254904508590698), (0.57983195781707764, 0.57647061347961426,
0.57647061347961426), (0.58403360843658447, 0.58039218187332153,
0.58039218187332153), (0.58823531866073608, 0.58431375026702881,
0.58431375026702881), (0.59243696928024292, 0.58823531866073608,
0.58823531866073608), (0.59663867950439453, 0.59215688705444336,
0.59215688705444336), (0.60084033012390137, 0.60000002384185791,
0.60000002384185791), (0.60504204034805298, 0.60392159223556519,
0.60392159223556519), (0.60924369096755981, 0.60784316062927246,
0.60784316062927246), (0.61344540119171143, 0.61176472902297974,
0.61176472902297974), (0.61764705181121826, 0.61568629741668701,
0.61568629741668701), (0.62184876203536987, 0.61960786581039429,
0.61960786581039429), (0.62605041265487671, 0.62352943420410156,
0.62352943420410156), (0.63025212287902832, 0.62745100259780884,
0.62745100259780884), (0.63445377349853516, 0.63137257099151611,
0.63137257099151611), (0.63865548372268677, 0.63529413938522339,
0.63529413938522339), (0.6428571343421936, 0.63921570777893066,
0.63921570777893066), (0.64705884456634521, 0.64313727617263794,
0.64313727617263794), (0.65126049518585205, 0.64705884456634521,
0.64705884456634521), (0.65546220541000366, 0.65098041296005249,
0.65098041296005249), (0.6596638560295105, 0.65490198135375977,
0.65490198135375977), (0.66386556625366211, 0.66274511814117432,
0.66274511814117432), (0.66806721687316895, 0.66666668653488159,
0.66666668653488159), (0.67226892709732056, 0.67058825492858887,
0.67058825492858887), (0.67647057771682739, 0.67450982332229614,
0.67450982332229614), (0.680672287940979, 0.67843139171600342,
0.67843139171600342), (0.68487393856048584, 0.68235296010971069,
0.68235296010971069), (0.68907564878463745, 0.68627452850341797,
0.68627452850341797), (0.69327729940414429, 0.69019609689712524,
0.69019609689712524), (0.6974790096282959, 0.69411766529083252,
0.69411766529083252), (0.70168066024780273, 0.69803923368453979,
0.69803923368453979), (0.70588237047195435, 0.70196080207824707,
0.70196080207824707), (0.71008402109146118, 0.70588237047195435,
0.70588237047195435), (0.71428573131561279, 0.70980393886566162,
0.70980393886566162), (0.71848738193511963, 0.7137255072593689,
0.7137255072593689), (0.72268909215927124, 0.71764707565307617,
0.71764707565307617), (0.72689074277877808, 0.72549021244049072,
0.72549021244049072), (0.73109245300292969, 0.729411780834198,
0.729411780834198), (0.73529410362243652, 0.73333334922790527,
0.73333334922790527), (0.73949581384658813, 0.73725491762161255,
0.73725491762161255), (0.74369746446609497, 0.74117648601531982,
0.74117648601531982), (0.74789917469024658, 0.7450980544090271,
0.7450980544090271), (0.75210082530975342, 0.74901962280273438,
0.74901962280273438), (0.75630253553390503, 0.75294119119644165,
0.75294119119644165), (0.76050418615341187, 0.75686275959014893,
0.75686275959014893), (0.76470589637756348, 0.7607843279838562,
0.7607843279838562), (0.76890754699707031, 0.76470589637756348,
0.76470589637756348), (0.77310925722122192, 0.76862746477127075,
0.76862746477127075), (0.77731090784072876, 0.77254903316497803,
0.77254903316497803), (0.78151261806488037, 0.7764706015586853,
0.7764706015586853), (0.78571426868438721, 0.78039216995239258,
0.78039216995239258), (0.78991597890853882, 0.78823530673980713,
0.78823530673980713), (0.79411762952804565, 0.7921568751335144,
0.7921568751335144), (0.79831933975219727, 0.79607844352722168,
0.79607844352722168), (0.8025209903717041, 0.80000001192092896,
0.80000001192092896), (0.80672270059585571, 0.80392158031463623,
0.80392158031463623), (0.81092435121536255, 0.80784314870834351,
0.80784314870834351), (0.81512606143951416, 0.81176471710205078,
0.81176471710205078), (0.819327712059021, 0.81568628549575806,
0.81568628549575806), (0.82352942228317261, 0.81960785388946533,
0.81960785388946533), (0.82773107290267944, 0.82352942228317261,
0.82352942228317261), (0.83193278312683105, 0.82745099067687988,
0.82745099067687988), (0.83613443374633789, 0.83137255907058716,
0.83137255907058716), (0.8403361439704895, 0.83529412746429443,
0.83529412746429443), (0.84453779458999634, 0.83921569585800171,
0.83921569585800171), (0.84873950481414795, 0.84313726425170898,
0.84313726425170898), (0.85294115543365479, 0.85098040103912354,
0.85098040103912354), (0.8571428656578064, 0.85490196943283081,
0.85490196943283081), (0.86134451627731323, 0.85882353782653809,
0.85882353782653809), (0.86554622650146484, 0.86274510622024536,
0.86274510622024536), (0.86974787712097168, 0.86666667461395264,
0.86666667461395264), (0.87394958734512329, 0.87058824300765991,
0.87058824300765991), (0.87815123796463013, 0.87450981140136719,
0.87450981140136719), (0.88235294818878174, 0.87843137979507446,
0.87843137979507446), (0.88655459880828857, 0.88235294818878174,
0.88235294818878174), (0.89075630903244019, 0.88627451658248901,
0.88627451658248901), (0.89495795965194702, 0.89019608497619629,
0.89019608497619629), (0.89915966987609863, 0.89411765336990356,
0.89411765336990356), (0.90336132049560547, 0.89803922176361084,
0.89803922176361084), (0.90756303071975708, 0.90196079015731812,
0.90196079015731812), (0.91176468133926392, 0.90588235855102539,
0.90588235855102539), (0.91596639156341553, 0.91372549533843994,
0.91372549533843994), (0.92016804218292236, 0.91764706373214722,
0.91764706373214722), (0.92436975240707397, 0.92156863212585449,
0.92156863212585449), (0.92857140302658081, 0.92549020051956177,
0.92549020051956177), (0.93277311325073242, 0.92941176891326904,
0.92941176891326904), (0.93697476387023926, 0.93333333730697632,
0.93333333730697632), (0.94117647409439087, 0.93725490570068359,
0.93725490570068359), (0.94537812471389771, 0.94117647409439087,
0.94117647409439087), (0.94957983493804932, 0.94509804248809814,
0.94509804248809814), (0.95378148555755615, 0.94901961088180542,
0.94901961088180542), (0.95798319578170776, 0.9529411792755127,
0.9529411792755127), (0.9621848464012146, 0.95686274766921997,
0.95686274766921997), (0.96638655662536621, 0.96078431606292725,
0.96078431606292725), (0.97058820724487305, 0.96470588445663452,
0.96470588445663452), (0.97478991746902466, 0.9686274528503418,
0.9686274528503418), (0.97899156808853149, 0.97647058963775635,
0.97647058963775635), (0.98319327831268311, 0.98039215803146362,
0.98039215803146362), (0.98739492893218994, 0.9843137264251709,
0.9843137264251709), (0.99159663915634155, 0.98823529481887817,
0.98823529481887817), (0.99579828977584839, 0.99215686321258545,
0.99215686321258545), (1.0, 0.99607843160629272, 0.99607843160629272)],
'green': [(0.0, 0.0, 0.0), (0.0042016808874905109, 0.0039215688593685627,
0.0039215688593685627), (0.0084033617749810219, 0.0078431377187371254,
0.0078431377187371254), (0.012605042196810246, 0.011764706112444401,
0.011764706112444401), (0.016806723549962044, 0.015686275437474251,
0.015686275437474251), (0.021008403971791267, 0.019607843831181526,
0.019607843831181526), (0.025210084393620491, 0.023529412224888802,
0.023529412224888802), (0.029411764815449715, 0.027450980618596077,
0.027450980618596077), (0.033613447099924088, 0.035294119268655777,
0.035294119268655777), (0.037815127521753311, 0.039215687662363052,
0.039215687662363052), (0.042016807943582535, 0.043137256056070328,
0.043137256056070328), (0.046218488365411758, 0.047058824449777603,
0.047058824449777603), (0.050420168787240982, 0.050980392843484879,
0.050980392843484879), (0.054621849209070206, 0.054901961237192154,
0.054901961237192154), (0.058823529630899429, 0.058823529630899429,
0.058823529630899429), (0.063025213778018951, 0.062745101749897003,
0.062745101749897003), (0.067226894199848175, 0.066666670143604279,
0.066666670143604279), (0.071428574621677399, 0.070588238537311554,
0.070588238537311554), (0.075630255043506622, 0.074509806931018829,
0.074509806931018829), (0.079831935465335846, 0.078431375324726105,
0.078431375324726105), (0.08403361588716507, 0.08235294371843338,
0.08235294371843338), (0.088235296308994293, 0.086274512112140656,
0.086274512112140656), (0.092436976730823517, 0.090196080505847931,
0.090196080505847931), (0.09663865715265274, 0.098039217293262482,
0.098039217293262482), (0.10084033757448196, 0.10196078568696976,
0.10196078568696976), (0.10504201799631119, 0.10588235408067703,
0.10588235408067703), (0.10924369841814041, 0.10980392247438431,
0.10980392247438431), (0.11344537883996964, 0.11372549086809158,
0.11372549086809158), (0.11764705926179886, 0.11764705926179886,
0.11764705926179886), (0.12184873968362808, 0.12156862765550613,
0.12156862765550613), (0.1260504275560379, 0.12549020349979401,
0.12549020349979401), (0.13025210797786713, 0.12941177189350128,
0.12941177189350128), (0.13445378839969635, 0.13333334028720856,
0.13333334028720856), (0.13865546882152557, 0.13725490868091583,
0.13725490868091583), (0.1428571492433548, 0.14117647707462311,
0.14117647707462311), (0.14705882966518402, 0.14509804546833038,
0.14509804546833038), (0.15126051008701324, 0.14901961386203766,
0.14901961386203766), (0.15546219050884247, 0.15294118225574493,
0.15294118225574493), (0.15966387093067169, 0.16078431904315948,
0.16078431904315948), (0.16386555135250092, 0.16470588743686676,
0.16470588743686676), (0.16806723177433014, 0.16862745583057404,
0.16862745583057404), (0.17226891219615936, 0.17254902422428131,
0.17254902422428131), (0.17647059261798859, 0.17647059261798859,
0.17647059261798859), (0.18067227303981781, 0.18039216101169586,
0.18039216101169586), (0.18487395346164703, 0.18431372940540314,
0.18431372940540314), (0.18907563388347626, 0.18823529779911041,
0.18823529779911041), (0.19327731430530548, 0.19215686619281769,
0.19215686619281769), (0.1974789947271347, 0.19607843458652496,
0.19607843458652496), (0.20168067514896393, 0.20000000298023224,
0.20000000298023224), (0.20588235557079315, 0.20392157137393951,
0.20392157137393951), (0.21008403599262238, 0.20784313976764679,
0.20784313976764679), (0.2142857164144516, 0.21176470816135406,
0.21176470816135406), (0.21848739683628082, 0.21568627655506134,
0.21568627655506134), (0.22268907725811005, 0.22352941334247589,
0.22352941334247589), (0.22689075767993927, 0.22745098173618317,
0.22745098173618317), (0.23109243810176849, 0.23137255012989044,
0.23137255012989044), (0.23529411852359772, 0.23529411852359772,
0.23529411852359772), (0.23949579894542694, 0.23921568691730499,
0.23921568691730499), (0.24369747936725616, 0.24313725531101227,
0.24313725531101227), (0.24789915978908539, 0.24705882370471954,
0.24705882370471954), (0.25210085511207581, 0.25098040699958801,
0.25098040699958801), (0.25630253553390503, 0.25490197539329529,
0.25490197539329529), (0.26050421595573425, 0.25882354378700256,
0.25882354378700256), (0.26470589637756348, 0.26274511218070984,
0.26274511218070984), (0.2689075767993927, 0.26666668057441711,
0.26666668057441711), (0.27310925722122192, 0.27058824896812439,
0.27058824896812439), (0.27731093764305115, 0.27450981736183167,
0.27450981736183167), (0.28151261806488037, 0.27843138575553894,
0.27843138575553894), (0.28571429848670959, 0.28627452254295349,
0.28627452254295349), (0.28991597890853882, 0.29019609093666077,
0.29019609093666077), (0.29411765933036804, 0.29411765933036804,
0.29411765933036804), (0.29831933975219727, 0.29803922772407532,
0.29803922772407532), (0.30252102017402649, 0.30196079611778259,
0.30196079611778259), (0.30672270059585571, 0.30588236451148987,
0.30588236451148987), (0.31092438101768494, 0.30980393290519714,
0.30980393290519714), (0.31512606143951416, 0.31372550129890442,
0.31372550129890442), (0.31932774186134338, 0.31764706969261169,
0.31764706969261169), (0.32352942228317261, 0.32156863808631897,
0.32156863808631897), (0.32773110270500183, 0.32549020648002625,
0.32549020648002625), (0.33193278312683105, 0.32941177487373352,
0.32941177487373352), (0.33613446354866028, 0.3333333432674408,
0.3333333432674408), (0.3403361439704895, 0.33725491166114807,
0.33725491166114807), (0.34453782439231873, 0.34117648005485535,
0.34117648005485535), (0.34873950481414795, 0.3490196168422699,
0.3490196168422699), (0.35294118523597717, 0.35294118523597717,
0.35294118523597717), (0.3571428656578064, 0.35686275362968445,
0.35686275362968445), (0.36134454607963562, 0.36078432202339172,
0.36078432202339172), (0.36554622650146484, 0.364705890417099,
0.364705890417099), (0.36974790692329407, 0.36862745881080627,
0.36862745881080627), (0.37394958734512329, 0.37254902720451355,
0.37254902720451355), (0.37815126776695251, 0.37647059559822083,
0.37647059559822083), (0.38235294818878174, 0.3803921639919281,
0.3803921639919281), (0.38655462861061096, 0.38431373238563538,
0.38431373238563538), (0.39075630903244019, 0.38823530077934265,
0.38823530077934265), (0.39495798945426941, 0.39215686917304993,
0.39215686917304993), (0.39915966987609863, 0.3960784375667572,
0.3960784375667572), (0.40336135029792786, 0.40000000596046448,
0.40000000596046448), (0.40756303071975708, 0.40392157435417175,
0.40392157435417175), (0.4117647111415863, 0.4117647111415863,
0.4117647111415863), (0.41596639156341553, 0.41568627953529358,
0.41568627953529358), (0.42016807198524475, 0.41960784792900085,
0.41960784792900085), (0.42436975240707397, 0.42352941632270813,
0.42352941632270813), (0.4285714328289032, 0.42745098471641541,
0.42745098471641541), (0.43277311325073242, 0.43137255311012268,
0.43137255311012268), (0.43697479367256165, 0.43529412150382996,
0.43529412150382996), (0.44117647409439087, 0.43921568989753723,
0.43921568989753723), (0.44537815451622009, 0.44313725829124451,
0.44313725829124451), (0.44957983493804932, 0.44705882668495178,
0.44705882668495178), (0.45378151535987854, 0.45098039507865906,
0.45098039507865906), (0.45798319578170776, 0.45490196347236633,
0.45490196347236633), (0.46218487620353699, 0.45882353186607361,
0.45882353186607361), (0.46638655662536621, 0.46274510025978088,
0.46274510025978088), (0.47058823704719543, 0.46666666865348816,
0.46666666865348816), (0.47478991746902466, 0.47450980544090271,
0.47450980544090271), (0.47899159789085388, 0.47843137383460999,
0.47843137383460999), (0.48319327831268311, 0.48235294222831726,
0.48235294222831726), (0.48739495873451233, 0.48627451062202454,
0.48627451062202454), (0.49159663915634155, 0.49019607901573181,
0.49019607901573181), (0.49579831957817078, 0.49411764740943909,
0.49411764740943909), (0.5, 0.49803921580314636, 0.49803921580314636),
(0.50420171022415161, 0.50196081399917603, 0.50196081399917603),
(0.50840336084365845, 0.5058823823928833, 0.5058823823928833),
(0.51260507106781006, 0.50980395078659058, 0.50980395078659058),
(0.51680672168731689, 0.51372551918029785, 0.51372551918029785),
(0.52100843191146851, 0.51764708757400513, 0.51764708757400513),
(0.52521008253097534, 0.5215686559677124, 0.5215686559677124),
(0.52941179275512695, 0.52549022436141968, 0.52549022436141968),
(0.53361344337463379, 0.52941179275512695, 0.52941179275512695),
(0.5378151535987854, 0.5372549295425415, 0.5372549295425415),
(0.54201680421829224, 0.54117649793624878, 0.54117649793624878),
(0.54621851444244385, 0.54509806632995605, 0.54509806632995605),
(0.55042016506195068, 0.54901963472366333, 0.54901963472366333),
(0.55462187528610229, 0.55294120311737061, 0.55294120311737061),
(0.55882352590560913, 0.55686277151107788, 0.55686277151107788),
(0.56302523612976074, 0.56078433990478516, 0.56078433990478516),
(0.56722688674926758, 0.56470590829849243, 0.56470590829849243),
(0.57142859697341919, 0.56862747669219971, 0.56862747669219971),
(0.57563024759292603, 0.57254904508590698, 0.57254904508590698),
(0.57983195781707764, 0.57647061347961426, 0.57647061347961426),
(0.58403360843658447, 0.58039218187332153, 0.58039218187332153),
(0.58823531866073608, 0.58431375026702881, 0.58431375026702881),
(0.59243696928024292, 0.58823531866073608, 0.58823531866073608),
(0.59663867950439453, 0.59215688705444336, 0.59215688705444336),
(0.60084033012390137, 0.60000002384185791, 0.60000002384185791),
(0.60504204034805298, 0.60392159223556519, 0.60392159223556519),
(0.60924369096755981, 0.60784316062927246, 0.60784316062927246),
(0.61344540119171143, 0.61176472902297974, 0.61176472902297974),
(0.61764705181121826, 0.61568629741668701, 0.61568629741668701),
(0.62184876203536987, 0.61960786581039429, 0.61960786581039429),
(0.62605041265487671, 0.62352943420410156, 0.62352943420410156),
(0.63025212287902832, 0.62745100259780884, 0.62745100259780884),
(0.63445377349853516, 0.63137257099151611, 0.63137257099151611),
(0.63865548372268677, 0.63529413938522339, 0.63529413938522339),
(0.6428571343421936, 0.63921570777893066, 0.63921570777893066),
(0.64705884456634521, 0.64313727617263794, 0.64313727617263794),
(0.65126049518585205, 0.64705884456634521, 0.64705884456634521),
(0.65546220541000366, 0.65098041296005249, 0.65098041296005249),
(0.6596638560295105, 0.65490198135375977, 0.65490198135375977),
(0.66386556625366211, 0.66274511814117432, 0.66274511814117432),
(0.66806721687316895, 0.66666668653488159, 0.66666668653488159),
(0.67226892709732056, 0.67058825492858887, 0.67058825492858887),
(0.67647057771682739, 0.67450982332229614, 0.67450982332229614),
(0.680672287940979, 0.67843139171600342, 0.67843139171600342),
(0.68487393856048584, 0.68235296010971069, 0.68235296010971069),
(0.68907564878463745, 0.68627452850341797, 0.68627452850341797),
(0.69327729940414429, 0.69019609689712524, 0.69019609689712524),
(0.6974790096282959, 0.69411766529083252, 0.69411766529083252),
(0.70168066024780273, 0.69803923368453979, 0.69803923368453979),
(0.70588237047195435, 0.70196080207824707, 0.70196080207824707),
(0.71008402109146118, 0.70588237047195435, 0.70588237047195435),
(0.71428573131561279, 0.70980393886566162, 0.70980393886566162),
(0.71848738193511963, 0.7137255072593689, 0.7137255072593689),
(0.72268909215927124, 0.71764707565307617, 0.71764707565307617),
(0.72689074277877808, 0.72549021244049072, 0.72549021244049072),
(0.73109245300292969, 0.729411780834198, 0.729411780834198),
(0.73529410362243652, 0.73333334922790527, 0.73333334922790527),
(0.73949581384658813, 0.73725491762161255, 0.73725491762161255),
(0.74369746446609497, 0.74117648601531982, 0.74117648601531982),
(0.74789917469024658, 0.7450980544090271, 0.7450980544090271),
(0.75210082530975342, 0.74901962280273438, 0.74901962280273438),
(0.75630253553390503, 0.75294119119644165, 0.75294119119644165),
(0.76050418615341187, 0.75686275959014893, 0.75686275959014893),
(0.76470589637756348, 0.7607843279838562, 0.7607843279838562),
(0.76890754699707031, 0.76470589637756348, 0.76470589637756348),
(0.77310925722122192, 0.76862746477127075, 0.76862746477127075),
(0.77731090784072876, 0.77254903316497803, 0.77254903316497803),
(0.78151261806488037, 0.7764706015586853, 0.7764706015586853),
(0.78571426868438721, 0.78039216995239258, 0.78039216995239258),
(0.78991597890853882, 0.78823530673980713, 0.78823530673980713),
(0.79411762952804565, 0.7921568751335144, 0.7921568751335144),
(0.79831933975219727, 0.79607844352722168, 0.79607844352722168),
(0.8025209903717041, 0.80000001192092896, 0.80000001192092896),
(0.80672270059585571, 0.80392158031463623, 0.80392158031463623),
(0.81092435121536255, 0.80784314870834351, 0.80784314870834351),
(0.81512606143951416, 0.81176471710205078, 0.81176471710205078),
(0.819327712059021, 0.81568628549575806, 0.81568628549575806),
(0.82352942228317261, 0.81960785388946533, 0.81960785388946533),
(0.82773107290267944, 0.82352942228317261, 0.82352942228317261),
(0.83193278312683105, 0.82745099067687988, 0.82745099067687988),
(0.83613443374633789, 0.83137255907058716, 0.83137255907058716),
(0.8403361439704895, 0.83529412746429443, 0.83529412746429443),
(0.84453779458999634, 0.83921569585800171, 0.83921569585800171),
(0.84873950481414795, 0.84313726425170898, 0.84313726425170898),
(0.85294115543365479, 0.85098040103912354, 0.85098040103912354),
(0.8571428656578064, 0.85490196943283081, 0.85490196943283081),
(0.86134451627731323, 0.85882353782653809, 0.85882353782653809),
(0.86554622650146484, 0.86274510622024536, 0.86274510622024536),
(0.86974787712097168, 0.86666667461395264, 0.86666667461395264),
(0.87394958734512329, 0.87058824300765991, 0.87058824300765991),
(0.87815123796463013, 0.87450981140136719, 0.87450981140136719),
(0.88235294818878174, 0.87843137979507446, 0.87843137979507446),
(0.88655459880828857, 0.88235294818878174, 0.88235294818878174),
(0.89075630903244019, 0.88627451658248901, 0.88627451658248901),
(0.89495795965194702, 0.89019608497619629, 0.89019608497619629),
(0.89915966987609863, 0.89411765336990356, 0.89411765336990356),
(0.90336132049560547, 0.89803922176361084, 0.89803922176361084),
(0.90756303071975708, 0.90196079015731812, 0.90196079015731812),
(0.91176468133926392, 0.90588235855102539, 0.90588235855102539),
(0.91596639156341553, 0.91372549533843994, 0.91372549533843994),
(0.92016804218292236, 0.91764706373214722, 0.91764706373214722),
(0.92436975240707397, 0.92156863212585449, 0.92156863212585449),
(0.92857140302658081, 0.92549020051956177, 0.92549020051956177),
(0.93277311325073242, 0.92941176891326904, 0.92941176891326904),
(0.93697476387023926, 0.93333333730697632, 0.93333333730697632),
(0.94117647409439087, 0.93725490570068359, 0.93725490570068359),
(0.94537812471389771, 0.94117647409439087, 0.94117647409439087),
(0.94957983493804932, 0.94509804248809814, 0.94509804248809814),
(0.95378148555755615, 0.94901961088180542, 0.94901961088180542),
(0.95798319578170776, 0.9529411792755127, 0.9529411792755127),
(0.9621848464012146, 0.95686274766921997, 0.95686274766921997),
(0.96638655662536621, 0.96078431606292725, 0.96078431606292725),
(0.97058820724487305, 0.96470588445663452, 0.96470588445663452),
(0.97478991746902466, 0.9686274528503418, 0.9686274528503418),
(0.97899156808853149, 0.97647058963775635, 0.97647058963775635),
(0.98319327831268311, 0.98039215803146362, 0.98039215803146362),
(0.98739492893218994, 0.9843137264251709, 0.9843137264251709),
(0.99159663915634155, 0.98823529481887817, 0.98823529481887817),
(0.99579828977584839, 0.99215686321258545, 0.99215686321258545), (1.0,
0.99607843160629272, 0.99607843160629272)], 'red': [(0.0, 0.0, 0.0),
(0.0042016808874905109, 0.0039215688593685627, 0.0039215688593685627),
(0.0084033617749810219, 0.0078431377187371254, 0.0078431377187371254),
(0.012605042196810246, 0.011764706112444401, 0.011764706112444401),
(0.016806723549962044, 0.015686275437474251, 0.015686275437474251),
(0.021008403971791267, 0.019607843831181526, 0.019607843831181526),
(0.025210084393620491, 0.023529412224888802, 0.023529412224888802),
(0.029411764815449715, 0.027450980618596077, 0.027450980618596077),
(0.033613447099924088, 0.035294119268655777, 0.035294119268655777),
(0.037815127521753311, 0.039215687662363052, 0.039215687662363052),
(0.042016807943582535, 0.043137256056070328, 0.043137256056070328),
(0.046218488365411758, 0.047058824449777603, 0.047058824449777603),
(0.050420168787240982, 0.050980392843484879, 0.050980392843484879),
(0.054621849209070206, 0.054901961237192154, 0.054901961237192154),
(0.058823529630899429, 0.058823529630899429, 0.058823529630899429),
(0.063025213778018951, 0.062745101749897003, 0.062745101749897003),
(0.067226894199848175, 0.066666670143604279, 0.066666670143604279),
(0.071428574621677399, 0.070588238537311554, 0.070588238537311554),
(0.075630255043506622, 0.074509806931018829, 0.074509806931018829),
(0.079831935465335846, 0.078431375324726105, 0.078431375324726105),
(0.08403361588716507, 0.08235294371843338, 0.08235294371843338),
(0.088235296308994293, 0.086274512112140656, 0.086274512112140656),
(0.092436976730823517, 0.090196080505847931, 0.090196080505847931),
(0.09663865715265274, 0.098039217293262482, 0.098039217293262482),
(0.10084033757448196, 0.10196078568696976, 0.10196078568696976),
(0.10504201799631119, 0.10588235408067703, 0.10588235408067703),
(0.10924369841814041, 0.10980392247438431, 0.10980392247438431),
(0.11344537883996964, 0.11372549086809158, 0.11372549086809158),
(0.11764705926179886, 0.11764705926179886, 0.11764705926179886),
(0.12184873968362808, 0.12156862765550613, 0.12156862765550613),
(0.1260504275560379, 0.12549020349979401, 0.12549020349979401),
(0.13025210797786713, 0.12941177189350128, 0.12941177189350128),
(0.13445378839969635, 0.13333334028720856, 0.13333334028720856),
(0.13865546882152557, 0.13725490868091583, 0.13725490868091583),
(0.1428571492433548, 0.14117647707462311, 0.14117647707462311),
(0.14705882966518402, 0.14509804546833038, 0.14509804546833038),
(0.15126051008701324, 0.14901961386203766, 0.14901961386203766),
(0.15546219050884247, 0.15294118225574493, 0.15294118225574493),
(0.15966387093067169, 0.16078431904315948, 0.16078431904315948),
(0.16386555135250092, 0.16470588743686676, 0.16470588743686676),
(0.16806723177433014, 0.16862745583057404, 0.16862745583057404),
(0.17226891219615936, 0.17254902422428131, 0.17254902422428131),
(0.17647059261798859, 0.17647059261798859, 0.17647059261798859),
(0.18067227303981781, 0.18039216101169586, 0.18039216101169586),
(0.18487395346164703, 0.18431372940540314, 0.18431372940540314),
(0.18907563388347626, 0.18823529779911041, 0.18823529779911041),
(0.19327731430530548, 0.19215686619281769, 0.19215686619281769),
(0.1974789947271347, 0.19607843458652496, 0.19607843458652496),
(0.20168067514896393, 0.20000000298023224, 0.20000000298023224),
(0.20588235557079315, 0.20392157137393951, 0.20392157137393951),
(0.21008403599262238, 0.20784313976764679, 0.20784313976764679),
(0.2142857164144516, 0.21176470816135406, 0.21176470816135406),
(0.21848739683628082, 0.21568627655506134, 0.21568627655506134),
(0.22268907725811005, 0.22352941334247589, 0.22352941334247589),
(0.22689075767993927, 0.22745098173618317, 0.22745098173618317),
(0.23109243810176849, 0.23137255012989044, 0.23137255012989044),
(0.23529411852359772, 0.23529411852359772, 0.23529411852359772),
(0.23949579894542694, 0.23921568691730499, 0.23921568691730499),
(0.24369747936725616, 0.24313725531101227, 0.24313725531101227),
(0.24789915978908539, 0.24705882370471954, 0.24705882370471954),
(0.25210085511207581, 0.25098040699958801, 0.25098040699958801),
(0.25630253553390503, 0.25490197539329529, 0.25490197539329529),
(0.26050421595573425, 0.25882354378700256, 0.25882354378700256),
(0.26470589637756348, 0.26274511218070984, 0.26274511218070984),
(0.2689075767993927, 0.26666668057441711, 0.26666668057441711),
(0.27310925722122192, 0.27058824896812439, 0.27058824896812439),
(0.27731093764305115, 0.27450981736183167, 0.27450981736183167),
(0.28151261806488037, 0.27843138575553894, 0.27843138575553894),
(0.28571429848670959, 0.28627452254295349, 0.28627452254295349),
(0.28991597890853882, 0.29019609093666077, 0.29019609093666077),
(0.29411765933036804, 0.29411765933036804, 0.29411765933036804),
(0.29831933975219727, 0.29803922772407532, 0.29803922772407532),
(0.30252102017402649, 0.30196079611778259, 0.30196079611778259),
(0.30672270059585571, 0.30588236451148987, 0.30588236451148987),
(0.31092438101768494, 0.30980393290519714, 0.30980393290519714),
(0.31512606143951416, 0.31372550129890442, 0.31372550129890442),
(0.31932774186134338, 0.31764706969261169, 0.31764706969261169),
(0.32352942228317261, 0.32156863808631897, 0.32156863808631897),
(0.32773110270500183, 0.32549020648002625, 0.32549020648002625),
(0.33193278312683105, 0.32941177487373352, 0.32941177487373352),
(0.33613446354866028, 0.3333333432674408, 0.3333333432674408),
(0.3403361439704895, 0.33725491166114807, 0.33725491166114807),
(0.34453782439231873, 0.34117648005485535, 0.34117648005485535),
(0.34873950481414795, 0.3490196168422699, 0.3490196168422699),
(0.35294118523597717, 0.35294118523597717, 0.35294118523597717),
(0.3571428656578064, 0.35686275362968445, 0.35686275362968445),
(0.36134454607963562, 0.36078432202339172, 0.36078432202339172),
(0.36554622650146484, 0.364705890417099, 0.364705890417099),
(0.36974790692329407, 0.36862745881080627, 0.36862745881080627),
(0.37394958734512329, 0.37254902720451355, 0.37254902720451355),
(0.37815126776695251, 0.37647059559822083, 0.37647059559822083),
(0.38235294818878174, 0.3803921639919281, 0.3803921639919281),
(0.38655462861061096, 0.38431373238563538, 0.38431373238563538),
(0.39075630903244019, 0.38823530077934265, 0.38823530077934265),
(0.39495798945426941, 0.39215686917304993, 0.39215686917304993),
(0.39915966987609863, 0.3960784375667572, 0.3960784375667572),
(0.40336135029792786, 0.40000000596046448, 0.40000000596046448),
(0.40756303071975708, 0.40392157435417175, 0.40392157435417175),
(0.4117647111415863, 0.4117647111415863, 0.4117647111415863),
(0.41596639156341553, 0.41568627953529358, 0.41568627953529358),
(0.42016807198524475, 0.41960784792900085, 0.41960784792900085),
(0.42436975240707397, 0.42352941632270813, 0.42352941632270813),
(0.4285714328289032, 0.42745098471641541, 0.42745098471641541),
(0.43277311325073242, 0.43137255311012268, 0.43137255311012268),
(0.43697479367256165, 0.43529412150382996, 0.43529412150382996),
(0.44117647409439087, 0.43921568989753723, 0.43921568989753723),
(0.44537815451622009, 0.44313725829124451, 0.44313725829124451),
(0.44957983493804932, 0.44705882668495178, 0.44705882668495178),
(0.45378151535987854, 0.45098039507865906, 0.45098039507865906),
(0.45798319578170776, 0.45490196347236633, 0.45490196347236633),
(0.46218487620353699, 0.45882353186607361, 0.45882353186607361),
(0.46638655662536621, 0.46274510025978088, 0.46274510025978088),
(0.47058823704719543, 0.46666666865348816, 0.46666666865348816),
(0.47478991746902466, 0.47450980544090271, 0.47450980544090271),
(0.47899159789085388, 0.47843137383460999, 0.47843137383460999),
(0.48319327831268311, 0.48235294222831726, 0.48235294222831726),
(0.48739495873451233, 0.48627451062202454, 0.48627451062202454),
(0.49159663915634155, 0.49019607901573181, 0.49019607901573181),
(0.49579831957817078, 0.49411764740943909, 0.49411764740943909), (0.5,
0.49803921580314636, 0.49803921580314636), (0.50420171022415161,
0.50196081399917603, 0.50196081399917603), (0.50840336084365845,
0.5058823823928833, 0.5058823823928833), (0.51260507106781006,
0.50980395078659058, 0.50980395078659058), (0.51680672168731689,
0.51372551918029785, 0.51372551918029785), (0.52100843191146851,
0.51764708757400513, 0.51764708757400513), (0.52521008253097534,
0.5215686559677124, 0.5215686559677124), (0.52941179275512695,
0.52549022436141968, 0.52549022436141968), (0.53361344337463379,
0.52941179275512695, 0.52941179275512695), (0.5378151535987854,
0.5372549295425415, 0.5372549295425415), (0.54201680421829224,
0.54117649793624878, 0.54117649793624878), (0.54621851444244385,
0.54509806632995605, 0.54509806632995605), (0.55042016506195068,
0.54901963472366333, 0.54901963472366333), (0.55462187528610229,
0.55294120311737061, 0.55294120311737061), (0.55882352590560913,
0.55686277151107788, 0.55686277151107788), (0.56302523612976074,
0.56078433990478516, 0.56078433990478516), (0.56722688674926758,
0.56470590829849243, 0.56470590829849243), (0.57142859697341919,
0.56862747669219971, 0.56862747669219971), (0.57563024759292603,
0.57254904508590698, 0.57254904508590698), (0.57983195781707764,
0.57647061347961426, 0.57647061347961426), (0.58403360843658447,
0.58039218187332153, 0.58039218187332153), (0.58823531866073608,
0.58431375026702881, 0.58431375026702881), (0.59243696928024292,
0.58823531866073608, 0.58823531866073608), (0.59663867950439453,
0.59215688705444336, 0.59215688705444336), (0.60084033012390137,
0.60000002384185791, 0.60000002384185791), (0.60504204034805298,
0.60392159223556519, 0.60392159223556519), (0.60924369096755981,
0.60784316062927246, 0.60784316062927246), (0.61344540119171143,
0.61176472902297974, 0.61176472902297974), (0.61764705181121826,
0.61568629741668701, 0.61568629741668701), (0.62184876203536987,
0.61960786581039429, 0.61960786581039429), (0.62605041265487671,
0.62352943420410156, 0.62352943420410156), (0.63025212287902832,
0.62745100259780884, 0.62745100259780884), (0.63445377349853516,
0.63137257099151611, 0.63137257099151611), (0.63865548372268677,
0.63529413938522339, 0.63529413938522339), (0.6428571343421936,
0.63921570777893066, 0.63921570777893066), (0.64705884456634521,
0.64313727617263794, 0.64313727617263794), (0.65126049518585205,
0.64705884456634521, 0.64705884456634521), (0.65546220541000366,
0.65098041296005249, 0.65098041296005249), (0.6596638560295105,
0.65490198135375977, 0.65490198135375977), (0.66386556625366211,
0.66274511814117432, 0.66274511814117432), (0.66806721687316895,
0.66666668653488159, 0.66666668653488159), (0.67226892709732056,
0.67058825492858887, 0.67058825492858887), (0.67647057771682739,
0.67450982332229614, 0.67450982332229614), (0.680672287940979,
0.67843139171600342, 0.67843139171600342), (0.68487393856048584,
0.68235296010971069, 0.68235296010971069), (0.68907564878463745,
0.68627452850341797, 0.68627452850341797), (0.69327729940414429,
0.69019609689712524, 0.69019609689712524), (0.6974790096282959,
0.69411766529083252, 0.69411766529083252), (0.70168066024780273,
0.69803923368453979, 0.69803923368453979), (0.70588237047195435,
0.70196080207824707, 0.70196080207824707), (0.71008402109146118,
0.70588237047195435, 0.70588237047195435), (0.71428573131561279,
0.70980393886566162, 0.70980393886566162), (0.71848738193511963,
0.7137255072593689, 0.7137255072593689), (0.72268909215927124,
0.71764707565307617, 0.71764707565307617), (0.72689074277877808,
0.72549021244049072, 0.72549021244049072), (0.73109245300292969,
0.729411780834198, 0.729411780834198), (0.73529410362243652,
0.73333334922790527, 0.73333334922790527), (0.73949581384658813,
0.73725491762161255, 0.73725491762161255), (0.74369746446609497,
0.74117648601531982, 0.74117648601531982), (0.74789917469024658,
0.7450980544090271, 0.7450980544090271), (0.75210082530975342,
0.74901962280273438, 0.74901962280273438), (0.75630253553390503,
0.75294119119644165, 0.75294119119644165), (0.76050418615341187,
0.75686275959014893, 0.75686275959014893), (0.76470589637756348,
0.7607843279838562, 0.7607843279838562), (0.76890754699707031,
0.76470589637756348, 0.76470589637756348), (0.77310925722122192,
0.76862746477127075, 0.76862746477127075), (0.77731090784072876,
0.77254903316497803, 0.77254903316497803), (0.78151261806488037,
0.7764706015586853, 0.7764706015586853), (0.78571426868438721,
0.78039216995239258, 0.78039216995239258), (0.78991597890853882,
0.78823530673980713, 0.78823530673980713), (0.79411762952804565,
0.7921568751335144, 0.7921568751335144), (0.79831933975219727,
0.79607844352722168, 0.79607844352722168), (0.8025209903717041,
0.80000001192092896, 0.80000001192092896), (0.80672270059585571,
0.80392158031463623, 0.80392158031463623), (0.81092435121536255,
0.80784314870834351, 0.80784314870834351), (0.81512606143951416,
0.81176471710205078, 0.81176471710205078), (0.819327712059021,
0.81568628549575806, 0.81568628549575806), (0.82352942228317261,
0.81960785388946533, 0.81960785388946533), (0.82773107290267944,
0.82352942228317261, 0.82352942228317261), (0.83193278312683105,
0.82745099067687988, 0.82745099067687988), (0.83613443374633789,
0.83137255907058716, 0.83137255907058716), (0.8403361439704895,
0.83529412746429443, 0.83529412746429443), (0.84453779458999634,
0.83921569585800171, 0.83921569585800171), (0.84873950481414795,
0.84313726425170898, 0.84313726425170898), (0.85294115543365479,
0.85098040103912354, 0.85098040103912354), (0.8571428656578064,
0.85490196943283081, 0.85490196943283081), (0.86134451627731323,
0.85882353782653809, 0.85882353782653809), (0.86554622650146484,
0.86274510622024536, 0.86274510622024536), (0.86974787712097168,
0.86666667461395264, 0.86666667461395264), (0.87394958734512329,
0.87058824300765991, 0.87058824300765991), (0.87815123796463013,
0.87450981140136719, 0.87450981140136719), (0.88235294818878174,
0.87843137979507446, 0.87843137979507446), (0.88655459880828857,
0.88235294818878174, 0.88235294818878174), (0.89075630903244019,
0.88627451658248901, 0.88627451658248901), (0.89495795965194702,
0.89019608497619629, 0.89019608497619629), (0.89915966987609863,
0.89411765336990356, 0.89411765336990356), (0.90336132049560547,
0.89803922176361084, 0.89803922176361084), (0.90756303071975708,
0.90196079015731812, 0.90196079015731812), (0.91176468133926392,
0.90588235855102539, 0.90588235855102539), (0.91596639156341553,
0.91372549533843994, 0.91372549533843994), (0.92016804218292236,
0.91764706373214722, 0.91764706373214722), (0.92436975240707397,
0.92156863212585449, 0.92156863212585449), (0.92857140302658081,
0.92549020051956177, 0.92549020051956177), (0.93277311325073242,
0.92941176891326904, 0.92941176891326904), (0.93697476387023926,
0.93333333730697632, 0.93333333730697632), (0.94117647409439087,
0.93725490570068359, 0.93725490570068359), (0.94537812471389771,
0.94117647409439087, 0.94117647409439087), (0.94957983493804932,
0.94509804248809814, 0.94509804248809814), (0.95378148555755615,
0.94901961088180542, 0.94901961088180542), (0.95798319578170776,
0.9529411792755127, 0.9529411792755127), (0.9621848464012146,
0.95686274766921997, 0.95686274766921997), (0.96638655662536621,
0.96078431606292725, 0.96078431606292725), (0.97058820724487305,
0.96470588445663452, 0.96470588445663452), (0.97478991746902466,
0.9686274528503418, 0.9686274528503418), (0.97899156808853149,
0.97647058963775635, 0.97647058963775635), (0.98319327831268311,
0.98039215803146362, 0.98039215803146362), (0.98739492893218994,
0.9843137264251709, 0.9843137264251709), (0.99159663915634155,
0.98823529481887817, 0.98823529481887817), (0.99579828977584839,
0.99215686321258545, 0.99215686321258545), (1.0, 0.99607843160629272,
0.99607843160629272)]}
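# A minimal sketch (not part of woo's API) of how segment data such as
# _gist_gray_data above can be turned into a usable colormap; the helper
# and the cmap name 'gist_gray_example' are hypothetical illustrations.
def _segmentdata_to_cmap(data,name='gist_gray_example',N=256):
	'Build a LinearSegmentedColormap from a segmentdata dict with red/green/blue channels.'
	from matplotlib.colors import LinearSegmentedColormap
	return LinearSegmentedColormap(name,data,N)
# usage (hypothetical): pylab.imshow(img,cmap=_segmentdata_to_cmap(_gist_gray_data))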
_gist_heat_data = {'blue': [(0.0, 0.0, 0.0),
(0.0042016808874905109, 0.0, 0.0), (0.0084033617749810219, 0.0, 0.0),
(0.012605042196810246, 0.0, 0.0), (0.016806723549962044, 0.0, 0.0),
(0.021008403971791267, 0.0, 0.0), (0.025210084393620491, 0.0, 0.0),
(0.029411764815449715, 0.0, 0.0), (0.033613447099924088, 0.0, 0.0),
(0.037815127521753311, 0.0, 0.0), (0.042016807943582535, 0.0, 0.0),
(0.046218488365411758, 0.0, 0.0), (0.050420168787240982, 0.0, 0.0),
(0.054621849209070206, 0.0, 0.0), (0.058823529630899429, 0.0, 0.0),
(0.063025213778018951, 0.0, 0.0), (0.067226894199848175, 0.0, 0.0),
(0.071428574621677399, 0.0, 0.0), (0.075630255043506622, 0.0, 0.0),
(0.079831935465335846, 0.0, 0.0), (0.08403361588716507, 0.0, 0.0),
(0.088235296308994293, 0.0, 0.0), (0.092436976730823517, 0.0, 0.0),
(0.09663865715265274, 0.0, 0.0), (0.10084033757448196, 0.0, 0.0),
(0.10504201799631119, 0.0, 0.0), (0.10924369841814041, 0.0, 0.0),
(0.11344537883996964, 0.0, 0.0), (0.11764705926179886, 0.0, 0.0),
(0.12184873968362808, 0.0, 0.0), (0.1260504275560379, 0.0, 0.0),
(0.13025210797786713, 0.0, 0.0), (0.13445378839969635, 0.0, 0.0),
(0.13865546882152557, 0.0, 0.0), (0.1428571492433548, 0.0, 0.0),
(0.14705882966518402, 0.0, 0.0), (0.15126051008701324, 0.0, 0.0),
(0.15546219050884247, 0.0, 0.0), (0.15966387093067169, 0.0, 0.0),
(0.16386555135250092, 0.0, 0.0), (0.16806723177433014, 0.0, 0.0),
(0.17226891219615936, 0.0, 0.0), (0.17647059261798859, 0.0, 0.0),
(0.18067227303981781, 0.0, 0.0), (0.18487395346164703, 0.0, 0.0),
(0.18907563388347626, 0.0, 0.0), (0.19327731430530548, 0.0, 0.0),
(0.1974789947271347, 0.0, 0.0), (0.20168067514896393, 0.0, 0.0),
(0.20588235557079315, 0.0, 0.0), (0.21008403599262238, 0.0, 0.0),
(0.2142857164144516, 0.0, 0.0), (0.21848739683628082, 0.0, 0.0),
(0.22268907725811005, 0.0, 0.0), (0.22689075767993927, 0.0, 0.0),
(0.23109243810176849, 0.0, 0.0), (0.23529411852359772, 0.0, 0.0),
(0.23949579894542694, 0.0, 0.0), (0.24369747936725616, 0.0, 0.0),
(0.24789915978908539, 0.0, 0.0), (0.25210085511207581, 0.0, 0.0),
(0.25630253553390503, 0.0, 0.0), (0.26050421595573425, 0.0, 0.0),
(0.26470589637756348, 0.0, 0.0), (0.2689075767993927, 0.0, 0.0),
(0.27310925722122192, 0.0, 0.0), (0.27731093764305115, 0.0, 0.0),
(0.28151261806488037, 0.0, 0.0), (0.28571429848670959, 0.0, 0.0),
(0.28991597890853882, 0.0, 0.0), (0.29411765933036804, 0.0, 0.0),
(0.29831933975219727, 0.0, 0.0), (0.30252102017402649, 0.0, 0.0),
(0.30672270059585571, 0.0, 0.0), (0.31092438101768494, 0.0, 0.0),
(0.31512606143951416, 0.0, 0.0), (0.31932774186134338, 0.0, 0.0),
(0.32352942228317261, 0.0, 0.0), (0.32773110270500183, 0.0, 0.0),
(0.33193278312683105, 0.0, 0.0), (0.33613446354866028, 0.0, 0.0),
(0.3403361439704895, 0.0, 0.0), (0.34453782439231873, 0.0, 0.0),
(0.34873950481414795, 0.0, 0.0), (0.35294118523597717, 0.0, 0.0),
(0.3571428656578064, 0.0, 0.0), (0.36134454607963562, 0.0, 0.0),
(0.36554622650146484, 0.0, 0.0), (0.36974790692329407, 0.0, 0.0),
(0.37394958734512329, 0.0, 0.0), (0.37815126776695251, 0.0, 0.0),
(0.38235294818878174, 0.0, 0.0), (0.38655462861061096, 0.0, 0.0),
(0.39075630903244019, 0.0, 0.0), (0.39495798945426941, 0.0, 0.0),
(0.39915966987609863, 0.0, 0.0), (0.40336135029792786, 0.0, 0.0),
(0.40756303071975708, 0.0, 0.0), (0.4117647111415863, 0.0, 0.0),
(0.41596639156341553, 0.0, 0.0), (0.42016807198524475, 0.0, 0.0),
(0.42436975240707397, 0.0, 0.0), (0.4285714328289032, 0.0, 0.0),
(0.43277311325073242, 0.0, 0.0), (0.43697479367256165, 0.0, 0.0),
(0.44117647409439087, 0.0, 0.0), (0.44537815451622009, 0.0, 0.0),
(0.44957983493804932, 0.0, 0.0), (0.45378151535987854, 0.0, 0.0),
(0.45798319578170776, 0.0, 0.0), (0.46218487620353699, 0.0, 0.0),
(0.46638655662536621, 0.0, 0.0), (0.47058823704719543, 0.0, 0.0),
(0.47478991746902466, 0.0, 0.0), (0.47899159789085388, 0.0, 0.0),
(0.48319327831268311, 0.0, 0.0), (0.48739495873451233, 0.0, 0.0),
(0.49159663915634155, 0.0, 0.0), (0.49579831957817078, 0.0, 0.0), (0.5,
0.0, 0.0), (0.50420171022415161, 0.0, 0.0), (0.50840336084365845, 0.0,
0.0), (0.51260507106781006, 0.0, 0.0), (0.51680672168731689, 0.0, 0.0),
(0.52100843191146851, 0.0, 0.0), (0.52521008253097534, 0.0, 0.0),
(0.52941179275512695, 0.0, 0.0), (0.53361344337463379, 0.0, 0.0),
(0.5378151535987854, 0.0, 0.0), (0.54201680421829224, 0.0, 0.0),
(0.54621851444244385, 0.0, 0.0), (0.55042016506195068, 0.0, 0.0),
(0.55462187528610229, 0.0, 0.0), (0.55882352590560913, 0.0, 0.0),
(0.56302523612976074, 0.0, 0.0), (0.56722688674926758, 0.0, 0.0),
(0.57142859697341919, 0.0, 0.0), (0.57563024759292603, 0.0, 0.0),
(0.57983195781707764, 0.0, 0.0), (0.58403360843658447, 0.0, 0.0),
(0.58823531866073608, 0.0, 0.0), (0.59243696928024292, 0.0, 0.0),
(0.59663867950439453, 0.0, 0.0), (0.60084033012390137, 0.0, 0.0),
(0.60504204034805298, 0.0, 0.0), (0.60924369096755981, 0.0, 0.0),
(0.61344540119171143, 0.0, 0.0), (0.61764705181121826, 0.0, 0.0),
(0.62184876203536987, 0.0, 0.0), (0.62605041265487671, 0.0, 0.0),
(0.63025212287902832, 0.0, 0.0), (0.63445377349853516, 0.0, 0.0),
(0.63865548372268677, 0.0, 0.0), (0.6428571343421936, 0.0, 0.0),
(0.64705884456634521, 0.0, 0.0), (0.65126049518585205, 0.0, 0.0),
(0.65546220541000366, 0.0, 0.0), (0.6596638560295105, 0.0, 0.0),
(0.66386556625366211, 0.0, 0.0), (0.66806721687316895, 0.0, 0.0),
(0.67226892709732056, 0.0, 0.0), (0.67647057771682739, 0.0, 0.0),
(0.680672287940979, 0.0, 0.0), (0.68487393856048584, 0.0, 0.0),
(0.68907564878463745, 0.0, 0.0), (0.69327729940414429, 0.0, 0.0),
(0.6974790096282959, 0.0, 0.0), (0.70168066024780273, 0.0, 0.0),
(0.70588237047195435, 0.0, 0.0), (0.71008402109146118, 0.0, 0.0),
(0.71428573131561279, 0.0, 0.0), (0.71848738193511963, 0.0, 0.0),
(0.72268909215927124, 0.0, 0.0), (0.72689074277877808, 0.0, 0.0),
(0.73109245300292969, 0.0, 0.0), (0.73529410362243652, 0.0, 0.0),
(0.73949581384658813, 0.0, 0.0), (0.74369746446609497, 0.0, 0.0),
(0.74789917469024658, 0.0, 0.0), (0.75210082530975342, 0.0, 0.0),
(0.75630253553390503, 0.027450980618596077, 0.027450980618596077),
(0.76050418615341187, 0.043137256056070328, 0.043137256056070328),
(0.76470589637756348, 0.058823529630899429, 0.058823529630899429),
(0.76890754699707031, 0.074509806931018829, 0.074509806931018829),
(0.77310925722122192, 0.090196080505847931, 0.090196080505847931),
(0.77731090784072876, 0.10588235408067703, 0.10588235408067703),
(0.78151261806488037, 0.12156862765550613, 0.12156862765550613),
(0.78571426868438721, 0.13725490868091583, 0.13725490868091583),
(0.78991597890853882, 0.15294118225574493, 0.15294118225574493),
(0.79411762952804565, 0.16862745583057404, 0.16862745583057404),
(0.79831933975219727, 0.20000000298023224, 0.20000000298023224),
(0.8025209903717041, 0.21176470816135406, 0.21176470816135406),
(0.80672270059585571, 0.22745098173618317, 0.22745098173618317),
(0.81092435121536255, 0.24313725531101227, 0.24313725531101227),
(0.81512606143951416, 0.25882354378700256, 0.25882354378700256),
(0.819327712059021, 0.27450981736183167, 0.27450981736183167),
(0.82352942228317261, 0.29019609093666077, 0.29019609093666077),
(0.82773107290267944, 0.30588236451148987, 0.30588236451148987),
(0.83193278312683105, 0.32156863808631897, 0.32156863808631897),
(0.83613443374633789, 0.33725491166114807, 0.33725491166114807),
(0.8403361439704895, 0.35294118523597717, 0.35294118523597717),
(0.84453779458999634, 0.36862745881080627, 0.36862745881080627),
(0.84873950481414795, 0.38431373238563538, 0.38431373238563538),
(0.85294115543365479, 0.40000000596046448, 0.40000000596046448),
(0.8571428656578064, 0.4117647111415863, 0.4117647111415863),
(0.86134451627731323, 0.42745098471641541, 0.42745098471641541),
(0.86554622650146484, 0.44313725829124451, 0.44313725829124451),
(0.86974787712097168, 0.45882353186607361, 0.45882353186607361),
(0.87394958734512329, 0.47450980544090271, 0.47450980544090271),
(0.87815123796463013, 0.49019607901573181, 0.49019607901573181),
(0.88235294818878174, 0.5215686559677124, 0.5215686559677124),
(0.88655459880828857, 0.5372549295425415, 0.5372549295425415),
(0.89075630903244019, 0.55294120311737061, 0.55294120311737061),
(0.89495795965194702, 0.56862747669219971, 0.56862747669219971),
(0.89915966987609863, 0.58431375026702881, 0.58431375026702881),
(0.90336132049560547, 0.60000002384185791, 0.60000002384185791),
(0.90756303071975708, 0.61176472902297974, 0.61176472902297974),
(0.91176468133926392, 0.62745100259780884, 0.62745100259780884),
(0.91596639156341553, 0.64313727617263794, 0.64313727617263794),
(0.92016804218292236, 0.65882354974746704, 0.65882354974746704),
(0.92436975240707397, 0.67450982332229614, 0.67450982332229614),
(0.92857140302658081, 0.69019609689712524, 0.69019609689712524),
(0.93277311325073242, 0.70588237047195435, 0.70588237047195435),
(0.93697476387023926, 0.72156864404678345, 0.72156864404678345),
(0.94117647409439087, 0.73725491762161255, 0.73725491762161255),
(0.94537812471389771, 0.75294119119644165, 0.75294119119644165),
(0.94957983493804932, 0.76862746477127075, 0.76862746477127075),
(0.95378148555755615, 0.78431373834609985, 0.78431373834609985),
(0.95798319578170776, 0.80000001192092896, 0.80000001192092896),
(0.9621848464012146, 0.81176471710205078, 0.81176471710205078),
(0.96638655662536621, 0.84313726425170898, 0.84313726425170898),
(0.97058820724487305, 0.85882353782653809, 0.85882353782653809),
(0.97478991746902466, 0.87450981140136719, 0.87450981140136719),
(0.97899156808853149, 0.89019608497619629, 0.89019608497619629),
(0.98319327831268311, 0.90588235855102539, 0.90588235855102539),
(0.98739492893218994, 0.92156863212585449, 0.92156863212585449),
(0.99159663915634155, 0.93725490570068359, 0.93725490570068359),
(0.99579828977584839, 0.9529411792755127, 0.9529411792755127), (1.0,
0.9686274528503418, 0.9686274528503418)], 'green': [(0.0, 0.0, 0.0),
(0.0042016808874905109, 0.0, 0.0), (0.0084033617749810219, 0.0, 0.0),
(0.012605042196810246, 0.0, 0.0), (0.016806723549962044, 0.0, 0.0),
(0.021008403971791267, 0.0, 0.0), (0.025210084393620491, 0.0, 0.0),
(0.029411764815449715, 0.0, 0.0), (0.033613447099924088, 0.0, 0.0),
(0.037815127521753311, 0.0, 0.0), (0.042016807943582535, 0.0, 0.0),
(0.046218488365411758, 0.0, 0.0), (0.050420168787240982, 0.0, 0.0),
(0.054621849209070206, 0.0, 0.0), (0.058823529630899429, 0.0, 0.0),
(0.063025213778018951, 0.0, 0.0), (0.067226894199848175, 0.0, 0.0),
(0.071428574621677399, 0.0, 0.0), (0.075630255043506622, 0.0, 0.0),
(0.079831935465335846, 0.0, 0.0), (0.08403361588716507, 0.0, 0.0),
(0.088235296308994293, 0.0, 0.0), (0.092436976730823517, 0.0, 0.0),
(0.09663865715265274, 0.0, 0.0), (0.10084033757448196, 0.0, 0.0),
(0.10504201799631119, 0.0, 0.0), (0.10924369841814041, 0.0, 0.0),
(0.11344537883996964, 0.0, 0.0), (0.11764705926179886, 0.0, 0.0),
(0.12184873968362808, 0.0, 0.0), (0.1260504275560379, 0.0, 0.0),
(0.13025210797786713, 0.0, 0.0), (0.13445378839969635, 0.0, 0.0),
(0.13865546882152557, 0.0, 0.0), (0.1428571492433548, 0.0, 0.0),
(0.14705882966518402, 0.0, 0.0), (0.15126051008701324, 0.0, 0.0),
(0.15546219050884247, 0.0, 0.0), (0.15966387093067169, 0.0, 0.0),
(0.16386555135250092, 0.0, 0.0), (0.16806723177433014, 0.0, 0.0),
(0.17226891219615936, 0.0, 0.0), (0.17647059261798859, 0.0, 0.0),
(0.18067227303981781, 0.0, 0.0), (0.18487395346164703, 0.0, 0.0),
(0.18907563388347626, 0.0, 0.0), (0.19327731430530548, 0.0, 0.0),
(0.1974789947271347, 0.0, 0.0), (0.20168067514896393, 0.0, 0.0),
(0.20588235557079315, 0.0, 0.0), (0.21008403599262238, 0.0, 0.0),
(0.2142857164144516, 0.0, 0.0), (0.21848739683628082, 0.0, 0.0),
(0.22268907725811005, 0.0, 0.0), (0.22689075767993927, 0.0, 0.0),
(0.23109243810176849, 0.0, 0.0), (0.23529411852359772, 0.0, 0.0),
(0.23949579894542694, 0.0, 0.0), (0.24369747936725616, 0.0, 0.0),
(0.24789915978908539, 0.0, 0.0), (0.25210085511207581, 0.0, 0.0),
(0.25630253553390503, 0.0, 0.0), (0.26050421595573425, 0.0, 0.0),
(0.26470589637756348, 0.0, 0.0), (0.2689075767993927, 0.0, 0.0),
(0.27310925722122192, 0.0, 0.0), (0.27731093764305115, 0.0, 0.0),
(0.28151261806488037, 0.0, 0.0), (0.28571429848670959, 0.0, 0.0),
(0.28991597890853882, 0.0, 0.0), (0.29411765933036804, 0.0, 0.0),
(0.29831933975219727, 0.0, 0.0), (0.30252102017402649, 0.0, 0.0),
(0.30672270059585571, 0.0, 0.0), (0.31092438101768494, 0.0, 0.0),
(0.31512606143951416, 0.0, 0.0), (0.31932774186134338, 0.0, 0.0),
(0.32352942228317261, 0.0, 0.0), (0.32773110270500183, 0.0, 0.0),
(0.33193278312683105, 0.0, 0.0), (0.33613446354866028, 0.0, 0.0),
(0.3403361439704895, 0.0, 0.0), (0.34453782439231873, 0.0, 0.0),
(0.34873950481414795, 0.0, 0.0), (0.35294118523597717, 0.0, 0.0),
(0.3571428656578064, 0.0, 0.0), (0.36134454607963562, 0.0, 0.0),
(0.36554622650146484, 0.0, 0.0), (0.36974790692329407, 0.0, 0.0),
(0.37394958734512329, 0.0, 0.0), (0.37815126776695251, 0.0, 0.0),
(0.38235294818878174, 0.0, 0.0), (0.38655462861061096, 0.0, 0.0),
(0.39075630903244019, 0.0, 0.0), (0.39495798945426941, 0.0, 0.0),
(0.39915966987609863, 0.0, 0.0), (0.40336135029792786, 0.0, 0.0),
(0.40756303071975708, 0.0, 0.0), (0.4117647111415863, 0.0, 0.0),
(0.41596639156341553, 0.0, 0.0), (0.42016807198524475, 0.0, 0.0),
(0.42436975240707397, 0.0, 0.0), (0.4285714328289032, 0.0, 0.0),
(0.43277311325073242, 0.0, 0.0), (0.43697479367256165, 0.0, 0.0),
(0.44117647409439087, 0.0, 0.0), (0.44537815451622009, 0.0, 0.0),
(0.44957983493804932, 0.0, 0.0), (0.45378151535987854, 0.0, 0.0),
(0.45798319578170776, 0.0, 0.0), (0.46218487620353699, 0.0, 0.0),
(0.46638655662536621, 0.0, 0.0), (0.47058823704719543, 0.0, 0.0),
(0.47478991746902466, 0.0, 0.0), (0.47899159789085388,
0.0039215688593685627, 0.0039215688593685627), (0.48319327831268311,
0.011764706112444401, 0.011764706112444401), (0.48739495873451233,
0.019607843831181526, 0.019607843831181526), (0.49159663915634155,
0.027450980618596077, 0.027450980618596077), (0.49579831957817078,
0.035294119268655777, 0.035294119268655777), (0.5, 0.043137256056070328,
0.043137256056070328), (0.50420171022415161, 0.058823529630899429,
0.058823529630899429), (0.50840336084365845, 0.066666670143604279,
0.066666670143604279), (0.51260507106781006, 0.070588238537311554,
0.070588238537311554), (0.51680672168731689, 0.078431375324726105,
0.078431375324726105), (0.52100843191146851, 0.086274512112140656,
0.086274512112140656), (0.52521008253097534, 0.094117648899555206,
0.094117648899555206), (0.52941179275512695, 0.10196078568696976,
0.10196078568696976), (0.53361344337463379, 0.10980392247438431,
0.10980392247438431), (0.5378151535987854, 0.11764705926179886,
0.11764705926179886), (0.54201680421829224, 0.12549020349979401,
0.12549020349979401), (0.54621851444244385, 0.13725490868091583,
0.13725490868091583), (0.55042016506195068, 0.14509804546833038,
0.14509804546833038), (0.55462187528610229, 0.15294118225574493,
0.15294118225574493), (0.55882352590560913, 0.16078431904315948,
0.16078431904315948), (0.56302523612976074, 0.16862745583057404,
0.16862745583057404), (0.56722688674926758, 0.17647059261798859,
0.17647059261798859), (0.57142859697341919, 0.18431372940540314,
0.18431372940540314), (0.57563024759292603, 0.19215686619281769,
0.19215686619281769), (0.57983195781707764, 0.20000000298023224,
0.20000000298023224), (0.58403360843658447, 0.20392157137393951,
0.20392157137393951), (0.58823531866073608, 0.21176470816135406,
0.21176470816135406), (0.59243696928024292, 0.21960784494876862,
0.21960784494876862), (0.59663867950439453, 0.22745098173618317,
0.22745098173618317), (0.60084033012390137, 0.23529411852359772,
0.23529411852359772), (0.60504204034805298, 0.24313725531101227,
0.24313725531101227), (0.60924369096755981, 0.25098040699958801,
0.25098040699958801), (0.61344540119171143, 0.25882354378700256,
0.25882354378700256), (0.61764705181121826, 0.26666668057441711,
0.26666668057441711), (0.62184876203536987, 0.27058824896812439,
0.27058824896812439), (0.62605041265487671, 0.27843138575553894,
0.27843138575553894), (0.63025212287902832, 0.29411765933036804,
0.29411765933036804), (0.63445377349853516, 0.30196079611778259,
0.30196079611778259), (0.63865548372268677, 0.30980393290519714,
0.30980393290519714), (0.6428571343421936, 0.31764706969261169,
0.31764706969261169), (0.64705884456634521, 0.32549020648002625,
0.32549020648002625), (0.65126049518585205, 0.3333333432674408,
0.3333333432674408), (0.65546220541000366, 0.33725491166114807,
0.33725491166114807), (0.6596638560295105, 0.34509804844856262,
0.34509804844856262), (0.66386556625366211, 0.35294118523597717,
0.35294118523597717), (0.66806721687316895, 0.36078432202339172,
0.36078432202339172), (0.67226892709732056, 0.36862745881080627,
0.36862745881080627), (0.67647057771682739, 0.37647059559822083,
0.37647059559822083), (0.680672287940979, 0.38431373238563538,
0.38431373238563538), (0.68487393856048584, 0.39215686917304993,
0.39215686917304993), (0.68907564878463745, 0.40000000596046448,
0.40000000596046448), (0.69327729940414429, 0.40392157435417175,
0.40392157435417175), (0.6974790096282959, 0.4117647111415863,
0.4117647111415863), (0.70168066024780273, 0.41960784792900085,
0.41960784792900085), (0.70588237047195435, 0.42745098471641541,
0.42745098471641541), (0.71008402109146118, 0.43529412150382996,
0.43529412150382996), (0.71428573131561279, 0.45098039507865906,
0.45098039507865906), (0.71848738193511963, 0.45882353186607361,
0.45882353186607361), (0.72268909215927124, 0.46666666865348816,
0.46666666865348816), (0.72689074277877808, 0.47058823704719543,
0.47058823704719543), (0.73109245300292969, 0.47843137383460999,
0.47843137383460999), (0.73529410362243652, 0.48627451062202454,
0.48627451062202454), (0.73949581384658813, 0.49411764740943909,
0.49411764740943909), (0.74369746446609497, 0.50196081399917603,
0.50196081399917603), (0.74789917469024658, 0.50980395078659058,
0.50980395078659058), (0.75210082530975342, 0.51764708757400513,
0.51764708757400513), (0.75630253553390503, 0.53333336114883423,
0.53333336114883423), (0.76050418615341187, 0.5372549295425415,
0.5372549295425415), (0.76470589637756348, 0.54509806632995605,
0.54509806632995605), (0.76890754699707031, 0.55294120311737061,
0.55294120311737061), (0.77310925722122192, 0.56078433990478516,
0.56078433990478516), (0.77731090784072876, 0.56862747669219971,
0.56862747669219971), (0.78151261806488037, 0.57647061347961426,
0.57647061347961426), (0.78571426868438721, 0.58431375026702881,
0.58431375026702881), (0.78991597890853882, 0.59215688705444336,
0.59215688705444336), (0.79411762952804565, 0.60000002384185791,
0.60000002384185791), (0.79831933975219727, 0.61176472902297974,
0.61176472902297974), (0.8025209903717041, 0.61960786581039429,
0.61960786581039429), (0.80672270059585571, 0.62745100259780884,
0.62745100259780884), (0.81092435121536255, 0.63529413938522339,
0.63529413938522339), (0.81512606143951416, 0.64313727617263794,
0.64313727617263794), (0.819327712059021, 0.65098041296005249,
0.65098041296005249), (0.82352942228317261, 0.65882354974746704,
0.65882354974746704), (0.82773107290267944, 0.66666668653488159,
0.66666668653488159), (0.83193278312683105, 0.67058825492858887,
0.67058825492858887), (0.83613443374633789, 0.67843139171600342,
0.67843139171600342), (0.8403361439704895, 0.68627452850341797,
0.68627452850341797), (0.84453779458999634, 0.69411766529083252,
0.69411766529083252), (0.84873950481414795, 0.70196080207824707,
0.70196080207824707), (0.85294115543365479, 0.70980393886566162,
0.70980393886566162), (0.8571428656578064, 0.71764707565307617,
0.71764707565307617), (0.86134451627731323, 0.72549021244049072,
0.72549021244049072), (0.86554622650146484, 0.73333334922790527,
0.73333334922790527), (0.86974787712097168, 0.73725491762161255,
0.73725491762161255), (0.87394958734512329, 0.7450980544090271,
0.7450980544090271), (0.87815123796463013, 0.75294119119644165,
0.75294119119644165), (0.88235294818878174, 0.76862746477127075,
0.76862746477127075), (0.88655459880828857, 0.7764706015586853,
0.7764706015586853), (0.89075630903244019, 0.78431373834609985,
0.78431373834609985), (0.89495795965194702, 0.7921568751335144,
0.7921568751335144), (0.89915966987609863, 0.80000001192092896,
0.80000001192092896), (0.90336132049560547, 0.80392158031463623,
0.80392158031463623), (0.90756303071975708, 0.81176471710205078,
0.81176471710205078), (0.91176468133926392, 0.81960785388946533,
0.81960785388946533), (0.91596639156341553, 0.82745099067687988,
0.82745099067687988), (0.92016804218292236, 0.83529412746429443,
0.83529412746429443), (0.92436975240707397, 0.84313726425170898,
0.84313726425170898), (0.92857140302658081, 0.85098040103912354,
0.85098040103912354), (0.93277311325073242, 0.85882353782653809,
0.85882353782653809), (0.93697476387023926, 0.86666667461395264,
0.86666667461395264), (0.94117647409439087, 0.87058824300765991,
0.87058824300765991), (0.94537812471389771, 0.87843137979507446,
0.87843137979507446), (0.94957983493804932, 0.88627451658248901,
0.88627451658248901), (0.95378148555755615, 0.89411765336990356,
0.89411765336990356), (0.95798319578170776, 0.90196079015731812,
0.90196079015731812), (0.9621848464012146, 0.90980392694473267,
0.90980392694473267), (0.96638655662536621, 0.92549020051956177,
0.92549020051956177), (0.97058820724487305, 0.93333333730697632,
0.93333333730697632), (0.97478991746902466, 0.93725490570068359,
0.93725490570068359), (0.97899156808853149, 0.94509804248809814,
0.94509804248809814), (0.98319327831268311, 0.9529411792755127,
0.9529411792755127), (0.98739492893218994, 0.96078431606292725,
0.96078431606292725), (0.99159663915634155, 0.9686274528503418,
0.9686274528503418), (0.99579828977584839, 0.97647058963775635,
0.97647058963775635), (1.0, 0.9843137264251709, 0.9843137264251709)],
'red': [(0.0, 0.0, 0.0), (0.0042016808874905109, 0.0039215688593685627,
0.0039215688593685627), (0.0084033617749810219, 0.0078431377187371254,
0.0078431377187371254), (0.012605042196810246, 0.015686275437474251,
0.015686275437474251), (0.016806723549962044, 0.019607843831181526,
0.019607843831181526), (0.021008403971791267, 0.027450980618596077,
0.027450980618596077), (0.025210084393620491, 0.031372550874948502,
0.031372550874948502), (0.029411764815449715, 0.039215687662363052,
0.039215687662363052), (0.033613447099924088, 0.043137256056070328,
0.043137256056070328), (0.037815127521753311, 0.050980392843484879,
0.050980392843484879), (0.042016807943582535, 0.058823529630899429,
0.058823529630899429), (0.046218488365411758, 0.066666670143604279,
0.066666670143604279), (0.050420168787240982, 0.070588238537311554,
0.070588238537311554), (0.054621849209070206, 0.078431375324726105,
0.078431375324726105), (0.058823529630899429, 0.08235294371843338,
0.08235294371843338), (0.063025213778018951, 0.090196080505847931,
0.090196080505847931), (0.067226894199848175, 0.094117648899555206,
0.094117648899555206), (0.071428574621677399, 0.10196078568696976,
0.10196078568696976), (0.075630255043506622, 0.10588235408067703,
0.10588235408067703), (0.079831935465335846, 0.10980392247438431,
0.10980392247438431), (0.08403361588716507, 0.11764705926179886,
0.11764705926179886), (0.088235296308994293, 0.12156862765550613,
0.12156862765550613), (0.092436976730823517, 0.12941177189350128,
0.12941177189350128), (0.09663865715265274, 0.13333334028720856,
0.13333334028720856), (0.10084033757448196, 0.14117647707462311,
0.14117647707462311), (0.10504201799631119, 0.14509804546833038,
0.14509804546833038), (0.10924369841814041, 0.15294118225574493,
0.15294118225574493), (0.11344537883996964, 0.15686275064945221,
0.15686275064945221), (0.11764705926179886, 0.16470588743686676,
0.16470588743686676), (0.12184873968362808, 0.16862745583057404,
0.16862745583057404), (0.1260504275560379, 0.18039216101169586,
0.18039216101169586), (0.13025210797786713, 0.18431372940540314,
0.18431372940540314), (0.13445378839969635, 0.19215686619281769,
0.19215686619281769), (0.13865546882152557, 0.19607843458652496,
0.19607843458652496), (0.1428571492433548, 0.20392157137393951,
0.20392157137393951), (0.14705882966518402, 0.20784313976764679,
0.20784313976764679), (0.15126051008701324, 0.21568627655506134,
0.21568627655506134), (0.15546219050884247, 0.21960784494876862,
0.21960784494876862), (0.15966387093067169, 0.22352941334247589,
0.22352941334247589), (0.16386555135250092, 0.23137255012989044,
0.23137255012989044), (0.16806723177433014, 0.23529411852359772,
0.23529411852359772), (0.17226891219615936, 0.24313725531101227,
0.24313725531101227), (0.17647059261798859, 0.24705882370471954,
0.24705882370471954), (0.18067227303981781, 0.25490197539329529,
0.25490197539329529), (0.18487395346164703, 0.25882354378700256,
0.25882354378700256), (0.18907563388347626, 0.26666668057441711,
0.26666668057441711), (0.19327731430530548, 0.27058824896812439,
0.27058824896812439), (0.1974789947271347, 0.27450981736183167,
0.27450981736183167), (0.20168067514896393, 0.28235295414924622,
0.28235295414924622), (0.20588235557079315, 0.28627452254295349,
0.28627452254295349), (0.21008403599262238, 0.29803922772407532,
0.29803922772407532), (0.2142857164144516, 0.30588236451148987,
0.30588236451148987), (0.21848739683628082, 0.30980393290519714,
0.30980393290519714), (0.22268907725811005, 0.31764706969261169,
0.31764706969261169), (0.22689075767993927, 0.32156863808631897,
0.32156863808631897), (0.23109243810176849, 0.32941177487373352,
0.32941177487373352), (0.23529411852359772, 0.3333333432674408,
0.3333333432674408), (0.23949579894542694, 0.33725491166114807,
0.33725491166114807), (0.24369747936725616, 0.34509804844856262,
0.34509804844856262), (0.24789915978908539, 0.3490196168422699,
0.3490196168422699), (0.25210085511207581, 0.36078432202339172,
0.36078432202339172), (0.25630253553390503, 0.36862745881080627,
0.36862745881080627), (0.26050421595573425, 0.37254902720451355,
0.37254902720451355), (0.26470589637756348, 0.3803921639919281,
0.3803921639919281), (0.2689075767993927, 0.38431373238563538,
0.38431373238563538), (0.27310925722122192, 0.38823530077934265,
0.38823530077934265), (0.27731093764305115, 0.3960784375667572,
0.3960784375667572), (0.28151261806488037, 0.40000000596046448,
0.40000000596046448), (0.28571429848670959, 0.40784314274787903,
0.40784314274787903), (0.28991597890853882, 0.4117647111415863,
0.4117647111415863), (0.29411765933036804, 0.42352941632270813,
0.42352941632270813), (0.29831933975219727, 0.43137255311012268,
0.43137255311012268), (0.30252102017402649, 0.43529412150382996,
0.43529412150382996), (0.30672270059585571, 0.44313725829124451,
0.44313725829124451), (0.31092438101768494, 0.44705882668495178,
0.44705882668495178), (0.31512606143951416, 0.45098039507865906,
0.45098039507865906), (0.31932774186134338, 0.45882353186607361,
0.45882353186607361), (0.32352942228317261, 0.46274510025978088,
0.46274510025978088), (0.32773110270500183, 0.47058823704719543,
0.47058823704719543), (0.33193278312683105, 0.47450980544090271,
0.47450980544090271), (0.33613446354866028, 0.48235294222831726,
0.48235294222831726), (0.3403361439704895, 0.48627451062202454,
0.48627451062202454), (0.34453782439231873, 0.49411764740943909,
0.49411764740943909), (0.34873950481414795, 0.49803921580314636,
0.49803921580314636), (0.35294118523597717, 0.50196081399917603,
0.50196081399917603), (0.3571428656578064, 0.50980395078659058,
0.50980395078659058), (0.36134454607963562, 0.51372551918029785,
0.51372551918029785), (0.36554622650146484, 0.5215686559677124,
0.5215686559677124), (0.36974790692329407, 0.52549022436141968,
0.52549022436141968), (0.37394958734512329, 0.53333336114883423,
0.53333336114883423), (0.37815126776695251, 0.54509806632995605,
0.54509806632995605), (0.38235294818878174, 0.54901963472366333,
0.54901963472366333), (0.38655462861061096, 0.55294120311737061,
0.55294120311737061), (0.39075630903244019, 0.56078433990478516,
0.56078433990478516), (0.39495798945426941, 0.56470590829849243,
0.56470590829849243), (0.39915966987609863, 0.57254904508590698,
0.57254904508590698), (0.40336135029792786, 0.57647061347961426,
0.57647061347961426), (0.40756303071975708, 0.58431375026702881,
0.58431375026702881), (0.4117647111415863, 0.58823531866073608,
0.58823531866073608), (0.41596639156341553, 0.59607845544815063,
0.59607845544815063), (0.42016807198524475, 0.60000002384185791,
0.60000002384185791), (0.42436975240707397, 0.60784316062927246,
0.60784316062927246), (0.4285714328289032, 0.61176472902297974,
0.61176472902297974), (0.43277311325073242, 0.61568629741668701,
0.61568629741668701), (0.43697479367256165, 0.62352943420410156,
0.62352943420410156), (0.44117647409439087, 0.62745100259780884,
0.62745100259780884), (0.44537815451622009, 0.63529413938522339,
0.63529413938522339), (0.44957983493804932, 0.63921570777893066,
0.63921570777893066), (0.45378151535987854, 0.64705884456634521,
0.64705884456634521), (0.45798319578170776, 0.65098041296005249,
0.65098041296005249), (0.46218487620353699, 0.66274511814117432,
0.66274511814117432), (0.46638655662536621, 0.66666668653488159,
0.66666668653488159), (0.47058823704719543, 0.67450982332229614,
0.67450982332229614), (0.47478991746902466, 0.67843139171600342,
0.67843139171600342), (0.47899159789085388, 0.68627452850341797,
0.68627452850341797), (0.48319327831268311, 0.69019609689712524,
0.69019609689712524), (0.48739495873451233, 0.69803923368453979,
0.69803923368453979), (0.49159663915634155, 0.70196080207824707,
0.70196080207824707), (0.49579831957817078, 0.70980393886566162,
0.70980393886566162), (0.5, 0.7137255072593689, 0.7137255072593689),
(0.50420171022415161, 0.72549021244049072, 0.72549021244049072),
(0.50840336084365845, 0.729411780834198, 0.729411780834198),
(0.51260507106781006, 0.73725491762161255, 0.73725491762161255),
(0.51680672168731689, 0.74117648601531982, 0.74117648601531982),
(0.52100843191146851, 0.74901962280273438, 0.74901962280273438),
(0.52521008253097534, 0.75294119119644165, 0.75294119119644165),
(0.52941179275512695, 0.7607843279838562, 0.7607843279838562),
(0.53361344337463379, 0.76470589637756348, 0.76470589637756348),
(0.5378151535987854, 0.77254903316497803, 0.77254903316497803),
(0.54201680421829224, 0.7764706015586853, 0.7764706015586853),
(0.54621851444244385, 0.78823530673980713, 0.78823530673980713),
(0.55042016506195068, 0.7921568751335144, 0.7921568751335144),
(0.55462187528610229, 0.80000001192092896, 0.80000001192092896),
(0.55882352590560913, 0.80392158031463623, 0.80392158031463623),
(0.56302523612976074, 0.81176471710205078, 0.81176471710205078),
(0.56722688674926758, 0.81568628549575806, 0.81568628549575806),
(0.57142859697341919, 0.82352942228317261, 0.82352942228317261),
(0.57563024759292603, 0.82745099067687988, 0.82745099067687988),
(0.57983195781707764, 0.83137255907058716, 0.83137255907058716),
(0.58403360843658447, 0.83921569585800171, 0.83921569585800171),
(0.58823531866073608, 0.84313726425170898, 0.84313726425170898),
(0.59243696928024292, 0.85098040103912354, 0.85098040103912354),
(0.59663867950439453, 0.85490196943283081, 0.85490196943283081),
(0.60084033012390137, 0.86274510622024536, 0.86274510622024536),
(0.60504204034805298, 0.86666667461395264, 0.86666667461395264),
(0.60924369096755981, 0.87450981140136719, 0.87450981140136719),
(0.61344540119171143, 0.87843137979507446, 0.87843137979507446),
(0.61764705181121826, 0.88627451658248901, 0.88627451658248901),
(0.62184876203536987, 0.89019608497619629, 0.89019608497619629),
(0.62605041265487671, 0.89411765336990356, 0.89411765336990356),
(0.63025212287902832, 0.90588235855102539, 0.90588235855102539),
(0.63445377349853516, 0.91372549533843994, 0.91372549533843994),
(0.63865548372268677, 0.91764706373214722, 0.91764706373214722),
(0.6428571343421936, 0.92549020051956177, 0.92549020051956177),
(0.64705884456634521, 0.92941176891326904, 0.92941176891326904),
(0.65126049518585205, 0.93725490570068359, 0.93725490570068359),
(0.65546220541000366, 0.94117647409439087, 0.94117647409439087),
(0.6596638560295105, 0.94509804248809814, 0.94509804248809814),
(0.66386556625366211, 0.9529411792755127, 0.9529411792755127),
(0.66806721687316895, 0.95686274766921997, 0.95686274766921997),
(0.67226892709732056, 0.96470588445663452, 0.96470588445663452),
(0.67647057771682739, 0.9686274528503418, 0.9686274528503418),
(0.680672287940979, 0.97647058963775635, 0.97647058963775635),
(0.68487393856048584, 0.98039215803146362, 0.98039215803146362),
(0.68907564878463745, 0.98823529481887817, 0.98823529481887817),
(0.69327729940414429, 0.99215686321258545, 0.99215686321258545),
(0.6974790096282959, 1.0, 1.0), (0.70168066024780273, 1.0, 1.0),
(0.70588237047195435, 1.0, 1.0), (0.71008402109146118, 1.0, 1.0),
(0.71428573131561279, 1.0, 1.0), (0.71848738193511963, 1.0, 1.0),
(0.72268909215927124, 1.0, 1.0), (0.72689074277877808, 1.0, 1.0),
(0.73109245300292969, 1.0, 1.0), (0.73529410362243652, 1.0, 1.0),
(0.73949581384658813, 1.0, 1.0), (0.74369746446609497, 1.0, 1.0),
(0.74789917469024658, 1.0, 1.0), (0.75210082530975342, 1.0, 1.0),
(0.75630253553390503, 1.0, 1.0), (0.76050418615341187, 1.0, 1.0),
(0.76470589637756348, 1.0, 1.0), (0.76890754699707031, 1.0, 1.0),
(0.77310925722122192, 1.0, 1.0), (0.77731090784072876, 1.0, 1.0),
(0.78151261806488037, 1.0, 1.0), (0.78571426868438721, 1.0, 1.0),
(0.78991597890853882, 1.0, 1.0), (0.79411762952804565, 1.0, 1.0),
(0.79831933975219727, 1.0, 1.0), (0.8025209903717041, 1.0, 1.0),
(0.80672270059585571, 1.0, 1.0), (0.81092435121536255, 1.0, 1.0),
(0.81512606143951416, 1.0, 1.0), (0.819327712059021, 1.0, 1.0),
(0.82352942228317261, 1.0, 1.0), (0.82773107290267944, 1.0, 1.0),
(0.83193278312683105, 1.0, 1.0), (0.83613443374633789, 1.0, 1.0),
(0.8403361439704895, 1.0, 1.0), (0.84453779458999634, 1.0, 1.0),
(0.84873950481414795, 1.0, 1.0), (0.85294115543365479, 1.0, 1.0),
(0.8571428656578064, 1.0, 1.0), (0.86134451627731323, 1.0, 1.0),
(0.86554622650146484, 1.0, 1.0), (0.86974787712097168, 1.0, 1.0),
(0.87394958734512329, 1.0, 1.0), (0.87815123796463013, 1.0, 1.0),
(0.88235294818878174, 1.0, 1.0), (0.88655459880828857, 1.0, 1.0),
(0.89075630903244019, 1.0, 1.0), (0.89495795965194702, 1.0, 1.0),
(0.89915966987609863, 1.0, 1.0), (0.90336132049560547, 1.0, 1.0),
(0.90756303071975708, 1.0, 1.0), (0.91176468133926392, 1.0, 1.0),
(0.91596639156341553, 1.0, 1.0), (0.92016804218292236, 1.0, 1.0),
(0.92436975240707397, 1.0, 1.0), (0.92857140302658081, 1.0, 1.0),
(0.93277311325073242, 1.0, 1.0), (0.93697476387023926, 1.0, 1.0),
(0.94117647409439087, 1.0, 1.0), (0.94537812471389771, 1.0, 1.0),
(0.94957983493804932, 1.0, 1.0), (0.95378148555755615, 1.0, 1.0),
(0.95798319578170776, 1.0, 1.0), (0.9621848464012146, 1.0, 1.0),
(0.96638655662536621, 1.0, 1.0), (0.97058820724487305, 1.0, 1.0),
(0.97478991746902466, 1.0, 1.0), (0.97899156808853149, 1.0, 1.0),
(0.98319327831268311, 1.0, 1.0), (0.98739492893218994, 1.0, 1.0),
(0.99159663915634155, 1.0, 1.0), (0.99579828977584839, 1.0, 1.0), (1.0,
1.0, 1.0)]}
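# The colormap tables in this file are stored in matplotlib's "segmentdata"
# format: for each channel ('red', 'green', 'blue') a list of (x, y0, y1)
# tuples with x increasing monotonically from 0.0 to 1.0; y0 is the channel
# value approached from below x and y1 the value at/above x, which allows
# discontinuous jumps when y0 != y1. The entries here all have y0 == y1,
# i.e. plain piecewise-linear channels.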
_gist_ncar_data = {'blue': [(0.0, 0.50196081399917603,
0.50196081399917603), (0.0050505050458014011, 0.45098039507865906,
0.45098039507865906), (0.010101010091602802, 0.40392157435417175,
0.40392157435417175), (0.015151515603065491, 0.35686275362968445,
0.35686275362968445), (0.020202020183205605, 0.30980393290519714,
0.30980393290519714), (0.025252524763345718, 0.25882354378700256,
0.25882354378700256), (0.030303031206130981, 0.21176470816135406,
0.21176470816135406), (0.035353533923625946, 0.16470588743686676,
0.16470588743686676), (0.040404040366411209, 0.11764705926179886,
0.11764705926179886), (0.045454546809196472, 0.070588238537311554,
0.070588238537311554), (0.050505049526691437, 0.019607843831181526,
0.019607843831181526), (0.0555555559694767, 0.047058824449777603,
0.047058824449777603), (0.060606062412261963, 0.14509804546833038,
0.14509804546833038), (0.065656565129756927, 0.23921568691730499,
0.23921568691730499), (0.070707067847251892, 0.3333333432674408,
0.3333333432674408), (0.075757578015327454, 0.43137255311012268,
0.43137255311012268), (0.080808080732822418, 0.52549022436141968,
0.52549022436141968), (0.085858583450317383, 0.61960786581039429,
0.61960786581039429), (0.090909093618392944, 0.71764707565307617,
0.71764707565307617), (0.095959596335887909, 0.81176471710205078,
0.81176471710205078), (0.10101009905338287, 0.90588235855102539,
0.90588235855102539), (0.10606060922145844, 1.0, 1.0),
(0.1111111119389534, 1.0, 1.0), (0.11616161465644836, 1.0, 1.0),
(0.12121212482452393, 1.0, 1.0), (0.12626262009143829, 1.0, 1.0),
(0.13131313025951385, 1.0, 1.0), (0.13636364042758942, 1.0, 1.0),
(0.14141413569450378, 1.0, 1.0), (0.14646464586257935, 1.0, 1.0),
(0.15151515603065491, 1.0, 1.0), (0.15656565129756927, 1.0, 1.0),
(0.16161616146564484, 1.0, 1.0), (0.1666666716337204, 1.0, 1.0),
(0.17171716690063477, 1.0, 1.0), (0.17676767706871033, 1.0, 1.0),
(0.18181818723678589, 1.0, 1.0), (0.18686868250370026, 1.0, 1.0),
(0.19191919267177582, 1.0, 1.0), (0.19696970283985138, 1.0, 1.0),
(0.20202019810676575, 1.0, 1.0), (0.20707070827484131, 1.0, 1.0),
(0.21212121844291687, 0.99215686321258545, 0.99215686321258545),
(0.21717171370983124, 0.95686274766921997, 0.95686274766921997),
(0.2222222238779068, 0.91764706373214722, 0.91764706373214722),
(0.22727273404598236, 0.88235294818878174, 0.88235294818878174),
(0.23232322931289673, 0.84313726425170898, 0.84313726425170898),
(0.23737373948097229, 0.80392158031463623, 0.80392158031463623),
(0.24242424964904785, 0.76862746477127075, 0.76862746477127075),
(0.24747474491596222, 0.729411780834198, 0.729411780834198),
(0.25252524018287659, 0.69019609689712524, 0.69019609689712524),
(0.25757575035095215, 0.65490198135375977, 0.65490198135375977),
(0.26262626051902771, 0.61568629741668701, 0.61568629741668701),
(0.26767677068710327, 0.56470590829849243, 0.56470590829849243),
(0.27272728085517883, 0.50980395078659058, 0.50980395078659058),
(0.27777779102325439, 0.45098039507865906, 0.45098039507865906),
(0.28282827138900757, 0.39215686917304993, 0.39215686917304993),
(0.28787878155708313, 0.3333333432674408, 0.3333333432674408),
(0.29292929172515869, 0.27843138575553894, 0.27843138575553894),
(0.29797980189323425, 0.21960784494876862, 0.21960784494876862),
(0.30303031206130981, 0.16078431904315948, 0.16078431904315948),
(0.30808082222938538, 0.10588235408067703, 0.10588235408067703),
(0.31313130259513855, 0.047058824449777603, 0.047058824449777603),
(0.31818181276321411, 0.0, 0.0), (0.32323232293128967, 0.0, 0.0),
(0.32828283309936523, 0.0, 0.0), (0.3333333432674408, 0.0, 0.0),
(0.33838382363319397, 0.0, 0.0), (0.34343433380126953, 0.0, 0.0),
(0.34848484396934509, 0.0, 0.0), (0.35353535413742065, 0.0, 0.0),
(0.35858586430549622, 0.0, 0.0), (0.36363637447357178, 0.0, 0.0),
(0.36868685483932495, 0.0, 0.0), (0.37373736500740051, 0.0, 0.0),
(0.37878787517547607, 0.0, 0.0), (0.38383838534355164, 0.0, 0.0),
(0.3888888955116272, 0.0, 0.0), (0.39393940567970276, 0.0, 0.0),
(0.39898988604545593, 0.0, 0.0), (0.40404039621353149, 0.0, 0.0),
(0.40909090638160706, 0.0, 0.0), (0.41414141654968262, 0.0, 0.0),
(0.41919192671775818, 0.0, 0.0), (0.42424243688583374,
0.0039215688593685627, 0.0039215688593685627), (0.42929291725158691,
0.027450980618596077, 0.027450980618596077), (0.43434342741966248,
0.050980392843484879, 0.050980392843484879), (0.43939393758773804,
0.074509806931018829, 0.074509806931018829), (0.4444444477558136,
0.094117648899555206, 0.094117648899555206), (0.44949495792388916,
0.11764705926179886, 0.11764705926179886), (0.45454546809196472,
0.14117647707462311, 0.14117647707462311), (0.4595959484577179,
0.16470588743686676, 0.16470588743686676), (0.46464645862579346,
0.18823529779911041, 0.18823529779911041), (0.46969696879386902,
0.21176470816135406, 0.21176470816135406), (0.47474747896194458,
0.23529411852359772, 0.23529411852359772), (0.47979798913002014,
0.22352941334247589, 0.22352941334247589), (0.4848484992980957,
0.20000000298023224, 0.20000000298023224), (0.48989897966384888,
0.17647059261798859, 0.17647059261798859), (0.49494948983192444,
0.15294118225574493, 0.15294118225574493), (0.5, 0.12941177189350128,
0.12941177189350128), (0.50505048036575317, 0.10980392247438431,
0.10980392247438431), (0.51010102033615112, 0.086274512112140656,
0.086274512112140656), (0.5151515007019043, 0.062745101749897003,
0.062745101749897003), (0.52020204067230225, 0.039215687662363052,
0.039215687662363052), (0.52525252103805542, 0.015686275437474251,
0.015686275437474251), (0.53030300140380859, 0.0, 0.0),
(0.53535354137420654, 0.0, 0.0), (0.54040402173995972, 0.0, 0.0),
(0.54545456171035767, 0.0, 0.0), (0.55050504207611084, 0.0, 0.0),
(0.55555558204650879, 0.0, 0.0), (0.56060606241226196, 0.0, 0.0),
(0.56565654277801514, 0.0, 0.0), (0.57070708274841309, 0.0, 0.0),
(0.57575756311416626, 0.0, 0.0), (0.58080810308456421, 0.0, 0.0),
(0.58585858345031738, 0.0039215688593685627, 0.0039215688593685627),
(0.59090906381607056, 0.0078431377187371254, 0.0078431377187371254),
(0.59595960378646851, 0.011764706112444401, 0.011764706112444401),
(0.60101008415222168, 0.019607843831181526, 0.019607843831181526),
(0.60606062412261963, 0.023529412224888802, 0.023529412224888802),
(0.6111111044883728, 0.031372550874948502, 0.031372550874948502),
(0.61616164445877075, 0.035294119268655777, 0.035294119268655777),
(0.62121212482452393, 0.043137256056070328, 0.043137256056070328),
(0.6262626051902771, 0.047058824449777603, 0.047058824449777603),
(0.63131314516067505, 0.054901961237192154, 0.054901961237192154),
(0.63636362552642822, 0.054901961237192154, 0.054901961237192154),
(0.64141416549682617, 0.050980392843484879, 0.050980392843484879),
(0.64646464586257935, 0.043137256056070328, 0.043137256056070328),
(0.65151512622833252, 0.039215687662363052, 0.039215687662363052),
(0.65656566619873047, 0.031372550874948502, 0.031372550874948502),
(0.66161614656448364, 0.027450980618596077, 0.027450980618596077),
(0.66666668653488159, 0.019607843831181526, 0.019607843831181526),
(0.67171716690063477, 0.015686275437474251, 0.015686275437474251),
(0.67676764726638794, 0.011764706112444401, 0.011764706112444401),
(0.68181818723678589, 0.0039215688593685627, 0.0039215688593685627),
(0.68686866760253906, 0.0, 0.0), (0.69191920757293701, 0.0, 0.0),
(0.69696968793869019, 0.0, 0.0), (0.70202022790908813, 0.0, 0.0),
(0.70707070827484131, 0.0, 0.0), (0.71212118864059448, 0.0, 0.0),
(0.71717172861099243, 0.0, 0.0), (0.72222220897674561, 0.0, 0.0),
(0.72727274894714355, 0.0, 0.0), (0.73232322931289673, 0.0, 0.0),
(0.7373737096786499, 0.0, 0.0), (0.74242424964904785,
0.031372550874948502, 0.031372550874948502), (0.74747473001480103,
0.12941177189350128, 0.12941177189350128), (0.75252526998519897,
0.22352941334247589, 0.22352941334247589), (0.75757575035095215,
0.32156863808631897, 0.32156863808631897), (0.7626262903213501,
0.41568627953529358, 0.41568627953529358), (0.76767677068710327,
0.50980395078659058, 0.50980395078659058), (0.77272725105285645,
0.60784316062927246, 0.60784316062927246), (0.77777779102325439,
0.70196080207824707, 0.70196080207824707), (0.78282827138900757,
0.79607844352722168, 0.79607844352722168), (0.78787881135940552,
0.89411765336990356, 0.89411765336990356), (0.79292929172515869,
0.98823529481887817, 0.98823529481887817), (0.79797977209091187, 1.0,
1.0), (0.80303031206130981, 1.0, 1.0), (0.80808079242706299, 1.0, 1.0),
(0.81313133239746094, 1.0, 1.0), (0.81818181276321411, 1.0, 1.0),
(0.82323235273361206, 1.0, 1.0), (0.82828283309936523, 1.0, 1.0),
(0.83333331346511841, 1.0, 1.0), (0.83838385343551636, 1.0, 1.0),
(0.84343433380126953, 1.0, 1.0), (0.84848487377166748,
0.99607843160629272, 0.99607843160629272), (0.85353535413742065,
0.98823529481887817, 0.98823529481887817), (0.85858583450317383,
0.9843137264251709, 0.9843137264251709), (0.86363637447357178,
0.97647058963775635, 0.97647058963775635), (0.86868685483932495,
0.9686274528503418, 0.9686274528503418), (0.8737373948097229,
0.96470588445663452, 0.96470588445663452), (0.87878787517547607,
0.95686274766921997, 0.95686274766921997), (0.88383835554122925,
0.94901961088180542, 0.94901961088180542), (0.8888888955116272,
0.94509804248809814, 0.94509804248809814), (0.89393937587738037,
0.93725490570068359, 0.93725490570068359), (0.89898991584777832,
0.93333333730697632, 0.93333333730697632), (0.90404039621353149,
0.93333333730697632, 0.93333333730697632), (0.90909093618392944,
0.93725490570068359, 0.93725490570068359), (0.91414141654968262,
0.93725490570068359, 0.93725490570068359), (0.91919189691543579,
0.94117647409439087, 0.94117647409439087), (0.92424243688583374,
0.94509804248809814, 0.94509804248809814), (0.92929291725158691,
0.94509804248809814, 0.94509804248809814), (0.93434345722198486,
0.94901961088180542, 0.94901961088180542), (0.93939393758773804,
0.9529411792755127, 0.9529411792755127), (0.94444441795349121,
0.9529411792755127, 0.9529411792755127), (0.94949495792388916,
0.95686274766921997, 0.95686274766921997), (0.95454543828964233,
0.96078431606292725, 0.96078431606292725), (0.95959597826004028,
0.96470588445663452, 0.96470588445663452), (0.96464645862579346,
0.9686274528503418, 0.9686274528503418), (0.96969699859619141,
0.97254902124404907, 0.97254902124404907), (0.97474747896194458,
0.97647058963775635, 0.97647058963775635), (0.97979795932769775,
0.98039215803146362, 0.98039215803146362), (0.9848484992980957,
0.9843137264251709, 0.9843137264251709), (0.98989897966384888,
0.98823529481887817, 0.98823529481887817), (0.99494951963424683,
0.99215686321258545, 0.99215686321258545), (1.0, 0.99607843160629272,
0.99607843160629272)], 'green': [(0.0, 0.0, 0.0), (0.0050505050458014011,
0.035294119268655777, 0.035294119268655777), (0.010101010091602802,
0.074509806931018829, 0.074509806931018829), (0.015151515603065491,
0.10980392247438431, 0.10980392247438431), (0.020202020183205605,
0.14901961386203766, 0.14901961386203766), (0.025252524763345718,
0.18431372940540314, 0.18431372940540314), (0.030303031206130981,
0.22352941334247589, 0.22352941334247589), (0.035353533923625946,
0.25882354378700256, 0.25882354378700256), (0.040404040366411209,
0.29803922772407532, 0.29803922772407532), (0.045454546809196472,
0.3333333432674408, 0.3333333432674408), (0.050505049526691437,
0.37254902720451355, 0.37254902720451355), (0.0555555559694767,
0.36862745881080627, 0.36862745881080627), (0.060606062412261963,
0.3333333432674408, 0.3333333432674408), (0.065656565129756927,
0.29411765933036804, 0.29411765933036804), (0.070707067847251892,
0.25882354378700256, 0.25882354378700256), (0.075757578015327454,
0.21960784494876862, 0.21960784494876862), (0.080808080732822418,
0.18431372940540314, 0.18431372940540314), (0.085858583450317383,
0.14509804546833038, 0.14509804546833038), (0.090909093618392944,
0.10980392247438431, 0.10980392247438431), (0.095959596335887909,
0.070588238537311554, 0.070588238537311554), (0.10101009905338287,
0.035294119268655777, 0.035294119268655777), (0.10606060922145844, 0.0,
0.0), (0.1111111119389534, 0.074509806931018829, 0.074509806931018829),
(0.11616161465644836, 0.14509804546833038, 0.14509804546833038),
(0.12121212482452393, 0.21568627655506134, 0.21568627655506134),
(0.12626262009143829, 0.28627452254295349, 0.28627452254295349),
(0.13131313025951385, 0.36078432202339172, 0.36078432202339172),
(0.13636364042758942, 0.43137255311012268, 0.43137255311012268),
(0.14141413569450378, 0.50196081399917603, 0.50196081399917603),
(0.14646464586257935, 0.57254904508590698, 0.57254904508590698),
(0.15151515603065491, 0.64705884456634521, 0.64705884456634521),
(0.15656565129756927, 0.71764707565307617, 0.71764707565307617),
(0.16161616146564484, 0.7607843279838562, 0.7607843279838562),
(0.1666666716337204, 0.78431373834609985, 0.78431373834609985),
(0.17171716690063477, 0.80784314870834351, 0.80784314870834351),
(0.17676767706871033, 0.83137255907058716, 0.83137255907058716),
(0.18181818723678589, 0.85490196943283081, 0.85490196943283081),
(0.18686868250370026, 0.88235294818878174, 0.88235294818878174),
(0.19191919267177582, 0.90588235855102539, 0.90588235855102539),
(0.19696970283985138, 0.92941176891326904, 0.92941176891326904),
(0.20202019810676575, 0.9529411792755127, 0.9529411792755127),
(0.20707070827484131, 0.97647058963775635, 0.97647058963775635),
(0.21212121844291687, 0.99607843160629272, 0.99607843160629272),
(0.21717171370983124, 0.99607843160629272, 0.99607843160629272),
(0.2222222238779068, 0.99215686321258545, 0.99215686321258545),
(0.22727273404598236, 0.99215686321258545, 0.99215686321258545),
(0.23232322931289673, 0.99215686321258545, 0.99215686321258545),
(0.23737373948097229, 0.98823529481887817, 0.98823529481887817),
(0.24242424964904785, 0.98823529481887817, 0.98823529481887817),
(0.24747474491596222, 0.9843137264251709, 0.9843137264251709),
(0.25252524018287659, 0.9843137264251709, 0.9843137264251709),
(0.25757575035095215, 0.98039215803146362, 0.98039215803146362),
(0.26262626051902771, 0.98039215803146362, 0.98039215803146362),
(0.26767677068710327, 0.98039215803146362, 0.98039215803146362),
(0.27272728085517883, 0.98039215803146362, 0.98039215803146362),
(0.27777779102325439, 0.9843137264251709, 0.9843137264251709),
(0.28282827138900757, 0.9843137264251709, 0.9843137264251709),
(0.28787878155708313, 0.98823529481887817, 0.98823529481887817),
(0.29292929172515869, 0.98823529481887817, 0.98823529481887817),
(0.29797980189323425, 0.99215686321258545, 0.99215686321258545),
(0.30303031206130981, 0.99215686321258545, 0.99215686321258545),
(0.30808082222938538, 0.99607843160629272, 0.99607843160629272),
(0.31313130259513855, 0.99607843160629272, 0.99607843160629272),
(0.31818181276321411, 0.99607843160629272, 0.99607843160629272),
(0.32323232293128967, 0.97647058963775635, 0.97647058963775635),
(0.32828283309936523, 0.95686274766921997, 0.95686274766921997),
(0.3333333432674408, 0.93725490570068359, 0.93725490570068359),
(0.33838382363319397, 0.92156863212585449, 0.92156863212585449),
(0.34343433380126953, 0.90196079015731812, 0.90196079015731812),
(0.34848484396934509, 0.88235294818878174, 0.88235294818878174),
(0.35353535413742065, 0.86274510622024536, 0.86274510622024536),
(0.35858586430549622, 0.84705883264541626, 0.84705883264541626),
(0.36363637447357178, 0.82745099067687988, 0.82745099067687988),
(0.36868685483932495, 0.80784314870834351, 0.80784314870834351),
(0.37373736500740051, 0.81568628549575806, 0.81568628549575806),
(0.37878787517547607, 0.83529412746429443, 0.83529412746429443),
(0.38383838534355164, 0.85098040103912354, 0.85098040103912354),
(0.3888888955116272, 0.87058824300765991, 0.87058824300765991),
(0.39393940567970276, 0.89019608497619629, 0.89019608497619629),
(0.39898988604545593, 0.90980392694473267, 0.90980392694473267),
(0.40404039621353149, 0.92549020051956177, 0.92549020051956177),
(0.40909090638160706, 0.94509804248809814, 0.94509804248809814),
(0.41414141654968262, 0.96470588445663452, 0.96470588445663452),
(0.41919192671775818, 0.9843137264251709, 0.9843137264251709),
(0.42424243688583374, 1.0, 1.0), (0.42929291725158691, 1.0, 1.0),
(0.43434342741966248, 1.0, 1.0), (0.43939393758773804, 1.0, 1.0),
(0.4444444477558136, 1.0, 1.0), (0.44949495792388916, 1.0, 1.0),
(0.45454546809196472, 1.0, 1.0), (0.4595959484577179, 1.0, 1.0),
(0.46464645862579346, 1.0, 1.0), (0.46969696879386902, 1.0, 1.0),
(0.47474747896194458, 1.0, 1.0), (0.47979798913002014, 1.0, 1.0),
(0.4848484992980957, 1.0, 1.0), (0.48989897966384888, 1.0, 1.0),
(0.49494948983192444, 1.0, 1.0), (0.5, 1.0, 1.0), (0.50505048036575317,
1.0, 1.0), (0.51010102033615112, 1.0, 1.0), (0.5151515007019043, 1.0,
1.0), (0.52020204067230225, 1.0, 1.0), (0.52525252103805542, 1.0, 1.0),
(0.53030300140380859, 0.99215686321258545, 0.99215686321258545),
(0.53535354137420654, 0.98039215803146362, 0.98039215803146362),
(0.54040402173995972, 0.96470588445663452, 0.96470588445663452),
(0.54545456171035767, 0.94901961088180542, 0.94901961088180542),
(0.55050504207611084, 0.93333333730697632, 0.93333333730697632),
(0.55555558204650879, 0.91764706373214722, 0.91764706373214722),
(0.56060606241226196, 0.90588235855102539, 0.90588235855102539),
(0.56565654277801514, 0.89019608497619629, 0.89019608497619629),
(0.57070708274841309, 0.87450981140136719, 0.87450981140136719),
(0.57575756311416626, 0.85882353782653809, 0.85882353782653809),
(0.58080810308456421, 0.84313726425170898, 0.84313726425170898),
(0.58585858345031738, 0.83137255907058716, 0.83137255907058716),
(0.59090906381607056, 0.81960785388946533, 0.81960785388946533),
(0.59595960378646851, 0.81176471710205078, 0.81176471710205078),
(0.60101008415222168, 0.80000001192092896, 0.80000001192092896),
(0.60606062412261963, 0.78823530673980713, 0.78823530673980713),
(0.6111111044883728, 0.7764706015586853, 0.7764706015586853),
(0.61616164445877075, 0.76470589637756348, 0.76470589637756348),
(0.62121212482452393, 0.75294119119644165, 0.75294119119644165),
(0.6262626051902771, 0.74117648601531982, 0.74117648601531982),
(0.63131314516067505, 0.729411780834198, 0.729411780834198),
(0.63636362552642822, 0.70980393886566162, 0.70980393886566162),
(0.64141416549682617, 0.66666668653488159, 0.66666668653488159),
(0.64646464586257935, 0.62352943420410156, 0.62352943420410156),
(0.65151512622833252, 0.58039218187332153, 0.58039218187332153),
(0.65656566619873047, 0.5372549295425415, 0.5372549295425415),
(0.66161614656448364, 0.49411764740943909, 0.49411764740943909),
(0.66666668653488159, 0.45098039507865906, 0.45098039507865906),
(0.67171716690063477, 0.40392157435417175, 0.40392157435417175),
(0.67676764726638794, 0.36078432202339172, 0.36078432202339172),
(0.68181818723678589, 0.31764706969261169, 0.31764706969261169),
(0.68686866760253906, 0.27450981736183167, 0.27450981736183167),
(0.69191920757293701, 0.24705882370471954, 0.24705882370471954),
(0.69696968793869019, 0.21960784494876862, 0.21960784494876862),
(0.70202022790908813, 0.19607843458652496, 0.19607843458652496),
(0.70707070827484131, 0.16862745583057404, 0.16862745583057404),
(0.71212118864059448, 0.14509804546833038, 0.14509804546833038),
(0.71717172861099243, 0.11764705926179886, 0.11764705926179886),
(0.72222220897674561, 0.090196080505847931, 0.090196080505847931),
(0.72727274894714355, 0.066666670143604279, 0.066666670143604279),
(0.73232322931289673, 0.039215687662363052, 0.039215687662363052),
(0.7373737096786499, 0.015686275437474251, 0.015686275437474251),
(0.74242424964904785, 0.0, 0.0), (0.74747473001480103, 0.0, 0.0),
(0.75252526998519897, 0.0, 0.0), (0.75757575035095215, 0.0, 0.0),
(0.7626262903213501, 0.0, 0.0), (0.76767677068710327, 0.0, 0.0),
(0.77272725105285645, 0.0, 0.0), (0.77777779102325439, 0.0, 0.0),
(0.78282827138900757, 0.0, 0.0), (0.78787881135940552, 0.0, 0.0),
(0.79292929172515869, 0.0, 0.0), (0.79797977209091187,
0.015686275437474251, 0.015686275437474251), (0.80303031206130981,
0.031372550874948502, 0.031372550874948502), (0.80808079242706299,
0.050980392843484879, 0.050980392843484879), (0.81313133239746094,
0.066666670143604279, 0.066666670143604279), (0.81818181276321411,
0.086274512112140656, 0.086274512112140656), (0.82323235273361206,
0.10588235408067703, 0.10588235408067703), (0.82828283309936523,
0.12156862765550613, 0.12156862765550613), (0.83333331346511841,
0.14117647707462311, 0.14117647707462311), (0.83838385343551636,
0.15686275064945221, 0.15686275064945221), (0.84343433380126953,
0.17647059261798859, 0.17647059261798859), (0.84848487377166748,
0.20000000298023224, 0.20000000298023224), (0.85353535413742065,
0.23137255012989044, 0.23137255012989044), (0.85858583450317383,
0.25882354378700256, 0.25882354378700256), (0.86363637447357178,
0.29019609093666077, 0.29019609093666077), (0.86868685483932495,
0.32156863808631897, 0.32156863808631897), (0.8737373948097229,
0.35294118523597717, 0.35294118523597717), (0.87878787517547607,
0.38431373238563538, 0.38431373238563538), (0.88383835554122925,
0.41568627953529358, 0.41568627953529358), (0.8888888955116272,
0.44313725829124451, 0.44313725829124451), (0.89393937587738037,
0.47450980544090271, 0.47450980544090271), (0.89898991584777832,
0.5058823823928833, 0.5058823823928833), (0.90404039621353149,
0.52941179275512695, 0.52941179275512695), (0.90909093618392944,
0.55294120311737061, 0.55294120311737061), (0.91414141654968262,
0.57254904508590698, 0.57254904508590698), (0.91919189691543579,
0.59607845544815063, 0.59607845544815063), (0.92424243688583374,
0.61960786581039429, 0.61960786581039429), (0.92929291725158691,
0.64313727617263794, 0.64313727617263794), (0.93434345722198486,
0.66274511814117432, 0.66274511814117432), (0.93939393758773804,
0.68627452850341797, 0.68627452850341797), (0.94444441795349121,
0.70980393886566162, 0.70980393886566162), (0.94949495792388916,
0.729411780834198, 0.729411780834198), (0.95454543828964233,
0.75294119119644165, 0.75294119119644165), (0.95959597826004028,
0.78039216995239258, 0.78039216995239258), (0.96464645862579346,
0.80392158031463623, 0.80392158031463623), (0.96969699859619141,
0.82745099067687988, 0.82745099067687988), (0.97474747896194458,
0.85098040103912354, 0.85098040103912354), (0.97979795932769775,
0.87450981140136719, 0.87450981140136719), (0.9848484992980957,
0.90196079015731812, 0.90196079015731812), (0.98989897966384888,
0.92549020051956177, 0.92549020051956177), (0.99494951963424683,
0.94901961088180542, 0.94901961088180542), (1.0, 0.97254902124404907,
0.97254902124404907)], 'red': [(0.0, 0.0, 0.0), (0.0050505050458014011,
0.0, 0.0), (0.010101010091602802, 0.0, 0.0), (0.015151515603065491, 0.0,
0.0), (0.020202020183205605, 0.0, 0.0), (0.025252524763345718, 0.0, 0.0),
(0.030303031206130981, 0.0, 0.0), (0.035353533923625946, 0.0, 0.0),
(0.040404040366411209, 0.0, 0.0), (0.045454546809196472, 0.0, 0.0),
(0.050505049526691437, 0.0, 0.0), (0.0555555559694767, 0.0, 0.0),
(0.060606062412261963, 0.0, 0.0), (0.065656565129756927, 0.0, 0.0),
(0.070707067847251892, 0.0, 0.0), (0.075757578015327454, 0.0, 0.0),
(0.080808080732822418, 0.0, 0.0), (0.085858583450317383, 0.0, 0.0),
(0.090909093618392944, 0.0, 0.0), (0.095959596335887909, 0.0, 0.0),
(0.10101009905338287, 0.0, 0.0), (0.10606060922145844, 0.0, 0.0),
(0.1111111119389534, 0.0, 0.0), (0.11616161465644836, 0.0, 0.0),
(0.12121212482452393, 0.0, 0.0), (0.12626262009143829, 0.0, 0.0),
(0.13131313025951385, 0.0, 0.0), (0.13636364042758942, 0.0, 0.0),
(0.14141413569450378, 0.0, 0.0), (0.14646464586257935, 0.0, 0.0),
(0.15151515603065491, 0.0, 0.0), (0.15656565129756927, 0.0, 0.0),
(0.16161616146564484, 0.0, 0.0), (0.1666666716337204, 0.0, 0.0),
(0.17171716690063477, 0.0, 0.0), (0.17676767706871033, 0.0, 0.0),
(0.18181818723678589, 0.0, 0.0), (0.18686868250370026, 0.0, 0.0),
(0.19191919267177582, 0.0, 0.0), (0.19696970283985138, 0.0, 0.0),
(0.20202019810676575, 0.0, 0.0), (0.20707070827484131, 0.0, 0.0),
(0.21212121844291687, 0.0, 0.0), (0.21717171370983124, 0.0, 0.0),
(0.2222222238779068, 0.0, 0.0), (0.22727273404598236, 0.0, 0.0),
(0.23232322931289673, 0.0, 0.0), (0.23737373948097229, 0.0, 0.0),
(0.24242424964904785, 0.0, 0.0), (0.24747474491596222, 0.0, 0.0),
(0.25252524018287659, 0.0, 0.0), (0.25757575035095215, 0.0, 0.0),
(0.26262626051902771, 0.0, 0.0), (0.26767677068710327, 0.0, 0.0),
(0.27272728085517883, 0.0, 0.0), (0.27777779102325439, 0.0, 0.0),
(0.28282827138900757, 0.0, 0.0), (0.28787878155708313, 0.0, 0.0),
(0.29292929172515869, 0.0, 0.0), (0.29797980189323425, 0.0, 0.0),
(0.30303031206130981, 0.0, 0.0), (0.30808082222938538, 0.0, 0.0),
(0.31313130259513855, 0.0, 0.0), (0.31818181276321411,
0.0039215688593685627, 0.0039215688593685627), (0.32323232293128967,
0.043137256056070328, 0.043137256056070328), (0.32828283309936523,
0.08235294371843338, 0.08235294371843338), (0.3333333432674408,
0.11764705926179886, 0.11764705926179886), (0.33838382363319397,
0.15686275064945221, 0.15686275064945221), (0.34343433380126953,
0.19607843458652496, 0.19607843458652496), (0.34848484396934509,
0.23137255012989044, 0.23137255012989044), (0.35353535413742065,
0.27058824896812439, 0.27058824896812439), (0.35858586430549622,
0.30980393290519714, 0.30980393290519714), (0.36363637447357178,
0.3490196168422699, 0.3490196168422699), (0.36868685483932495,
0.38431373238563538, 0.38431373238563538), (0.37373736500740051,
0.40392157435417175, 0.40392157435417175), (0.37878787517547607,
0.41568627953529358, 0.41568627953529358), (0.38383838534355164,
0.42352941632270813, 0.42352941632270813), (0.3888888955116272,
0.43137255311012268, 0.43137255311012268), (0.39393940567970276,
0.44313725829124451, 0.44313725829124451), (0.39898988604545593,
0.45098039507865906, 0.45098039507865906), (0.40404039621353149,
0.45882353186607361, 0.45882353186607361), (0.40909090638160706,
0.47058823704719543, 0.47058823704719543), (0.41414141654968262,
0.47843137383460999, 0.47843137383460999), (0.41919192671775818,
0.49019607901573181, 0.49019607901573181), (0.42424243688583374,
0.50196081399917603, 0.50196081399917603), (0.42929291725158691,
0.52549022436141968, 0.52549022436141968), (0.43434342741966248,
0.54901963472366333, 0.54901963472366333), (0.43939393758773804,
0.57254904508590698, 0.57254904508590698), (0.4444444477558136,
0.60000002384185791, 0.60000002384185791), (0.44949495792388916,
0.62352943420410156, 0.62352943420410156), (0.45454546809196472,
0.64705884456634521, 0.64705884456634521), (0.4595959484577179,
0.67058825492858887, 0.67058825492858887), (0.46464645862579346,
0.69411766529083252, 0.69411766529083252), (0.46969696879386902,
0.72156864404678345, 0.72156864404678345), (0.47474747896194458,
0.7450980544090271, 0.7450980544090271), (0.47979798913002014,
0.76862746477127075, 0.76862746477127075), (0.4848484992980957,
0.7921568751335144, 0.7921568751335144), (0.48989897966384888,
0.81568628549575806, 0.81568628549575806), (0.49494948983192444,
0.83921569585800171, 0.83921569585800171), (0.5, 0.86274510622024536,
0.86274510622024536), (0.50505048036575317, 0.88627451658248901,
0.88627451658248901), (0.51010102033615112, 0.90980392694473267,
0.90980392694473267), (0.5151515007019043, 0.93333333730697632,
0.93333333730697632), (0.52020204067230225, 0.95686274766921997,
0.95686274766921997), (0.52525252103805542, 0.98039215803146362,
0.98039215803146362), (0.53030300140380859, 1.0, 1.0),
(0.53535354137420654, 1.0, 1.0), (0.54040402173995972, 1.0, 1.0),
(0.54545456171035767, 1.0, 1.0), (0.55050504207611084, 1.0, 1.0),
(0.55555558204650879, 1.0, 1.0), (0.56060606241226196, 1.0, 1.0),
(0.56565654277801514, 1.0, 1.0), (0.57070708274841309, 1.0, 1.0),
(0.57575756311416626, 1.0, 1.0), (0.58080810308456421, 1.0, 1.0),
(0.58585858345031738, 1.0, 1.0), (0.59090906381607056, 1.0, 1.0),
(0.59595960378646851, 1.0, 1.0), (0.60101008415222168, 1.0, 1.0),
(0.60606062412261963, 1.0, 1.0), (0.6111111044883728, 1.0, 1.0),
(0.61616164445877075, 1.0, 1.0), (0.62121212482452393, 1.0, 1.0),
(0.6262626051902771, 1.0, 1.0), (0.63131314516067505, 1.0, 1.0),
(0.63636362552642822, 1.0, 1.0), (0.64141416549682617, 1.0, 1.0),
(0.64646464586257935, 1.0, 1.0), (0.65151512622833252, 1.0, 1.0),
(0.65656566619873047, 1.0, 1.0), (0.66161614656448364, 1.0, 1.0),
(0.66666668653488159, 1.0, 1.0), (0.67171716690063477, 1.0, 1.0),
(0.67676764726638794, 1.0, 1.0), (0.68181818723678589, 1.0, 1.0),
(0.68686866760253906, 1.0, 1.0), (0.69191920757293701, 1.0, 1.0),
(0.69696968793869019, 1.0, 1.0), (0.70202022790908813, 1.0, 1.0),
(0.70707070827484131, 1.0, 1.0), (0.71212118864059448, 1.0, 1.0),
(0.71717172861099243, 1.0, 1.0), (0.72222220897674561, 1.0, 1.0),
(0.72727274894714355, 1.0, 1.0), (0.73232322931289673, 1.0, 1.0),
(0.7373737096786499, 1.0, 1.0), (0.74242424964904785, 1.0, 1.0),
(0.74747473001480103, 1.0, 1.0), (0.75252526998519897, 1.0, 1.0),
(0.75757575035095215, 1.0, 1.0), (0.7626262903213501, 1.0, 1.0),
(0.76767677068710327, 1.0, 1.0), (0.77272725105285645, 1.0, 1.0),
(0.77777779102325439, 1.0, 1.0), (0.78282827138900757, 1.0, 1.0),
(0.78787881135940552, 1.0, 1.0), (0.79292929172515869, 1.0, 1.0),
(0.79797977209091187, 0.96470588445663452, 0.96470588445663452),
(0.80303031206130981, 0.92549020051956177, 0.92549020051956177),
(0.80808079242706299, 0.89019608497619629, 0.89019608497619629),
(0.81313133239746094, 0.85098040103912354, 0.85098040103912354),
(0.81818181276321411, 0.81568628549575806, 0.81568628549575806),
(0.82323235273361206, 0.7764706015586853, 0.7764706015586853),
(0.82828283309936523, 0.74117648601531982, 0.74117648601531982),
(0.83333331346511841, 0.70196080207824707, 0.70196080207824707),
(0.83838385343551636, 0.66666668653488159, 0.66666668653488159),
(0.84343433380126953, 0.62745100259780884, 0.62745100259780884),
(0.84848487377166748, 0.61960786581039429, 0.61960786581039429),
(0.85353535413742065, 0.65098041296005249, 0.65098041296005249),
(0.85858583450317383, 0.68235296010971069, 0.68235296010971069),
(0.86363637447357178, 0.7137255072593689, 0.7137255072593689),
(0.86868685483932495, 0.7450980544090271, 0.7450980544090271),
(0.8737373948097229, 0.77254903316497803, 0.77254903316497803),
(0.87878787517547607, 0.80392158031463623, 0.80392158031463623),
(0.88383835554122925, 0.83529412746429443, 0.83529412746429443),
(0.8888888955116272, 0.86666667461395264, 0.86666667461395264),
(0.89393937587738037, 0.89803922176361084, 0.89803922176361084),
(0.89898991584777832, 0.92941176891326904, 0.92941176891326904),
(0.90404039621353149, 0.93333333730697632, 0.93333333730697632),
(0.90909093618392944, 0.93725490570068359, 0.93725490570068359),
(0.91414141654968262, 0.93725490570068359, 0.93725490570068359),
(0.91919189691543579, 0.94117647409439087, 0.94117647409439087),
(0.92424243688583374, 0.94509804248809814, 0.94509804248809814),
(0.92929291725158691, 0.94509804248809814, 0.94509804248809814),
(0.93434345722198486, 0.94901961088180542, 0.94901961088180542),
(0.93939393758773804, 0.9529411792755127, 0.9529411792755127),
(0.94444441795349121, 0.9529411792755127, 0.9529411792755127),
(0.94949495792388916, 0.95686274766921997, 0.95686274766921997),
(0.95454543828964233, 0.96078431606292725, 0.96078431606292725),
(0.95959597826004028, 0.96470588445663452, 0.96470588445663452),
(0.96464645862579346, 0.9686274528503418, 0.9686274528503418),
(0.96969699859619141, 0.97254902124404907, 0.97254902124404907),
(0.97474747896194458, 0.97647058963775635, 0.97647058963775635),
(0.97979795932769775, 0.98039215803146362, 0.98039215803146362),
(0.9848484992980957, 0.9843137264251709, 0.9843137264251709),
(0.98989897966384888, 0.98823529481887817, 0.98823529481887817),
(0.99494951963424683, 0.99215686321258545, 0.99215686321258545), (1.0,
0.99607843160629272, 0.99607843160629272)]}
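# A minimal sketch (kept commented out, so nothing runs at import time) of how
# one of these segmentdata dicts can be turned into a usable colormap;
# LinearSegmentedColormap is the standard matplotlib constructor for this
# format, and the 'gist_ncar' name below is just illustrative:
#
#   from matplotlib.colors import LinearSegmentedColormap
#   gist_ncar = LinearSegmentedColormap('gist_ncar', _gist_ncar_data, N=256)
#   pylab.imshow([[0.0, 0.5], [0.75, 1.0]], cmap=gist_ncar)  # sample use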
_gist_rainbow_data = {'blue':
[(0.0, 0.16470588743686676, 0.16470588743686676), (0.0042016808874905109,
0.14117647707462311, 0.14117647707462311), (0.0084033617749810219,
0.12156862765550613, 0.12156862765550613), (0.012605042196810246,
0.10196078568696976, 0.10196078568696976), (0.016806723549962044,
0.078431375324726105, 0.078431375324726105), (0.021008403971791267,
0.058823529630899429, 0.058823529630899429), (0.025210084393620491,
0.039215687662363052, 0.039215687662363052), (0.029411764815449715,
0.015686275437474251, 0.015686275437474251), (0.033613447099924088, 0.0,
0.0), (0.037815127521753311, 0.0, 0.0), (0.042016807943582535, 0.0, 0.0),
(0.046218488365411758, 0.0, 0.0), (0.050420168787240982, 0.0, 0.0),
(0.054621849209070206, 0.0, 0.0), (0.058823529630899429, 0.0, 0.0),
(0.063025213778018951, 0.0, 0.0), (0.067226894199848175, 0.0, 0.0),
(0.071428574621677399, 0.0, 0.0), (0.075630255043506622, 0.0, 0.0),
(0.079831935465335846, 0.0, 0.0), (0.08403361588716507, 0.0, 0.0),
(0.088235296308994293, 0.0, 0.0), (0.092436976730823517, 0.0, 0.0),
(0.09663865715265274, 0.0, 0.0), (0.10084033757448196, 0.0, 0.0),
(0.10504201799631119, 0.0, 0.0), (0.10924369841814041, 0.0, 0.0),
(0.11344537883996964, 0.0, 0.0), (0.11764705926179886, 0.0, 0.0),
(0.12184873968362808, 0.0, 0.0), (0.1260504275560379, 0.0, 0.0),
(0.13025210797786713, 0.0, 0.0), (0.13445378839969635, 0.0, 0.0),
(0.13865546882152557, 0.0, 0.0), (0.1428571492433548, 0.0, 0.0),
(0.14705882966518402, 0.0, 0.0), (0.15126051008701324, 0.0, 0.0),
(0.15546219050884247, 0.0, 0.0), (0.15966387093067169, 0.0, 0.0),
(0.16386555135250092, 0.0, 0.0), (0.16806723177433014, 0.0, 0.0),
(0.17226891219615936, 0.0, 0.0), (0.17647059261798859, 0.0, 0.0),
(0.18067227303981781, 0.0, 0.0), (0.18487395346164703, 0.0, 0.0),
(0.18907563388347626, 0.0, 0.0), (0.19327731430530548, 0.0, 0.0),
(0.1974789947271347, 0.0, 0.0), (0.20168067514896393, 0.0, 0.0),
(0.20588235557079315, 0.0, 0.0), (0.21008403599262238, 0.0, 0.0),
(0.2142857164144516, 0.0, 0.0), (0.21848739683628082, 0.0, 0.0),
(0.22268907725811005, 0.0, 0.0), (0.22689075767993927, 0.0, 0.0),
(0.23109243810176849, 0.0, 0.0), (0.23529411852359772, 0.0, 0.0),
(0.23949579894542694, 0.0, 0.0), (0.24369747936725616, 0.0, 0.0),
(0.24789915978908539, 0.0, 0.0), (0.25210085511207581, 0.0, 0.0),
(0.25630253553390503, 0.0, 0.0), (0.26050421595573425, 0.0, 0.0),
(0.26470589637756348, 0.0, 0.0), (0.2689075767993927, 0.0, 0.0),
(0.27310925722122192, 0.0, 0.0), (0.27731093764305115, 0.0, 0.0),
(0.28151261806488037, 0.0, 0.0), (0.28571429848670959, 0.0, 0.0),
(0.28991597890853882, 0.0, 0.0), (0.29411765933036804, 0.0, 0.0),
(0.29831933975219727, 0.0, 0.0), (0.30252102017402649, 0.0, 0.0),
(0.30672270059585571, 0.0, 0.0), (0.31092438101768494, 0.0, 0.0),
(0.31512606143951416, 0.0, 0.0), (0.31932774186134338, 0.0, 0.0),
(0.32352942228317261, 0.0, 0.0), (0.32773110270500183, 0.0, 0.0),
(0.33193278312683105, 0.0, 0.0), (0.33613446354866028, 0.0, 0.0),
(0.3403361439704895, 0.0, 0.0), (0.34453782439231873, 0.0, 0.0),
(0.34873950481414795, 0.0, 0.0), (0.35294118523597717, 0.0, 0.0),
(0.3571428656578064, 0.0, 0.0), (0.36134454607963562, 0.0, 0.0),
(0.36554622650146484, 0.0, 0.0), (0.36974790692329407, 0.0, 0.0),
(0.37394958734512329, 0.0, 0.0), (0.37815126776695251, 0.0, 0.0),
(0.38235294818878174, 0.0, 0.0), (0.38655462861061096, 0.0, 0.0),
(0.39075630903244019, 0.0, 0.0), (0.39495798945426941, 0.0, 0.0),
(0.39915966987609863, 0.0, 0.0), (0.40336135029792786, 0.0, 0.0),
(0.40756303071975708, 0.0039215688593685627, 0.0039215688593685627),
(0.4117647111415863, 0.047058824449777603, 0.047058824449777603),
(0.41596639156341553, 0.066666670143604279, 0.066666670143604279),
(0.42016807198524475, 0.090196080505847931, 0.090196080505847931),
(0.42436975240707397, 0.10980392247438431, 0.10980392247438431),
(0.4285714328289032, 0.12941177189350128, 0.12941177189350128),
(0.43277311325073242, 0.15294118225574493, 0.15294118225574493),
(0.43697479367256165, 0.17254902422428131, 0.17254902422428131),
(0.44117647409439087, 0.19215686619281769, 0.19215686619281769),
(0.44537815451622009, 0.21568627655506134, 0.21568627655506134),
(0.44957983493804932, 0.23529411852359772, 0.23529411852359772),
(0.45378151535987854, 0.25882354378700256, 0.25882354378700256),
(0.45798319578170776, 0.27843138575553894, 0.27843138575553894),
(0.46218487620353699, 0.29803922772407532, 0.29803922772407532),
(0.46638655662536621, 0.32156863808631897, 0.32156863808631897),
(0.47058823704719543, 0.34117648005485535, 0.34117648005485535),
(0.47478991746902466, 0.38431373238563538, 0.38431373238563538),
(0.47899159789085388, 0.40392157435417175, 0.40392157435417175),
(0.48319327831268311, 0.42745098471641541, 0.42745098471641541),
(0.48739495873451233, 0.44705882668495178, 0.44705882668495178),
(0.49159663915634155, 0.46666666865348816, 0.46666666865348816),
(0.49579831957817078, 0.49019607901573181, 0.49019607901573181), (0.5,
0.50980395078659058, 0.50980395078659058), (0.50420171022415161,
0.52941179275512695, 0.52941179275512695), (0.50840336084365845,
0.55294120311737061, 0.55294120311737061), (0.51260507106781006,
0.57254904508590698, 0.57254904508590698), (0.51680672168731689,
0.59607845544815063, 0.59607845544815063), (0.52100843191146851,
0.61568629741668701, 0.61568629741668701), (0.52521008253097534,
0.63529413938522339, 0.63529413938522339), (0.52941179275512695,
0.65882354974746704, 0.65882354974746704), (0.53361344337463379,
0.67843139171600342, 0.67843139171600342), (0.5378151535987854,
0.72156864404678345, 0.72156864404678345), (0.54201680421829224,
0.74117648601531982, 0.74117648601531982), (0.54621851444244385,
0.76470589637756348, 0.76470589637756348), (0.55042016506195068,
0.78431373834609985, 0.78431373834609985), (0.55462187528610229,
0.80392158031463623, 0.80392158031463623), (0.55882352590560913,
0.82745099067687988, 0.82745099067687988), (0.56302523612976074,
0.84705883264541626, 0.84705883264541626), (0.56722688674926758,
0.87058824300765991, 0.87058824300765991), (0.57142859697341919,
0.89019608497619629, 0.89019608497619629), (0.57563024759292603,
0.90980392694473267, 0.90980392694473267), (0.57983195781707764,
0.93333333730697632, 0.93333333730697632), (0.58403360843658447,
0.9529411792755127, 0.9529411792755127), (0.58823531866073608,
0.97254902124404907, 0.97254902124404907), (0.59243696928024292,
0.99607843160629272, 0.99607843160629272), (0.59663867950439453, 1.0,
1.0), (0.60084033012390137, 1.0, 1.0), (0.60504204034805298, 1.0, 1.0),
(0.60924369096755981, 1.0, 1.0), (0.61344540119171143, 1.0, 1.0),
(0.61764705181121826, 1.0, 1.0), (0.62184876203536987, 1.0, 1.0),
(0.62605041265487671, 1.0, 1.0), (0.63025212287902832, 1.0, 1.0),
(0.63445377349853516, 1.0, 1.0), (0.63865548372268677, 1.0, 1.0),
(0.6428571343421936, 1.0, 1.0), (0.64705884456634521, 1.0, 1.0),
(0.65126049518585205, 1.0, 1.0), (0.65546220541000366, 1.0, 1.0),
(0.6596638560295105, 1.0, 1.0), (0.66386556625366211, 1.0, 1.0),
(0.66806721687316895, 1.0, 1.0), (0.67226892709732056, 1.0, 1.0),
(0.67647057771682739, 1.0, 1.0), (0.680672287940979, 1.0, 1.0),
(0.68487393856048584, 1.0, 1.0), (0.68907564878463745, 1.0, 1.0),
(0.69327729940414429, 1.0, 1.0), (0.6974790096282959, 1.0, 1.0),
(0.70168066024780273, 1.0, 1.0), (0.70588237047195435, 1.0, 1.0),
(0.71008402109146118, 1.0, 1.0), (0.71428573131561279, 1.0, 1.0),
(0.71848738193511963, 1.0, 1.0), (0.72268909215927124, 1.0, 1.0),
(0.72689074277877808, 1.0, 1.0), (0.73109245300292969, 1.0, 1.0),
(0.73529410362243652, 1.0, 1.0), (0.73949581384658813, 1.0, 1.0),
(0.74369746446609497, 1.0, 1.0), (0.74789917469024658, 1.0, 1.0),
(0.75210082530975342, 1.0, 1.0), (0.75630253553390503, 1.0, 1.0),
(0.76050418615341187, 1.0, 1.0), (0.76470589637756348, 1.0, 1.0),
(0.76890754699707031, 1.0, 1.0), (0.77310925722122192, 1.0, 1.0),
(0.77731090784072876, 1.0, 1.0), (0.78151261806488037, 1.0, 1.0),
(0.78571426868438721, 1.0, 1.0), (0.78991597890853882, 1.0, 1.0),
(0.79411762952804565, 1.0, 1.0), (0.79831933975219727, 1.0, 1.0),
(0.8025209903717041, 1.0, 1.0), (0.80672270059585571, 1.0, 1.0),
(0.81092435121536255, 1.0, 1.0), (0.81512606143951416, 1.0, 1.0),
(0.819327712059021, 1.0, 1.0), (0.82352942228317261, 1.0, 1.0),
(0.82773107290267944, 1.0, 1.0), (0.83193278312683105, 1.0, 1.0),
(0.83613443374633789, 1.0, 1.0), (0.8403361439704895, 1.0, 1.0),
(0.84453779458999634, 1.0, 1.0), (0.84873950481414795, 1.0, 1.0),
(0.85294115543365479, 1.0, 1.0), (0.8571428656578064, 1.0, 1.0),
(0.86134451627731323, 1.0, 1.0), (0.86554622650146484, 1.0, 1.0),
(0.86974787712097168, 1.0, 1.0), (0.87394958734512329, 1.0, 1.0),
(0.87815123796463013, 1.0, 1.0), (0.88235294818878174, 1.0, 1.0),
(0.88655459880828857, 1.0, 1.0), (0.89075630903244019, 1.0, 1.0),
(0.89495795965194702, 1.0, 1.0), (0.89915966987609863, 1.0, 1.0),
(0.90336132049560547, 1.0, 1.0), (0.90756303071975708, 1.0, 1.0),
(0.91176468133926392, 1.0, 1.0), (0.91596639156341553, 1.0, 1.0),
(0.92016804218292236, 1.0, 1.0), (0.92436975240707397, 1.0, 1.0),
(0.92857140302658081, 1.0, 1.0), (0.93277311325073242, 1.0, 1.0),
(0.93697476387023926, 1.0, 1.0), (0.94117647409439087, 1.0, 1.0),
(0.94537812471389771, 1.0, 1.0), (0.94957983493804932, 1.0, 1.0),
(0.95378148555755615, 1.0, 1.0), (0.95798319578170776, 1.0, 1.0),
(0.9621848464012146, 1.0, 1.0), (0.96638655662536621, 0.99607843160629272,
0.99607843160629272), (0.97058820724487305, 0.97647058963775635,
0.97647058963775635), (0.97478991746902466, 0.9529411792755127,
0.9529411792755127), (0.97899156808853149, 0.91372549533843994,
0.91372549533843994), (0.98319327831268311, 0.89019608497619629,
0.89019608497619629), (0.98739492893218994, 0.87058824300765991,
0.87058824300765991), (0.99159663915634155, 0.85098040103912354,
0.85098040103912354), (0.99579828977584839, 0.82745099067687988,
0.82745099067687988), (1.0, 0.80784314870834351, 0.80784314870834351)],
'green': [(0.0, 0.0, 0.0), (0.0042016808874905109, 0.0, 0.0),
(0.0084033617749810219, 0.0, 0.0), (0.012605042196810246, 0.0, 0.0),
(0.016806723549962044, 0.0, 0.0), (0.021008403971791267, 0.0, 0.0),
(0.025210084393620491, 0.0, 0.0), (0.029411764815449715, 0.0, 0.0),
(0.033613447099924088, 0.019607843831181526, 0.019607843831181526),
(0.037815127521753311, 0.043137256056070328, 0.043137256056070328),
(0.042016807943582535, 0.062745101749897003, 0.062745101749897003),
(0.046218488365411758, 0.086274512112140656, 0.086274512112140656),
(0.050420168787240982, 0.10588235408067703, 0.10588235408067703),
(0.054621849209070206, 0.12549020349979401, 0.12549020349979401),
(0.058823529630899429, 0.14901961386203766, 0.14901961386203766),
(0.063025213778018951, 0.16862745583057404, 0.16862745583057404),
(0.067226894199848175, 0.18823529779911041, 0.18823529779911041),
(0.071428574621677399, 0.21176470816135406, 0.21176470816135406),
(0.075630255043506622, 0.23137255012989044, 0.23137255012989044),
(0.079831935465335846, 0.25490197539329529, 0.25490197539329529),
(0.08403361588716507, 0.27450981736183167, 0.27450981736183167),
(0.088235296308994293, 0.29411765933036804, 0.29411765933036804),
(0.092436976730823517, 0.31764706969261169, 0.31764706969261169),
(0.09663865715265274, 0.35686275362968445, 0.35686275362968445),
(0.10084033757448196, 0.3803921639919281, 0.3803921639919281),
(0.10504201799631119, 0.40000000596046448, 0.40000000596046448),
(0.10924369841814041, 0.42352941632270813, 0.42352941632270813),
(0.11344537883996964, 0.44313725829124451, 0.44313725829124451),
(0.11764705926179886, 0.46274510025978088, 0.46274510025978088),
(0.12184873968362808, 0.48627451062202454, 0.48627451062202454),
(0.1260504275560379, 0.5058823823928833, 0.5058823823928833),
(0.13025210797786713, 0.52941179275512695, 0.52941179275512695),
(0.13445378839969635, 0.54901963472366333, 0.54901963472366333),
(0.13865546882152557, 0.56862747669219971, 0.56862747669219971),
(0.1428571492433548, 0.59215688705444336, 0.59215688705444336),
(0.14705882966518402, 0.61176472902297974, 0.61176472902297974),
(0.15126051008701324, 0.63137257099151611, 0.63137257099151611),
(0.15546219050884247, 0.65490198135375977, 0.65490198135375977),
(0.15966387093067169, 0.69803923368453979, 0.69803923368453979),
(0.16386555135250092, 0.71764707565307617, 0.71764707565307617),
(0.16806723177433014, 0.73725491762161255, 0.73725491762161255),
(0.17226891219615936, 0.7607843279838562, 0.7607843279838562),
(0.17647059261798859, 0.78039216995239258, 0.78039216995239258),
(0.18067227303981781, 0.80000001192092896, 0.80000001192092896),
(0.18487395346164703, 0.82352942228317261, 0.82352942228317261),
(0.18907563388347626, 0.84313726425170898, 0.84313726425170898),
(0.19327731430530548, 0.86666667461395264, 0.86666667461395264),
(0.1974789947271347, 0.88627451658248901, 0.88627451658248901),
(0.20168067514896393, 0.90588235855102539, 0.90588235855102539),
(0.20588235557079315, 0.92941176891326904, 0.92941176891326904),
(0.21008403599262238, 0.94901961088180542, 0.94901961088180542),
(0.2142857164144516, 0.9686274528503418, 0.9686274528503418),
(0.21848739683628082, 0.99215686321258545, 0.99215686321258545),
(0.22268907725811005, 1.0, 1.0), (0.22689075767993927, 1.0, 1.0),
(0.23109243810176849, 1.0, 1.0), (0.23529411852359772, 1.0, 1.0),
(0.23949579894542694, 1.0, 1.0), (0.24369747936725616, 1.0, 1.0),
(0.24789915978908539, 1.0, 1.0), (0.25210085511207581, 1.0, 1.0),
(0.25630253553390503, 1.0, 1.0), (0.26050421595573425, 1.0, 1.0),
(0.26470589637756348, 1.0, 1.0), (0.2689075767993927, 1.0, 1.0),
(0.27310925722122192, 1.0, 1.0), (0.27731093764305115, 1.0, 1.0),
(0.28151261806488037, 1.0, 1.0), (0.28571429848670959, 1.0, 1.0),
(0.28991597890853882, 1.0, 1.0), (0.29411765933036804, 1.0, 1.0),
(0.29831933975219727, 1.0, 1.0), (0.30252102017402649, 1.0, 1.0),
(0.30672270059585571, 1.0, 1.0), (0.31092438101768494, 1.0, 1.0),
(0.31512606143951416, 1.0, 1.0), (0.31932774186134338, 1.0, 1.0),
(0.32352942228317261, 1.0, 1.0), (0.32773110270500183, 1.0, 1.0),
(0.33193278312683105, 1.0, 1.0), (0.33613446354866028, 1.0, 1.0),
(0.3403361439704895, 1.0, 1.0), (0.34453782439231873, 1.0, 1.0),
(0.34873950481414795, 1.0, 1.0), (0.35294118523597717, 1.0, 1.0),
(0.3571428656578064, 1.0, 1.0), (0.36134454607963562, 1.0, 1.0),
(0.36554622650146484, 1.0, 1.0), (0.36974790692329407, 1.0, 1.0),
(0.37394958734512329, 1.0, 1.0), (0.37815126776695251, 1.0, 1.0),
(0.38235294818878174, 1.0, 1.0), (0.38655462861061096, 1.0, 1.0),
(0.39075630903244019, 1.0, 1.0), (0.39495798945426941, 1.0, 1.0),
(0.39915966987609863, 1.0, 1.0), (0.40336135029792786, 1.0, 1.0),
(0.40756303071975708, 1.0, 1.0), (0.4117647111415863, 1.0, 1.0),
(0.41596639156341553, 1.0, 1.0), (0.42016807198524475, 1.0, 1.0),
(0.42436975240707397, 1.0, 1.0), (0.4285714328289032, 1.0, 1.0),
(0.43277311325073242, 1.0, 1.0), (0.43697479367256165, 1.0, 1.0),
(0.44117647409439087, 1.0, 1.0), (0.44537815451622009, 1.0, 1.0),
(0.44957983493804932, 1.0, 1.0), (0.45378151535987854, 1.0, 1.0),
(0.45798319578170776, 1.0, 1.0), (0.46218487620353699, 1.0, 1.0),
(0.46638655662536621, 1.0, 1.0), (0.47058823704719543, 1.0, 1.0),
(0.47478991746902466, 1.0, 1.0), (0.47899159789085388, 1.0, 1.0),
(0.48319327831268311, 1.0, 1.0), (0.48739495873451233, 1.0, 1.0),
(0.49159663915634155, 1.0, 1.0), (0.49579831957817078, 1.0, 1.0), (0.5,
1.0, 1.0), (0.50420171022415161, 1.0, 1.0), (0.50840336084365845, 1.0,
1.0), (0.51260507106781006, 1.0, 1.0), (0.51680672168731689, 1.0, 1.0),
(0.52100843191146851, 1.0, 1.0), (0.52521008253097534, 1.0, 1.0),
(0.52941179275512695, 1.0, 1.0), (0.53361344337463379, 1.0, 1.0),
(0.5378151535987854, 1.0, 1.0), (0.54201680421829224, 1.0, 1.0),
(0.54621851444244385, 1.0, 1.0), (0.55042016506195068, 1.0, 1.0),
(0.55462187528610229, 1.0, 1.0), (0.55882352590560913, 1.0, 1.0),
(0.56302523612976074, 1.0, 1.0), (0.56722688674926758, 1.0, 1.0),
(0.57142859697341919, 1.0, 1.0), (0.57563024759292603, 1.0, 1.0),
(0.57983195781707764, 1.0, 1.0), (0.58403360843658447, 1.0, 1.0),
(0.58823531866073608, 1.0, 1.0), (0.59243696928024292, 1.0, 1.0),
(0.59663867950439453, 0.98039215803146362, 0.98039215803146362),
(0.60084033012390137, 0.93725490570068359, 0.93725490570068359),
(0.60504204034805298, 0.91764706373214722, 0.91764706373214722),
(0.60924369096755981, 0.89411765336990356, 0.89411765336990356),
(0.61344540119171143, 0.87450981140136719, 0.87450981140136719),
(0.61764705181121826, 0.85490196943283081, 0.85490196943283081),
(0.62184876203536987, 0.83137255907058716, 0.83137255907058716),
(0.62605041265487671, 0.81176471710205078, 0.81176471710205078),
(0.63025212287902832, 0.78823530673980713, 0.78823530673980713),
(0.63445377349853516, 0.76862746477127075, 0.76862746477127075),
(0.63865548372268677, 0.74901962280273438, 0.74901962280273438),
(0.6428571343421936, 0.72549021244049072, 0.72549021244049072),
(0.64705884456634521, 0.70588237047195435, 0.70588237047195435),
(0.65126049518585205, 0.68235296010971069, 0.68235296010971069),
(0.65546220541000366, 0.66274511814117432, 0.66274511814117432),
(0.6596638560295105, 0.64313727617263794, 0.64313727617263794),
(0.66386556625366211, 0.60000002384185791, 0.60000002384185791),
(0.66806721687316895, 0.58039218187332153, 0.58039218187332153),
(0.67226892709732056, 0.55686277151107788, 0.55686277151107788),
(0.67647057771682739, 0.5372549295425415, 0.5372549295425415),
(0.680672287940979, 0.51372551918029785, 0.51372551918029785),
(0.68487393856048584, 0.49411764740943909, 0.49411764740943909),
(0.68907564878463745, 0.47450980544090271, 0.47450980544090271),
(0.69327729940414429, 0.45098039507865906, 0.45098039507865906),
(0.6974790096282959, 0.43137255311012268, 0.43137255311012268),
(0.70168066024780273, 0.4117647111415863, 0.4117647111415863),
(0.70588237047195435, 0.38823530077934265, 0.38823530077934265),
(0.71008402109146118, 0.36862745881080627, 0.36862745881080627),
(0.71428573131561279, 0.34509804844856262, 0.34509804844856262),
(0.71848738193511963, 0.32549020648002625, 0.32549020648002625),
(0.72268909215927124, 0.30588236451148987, 0.30588236451148987),
(0.72689074277877808, 0.26274511218070984, 0.26274511218070984),
(0.73109245300292969, 0.24313725531101227, 0.24313725531101227),
(0.73529410362243652, 0.21960784494876862, 0.21960784494876862),
(0.73949581384658813, 0.20000000298023224, 0.20000000298023224),
(0.74369746446609497, 0.17647059261798859, 0.17647059261798859),
(0.74789917469024658, 0.15686275064945221, 0.15686275064945221),
(0.75210082530975342, 0.13725490868091583, 0.13725490868091583),
(0.75630253553390503, 0.11372549086809158, 0.11372549086809158),
(0.76050418615341187, 0.094117648899555206, 0.094117648899555206),
(0.76470589637756348, 0.070588238537311554, 0.070588238537311554),
(0.76890754699707031, 0.050980392843484879, 0.050980392843484879),
(0.77310925722122192, 0.031372550874948502, 0.031372550874948502),
(0.77731090784072876, 0.0078431377187371254, 0.0078431377187371254),
(0.78151261806488037, 0.0, 0.0), (0.78571426868438721, 0.0, 0.0),
(0.78991597890853882, 0.0, 0.0), (0.79411762952804565, 0.0, 0.0),
(0.79831933975219727, 0.0, 0.0), (0.8025209903717041, 0.0, 0.0),
(0.80672270059585571, 0.0, 0.0), (0.81092435121536255, 0.0, 0.0),
(0.81512606143951416, 0.0, 0.0), (0.819327712059021, 0.0, 0.0),
(0.82352942228317261, 0.0, 0.0), (0.82773107290267944, 0.0, 0.0),
(0.83193278312683105, 0.0, 0.0), (0.83613443374633789, 0.0, 0.0),
(0.8403361439704895, 0.0, 0.0), (0.84453779458999634, 0.0, 0.0),
(0.84873950481414795, 0.0, 0.0), (0.85294115543365479, 0.0, 0.0),
(0.8571428656578064, 0.0, 0.0), (0.86134451627731323, 0.0, 0.0),
(0.86554622650146484, 0.0, 0.0), (0.86974787712097168, 0.0, 0.0),
(0.87394958734512329, 0.0, 0.0), (0.87815123796463013, 0.0, 0.0),
(0.88235294818878174, 0.0, 0.0), (0.88655459880828857, 0.0, 0.0),
(0.89075630903244019, 0.0, 0.0), (0.89495795965194702, 0.0, 0.0),
(0.89915966987609863, 0.0, 0.0), (0.90336132049560547, 0.0, 0.0),
(0.90756303071975708, 0.0, 0.0), (0.91176468133926392, 0.0, 0.0),
(0.91596639156341553, 0.0, 0.0), (0.92016804218292236, 0.0, 0.0),
(0.92436975240707397, 0.0, 0.0), (0.92857140302658081, 0.0, 0.0),
(0.93277311325073242, 0.0, 0.0), (0.93697476387023926, 0.0, 0.0),
(0.94117647409439087, 0.0, 0.0), (0.94537812471389771, 0.0, 0.0),
(0.94957983493804932, 0.0, 0.0), (0.95378148555755615, 0.0, 0.0),
(0.95798319578170776, 0.0, 0.0), (0.9621848464012146, 0.0, 0.0),
(0.96638655662536621, 0.0, 0.0), (0.97058820724487305, 0.0, 0.0),
(0.97478991746902466, 0.0, 0.0), (0.97899156808853149, 0.0, 0.0),
(0.98319327831268311, 0.0, 0.0), (0.98739492893218994, 0.0, 0.0),
(0.99159663915634155, 0.0, 0.0), (0.99579828977584839, 0.0, 0.0), (1.0,
0.0, 0.0)], 'red': [(0.0, 1.0, 1.0), (0.0042016808874905109, 1.0, 1.0),
(0.0084033617749810219, 1.0, 1.0), (0.012605042196810246, 1.0, 1.0),
(0.016806723549962044, 1.0, 1.0), (0.021008403971791267, 1.0, 1.0),
(0.025210084393620491, 1.0, 1.0), (0.029411764815449715, 1.0, 1.0),
(0.033613447099924088, 1.0, 1.0), (0.037815127521753311, 1.0, 1.0),
(0.042016807943582535, 1.0, 1.0), (0.046218488365411758, 1.0, 1.0),
(0.050420168787240982, 1.0, 1.0), (0.054621849209070206, 1.0, 1.0),
(0.058823529630899429, 1.0, 1.0), (0.063025213778018951, 1.0, 1.0),
(0.067226894199848175, 1.0, 1.0), (0.071428574621677399, 1.0, 1.0),
(0.075630255043506622, 1.0, 1.0), (0.079831935465335846, 1.0, 1.0),
(0.08403361588716507, 1.0, 1.0), (0.088235296308994293, 1.0, 1.0),
(0.092436976730823517, 1.0, 1.0), (0.09663865715265274, 1.0, 1.0),
(0.10084033757448196, 1.0, 1.0), (0.10504201799631119, 1.0, 1.0),
(0.10924369841814041, 1.0, 1.0), (0.11344537883996964, 1.0, 1.0),
(0.11764705926179886, 1.0, 1.0), (0.12184873968362808, 1.0, 1.0),
(0.1260504275560379, 1.0, 1.0), (0.13025210797786713, 1.0, 1.0),
(0.13445378839969635, 1.0, 1.0), (0.13865546882152557, 1.0, 1.0),
(0.1428571492433548, 1.0, 1.0), (0.14705882966518402, 1.0, 1.0),
(0.15126051008701324, 1.0, 1.0), (0.15546219050884247, 1.0, 1.0),
(0.15966387093067169, 1.0, 1.0), (0.16386555135250092, 1.0, 1.0),
(0.16806723177433014, 1.0, 1.0), (0.17226891219615936, 1.0, 1.0),
(0.17647059261798859, 1.0, 1.0), (0.18067227303981781, 1.0, 1.0),
(0.18487395346164703, 1.0, 1.0), (0.18907563388347626, 1.0, 1.0),
(0.19327731430530548, 1.0, 1.0), (0.1974789947271347, 1.0, 1.0),
(0.20168067514896393, 1.0, 1.0), (0.20588235557079315, 1.0, 1.0),
(0.21008403599262238, 1.0, 1.0), (0.2142857164144516, 1.0, 1.0),
(0.21848739683628082, 1.0, 1.0), (0.22268907725811005,
0.96078431606292725, 0.96078431606292725), (0.22689075767993927,
0.94117647409439087, 0.94117647409439087), (0.23109243810176849,
0.92156863212585449, 0.92156863212585449), (0.23529411852359772,
0.89803922176361084, 0.89803922176361084), (0.23949579894542694,
0.87843137979507446, 0.87843137979507446), (0.24369747936725616,
0.85882353782653809, 0.85882353782653809), (0.24789915978908539,
0.83529412746429443, 0.83529412746429443), (0.25210085511207581,
0.81568628549575806, 0.81568628549575806), (0.25630253553390503,
0.7921568751335144, 0.7921568751335144), (0.26050421595573425,
0.77254903316497803, 0.77254903316497803), (0.26470589637756348,
0.75294119119644165, 0.75294119119644165), (0.2689075767993927,
0.729411780834198, 0.729411780834198), (0.27310925722122192,
0.70980393886566162, 0.70980393886566162), (0.27731093764305115,
0.68627452850341797, 0.68627452850341797), (0.28151261806488037,
0.66666668653488159, 0.66666668653488159), (0.28571429848670959,
0.62352943420410156, 0.62352943420410156), (0.28991597890853882,
0.60392159223556519, 0.60392159223556519), (0.29411765933036804,
0.58431375026702881, 0.58431375026702881), (0.29831933975219727,
0.56078433990478516, 0.56078433990478516), (0.30252102017402649,
0.54117649793624878, 0.54117649793624878), (0.30672270059585571,
0.51764708757400513, 0.51764708757400513), (0.31092438101768494,
0.49803921580314636, 0.49803921580314636), (0.31512606143951416,
0.47843137383460999, 0.47843137383460999), (0.31932774186134338,
0.45490196347236633, 0.45490196347236633), (0.32352942228317261,
0.43529412150382996, 0.43529412150382996), (0.32773110270500183,
0.41568627953529358, 0.41568627953529358), (0.33193278312683105,
0.39215686917304993, 0.39215686917304993), (0.33613446354866028,
0.37254902720451355, 0.37254902720451355), (0.3403361439704895,
0.3490196168422699, 0.3490196168422699), (0.34453782439231873,
0.32941177487373352, 0.32941177487373352), (0.34873950481414795,
0.28627452254295349, 0.28627452254295349), (0.35294118523597717,
0.26666668057441711, 0.26666668057441711), (0.3571428656578064,
0.24705882370471954, 0.24705882370471954), (0.36134454607963562,
0.22352941334247589, 0.22352941334247589), (0.36554622650146484,
0.20392157137393951, 0.20392157137393951), (0.36974790692329407,
0.18039216101169586, 0.18039216101169586), (0.37394958734512329,
0.16078431904315948, 0.16078431904315948), (0.37815126776695251,
0.14117647707462311, 0.14117647707462311), (0.38235294818878174,
0.11764705926179886, 0.11764705926179886), (0.38655462861061096,
0.098039217293262482, 0.098039217293262482), (0.39075630903244019,
0.074509806931018829, 0.074509806931018829), (0.39495798945426941,
0.054901961237192154, 0.054901961237192154), (0.39915966987609863,
0.035294119268655777, 0.035294119268655777), (0.40336135029792786,
0.011764706112444401, 0.011764706112444401), (0.40756303071975708, 0.0,
0.0), (0.4117647111415863, 0.0, 0.0), (0.41596639156341553, 0.0, 0.0),
(0.42016807198524475, 0.0, 0.0), (0.42436975240707397, 0.0, 0.0),
(0.4285714328289032, 0.0, 0.0), (0.43277311325073242, 0.0, 0.0),
(0.43697479367256165, 0.0, 0.0), (0.44117647409439087, 0.0, 0.0),
(0.44537815451622009, 0.0, 0.0), (0.44957983493804932, 0.0, 0.0),
(0.45378151535987854, 0.0, 0.0), (0.45798319578170776, 0.0, 0.0),
(0.46218487620353699, 0.0, 0.0), (0.46638655662536621, 0.0, 0.0),
(0.47058823704719543, 0.0, 0.0), (0.47478991746902466, 0.0, 0.0),
(0.47899159789085388, 0.0, 0.0), (0.48319327831268311, 0.0, 0.0),
(0.48739495873451233, 0.0, 0.0), (0.49159663915634155, 0.0, 0.0),
(0.49579831957817078, 0.0, 0.0), (0.5, 0.0, 0.0), (0.50420171022415161,
0.0, 0.0), (0.50840336084365845, 0.0, 0.0), (0.51260507106781006, 0.0,
0.0), (0.51680672168731689, 0.0, 0.0), (0.52100843191146851, 0.0, 0.0),
(0.52521008253097534, 0.0, 0.0), (0.52941179275512695, 0.0, 0.0),
(0.53361344337463379, 0.0, 0.0), (0.5378151535987854, 0.0, 0.0),
(0.54201680421829224, 0.0, 0.0), (0.54621851444244385, 0.0, 0.0),
(0.55042016506195068, 0.0, 0.0), (0.55462187528610229, 0.0, 0.0),
(0.55882352590560913, 0.0, 0.0), (0.56302523612976074, 0.0, 0.0),
(0.56722688674926758, 0.0, 0.0), (0.57142859697341919, 0.0, 0.0),
(0.57563024759292603, 0.0, 0.0), (0.57983195781707764, 0.0, 0.0),
(0.58403360843658447, 0.0, 0.0), (0.58823531866073608, 0.0, 0.0),
(0.59243696928024292, 0.0, 0.0), (0.59663867950439453, 0.0, 0.0),
(0.60084033012390137, 0.0, 0.0), (0.60504204034805298, 0.0, 0.0),
(0.60924369096755981, 0.0, 0.0), (0.61344540119171143, 0.0, 0.0),
(0.61764705181121826, 0.0, 0.0), (0.62184876203536987, 0.0, 0.0),
(0.62605041265487671, 0.0, 0.0), (0.63025212287902832, 0.0, 0.0),
(0.63445377349853516, 0.0, 0.0), (0.63865548372268677, 0.0, 0.0),
(0.6428571343421936, 0.0, 0.0), (0.64705884456634521, 0.0, 0.0),
(0.65126049518585205, 0.0, 0.0), (0.65546220541000366, 0.0, 0.0),
(0.6596638560295105, 0.0, 0.0), (0.66386556625366211, 0.0, 0.0),
(0.66806721687316895, 0.0, 0.0), (0.67226892709732056, 0.0, 0.0),
(0.67647057771682739, 0.0, 0.0), (0.680672287940979, 0.0, 0.0),
(0.68487393856048584, 0.0, 0.0), (0.68907564878463745, 0.0, 0.0),
(0.69327729940414429, 0.0, 0.0), (0.6974790096282959, 0.0, 0.0),
(0.70168066024780273, 0.0, 0.0), (0.70588237047195435, 0.0, 0.0),
(0.71008402109146118, 0.0, 0.0), (0.71428573131561279, 0.0, 0.0),
(0.71848738193511963, 0.0, 0.0), (0.72268909215927124, 0.0, 0.0),
(0.72689074277877808, 0.0, 0.0), (0.73109245300292969, 0.0, 0.0),
(0.73529410362243652, 0.0, 0.0), (0.73949581384658813, 0.0, 0.0),
(0.74369746446609497, 0.0, 0.0), (0.74789917469024658, 0.0, 0.0),
(0.75210082530975342, 0.0, 0.0), (0.75630253553390503, 0.0, 0.0),
(0.76050418615341187, 0.0, 0.0), (0.76470589637756348, 0.0, 0.0),
(0.76890754699707031, 0.0, 0.0), (0.77310925722122192, 0.0, 0.0),
(0.77731090784072876, 0.0, 0.0), (0.78151261806488037,
0.0078431377187371254, 0.0078431377187371254), (0.78571426868438721,
0.027450980618596077, 0.027450980618596077), (0.78991597890853882,
0.070588238537311554, 0.070588238537311554), (0.79411762952804565,
0.094117648899555206, 0.094117648899555206), (0.79831933975219727,
0.11372549086809158, 0.11372549086809158), (0.8025209903717041,
0.13333334028720856, 0.13333334028720856), (0.80672270059585571,
0.15686275064945221, 0.15686275064945221), (0.81092435121536255,
0.17647059261798859, 0.17647059261798859), (0.81512606143951416,
0.19607843458652496, 0.19607843458652496), (0.819327712059021,
0.21960784494876862, 0.21960784494876862), (0.82352942228317261,
0.23921568691730499, 0.23921568691730499), (0.82773107290267944,
0.26274511218070984, 0.26274511218070984), (0.83193278312683105,
0.28235295414924622, 0.28235295414924622), (0.83613443374633789,
0.30196079611778259, 0.30196079611778259), (0.8403361439704895,
0.32549020648002625, 0.32549020648002625), (0.84453779458999634,
0.34509804844856262, 0.34509804844856262), (0.84873950481414795,
0.364705890417099, 0.364705890417099), (0.85294115543365479,
0.40784314274787903, 0.40784314274787903), (0.8571428656578064,
0.43137255311012268, 0.43137255311012268), (0.86134451627731323,
0.45098039507865906, 0.45098039507865906), (0.86554622650146484,
0.47058823704719543, 0.47058823704719543), (0.86974787712097168,
0.49411764740943909, 0.49411764740943909), (0.87394958734512329,
0.51372551918029785, 0.51372551918029785), (0.87815123796463013,
0.53333336114883423, 0.53333336114883423), (0.88235294818878174,
0.55686277151107788, 0.55686277151107788), (0.88655459880828857,
0.57647061347961426, 0.57647061347961426), (0.89075630903244019,
0.60000002384185791, 0.60000002384185791), (0.89495795965194702,
0.61960786581039429, 0.61960786581039429), (0.89915966987609863,
0.63921570777893066, 0.63921570777893066), (0.90336132049560547,
0.66274511814117432, 0.66274511814117432), (0.90756303071975708,
0.68235296010971069, 0.68235296010971069), (0.91176468133926392,
0.70588237047195435, 0.70588237047195435), (0.91596639156341553,
0.7450980544090271, 0.7450980544090271), (0.92016804218292236,
0.76862746477127075, 0.76862746477127075), (0.92436975240707397,
0.78823530673980713, 0.78823530673980713), (0.92857140302658081,
0.80784314870834351, 0.80784314870834351), (0.93277311325073242,
0.83137255907058716, 0.83137255907058716), (0.93697476387023926,
0.85098040103912354, 0.85098040103912354), (0.94117647409439087,
0.87450981140136719, 0.87450981140136719), (0.94537812471389771,
0.89411765336990356, 0.89411765336990356), (0.94957983493804932,
0.91372549533843994, 0.91372549533843994), (0.95378148555755615,
0.93725490570068359, 0.93725490570068359), (0.95798319578170776,
0.95686274766921997, 0.95686274766921997), (0.9621848464012146,
0.97647058963775635, 0.97647058963775635), (0.96638655662536621, 1.0,
1.0), (0.97058820724487305, 1.0, 1.0), (0.97478991746902466, 1.0, 1.0),
(0.97899156808853149, 1.0, 1.0), (0.98319327831268311, 1.0, 1.0),
(0.98739492893218994, 1.0, 1.0), (0.99159663915634155, 1.0, 1.0),
(0.99579828977584839, 1.0, 1.0), (1.0, 1.0, 1.0)]}
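
# The dictionaries above and below store colormap data in matplotlib's
# segmented format: for each channel ('red','green','blue'), a list of
# (x,y0,y1) triples gives the channel value y0 just below and y1 just above
# the position x in [0,1]. A minimal sketch (assumption, for illustration
# only; the name 'gist_stern_copy' is hypothetical and not used elsewhere)
# of turning such data into a usable colormap:
#
#   from matplotlib.colors import LinearSegmentedColormap
#   cmap=LinearSegmentedColormap('gist_stern_copy',_gist_stern_data,N=256)
#   pylab.imshow([[0.,1.]],cmap=cmap) # apply the colormap to some data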
_gist_stern_data = {'blue': [(0.0, 0.0, 0.0),
(0.0042016808874905109, 0.0039215688593685627,
0.0039215688593685627), (0.0084033617749810219, 0.011764706112444401,
0.011764706112444401), (0.012605042196810246, 0.019607843831181526,
0.019607843831181526), (0.016806723549962044, 0.027450980618596077,
0.027450980618596077), (0.021008403971791267, 0.035294119268655777,
0.035294119268655777), (0.025210084393620491, 0.043137256056070328,
0.043137256056070328), (0.029411764815449715, 0.050980392843484879,
0.050980392843484879), (0.033613447099924088, 0.058823529630899429,
0.058823529630899429), (0.037815127521753311, 0.066666670143604279,
0.066666670143604279), (0.042016807943582535, 0.08235294371843338,
0.08235294371843338), (0.046218488365411758, 0.090196080505847931,
0.090196080505847931), (0.050420168787240982, 0.098039217293262482,
0.098039217293262482), (0.054621849209070206, 0.10588235408067703,
0.10588235408067703), (0.058823529630899429, 0.11372549086809158,
0.11372549086809158), (0.063025213778018951, 0.12156862765550613,
0.12156862765550613), (0.067226894199848175, 0.12941177189350128,
0.12941177189350128), (0.071428574621677399, 0.13725490868091583,
0.13725490868091583), (0.075630255043506622, 0.14509804546833038,
0.14509804546833038), (0.079831935465335846, 0.15294118225574493,
0.15294118225574493), (0.08403361588716507, 0.16078431904315948,
0.16078431904315948), (0.088235296308994293, 0.16862745583057404,
0.16862745583057404), (0.092436976730823517, 0.17647059261798859,
0.17647059261798859), (0.09663865715265274, 0.18431372940540314,
0.18431372940540314), (0.10084033757448196, 0.19215686619281769,
0.19215686619281769), (0.10504201799631119, 0.20000000298023224,
0.20000000298023224), (0.10924369841814041, 0.20784313976764679,
0.20784313976764679), (0.11344537883996964, 0.21568627655506134,
0.21568627655506134), (0.11764705926179886, 0.22352941334247589,
0.22352941334247589), (0.12184873968362808, 0.23137255012989044,
0.23137255012989044), (0.1260504275560379, 0.24705882370471954,
0.24705882370471954), (0.13025210797786713, 0.25490197539329529,
0.25490197539329529), (0.13445378839969635, 0.26274511218070984,
0.26274511218070984), (0.13865546882152557, 0.27058824896812439,
0.27058824896812439), (0.1428571492433548, 0.27843138575553894,
0.27843138575553894), (0.14705882966518402, 0.28627452254295349,
0.28627452254295349), (0.15126051008701324, 0.29411765933036804,
0.29411765933036804), (0.15546219050884247, 0.30196079611778259,
0.30196079611778259), (0.15966387093067169, 0.30980393290519714,
0.30980393290519714), (0.16386555135250092, 0.31764706969261169,
0.31764706969261169), (0.16806723177433014, 0.32549020648002625,
0.32549020648002625), (0.17226891219615936, 0.3333333432674408,
0.3333333432674408), (0.17647059261798859, 0.34117648005485535,
0.34117648005485535), (0.18067227303981781, 0.3490196168422699,
0.3490196168422699), (0.18487395346164703, 0.35686275362968445,
0.35686275362968445), (0.18907563388347626, 0.364705890417099,
0.364705890417099), (0.19327731430530548, 0.37254902720451355,
0.37254902720451355), (0.1974789947271347, 0.3803921639919281,
0.3803921639919281), (0.20168067514896393, 0.38823530077934265,
0.38823530077934265), (0.20588235557079315, 0.3960784375667572,
0.3960784375667572), (0.21008403599262238, 0.4117647111415863,
0.4117647111415863), (0.2142857164144516, 0.41960784792900085,
0.41960784792900085), (0.21848739683628082, 0.42745098471641541,
0.42745098471641541), (0.22268907725811005, 0.43529412150382996,
0.43529412150382996), (0.22689075767993927, 0.44313725829124451,
0.44313725829124451), (0.23109243810176849, 0.45098039507865906,
0.45098039507865906), (0.23529411852359772, 0.45882353186607361,
0.45882353186607361), (0.23949579894542694, 0.46666666865348816,
0.46666666865348816), (0.24369747936725616, 0.47450980544090271,
0.47450980544090271), (0.24789915978908539, 0.48235294222831726,
0.48235294222831726), (0.25210085511207581, 0.49803921580314636,
0.49803921580314636), (0.25630253553390503, 0.5058823823928833,
0.5058823823928833), (0.26050421595573425, 0.51372551918029785,
0.51372551918029785), (0.26470589637756348, 0.5215686559677124,
0.5215686559677124), (0.2689075767993927, 0.52941179275512695,
0.52941179275512695), (0.27310925722122192, 0.5372549295425415,
0.5372549295425415), (0.27731093764305115, 0.54509806632995605,
0.54509806632995605), (0.28151261806488037, 0.55294120311737061,
0.55294120311737061), (0.28571429848670959, 0.56078433990478516,
0.56078433990478516), (0.28991597890853882, 0.56862747669219971,
0.56862747669219971), (0.29411765933036804, 0.58431375026702881,
0.58431375026702881), (0.29831933975219727, 0.59215688705444336,
0.59215688705444336), (0.30252102017402649, 0.60000002384185791,
0.60000002384185791), (0.30672270059585571, 0.60784316062927246,
0.60784316062927246), (0.31092438101768494, 0.61568629741668701,
0.61568629741668701), (0.31512606143951416, 0.62352943420410156,
0.62352943420410156), (0.31932774186134338, 0.63137257099151611,
0.63137257099151611), (0.32352942228317261, 0.63921570777893066,
0.63921570777893066), (0.32773110270500183, 0.64705884456634521,
0.64705884456634521), (0.33193278312683105, 0.65490198135375977,
0.65490198135375977), (0.33613446354866028, 0.66274511814117432,
0.66274511814117432), (0.3403361439704895, 0.67058825492858887,
0.67058825492858887), (0.34453782439231873, 0.67843139171600342,
0.67843139171600342), (0.34873950481414795, 0.68627452850341797,
0.68627452850341797), (0.35294118523597717, 0.69411766529083252,
0.69411766529083252), (0.3571428656578064, 0.70196080207824707,
0.70196080207824707), (0.36134454607963562, 0.70980393886566162,
0.70980393886566162), (0.36554622650146484, 0.71764707565307617,
0.71764707565307617), (0.36974790692329407, 0.72549021244049072,
0.72549021244049072), (0.37394958734512329, 0.73333334922790527,
0.73333334922790527), (0.37815126776695251, 0.74901962280273438,
0.74901962280273438), (0.38235294818878174, 0.75686275959014893,
0.75686275959014893), (0.38655462861061096, 0.76470589637756348,
0.76470589637756348), (0.39075630903244019, 0.77254903316497803,
0.77254903316497803), (0.39495798945426941, 0.78039216995239258,
0.78039216995239258), (0.39915966987609863, 0.78823530673980713,
0.78823530673980713), (0.40336135029792786, 0.79607844352722168,
0.79607844352722168), (0.40756303071975708, 0.80392158031463623,
0.80392158031463623), (0.4117647111415863, 0.81176471710205078,
0.81176471710205078), (0.41596639156341553, 0.81960785388946533,
0.81960785388946533), (0.42016807198524475, 0.82745099067687988,
0.82745099067687988), (0.42436975240707397, 0.83529412746429443,
0.83529412746429443), (0.4285714328289032, 0.84313726425170898,
0.84313726425170898), (0.43277311325073242, 0.85098040103912354,
0.85098040103912354), (0.43697479367256165, 0.85882353782653809,
0.85882353782653809), (0.44117647409439087, 0.86666667461395264,
0.86666667461395264), (0.44537815451622009, 0.87450981140136719,
0.87450981140136719), (0.44957983493804932, 0.88235294818878174,
0.88235294818878174), (0.45378151535987854, 0.89019608497619629,
0.89019608497619629), (0.45798319578170776, 0.89803922176361084,
0.89803922176361084), (0.46218487620353699, 0.91372549533843994,
0.91372549533843994), (0.46638655662536621, 0.92156863212585449,
0.92156863212585449), (0.47058823704719543, 0.92941176891326904,
0.92941176891326904), (0.47478991746902466, 0.93725490570068359,
0.93725490570068359), (0.47899159789085388, 0.94509804248809814,
0.94509804248809814), (0.48319327831268311, 0.9529411792755127,
0.9529411792755127), (0.48739495873451233, 0.96078431606292725,
0.96078431606292725), (0.49159663915634155, 0.9686274528503418,
0.9686274528503418), (0.49579831957817078, 0.97647058963775635,
0.97647058963775635), (0.5, 0.9843137264251709, 0.9843137264251709),
(0.50420171022415161, 1.0, 1.0), (0.50840336084365845, 0.9843137264251709,
0.9843137264251709), (0.51260507106781006, 0.9686274528503418,
0.9686274528503418), (0.51680672168731689, 0.9529411792755127,
0.9529411792755127), (0.52100843191146851, 0.93333333730697632,
0.93333333730697632), (0.52521008253097534, 0.91764706373214722,
0.91764706373214722), (0.52941179275512695, 0.90196079015731812,
0.90196079015731812), (0.53361344337463379, 0.88627451658248901,
0.88627451658248901), (0.5378151535987854, 0.86666667461395264,
0.86666667461395264), (0.54201680421829224, 0.85098040103912354,
0.85098040103912354), (0.54621851444244385, 0.81960785388946533,
0.81960785388946533), (0.55042016506195068, 0.80000001192092896,
0.80000001192092896), (0.55462187528610229, 0.78431373834609985,
0.78431373834609985), (0.55882352590560913, 0.76862746477127075,
0.76862746477127075), (0.56302523612976074, 0.75294119119644165,
0.75294119119644165), (0.56722688674926758, 0.73333334922790527,
0.73333334922790527), (0.57142859697341919, 0.71764707565307617,
0.71764707565307617), (0.57563024759292603, 0.70196080207824707,
0.70196080207824707), (0.57983195781707764, 0.68627452850341797,
0.68627452850341797), (0.58403360843658447, 0.66666668653488159,
0.66666668653488159), (0.58823531866073608, 0.65098041296005249,
0.65098041296005249), (0.59243696928024292, 0.63529413938522339,
0.63529413938522339), (0.59663867950439453, 0.61960786581039429,
0.61960786581039429), (0.60084033012390137, 0.60000002384185791,
0.60000002384185791), (0.60504204034805298, 0.58431375026702881,
0.58431375026702881), (0.60924369096755981, 0.56862747669219971,
0.56862747669219971), (0.61344540119171143, 0.55294120311737061,
0.55294120311737061), (0.61764705181121826, 0.53333336114883423,
0.53333336114883423), (0.62184876203536987, 0.51764708757400513,
0.51764708757400513), (0.62605041265487671, 0.50196081399917603,
0.50196081399917603), (0.63025212287902832, 0.46666666865348816,
0.46666666865348816), (0.63445377349853516, 0.45098039507865906,
0.45098039507865906), (0.63865548372268677, 0.43529412150382996,
0.43529412150382996), (0.6428571343421936, 0.41960784792900085,
0.41960784792900085), (0.64705884456634521, 0.40000000596046448,
0.40000000596046448), (0.65126049518585205, 0.38431373238563538,
0.38431373238563538), (0.65546220541000366, 0.36862745881080627,
0.36862745881080627), (0.6596638560295105, 0.35294118523597717,
0.35294118523597717), (0.66386556625366211, 0.3333333432674408,
0.3333333432674408), (0.66806721687316895, 0.31764706969261169,
0.31764706969261169), (0.67226892709732056, 0.30196079611778259,
0.30196079611778259), (0.67647057771682739, 0.28627452254295349,
0.28627452254295349), (0.680672287940979, 0.26666668057441711,
0.26666668057441711), (0.68487393856048584, 0.25098040699958801,
0.25098040699958801), (0.68907564878463745, 0.23529411852359772,
0.23529411852359772), (0.69327729940414429, 0.21960784494876862,
0.21960784494876862), (0.6974790096282959, 0.20000000298023224,
0.20000000298023224), (0.70168066024780273, 0.18431372940540314,
0.18431372940540314), (0.70588237047195435, 0.16862745583057404,
0.16862745583057404), (0.71008402109146118, 0.15294118225574493,
0.15294118225574493), (0.71428573131561279, 0.11764705926179886,
0.11764705926179886), (0.71848738193511963, 0.10196078568696976,
0.10196078568696976), (0.72268909215927124, 0.086274512112140656,
0.086274512112140656), (0.72689074277877808, 0.066666670143604279,
0.066666670143604279), (0.73109245300292969, 0.050980392843484879,
0.050980392843484879), (0.73529410362243652, 0.035294119268655777,
0.035294119268655777), (0.73949581384658813, 0.019607843831181526,
0.019607843831181526), (0.74369746446609497, 0.0, 0.0),
(0.74789917469024658, 0.011764706112444401, 0.011764706112444401),
(0.75210082530975342, 0.027450980618596077, 0.027450980618596077),
(0.75630253553390503, 0.058823529630899429, 0.058823529630899429),
(0.76050418615341187, 0.074509806931018829, 0.074509806931018829),
(0.76470589637756348, 0.086274512112140656, 0.086274512112140656),
(0.76890754699707031, 0.10196078568696976, 0.10196078568696976),
(0.77310925722122192, 0.11764705926179886, 0.11764705926179886),
(0.77731090784072876, 0.13333334028720856, 0.13333334028720856),
(0.78151261806488037, 0.14901961386203766, 0.14901961386203766),
(0.78571426868438721, 0.16078431904315948, 0.16078431904315948),
(0.78991597890853882, 0.17647059261798859, 0.17647059261798859),
(0.79411762952804565, 0.19215686619281769, 0.19215686619281769),
(0.79831933975219727, 0.22352941334247589, 0.22352941334247589),
(0.8025209903717041, 0.23529411852359772, 0.23529411852359772),
(0.80672270059585571, 0.25098040699958801, 0.25098040699958801),
(0.81092435121536255, 0.26666668057441711, 0.26666668057441711),
(0.81512606143951416, 0.28235295414924622, 0.28235295414924622),
(0.819327712059021, 0.29803922772407532, 0.29803922772407532),
(0.82352942228317261, 0.30980393290519714, 0.30980393290519714),
(0.82773107290267944, 0.32549020648002625, 0.32549020648002625),
(0.83193278312683105, 0.34117648005485535, 0.34117648005485535),
(0.83613443374633789, 0.35686275362968445, 0.35686275362968445),
(0.8403361439704895, 0.37254902720451355, 0.37254902720451355),
(0.84453779458999634, 0.38431373238563538, 0.38431373238563538),
(0.84873950481414795, 0.40000000596046448, 0.40000000596046448),
(0.85294115543365479, 0.41568627953529358, 0.41568627953529358),
(0.8571428656578064, 0.43137255311012268, 0.43137255311012268),
(0.86134451627731323, 0.44705882668495178, 0.44705882668495178),
(0.86554622650146484, 0.45882353186607361, 0.45882353186607361),
(0.86974787712097168, 0.47450980544090271, 0.47450980544090271),
(0.87394958734512329, 0.49019607901573181, 0.49019607901573181),
(0.87815123796463013, 0.5058823823928833, 0.5058823823928833),
(0.88235294818878174, 0.5372549295425415, 0.5372549295425415),
(0.88655459880828857, 0.54901963472366333, 0.54901963472366333),
(0.89075630903244019, 0.56470590829849243, 0.56470590829849243),
(0.89495795965194702, 0.58039218187332153, 0.58039218187332153),
(0.89915966987609863, 0.59607845544815063, 0.59607845544815063),
(0.90336132049560547, 0.61176472902297974, 0.61176472902297974),
(0.90756303071975708, 0.62352943420410156, 0.62352943420410156),
(0.91176468133926392, 0.63921570777893066, 0.63921570777893066),
(0.91596639156341553, 0.65490198135375977, 0.65490198135375977),
(0.92016804218292236, 0.67058825492858887, 0.67058825492858887),
(0.92436975240707397, 0.68627452850341797, 0.68627452850341797),
(0.92857140302658081, 0.69803923368453979, 0.69803923368453979),
(0.93277311325073242, 0.7137255072593689, 0.7137255072593689),
(0.93697476387023926, 0.729411780834198, 0.729411780834198),
(0.94117647409439087, 0.7450980544090271, 0.7450980544090271),
(0.94537812471389771, 0.7607843279838562, 0.7607843279838562),
(0.94957983493804932, 0.77254903316497803, 0.77254903316497803),
(0.95378148555755615, 0.78823530673980713, 0.78823530673980713),
(0.95798319578170776, 0.80392158031463623, 0.80392158031463623),
(0.9621848464012146, 0.81960785388946533, 0.81960785388946533),
(0.96638655662536621, 0.84705883264541626, 0.84705883264541626),
(0.97058820724487305, 0.86274510622024536, 0.86274510622024536),
(0.97478991746902466, 0.87843137979507446, 0.87843137979507446),
(0.97899156808853149, 0.89411765336990356, 0.89411765336990356),
(0.98319327831268311, 0.90980392694473267, 0.90980392694473267),
(0.98739492893218994, 0.92156863212585449, 0.92156863212585449),
(0.99159663915634155, 0.93725490570068359, 0.93725490570068359),
(0.99579828977584839, 0.9529411792755127, 0.9529411792755127), (1.0,
0.9686274528503418, 0.9686274528503418)], 'green': [(0.0, 0.0, 0.0),
(0.0042016808874905109, 0.0039215688593685627, 0.0039215688593685627),
(0.0084033617749810219, 0.0078431377187371254, 0.0078431377187371254),
(0.012605042196810246, 0.011764706112444401, 0.011764706112444401),
(0.016806723549962044, 0.015686275437474251, 0.015686275437474251),
(0.021008403971791267, 0.019607843831181526, 0.019607843831181526),
(0.025210084393620491, 0.023529412224888802, 0.023529412224888802),
(0.029411764815449715, 0.027450980618596077, 0.027450980618596077),
(0.033613447099924088, 0.031372550874948502, 0.031372550874948502),
(0.037815127521753311, 0.035294119268655777, 0.035294119268655777),
(0.042016807943582535, 0.043137256056070328, 0.043137256056070328),
(0.046218488365411758, 0.047058824449777603, 0.047058824449777603),
(0.050420168787240982, 0.050980392843484879, 0.050980392843484879),
(0.054621849209070206, 0.054901961237192154, 0.054901961237192154),
(0.058823529630899429, 0.058823529630899429, 0.058823529630899429),
(0.063025213778018951, 0.062745101749897003, 0.062745101749897003),
(0.067226894199848175, 0.066666670143604279, 0.066666670143604279),
(0.071428574621677399, 0.070588238537311554, 0.070588238537311554),
(0.075630255043506622, 0.074509806931018829, 0.074509806931018829),
(0.079831935465335846, 0.078431375324726105, 0.078431375324726105),
(0.08403361588716507, 0.08235294371843338, 0.08235294371843338),
(0.088235296308994293, 0.086274512112140656, 0.086274512112140656),
(0.092436976730823517, 0.090196080505847931, 0.090196080505847931),
(0.09663865715265274, 0.094117648899555206, 0.094117648899555206),
(0.10084033757448196, 0.098039217293262482, 0.098039217293262482),
(0.10504201799631119, 0.10196078568696976, 0.10196078568696976),
(0.10924369841814041, 0.10588235408067703, 0.10588235408067703),
(0.11344537883996964, 0.10980392247438431, 0.10980392247438431),
(0.11764705926179886, 0.11372549086809158, 0.11372549086809158),
(0.12184873968362808, 0.11764705926179886, 0.11764705926179886),
(0.1260504275560379, 0.12549020349979401, 0.12549020349979401),
(0.13025210797786713, 0.12941177189350128, 0.12941177189350128),
(0.13445378839969635, 0.13333334028720856, 0.13333334028720856),
(0.13865546882152557, 0.13725490868091583, 0.13725490868091583),
(0.1428571492433548, 0.14117647707462311, 0.14117647707462311),
(0.14705882966518402, 0.14509804546833038, 0.14509804546833038),
(0.15126051008701324, 0.14901961386203766, 0.14901961386203766),
(0.15546219050884247, 0.15294118225574493, 0.15294118225574493),
(0.15966387093067169, 0.15686275064945221, 0.15686275064945221),
(0.16386555135250092, 0.16078431904315948, 0.16078431904315948),
(0.16806723177433014, 0.16470588743686676, 0.16470588743686676),
(0.17226891219615936, 0.16862745583057404, 0.16862745583057404),
(0.17647059261798859, 0.17254902422428131, 0.17254902422428131),
(0.18067227303981781, 0.17647059261798859, 0.17647059261798859),
(0.18487395346164703, 0.18039216101169586, 0.18039216101169586),
(0.18907563388347626, 0.18431372940540314, 0.18431372940540314),
(0.19327731430530548, 0.18823529779911041, 0.18823529779911041),
(0.1974789947271347, 0.19215686619281769, 0.19215686619281769),
(0.20168067514896393, 0.19607843458652496, 0.19607843458652496),
(0.20588235557079315, 0.20000000298023224, 0.20000000298023224),
(0.21008403599262238, 0.20784313976764679, 0.20784313976764679),
(0.2142857164144516, 0.21176470816135406, 0.21176470816135406),
(0.21848739683628082, 0.21568627655506134, 0.21568627655506134),
(0.22268907725811005, 0.21960784494876862, 0.21960784494876862),
(0.22689075767993927, 0.22352941334247589, 0.22352941334247589),
(0.23109243810176849, 0.22745098173618317, 0.22745098173618317),
(0.23529411852359772, 0.23137255012989044, 0.23137255012989044),
(0.23949579894542694, 0.23529411852359772, 0.23529411852359772),
(0.24369747936725616, 0.23921568691730499, 0.23921568691730499),
(0.24789915978908539, 0.24313725531101227, 0.24313725531101227),
(0.25210085511207581, 0.25098040699958801, 0.25098040699958801),
(0.25630253553390503, 0.25490197539329529, 0.25490197539329529),
(0.26050421595573425, 0.25882354378700256, 0.25882354378700256),
(0.26470589637756348, 0.26274511218070984, 0.26274511218070984),
(0.2689075767993927, 0.26666668057441711, 0.26666668057441711),
(0.27310925722122192, 0.27058824896812439, 0.27058824896812439),
(0.27731093764305115, 0.27450981736183167, 0.27450981736183167),
(0.28151261806488037, 0.27843138575553894, 0.27843138575553894),
(0.28571429848670959, 0.28235295414924622, 0.28235295414924622),
(0.28991597890853882, 0.28627452254295349, 0.28627452254295349),
(0.29411765933036804, 0.29411765933036804, 0.29411765933036804),
(0.29831933975219727, 0.29803922772407532, 0.29803922772407532),
(0.30252102017402649, 0.30196079611778259, 0.30196079611778259),
(0.30672270059585571, 0.30588236451148987, 0.30588236451148987),
(0.31092438101768494, 0.30980393290519714, 0.30980393290519714),
(0.31512606143951416, 0.31372550129890442, 0.31372550129890442),
(0.31932774186134338, 0.31764706969261169, 0.31764706969261169),
(0.32352942228317261, 0.32156863808631897, 0.32156863808631897),
(0.32773110270500183, 0.32549020648002625, 0.32549020648002625),
(0.33193278312683105, 0.32941177487373352, 0.32941177487373352),
(0.33613446354866028, 0.3333333432674408, 0.3333333432674408),
(0.3403361439704895, 0.33725491166114807, 0.33725491166114807),
(0.34453782439231873, 0.34117648005485535, 0.34117648005485535),
(0.34873950481414795, 0.34509804844856262, 0.34509804844856262),
(0.35294118523597717, 0.3490196168422699, 0.3490196168422699),
(0.3571428656578064, 0.35294118523597717, 0.35294118523597717),
(0.36134454607963562, 0.35686275362968445, 0.35686275362968445),
(0.36554622650146484, 0.36078432202339172, 0.36078432202339172),
(0.36974790692329407, 0.364705890417099, 0.364705890417099),
(0.37394958734512329, 0.36862745881080627, 0.36862745881080627),
(0.37815126776695251, 0.37647059559822083, 0.37647059559822083),
(0.38235294818878174, 0.3803921639919281, 0.3803921639919281),
(0.38655462861061096, 0.38431373238563538, 0.38431373238563538),
(0.39075630903244019, 0.38823530077934265, 0.38823530077934265),
(0.39495798945426941, 0.39215686917304993, 0.39215686917304993),
(0.39915966987609863, 0.3960784375667572, 0.3960784375667572),
(0.40336135029792786, 0.40000000596046448, 0.40000000596046448),
(0.40756303071975708, 0.40392157435417175, 0.40392157435417175),
(0.4117647111415863, 0.40784314274787903, 0.40784314274787903),
(0.41596639156341553, 0.4117647111415863, 0.4117647111415863),
(0.42016807198524475, 0.41568627953529358, 0.41568627953529358),
(0.42436975240707397, 0.41960784792900085, 0.41960784792900085),
(0.4285714328289032, 0.42352941632270813, 0.42352941632270813),
(0.43277311325073242, 0.42745098471641541, 0.42745098471641541),
(0.43697479367256165, 0.43137255311012268, 0.43137255311012268),
(0.44117647409439087, 0.43529412150382996, 0.43529412150382996),
(0.44537815451622009, 0.43921568989753723, 0.43921568989753723),
(0.44957983493804932, 0.44313725829124451, 0.44313725829124451),
(0.45378151535987854, 0.44705882668495178, 0.44705882668495178),
(0.45798319578170776, 0.45098039507865906, 0.45098039507865906),
(0.46218487620353699, 0.45882353186607361, 0.45882353186607361),
(0.46638655662536621, 0.46274510025978088, 0.46274510025978088),
(0.47058823704719543, 0.46666666865348816, 0.46666666865348816),
(0.47478991746902466, 0.47058823704719543, 0.47058823704719543),
(0.47899159789085388, 0.47450980544090271, 0.47450980544090271),
(0.48319327831268311, 0.47843137383460999, 0.47843137383460999),
(0.48739495873451233, 0.48235294222831726, 0.48235294222831726),
(0.49159663915634155, 0.48627451062202454, 0.48627451062202454),
(0.49579831957817078, 0.49019607901573181, 0.49019607901573181), (0.5,
0.49411764740943909, 0.49411764740943909), (0.50420171022415161,
0.50196081399917603, 0.50196081399917603), (0.50840336084365845,
0.5058823823928833, 0.5058823823928833), (0.51260507106781006,
0.50980395078659058, 0.50980395078659058), (0.51680672168731689,
0.51372551918029785, 0.51372551918029785), (0.52100843191146851,
0.51764708757400513, 0.51764708757400513), (0.52521008253097534,
0.5215686559677124, 0.5215686559677124), (0.52941179275512695,
0.52549022436141968, 0.52549022436141968), (0.53361344337463379,
0.52941179275512695, 0.52941179275512695), (0.5378151535987854,
0.53333336114883423, 0.53333336114883423), (0.54201680421829224,
0.5372549295425415, 0.5372549295425415), (0.54621851444244385,
0.54509806632995605, 0.54509806632995605), (0.55042016506195068,
0.54901963472366333, 0.54901963472366333), (0.55462187528610229,
0.55294120311737061, 0.55294120311737061), (0.55882352590560913,
0.55686277151107788, 0.55686277151107788), (0.56302523612976074,
0.56078433990478516, 0.56078433990478516), (0.56722688674926758,
0.56470590829849243, 0.56470590829849243), (0.57142859697341919,
0.56862747669219971, 0.56862747669219971), (0.57563024759292603,
0.57254904508590698, 0.57254904508590698), (0.57983195781707764,
0.57647061347961426, 0.57647061347961426), (0.58403360843658447,
0.58039218187332153, 0.58039218187332153), (0.58823531866073608,
0.58431375026702881, 0.58431375026702881), (0.59243696928024292,
0.58823531866073608, 0.58823531866073608), (0.59663867950439453,
0.59215688705444336, 0.59215688705444336), (0.60084033012390137,
0.59607845544815063, 0.59607845544815063), (0.60504204034805298,
0.60000002384185791, 0.60000002384185791), (0.60924369096755981,
0.60392159223556519, 0.60392159223556519), (0.61344540119171143,
0.60784316062927246, 0.60784316062927246), (0.61764705181121826,
0.61176472902297974, 0.61176472902297974), (0.62184876203536987,
0.61568629741668701, 0.61568629741668701), (0.62605041265487671,
0.61960786581039429, 0.61960786581039429), (0.63025212287902832,
0.62745100259780884, 0.62745100259780884), (0.63445377349853516,
0.63137257099151611, 0.63137257099151611), (0.63865548372268677,
0.63529413938522339, 0.63529413938522339), (0.6428571343421936,
0.63921570777893066, 0.63921570777893066), (0.64705884456634521,
0.64313727617263794, 0.64313727617263794), (0.65126049518585205,
0.64705884456634521, 0.64705884456634521), (0.65546220541000366,
0.65098041296005249, 0.65098041296005249), (0.6596638560295105,
0.65490198135375977, 0.65490198135375977), (0.66386556625366211,
0.65882354974746704, 0.65882354974746704), (0.66806721687316895,
0.66274511814117432, 0.66274511814117432), (0.67226892709732056,
0.66666668653488159, 0.66666668653488159), (0.67647057771682739,
0.67058825492858887, 0.67058825492858887), (0.680672287940979,
0.67450982332229614, 0.67450982332229614), (0.68487393856048584,
0.67843139171600342, 0.67843139171600342), (0.68907564878463745,
0.68235296010971069, 0.68235296010971069), (0.69327729940414429,
0.68627452850341797, 0.68627452850341797), (0.6974790096282959,
0.69019609689712524, 0.69019609689712524), (0.70168066024780273,
0.69411766529083252, 0.69411766529083252), (0.70588237047195435,
0.69803923368453979, 0.69803923368453979), (0.71008402109146118,
0.70196080207824707, 0.70196080207824707), (0.71428573131561279,
0.70980393886566162, 0.70980393886566162), (0.71848738193511963,
0.7137255072593689, 0.7137255072593689), (0.72268909215927124,
0.71764707565307617, 0.71764707565307617), (0.72689074277877808,
0.72156864404678345, 0.72156864404678345), (0.73109245300292969,
0.72549021244049072, 0.72549021244049072), (0.73529410362243652,
0.729411780834198, 0.729411780834198), (0.73949581384658813,
0.73333334922790527, 0.73333334922790527), (0.74369746446609497,
0.73725491762161255, 0.73725491762161255), (0.74789917469024658,
0.74117648601531982, 0.74117648601531982), (0.75210082530975342,
0.7450980544090271, 0.7450980544090271), (0.75630253553390503,
0.75294119119644165, 0.75294119119644165), (0.76050418615341187,
0.75686275959014893, 0.75686275959014893), (0.76470589637756348,
0.7607843279838562, 0.7607843279838562), (0.76890754699707031,
0.76470589637756348, 0.76470589637756348), (0.77310925722122192,
0.76862746477127075, 0.76862746477127075), (0.77731090784072876,
0.77254903316497803, 0.77254903316497803), (0.78151261806488037,
0.7764706015586853, 0.7764706015586853), (0.78571426868438721,
0.78039216995239258, 0.78039216995239258), (0.78991597890853882,
0.78431373834609985, 0.78431373834609985), (0.79411762952804565,
0.78823530673980713, 0.78823530673980713), (0.79831933975219727,
0.79607844352722168, 0.79607844352722168), (0.8025209903717041,
0.80000001192092896, 0.80000001192092896), (0.80672270059585571,
0.80392158031463623, 0.80392158031463623), (0.81092435121536255,
0.80784314870834351, 0.80784314870834351), (0.81512606143951416,
0.81176471710205078, 0.81176471710205078), (0.819327712059021,
0.81568628549575806, 0.81568628549575806), (0.82352942228317261,
0.81960785388946533, 0.81960785388946533), (0.82773107290267944,
0.82352942228317261, 0.82352942228317261), (0.83193278312683105,
0.82745099067687988, 0.82745099067687988), (0.83613443374633789,
0.83137255907058716, 0.83137255907058716), (0.8403361439704895,
0.83529412746429443, 0.83529412746429443), (0.84453779458999634,
0.83921569585800171, 0.83921569585800171), (0.84873950481414795,
0.84313726425170898, 0.84313726425170898), (0.85294115543365479,
0.84705883264541626, 0.84705883264541626), (0.8571428656578064,
0.85098040103912354, 0.85098040103912354), (0.86134451627731323,
0.85490196943283081, 0.85490196943283081), (0.86554622650146484,
0.85882353782653809, 0.85882353782653809), (0.86974787712097168,
0.86274510622024536, 0.86274510622024536), (0.87394958734512329,
0.86666667461395264, 0.86666667461395264), (0.87815123796463013,
0.87058824300765991, 0.87058824300765991), (0.88235294818878174,
0.87843137979507446, 0.87843137979507446), (0.88655459880828857,
0.88235294818878174, 0.88235294818878174), (0.89075630903244019,
0.88627451658248901, 0.88627451658248901), (0.89495795965194702,
0.89019608497619629, 0.89019608497619629), (0.89915966987609863,
0.89411765336990356, 0.89411765336990356), (0.90336132049560547,
0.89803922176361084, 0.89803922176361084), (0.90756303071975708,
0.90196079015731812, 0.90196079015731812), (0.91176468133926392,
0.90588235855102539, 0.90588235855102539), (0.91596639156341553,
0.90980392694473267, 0.90980392694473267), (0.92016804218292236,
0.91372549533843994, 0.91372549533843994), (0.92436975240707397,
0.91764706373214722, 0.91764706373214722), (0.92857140302658081,
0.92156863212585449, 0.92156863212585449), (0.93277311325073242,
0.92549020051956177, 0.92549020051956177), (0.93697476387023926,
0.92941176891326904, 0.92941176891326904), (0.94117647409439087,
0.93333333730697632, 0.93333333730697632), (0.94537812471389771,
0.93725490570068359, 0.93725490570068359), (0.94957983493804932,
0.94117647409439087, 0.94117647409439087), (0.95378148555755615,
0.94509804248809814, 0.94509804248809814), (0.95798319578170776,
0.94901961088180542, 0.94901961088180542), (0.9621848464012146,
0.9529411792755127, 0.9529411792755127), (0.96638655662536621,
0.96078431606292725, 0.96078431606292725), (0.97058820724487305,
0.96470588445663452, 0.96470588445663452), (0.97478991746902466,
0.9686274528503418, 0.9686274528503418), (0.97899156808853149,
0.97254902124404907, 0.97254902124404907), (0.98319327831268311,
0.97647058963775635, 0.97647058963775635), (0.98739492893218994,
0.98039215803146362, 0.98039215803146362), (0.99159663915634155,
0.9843137264251709, 0.9843137264251709), (0.99579828977584839,
0.98823529481887817, 0.98823529481887817), (1.0, 0.99215686321258545,
0.99215686321258545)], 'red': [(0.0, 0.0, 0.0), (0.0042016808874905109,
0.070588238537311554, 0.070588238537311554), (0.0084033617749810219,
0.14117647707462311, 0.14117647707462311), (0.012605042196810246,
0.21176470816135406, 0.21176470816135406), (0.016806723549962044,
0.28235295414924622, 0.28235295414924622), (0.021008403971791267,
0.35294118523597717, 0.35294118523597717), (0.025210084393620491,
0.42352941632270813, 0.42352941632270813), (0.029411764815449715,
0.49803921580314636, 0.49803921580314636), (0.033613447099924088,
0.56862747669219971, 0.56862747669219971), (0.037815127521753311,
0.63921570777893066, 0.63921570777893066), (0.042016807943582535,
0.78039216995239258, 0.78039216995239258), (0.046218488365411758,
0.85098040103912354, 0.85098040103912354), (0.050420168787240982,
0.92156863212585449, 0.92156863212585449), (0.054621849209070206,
0.99607843160629272, 0.99607843160629272), (0.058823529630899429,
0.97647058963775635, 0.97647058963775635), (0.063025213778018951,
0.95686274766921997, 0.95686274766921997), (0.067226894199848175,
0.93725490570068359, 0.93725490570068359), (0.071428574621677399,
0.91764706373214722, 0.91764706373214722), (0.075630255043506622,
0.89803922176361084, 0.89803922176361084), (0.079831935465335846,
0.87450981140136719, 0.87450981140136719), (0.08403361588716507,
0.85490196943283081, 0.85490196943283081), (0.088235296308994293,
0.83529412746429443, 0.83529412746429443), (0.092436976730823517,
0.81568628549575806, 0.81568628549575806), (0.09663865715265274,
0.79607844352722168, 0.79607844352722168), (0.10084033757448196,
0.77254903316497803, 0.77254903316497803), (0.10504201799631119,
0.75294119119644165, 0.75294119119644165), (0.10924369841814041,
0.73333334922790527, 0.73333334922790527), (0.11344537883996964,
0.7137255072593689, 0.7137255072593689), (0.11764705926179886,
0.69411766529083252, 0.69411766529083252), (0.12184873968362808,
0.67450982332229614, 0.67450982332229614), (0.1260504275560379,
0.63137257099151611, 0.63137257099151611), (0.13025210797786713,
0.61176472902297974, 0.61176472902297974), (0.13445378839969635,
0.59215688705444336, 0.59215688705444336), (0.13865546882152557,
0.57254904508590698, 0.57254904508590698), (0.1428571492433548,
0.54901963472366333, 0.54901963472366333), (0.14705882966518402,
0.52941179275512695, 0.52941179275512695), (0.15126051008701324,
0.50980395078659058, 0.50980395078659058), (0.15546219050884247,
0.49019607901573181, 0.49019607901573181), (0.15966387093067169,
0.47058823704719543, 0.47058823704719543), (0.16386555135250092,
0.45098039507865906, 0.45098039507865906), (0.16806723177433014,
0.42745098471641541, 0.42745098471641541), (0.17226891219615936,
0.40784314274787903, 0.40784314274787903), (0.17647059261798859,
0.38823530077934265, 0.38823530077934265), (0.18067227303981781,
0.36862745881080627, 0.36862745881080627), (0.18487395346164703,
0.3490196168422699, 0.3490196168422699), (0.18907563388347626,
0.32549020648002625, 0.32549020648002625), (0.19327731430530548,
0.30588236451148987, 0.30588236451148987), (0.1974789947271347,
0.28627452254295349, 0.28627452254295349), (0.20168067514896393,
0.26666668057441711, 0.26666668057441711), (0.20588235557079315,
0.24705882370471954, 0.24705882370471954), (0.21008403599262238,
0.20392157137393951, 0.20392157137393951), (0.2142857164144516,
0.18431372940540314, 0.18431372940540314), (0.21848739683628082,
0.16470588743686676, 0.16470588743686676), (0.22268907725811005,
0.14509804546833038, 0.14509804546833038), (0.22689075767993927,
0.12549020349979401, 0.12549020349979401), (0.23109243810176849,
0.10196078568696976, 0.10196078568696976), (0.23529411852359772,
0.08235294371843338, 0.08235294371843338), (0.23949579894542694,
0.062745101749897003, 0.062745101749897003), (0.24369747936725616,
0.043137256056070328, 0.043137256056070328), (0.24789915978908539,
0.023529412224888802, 0.023529412224888802), (0.25210085511207581,
0.25098040699958801, 0.25098040699958801), (0.25630253553390503,
0.25490197539329529, 0.25490197539329529), (0.26050421595573425,
0.25882354378700256, 0.25882354378700256), (0.26470589637756348,
0.26274511218070984, 0.26274511218070984), (0.2689075767993927,
0.26666668057441711, 0.26666668057441711), (0.27310925722122192,
0.27058824896812439, 0.27058824896812439), (0.27731093764305115,
0.27450981736183167, 0.27450981736183167), (0.28151261806488037,
0.27843138575553894, 0.27843138575553894), (0.28571429848670959,
0.28235295414924622, 0.28235295414924622), (0.28991597890853882,
0.28627452254295349, 0.28627452254295349), (0.29411765933036804,
0.29411765933036804, 0.29411765933036804), (0.29831933975219727,
0.29803922772407532, 0.29803922772407532), (0.30252102017402649,
0.30196079611778259, 0.30196079611778259), (0.30672270059585571,
0.30588236451148987, 0.30588236451148987), (0.31092438101768494,
0.30980393290519714, 0.30980393290519714), (0.31512606143951416,
0.31372550129890442, 0.31372550129890442), (0.31932774186134338,
0.31764706969261169, 0.31764706969261169), (0.32352942228317261,
0.32156863808631897, 0.32156863808631897), (0.32773110270500183,
0.32549020648002625, 0.32549020648002625), (0.33193278312683105,
0.32941177487373352, 0.32941177487373352), (0.33613446354866028,
0.3333333432674408, 0.3333333432674408), (0.3403361439704895,
0.33725491166114807, 0.33725491166114807), (0.34453782439231873,
0.34117648005485535, 0.34117648005485535), (0.34873950481414795,
0.34509804844856262, 0.34509804844856262), (0.35294118523597717,
0.3490196168422699, 0.3490196168422699), (0.3571428656578064,
0.35294118523597717, 0.35294118523597717), (0.36134454607963562,
0.35686275362968445, 0.35686275362968445), (0.36554622650146484,
0.36078432202339172, 0.36078432202339172), (0.36974790692329407,
0.364705890417099, 0.364705890417099), (0.37394958734512329,
0.36862745881080627, 0.36862745881080627), (0.37815126776695251,
0.37647059559822083, 0.37647059559822083), (0.38235294818878174,
0.3803921639919281, 0.3803921639919281), (0.38655462861061096,
0.38431373238563538, 0.38431373238563538), (0.39075630903244019,
0.38823530077934265, 0.38823530077934265), (0.39495798945426941,
0.39215686917304993, 0.39215686917304993), (0.39915966987609863,
0.3960784375667572, 0.3960784375667572), (0.40336135029792786,
0.40000000596046448, 0.40000000596046448), (0.40756303071975708,
0.40392157435417175, 0.40392157435417175), (0.4117647111415863,
0.40784314274787903, 0.40784314274787903), (0.41596639156341553,
0.4117647111415863, 0.4117647111415863), (0.42016807198524475,
0.41568627953529358, 0.41568627953529358), (0.42436975240707397,
0.41960784792900085, 0.41960784792900085), (0.4285714328289032,
0.42352941632270813, 0.42352941632270813), (0.43277311325073242,
0.42745098471641541, 0.42745098471641541), (0.43697479367256165,
0.43137255311012268, 0.43137255311012268), (0.44117647409439087,
0.43529412150382996, 0.43529412150382996), (0.44537815451622009,
0.43921568989753723, 0.43921568989753723), (0.44957983493804932,
0.44313725829124451, 0.44313725829124451), (0.45378151535987854,
0.44705882668495178, 0.44705882668495178), (0.45798319578170776,
0.45098039507865906, 0.45098039507865906), (0.46218487620353699,
0.45882353186607361, 0.45882353186607361), (0.46638655662536621,
0.46274510025978088, 0.46274510025978088), (0.47058823704719543,
0.46666666865348816, 0.46666666865348816), (0.47478991746902466,
0.47058823704719543, 0.47058823704719543), (0.47899159789085388,
0.47450980544090271, 0.47450980544090271), (0.48319327831268311,
0.47843137383460999, 0.47843137383460999), (0.48739495873451233,
0.48235294222831726, 0.48235294222831726), (0.49159663915634155,
0.48627451062202454, 0.48627451062202454), (0.49579831957817078,
0.49019607901573181, 0.49019607901573181), (0.5, 0.49411764740943909,
0.49411764740943909), (0.50420171022415161, 0.50196081399917603,
0.50196081399917603), (0.50840336084365845, 0.5058823823928833,
0.5058823823928833), (0.51260507106781006, 0.50980395078659058,
0.50980395078659058), (0.51680672168731689, 0.51372551918029785,
0.51372551918029785), (0.52100843191146851, 0.51764708757400513,
0.51764708757400513), (0.52521008253097534, 0.5215686559677124,
0.5215686559677124), (0.52941179275512695, 0.52549022436141968,
0.52549022436141968), (0.53361344337463379, 0.52941179275512695,
0.52941179275512695), (0.5378151535987854, 0.53333336114883423,
0.53333336114883423), (0.54201680421829224, 0.5372549295425415,
0.5372549295425415), (0.54621851444244385, 0.54509806632995605,
0.54509806632995605), (0.55042016506195068, 0.54901963472366333,
0.54901963472366333), (0.55462187528610229, 0.55294120311737061,
0.55294120311737061), (0.55882352590560913, 0.55686277151107788,
0.55686277151107788), (0.56302523612976074, 0.56078433990478516,
0.56078433990478516), (0.56722688674926758, 0.56470590829849243,
0.56470590829849243), (0.57142859697341919, 0.56862747669219971,
0.56862747669219971), (0.57563024759292603, 0.57254904508590698,
0.57254904508590698), (0.57983195781707764, 0.57647061347961426,
0.57647061347961426), (0.58403360843658447, 0.58039218187332153,
0.58039218187332153), (0.58823531866073608, 0.58431375026702881,
0.58431375026702881), (0.59243696928024292, 0.58823531866073608,
0.58823531866073608), (0.59663867950439453, 0.59215688705444336,
0.59215688705444336), (0.60084033012390137, 0.59607845544815063,
0.59607845544815063), (0.60504204034805298, 0.60000002384185791,
0.60000002384185791), (0.60924369096755981, 0.60392159223556519,
0.60392159223556519), (0.61344540119171143, 0.60784316062927246,
0.60784316062927246), (0.61764705181121826, 0.61176472902297974,
0.61176472902297974), (0.62184876203536987, 0.61568629741668701,
0.61568629741668701), (0.62605041265487671, 0.61960786581039429,
0.61960786581039429), (0.63025212287902832, 0.62745100259780884,
0.62745100259780884), (0.63445377349853516, 0.63137257099151611,
0.63137257099151611), (0.63865548372268677, 0.63529413938522339,
0.63529413938522339), (0.6428571343421936, 0.63921570777893066,
0.63921570777893066), (0.64705884456634521, 0.64313727617263794,
0.64313727617263794), (0.65126049518585205, 0.64705884456634521,
0.64705884456634521), (0.65546220541000366, 0.65098041296005249,
0.65098041296005249), (0.6596638560295105, 0.65490198135375977,
0.65490198135375977), (0.66386556625366211, 0.65882354974746704,
0.65882354974746704), (0.66806721687316895, 0.66274511814117432,
0.66274511814117432), (0.67226892709732056, 0.66666668653488159,
0.66666668653488159), (0.67647057771682739, 0.67058825492858887,
0.67058825492858887), (0.680672287940979, 0.67450982332229614,
0.67450982332229614), (0.68487393856048584, 0.67843139171600342,
0.67843139171600342), (0.68907564878463745, 0.68235296010971069,
0.68235296010971069), (0.69327729940414429, 0.68627452850341797,
0.68627452850341797), (0.6974790096282959, 0.69019609689712524,
0.69019609689712524), (0.70168066024780273, 0.69411766529083252,
0.69411766529083252), (0.70588237047195435, 0.69803923368453979,
0.69803923368453979), (0.71008402109146118, 0.70196080207824707,
0.70196080207824707), (0.71428573131561279, 0.70980393886566162,
0.70980393886566162), (0.71848738193511963, 0.7137255072593689,
0.7137255072593689), (0.72268909215927124, 0.71764707565307617,
0.71764707565307617), (0.72689074277877808, 0.72156864404678345,
0.72156864404678345), (0.73109245300292969, 0.72549021244049072,
0.72549021244049072), (0.73529410362243652, 0.729411780834198,
0.729411780834198), (0.73949581384658813, 0.73333334922790527,
0.73333334922790527), (0.74369746446609497, 0.73725491762161255,
0.73725491762161255), (0.74789917469024658, 0.74117648601531982,
0.74117648601531982), (0.75210082530975342, 0.7450980544090271,
0.7450980544090271), (0.75630253553390503, 0.75294119119644165,
0.75294119119644165), (0.76050418615341187, 0.75686275959014893,
0.75686275959014893), (0.76470589637756348, 0.7607843279838562,
0.7607843279838562), (0.76890754699707031, 0.76470589637756348,
0.76470589637756348), (0.77310925722122192, 0.76862746477127075,
0.76862746477127075), (0.77731090784072876, 0.77254903316497803,
0.77254903316497803), (0.78151261806488037, 0.7764706015586853,
0.7764706015586853), (0.78571426868438721, 0.78039216995239258,
0.78039216995239258), (0.78991597890853882, 0.78431373834609985,
0.78431373834609985), (0.79411762952804565, 0.78823530673980713,
0.78823530673980713), (0.79831933975219727, 0.79607844352722168,
0.79607844352722168), (0.8025209903717041, 0.80000001192092896,
0.80000001192092896), (0.80672270059585571, 0.80392158031463623,
0.80392158031463623), (0.81092435121536255, 0.80784314870834351,
0.80784314870834351), (0.81512606143951416, 0.81176471710205078,
0.81176471710205078), (0.819327712059021, 0.81568628549575806,
0.81568628549575806), (0.82352942228317261, 0.81960785388946533,
0.81960785388946533), (0.82773107290267944, 0.82352942228317261,
0.82352942228317261), (0.83193278312683105, 0.82745099067687988,
0.82745099067687988), (0.83613443374633789, 0.83137255907058716,
0.83137255907058716), (0.8403361439704895, 0.83529412746429443,
0.83529412746429443), (0.84453779458999634, 0.83921569585800171,
0.83921569585800171), (0.84873950481414795, 0.84313726425170898,
0.84313726425170898), (0.85294115543365479, 0.84705883264541626,
0.84705883264541626), (0.8571428656578064, 0.85098040103912354,
0.85098040103912354), (0.86134451627731323, 0.85490196943283081,
0.85490196943283081), (0.86554622650146484, 0.85882353782653809,
0.85882353782653809), (0.86974787712097168, 0.86274510622024536,
0.86274510622024536), (0.87394958734512329, 0.86666667461395264,
0.86666667461395264), (0.87815123796463013, 0.87058824300765991,
0.87058824300765991), (0.88235294818878174, 0.87843137979507446,
0.87843137979507446), (0.88655459880828857, 0.88235294818878174,
0.88235294818878174), (0.89075630903244019, 0.88627451658248901,
0.88627451658248901), (0.89495795965194702, 0.89019608497619629,
0.89019608497619629), (0.89915966987609863, 0.89411765336990356,
0.89411765336990356), (0.90336132049560547, 0.89803922176361084,
0.89803922176361084), (0.90756303071975708, 0.90196079015731812,
0.90196079015731812), (0.91176468133926392, 0.90588235855102539,
0.90588235855102539), (0.91596639156341553, 0.90980392694473267,
0.90980392694473267), (0.92016804218292236, 0.91372549533843994,
0.91372549533843994), (0.92436975240707397, 0.91764706373214722,
0.91764706373214722), (0.92857140302658081, 0.92156863212585449,
0.92156863212585449), (0.93277311325073242, 0.92549020051956177,
0.92549020051956177), (0.93697476387023926, 0.92941176891326904,
0.92941176891326904), (0.94117647409439087, 0.93333333730697632,
0.93333333730697632), (0.94537812471389771, 0.93725490570068359,
0.93725490570068359), (0.94957983493804932, 0.94117647409439087,
0.94117647409439087), (0.95378148555755615, 0.94509804248809814,
0.94509804248809814), (0.95798319578170776, 0.94901961088180542,
0.94901961088180542), (0.9621848464012146, 0.9529411792755127,
0.9529411792755127), (0.96638655662536621, 0.96078431606292725,
0.96078431606292725), (0.97058820724487305, 0.96470588445663452,
0.96470588445663452), (0.97478991746902466, 0.9686274528503418,
0.9686274528503418), (0.97899156808853149, 0.97254902124404907,
0.97254902124404907), (0.98319327831268311, 0.97647058963775635,
0.97647058963775635), (0.98739492893218994, 0.98039215803146362,
0.98039215803146362), (0.99159663915634155, 0.9843137264251709,
0.9843137264251709), (0.99579828977584839, 0.98823529481887817,
0.98823529481887817), (1.0, 0.99215686321258545, 0.99215686321258545)]}
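# NOTE: the tables in this file use matplotlib's "segmentdata" format: for
# each channel ('red', 'green', 'blue') a sorted list of (x, y0, y1) triples,
# where x is the normalized position in [0,1], y0 is the channel value
# approached from below and y1 the value used from x upwards (y0==y1 means
# the channel is continuous at x). A minimal sketch of how such a table
# becomes a usable colormap; the helper below is purely illustrative (its
# name is not part of this module's API) and is never called here:
def _segmentdata_to_cmap_demo(data,name='demo_cmap',N=256):
	'Illustrative only: wrap a segmentdata dict (like those in this file) in a colormap object.'
	from matplotlib.colors import LinearSegmentedColormap
	return LinearSegmentedColormap(name,data,N)
# e.g. _segmentdata_to_cmap_demo(_gist_yarg_data)(0.5) evaluates the colormap
# at its midpoint, returning the interpolated RGBA tuple of the gray ramp.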
_gist_yarg_data = {'blue': [(0.0, 1.0, 1.0), (0.0042016808874905109,
0.99607843160629272, 0.99607843160629272), (0.0084033617749810219,
0.99215686321258545, 0.99215686321258545), (0.012605042196810246,
0.98823529481887817, 0.98823529481887817), (0.016806723549962044,
0.9843137264251709, 0.9843137264251709), (0.021008403971791267,
0.98039215803146362, 0.98039215803146362), (0.025210084393620491,
0.97647058963775635, 0.97647058963775635), (0.029411764815449715,
0.97254902124404907, 0.97254902124404907), (0.033613447099924088,
0.96470588445663452, 0.96470588445663452), (0.037815127521753311,
0.96078431606292725, 0.96078431606292725), (0.042016807943582535,
0.95686274766921997, 0.95686274766921997), (0.046218488365411758,
0.9529411792755127, 0.9529411792755127), (0.050420168787240982,
0.94901961088180542, 0.94901961088180542), (0.054621849209070206,
0.94509804248809814, 0.94509804248809814), (0.058823529630899429,
0.94117647409439087, 0.94117647409439087), (0.063025213778018951,
0.93725490570068359, 0.93725490570068359), (0.067226894199848175,
0.93333333730697632, 0.93333333730697632), (0.071428574621677399,
0.92941176891326904, 0.92941176891326904), (0.075630255043506622,
0.92549020051956177, 0.92549020051956177), (0.079831935465335846,
0.92156863212585449, 0.92156863212585449), (0.08403361588716507,
0.91764706373214722, 0.91764706373214722), (0.088235296308994293,
0.91372549533843994, 0.91372549533843994), (0.092436976730823517,
0.90980392694473267, 0.90980392694473267), (0.09663865715265274,
0.90196079015731812, 0.90196079015731812), (0.10084033757448196,
0.89803922176361084, 0.89803922176361084), (0.10504201799631119,
0.89411765336990356, 0.89411765336990356), (0.10924369841814041,
0.89019608497619629, 0.89019608497619629), (0.11344537883996964,
0.88627451658248901, 0.88627451658248901), (0.11764705926179886,
0.88235294818878174, 0.88235294818878174), (0.12184873968362808,
0.87843137979507446, 0.87843137979507446), (0.1260504275560379,
0.87450981140136719, 0.87450981140136719), (0.13025210797786713,
0.87058824300765991, 0.87058824300765991), (0.13445378839969635,
0.86666667461395264, 0.86666667461395264), (0.13865546882152557,
0.86274510622024536, 0.86274510622024536), (0.1428571492433548,
0.85882353782653809, 0.85882353782653809), (0.14705882966518402,
0.85490196943283081, 0.85490196943283081), (0.15126051008701324,
0.85098040103912354, 0.85098040103912354), (0.15546219050884247,
0.84705883264541626, 0.84705883264541626), (0.15966387093067169,
0.83921569585800171, 0.83921569585800171), (0.16386555135250092,
0.83529412746429443, 0.83529412746429443), (0.16806723177433014,
0.83137255907058716, 0.83137255907058716), (0.17226891219615936,
0.82745099067687988, 0.82745099067687988), (0.17647059261798859,
0.82352942228317261, 0.82352942228317261), (0.18067227303981781,
0.81960785388946533, 0.81960785388946533), (0.18487395346164703,
0.81568628549575806, 0.81568628549575806), (0.18907563388347626,
0.81176471710205078, 0.81176471710205078), (0.19327731430530548,
0.80784314870834351, 0.80784314870834351), (0.1974789947271347,
0.80392158031463623, 0.80392158031463623), (0.20168067514896393,
0.80000001192092896, 0.80000001192092896), (0.20588235557079315,
0.79607844352722168, 0.79607844352722168), (0.21008403599262238,
0.7921568751335144, 0.7921568751335144), (0.2142857164144516,
0.78823530673980713, 0.78823530673980713), (0.21848739683628082,
0.78431373834609985, 0.78431373834609985), (0.22268907725811005,
0.7764706015586853, 0.7764706015586853), (0.22689075767993927,
0.77254903316497803, 0.77254903316497803), (0.23109243810176849,
0.76862746477127075, 0.76862746477127075), (0.23529411852359772,
0.76470589637756348, 0.76470589637756348), (0.23949579894542694,
0.7607843279838562, 0.7607843279838562), (0.24369747936725616,
0.75686275959014893, 0.75686275959014893), (0.24789915978908539,
0.75294119119644165, 0.75294119119644165), (0.25210085511207581,
0.74901962280273438, 0.74901962280273438), (0.25630253553390503,
0.7450980544090271, 0.7450980544090271), (0.26050421595573425,
0.74117648601531982, 0.74117648601531982), (0.26470589637756348,
0.73725491762161255, 0.73725491762161255), (0.2689075767993927,
0.73333334922790527, 0.73333334922790527), (0.27310925722122192,
0.729411780834198, 0.729411780834198), (0.27731093764305115,
0.72549021244049072, 0.72549021244049072), (0.28151261806488037,
0.72156864404678345, 0.72156864404678345), (0.28571429848670959,
0.7137255072593689, 0.7137255072593689), (0.28991597890853882,
0.70980393886566162, 0.70980393886566162), (0.29411765933036804,
0.70588237047195435, 0.70588237047195435), (0.29831933975219727,
0.70196080207824707, 0.70196080207824707), (0.30252102017402649,
0.69803923368453979, 0.69803923368453979), (0.30672270059585571,
0.69411766529083252, 0.69411766529083252), (0.31092438101768494,
0.69019609689712524, 0.69019609689712524), (0.31512606143951416,
0.68627452850341797, 0.68627452850341797), (0.31932774186134338,
0.68235296010971069, 0.68235296010971069), (0.32352942228317261,
0.67843139171600342, 0.67843139171600342), (0.32773110270500183,
0.67450982332229614, 0.67450982332229614), (0.33193278312683105,
0.67058825492858887, 0.67058825492858887), (0.33613446354866028,
0.66666668653488159, 0.66666668653488159), (0.3403361439704895,
0.66274511814117432, 0.66274511814117432), (0.34453782439231873,
0.65882354974746704, 0.65882354974746704), (0.34873950481414795,
0.65098041296005249, 0.65098041296005249), (0.35294118523597717,
0.64705884456634521, 0.64705884456634521), (0.3571428656578064,
0.64313727617263794, 0.64313727617263794), (0.36134454607963562,
0.63921570777893066, 0.63921570777893066), (0.36554622650146484,
0.63529413938522339, 0.63529413938522339), (0.36974790692329407,
0.63137257099151611, 0.63137257099151611), (0.37394958734512329,
0.62745100259780884, 0.62745100259780884), (0.37815126776695251,
0.62352943420410156, 0.62352943420410156), (0.38235294818878174,
0.61960786581039429, 0.61960786581039429), (0.38655462861061096,
0.61568629741668701, 0.61568629741668701), (0.39075630903244019,
0.61176472902297974, 0.61176472902297974), (0.39495798945426941,
0.60784316062927246, 0.60784316062927246), (0.39915966987609863,
0.60392159223556519, 0.60392159223556519), (0.40336135029792786,
0.60000002384185791, 0.60000002384185791), (0.40756303071975708,
0.59607845544815063, 0.59607845544815063), (0.4117647111415863,
0.58823531866073608, 0.58823531866073608), (0.41596639156341553,
0.58431375026702881, 0.58431375026702881), (0.42016807198524475,
0.58039218187332153, 0.58039218187332153), (0.42436975240707397,
0.57647061347961426, 0.57647061347961426), (0.4285714328289032,
0.57254904508590698, 0.57254904508590698), (0.43277311325073242,
0.56862747669219971, 0.56862747669219971), (0.43697479367256165,
0.56470590829849243, 0.56470590829849243), (0.44117647409439087,
0.56078433990478516, 0.56078433990478516), (0.44537815451622009,
0.55686277151107788, 0.55686277151107788), (0.44957983493804932,
0.55294120311737061, 0.55294120311737061), (0.45378151535987854,
0.54901963472366333, 0.54901963472366333), (0.45798319578170776,
0.54509806632995605, 0.54509806632995605), (0.46218487620353699,
0.54117649793624878, 0.54117649793624878), (0.46638655662536621,
0.5372549295425415, 0.5372549295425415), (0.47058823704719543,
0.53333336114883423, 0.53333336114883423), (0.47478991746902466,
0.52549022436141968, 0.52549022436141968), (0.47899159789085388,
0.5215686559677124, 0.5215686559677124), (0.48319327831268311,
0.51764708757400513, 0.51764708757400513), (0.48739495873451233,
0.51372551918029785, 0.51372551918029785), (0.49159663915634155,
0.50980395078659058, 0.50980395078659058), (0.49579831957817078,
0.5058823823928833, 0.5058823823928833), (0.5, 0.50196081399917603,
0.50196081399917603), (0.50420171022415161, 0.49803921580314636,
0.49803921580314636), (0.50840336084365845, 0.49411764740943909,
0.49411764740943909), (0.51260507106781006, 0.49019607901573181,
0.49019607901573181), (0.51680672168731689, 0.48627451062202454,
0.48627451062202454), (0.52100843191146851, 0.48235294222831726,
0.48235294222831726), (0.52521008253097534, 0.47843137383460999,
0.47843137383460999), (0.52941179275512695, 0.47450980544090271,
0.47450980544090271), (0.53361344337463379, 0.47058823704719543,
0.47058823704719543), (0.5378151535987854, 0.46274510025978088,
0.46274510025978088), (0.54201680421829224, 0.45882353186607361,
0.45882353186607361), (0.54621851444244385, 0.45490196347236633,
0.45490196347236633), (0.55042016506195068, 0.45098039507865906,
0.45098039507865906), (0.55462187528610229, 0.44705882668495178,
0.44705882668495178), (0.55882352590560913, 0.44313725829124451,
0.44313725829124451), (0.56302523612976074, 0.43921568989753723,
0.43921568989753723), (0.56722688674926758, 0.43529412150382996,
0.43529412150382996), (0.57142859697341919, 0.43137255311012268,
0.43137255311012268), (0.57563024759292603, 0.42745098471641541,
0.42745098471641541), (0.57983195781707764, 0.42352941632270813,
0.42352941632270813), (0.58403360843658447, 0.41960784792900085,
0.41960784792900085), (0.58823531866073608, 0.41568627953529358,
0.41568627953529358), (0.59243696928024292, 0.4117647111415863,
0.4117647111415863), (0.59663867950439453, 0.40784314274787903,
0.40784314274787903), (0.60084033012390137, 0.40000000596046448,
0.40000000596046448), (0.60504204034805298, 0.3960784375667572,
0.3960784375667572), (0.60924369096755981, 0.39215686917304993,
0.39215686917304993), (0.61344540119171143, 0.38823530077934265,
0.38823530077934265), (0.61764705181121826, 0.38431373238563538,
0.38431373238563538), (0.62184876203536987, 0.3803921639919281,
0.3803921639919281), (0.62605041265487671, 0.37647059559822083,
0.37647059559822083), (0.63025212287902832, 0.37254902720451355,
0.37254902720451355), (0.63445377349853516, 0.36862745881080627,
0.36862745881080627), (0.63865548372268677, 0.364705890417099,
0.364705890417099), (0.6428571343421936, 0.36078432202339172,
0.36078432202339172), (0.64705884456634521, 0.35686275362968445,
0.35686275362968445), (0.65126049518585205, 0.35294118523597717,
0.35294118523597717), (0.65546220541000366, 0.3490196168422699,
0.3490196168422699), (0.6596638560295105, 0.34509804844856262,
0.34509804844856262), (0.66386556625366211, 0.33725491166114807,
0.33725491166114807), (0.66806721687316895, 0.3333333432674408,
0.3333333432674408), (0.67226892709732056, 0.32941177487373352,
0.32941177487373352), (0.67647057771682739, 0.32549020648002625,
0.32549020648002625), (0.680672287940979, 0.32156863808631897,
0.32156863808631897), (0.68487393856048584, 0.31764706969261169,
0.31764706969261169), (0.68907564878463745, 0.31372550129890442,
0.31372550129890442), (0.69327729940414429, 0.30980393290519714,
0.30980393290519714), (0.6974790096282959, 0.30588236451148987,
0.30588236451148987), (0.70168066024780273, 0.30196079611778259,
0.30196079611778259), (0.70588237047195435, 0.29803922772407532,
0.29803922772407532), (0.71008402109146118, 0.29411765933036804,
0.29411765933036804), (0.71428573131561279, 0.29019609093666077,
0.29019609093666077), (0.71848738193511963, 0.28627452254295349,
0.28627452254295349), (0.72268909215927124, 0.28235295414924622,
0.28235295414924622), (0.72689074277877808, 0.27450981736183167,
0.27450981736183167), (0.73109245300292969, 0.27058824896812439,
0.27058824896812439), (0.73529410362243652, 0.26666668057441711,
0.26666668057441711), (0.73949581384658813, 0.26274511218070984,
0.26274511218070984), (0.74369746446609497, 0.25882354378700256,
0.25882354378700256), (0.74789917469024658, 0.25490197539329529,
0.25490197539329529), (0.75210082530975342, 0.25098040699958801,
0.25098040699958801), (0.75630253553390503, 0.24705882370471954,
0.24705882370471954), (0.76050418615341187, 0.24313725531101227,
0.24313725531101227), (0.76470589637756348, 0.23921568691730499,
0.23921568691730499), (0.76890754699707031, 0.23529411852359772,
0.23529411852359772), (0.77310925722122192, 0.23137255012989044,
0.23137255012989044), (0.77731090784072876, 0.22745098173618317,
0.22745098173618317), (0.78151261806488037, 0.22352941334247589,
0.22352941334247589), (0.78571426868438721, 0.21960784494876862,
0.21960784494876862), (0.78991597890853882, 0.21176470816135406,
0.21176470816135406), (0.79411762952804565, 0.20784313976764679,
0.20784313976764679), (0.79831933975219727, 0.20392157137393951,
0.20392157137393951), (0.8025209903717041, 0.20000000298023224,
0.20000000298023224), (0.80672270059585571, 0.19607843458652496,
0.19607843458652496), (0.81092435121536255, 0.19215686619281769,
0.19215686619281769), (0.81512606143951416, 0.18823529779911041,
0.18823529779911041), (0.819327712059021, 0.18431372940540314,
0.18431372940540314), (0.82352942228317261, 0.18039216101169586,
0.18039216101169586), (0.82773107290267944, 0.17647059261798859,
0.17647059261798859), (0.83193278312683105, 0.17254902422428131,
0.17254902422428131), (0.83613443374633789, 0.16862745583057404,
0.16862745583057404), (0.8403361439704895, 0.16470588743686676,
0.16470588743686676), (0.84453779458999634, 0.16078431904315948,
0.16078431904315948), (0.84873950481414795, 0.15686275064945221,
0.15686275064945221), (0.85294115543365479, 0.14901961386203766,
0.14901961386203766), (0.8571428656578064, 0.14509804546833038,
0.14509804546833038), (0.86134451627731323, 0.14117647707462311,
0.14117647707462311), (0.86554622650146484, 0.13725490868091583,
0.13725490868091583), (0.86974787712097168, 0.13333334028720856,
0.13333334028720856), (0.87394958734512329, 0.12941177189350128,
0.12941177189350128), (0.87815123796463013, 0.12549020349979401,
0.12549020349979401), (0.88235294818878174, 0.12156862765550613,
0.12156862765550613), (0.88655459880828857, 0.11764705926179886,
0.11764705926179886), (0.89075630903244019, 0.11372549086809158,
0.11372549086809158), (0.89495795965194702, 0.10980392247438431,
0.10980392247438431), (0.89915966987609863, 0.10588235408067703,
0.10588235408067703), (0.90336132049560547, 0.10196078568696976,
0.10196078568696976), (0.90756303071975708, 0.098039217293262482,
0.098039217293262482), (0.91176468133926392, 0.094117648899555206,
0.094117648899555206), (0.91596639156341553, 0.086274512112140656,
0.086274512112140656), (0.92016804218292236, 0.08235294371843338,
0.08235294371843338), (0.92436975240707397, 0.078431375324726105,
0.078431375324726105), (0.92857140302658081, 0.074509806931018829,
0.074509806931018829), (0.93277311325073242, 0.070588238537311554,
0.070588238537311554), (0.93697476387023926, 0.066666670143604279,
0.066666670143604279), (0.94117647409439087, 0.062745101749897003,
0.062745101749897003), (0.94537812471389771, 0.058823529630899429,
0.058823529630899429), (0.94957983493804932, 0.054901961237192154,
0.054901961237192154), (0.95378148555755615, 0.050980392843484879,
0.050980392843484879), (0.95798319578170776, 0.047058824449777603,
0.047058824449777603), (0.9621848464012146, 0.043137256056070328,
0.043137256056070328), (0.96638655662536621, 0.039215687662363052,
0.039215687662363052), (0.97058820724487305, 0.035294119268655777,
0.035294119268655777), (0.97478991746902466, 0.031372550874948502,
0.031372550874948502), (0.97899156808853149, 0.023529412224888802,
0.023529412224888802), (0.98319327831268311, 0.019607843831181526,
0.019607843831181526), (0.98739492893218994, 0.015686275437474251,
0.015686275437474251), (0.99159663915634155, 0.011764706112444401,
0.011764706112444401), (0.99579828977584839, 0.0078431377187371254,
0.0078431377187371254), (1.0, 0.0039215688593685627,
0.0039215688593685627)], 'green': [(0.0, 1.0, 1.0),
(0.0042016808874905109, 0.99607843160629272, 0.99607843160629272),
(0.0084033617749810219, 0.99215686321258545, 0.99215686321258545),
(0.012605042196810246, 0.98823529481887817, 0.98823529481887817),
(0.016806723549962044, 0.9843137264251709, 0.9843137264251709),
(0.021008403971791267, 0.98039215803146362, 0.98039215803146362),
(0.025210084393620491, 0.97647058963775635, 0.97647058963775635),
(0.029411764815449715, 0.97254902124404907, 0.97254902124404907),
(0.033613447099924088, 0.96470588445663452, 0.96470588445663452),
(0.037815127521753311, 0.96078431606292725, 0.96078431606292725),
(0.042016807943582535, 0.95686274766921997, 0.95686274766921997),
(0.046218488365411758, 0.9529411792755127, 0.9529411792755127),
(0.050420168787240982, 0.94901961088180542, 0.94901961088180542),
(0.054621849209070206, 0.94509804248809814, 0.94509804248809814),
(0.058823529630899429, 0.94117647409439087, 0.94117647409439087),
(0.063025213778018951, 0.93725490570068359, 0.93725490570068359),
(0.067226894199848175, 0.93333333730697632, 0.93333333730697632),
(0.071428574621677399, 0.92941176891326904, 0.92941176891326904),
(0.075630255043506622, 0.92549020051956177, 0.92549020051956177),
(0.079831935465335846, 0.92156863212585449, 0.92156863212585449),
(0.08403361588716507, 0.91764706373214722, 0.91764706373214722),
(0.088235296308994293, 0.91372549533843994, 0.91372549533843994),
(0.092436976730823517, 0.90980392694473267, 0.90980392694473267),
(0.09663865715265274, 0.90196079015731812, 0.90196079015731812),
(0.10084033757448196, 0.89803922176361084, 0.89803922176361084),
(0.10504201799631119, 0.89411765336990356, 0.89411765336990356),
(0.10924369841814041, 0.89019608497619629, 0.89019608497619629),
(0.11344537883996964, 0.88627451658248901, 0.88627451658248901),
(0.11764705926179886, 0.88235294818878174, 0.88235294818878174),
(0.12184873968362808, 0.87843137979507446, 0.87843137979507446),
(0.1260504275560379, 0.87450981140136719, 0.87450981140136719),
(0.13025210797786713, 0.87058824300765991, 0.87058824300765991),
(0.13445378839969635, 0.86666667461395264, 0.86666667461395264),
(0.13865546882152557, 0.86274510622024536, 0.86274510622024536),
(0.1428571492433548, 0.85882353782653809, 0.85882353782653809),
(0.14705882966518402, 0.85490196943283081, 0.85490196943283081),
(0.15126051008701324, 0.85098040103912354, 0.85098040103912354),
(0.15546219050884247, 0.84705883264541626, 0.84705883264541626),
(0.15966387093067169, 0.83921569585800171, 0.83921569585800171),
(0.16386555135250092, 0.83529412746429443, 0.83529412746429443),
(0.16806723177433014, 0.83137255907058716, 0.83137255907058716),
(0.17226891219615936, 0.82745099067687988, 0.82745099067687988),
(0.17647059261798859, 0.82352942228317261, 0.82352942228317261),
(0.18067227303981781, 0.81960785388946533, 0.81960785388946533),
(0.18487395346164703, 0.81568628549575806, 0.81568628549575806),
(0.18907563388347626, 0.81176471710205078, 0.81176471710205078),
(0.19327731430530548, 0.80784314870834351, 0.80784314870834351),
(0.1974789947271347, 0.80392158031463623, 0.80392158031463623),
(0.20168067514896393, 0.80000001192092896, 0.80000001192092896),
(0.20588235557079315, 0.79607844352722168, 0.79607844352722168),
(0.21008403599262238, 0.7921568751335144, 0.7921568751335144),
(0.2142857164144516, 0.78823530673980713, 0.78823530673980713),
(0.21848739683628082, 0.78431373834609985, 0.78431373834609985),
(0.22268907725811005, 0.7764706015586853, 0.7764706015586853),
(0.22689075767993927, 0.77254903316497803, 0.77254903316497803),
(0.23109243810176849, 0.76862746477127075, 0.76862746477127075),
(0.23529411852359772, 0.76470589637756348, 0.76470589637756348),
(0.23949579894542694, 0.7607843279838562, 0.7607843279838562),
(0.24369747936725616, 0.75686275959014893, 0.75686275959014893),
(0.24789915978908539, 0.75294119119644165, 0.75294119119644165),
(0.25210085511207581, 0.74901962280273438, 0.74901962280273438),
(0.25630253553390503, 0.7450980544090271, 0.7450980544090271),
(0.26050421595573425, 0.74117648601531982, 0.74117648601531982),
(0.26470589637756348, 0.73725491762161255, 0.73725491762161255),
(0.2689075767993927, 0.73333334922790527, 0.73333334922790527),
(0.27310925722122192, 0.729411780834198, 0.729411780834198),
(0.27731093764305115, 0.72549021244049072, 0.72549021244049072),
(0.28151261806488037, 0.72156864404678345, 0.72156864404678345),
(0.28571429848670959, 0.7137255072593689, 0.7137255072593689),
(0.28991597890853882, 0.70980393886566162, 0.70980393886566162),
(0.29411765933036804, 0.70588237047195435, 0.70588237047195435),
(0.29831933975219727, 0.70196080207824707, 0.70196080207824707),
(0.30252102017402649, 0.69803923368453979, 0.69803923368453979),
(0.30672270059585571, 0.69411766529083252, 0.69411766529083252),
(0.31092438101768494, 0.69019609689712524, 0.69019609689712524),
(0.31512606143951416, 0.68627452850341797, 0.68627452850341797),
(0.31932774186134338, 0.68235296010971069, 0.68235296010971069),
(0.32352942228317261, 0.67843139171600342, 0.67843139171600342),
(0.32773110270500183, 0.67450982332229614, 0.67450982332229614),
(0.33193278312683105, 0.67058825492858887, 0.67058825492858887),
(0.33613446354866028, 0.66666668653488159, 0.66666668653488159),
(0.3403361439704895, 0.66274511814117432, 0.66274511814117432),
(0.34453782439231873, 0.65882354974746704, 0.65882354974746704),
(0.34873950481414795, 0.65098041296005249, 0.65098041296005249),
(0.35294118523597717, 0.64705884456634521, 0.64705884456634521),
(0.3571428656578064, 0.64313727617263794, 0.64313727617263794),
(0.36134454607963562, 0.63921570777893066, 0.63921570777893066),
(0.36554622650146484, 0.63529413938522339, 0.63529413938522339),
(0.36974790692329407, 0.63137257099151611, 0.63137257099151611),
(0.37394958734512329, 0.62745100259780884, 0.62745100259780884),
(0.37815126776695251, 0.62352943420410156, 0.62352943420410156),
(0.38235294818878174, 0.61960786581039429, 0.61960786581039429),
(0.38655462861061096, 0.61568629741668701, 0.61568629741668701),
(0.39075630903244019, 0.61176472902297974, 0.61176472902297974),
(0.39495798945426941, 0.60784316062927246, 0.60784316062927246),
(0.39915966987609863, 0.60392159223556519, 0.60392159223556519),
(0.40336135029792786, 0.60000002384185791, 0.60000002384185791),
(0.40756303071975708, 0.59607845544815063, 0.59607845544815063),
(0.4117647111415863, 0.58823531866073608, 0.58823531866073608),
(0.41596639156341553, 0.58431375026702881, 0.58431375026702881),
(0.42016807198524475, 0.58039218187332153, 0.58039218187332153),
(0.42436975240707397, 0.57647061347961426, 0.57647061347961426),
(0.4285714328289032, 0.57254904508590698, 0.57254904508590698),
(0.43277311325073242, 0.56862747669219971, 0.56862747669219971),
(0.43697479367256165, 0.56470590829849243, 0.56470590829849243),
(0.44117647409439087, 0.56078433990478516, 0.56078433990478516),
(0.44537815451622009, 0.55686277151107788, 0.55686277151107788),
(0.44957983493804932, 0.55294120311737061, 0.55294120311737061),
(0.45378151535987854, 0.54901963472366333, 0.54901963472366333),
(0.45798319578170776, 0.54509806632995605, 0.54509806632995605),
(0.46218487620353699, 0.54117649793624878, 0.54117649793624878),
(0.46638655662536621, 0.5372549295425415, 0.5372549295425415),
(0.47058823704719543, 0.53333336114883423, 0.53333336114883423),
(0.47478991746902466, 0.52549022436141968, 0.52549022436141968),
(0.47899159789085388, 0.5215686559677124, 0.5215686559677124),
(0.48319327831268311, 0.51764708757400513, 0.51764708757400513),
(0.48739495873451233, 0.51372551918029785, 0.51372551918029785),
(0.49159663915634155, 0.50980395078659058, 0.50980395078659058),
(0.49579831957817078, 0.5058823823928833, 0.5058823823928833), (0.5,
0.50196081399917603, 0.50196081399917603), (0.50420171022415161,
0.49803921580314636, 0.49803921580314636), (0.50840336084365845,
0.49411764740943909, 0.49411764740943909), (0.51260507106781006,
0.49019607901573181, 0.49019607901573181), (0.51680672168731689,
0.48627451062202454, 0.48627451062202454), (0.52100843191146851,
0.48235294222831726, 0.48235294222831726), (0.52521008253097534,
0.47843137383460999, 0.47843137383460999), (0.52941179275512695,
0.47450980544090271, 0.47450980544090271), (0.53361344337463379,
0.47058823704719543, 0.47058823704719543), (0.5378151535987854,
0.46274510025978088, 0.46274510025978088), (0.54201680421829224,
0.45882353186607361, 0.45882353186607361), (0.54621851444244385,
0.45490196347236633, 0.45490196347236633), (0.55042016506195068,
0.45098039507865906, 0.45098039507865906), (0.55462187528610229,
0.44705882668495178, 0.44705882668495178), (0.55882352590560913,
0.44313725829124451, 0.44313725829124451), (0.56302523612976074,
0.43921568989753723, 0.43921568989753723), (0.56722688674926758,
0.43529412150382996, 0.43529412150382996), (0.57142859697341919,
0.43137255311012268, 0.43137255311012268), (0.57563024759292603,
0.42745098471641541, 0.42745098471641541), (0.57983195781707764,
0.42352941632270813, 0.42352941632270813), (0.58403360843658447,
0.41960784792900085, 0.41960784792900085), (0.58823531866073608,
0.41568627953529358, 0.41568627953529358), (0.59243696928024292,
0.4117647111415863, 0.4117647111415863), (0.59663867950439453,
0.40784314274787903, 0.40784314274787903), (0.60084033012390137,
0.40000000596046448, 0.40000000596046448), (0.60504204034805298,
0.3960784375667572, 0.3960784375667572), (0.60924369096755981,
0.39215686917304993, 0.39215686917304993), (0.61344540119171143,
0.38823530077934265, 0.38823530077934265), (0.61764705181121826,
0.38431373238563538, 0.38431373238563538), (0.62184876203536987,
0.3803921639919281, 0.3803921639919281), (0.62605041265487671,
0.37647059559822083, 0.37647059559822083), (0.63025212287902832,
0.37254902720451355, 0.37254902720451355), (0.63445377349853516,
0.36862745881080627, 0.36862745881080627), (0.63865548372268677,
0.364705890417099, 0.364705890417099), (0.6428571343421936,
0.36078432202339172, 0.36078432202339172), (0.64705884456634521,
0.35686275362968445, 0.35686275362968445), (0.65126049518585205,
0.35294118523597717, 0.35294118523597717), (0.65546220541000366,
0.3490196168422699, 0.3490196168422699), (0.6596638560295105,
0.34509804844856262, 0.34509804844856262), (0.66386556625366211,
0.33725491166114807, 0.33725491166114807), (0.66806721687316895,
0.3333333432674408, 0.3333333432674408), (0.67226892709732056,
0.32941177487373352, 0.32941177487373352), (0.67647057771682739,
0.32549020648002625, 0.32549020648002625), (0.680672287940979,
0.32156863808631897, 0.32156863808631897), (0.68487393856048584,
0.31764706969261169, 0.31764706969261169), (0.68907564878463745,
0.31372550129890442, 0.31372550129890442), (0.69327729940414429,
0.30980393290519714, 0.30980393290519714), (0.6974790096282959,
0.30588236451148987, 0.30588236451148987), (0.70168066024780273,
0.30196079611778259, 0.30196079611778259), (0.70588237047195435,
0.29803922772407532, 0.29803922772407532), (0.71008402109146118,
0.29411765933036804, 0.29411765933036804), (0.71428573131561279,
0.29019609093666077, 0.29019609093666077), (0.71848738193511963,
0.28627452254295349, 0.28627452254295349), (0.72268909215927124,
0.28235295414924622, 0.28235295414924622), (0.72689074277877808,
0.27450981736183167, 0.27450981736183167), (0.73109245300292969,
0.27058824896812439, 0.27058824896812439), (0.73529410362243652,
0.26666668057441711, 0.26666668057441711), (0.73949581384658813,
0.26274511218070984, 0.26274511218070984), (0.74369746446609497,
0.25882354378700256, 0.25882354378700256), (0.74789917469024658,
0.25490197539329529, 0.25490197539329529), (0.75210082530975342,
0.25098040699958801, 0.25098040699958801), (0.75630253553390503,
0.24705882370471954, 0.24705882370471954), (0.76050418615341187,
0.24313725531101227, 0.24313725531101227), (0.76470589637756348,
0.23921568691730499, 0.23921568691730499), (0.76890754699707031,
0.23529411852359772, 0.23529411852359772), (0.77310925722122192,
0.23137255012989044, 0.23137255012989044), (0.77731090784072876,
0.22745098173618317, 0.22745098173618317), (0.78151261806488037,
0.22352941334247589, 0.22352941334247589), (0.78571426868438721,
0.21960784494876862, 0.21960784494876862), (0.78991597890853882,
0.21176470816135406, 0.21176470816135406), (0.79411762952804565,
0.20784313976764679, 0.20784313976764679), (0.79831933975219727,
0.20392157137393951, 0.20392157137393951), (0.8025209903717041,
0.20000000298023224, 0.20000000298023224), (0.80672270059585571,
0.19607843458652496, 0.19607843458652496), (0.81092435121536255,
0.19215686619281769, 0.19215686619281769), (0.81512606143951416,
0.18823529779911041, 0.18823529779911041), (0.819327712059021,
0.18431372940540314, 0.18431372940540314), (0.82352942228317261,
0.18039216101169586, 0.18039216101169586), (0.82773107290267944,
0.17647059261798859, 0.17647059261798859), (0.83193278312683105,
0.17254902422428131, 0.17254902422428131), (0.83613443374633789,
0.16862745583057404, 0.16862745583057404), (0.8403361439704895,
0.16470588743686676, 0.16470588743686676), (0.84453779458999634,
0.16078431904315948, 0.16078431904315948), (0.84873950481414795,
0.15686275064945221, 0.15686275064945221), (0.85294115543365479,
0.14901961386203766, 0.14901961386203766), (0.8571428656578064,
0.14509804546833038, 0.14509804546833038), (0.86134451627731323,
0.14117647707462311, 0.14117647707462311), (0.86554622650146484,
0.13725490868091583, 0.13725490868091583), (0.86974787712097168,
0.13333334028720856, 0.13333334028720856), (0.87394958734512329,
0.12941177189350128, 0.12941177189350128), (0.87815123796463013,
0.12549020349979401, 0.12549020349979401), (0.88235294818878174,
0.12156862765550613, 0.12156862765550613), (0.88655459880828857,
0.11764705926179886, 0.11764705926179886), (0.89075630903244019,
0.11372549086809158, 0.11372549086809158), (0.89495795965194702,
0.10980392247438431, 0.10980392247438431), (0.89915966987609863,
0.10588235408067703, 0.10588235408067703), (0.90336132049560547,
0.10196078568696976, 0.10196078568696976), (0.90756303071975708,
0.098039217293262482, 0.098039217293262482), (0.91176468133926392,
0.094117648899555206, 0.094117648899555206), (0.91596639156341553,
0.086274512112140656, 0.086274512112140656), (0.92016804218292236,
0.08235294371843338, 0.08235294371843338), (0.92436975240707397,
0.078431375324726105, 0.078431375324726105), (0.92857140302658081,
0.074509806931018829, 0.074509806931018829), (0.93277311325073242,
0.070588238537311554, 0.070588238537311554), (0.93697476387023926,
0.066666670143604279, 0.066666670143604279), (0.94117647409439087,
0.062745101749897003, 0.062745101749897003), (0.94537812471389771,
0.058823529630899429, 0.058823529630899429), (0.94957983493804932,
0.054901961237192154, 0.054901961237192154), (0.95378148555755615,
0.050980392843484879, 0.050980392843484879), (0.95798319578170776,
0.047058824449777603, 0.047058824449777603), (0.9621848464012146,
0.043137256056070328, 0.043137256056070328), (0.96638655662536621,
0.039215687662363052, 0.039215687662363052), (0.97058820724487305,
0.035294119268655777, 0.035294119268655777), (0.97478991746902466,
0.031372550874948502, 0.031372550874948502), (0.97899156808853149,
0.023529412224888802, 0.023529412224888802), (0.98319327831268311,
0.019607843831181526, 0.019607843831181526), (0.98739492893218994,
0.015686275437474251, 0.015686275437474251), (0.99159663915634155,
0.011764706112444401, 0.011764706112444401), (0.99579828977584839,
0.0078431377187371254, 0.0078431377187371254), (1.0,
0.0039215688593685627, 0.0039215688593685627)], 'red': [(0.0, 1.0, 1.0),
(0.0042016808874905109, 0.99607843160629272, 0.99607843160629272),
(0.0084033617749810219, 0.99215686321258545, 0.99215686321258545),
(0.012605042196810246, 0.98823529481887817, 0.98823529481887817),
(0.016806723549962044, 0.9843137264251709, 0.9843137264251709),
(0.021008403971791267, 0.98039215803146362, 0.98039215803146362),
(0.025210084393620491, 0.97647058963775635, 0.97647058963775635),
(0.029411764815449715, 0.97254902124404907, 0.97254902124404907),
(0.033613447099924088, 0.96470588445663452, 0.96470588445663452),
(0.037815127521753311, 0.96078431606292725, 0.96078431606292725),
(0.042016807943582535, 0.95686274766921997, 0.95686274766921997),
(0.046218488365411758, 0.9529411792755127, 0.9529411792755127),
(0.050420168787240982, 0.94901961088180542, 0.94901961088180542),
(0.054621849209070206, 0.94509804248809814, 0.94509804248809814),
(0.058823529630899429, 0.94117647409439087, 0.94117647409439087),
(0.063025213778018951, 0.93725490570068359, 0.93725490570068359),
(0.067226894199848175, 0.93333333730697632, 0.93333333730697632),
(0.071428574621677399, 0.92941176891326904, 0.92941176891326904),
(0.075630255043506622, 0.92549020051956177, 0.92549020051956177),
(0.079831935465335846, 0.92156863212585449, 0.92156863212585449),
(0.08403361588716507, 0.91764706373214722, 0.91764706373214722),
(0.088235296308994293, 0.91372549533843994, 0.91372549533843994),
(0.092436976730823517, 0.90980392694473267, 0.90980392694473267),
(0.09663865715265274, 0.90196079015731812, 0.90196079015731812),
(0.10084033757448196, 0.89803922176361084, 0.89803922176361084),
(0.10504201799631119, 0.89411765336990356, 0.89411765336990356),
(0.10924369841814041, 0.89019608497619629, 0.89019608497619629),
(0.11344537883996964, 0.88627451658248901, 0.88627451658248901),
(0.11764705926179886, 0.88235294818878174, 0.88235294818878174),
(0.12184873968362808, 0.87843137979507446, 0.87843137979507446),
(0.1260504275560379, 0.87450981140136719, 0.87450981140136719),
(0.13025210797786713, 0.87058824300765991, 0.87058824300765991),
(0.13445378839969635, 0.86666667461395264, 0.86666667461395264),
(0.13865546882152557, 0.86274510622024536, 0.86274510622024536),
(0.1428571492433548, 0.85882353782653809, 0.85882353782653809),
(0.14705882966518402, 0.85490196943283081, 0.85490196943283081),
(0.15126051008701324, 0.85098040103912354, 0.85098040103912354),
(0.15546219050884247, 0.84705883264541626, 0.84705883264541626),
(0.15966387093067169, 0.83921569585800171, 0.83921569585800171),
(0.16386555135250092, 0.83529412746429443, 0.83529412746429443),
(0.16806723177433014, 0.83137255907058716, 0.83137255907058716),
(0.17226891219615936, 0.82745099067687988, 0.82745099067687988),
(0.17647059261798859, 0.82352942228317261, 0.82352942228317261),
(0.18067227303981781, 0.81960785388946533, 0.81960785388946533),
(0.18487395346164703, 0.81568628549575806, 0.81568628549575806),
(0.18907563388347626, 0.81176471710205078, 0.81176471710205078),
(0.19327731430530548, 0.80784314870834351, 0.80784314870834351),
(0.1974789947271347, 0.80392158031463623, 0.80392158031463623),
(0.20168067514896393, 0.80000001192092896, 0.80000001192092896),
(0.20588235557079315, 0.79607844352722168, 0.79607844352722168),
(0.21008403599262238, 0.7921568751335144, 0.7921568751335144),
(0.2142857164144516, 0.78823530673980713, 0.78823530673980713),
(0.21848739683628082, 0.78431373834609985, 0.78431373834609985),
(0.22268907725811005, 0.7764706015586853, 0.7764706015586853),
(0.22689075767993927, 0.77254903316497803, 0.77254903316497803),
(0.23109243810176849, 0.76862746477127075, 0.76862746477127075),
(0.23529411852359772, 0.76470589637756348, 0.76470589637756348),
(0.23949579894542694, 0.7607843279838562, 0.7607843279838562),
(0.24369747936725616, 0.75686275959014893, 0.75686275959014893),
(0.24789915978908539, 0.75294119119644165, 0.75294119119644165),
(0.25210085511207581, 0.74901962280273438, 0.74901962280273438),
(0.25630253553390503, 0.7450980544090271, 0.7450980544090271),
(0.26050421595573425, 0.74117648601531982, 0.74117648601531982),
(0.26470589637756348, 0.73725491762161255, 0.73725491762161255),
(0.2689075767993927, 0.73333334922790527, 0.73333334922790527),
(0.27310925722122192, 0.729411780834198, 0.729411780834198),
(0.27731093764305115, 0.72549021244049072, 0.72549021244049072),
(0.28151261806488037, 0.72156864404678345, 0.72156864404678345),
(0.28571429848670959, 0.7137255072593689, 0.7137255072593689),
(0.28991597890853882, 0.70980393886566162, 0.70980393886566162),
(0.29411765933036804, 0.70588237047195435, 0.70588237047195435),
(0.29831933975219727, 0.70196080207824707, 0.70196080207824707),
(0.30252102017402649, 0.69803923368453979, 0.69803923368453979),
(0.30672270059585571, 0.69411766529083252, 0.69411766529083252),
(0.31092438101768494, 0.69019609689712524, 0.69019609689712524),
(0.31512606143951416, 0.68627452850341797, 0.68627452850341797),
(0.31932774186134338, 0.68235296010971069, 0.68235296010971069),
(0.32352942228317261, 0.67843139171600342, 0.67843139171600342),
(0.32773110270500183, 0.67450982332229614, 0.67450982332229614),
(0.33193278312683105, 0.67058825492858887, 0.67058825492858887),
(0.33613446354866028, 0.66666668653488159, 0.66666668653488159),
(0.3403361439704895, 0.66274511814117432, 0.66274511814117432),
(0.34453782439231873, 0.65882354974746704, 0.65882354974746704),
(0.34873950481414795, 0.65098041296005249, 0.65098041296005249),
(0.35294118523597717, 0.64705884456634521, 0.64705884456634521),
(0.3571428656578064, 0.64313727617263794, 0.64313727617263794),
(0.36134454607963562, 0.63921570777893066, 0.63921570777893066),
(0.36554622650146484, 0.63529413938522339, 0.63529413938522339),
(0.36974790692329407, 0.63137257099151611, 0.63137257099151611),
(0.37394958734512329, 0.62745100259780884, 0.62745100259780884),
(0.37815126776695251, 0.62352943420410156, 0.62352943420410156),
(0.38235294818878174, 0.61960786581039429, 0.61960786581039429),
(0.38655462861061096, 0.61568629741668701, 0.61568629741668701),
(0.39075630903244019, 0.61176472902297974, 0.61176472902297974),
(0.39495798945426941, 0.60784316062927246, 0.60784316062927246),
(0.39915966987609863, 0.60392159223556519, 0.60392159223556519),
(0.40336135029792786, 0.60000002384185791, 0.60000002384185791),
(0.40756303071975708, 0.59607845544815063, 0.59607845544815063),
(0.4117647111415863, 0.58823531866073608, 0.58823531866073608),
(0.41596639156341553, 0.58431375026702881, 0.58431375026702881),
(0.42016807198524475, 0.58039218187332153, 0.58039218187332153),
(0.42436975240707397, 0.57647061347961426, 0.57647061347961426),
(0.4285714328289032, 0.57254904508590698, 0.57254904508590698),
(0.43277311325073242, 0.56862747669219971, 0.56862747669219971),
(0.43697479367256165, 0.56470590829849243, 0.56470590829849243),
(0.44117647409439087, 0.56078433990478516, 0.56078433990478516),
(0.44537815451622009, 0.55686277151107788, 0.55686277151107788),
(0.44957983493804932, 0.55294120311737061, 0.55294120311737061),
(0.45378151535987854, 0.54901963472366333, 0.54901963472366333),
(0.45798319578170776, 0.54509806632995605, 0.54509806632995605),
(0.46218487620353699, 0.54117649793624878, 0.54117649793624878),
(0.46638655662536621, 0.5372549295425415, 0.5372549295425415),
(0.47058823704719543, 0.53333336114883423, 0.53333336114883423),
(0.47478991746902466, 0.52549022436141968, 0.52549022436141968),
(0.47899159789085388, 0.5215686559677124, 0.5215686559677124),
(0.48319327831268311, 0.51764708757400513, 0.51764708757400513),
(0.48739495873451233, 0.51372551918029785, 0.51372551918029785),
(0.49159663915634155, 0.50980395078659058, 0.50980395078659058),
(0.49579831957817078, 0.5058823823928833, 0.5058823823928833), (0.5,
0.50196081399917603, 0.50196081399917603), (0.50420171022415161,
0.49803921580314636, 0.49803921580314636), (0.50840336084365845,
0.49411764740943909, 0.49411764740943909), (0.51260507106781006,
0.49019607901573181, 0.49019607901573181), (0.51680672168731689,
0.48627451062202454, 0.48627451062202454), (0.52100843191146851,
0.48235294222831726, 0.48235294222831726), (0.52521008253097534,
0.47843137383460999, 0.47843137383460999), (0.52941179275512695,
0.47450980544090271, 0.47450980544090271), (0.53361344337463379,
0.47058823704719543, 0.47058823704719543), (0.5378151535987854,
0.46274510025978088, 0.46274510025978088), (0.54201680421829224,
0.45882353186607361, 0.45882353186607361), (0.54621851444244385,
0.45490196347236633, 0.45490196347236633), (0.55042016506195068,
0.45098039507865906, 0.45098039507865906), (0.55462187528610229,
0.44705882668495178, 0.44705882668495178), (0.55882352590560913,
0.44313725829124451, 0.44313725829124451), (0.56302523612976074,
0.43921568989753723, 0.43921568989753723), (0.56722688674926758,
0.43529412150382996, 0.43529412150382996), (0.57142859697341919,
0.43137255311012268, 0.43137255311012268), (0.57563024759292603,
0.42745098471641541, 0.42745098471641541), (0.57983195781707764,
0.42352941632270813, 0.42352941632270813), (0.58403360843658447,
0.41960784792900085, 0.41960784792900085), (0.58823531866073608,
0.41568627953529358, 0.41568627953529358), (0.59243696928024292,
0.4117647111415863, 0.4117647111415863), (0.59663867950439453,
0.40784314274787903, 0.40784314274787903), (0.60084033012390137,
0.40000000596046448, 0.40000000596046448), (0.60504204034805298,
0.3960784375667572, 0.3960784375667572), (0.60924369096755981,
0.39215686917304993, 0.39215686917304993), (0.61344540119171143,
0.38823530077934265, 0.38823530077934265), (0.61764705181121826,
0.38431373238563538, 0.38431373238563538), (0.62184876203536987,
0.3803921639919281, 0.3803921639919281), (0.62605041265487671,
0.37647059559822083, 0.37647059559822083), (0.63025212287902832,
0.37254902720451355, 0.37254902720451355), (0.63445377349853516,
0.36862745881080627, 0.36862745881080627), (0.63865548372268677,
0.364705890417099, 0.364705890417099), (0.6428571343421936,
0.36078432202339172, 0.36078432202339172), (0.64705884456634521,
0.35686275362968445, 0.35686275362968445), (0.65126049518585205,
0.35294118523597717, 0.35294118523597717), (0.65546220541000366,
0.3490196168422699, 0.3490196168422699), (0.6596638560295105,
0.34509804844856262, 0.34509804844856262), (0.66386556625366211,
0.33725491166114807, 0.33725491166114807), (0.66806721687316895,
0.3333333432674408, 0.3333333432674408), (0.67226892709732056,
0.32941177487373352, 0.32941177487373352), (0.67647057771682739,
0.32549020648002625, 0.32549020648002625), (0.680672287940979,
0.32156863808631897, 0.32156863808631897), (0.68487393856048584,
0.31764706969261169, 0.31764706969261169), (0.68907564878463745,
0.31372550129890442, 0.31372550129890442), (0.69327729940414429,
0.30980393290519714, 0.30980393290519714), (0.6974790096282959,
0.30588236451148987, 0.30588236451148987), (0.70168066024780273,
0.30196079611778259, 0.30196079611778259), (0.70588237047195435,
0.29803922772407532, 0.29803922772407532), (0.71008402109146118,
0.29411765933036804, 0.29411765933036804), (0.71428573131561279,
0.29019609093666077, 0.29019609093666077), (0.71848738193511963,
0.28627452254295349, 0.28627452254295349), (0.72268909215927124,
0.28235295414924622, 0.28235295414924622), (0.72689074277877808,
0.27450981736183167, 0.27450981736183167), (0.73109245300292969,
0.27058824896812439, 0.27058824896812439), (0.73529410362243652,
0.26666668057441711, 0.26666668057441711), (0.73949581384658813,
0.26274511218070984, 0.26274511218070984), (0.74369746446609497,
0.25882354378700256, 0.25882354378700256), (0.74789917469024658,
0.25490197539329529, 0.25490197539329529), (0.75210082530975342,
0.25098040699958801, 0.25098040699958801), (0.75630253553390503,
0.24705882370471954, 0.24705882370471954), (0.76050418615341187,
0.24313725531101227, 0.24313725531101227), (0.76470589637756348,
0.23921568691730499, 0.23921568691730499), (0.76890754699707031,
0.23529411852359772, 0.23529411852359772), (0.77310925722122192,
0.23137255012989044, 0.23137255012989044), (0.77731090784072876,
0.22745098173618317, 0.22745098173618317), (0.78151261806488037,
0.22352941334247589, 0.22352941334247589), (0.78571426868438721,
0.21960784494876862, 0.21960784494876862), (0.78991597890853882,
0.21176470816135406, 0.21176470816135406), (0.79411762952804565,
0.20784313976764679, 0.20784313976764679), (0.79831933975219727,
0.20392157137393951, 0.20392157137393951), (0.8025209903717041,
0.20000000298023224, 0.20000000298023224), (0.80672270059585571,
0.19607843458652496, 0.19607843458652496), (0.81092435121536255,
0.19215686619281769, 0.19215686619281769), (0.81512606143951416,
0.18823529779911041, 0.18823529779911041), (0.819327712059021,
0.18431372940540314, 0.18431372940540314), (0.82352942228317261,
0.18039216101169586, 0.18039216101169586), (0.82773107290267944,
0.17647059261798859, 0.17647059261798859), (0.83193278312683105,
0.17254902422428131, 0.17254902422428131), (0.83613443374633789,
0.16862745583057404, 0.16862745583057404), (0.8403361439704895,
0.16470588743686676, 0.16470588743686676), (0.84453779458999634,
0.16078431904315948, 0.16078431904315948), (0.84873950481414795,
0.15686275064945221, 0.15686275064945221), (0.85294115543365479,
0.14901961386203766, 0.14901961386203766), (0.8571428656578064,
0.14509804546833038, 0.14509804546833038), (0.86134451627731323,
0.14117647707462311, 0.14117647707462311), (0.86554622650146484,
0.13725490868091583, 0.13725490868091583), (0.86974787712097168,
0.13333334028720856, 0.13333334028720856), (0.87394958734512329,
0.12941177189350128, 0.12941177189350128), (0.87815123796463013,
0.12549020349979401, 0.12549020349979401), (0.88235294818878174,
0.12156862765550613, 0.12156862765550613), (0.88655459880828857,
0.11764705926179886, 0.11764705926179886), (0.89075630903244019,
0.11372549086809158, 0.11372549086809158), (0.89495795965194702,
0.10980392247438431, 0.10980392247438431), (0.89915966987609863,
0.10588235408067703, 0.10588235408067703), (0.90336132049560547,
0.10196078568696976, 0.10196078568696976), (0.90756303071975708,
0.098039217293262482, 0.098039217293262482), (0.91176468133926392,
0.094117648899555206, 0.094117648899555206), (0.91596639156341553,
0.086274512112140656, 0.086274512112140656), (0.92016804218292236,
0.08235294371843338, 0.08235294371843338), (0.92436975240707397,
0.078431375324726105, 0.078431375324726105), (0.92857140302658081,
0.074509806931018829, 0.074509806931018829), (0.93277311325073242,
0.070588238537311554, 0.070588238537311554), (0.93697476387023926,
0.066666670143604279, 0.066666670143604279), (0.94117647409439087,
0.062745101749897003, 0.062745101749897003), (0.94537812471389771,
0.058823529630899429, 0.058823529630899429), (0.94957983493804932,
0.054901961237192154, 0.054901961237192154), (0.95378148555755615,
0.050980392843484879, 0.050980392843484879), (0.95798319578170776,
0.047058824449777603, 0.047058824449777603), (0.9621848464012146,
0.043137256056070328, 0.043137256056070328), (0.96638655662536621,
0.039215687662363052, 0.039215687662363052), (0.97058820724487305,
0.035294119268655777, 0.035294119268655777), (0.97478991746902466,
0.031372550874948502, 0.031372550874948502), (0.97899156808853149,
0.023529412224888802, 0.023529412224888802), (0.98319327831268311,
0.019607843831181526, 0.019607843831181526), (0.98739492893218994,
0.015686275437474251, 0.015686275437474251), (0.99159663915634155,
0.011764706112444401, 0.011764706112444401), (0.99579828977584839,
0.0078431377187371254, 0.0078431377187371254), (1.0,
0.0039215688593685627, 0.0039215688593685627)]}
Accent = colors.LinearSegmentedColormap('Accent', _Accent_data, LUTSIZE)
Blues = colors.LinearSegmentedColormap('Blues', _Blues_data, LUTSIZE)
BrBG = colors.LinearSegmentedColormap('BrBG', _BrBG_data, LUTSIZE)
BuGn = colors.LinearSegmentedColormap('BuGn', _BuGn_data, LUTSIZE)
BuPu = colors.LinearSegmentedColormap('BuPu', _BuPu_data, LUTSIZE)
Dark2 = colors.LinearSegmentedColormap('Dark2', _Dark2_data, LUTSIZE)
GnBu = colors.LinearSegmentedColormap('GnBu', _GnBu_data, LUTSIZE)
Greens = colors.LinearSegmentedColormap('Greens', _Greens_data, LUTSIZE)
Greys = colors.LinearSegmentedColormap('Greys', _Greys_data, LUTSIZE)
Oranges = colors.LinearSegmentedColormap('Oranges', _Oranges_data, LUTSIZE)
OrRd = colors.LinearSegmentedColormap('OrRd', _OrRd_data, LUTSIZE)
Paired = colors.LinearSegmentedColormap('Paired', _Paired_data, LUTSIZE)
Pastel1 = colors.LinearSegmentedColormap('Pastel1', _Pastel1_data, LUTSIZE)
Pastel2 = colors.LinearSegmentedColormap('Pastel2', _Pastel2_data, LUTSIZE)
PiYG = colors.LinearSegmentedColormap('PiYG', _PiYG_data, LUTSIZE)
PRGn = colors.LinearSegmentedColormap('PRGn', _PRGn_data, LUTSIZE)
PuBu = colors.LinearSegmentedColormap('PuBu', _PuBu_data, LUTSIZE)
PuBuGn = colors.LinearSegmentedColormap('PuBuGn', _PuBuGn_data, LUTSIZE)
PuOr = colors.LinearSegmentedColormap('PuOr', _PuOr_data, LUTSIZE)
PuRd = colors.LinearSegmentedColormap('PuRd', _PuRd_data, LUTSIZE)
Purples = colors.LinearSegmentedColormap('Purples', _Purples_data, LUTSIZE)
RdBu = colors.LinearSegmentedColormap('RdBu', _RdBu_data, LUTSIZE)
RdGy = colors.LinearSegmentedColormap('RdGy', _RdGy_data, LUTSIZE)
RdPu = colors.LinearSegmentedColormap('RdPu', _RdPu_data, LUTSIZE)
RdYlBu = colors.LinearSegmentedColormap('RdYlBu', _RdYlBu_data, LUTSIZE)
RdYlGn = colors.LinearSegmentedColormap('RdYlGn', _RdYlGn_data, LUTSIZE)
Reds = colors.LinearSegmentedColormap('Reds', _Reds_data, LUTSIZE)
Set1 = colors.LinearSegmentedColormap('Set1', _Set1_data, LUTSIZE)
Set2 = colors.LinearSegmentedColormap('Set2', _Set2_data, LUTSIZE)
Set3 = colors.LinearSegmentedColormap('Set3', _Set3_data, LUTSIZE)
Spectral = colors.LinearSegmentedColormap('Spectral', _Spectral_data, LUTSIZE)
YlGn = colors.LinearSegmentedColormap('YlGn', _YlGn_data, LUTSIZE)
YlGnBu = colors.LinearSegmentedColormap('YlGnBu', _YlGnBu_data, LUTSIZE)
YlOrBr = colors.LinearSegmentedColormap('YlOrBr', _YlOrBr_data, LUTSIZE)
YlOrRd = colors.LinearSegmentedColormap('YlOrRd', _YlOrRd_data, LUTSIZE)
gist_earth = colors.LinearSegmentedColormap('gist_earth', _gist_earth_data, LUTSIZE)
gist_gray = colors.LinearSegmentedColormap('gist_gray', _gist_gray_data, LUTSIZE)
gist_heat = colors.LinearSegmentedColormap('gist_heat', _gist_heat_data, LUTSIZE)
gist_ncar = colors.LinearSegmentedColormap('gist_ncar', _gist_ncar_data, LUTSIZE)
gist_rainbow = colors.LinearSegmentedColormap('gist_rainbow', _gist_rainbow_data, LUTSIZE)
gist_stern = colors.LinearSegmentedColormap('gist_stern', _gist_stern_data, LUTSIZE)
gist_yarg = colors.LinearSegmentedColormap('gist_yarg', _gist_yarg_data, LUTSIZE)
datad['Accent']=_Accent_data
datad['Blues']=_Blues_data
datad['BrBG']=_BrBG_data
datad['BuGn']=_BuGn_data
datad['BuPu']=_BuPu_data
datad['Dark2']=_Dark2_data
datad['GnBu']=_GnBu_data
datad['Greens']=_Greens_data
datad['Greys']=_Greys_data
datad['Oranges']=_Oranges_data
datad['OrRd']=_OrRd_data
datad['Paired']=_Paired_data
datad['Pastel1']=_Pastel1_data
datad['Pastel2']=_Pastel2_data
datad['PiYG']=_PiYG_data
datad['PRGn']=_PRGn_data
datad['PuBu']=_PuBu_data
datad['PuBuGn']=_PuBuGn_data
datad['PuOr']=_PuOr_data
datad['PuRd']=_PuRd_data
datad['Purples']=_Purples_data
datad['RdBu']=_RdBu_data
datad['RdGy']=_RdGy_data
datad['RdPu']=_RdPu_data
datad['RdYlBu']=_RdYlBu_data
datad['RdYlGn']=_RdYlGn_data
datad['Reds']=_Reds_data
datad['Set1']=_Set1_data
datad['Set2']=_Set2_data
datad['Set3']=_Set3_data
datad['Spectral']=_Spectral_data
datad['YlGn']=_YlGn_data
datad['YlGnBu']=_YlGnBu_data
datad['YlOrBr']=_YlOrBr_data
datad['YlOrRd']=_YlOrRd_data
datad['gist_earth']=_gist_earth_data
datad['gist_gray']=_gist_gray_data
datad['gist_heat']=_gist_heat_data
datad['gist_ncar']=_gist_ncar_data
datad['gist_rainbow']=_gist_rainbow_data
datad['gist_stern']=_gist_stern_data
datad['gist_yarg']=_gist_yarg_data
# reverse all the colormaps.
# reversed colormaps have '_r' appended to the name.
def revcmap(data):
data_r = {}
for key, val in data.iteritems():
valnew = [(1.-a, b, c) for a, b, c in reversed(val)]
data_r[key] = valnew
return data_r
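# e.g. a channel segment [(0.0, 0.0, 0.0), (1.0, 1.0, 1.0)] becomes
# [(0.0, 1.0, 1.0), (1.0, 0.0, 0.0)]: the anchor points are mirrored
# about x = 0.5 and their order reversed.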
cmapnames = datad.keys()
for cmapname in cmapnames:
cmapname_r = cmapname+'_r'
cmapdat_r = revcmap(datad[cmapname])
datad[cmapname_r] = cmapdat_r
locals()[cmapname_r] = colors.LinearSegmentedColormap(cmapname_r, cmapdat_r, LUTSIZE)
| gpl-3.0 |
iarroyof/nlp-pipeline | attention_lstm_1.py | 1 | 15720 | """
A keras attention layer that wraps RNN layers.
Based on tensorflow's [attention_decoder](https://github.com/tensorflow/tensorflow/blob/c8a45a8e236776bed1d14fd71f3b6755bd63cc58/tensorflow/python/ops/seq2seq.py#L506)
and [Grammar as a Foreign Language](https://arxiv.org/abs/1412.7449).
date: 20161101
author: wassname
url: https://gist.github.com/wassname/5292f95000e409e239b9dc973295327a
"""
# tests like those in https://github.com/fchollet/keras/blob/master/tests/keras/layers/test_wrappers.py
import numpy as np
from numpy.testing import assert_allclose
from keras.utils.test_utils import keras_test
from keras.layers import wrappers, Input, recurrent, InputLayer, Merge,MaxoutDense
from keras.layers import core, convolutional, recurrent, Embedding, Dense,Flatten
from keras.models import Sequential, Model, model_from_json
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.utils.np_utils import to_categorical
from keras.callbacks import ModelCheckpoint
from argparse import ArgumentParser as ap
from attention_lstm_ import *
from load_sts_data import *
parser = ap()
parser.add_argument("-s", help="Number of hidden states (time steps of the moving filter)", default=10, type=int)
parser.add_argument("-e", help="Number of training epochs", default=25, type=int)
parser.add_argument("-d", help="Number of hidden output perceptron nodes", default=20, type=int)
parser.add_argument("-m", help="Model type", default="base_line")
parser.add_argument("-D", help="Embeddings dimensions", default=100, type=int)
parser.add_argument("-t", help="Toggles train mode. Default is False.", action="store_true")
parser.add_argument("-E", help="Embedding type. 'word2vec', 'glove' 'fastText'.", default="fastText")
args = parser.parse_args()
# ----------
train=args.t
# ----------
h_STATES = args.s
DENSES = args.d
EMBEDDING_DIM = args.D
MODEL_TYPE=args.m
EPOCHS = args.e
EMBEDDING = args.E
MODEL_DIR = "/almac/ignacio"
dummy=""
if train:
    if dummy == "":
YEARS_TRAIN=["2012", "2013", "2015", "2016"]
#YEARS_TRAIN=["2013"]
else:
YEARS_TRAIN=["2013-t"]
YEAR_VALID="2017"
MAX_SEQUENCE_LENGTH=50
VALIDATION_SPLIT=0.30
VECTOR_DIR=MODEL_DIR + "/data/" + EMBEDDING + dummy
MAX_NB_WORDS=20000
params="%s_Ts%d_Ds%d_%s_H%d_Sl%d"% (MODEL_TYPE, h_STATES,DENSES, EMBEDDING,
EMBEDDING_DIM, MAX_SEQUENCE_LENGTH)
model_file=MODEL_DIR + "/%s.hdf5" % params
if train:
TRAIN_DIRS = []
for year in YEARS_TRAIN:
TRAIN_DIRS.append(( VECTOR_DIR.rsplit('/', 1)[0]
+ "/sts_all/train-" + year, None, False) )
VALID_DIRS=[(VECTOR_DIR.rsplit('/', 1)[0]
+ "/sts_all/valid-" + YEAR_VALID, "validation", False)]
# --------------------------
if train:
print "Loanding train and valid dirs......"
train_data_, gs_data=load_train_dirs(TRAIN_DIRS)
print "Loanding validation dirs......"
valid_data_, gs_test=load_train_dirs(VALID_DIRS)
print "Spliting tab-separated files..."
if train:
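    # sentence pairs arrive interleaved in the loaded list:
    # odd positions form side A, even positions form side B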
train_data_A, train_data_B = train_data_[1::2], train_data_[::2]
labels=np.asarray(gs_data)
valid_data_A, valid_data_B = valid_data_[1::2], valid_data_[::2]
#labels = to_categorical(np.asarray(gs_data))
if not train:
test_labels=np.asarray(gs_test)
indices_test = np.arange(test_labels.shape[0])
else:
indices = np.arange(labels.shape[0])
np.random.shuffle(indices)
nb_validation_samples = int(VALIDATION_SPLIT * labels.shape[0])
print "Labels shape: ", labels.shape
print "Tokenizing files... [A]"
tokenizer = Tokenizer(nb_words=MAX_NB_WORDS)
if train:
tokenizer.fit_on_texts(train_data_A + valid_data_A)
sequences_A = tokenizer.texts_to_sequences(train_data_A)
else:
tokenizer.fit_on_texts(valid_data_A)
sequences_Av = tokenizer.texts_to_sequences(valid_data_A)
word_index_A = tokenizer.word_index
if train:
data_A = pad_sequences(sequences_A, maxlen=MAX_SEQUENCE_LENGTH)
data_A = data_A[indices]
x_data_Av = pad_sequences(sequences_Av, maxlen=MAX_SEQUENCE_LENGTH)
if train:
print "Split training set into train and val... [A]"
x_train_A = data_A[:-nb_validation_samples]
x_val_A = data_A[-nb_validation_samples:]
print "Tokenizing files... [B]"
tokenizer = Tokenizer(nb_words=MAX_NB_WORDS)
if train:
tokenizer.fit_on_texts(train_data_B + valid_data_B)
sequences_B = tokenizer.texts_to_sequences(train_data_B)
else:
tokenizer.fit_on_texts(valid_data_B)
sequences_Bv = tokenizer.texts_to_sequences(valid_data_B)
word_index_B = tokenizer.word_index
if train:
data_B = pad_sequences(sequences_B, maxlen=MAX_SEQUENCE_LENGTH)
data_B = data_B[indices]
x_data_Bv = pad_sequences(sequences_Bv, maxlen=MAX_SEQUENCE_LENGTH)
if train:
print "Split training set into train and val... [B]"
x_train_B = data_B[:-nb_validation_samples]
x_val_B = data_B[-nb_validation_samples:]
labels = labels[indices]
y_train = labels[:-nb_validation_samples]
y_val = labels[-nb_validation_samples:]
embeddings_index = {}
if EMBEDDING == "glove":
vectors_file = VECTOR_DIR + '/glove.6B.%dd.txt' % EMBEDDING_DIM
elif EMBEDDING == "fastText":
vectors_file = VECTOR_DIR + '/wikiEn_Full_H%d.model.vec' % EMBEDDING_DIM
elif EMBEDDING == "word2vec":
vectors_file = VECTOR_DIR + '/w2v_En_vector_space_H%d.vec' % EMBEDDING_DIM
f = open(vectors_file)
print "Getting embedding matrix... from %s" % vectors_file
for line in f:
values = line.split()
word = values[0]
coefs = np.asarray(values[1:], dtype='float32')
embeddings_index[word] = coefs
f.close()
print('Found %s word vectors.' % len(embeddings_index))
#embeddings_index['###'] = np.zeros(100)
print "Filling embedding matrices..."
embedding_matrix_A = np.zeros((len(word_index_A) + 1, EMBEDDING_DIM))
for word, i in word_index_A.items():
embedding_vector = embeddings_index.get(word)
if embedding_vector is not None:
# words not found in embedding index will be all-zeros.
embedding_matrix_A[i] = embedding_vector
embedding_matrix_B = np.zeros((len(word_index_B) + 1, EMBEDDING_DIM))
for word, i in word_index_B.items():
embedding_vector = embeddings_index.get(word)
if embedding_vector is not None:
embedding_matrix_B[i] = embedding_vector
# --------------------------
def load_trained_model(model_file, model_type, vocab_a, vocab_b, timesteps, embedding_dim):
model = build_model(model_type, vocab_a, vocab_b, timesteps, embedding_dim)
model.load_weights(model_file)
return model
def models(M, nb_samples, timesteps, embedding_dim):#, output_dim): # For returning sequences only
embedding_layer = Embedding(input_dim=nb_samples + 1,
output_dim=embedding_dim,
input_length=MAX_SEQUENCE_LENGTH,
dropout=0.2,
trainable=False)
if M == "base_line":
model = Sequential()
model.add(embedding_layer)
model.add(Attention(recurrent.LSTM(output_dim=timesteps, return_sequences=False, consume_less='mem')))
model.add(core.Activation('relu'))
elif M == "multi_att":
model = Sequential()
model.add(embedding_layer)
model.add(Attention(recurrent.LSTM(output_dim=timesteps, return_sequences=True, consume_less='mem')))
model.add(recurrent.LSTM(output_dim=timesteps, return_sequences=True, consume_less='mem'))
model.add(Attention(recurrent.LSTM(output_dim=timesteps, return_sequences=True, consume_less='mem')))
model.add(recurrent.LSTM(output_dim=timesteps, return_sequences=True, consume_less='mem'))
model.add(Attention(recurrent.LSTM(output_dim=timesteps, return_sequences=True, consume_less='mem')))
model.add(recurrent.LSTM(output_dim=timesteps, return_sequences=True, consume_less='mem'))
model.add(Attention(recurrent.LSTM(output_dim=timesteps, return_sequences=True, consume_less='mem')))
model.add(recurrent.LSTM(output_dim=timesteps, return_sequences=True, consume_less='mem'))
model.add(Attention(recurrent.LSTM(output_dim=timesteps, return_sequences=False, consume_less='mem')))
model.add(core.Activation('relu'))
elif M == "stacked":
# test stacked with all RNN layers and consume_less options
model = Sequential()
model.add(embedding_layer)
# model.add(Attention(recurrent.LSTM(embedding_dim, input_dim=embedding_dim,, consume_less='cpu' return_sequences=True))) # not supported
model.add(Attention(recurrent.LSTM(output_dim=timesteps, consume_less='gpu', return_sequences=True)))
model.add(Attention(recurrent.LSTM(output_dim=timesteps, consume_less='mem', return_sequences=True)))
# test each other RNN type
model.add(Attention(recurrent.GRU(output_dim=timesteps, consume_less='mem', return_sequences=True)))
model.add(Attention(recurrent.SimpleRNN(output_dim=timesteps, consume_less='mem', return_sequences=False)))
model.add(core.Activation('relu'))
elif M == "stacked_1":
# test stacked with all RNN layers and consume_less options
model = Sequential()
model.add(embedding_layer)
# model.add(Attention(recurrent.LSTM(embedding_dim, input_dim=embedding_dim,, consume_less='cpu' return_sequences=True))) # not supported
model.add(Attention(recurrent.LSTM(output_dim=timesteps, consume_less='gpu', return_sequences=True)))
model.add(Attention(recurrent.LSTM(output_dim=timesteps, consume_less='mem', return_sequences=True)))
# test each other RNN type
model.add(Attention(recurrent.GRU(output_dim=timesteps, consume_less='mem', return_sequences=True)))
model.add(Attention(recurrent.SimpleRNN(output_dim=timesteps, consume_less='mem', return_sequences=False)))
elif M == "stacked_bidir":
# test stacked with all RNN layers and consume_less options
model = Sequential()
model.add(embedding_layer)
# model.add(Attention(recurrent.LSTM(embedding_dim, input_dim=embedding_dim,, consume_less='cpu' return_sequences=True))) # not supported
model.add(Attention(recurrent.LSTM(output_dim=timesteps, consume_less='gpu', return_sequences=True)))
model.add(Attention(recurrent.LSTM(output_dim=timesteps, consume_less='mem', return_sequences=True, go_backwards=True)))
# test each other RNN type
model.add(Attention(recurrent.GRU(output_dim=timesteps, consume_less='mem', return_sequences=True)))
model.add(Attention(recurrent.SimpleRNN(output_dim=timesteps, consume_less='mem', return_sequences=False)))
model.add(core.Activation('relu'))
elif M == "simple_att":
# test with return_sequence = False
model = Sequential()
model.add(embedding_layer)
model.add(Attention(recurrent.LSTM(output_dim=timesteps, consume_less='mem',dropout_W=0.2, dropout_U=0.2)))
model.add(core.Activation('relu'))
elif M == "bidir_att":
# with bidirectional encoder
model = Sequential()
model.add(embedding_layer)
model.add(wrappers.Bidirectional(recurrent.LSTM(output_dim=timesteps, return_sequences=True)))
model.add(Attention(recurrent.LSTM(output_dim=timesteps, return_sequences=False, consume_less='mem')))
model.add(core.Activation('relu'))
return model
# Building symbolic sentence models for [A] and [B] sides separately
def build_model(model_type, len_vocab_A, len_vocab_B, hidden_states, embedding_dim):
sent_A=models(model_type, len_vocab_A, hidden_states, embedding_dim)#, DENSES)
sent_B=models(model_type, len_vocab_B, hidden_states, embedding_dim)#, DENSES)
if DENSES != 0:
pair_sents=Merge([sent_A, sent_B], mode='concat', concat_axis=-1)
# -----------------------------------------------------------------------
similarity = Sequential()
similarity.add(pair_sents)
similarity.add(MaxoutDense(DENSES))
similarity.add(MaxoutDense(1))
else:
pair_sents=Merge([sent_A, sent_B], mode='concat', concat_axis=-1)
similarity = Sequential()
similarity.add(pair_sents)
similarity.add(MaxoutDense(1))
#similarity.add(core.Activation("sigmoid"))
return similarity
import subprocess
def pearsons(gl, el):
with open("GL.txt", "w") as f:
for p in gl:
f.write("%s\n" % p)
with open("EL.txt", "w") as f:
for p in el:
f.write("%s\n" % p)
gs="GL.txt"
est="EL.txt"
pipe = subprocess.Popen(["perl", "./correlation-noconfidence.pl", gs, est], stdin=subprocess.PIPE, stdout=subprocess.PIPE)
pipe.stdin.write("%s %s" % (gs, est))
try:
pearson= float(str(pipe.stdout.read()).split()[1])
except:
print str(pipe.stdout.read())
exit()
remove(gs)
remove(est)
pipe.stdin.close()
pipe.stdout.close()
return pearson
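# Note: assuming scipy is available, a pure-Python alternative would avoid
# the perl subprocess and the temporary files:
#
#   from scipy.stats import pearsonr
#   r, _ = pearsonr(gl, el)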
if train:
from pdb import set_trace as st
similarity = build_model(MODEL_TYPE, len(word_index_A),
len(word_index_B), h_STATES, EMBEDDING_DIM)
print "Compiling the model..."
similarity.compile(loss='mean_squared_error', optimizer='rmsprop',
metrics=['acc','mean_squared_error'])
similarity.get_config()
summ=model_from_json(similarity.to_json(),custom_objects=dict(Attention=Attention))
summ.summary()
print "Happy learning!!!"
checkpointer = ModelCheckpoint(filepath=model_file, monitor='val_acc', verbose=1, save_best_only=True)
similarity.fit([x_train_A, x_train_B], y_train, validation_data=([x_val_A, x_val_B], y_val),
nb_epoch=EPOCHS, batch_size=20, callbacks=[checkpointer])
print "\nParameters:\n---------------------\nh_STATES=%d\nEPOCHS=%d\nDENSES=%d\nEMBEDDING=%s\nEMBEDDING_DIM=%d\nMAX_SEQUENCE_LENGTH=%d\nMODEL_TYPE=%s\n" % (h_STATES,
EPOCHS,
DENSES,
EMBEDDING,
EMBEDDING_DIM,MAX_SEQUENCE_LENGTH,
MODEL_TYPE)
elif not train:
import sys
from os import remove
from math import sqrt
from sklearn.metrics import mean_squared_error as mse
from sklearn.metrics import r2_score
test_gs_file="gs_test_file"
similarity=load_trained_model(model_file, MODEL_TYPE,len(word_index_A),
len(word_index_B), h_STATES, EMBEDDING_DIM)
y=similarity.predict([x_data_Av, x_data_Bv])
with open("/almac/ignacio/%s.pred" % params, "wb") as f:
for score in y[:,0]:
f.write("%f.4\n" % score)
with open(test_gs_file, "wb") as f:
for score in test_labels:
f.write("%f.4\n" % score)
# EVALUATING
testRMSE = sqrt(mse(test_labels, y[:,0]))
testR2 = r2_score(test_labels,y[:,0])
testPea = pearsons(test_labels, y[:,0])
sys.stderr.write('Test RMSE Score: %.4f\n' % (testRMSE))
sys.stderr.write('Test R2 Score: %.4f\n' % (testR2))
sys.stderr.write('Test wPearson Score: %.4f\n' % (testPea))
remove(test_gs_file)
| gpl-3.0 |
ratschlab/RiboDiff | src/ribodiff/plot.py | 1 | 12939 | #!/usr/bin/env python
"""
Plotting the data and results.
"""
import os
import sys
import cPickle as pickle
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from optparse import OptionParser, OptionGroup
def parse_options(argv):
parser = OptionParser()
required = OptionGroup(parser, 'REQUIRED')
required.add_option('-k', action='store', type='string', dest='dataPkl', help='Binary file containing all source data and results of TE change analysis.')
required.add_option('-o', action='store', type='string', dest='outputPrefix', help='Specify the prefix of the output file name.')
optional = OptionGroup(parser, 'OPTIONAL')
optional.add_option('-p', action='store', type='string', dest='plotWhich', default='All', help='Which figure to be plotted. Options: EmpDisp, TE or All. [default: All]')
optional.add_option('-q', action='store', type='float', dest='cutoffFDR', default=0.1, help='Set the FDR cutoff for significant case to plot. [default: 0.1]')
parser.add_option_group(required)
parser.add_option_group(optional)
(opts, args) = parser.parse_args(argv)
if len(argv) < 2:
parser.print_help()
sys.exit()
mandatories = ['dataPkl', 'outputPrefix']
for eachOpt in mandatories:
if not opts.__dict__[eachOpt]:
parser.error('-%s is a required option.\n' % eachOpt[0])
if eachOpt in mandatories[:1] and not os.path.exists(opts.__dict__[eachOpt]):
sys.stderr.write('\nError: File \'%s\' does not exist.\n\n' % opts.__dict__[eachOpt])
sys.exit()
if eachOpt == mandatories[1]:
if not os.path.dirname(opts.__dict__[eachOpt]):
                opts.__dict__[eachOpt] = os.path.join(os.getcwd(), opts.__dict__[eachOpt])
if not os.path.exists(os.path.dirname(opts.__dict__[eachOpt])):
try:
os.makedirs(os.path.dirname(opts.__dict__[eachOpt]))
except OSError:
sys.stderr.write('\nError: Failed to create directory: \'%s\' \n\n' % os.path.dirname(opts.__dict__[eachOpt]))
sys.exit()
if opts.plotWhich not in ['EmpDisp', 'TE', 'All']:
parser.error('-p option can only take \'EmpDisp\', \'TE\' or \'All\' as argument.\n')
return opts
def empDisp_scatter(data, fileOutName):
cntRiboNorm = data.countRibo / data.libSizesRibo
cntRnaNorm = data.countRna / data.libSizesRna
idx = np.logical_and(np.sum(cntRiboNorm, axis=1)/data.libSizesRibo.size > 1, np.sum(cntRnaNorm, axis=1)/data.libSizesRna.size > 1).nonzero()[0]
cntRiboMean = np.mean(cntRiboNorm[idx], axis=1)
cntRnaMean = np.mean(cntRnaNorm[idx], axis=1)
varRibo = np.var(cntRiboNorm[idx], axis=1, ddof=0)
varRna = np.var(cntRnaNorm[idx], axis=1, ddof=0)
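    # method-of-moments dispersion under a negative binomial model:
    # var = mu + disp * mu**2  =>  disp = (var - mu) / mu**2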
dispRibo = (varRibo - cntRiboMean) / cntRiboMean ** 2
dispRna = (varRna - cntRnaMean ) / cntRnaMean ** 2
stdDispRibo = np.std(np.log10(dispRibo[dispRibo > 0]), ddof=1)
stdDispRna = np.std(np.log10(dispRna[dispRna > 0]), ddof=1)
if np.percentile(np.log2(cntRnaMean), 99.0) >= np.percentile(np.log2(cntRiboMean), 99.0):
maxCnt = np.percentile(np.log2(cntRnaMean), 99.0)
else:
maxCnt = np.percentile(np.log2(cntRiboMean), 99.0)
if np.percentile(np.log2(cntRnaMean), 1.0) <= np.percentile(np.log2(cntRiboMean), 1.0):
minCnt = np.percentile(np.log2(cntRnaMean), 1.0)
else:
minCnt = np.percentile(np.log2(cntRiboMean), 1.0)
winSize = (maxCnt - minCnt) / 15.0
dispWinMedRibo = []
dispWinMedRna = []
cntWinRibo = []
cntWinRna = []
for i in np.arange(minCnt, maxCnt, winSize):
IDX1 = np.logical_and(np.logical_and(np.log2(cntRiboMean) > i, np.log2(cntRiboMean) < i + winSize), dispRibo > 0).nonzero()[0]
IDX2 = np.logical_and(np.logical_and(np.log2(cntRnaMean) > i, np.log2(cntRnaMean) < i + winSize), dispRna > 0).nonzero()[0]
if i + winSize / 2.0 >= np.percentile(np.log2(cntRiboMean), 2.5) and i + winSize / 2.0 <= np.percentile(np.log2(cntRiboMean), 97.5):
dispWinMedRibo.extend([np.median(dispRibo[IDX1])])
cntWinRibo.extend([i + winSize / 2.0])
if i + winSize / 2.0 >= np.percentile(np.log2(cntRnaMean), 2.5) and i + winSize / 2.0 <= np.percentile(np.log2(cntRnaMean), 97.5):
dispWinMedRna.extend([np.median(dispRna[IDX2])])
cntWinRna.extend([i + winSize / 2.0])
fig, ax = plt.subplots()
ax.scatter(np.log2(cntRnaMean[dispRna > 0]), np.log10(dispRna[dispRna > 0]), marker='o', color='lightsalmon', s=0.5, lw=0, label='dispersion, RNA-Seq' )
ax.scatter(np.log2(cntRiboMean[dispRibo > 0]), np.log10(dispRibo[dispRibo > 0]), marker='o', color='lightskyblue', s=0.5, lw=0, label='dispersion, Ribo-Seq')
ax.plot(cntWinRna, np.log10(dispWinMedRna), color='crimson', linestyle='-', marker='^', markersize=4, markeredgewidth=0, label='window mean, RNA-Seq' )
ax.plot(cntWinRibo, np.log10(dispWinMedRibo), color='dodgerblue', linestyle='-', marker='*', markersize=5, markeredgewidth=0, label='window mean, Ribo-Seq')
smallestDisp = min(np.hstack([np.log10(dispRna[dispRna > 0]), np.log10(dispRibo[dispRibo > 0])]))
largestDisp = max(np.hstack([np.log10(dispRna[dispRna > 0]), np.log10(dispRibo[dispRibo > 0])]))
lowerBound = np.floor(smallestDisp) - 3
upperBound = np.ceil(largestDisp) + 4
ax.scatter(np.log2(cntRnaMean[dispRna <= 0]), np.repeat(lowerBound + 1.0, cntRnaMean[dispRna <= 0].size), marker='o', color='lightsalmon', s=0.5, lw=0)
ax.scatter(np.log2(cntRiboMean[dispRibo <= 0]), np.repeat(lowerBound + 0.8, cntRiboMean[dispRibo <= 0].size), marker='o', color='lightskyblue', s=0.5, lw=0)
if np.mod(np.floor(smallestDisp), 2) == 1:
lowerEndTick = np.floor(smallestDisp) - 1
else:
lowerEndTick = np.floor(smallestDisp)
if np.mod(upperBound, 2) == 1:
upperEndTick = upperBound - 1
else:
upperEndTick = upperBound
ax.set_ylim(lowerBound, upperBound)
ax.set_xlim(0, None)
ax.spines['left'].set_visible(False)
breakPoint = lowerEndTick + 1
ax.plot((0, 0), (breakPoint+0.15, upperBound), color='black', lw=1.5)
ax.plot((0, 0), (breakPoint-0.1, lowerBound), color='black', lw=1.5)
ax.plot((-0.1, 0.1), (breakPoint, breakPoint+0.2), color='black', lw=1, clip_on=False)
ax.plot((-0.1, 0.1), (breakPoint-0.2, breakPoint), color='black', lw=1, clip_on=False)
lowerEndTick = lowerEndTick.astype(int)
upperEndTick = upperEndTick.astype(int)
plt.yticks(np.arange(lowerEndTick, upperEndTick+1, 2))
tklabels = ax.axes.get_yticks().tolist()
tklabels[0] = '-Inf'
ax.axes.set_yticklabels(tklabels)
ax.get_xaxis().tick_bottom()
ax.get_yaxis().tick_left()
ax.tick_params(axis='x', labelsize=10)
ax.tick_params(axis='y', labelsize=10)
ax.legend(loc='upper right', prop={'size':10})
ax.set_xlabel(r'$log_{2}(mean\/counts)$', fontsize=15)
ax.set_ylabel(r'$log_{10}(dispersion)$', fontsize=15)
ax.set_title('Empirical Dispersion')
ax.text(0.03, 0.96, r'$\sigma_{Ribo\/}=\,%1.2f$' % stdDispRibo, horizontalalignment='left', verticalalignment='center', transform = ax.transAxes, fontsize=12)
ax.text(0.03, 0.92, r'$\sigma_{RNA}=\,%1.2f$' % stdDispRna, horizontalalignment='left', verticalalignment='center', transform = ax.transAxes, fontsize=12)
plt.savefig(fileOutName, format='pdf', bbox_inches='tight')
def cnt_deltaTE_scatter(data, fdr, fileOutName):
cntRiboNorm = data.countRibo / data.libSizesRibo
cntRnaNorm = data.countRna / data.libSizesRna
padj = data.padj.flatten()
with np.errstate(invalid='ignore'):
idx = np.logical_and(~np.isnan(padj), np.logical_and(np.sum(cntRiboNorm, axis=1)/data.libSizesRibo.size > 2, np.sum(cntRnaNorm, axis=1)/data.libSizesRna.size > 2)).nonzero()[0]
cntRiboMean = np.mean(cntRiboNorm[idx], axis=1)
logFoldChangeTE = data.logFoldChangeTE[idx]
index = np.logical_and(padj < fdr, np.logical_and(np.sum(cntRiboNorm, axis=1)/data.libSizesRibo.size > 2, np.sum(cntRnaNorm, axis=1)/data.libSizesRna.size > 2)).nonzero()[0]
cntRiboMeanSig = np.mean(cntRiboNorm[index], axis=1)
logFoldChangeTEsig = data.logFoldChangeTE[index]
fig, ax = plt.subplots()
ax.scatter(cntRiboMean, logFoldChangeTE, marker='o', color='silver', s=1, lw=0, label='Tested genes')
ax.scatter(cntRiboMeanSig, logFoldChangeTEsig, marker='o', color='darkorange', s=1, lw=0, label='Significant genes')
ax.legend(loc='upper right', prop={'size':10})
xLowerBound = (np.percentile(cntRiboMean, 99.0) - min(cntRiboMean)) * -0.02
xUpperBound = np.percentile(cntRiboMean, 99.0)
ax.set_xlim(xLowerBound, xUpperBound)
ax.set_ylim(np.percentile(logFoldChangeTE, 0.5), np.percentile(logFoldChangeTE, 99.5))
ax.get_xaxis().tick_bottom()
ax.get_yaxis().tick_left()
ax.tick_params(axis='x', labelsize=10)
ax.tick_params(axis='y', labelsize=10)
ax.set_title(r'Translation Efficiency Change')
ax.set_xlabel(r'$Mean\/count\/of\/Ribo$-$Seq$', fontsize=15)
ax.set_ylabel(r'$log_{2}(TE_{%s}/TE_{%s})$' % (data.nameCondB, data.nameCondA), fontsize=15)
plt.savefig(fileOutName, format='pdf', bbox_inches='tight')
def deltaTE_hist(data, fdr, fileOutName):
cntRiboNorm = data.countRibo / data.libSizesRibo
cntRnaNorm = data.countRna / data.libSizesRna
padj = data.padj.flatten()
deltaTE = data.logFoldChangeTE.flatten()
with np.errstate(invalid='ignore'):
idxNaN = np.nonzero(~np.isnan(padj))[0]
idxSigDn = np.nonzero(np.logical_and(padj <= fdr, deltaTE < 0))[0]
idxSigUp = np.nonzero(np.logical_and(padj <= fdr, deltaTE > 0))[0]
num = idxNaN.size
muDeltaTE = np.mean(deltaTE[idxNaN])
stdDeltaTE = np.std(deltaTE[idxNaN], ddof=0)
fig, ax = plt.subplots()
maxExtreme = max(deltaTE)
minExtreme = min(deltaTE)
stepSize = (np.percentile(deltaTE, 97.5) - np.percentile(deltaTE, 2.5)) / 25.0
ax.hist(deltaTE[idxNaN], np.arange(minExtreme, maxExtreme, stepSize), histtype='bar', color='darkgrey', rwidth=1.0, linewidth=0.5, edgecolor='white', align='mid', label='All')
ax.hist(deltaTE[idxSigDn], np.arange(minExtreme, maxExtreme, stepSize), histtype='bar', color='crimson', rwidth=1.0, linewidth=0.5, edgecolor='white', align='mid', label='TE down')
ax.hist(deltaTE[idxSigUp], np.arange(minExtreme, maxExtreme, stepSize), histtype='bar', color='dodgerblue', rwidth=1.0, linewidth=0.5, edgecolor='white', align='mid', label='TE up')
ax.legend(loc='upper right', prop={'size':10})
ax.set_title(r'Histogram of Translation Efficiency Change')
ax.set_xlabel(r'$log_{2}(TE_{%s}/TE_{%s})$' % (data.nameCondB, data.nameCondA), fontsize=15)
ax.set_ylabel(r'$Frequency$', fontsize=15)
ax.text(0.02, 0.97, r'$n\,=\,%i$' % num, horizontalalignment='left', verticalalignment='center', transform = ax.transAxes, fontsize=12)
ax.text(0.02, 0.93, r'$\mu\,=\,%1.2f$' % muDeltaTE, horizontalalignment='left', verticalalignment='center', transform = ax.transAxes, fontsize=12)
ax.text(0.02, 0.89, r'$\sigma\,=\,%1.2f$' % stdDeltaTE, horizontalalignment='left', verticalalignment='center', transform = ax.transAxes, fontsize=12)
ax.get_xaxis().tick_bottom()
ax.get_yaxis().tick_left()
ax.tick_params(axis='x', labelsize=10)
ax.tick_params(axis='y', labelsize=10)
plt.savefig(fileOutName, format='pdf', bbox_inches='tight')
def make_plots(data, opts):
if '.' not in opts.outFile or opts.outFile.endswith('.pkl'):
outputNamePrefix = opts.outFile
else:
pos = opts.outFile.rfind('.')
if opts.outFile[pos:pos+2] == './':
outputNamePrefix = opts.outFile
else:
outputNamePrefix = opts.outFile[:pos]
fileOutName = outputNamePrefix + '.EmpDisp.scatter.pdf'
empDisp_scatter(data, fileOutName)
if opts.__dict__['cutoffFDR']:
fdr = opts.cutoffFDR
else:
fdr = 0.1
fileOutName = outputNamePrefix + '.TEchange.scatter.pdf'
cnt_deltaTE_scatter(data, fdr, fileOutName)
fileOutName = outputNamePrefix + '.TEchange.hist.pdf'
deltaTE_hist(data, fdr, fileOutName)
if __name__ == '__main__':
opts = parse_options(sys.argv)
with open(opts.dataPkl, 'rb') as FileIn:
data = pickle.load(FileIn)
if opts.plotWhich in ['EmpDisp', 'All']:
fileOutName = opts.outputPrefix + '.EmpDisp.scatter.pdf'
empDisp_scatter(data, fileOutName)
if opts.plotWhich in ['TE', 'All']:
fdr = opts.cutoffFDR
fileOutName = opts.outputPrefix + '.TEchange.scatter.pdf'
cnt_deltaTE_scatter(data, fdr, fileOutName)
fileOutName = opts.outputPrefix + '.TEchange.hist.pdf'
deltaTE_hist(data, fdr, fileOutName)
| gpl-3.0 |
pratapvardhan/scikit-learn | examples/covariance/plot_robust_vs_empirical_covariance.py | 73 | 6451 | r"""
=======================================
Robust vs Empirical covariance estimate
=======================================
The usual covariance maximum likelihood estimate is very sensitive to the
presence of outliers in the data set. In such a case, it would be better to
use a robust estimator of covariance to guarantee that the estimation is
resistant to "erroneous" observations in the data set.
Minimum Covariance Determinant Estimator
----------------------------------------
The Minimum Covariance Determinant estimator is a robust, high-breakdown point
(i.e. it can be used to estimate the covariance matrix of highly contaminated
datasets, up to
:math:`\frac{n_\text{samples} - n_\text{features}-1}{2}` outliers) estimator of
covariance. The idea is to find
:math:`\frac{n_\text{samples} + n_\text{features}+1}{2}`
observations whose empirical covariance has the smallest determinant, yielding
a "pure" subset of observations from which to compute standards estimates of
location and covariance. After a correction step aiming at compensating the
fact that the estimates were learned from only a portion of the initial data,
we end up with robust estimates of the data set location and covariance.
The Minimum Covariance Determinant estimator (MCD) has been introduced by
P.J.Rousseuw in [1]_.
Evaluation
----------
In this example, we compare the estimation errors that are made when using
various types of location and covariance estimates on contaminated Gaussian
distributed data sets:
- The mean and the empirical covariance of the full dataset, which break
down as soon as there are outliers in the data set
- The robust MCD, that has a low error provided
:math:`n_\text{samples} > 5n_\text{features}`
- The mean and the empirical covariance of the observations that are known
to be good ones. This can be considered as a "perfect" MCD estimation,
so one can trust our implementation by comparing to this case.
References
----------
.. [1] P. J. Rousseeuw. Least median of squares regression. Journal of American
Statistical Ass., 79:871, 1984.
.. [2] Johanna Hardin, David M Rocke. The distribution of robust distances.
Journal of Computational and Graphical Statistics. December 1, 2005,
14(4): 928-946.
.. [3] Zoubir A., Koivunen V., Chakhchoukh Y. and Muma M. (2012). Robust
estimation in signal processing: A tutorial-style treatment of
fundamental concepts. IEEE Signal Processing Magazine 29(4), 61-80.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.font_manager
from sklearn.covariance import EmpiricalCovariance, MinCovDet
# example settings
n_samples = 80
n_features = 5
repeat = 10
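# With these settings the breakdown point quoted in the docstring allows up
# to (n_samples - n_features - 1) // 2 = 37 outliers.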
range_n_outliers = np.concatenate(
(np.linspace(0, n_samples / 8, 5),
np.linspace(n_samples / 8, n_samples / 2, 5)[1:-1]))
# definition of arrays to store results
err_loc_mcd = np.zeros((range_n_outliers.size, repeat))
err_cov_mcd = np.zeros((range_n_outliers.size, repeat))
err_loc_emp_full = np.zeros((range_n_outliers.size, repeat))
err_cov_emp_full = np.zeros((range_n_outliers.size, repeat))
err_loc_emp_pure = np.zeros((range_n_outliers.size, repeat))
err_cov_emp_pure = np.zeros((range_n_outliers.size, repeat))
# computation
for i, n_outliers in enumerate(range_n_outliers):
for j in range(repeat):
rng = np.random.RandomState(i * j)
# generate data
X = rng.randn(n_samples, n_features)
# add some outliers
outliers_index = rng.permutation(n_samples)[:n_outliers]
outliers_offset = 10. * \
(np.random.randint(2, size=(n_outliers, n_features)) - 0.5)
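        # i.e. offsets of magnitude 5 with a random sign per feature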
X[outliers_index] += outliers_offset
inliers_mask = np.ones(n_samples).astype(bool)
inliers_mask[outliers_index] = False
# fit a Minimum Covariance Determinant (MCD) robust estimator to data
mcd = MinCovDet().fit(X)
# compare raw robust estimates with the true location and covariance
err_loc_mcd[i, j] = np.sum(mcd.location_ ** 2)
err_cov_mcd[i, j] = mcd.error_norm(np.eye(n_features))
# compare estimators learned from the full data set with true
# parameters
err_loc_emp_full[i, j] = np.sum(X.mean(0) ** 2)
err_cov_emp_full[i, j] = EmpiricalCovariance().fit(X).error_norm(
np.eye(n_features))
# compare with an empirical covariance learned from a pure data set
# (i.e. "perfect" mcd)
pure_X = X[inliers_mask]
pure_location = pure_X.mean(0)
pure_emp_cov = EmpiricalCovariance().fit(pure_X)
err_loc_emp_pure[i, j] = np.sum(pure_location ** 2)
err_cov_emp_pure[i, j] = pure_emp_cov.error_norm(np.eye(n_features))
# Display results
font_prop = matplotlib.font_manager.FontProperties(size=11)
plt.subplot(2, 1, 1)
lw = 2
plt.errorbar(range_n_outliers, err_loc_mcd.mean(1),
yerr=err_loc_mcd.std(1) / np.sqrt(repeat),
label="Robust location", lw=lw, color='m')
plt.errorbar(range_n_outliers, err_loc_emp_full.mean(1),
yerr=err_loc_emp_full.std(1) / np.sqrt(repeat),
label="Full data set mean", lw=lw, color='green')
plt.errorbar(range_n_outliers, err_loc_emp_pure.mean(1),
yerr=err_loc_emp_pure.std(1) / np.sqrt(repeat),
label="Pure data set mean", lw=lw, color='black')
plt.title("Influence of outliers on the location estimation")
plt.ylabel(r"Error ($||\mu - \hat{\mu}||_2^2$)")
plt.legend(loc="upper left", prop=font_prop)
plt.subplot(2, 1, 2)
x_size = range_n_outliers.size
plt.errorbar(range_n_outliers, err_cov_mcd.mean(1),
yerr=err_cov_mcd.std(1),
label="Robust covariance (mcd)", color='m')
plt.errorbar(range_n_outliers[:(x_size / 5 + 1)],
err_cov_emp_full.mean(1)[:(x_size / 5 + 1)],
yerr=err_cov_emp_full.std(1)[:(x_size / 5 + 1)],
label="Full data set empirical covariance", color='green')
plt.plot(range_n_outliers[(x_size / 5):(x_size / 2 - 1)],
err_cov_emp_full.mean(1)[(x_size / 5):(x_size / 2 - 1)], color='green',
ls='--')
plt.errorbar(range_n_outliers, err_cov_emp_pure.mean(1),
yerr=err_cov_emp_pure.std(1),
label="Pure data set empirical covariance", color='black')
plt.title("Influence of outliers on the covariance estimation")
plt.xlabel("Amount of contamination (%)")
plt.ylabel("RMSE")
plt.legend(loc="upper center", prop=font_prop)
plt.show()
| bsd-3-clause |
pp-mo/iris | docs/iris/src/sphinxext/gen_gallery.py | 2 | 6174 | #
# (C) Copyright 2012 MATPLOTLIB (vn 1.2.0)
#
'''
Generate a thumbnail gallery of examples.
'''
import os
import glob
import re
import warnings
import matplotlib.image as image
from sphinx.util import status_iterator
template = '''\
{{% extends "layout.html" %}}
{{% set title = "Thumbnail gallery" %}}
{{% block body %}}
<h3>Click on any image to see full size image and source code</h3>
<br/>
<ul>
<li><a class="reference internal" href="#">Gallery</a>
<ul>
{}
</ul>
</li>
</ul>
{}
{{% endblock %}}
'''
multiimage = re.compile(r'(.*?)(_\d\d){1,2}')
def make_thumbnail(args):
image.thumbnail(args[0], args[1], 0.4)
def out_of_date(original, derived):
return (not os.path.exists(derived) or
os.stat(derived).st_mtime < os.stat(original).st_mtime)
def gen_gallery(app, doctree):
if app.builder.name != 'html':
return
outdir = app.builder.outdir
rootdir = 'examples'
# Images we want to skip for the gallery because they are an unusual
# size that doesn't layout well in a table, or because they may be
# redundant with other images or uninteresting.
skips = set([
'mathtext_examples',
'matshow_02',
'matshow_03',
'matplotlib_icon'])
thumbnails = {}
rows = []
random_image = []
toc_rows = []
link_template = ('<a href="{href}">'
'<img src="{thumb_file}" border="0"'
' alt="{alternative_text}"/>'
'</a>')
header_template = ('<div class="section" id="{}">'
'<h4>{}'
'<a class="headerlink" href="#{}"'
' title="Permalink to this headline">¶</a>'
'</h4>')
toc_template = ('<li>'
'<a class="reference internal" href="#{}">{}</a>'
'</li>')
random_image_content_template = '''
// This file was automatically generated by gen_gallery.py & should not be
// modified directly.
images = new Array();
{}
'''
random_image_template = "['{thumbfile}', '{full_image}', '{link}'];"
random_image_join = 'images[{}] = {}'
dirs = ('General', 'Meteorology', 'Oceanography')
for subdir in dirs:
rows.append(header_template.format(subdir, subdir, subdir))
toc_rows.append(toc_template.format(subdir, subdir))
origdir = os.path.join(os.path.dirname(outdir), rootdir, subdir)
if not os.path.exists(origdir):
origdir = os.path.join(os.path.dirname(outdir), 'plot_directive',
rootdir, subdir)
thumbdir = os.path.join(outdir, rootdir, subdir, 'thumbnails')
if not os.path.exists(thumbdir):
os.makedirs(thumbdir)
data = []
for filename in sorted(glob.glob(os.path.join(origdir, '*.png'))):
if filename.endswith('hires.png'):
continue
path, filename = os.path.split(filename)
basename, ext = os.path.splitext(filename)
if basename in skips:
continue
# Create thumbnails based on images in tmpdir, and place them
# within the build tree.
orig_path = str(os.path.join(origdir, filename))
thumb_path = str(os.path.join(thumbdir, filename))
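            # the "or True" below forces every thumbnail to be regenerated
            # on each build, making the out_of_date() check a no-op here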
if out_of_date(orig_path, thumb_path) or True:
thumbnails[orig_path] = thumb_path
m = multiimage.match(basename)
if m is not None:
basename = m.group(1)
data.append((subdir, basename,
os.path.join(rootdir, subdir, 'thumbnails',
filename)))
for (subdir, basename, thumbfile) in data:
if thumbfile is not None:
anchor = os.path.basename(thumbfile)
anchor = os.path.splitext(anchor)[0].replace('_', '-')
link = 'examples/{}/{}.html#{}'.format(
subdir,
basename,
anchor)
rows.append(link_template.format(
href=link,
thumb_file=thumbfile,
alternative_text=basename))
random_image.append(random_image_template.format(
link=link,
thumbfile=thumbfile,
basename=basename,
full_image='_images/' + os.path.basename(thumbfile)))
if len(data) == 0:
warnings.warn('No thumbnails were found in {}'.format(subdir))
# Close out the <div> opened up at the top of this loop.
rows.append('</div>')
# Generate JS list of images for front page.
random_image_content = '\n'.join([random_image_join.format(i, line)
for i, line in enumerate(random_image)])
random_image_content = random_image_content_template.format(
random_image_content)
random_image_script_path = os.path.join(app.builder.srcdir,
'_static',
'random_image.js')
with open(random_image_script_path, 'w') as fh:
fh.write(random_image_content)
content = template.format('\n'.join(toc_rows),
'\n'.join(rows))
# Only write out the file if the contents have actually changed.
# Otherwise, this triggers a full rebuild of the docs.
gallery_path = os.path.join(app.builder.srcdir,
'_templates',
'gallery.html')
if os.path.exists(gallery_path):
with open(gallery_path, 'r') as fh:
regenerate = fh.read() != content
else:
regenerate = True
if regenerate:
with open(gallery_path, 'w') as fh:
fh.write(content)
for key in status_iterator(thumbnails, 'generating thumbnails... ',
length=len(thumbnails)):
image.thumbnail(key, thumbnails[key], 0.3)
def setup(app):
app.connect('env-updated', gen_gallery)
| lgpl-3.0 |
ammarkhann/FinalSeniorCode | lib/python2.7/site-packages/pandas/tests/groupby/test_nth.py | 6 | 9976 | import numpy as np
import pandas as pd
from pandas import DataFrame, MultiIndex, Index, Series, isnull
from pandas.compat import lrange
from pandas.util.testing import assert_frame_equal, assert_series_equal
from .common import MixIn
class TestNth(MixIn):
def test_first_last_nth(self):
# tests for first / last / nth
grouped = self.df.groupby('A')
first = grouped.first()
expected = self.df.loc[[1, 0], ['B', 'C', 'D']]
expected.index = Index(['bar', 'foo'], name='A')
expected = expected.sort_index()
assert_frame_equal(first, expected)
nth = grouped.nth(0)
assert_frame_equal(nth, expected)
last = grouped.last()
expected = self.df.loc[[5, 7], ['B', 'C', 'D']]
expected.index = Index(['bar', 'foo'], name='A')
assert_frame_equal(last, expected)
nth = grouped.nth(-1)
assert_frame_equal(nth, expected)
nth = grouped.nth(1)
expected = self.df.loc[[2, 3], ['B', 'C', 'D']].copy()
expected.index = Index(['foo', 'bar'], name='A')
expected = expected.sort_index()
assert_frame_equal(nth, expected)
# it works!
grouped['B'].first()
grouped['B'].last()
grouped['B'].nth(0)
self.df.loc[self.df['A'] == 'foo', 'B'] = np.nan
assert isnull(grouped['B'].first()['foo'])
assert isnull(grouped['B'].last()['foo'])
assert isnull(grouped['B'].nth(0)['foo'])
# v0.14.0 whatsnew
df = DataFrame([[1, np.nan], [1, 4], [5, 6]], columns=['A', 'B'])
g = df.groupby('A')
result = g.first()
expected = df.iloc[[1, 2]].set_index('A')
assert_frame_equal(result, expected)
expected = df.iloc[[1, 2]].set_index('A')
result = g.nth(0, dropna='any')
assert_frame_equal(result, expected)
def test_first_last_nth_dtypes(self):
df = self.df_mixed_floats.copy()
df['E'] = True
df['F'] = 1
# tests for first / last / nth
grouped = df.groupby('A')
first = grouped.first()
expected = df.loc[[1, 0], ['B', 'C', 'D', 'E', 'F']]
expected.index = Index(['bar', 'foo'], name='A')
expected = expected.sort_index()
assert_frame_equal(first, expected)
last = grouped.last()
expected = df.loc[[5, 7], ['B', 'C', 'D', 'E', 'F']]
expected.index = Index(['bar', 'foo'], name='A')
expected = expected.sort_index()
assert_frame_equal(last, expected)
nth = grouped.nth(1)
expected = df.loc[[3, 2], ['B', 'C', 'D', 'E', 'F']]
expected.index = Index(['bar', 'foo'], name='A')
expected = expected.sort_index()
assert_frame_equal(nth, expected)
# GH 2763, first/last shifting dtypes
idx = lrange(10)
idx.append(9)
s = Series(data=lrange(11), index=idx, name='IntCol')
assert s.dtype == 'int64'
f = s.groupby(level=0).first()
assert f.dtype == 'int64'
def test_nth(self):
df = DataFrame([[1, np.nan], [1, 4], [5, 6]], columns=['A', 'B'])
g = df.groupby('A')
assert_frame_equal(g.nth(0), df.iloc[[0, 2]].set_index('A'))
assert_frame_equal(g.nth(1), df.iloc[[1]].set_index('A'))
assert_frame_equal(g.nth(2), df.loc[[]].set_index('A'))
assert_frame_equal(g.nth(-1), df.iloc[[1, 2]].set_index('A'))
assert_frame_equal(g.nth(-2), df.iloc[[0]].set_index('A'))
assert_frame_equal(g.nth(-3), df.loc[[]].set_index('A'))
assert_series_equal(g.B.nth(0), df.set_index('A').B.iloc[[0, 2]])
assert_series_equal(g.B.nth(1), df.set_index('A').B.iloc[[1]])
assert_frame_equal(g[['B']].nth(0),
df.loc[[0, 2], ['A', 'B']].set_index('A'))
exp = df.set_index('A')
assert_frame_equal(g.nth(0, dropna='any'), exp.iloc[[1, 2]])
assert_frame_equal(g.nth(-1, dropna='any'), exp.iloc[[1, 2]])
exp['B'] = np.nan
assert_frame_equal(g.nth(7, dropna='any'), exp.iloc[[1, 2]])
assert_frame_equal(g.nth(2, dropna='any'), exp.iloc[[1, 2]])
# out of bounds, regression from 0.13.1
# GH 6621
df = DataFrame({'color': {0: 'green',
1: 'green',
2: 'red',
3: 'red',
4: 'red'},
'food': {0: 'ham',
1: 'eggs',
2: 'eggs',
3: 'ham',
4: 'pork'},
'two': {0: 1.5456590000000001,
1: -0.070345000000000005,
2: -2.4004539999999999,
3: 0.46206000000000003,
4: 0.52350799999999997},
'one': {0: 0.56573799999999996,
1: -0.9742360000000001,
2: 1.033801,
3: -0.78543499999999999,
4: 0.70422799999999997}}).set_index(['color',
'food'])
result = df.groupby(level=0, as_index=False).nth(2)
expected = df.iloc[[-1]]
assert_frame_equal(result, expected)
result = df.groupby(level=0, as_index=False).nth(3)
expected = df.loc[[]]
assert_frame_equal(result, expected)
# GH 7559
# from the vbench
df = DataFrame(np.random.randint(1, 10, (100, 2)), dtype='int64')
s = df[1]
g = df[0]
expected = s.groupby(g).first()
expected2 = s.groupby(g).apply(lambda x: x.iloc[0])
assert_series_equal(expected2, expected, check_names=False)
        assert expected.name == 1
# validate first
v = s[g == 1].iloc[0]
assert expected.iloc[0] == v
assert expected2.iloc[0] == v
# this is NOT the same as .first (as sorted is default!)
# as it keeps the order in the series (and not the group order)
# related GH 7287
expected = s.groupby(g, sort=False).first()
result = s.groupby(g, sort=False).nth(0, dropna='all')
assert_series_equal(result, expected)
# doc example
df = DataFrame([[1, np.nan], [1, 4], [5, 6]], columns=['A', 'B'])
g = df.groupby('A')
result = g.B.nth(0, dropna=True)
expected = g.B.first()
assert_series_equal(result, expected)
# test multiple nth values
df = DataFrame([[1, np.nan], [1, 3], [1, 4], [5, 6], [5, 7]],
columns=['A', 'B'])
g = df.groupby('A')
assert_frame_equal(g.nth(0), df.iloc[[0, 3]].set_index('A'))
assert_frame_equal(g.nth([0]), df.iloc[[0, 3]].set_index('A'))
assert_frame_equal(g.nth([0, 1]), df.iloc[[0, 1, 3, 4]].set_index('A'))
assert_frame_equal(
g.nth([0, -1]), df.iloc[[0, 2, 3, 4]].set_index('A'))
assert_frame_equal(
g.nth([0, 1, 2]), df.iloc[[0, 1, 2, 3, 4]].set_index('A'))
assert_frame_equal(
g.nth([0, 1, -1]), df.iloc[[0, 1, 2, 3, 4]].set_index('A'))
assert_frame_equal(g.nth([2]), df.iloc[[2]].set_index('A'))
assert_frame_equal(g.nth([3, 4]), df.loc[[]].set_index('A'))
business_dates = pd.date_range(start='4/1/2014', end='6/30/2014',
freq='B')
df = DataFrame(1, index=business_dates, columns=['a', 'b'])
# get the first, fourth and last two business days for each month
key = (df.index.year, df.index.month)
result = df.groupby(key, as_index=False).nth([0, 3, -2, -1])
expected_dates = pd.to_datetime(
['2014/4/1', '2014/4/4', '2014/4/29', '2014/4/30', '2014/5/1',
'2014/5/6', '2014/5/29', '2014/5/30', '2014/6/2', '2014/6/5',
'2014/6/27', '2014/6/30'])
expected = DataFrame(1, columns=['a', 'b'], index=expected_dates)
assert_frame_equal(result, expected)
def test_nth_multi_index(self):
# PR 9090, related to issue 8979
# test nth on MultiIndex, should match .first()
grouped = self.three_group.groupby(['A', 'B'])
result = grouped.nth(0)
expected = grouped.first()
assert_frame_equal(result, expected)
def test_nth_multi_index_as_expected(self):
# PR 9090, related to issue 8979
# test nth on MultiIndex
three_group = DataFrame(
{'A': ['foo', 'foo', 'foo', 'foo', 'bar', 'bar', 'bar', 'bar',
'foo', 'foo', 'foo'],
'B': ['one', 'one', 'one', 'two', 'one', 'one', 'one', 'two',
'two', 'two', 'one'],
'C': ['dull', 'dull', 'shiny', 'dull', 'dull', 'shiny', 'shiny',
'dull', 'shiny', 'shiny', 'shiny']})
grouped = three_group.groupby(['A', 'B'])
result = grouped.nth(0)
expected = DataFrame(
{'C': ['dull', 'dull', 'dull', 'dull']},
index=MultiIndex.from_arrays([['bar', 'bar', 'foo', 'foo'],
['one', 'two', 'one', 'two']],
names=['A', 'B']))
assert_frame_equal(result, expected)
def test_nth_empty():
# GH 16064
df = DataFrame(index=[0], columns=['a', 'b', 'c'])
result = df.groupby('a').nth(10)
expected = DataFrame(index=Index([], name='a'), columns=['b', 'c'])
assert_frame_equal(result, expected)
result = df.groupby(['a', 'b']).nth(10)
expected = DataFrame(index=MultiIndex([[], []], [[], []],
names=['a', 'b']),
columns=['c'])
assert_frame_equal(result, expected)
| mit |
harisbal/pandas | pandas/tests/indexes/multi/test_equivalence.py | 1 | 6994 | # -*- coding: utf-8 -*-
import numpy as np
from pandas.compat import lrange, lzip, range
import pandas as pd
from pandas import Index, MultiIndex, Series
import pandas.util.testing as tm
def test_equals(idx):
assert idx.equals(idx)
assert idx.equals(idx.copy())
assert idx.equals(idx.astype(object))
assert not idx.equals(list(idx))
assert not idx.equals(np.array(idx))
same_values = Index(idx, dtype=object)
assert idx.equals(same_values)
assert same_values.equals(idx)
if idx.nlevels == 1:
# do not test MultiIndex
assert not idx.equals(pd.Series(idx))
def test_equals_op(idx):
# GH9947, GH10637
index_a = idx
n = len(index_a)
index_b = index_a[0:-1]
index_c = index_a[0:-1].append(index_a[-2:-1])
index_d = index_a[0:1]
with tm.assert_raises_regex(ValueError, "Lengths must match"):
index_a == index_b
expected1 = np.array([True] * n)
expected2 = np.array([True] * (n - 1) + [False])
tm.assert_numpy_array_equal(index_a == index_a, expected1)
tm.assert_numpy_array_equal(index_a == index_c, expected2)
# test comparisons with numpy arrays
array_a = np.array(index_a)
array_b = np.array(index_a[0:-1])
array_c = np.array(index_a[0:-1].append(index_a[-2:-1]))
array_d = np.array(index_a[0:1])
with tm.assert_raises_regex(ValueError, "Lengths must match"):
index_a == array_b
tm.assert_numpy_array_equal(index_a == array_a, expected1)
tm.assert_numpy_array_equal(index_a == array_c, expected2)
# test comparisons with Series
series_a = Series(array_a)
series_b = Series(array_b)
series_c = Series(array_c)
series_d = Series(array_d)
with tm.assert_raises_regex(ValueError, "Lengths must match"):
index_a == series_b
tm.assert_numpy_array_equal(index_a == series_a, expected1)
tm.assert_numpy_array_equal(index_a == series_c, expected2)
# cases where length is 1 for one of them
with tm.assert_raises_regex(ValueError, "Lengths must match"):
index_a == index_d
with tm.assert_raises_regex(ValueError, "Lengths must match"):
index_a == series_d
with tm.assert_raises_regex(ValueError, "Lengths must match"):
index_a == array_d
msg = "Can only compare identically-labeled Series objects"
with tm.assert_raises_regex(ValueError, msg):
series_a == series_d
with tm.assert_raises_regex(ValueError, "Lengths must match"):
series_a == array_d
# comparing with a scalar should broadcast; note that we are excluding
# MultiIndex because in this case each item in the index is a tuple of
# length 2, and therefore is considered an array of length 2 in the
# comparison instead of a scalar
if not isinstance(index_a, MultiIndex):
expected3 = np.array([False] * (len(index_a) - 2) + [True, False])
# assuming the 2nd to last item is unique in the data
item = index_a[-2]
tm.assert_numpy_array_equal(index_a == item, expected3)
tm.assert_series_equal(series_a == item, Series(expected3))
def test_equals_multi(idx):
assert idx.equals(idx)
assert not idx.equals(idx.values)
assert idx.equals(Index(idx.values))
assert idx.equal_levels(idx)
assert not idx.equals(idx[:-1])
assert not idx.equals(idx[-1])
# different number of levels
index = MultiIndex(levels=[Index(lrange(4)), Index(lrange(4)), Index(
lrange(4))], labels=[np.array([0, 0, 1, 2, 2, 2, 3, 3]), np.array(
[0, 1, 0, 0, 0, 1, 0, 1]), np.array([1, 0, 1, 1, 0, 0, 1, 0])])
index2 = MultiIndex(levels=index.levels[:-1], labels=index.labels[:-1])
assert not index.equals(index2)
assert not index.equal_levels(index2)
# levels are different
major_axis = Index(lrange(4))
minor_axis = Index(lrange(2))
major_labels = np.array([0, 0, 1, 2, 2, 3])
minor_labels = np.array([0, 1, 0, 0, 1, 0])
index = MultiIndex(levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels])
assert not idx.equals(index)
assert not idx.equal_levels(index)
# some of the labels are different
major_axis = Index(['foo', 'bar', 'baz', 'qux'])
minor_axis = Index(['one', 'two'])
major_labels = np.array([0, 0, 2, 2, 3, 3])
minor_labels = np.array([0, 1, 0, 1, 0, 1])
index = MultiIndex(levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels])
assert not idx.equals(index)
def test_identical(idx):
mi = idx.copy()
mi2 = idx.copy()
assert mi.identical(mi2)
mi = mi.set_names(['new1', 'new2'])
assert mi.equals(mi2)
assert not mi.identical(mi2)
mi2 = mi2.set_names(['new1', 'new2'])
assert mi.identical(mi2)
mi3 = Index(mi.tolist(), names=mi.names)
mi4 = Index(mi.tolist(), names=mi.names, tupleize_cols=False)
assert mi.identical(mi3)
assert not mi.identical(mi4)
assert mi.equals(mi4)
def test_equals_operator(idx):
# GH9785
assert (idx == idx).all()
def test_equals_missing_values():
# make sure take is not using -1
i = pd.MultiIndex.from_tuples([(0, pd.NaT),
(0, pd.Timestamp('20130101'))])
result = i[0:1].equals(i[0])
assert not result
result = i[1:2].equals(i[1])
assert not result
def test_is_():
mi = MultiIndex.from_tuples(lzip(range(10), range(10)))
assert mi.is_(mi)
assert mi.is_(mi.view())
assert mi.is_(mi.view().view().view().view())
mi2 = mi.view()
# names are metadata, they don't change id
mi2.names = ["A", "B"]
assert mi2.is_(mi)
assert mi.is_(mi2)
assert mi.is_(mi.set_names(["C", "D"]))
mi2 = mi.view()
mi2.set_names(["E", "F"], inplace=True)
assert mi.is_(mi2)
# levels are inherent properties, they change identity
mi3 = mi2.set_levels([lrange(10), lrange(10)])
assert not mi3.is_(mi2)
# shouldn't change
assert mi2.is_(mi)
mi4 = mi3.view()
# GH 17464 - Remove duplicate MultiIndex levels
mi4.set_levels([lrange(10), lrange(10)], inplace=True)
assert not mi4.is_(mi3)
mi5 = mi.view()
mi5.set_levels(mi5.levels, inplace=True)
assert not mi5.is_(mi)
def test_is_all_dates(idx):
assert not idx.is_all_dates
def test_is_numeric(idx):
# MultiIndex is never numeric
assert not idx.is_numeric()
def test_multiindex_compare():
# GH 21149
# Ensure comparison operations for MultiIndex with nlevels == 1
# behave consistently with those for MultiIndex with nlevels > 1
midx = pd.MultiIndex.from_product([[0, 1]])
# Equality self-test: MultiIndex object vs self
expected = pd.Series([True, True])
result = pd.Series(midx == midx)
tm.assert_series_equal(result, expected)
# Greater than comparison: MultiIndex object vs self
expected = pd.Series([False, False])
result = pd.Series(midx > midx)
tm.assert_series_equal(result, expected)
| bsd-3-clause |
IndraVikas/scikit-learn | doc/tutorial/text_analytics/skeletons/exercise_02_sentiment.py | 256 | 2406 | """Build a sentiment analysis / polarity model
Sentiment analysis can be cast as a binary text classification problem,
that is, fitting a linear classifier on features extracted from the text
of the user messages so as to guess whether the opinion of the author is
positive or negative.
In this example we will use a movie review dataset.
"""
# Author: Olivier Grisel <[email protected]>
# License: Simplified BSD
import sys
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.svm import LinearSVC
from sklearn.pipeline import Pipeline
from sklearn.grid_search import GridSearchCV
from sklearn.datasets import load_files
from sklearn.cross_validation import train_test_split
from sklearn import metrics
if __name__ == "__main__":
# NOTE: we put the following in a 'if __name__ == "__main__"' protected
# block to be able to use a multi-core grid search that also works under
# Windows, see: http://docs.python.org/library/multiprocessing.html#windows
# The multiprocessing module is used as the backend of joblib.Parallel
# that is used when n_jobs != 1 in GridSearchCV
# the training data folder must be passed as first argument
movie_reviews_data_folder = sys.argv[1]
dataset = load_files(movie_reviews_data_folder, shuffle=False)
print("n_samples: %d" % len(dataset.data))
# split the dataset in training and test set:
docs_train, docs_test, y_train, y_test = train_test_split(
dataset.data, dataset.target, test_size=0.25, random_state=None)
# TASK: Build a vectorizer / classifier pipeline that filters out tokens
# that are too rare or too frequent
# TASK: Build a grid search to find out whether unigrams or bigrams are
# more useful.
# Fit the pipeline on the training set using grid search for the parameters
# TASK: print the cross-validated scores for each parameter set
# explored by the grid search
# TASK: Predict the outcome on the testing set and store it in a variable
# named y_predicted
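# One possible completion of the TASKs above, shown as a sketch: the
# min_df/max_df/C values and the ngram grid are illustrative choices,
# not the canonical solution.
pipeline = Pipeline([
    ('vect', TfidfVectorizer(min_df=3, max_df=0.95)),
    ('clf', LinearSVC(C=1000)),
])
parameters = {'vect__ngram_range': [(1, 1), (1, 2)]}
grid_search = GridSearchCV(pipeline, parameters, n_jobs=-1)
grid_search.fit(docs_train, y_train)
for params, mean_score, scores in grid_search.grid_scores_:
    print("%0.3f (+/-%0.03f) for %r" % (mean_score, scores.std() * 2, params))
y_predicted = grid_search.predict(docs_test)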
# Print the classification report
print(metrics.classification_report(y_test, y_predicted,
target_names=dataset.target_names))
# Print and plot the confusion matrix
cm = metrics.confusion_matrix(y_test, y_predicted)
print(cm)
# import matplotlib.pyplot as plt
# plt.matshow(cm)
# plt.show()
| bsd-3-clause |
Traecp/MCA_GUI | McaGUI_v17.py | 3 | 73468 | #!/usr/bin/python
# -*- coding: utf-8 -*-
import numpy as np
import scipy.ndimage
from scipy import stats
from scipy.fftpack import fft, fftfreq, fftshift
import os, sys
import gc
from os import listdir
from os.path import isfile,join
import gtk
import matplotlib as mpl
import matplotlib.pyplot as plt
#mpl.use('GtkAgg')
from matplotlib.figure import Figure
#from matplotlib.axes import Subplot
from matplotlib.backends.backend_gtkagg import FigureCanvasGTKAgg as FigureCanvas
from matplotlib.backends.backend_gtkagg import NavigationToolbar2GTKAgg as NavigationToolbar
from matplotlib.cm import jet#, gist_rainbow # colormap
from matplotlib.widgets import Cursor
#from matplotlib.patches import Rectangle
from matplotlib import path
#import matplotlib.patches as patches
from matplotlib.ticker import MaxNLocator
import xrayutilities as xu
from lmfit import Parameters, minimize
import h5py as h5
from MCA_GUI import mca_spec as SP
__version__ = "1.1.8"
__date__ = "06/11/2014"
__author__ = "Thanh-Tra NGUYEN"
__email__ = "[email protected]"
#mpl.rcParams['font.size'] = 18.0
#mpl.rcParams['axes.labelsize'] = 'large'
mpl.rcParams['legend.fancybox'] = True
mpl.rcParams['legend.handletextpad'] = 0.5
mpl.rcParams['legend.fontsize'] = 'medium'
mpl.rcParams['figure.subplot.bottom'] = 0.13
mpl.rcParams['figure.subplot.top'] = 0.93
mpl.rcParams['figure.subplot.left'] = 0.14
mpl.rcParams['figure.subplot.right'] = 0.915
mpl.rcParams['savefig.dpi'] = 300
def Fourier(X,vect):
N = vect.size #number of data points
T = X[1] - X[0] #sample spacing
TF = fft(vect)
xf = fftfreq(N,T)
xf = fftshift(xf)
yplot = fftshift(TF)
yplot = np.abs(yplot)
yplot = yplot[N/2:]
xf = xf[N/2:]
return xf, yplot/yplot.max()
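# Illustrative use of Fourier() for fringe-period extraction (synthetic
# data; the values are assumptions): the dominant peak of the returned
# spectrum sits near 1/period of the input oscillation.
#   q = np.linspace(0., 5., 512)              # uniform Q grid
#   sig = np.cos(2*np.pi*q/0.1)               # fringes with period 0.1
#   xf, amp = Fourier(q, sig)                 # amp peaks near 1/0.1 = 10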
def flat_data(data,dynlow, dynhigh, log):
""" Returns data where maximum superior than 10^dynhigh will be replaced by 10^dynhigh, inferior than 10^dynlow will be replaced by 10^dynlow"""
if log:
mi = 10**dynlow
ma = 10**dynhigh
data=np.minimum(np.maximum(data,mi),ma)
data=np.log10(data)
else:
mi = dynlow
ma = dynhigh
data=np.minimum(np.maximum(data,mi),ma)
return data
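# Worked example (assumed values): with dynlow=0, dynhigh=3 and log=True,
# the array [0.5, 10., 1e5] is clipped to [1., 10., 1000.] and then
# log-scaled to [0., 1., 3.].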
def psdVoigt(parameters,x):
"""Define pseudovoigt function"""
y0 = parameters['y0'].value
xc = parameters['xc'].value
A = parameters['A'].value
w = parameters['w'].value
mu = parameters['mu'].value
y = y0 + A * ( mu * (2/np.pi) * (w / (4*(x-xc)**2 + w**2)) + (1 - mu) * (np.sqrt(4*np.log(2)) / (np.sqrt(np.pi) * w)) * np.exp(-(4*np.log(2)/w**2)*(x-xc)**2) )
return y
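# Illustrative evaluation (hypothetical parameter values):
#   pars = Parameters()
#   for n, v in (('y0',0.), ('xc',1.), ('A',5.), ('w',0.2), ('mu',0.5)):
#       pars.add(n, value=v)
#   x = np.linspace(0., 2., 201)
#   y = psdVoigt(pars, x)   # unit-area components scaled by A, centred at xc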
def objective(pars,y,x):
#we will minimize this function
err = y - psdVoigt(pars,x)
return err
def init(data_x,data_y,xc,arbitrary=False):
""" param = [y0, xc, A, w, mu]
Je veux que Xc soit la position que l'utilisateur pointe sur l'image pour tracer les profiles"""
param = Parameters()
#idA=np.where(data_x - xc < 1e-4)[0]
if arbitrary:
A = data_y.max()
else:
idA=np.where(data_x==xc)[0][0]
A = data_y[idA]
y0 = 1.0
w = 0.5
mu = 0.5
param.add('y0', value=y0)
param.add('xc', value=xc)
param.add('A', value=A)
param.add('w', value=w)
param.add('mu', value=mu, min=0., max=1.)
return param
def fit(data_x,data_y,xc, arbitrary=False):
""" return: fitted data y, fitted parameters """
param_init = init(data_x,data_y,xc,arbitrary)
if data_x[0] > data_x[-1]:
data_x = data_x[::-1]
result = minimize(objective, param_init, args=(data_y,data_x)) # note: the lmfit version used here updates param_init in place
x = np.linspace(data_x.min(),data_x.max(),data_x.shape[0])
y = psdVoigt(param_init,x)
return param_init, y
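# Illustrative fit of a synthetic peak (assumed data; arbitrary=True
# avoids the exact-match lookup of xc in data_x):
#   x = np.linspace(-1., 1., 201)
#   y = 3.*np.exp(-x**2/0.02) + 0.1
#   pars, y_fit = fit(x, y, 0., arbitrary=True)
#   pars['xc'].value   # close to 0. after the in-place minimization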
class PopUpFringes(object):
def __init__(self, xdata, xlabel, ylabel, title):
self.popupwin=gtk.Window()
self.popupwin.set_size_request(600,550)
self.popupwin.set_position(gtk.WIN_POS_CENTER)
self.popupwin.set_border_width(10)
self.xdata = xdata
vbox = gtk.VBox()
self.fig=Figure(dpi=100)
self.ax = self.fig.add_subplot(111)
self.canvas = FigureCanvas(self.fig)
self.main_figure_navBar = NavigationToolbar(self.canvas, self)
self.cursor = Cursor(self.ax, color='k', linewidth=1, useblit=True)
self.ax.set_xlabel(xlabel, fontsize = 18)
self.ax.set_ylabel(ylabel, fontsize = 18)
self.ax.set_title(title, fontsize = 18)
xi = np.arange(len(self.xdata))
slope, intercept, r_value, p_value, std_err = stats.linregress(self.xdata,xi)
fitline = slope*self.xdata+intercept
self.ax.plot(self.xdata, fitline, 'r-',self.xdata,xi, 'bo')
self.ax.axis([self.xdata.min(),self.xdata.max(),xi.min()-1, xi.max()+1])
self.ax.text(0.3, 0.9,'Slope = %.4f +- %.4f' % (slope, std_err),
horizontalalignment='center',
verticalalignment='center',
transform = self.ax.transAxes,
color='red')
vbox.pack_start(self.main_figure_navBar, False, False, 0)
vbox.pack_start(self.canvas, True, True, 2)
self.popupwin.add(vbox)
self.popupwin.connect("destroy", self.dest)
self.popupwin.show_all()
def dest(self,widget):
self.popupwin.destroy()
class PopUpImage(object):
def __init__(self, xdata, ydata, xlabel, ylabel, title):
self.popupwin=gtk.Window()
self.popupwin.set_size_request(600,550)
self.popupwin.set_position(gtk.WIN_POS_CENTER)
self.popupwin.set_border_width(10)
self.xdata = xdata
self.ydata = ydata
vbox = gtk.VBox()
self.fig=Figure(dpi=100)
self.ax = self.fig.add_subplot(111)
self.canvas = FigureCanvas(self.fig)
self.main_figure_navBar = NavigationToolbar(self.canvas, self)
self.cursor = Cursor(self.ax, color='k', linewidth=1, useblit=True)
self.canvas.mpl_connect("button_press_event",self.on_press)
self.ax.set_xlabel(xlabel, fontsize = 18)
self.ax.set_ylabel(ylabel, fontsize = 18)
self.ax.set_title(title, fontsize = 18)
self.ax.plot(self.xdata, self.ydata, 'b-', lw=2)
self.textes = []
self.plots = []
vbox.pack_start(self.main_figure_navBar, False, False, 0)
vbox.pack_start(self.canvas, True, True, 2)
self.popupwin.add(vbox)
self.popupwin.connect("destroy", self.dest)
self.popupwin.show_all()
def dest(self,widget):
self.popupwin.destroy()
def on_press(self, event):
if event.inaxes == self.ax and event.button==3:
self.clear_notes()
xc = event.xdata
#***** Find the closest x value *****
residuel = self.xdata - xc
residuel = np.abs(residuel)
j = np.argmin(residuel)
#y = self.ydata[i-1:i+1]
#yc= y.max()
#j = np.where(self.ydata == yc)
#j = j[0][0]
xc= self.xdata[j]
x_fit = self.xdata[j-3:j+3]
y_fit = self.ydata[j-3:j+3]
fitted_param, fitted_data = fit(x_fit, y_fit, xc, True)
x_fit = np.linspace(x_fit.min(), x_fit.max(), 200)
y_fit = psdVoigt(fitted_param, x_fit)
period = fitted_param['xc'].value
std_err= fitted_param['xc'].stderr
p = self.ax.plot(x_fit, y_fit,'r-')
p2 = self.ax.axvline(period,color='green',lw=2)
txt=self.ax.text(0.05, 0.9, 'Period = %.4f +- %.4f (nm)'%(period, std_err), transform = self.ax.transAxes, color='red')
self.textes.append(txt)
self.plots.append(p[0])
self.plots.append(p2)
elif event.inaxes == self.ax and event.button==2:
dif = np.diff(self.ydata)
dif = dif/dif.max()
p3=self.ax.plot(dif,'r-')
self.plots.append(p3[0])
self.canvas.draw()
def clear_notes(self):
if len(self.textes)>0:
for t in self.textes:
t.remove()
if len(self.plots)>0:
for p in self.plots:
p.remove()
self.textes = []
self.plots = []
class MyMainWindow(gtk.Window):
def __init__(self):
super(MyMainWindow, self).__init__()
self.set_title("MCA Reciprocal space map processing. Version %s - last update on: %s"%(__version__,__date__))
self.set_size_request(1200,900)
self.set_position(gtk.WIN_POS_CENTER)
self.set_border_width(10)
self.toolbar = gtk.Toolbar()
self.toolbar.set_style(gtk.TOOLBAR_ICONS)
self.refreshtb = gtk.ToolButton(gtk.STOCK_REFRESH)
self.opentb = gtk.ToolButton(gtk.STOCK_OPEN)
self.sep = gtk.SeparatorToolItem()
self.aspecttb = gtk.ToolButton(gtk.STOCK_PAGE_SETUP)
self.quittb = gtk.ToolButton(gtk.STOCK_QUIT)
self.toolbar.insert(self.opentb, 0)
self.toolbar.insert(self.refreshtb, 1)
self.toolbar.insert(self.aspecttb, 2)
self.toolbar.insert(self.sep, 3)
self.toolbar.insert(self.quittb, 4)
self.tooltips = gtk.Tooltips()
self.tooltips.set_tip(self.refreshtb,"Reload data files")
self.tooltips.set_tip(self.opentb,"Open a folder containing HDF5 (*.h5) data files")
self.tooltips.set_tip(self.aspecttb,"Change the graph's aspect ratio")
self.tooltips.set_tip(self.quittb,"Quit the program")
self.opentb.connect("clicked", self.choose_folder)
self.refreshtb.connect("clicked",self.folder_update)
self.aspecttb.connect("clicked",self.change_aspect_ratio)
self.quittb.connect("clicked", gtk.main_quit)
self.graph_aspect = False #Flag to change the aspect ratio of the graph, False = Auto, True = equal
############################# BOXES ###############################################
vbox = gtk.VBox()
vbox.pack_start(self.toolbar,False,False,0)
hbox=gtk.HBox()
######################### TREE VIEW #############################################
self.sw = gtk.ScrolledWindow()
self.sw.set_shadow_type(gtk.SHADOW_ETCHED_IN)
self.sw.set_policy(gtk.POLICY_NEVER, gtk.POLICY_AUTOMATIC)
hbox.pack_start(self.sw, False, False, 0)
self.store=[]
self.list_store = gtk.ListStore(str)
self.treeView = gtk.TreeView(self.list_store)
self.treeView.connect("row-activated",self.on_changed_rsm)
rendererText = gtk.CellRendererText()
self.TVcolumn = gtk.TreeViewColumn("RSM data files", rendererText, text=0)
self.TVcolumn.set_sort_column_id(0)
self.treeView.append_column(self.TVcolumn)
self.sw.add(self.treeView)
self.GUI_current_folder = self.DATA_current_folder = os.getcwd()
#******************************************************************
# Notebooks
#******************************************************************
self.notebook = gtk.Notebook()
self.page_GUI = gtk.HBox()
self.page_conversion = gtk.VBox()
self.page_XRDML = gtk.VBox()
######################################FIGURES####################33
#self.page_single_figure = gtk.HBox()
self.midle_panel = gtk.VBox()
self.rsm = ""
self.rsm_choosen = ""
self.my_notes = []
self.lines = []
self.points=[]
self.polygons=[]
self.fig=Figure(dpi=100)
## Draw line for arbitrary profiles
self.arb_lines_X = []
self.arb_lines_Y = []
self.arb_line_points = 0
#self.ax = self.fig.add_subplot(111)
self.ax = self.fig.add_axes([0.1,0.2,0.7,0.7])
self.fig.subplots_adjust(left=0.1,bottom=0.20, top=0.90)
self.vmin = 0
self.vmax = 1000
self.vmax_range = self.vmax
self.canvas = FigureCanvas(self.fig)
Fig_hbox = gtk.HBox()
self.Export_HQ_Image_btn = gtk.Button("Save HQ image")
self.Export_HQ_Image_btn.connect("clicked", self.Export_HQ_Image)
self.main_figure_navBar = NavigationToolbar(self.canvas, self)
self.cursor = Cursor(self.ax, color='k', linewidth=1, useblit=True)
#Global color bar
self.cax = self.fig.add_axes([0.85, 0.20, 0.03, 0.70])#left,bottom,width,height
#self.canvas.mpl_connect("motion_notify_event",self.on_motion)
self.canvas.mpl_connect("button_press_event",self.on_press)
#self.canvas.mpl_connect("button_release_event",self.on_release)
self.mouse_moved = False #If click without move: donot zoom the image
Fig_hbox.pack_start(self.Export_HQ_Image_btn, False, False, 0)
Fig_hbox.pack_start(self.main_figure_navBar, True,True, 0)
self.midle_panel.pack_start(Fig_hbox, False,False, 0)
self.midle_panel.pack_start(self.canvas, True,True, 2)
self.page_GUI.pack_start(self.midle_panel, True,True, 0)
#hbox.pack_start(self.midle_panel, True,True, 0)
########################################## RIGHT PANEL ###################
self.right_panel = gtk.VBox(False,0)
self.linear_scale_btn = gtk.ToggleButton("Linear scale")
self.linear_scale_btn.set_usize(30,0)
self.linear_scale_btn.connect("toggled",self.log_update)
self.log_scale=0
#self.wavelength_txt = gtk.Label("Energy (eV)")
##self.wavelength_txt.set_alignment(1,0.5)
#self.wavelength_field = gtk.Entry()
#self.wavelength_field.set_text("8333")
#self.wavelength_field.set_usize(30,0)
#self.lattice_const_txt = gtk.Label("Lattice constant (nm)")
#self.lattice_const_txt.set_alignment(1,0.5)
#self.lattice_const = gtk.Entry()
#self.lattice_const.set_text("0.5431")
#self.lattice_const.set_usize(30,0)
self.int_range_txt = gtk.Label("Integration range")
self.int_range_txt.set_alignment(1,0.5)
self.int_range = gtk.Entry()
self.int_range.set_text("0.05")
self.int_range.set_usize(30,0)
self.fitting_range_txt = gtk.Label("Fitting range")
self.fitting_range_txt.set_alignment(1,0.5)
self.fitting_range = gtk.Entry()
self.fitting_range.set_text("0.1")
self.fitting_range.set_usize(30,0)
# ********** Set the default values for configuration *************
self.plotXYprofiles_btn = gtk.RadioButton(None,"Plot X,Y profiles")
self.plotXYprofiles_btn.set_active(False)
self.arbitrary_profiles_btn = gtk.RadioButton(self.plotXYprofiles_btn,"Arbitrary profiles")
self.rectangle_profiles_btn = gtk.RadioButton(self.plotXYprofiles_btn,"ROI projection")
self.option_table = gtk.Table(4,3,False)#Pack the options
self.option_table.attach(self.linear_scale_btn, 0,1,0,1)
self.option_table.attach(self.plotXYprofiles_btn,0,1,1,2)
self.option_table.attach(self.arbitrary_profiles_btn,0,1,2,3)
self.option_table.attach(self.rectangle_profiles_btn,0,1,3,4)
# self.option_table.attach(self.wavelength_txt,1,2,0,1)
# self.option_table.attach(self.wavelength_field,2,3,0,1)
# self.option_table.attach(self.lattice_const_txt,1,2,1,2)
# self.option_table.attach(self.lattice_const, 2,3,1,2)
self.option_table.attach(self.int_range_txt, 1,2,0,1)
self.option_table.attach(self.int_range, 2,3,0,1)
self.option_table.attach(self.fitting_range_txt, 1,2,1,2)
self.option_table.attach(self.fitting_range, 2,3,1,2)
### Options for profile plots
self.profiles_log_btn = gtk.ToggleButton("Y-Log")
self.profiles_log_btn.connect("toggled",self.profiles_update)
self.profiles_export_data_btn = gtk.Button("Export data")
self.profiles_export_data_btn.connect("clicked",self.profiles_export)
self.profiles_option_box = gtk.HBox(False,0)
self.profiles_option_box.pack_start(self.profiles_log_btn, False, False, 0)
self.profiles_option_box.pack_start(self.profiles_export_data_btn, False, False, 0)
### Figure of profiles plot
self.profiles_fringes = []
self.fig_profiles = Figure()
self.profiles_ax1 = self.fig_profiles.add_subplot(211)
self.profiles_ax1.set_title("Qz profile", size=14)
self.profiles_ax2 = self.fig_profiles.add_subplot(212)
self.profiles_ax2.set_title("Qx profile", size=14)
self.profiles_canvas = FigureCanvas(self.fig_profiles)
self.profiles_canvas.set_size_request(450,50)
self.profiles_canvas.mpl_connect("button_press_event",self.profile_press)
self.profiles_navBar = NavigationToolbar(self.profiles_canvas, self)
self.cursor_pro1 = Cursor(self.profiles_ax1, color='k', linewidth=1, useblit=True)
self.cursor_pro2 = Cursor(self.profiles_ax2, color='k', linewidth=1, useblit=True)
#### Results of fitted curves
self.fit_results_table = gtk.Table(7,3, False)
title = gtk.Label("Fitted results:")
self.chi_title = gtk.Label("Qz profile")
self.tth_title = gtk.Label("Qx profile")
y0 = gtk.Label("y0:")
xc = gtk.Label("xc:")
A = gtk.Label("A:")
w = gtk.Label("FWHM:")
mu = gtk.Label("mu:")
y0.set_alignment(0,0.5)
xc.set_alignment(0,0.5)
A.set_alignment(0,0.5)
w.set_alignment(0,0.5)
mu.set_alignment(0,0.5)
self.Qz_fitted_y0 = gtk.Label()
self.Qz_fitted_xc = gtk.Label()
self.Qz_fitted_A = gtk.Label()
self.Qz_fitted_w = gtk.Label()
self.Qz_fitted_mu = gtk.Label()
self.Qx_fitted_y0 = gtk.Label()
self.Qx_fitted_xc = gtk.Label()
self.Qx_fitted_A = gtk.Label()
self.Qx_fitted_w = gtk.Label()
self.Qx_fitted_mu = gtk.Label()
self.fit_results_table.attach(title,0,3,0,1)
self.fit_results_table.attach(self.chi_title,1,2,1,2)
self.fit_results_table.attach(self.tth_title,2,3,1,2)
self.fit_results_table.attach(y0,0,1,2,3)
self.fit_results_table.attach(xc,0,1,3,4)
self.fit_results_table.attach(A,0,1,4,5)
self.fit_results_table.attach(w,0,1,5,6)
self.fit_results_table.attach(mu,0,1,6,7)
self.fit_results_table.attach(self.Qz_fitted_y0,1,2,2,3)
self.fit_results_table.attach(self.Qz_fitted_xc,1,2,3,4)
self.fit_results_table.attach(self.Qz_fitted_A,1,2,4,5)
self.fit_results_table.attach(self.Qz_fitted_w,1,2,5,6)
self.fit_results_table.attach(self.Qz_fitted_mu,1,2,6,7)
self.fit_results_table.attach(self.Qx_fitted_y0,2,3,2,3)
self.fit_results_table.attach(self.Qx_fitted_xc,2,3,3,4)
self.fit_results_table.attach(self.Qx_fitted_A,2,3,4,5)
self.fit_results_table.attach(self.Qx_fitted_w,2,3,5,6)
self.fit_results_table.attach(self.Qx_fitted_mu,2,3,6,7)
#### PACK the right panel
self.right_panel.pack_start(self.option_table, False, False, 0)
self.right_panel.pack_start(self.profiles_option_box,False,False,0)
self.right_panel.pack_start(self.profiles_navBar,False,False,0)
self.right_panel.pack_start(self.profiles_canvas,True,True,0)
self.right_panel.pack_start(self.fit_results_table, False, False, 0)
self.page_GUI.pack_end(self.right_panel,False, False,5)
#********************************************************************
# Conversion data SPEC to HDF page
#********************************************************************
self.conv_box = gtk.VBox()
self.box1 = gtk.HBox()
self.det_frame = gtk.Frame()
self.det_frame.set_label("Detector Vantec")
self.det_frame.set_label_align(0.5,0.5)
self.exp_frame = gtk.Frame()
self.exp_frame.set_label("Experiment parameters")
self.exp_frame.set_label_align(0.5,0.5)
self.conv_frame = gtk.Frame()
self.conv_frame.set_label("Data conversion: SPEC-HDF5")
self.conv_frame.set_label_align(0.5,0.5)
#self.conv_frame.set_alignment(0.5,0.5)
#********************************************************************
# Detector parameters
#********************************************************************
self.det_table = gtk.Table(6,2,False)
self.t1 = gtk.Label("Detector size (mm)")
self.t2 = gtk.Label("Number of channels")
self.t3 = gtk.Label("Center channel")
self.t4 = gtk.Label("Channels/Degree")
self.t5 = gtk.Label("ROI (from-to)")
self.t6 = gtk.Label("Orientation")
self.t1.set_alignment(0,0.5)
self.t2.set_alignment(0,0.5)
self.t3.set_alignment(0,0.5)
self.t4.set_alignment(0,0.5)
self.t5.set_alignment(0,0.5)
self.t6.set_alignment(0,0.5)
self.t1_entry = gtk.Entry()
self.t1_entry.set_text("50")
self.t2_entry = gtk.Entry()
self.t2_entry.set_text("2048")
self.t3_entry = gtk.Entry()
self.t3_entry.set_text("819.87")
self.t4_entry = gtk.Entry()
self.t4_entry.set_text("211.012")
self.small_box = gtk.HBox()
self.t5_label = gtk.Label("-")
self.t5_entry1 = gtk.Entry()
self.t5_entry1.set_text("40")
self.t5_entry2 = gtk.Entry()
self.t5_entry2.set_text("1300")
self.small_box.pack_start(self.t5_entry1,True, True,0)
self.small_box.pack_start(self.t5_label,True, True,0)
self.small_box.pack_start(self.t5_entry2,True, True,0)
self.t6_entry = gtk.combo_box_new_text()
self.t6_entry.append_text("Up (zero on the bottom)")
self.t6_entry.append_text("Down (zero on the top)")
self.t6_entry.set_active(1)
self.det_table.attach(self.t1, 0,1,0,1)
self.det_table.attach(self.t2, 0,1,1,2)
self.det_table.attach(self.t3, 0,1,2,3)
self.det_table.attach(self.t4, 0,1,3,4)
self.det_table.attach(self.t5, 0,1,4,5)
self.det_table.attach(self.t6, 0,1,5,6)
self.det_table.attach(self.t1_entry, 1,2,0,1)
self.det_table.attach(self.t2_entry, 1,2,1,2)
self.det_table.attach(self.t3_entry, 1,2,2,3)
self.det_table.attach(self.t4_entry, 1,2,3,4)
self.det_table.attach(self.small_box, 1,2,4,5)
self.det_table.attach(self.t6_entry, 1,2,5,6)
self.det_table_align = gtk.Alignment()
self.det_table_align.set_padding(15,10,10,10)
self.det_table_align.set(0.5, 0.5, 1.0, 1.0)
self.det_table_align.add(self.det_table)
self.det_frame.add(self.det_table_align)
#********************************************************************
# Experiment parameters
#********************************************************************
self.exp_table = gtk.Table(6,2,False)
self.e1 = gtk.Label("Substrate material:")
self.e1_other = gtk.Label("If other:")
self.e2 = gtk.Label("Energy (eV)")
self.e3 = gtk.Label("Attenuation coefficient file")
self.e4 = gtk.Label("Foil colunm name (in SPEC file)")
self.e5 = gtk.Label("Monitor colunm name (in SPEC file)")
self.e6 = gtk.Label("Reference monitor (for normalization)")
self.e1.set_alignment(0,0.5)
self.e1_other.set_alignment(1,0.5)
self.e2.set_alignment(0,0.5)
self.e3.set_alignment(0,0.5)
self.e4.set_alignment(0,0.5)
self.e5.set_alignment(0,0.5)
self.e6.set_alignment(0,0.5)
#self.e1_entry = gtk.Label("Si for now")
self.e1_entry = gtk.combo_box_new_text()
self.e1_entry.append_text("-- other")
self.e1_entry.append_text("Si")
self.e1_entry.append_text("Ge")
self.e1_entry.append_text("GaAs")
self.e1_entry.append_text("GaP")
self.e1_entry.append_text("GaSb")
self.e1_entry.append_text("InAs")
self.e1_entry.append_text("InP")
self.e1_entry.append_text("InSb")
self.e1_entry.set_active(1)
self.e1_entry_other = gtk.Entry()
self.e1_entry_other.set_text("")
self.e2_entry = gtk.Entry()
self.e2_entry.set_text("8333")
self.e3_box = gtk.HBox()
self.e3_path =gtk.Entry()
self.e3_browse = gtk.Button("Browse")
self.e3_browse.connect("clicked", self.select_file, self.e3_path, "A")
self.e3_box.pack_start(self.e3_path, False, False, 0)
self.e3_box.pack_start(self.e3_browse, False, False, 0)
self.e4_entry = gtk.Entry()
self.e4_entry.set_text("pfoil")
self.e5_entry = gtk.Entry()
self.e5_entry.set_text("vct3")
self.e6_entry = gtk.Entry()
self.e6_entry.set_text("1e6")
substrate_box1 = gtk.HBox()
substrate_box2 = gtk.HBox()
substrate_box1.pack_start(self.e1, False, False, 0)
substrate_box1.pack_start(self.e1_entry, False, False, 0)
substrate_box2.pack_start(self.e1_other, False, False, 0)
substrate_box2.pack_start(self.e1_entry_other, False, False, 0)
self.exp_table.attach(substrate_box1, 0,1,0,1)
self.exp_table.attach(self.e2, 0,1,1,2)
self.exp_table.attach(self.e3, 0,1,2,3)
self.exp_table.attach(self.e4, 0,1,3,4)
self.exp_table.attach(self.e5, 0,1,4,5)
self.exp_table.attach(self.e6, 0,1,5,6)
self.exp_table.attach(substrate_box2, 1,2,0,1)
self.exp_table.attach(self.e2_entry, 1,2,1,2)
self.exp_table.attach(self.e3_box, 1,2,2,3)
self.exp_table.attach(self.e4_entry, 1,2,3,4)
self.exp_table.attach(self.e5_entry, 1,2,4,5)
self.exp_table.attach(self.e6_entry, 1,2,5,6)
self.exp_table_align = gtk.Alignment()
self.exp_table_align.set_padding(15,10,10,10)
self.exp_table_align.set(0.5, 0.5, 1.0, 1.0)
self.exp_table_align.add(self.exp_table)
self.exp_frame.add(self.exp_table_align)
#********************************************************************
# Data conversion information
#********************************************************************
self.conv_table = gtk.Table(6,3,False)
self.c1 = gtk.Label("Spec file")
self.c2 = gtk.Label("MCA file")
self.c3 = gtk.Label("Destination folder")
self.c4 = gtk.Label("Scan number (from-to)")
self.c5 = gtk.Label("Description for each RSM (optional-separate by comma)")
self.c6 = gtk.Label("Problem of foil delay (foil[n]-->data[n+1])")
self.c1.set_alignment(0,0.5)
self.c2.set_alignment(0,0.5)
self.c3.set_alignment(0,0.5)
self.c4.set_alignment(0,0.5)
self.c5.set_alignment(0,0.5)
self.c6.set_alignment(0,0.5)
self.c1_entry1 = gtk.Entry()
self.c2_entry1 = gtk.Entry()
self.c3_entry1 = gtk.Entry()
self.c4_entry1 = gtk.Entry()
self.c5_entry1 = gtk.Entry()
self.c5_entry1.set_text("")
self.c6_entry = gtk.CheckButton()
self.c1_entry2 = gtk.Button("Browse SPEC")
self.c2_entry2 = gtk.Button("Browse MCA")
self.c3_entry2 = gtk.Button("Browse Folder")
self.c4_entry2 = gtk.Entry()
self.c1_entry2.connect("clicked", self.select_file, self.c1_entry1, "S")
self.c2_entry2.connect("clicked", self.select_file, self.c2_entry1, "M")
self.c3_entry2.connect("clicked", self.select_folder, self.c3_entry1, "D")
self.conv_table.attach(self.c1, 0,1,0,1)
self.conv_table.attach(self.c2, 0,1,1,2)
self.conv_table.attach(self.c3, 0,1,2,3)
self.conv_table.attach(self.c4, 0,1,3,4)
self.conv_table.attach(self.c5, 0,1,4,5)
self.conv_table.attach(self.c6, 0,1,5,6)
self.conv_table.attach(self.c1_entry1, 1,2,0,1)
self.conv_table.attach(self.c2_entry1, 1,2,1,2)
self.conv_table.attach(self.c3_entry1, 1,2,2,3)
self.conv_table.attach(self.c4_entry1, 1,2,3,4)
self.conv_table.attach(self.c5_entry1, 1,3,4,5)
self.conv_table.attach(self.c6_entry, 1,2,5,6)
self.conv_table.attach(self.c1_entry2, 2,3,0,1)
self.conv_table.attach(self.c2_entry2, 2,3,1,2)
self.conv_table.attach(self.c3_entry2, 2,3,2,3)
self.conv_table.attach(self.c4_entry2, 2,3,3,4)
self.conv_table_align = gtk.Alignment()
self.conv_table_align.set_padding(15,10,10,10)
self.conv_table_align.set(0.5, 0.5, 1.0, 1.0)
self.conv_table_align.add(self.conv_table)
self.conv_frame.add(self.conv_table_align)
#********************************************************************
# The RUN button
#********************************************************************
self.run_conversion = gtk.Button("Execute")
self.run_conversion.connect("clicked", self.spec2HDF)
self.run_conversion.set_size_request(50,30)
self.show_info = gtk.Label()
#********************************************************************
# Pack the frames
#********************************************************************
self.box1.pack_start(self.det_frame,padding=15)
self.box1.pack_end(self.exp_frame, padding =15)
self.conv_box.pack_start(self.box1,padding=15)
self.conv_box.pack_start(self.conv_frame,padding=5)
self.conv_box.pack_start(self.run_conversion, False,False,10)
self.conv_box.pack_start(self.show_info, False,False,10)
self.page_conversion.pack_start(self.conv_box,False, False,20)
#********************************************************************
# Conversion XRDML data to HDF
#********************************************************************
self.XRDML_conv_box = gtk.VBox()
self.Instrument_table = gtk.Table(1,4,True)
self.Inst_txt = gtk.Label("Instrument:")
self.Inst_txt.set_alignment(0,0.5)
self.Instrument = gtk.combo_box_new_text()
self.Instrument.append_text("Bruker")
self.Instrument.append_text("PANalytical")
self.Instrument.set_active(0)
self.Instrument_table.attach(self.Inst_txt,0,1,0,1)
self.Instrument_table.attach(self.Instrument, 1,2,0,1)
self.Instrument.connect("changed",self.Change_Lab_Instrument)
self.choosen_instrument = self.Instrument.get_active_text()
self.XRDML_table = gtk.Table(7,4,True)
self.XRDML_tooltip = gtk.Tooltips()
self.XRDML_substrate_txt = gtk.Label("Substrate material:")
self.XRDML_substrate_other_txt = gtk.Label("If other:")
self.XRDML_substrate_inplane_txt= gtk.Label("In-plane direction (e.g. 1 1 0) - optional")
self.XRDML_substrate_outplane_txt= gtk.Label("Out-of-plane direction (e.g. 0 0 1) - optional")
self.XRDML_reflection_txt = gtk.Label("Reflection (H K L) - optional:")
self.XRDML_energy_txt = gtk.Label("Energy (eV) - optional:")
self.XRDML_description_txt = gtk.Label("Description of the sample:")
self.XRDML_xrdml_file_txt = gtk.Label("Select RAW file:")
self.XRDML_destination_txt = gtk.Label("Select a destination folder:")
self.XRDML_tooltip.set_tip(self.XRDML_substrate_txt, "Substrate material")
self.XRDML_tooltip.set_tip(self.XRDML_substrate_other_txt, "The substrate material, i.e. Al, SiO2, CdTe, GaN,...")
self.XRDML_tooltip.set_tip(self.XRDML_substrate_inplane_txt, "The substrate in-plane and out-of-plane directions - used to calculate the orientation matrix.")
self.XRDML_tooltip.set_tip(self.XRDML_reflection_txt, "H K L, separated by spaces, e.g. 2 2 4 (0 0 0 for a XRR map). This is used for offset correction.")
self.XRDML_tooltip.set_tip(self.XRDML_energy_txt, "If empty, the default Cu K_alpha_1 will be used.")
self.XRDML_tooltip.set_tip(self.XRDML_description_txt, "Description of the sample, this will be the name of the converted file. If empty, it will be named 'RSM.h5'")
self.XRDML_tooltip.set_tip(self.XRDML_xrdml_file_txt, "Select the data file recorded by the chosen equipment")
self.XRDML_tooltip.set_tip(self.XRDML_destination_txt, "Select a destination folder to store the converted file.")
self.XRDML_substrate_txt.set_alignment(0,0.5)
self.XRDML_substrate_other_txt.set_alignment(1,0.5)
self.XRDML_substrate_inplane_txt.set_alignment(0,0.5)
self.XRDML_substrate_outplane_txt.set_alignment(1,0.5)
self.XRDML_reflection_txt.set_alignment(0,0.5)
self.XRDML_energy_txt.set_alignment(0,0.5)
self.XRDML_description_txt.set_alignment(0,0.5)
self.XRDML_xrdml_file_txt.set_alignment(0,0.5)
self.XRDML_destination_txt.set_alignment(0,0.5)
self.XRDML_substrate = gtk.combo_box_new_text()
self.XRDML_substrate.append_text("-- other")
self.XRDML_substrate.append_text("Si")
self.XRDML_substrate.append_text("Ge")
self.XRDML_substrate.append_text("GaAs")
self.XRDML_substrate.append_text("GaN")
self.XRDML_substrate.append_text("GaP")
self.XRDML_substrate.append_text("GaSb")
self.XRDML_substrate.append_text("InAs")
self.XRDML_substrate.append_text("InP")
self.XRDML_substrate.append_text("InSb")
self.XRDML_substrate.append_text("Al2O3")
self.XRDML_substrate.set_active(0)
self.XRDML_substrate_other = gtk.Entry()
self.XRDML_substrate_other.set_text("")
self.XRDML_substrate_inplane = gtk.Entry()
self.XRDML_substrate_inplane.set_text("")
self.XRDML_substrate_outplane = gtk.Entry()
self.XRDML_substrate_outplane.set_text("")
self.XRDML_reflection = gtk.Entry()
self.XRDML_reflection.set_text("")
self.XRDML_energy = gtk.Entry()
self.XRDML_energy.set_text("")
self.XRDML_description = gtk.Entry()
self.XRDML_description.set_text("")
self.XRDML_xrdml_file_path = gtk.Entry()
self.XRDML_destination_path = gtk.Entry()
self.XRDML_xrdml_file_browse = gtk.Button("Browse RAW file")
self.XRDML_destination_browse= gtk.Button("Browse destination folder")
self.XRDML_xrdml_file_browse.connect("clicked", self.select_file, self.XRDML_xrdml_file_path, "S")
self.XRDML_destination_browse.connect("clicked", self.select_folder, self.XRDML_destination_path, "D")
self.XRDML_table.attach(self.XRDML_substrate_txt, 0,1,0,1)
self.XRDML_table.attach(self.XRDML_substrate, 1,2,0,1)
self.XRDML_table.attach(self.XRDML_substrate_other_txt, 2,3,0,1)
self.XRDML_table.attach(self.XRDML_substrate_other, 3,4,0,1)
self.XRDML_table.attach(self.XRDML_substrate_inplane_txt, 0,1,1,2)
self.XRDML_table.attach(self.XRDML_substrate_inplane, 1,2,1,2)
self.XRDML_table.attach(self.XRDML_substrate_outplane_txt, 2,3,1,2)
self.XRDML_table.attach(self.XRDML_substrate_outplane, 3,4,1,2)
self.XRDML_table.attach(self.XRDML_reflection_txt, 0,1,2,3)
self.XRDML_table.attach(self.XRDML_reflection, 1,2,2,3)
self.XRDML_table.attach(self.XRDML_energy_txt,0,1,3,4)
self.XRDML_table.attach(self.XRDML_energy, 1,2,3,4)
self.XRDML_table.attach(self.XRDML_description_txt, 0,1,4,5)
self.XRDML_table.attach(self.XRDML_description, 1,2,4,5)
self.XRDML_table.attach(self.XRDML_xrdml_file_txt, 0,1,5,6)
self.XRDML_table.attach(self.XRDML_xrdml_file_path, 1,2,5,6)
self.XRDML_table.attach(self.XRDML_xrdml_file_browse, 2,3,5,6)
self.XRDML_table.attach(self.XRDML_destination_txt, 0,1,6,7)
self.XRDML_table.attach(self.XRDML_destination_path, 1,2,6,7)
self.XRDML_table.attach(self.XRDML_destination_browse, 2,3,6,7)
#********************************************************************
# The RUN button
#********************************************************************
self.XRDML_run = gtk.Button("Execute")
self.XRDML_run.connect("clicked", self.Convert_Lab_Source)
self.XRDML_run.set_size_request(50,30)
self.XRDML_show_info = gtk.Label()
#********************************************************************
# Pack the XRDML options
#********************************************************************
self.XRDML_conv_box.pack_start(self.Instrument_table, False, False,5)
self.XRDML_conv_box.pack_start(self.XRDML_table, False, False, 10)
self.XRDML_conv_box.pack_start(self.XRDML_run, False, False, 5)
self.XRDML_conv_box.pack_start(self.XRDML_show_info, False,False,10)
self.page_XRDML.pack_start(self.XRDML_conv_box,False, False,20)
#********************************************************************
# Pack the notebook
#********************************************************************
self.notebook.append_page(self.page_GUI, gtk.Label("RSM GUI"))
self.notebook.append_page(self.page_conversion, gtk.Label("ESRF-MCA spec file (Vantec)"))
self.notebook.append_page(self.page_XRDML, gtk.Label("Lab instruments"))
hbox.pack_start(self.notebook)
vbox.pack_start(hbox,True,True,0)
############################### Sliders ######################################
#sld_box = gtk.Fixed()
sld_box = gtk.HBox(False,2)
self.vmin_txt = gtk.Label("Vmin")
self.vmin_txt.set_alignment(0,0.5)
#self.vmin_txt.set_justify(gtk.JUSTIFY_CENTER)
self.vmax_txt = gtk.Label("Vmax")
self.vmax_txt.set_alignment(0,0.5)
#self.vmax_txt.set_justify(gtk.JUSTIFY_CENTER)
self.sld_vmin = gtk.HScale()
self.sld_vmax = gtk.HScale()
self.sld_vmin.set_size_request(200,25)
self.sld_vmax.set_size_request(200,25)
self.sld_vmin.set_range(0,self.vmax)
self.sld_vmax.set_range(0,self.vmax)
self.sld_vmax.set_value(self.vmax)
self.sld_vmin.set_value(0)
self.sld_vmin.connect('value-changed',self.scale_update)
self.sld_vmax.connect('value-changed',self.scale_update)
vmax_spin_adj = gtk.Adjustment(self.vmax, 0, self.vmax_range, 0.5, 10.0, 0.0)
self.vmax_spin_btn = gtk.SpinButton(vmax_spin_adj,1,1)
self.vmax_spin_btn.set_numeric(True)
self.vmax_spin_btn.set_wrap(True)
self.vmax_spin_btn.set_size_request(80,-1)
#self.vmax_spin_btn.set_alignment(0,0.5)
self.vmax_spin_btn.connect('value-changed',self.scale_update_spin)
vmin_spin_adj = gtk.Adjustment(self.vmin, 0, self.vmax_range, 0.5, 10.0, 0.0)
self.vmin_spin_btn = gtk.SpinButton(vmin_spin_adj,1,1)
self.vmin_spin_btn.set_numeric(True)
self.vmin_spin_btn.set_wrap(True)
self.vmin_spin_btn.set_size_request(80,-1)
#self.vmax_spin_btn.set_alignment(0,0.5)
self.vmin_spin_btn.connect('value-changed',self.scale_update_spin)
sld_box.pack_start(self.vmin_txt,False,False,0)
sld_box.pack_start(self.sld_vmin,False,False,0)
sld_box.pack_start(self.vmin_spin_btn,False,False,0)
sld_box.pack_start(self.vmax_txt,False,False,0)
sld_box.pack_start(self.sld_vmax,False,False,0)
sld_box.pack_start(self.vmax_spin_btn,False,False,0)
#sld_box.pack_start(self.slider_reset_btn,False,False,0)
vbox.pack_start(sld_box,False,False,3)
self.add(vbox)
self.connect("destroy", gtk.main_quit)
self.show_all()
#########################################################################################################################
def format_coord(self, x, y):
#***** Add intensity information into the navigation toolbar *******************************
numrows, numcols = (self.gridder.data.T).shape
col,row = xu.analysis.line_cuts.getindex(x, y, self.gridder.xaxis, self.gridder.yaxis)
if col>=0 and col<numcols and row>=0 and row<numrows:
z = self.gridder.data.T[row,col]
return 'x=%1.4f, y=%1.4f, z=%1.4f'%(x, y, z)
else:
return 'x=%1.4f, y=%1.4f'%(x, y)
def pro_format_coord(self,x,y):
return 'x=%.4f, y=%.1f'%(x,y)
def init_image(self,log=False):
self.ax.cla()
self.cax.cla()
#print "Initialize image ..."
#
#self.clevels = np.linspace(self.vmin, self.vmax, 100)
if log:
self.img = self.ax.pcolormesh(self.gridder.xaxis, self.gridder.yaxis, np.log10(self.gridder.data.T),vmin=self.vmin, vmax=self.vmax)
#self.img = self.ax.contour(self.gridder.xaxis, self.gridder.yaxis, np.log10(self.gridder.data.T), self.clevels, vmin=self.vmin, vmax=self.vmax)
else:
self.img = self.ax.pcolormesh(self.gridder.xaxis, self.gridder.yaxis, self.gridder.data.T,vmin=self.vmin, vmax=self.vmax)
#self.img = self.ax.contour(self.gridder.xaxis, self.gridder.yaxis, self.gridder.data.T, self.clevels, vmin=self.vmin, vmax=self.vmax)
self.img.cmap.set_under(alpha=0)
self.ax.axis([self.gridder.xaxis.min(), self.gridder.xaxis.max(), self.gridder.yaxis.min(), self.gridder.yaxis.max()])
#self.ax.set_aspect('equal')
xlabel = r'$Q_x (nm^{-1})$'
ylabel = r'$Q_z (nm^{-1})$'
self.ax.set_xlabel(xlabel)
self.ax.set_ylabel(ylabel)
self.ax.yaxis.label.set_size(20)
self.ax.xaxis.label.set_size(20)
self.ax.set_title(self.rsm_description,fontsize=20)
self.ax.format_coord = self.format_coord
self.cb = self.fig.colorbar(self.img, cax = self.cax, format="%.1f")#format=fm
if self.log_scale==1:
self.cb.set_label(r'$Log_{10}\ (Intensity)\ [arb.\ units]$',fontsize=20)
else:
self.cb.set_label(r'$Intensity\ (Counts\ per\ second)$', fontsize=20)
self.cb.locator = MaxNLocator(nbins=6)
#self.cursor = Cursor(self.ax, color='k', linewidth=1, useblit=True)
#print "Image is initialized."
def change_aspect_ratio(self,w):
self.graph_aspect = not (self.graph_aspect)
if self.graph_aspect == True:
self.ax.set_aspect('equal')
else:
self.ax.set_aspect('auto')
self.canvas.draw()
def on_changed_rsm(self,widget,row,col):
#print "************Change RSM*************"
gc.collect() #Clear unused variables to gain memory
#************** Remind the structure of these HDF5 files:
# ************* file=[scan_id={'eta'=[data], '2theta'=[data], 'intensity'=[data], 'description'='RSM 004 ...'}]
self.clear_notes()
#self.init_image()
model = widget.get_model()
self.rsm_choosen = model[row][0]
self.rsm = join(self.GUI_current_folder,self.rsm_choosen)#file path
self.rsm_info = h5.File(self.rsm,'r')#HDF5 object that collects all information of this scan
#self.ax.set_title(self.rsm_choosen,fontsize=20)
### Data Loading ##
groups = self.rsm_info.keys()
scan = groups[0]
self.scan = self.rsm_info[scan]
self.data = self.scan.get('intensity').value
self.Qx = self.scan.get('Qx').value
self.Qy = self.scan.get('Qy').value
self.Qz = self.scan.get('Qz').value
self.rsm_description = self.scan.get('description').value
self.rsm_info.close()
#print "Data are successfully loaded."
self.gridder = xu.Gridder2D(self.data.shape[0],self.data.shape[1])
#print "Gridder is calculated."
# MM = self.data.max()
# M = np.log10(MM)
# data = flat_data(self.data,0,M)
self.gridder(self.Qx, self.Qz, self.data)
self.data = self.gridder.data.T
self.vmin=self.data.min()
self.vmax=self.data.max()
#print "Starting scale_plot()"
self.scale_plot()
#self.slider_update()
def scale_plot(self):
#print "Scale_plot() is called."
data = self.data.copy()
#self.init_image()
if self.linear_scale_btn.get_active():
self.linear_scale_btn.set_label("--> Linear scale")
data = np.log10(data)
#print data.max()
self.init_image(log=True)
actual_vmin = self.sld_vmin.get_value()
actual_vmax = self.sld_vmax.get_value()
self.vmax = np.log10(actual_vmax) if self.log_scale == 0 else actual_vmax
if actual_vmin == 0:
self.vmin=0
elif actual_vmin >0:
self.vmin = np.log10(actual_vmin) if self.log_scale == 0 else actual_vmin
self.vmax_range = data.max()
self.log_scale = 1
#log=True
else:
self.linear_scale_btn.set_label("--> Log scale")
self.init_image(log=False)
#print "Calculating min max and update slider..."
actual_vmin = self.sld_vmin.get_value()
actual_vmax = self.sld_vmax.get_value()
#print "Actual vmax: ",actual_vmax
if self.log_scale == 1:
self.vmax = np.power(10.,actual_vmax)
else:
self.vmax = actual_vmax
self.vmax_range = data.max()
if actual_vmin ==0:
self.vmin = 0
elif actual_vmin>0:
if self.log_scale == 0:
self.vmin = actual_vmin
elif self.log_scale == 1:
self.vmin = np.power(10,actual_vmin)
self.log_scale = 0
#log=False
#print "Min max are calculated."
self.sld_vmax.set_range(-6,self.vmax_range)
self.sld_vmin.set_range(-6,self.vmax_range)
#self.init_image(log)
self.slider_update()
def log_update(self,widget):
self.scale_plot()
if self.log_scale==1:
self.cb.set_label(r'$Log_{10}\ (Counts\ per\ second)\ [arb.\ units]$',fontsize=18)
else:
self.cb.set_label(r'$Intensity\ (Counts\ per\ second)$', fontsize=18)
#self.slider_update()
def scale_update(self,widget):
#print "Scale_update() is called."
self.vmin = self.sld_vmin.get_value()
self.vmax = self.sld_vmax.get_value()
self.vmin_spin_btn.set_value(self.vmin)
self.vmax_spin_btn.set_value(self.vmax)
self.slider_update()
def scale_update_spin(self,widget):
#print "Spin_update() is called"
self.vmin = self.vmin_spin_btn.get_value()
self.vmax = self.vmax_spin_btn.get_value()
self.slider_update()
def slider_update(self):
#print "slider_update() is called"
#self.img.set_clim(self.vmin, self.vmax)
self.sld_vmax.set_value(self.vmax)
self.sld_vmin.set_value(self.vmin)
if self.linear_scale_btn.get_active():
self.vmin_spin_btn.set_adjustment(gtk.Adjustment(self.vmin, 0, self.vmax_range, 0.1, 1.0, 0))
self.vmax_spin_btn.set_adjustment(gtk.Adjustment(self.vmax, 0, self.vmax_range, 0.1, 1.0, 0))
else:
self.vmin_spin_btn.set_adjustment(gtk.Adjustment(self.vmin, 0, self.vmax_range, 10, 100, 0))
self.vmax_spin_btn.set_adjustment(gtk.Adjustment(self.vmax, 0, self.vmax_range, 10, 100, 0))
#self.vmax_spin_btn.update()
self.img.set_clim(self.vmin, self.vmax)
self.ax.relim()
self.canvas.draw()
#print "slider_update() stoped."
def choose_folder(self, w):
dialog = gtk.FileChooserDialog(title="Select a data folder",action=gtk.FILE_CHOOSER_ACTION_SELECT_FOLDER, buttons = (gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL, gtk.STOCK_OPEN, gtk.RESPONSE_OK))
dialog.set_current_folder(self.GUI_current_folder)
response=dialog.run()
if response==gtk.RESPONSE_OK:
folder=dialog.get_filename()
folder = folder.decode('utf8')
folder_basename = folder.split("/")[-1]
#print folder_basename
self.store= [i for i in listdir(folder) if isfile(join(folder,i)) and (i.endswith(".data") or i.endswith(".h5"))]
self.GUI_current_folder = folder
#print store
if len(self.store)>0:
self.list_store.clear()
for i in self.store:
self.list_store.append([i])
self.TVcolumn.set_title(folder_basename)
else:
pass
else:
pass
dialog.destroy()
def folder_update(self, w):
folder = self.GUI_current_folder
if folder != os.getcwd():
store= [i for i in listdir(folder) if isfile(join(folder,i)) and (i.endswith(".data") or i.endswith(".h5"))]
self.store=[]
self.list_store.clear()
for i in store:
self.list_store.append([i])
self.store.append(i)
def arbitrary_line_cut(self, x, y):
#**** num: integer - number of points to be extracted
#**** convert Q coordinates to pixel coordinates
x0, y0 = xu.analysis.line_cuts.getindex(x[0], y[0], self.gridder.xaxis, self.gridder.yaxis)
x1, y1 = xu.analysis.line_cuts.getindex(x[1], y[1], self.gridder.xaxis, self.gridder.yaxis)
num = int(np.hypot(x1-x0, y1-y0)) #number of points that will be plotted
xi, yi = np.linspace(x0, x1, num), np.linspace(y0, y1, num)
# the same interpolated intensity profile serves both axes; it is
# plotted against the Qx and Qz coordinates respectively
profiles_data_X = profiles_data_Y = scipy.ndimage.map_coordinates(self.gridder.data, np.vstack((xi,yi)))
coor_X_export,coor_Y_export = np.linspace(x[0], x[1], num), np.linspace(y[0], y[1], num)
#coor_X_export = np.sort(coor_X_export)
#coor_Y_export = np.sort(coor_Y_export)
return coor_X_export,coor_Y_export, profiles_data_X, profiles_data_Y
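# The cut above relies on scipy.ndimage.map_coordinates, which samples a
# 2D array at fractional (row, col) positions by spline interpolation.
# Minimal standalone sketch (synthetic array; shapes are assumptions):
#   img = np.arange(25.).reshape(5, 5)
#   rows = np.linspace(0., 4., 9)
#   cols = np.linspace(0., 4., 9)
#   cut = scipy.ndimage.map_coordinates(img, np.vstack((rows, cols)))
#   # 'cut' holds 9 interpolated values along the image diagonal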
def boundary_rectangles(self, x, y):
"""
IN : x[0,1], y[0,1]: positions of the line cut (arbitrary direction)
OUT: ROI rectangle: the rectangle in which the data will be taken;
     bound rectangle: the limit values for the Qx, Qz line cuts (min, max)
"""
x = np.asarray(x)
y = np.asarray(y)
alpha = np.arctan(abs((y[1]-y[0])/(x[1]-x[0]))) # inclination angle of the ROI w.r.t. the horizontal line; mind the sign of alpha
#print np.degrees(alpha)
T = self.largueur_int/2.
if np.degrees(alpha)>55.0:
inc_x = 1
inc_y = 0
else:
inc_x = 0
inc_y = 1
y1 = y + T*inc_y
y2 = y - T*inc_y
x1 = x + T*inc_x
x2 = x - T*inc_x
#These positions are in reciprocal space units. The boundary order will be: 1-2-2-1
roi_rect = [[y1[0],x1[0]],[y2[0],x2[0]],[y2[1],x2[1]],[y1[1],x1[1]],[y1[0],x1[0]]]
roi_rect = path.Path(roi_rect)
#***************** Get the corresponding index of these points ***************************
i1,j1 = xu.analysis.line_cuts.getindex(x1[0], y1[0], self.gridder.xaxis, self.gridder.yaxis)
i2,j2 = xu.analysis.line_cuts.getindex(x2[0], y2[0], self.gridder.xaxis, self.gridder.yaxis)
i3,j3 = xu.analysis.line_cuts.getindex(x2[1], y2[1], self.gridder.xaxis, self.gridder.yaxis)
i4,j4 = xu.analysis.line_cuts.getindex(x1[1], y1[1], self.gridder.xaxis, self.gridder.yaxis)
roi_box = [[j1,i1],[j2,i2],[j3,i3],[j4,i4],[j1,i1]]
roi_box = path.Path(roi_box)
#******* Calculate the limit boundary rectangle
y_tmp = np.vstack((y1, y2))
x_tmp = np.vstack((x1, x2))
y_min = y_tmp.min()
y_max = y_tmp.max()
x_min = x_tmp.min()
x_max = x_tmp.max()
bound_rect = [x_min, x_max, y_min, y_max]
bound_rect = np.asarray(bound_rect)
contours = roi_rect.vertices
p=self.ax.plot(contours[:,1], contours[:,0], linewidth=1.5, color='white')
self.polygons.append(p[0])
self.canvas.draw()
return roi_box, bound_rect
def extract_roi_data(self, roi_box, bound_rect):
#***** Extraction of the ROI defined by the ROI box ******************
qx_min = bound_rect[0]
qx_max = bound_rect[1]
qz_min = bound_rect[2]
qz_max = bound_rect[3]
#***** Getting index of the boundary points in order to calculate the length of the extracted array
ixmin, izmin = xu.analysis.line_cuts.getindex(qx_min, qz_min, self.gridder.xaxis, self.gridder.yaxis)
ixmax, izmax = xu.analysis.line_cuts.getindex(qx_max, qz_max, self.gridder.xaxis, self.gridder.yaxis)
x_steps = ixmax - ixmin +1
z_steps = izmax - izmin +1
qx_coor = np.linspace(qx_min, qx_max, x_steps)
qz_coor = np.linspace(qz_min, qz_max, z_steps)
ROI = np.zeros(shape=(x_steps))
#****** Extract Qx line cuts ************************
for zi in range(izmin, izmax+1):
qx_int = self.gridder.data[ixmin:ixmax+1,zi].copy() # copy the slice: zeroing below must not mutate the gridded map
#****** if the point is outside the ROI box: point = 0
inpoints = []
for i in range(ixmin,ixmax+1):
inpoint= roi_box.contains_point([zi,i])
inpoints.append(inpoint)
for b in range(len(inpoints)):
if inpoints[b]==False:
qx_int[b] = 0
ROI = np.vstack((ROI, qx_int))
ROI = np.delete(ROI, 0, 0) #Delete the first line which contains zeros
#****** Sum them up! Return Qx, Qz projection zones and Qx,Qz intensity
qx_ROI = ROI.sum(axis=0)/ROI.shape[0]
qz_ROI = ROI.sum(axis=1)/ROI.shape[1]
return qx_coor, qx_ROI, qz_coor, qz_ROI
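# Projection sketch (synthetic values): summing the masked ROI along one
# axis and dividing by the number of summed elements gives the mean
# profile along the other axis, e.g.
#   roi = np.array([[1., 2.], [3., 4.]])
#   roi.sum(axis=0)/roi.shape[0]   # -> [2., 3.]   (Qx projection)
#   roi.sum(axis=1)/roi.shape[1]   # -> [1.5, 3.5] (Qz projection)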
def plot_profiles(self, x, y, cross_line=True):
if cross_line:
"""Drawing lines where I want to plot profiles"""
# ******** if this is not an arbitrary profile, x and y are not lists but just one individual point
x=x[0]
y=y[0]
hline = self.ax.axhline(y, color='k', ls='--', lw=1)
self.lines.append(hline)
vline = self.ax.axvline(x, color='k', ls='--', lw=1)
self.lines.append(vline)
"""Getting data to be plotted"""
self.coor_X_export, self.profiles_data_X = xu.analysis.line_cuts.get_qx_scan(self.gridder.xaxis, self.gridder.yaxis, self.gridder.data, y, qrange=self.largueur_int)
self.coor_Y_export, self.profiles_data_Y = xu.analysis.line_cuts.get_qz_scan(self.gridder.xaxis, self.gridder.yaxis, self.gridder.data, x, qrange=self.largueur_int)
xc = x
yc = y
""" Fitting information """
ix,iy = xu.analysis.line_cuts.getindex(x, y, self.gridder.xaxis, self.gridder.yaxis)
ix_left,iy = xu.analysis.line_cuts.getindex(x-self.fitting_width, y, self.gridder.xaxis, self.gridder.yaxis)
qx_2_fit = self.coor_X_export[ix_left:ix*2-ix_left+1]
qx_int_2_fit = self.profiles_data_X[ix_left:2*ix-ix_left+1]
X_fitted_params, X_fitted_data = fit(qx_2_fit, qx_int_2_fit,xc, cross_line)
####################axX.plot(qx_2_fit, qx_fit_data, color='red',linewidth=2)
ix,iy_down = xu.analysis.line_cuts.getindex(x, y-self.fitting_width, self.gridder.xaxis, self.gridder.yaxis)
qz_2_fit = self.coor_Y_export[iy_down:iy*2-iy_down+1]
qz_int_2_fit = self.profiles_data_Y[iy_down:iy*2-iy_down+1]
Y_fitted_params, Y_fitted_data = fit(qz_2_fit, qz_int_2_fit,yc, cross_line)
####################axY.plot(qz_2_fit, qz_fit_data, color='red',linewidth=2)
else:
#**** extract arbitrary line cut
#**** extract one single line cut:
if not self.rectangle_profiles_btn.get_active():
self.coor_X_export, self.coor_Y_export, self.profiles_data_X, self.profiles_data_Y = self.arbitrary_line_cut(x,y)
else:
roi_box,bound_rect = self.boundary_rectangles(x,y)
self.coor_X_export, self.profiles_data_X, self.coor_Y_export, self.profiles_data_Y = self.extract_roi_data(roi_box, bound_rect)
tmpX = np.sort(self.coor_X_export)
tmpY = np.sort(self.coor_Y_export)
xc = tmpX[self.profiles_data_X.argmax()]
yc = tmpY[self.profiles_data_Y.argmax()]
""" Fitting information """
X_fitted_params, X_fitted_data = fit(self.coor_X_export, self.profiles_data_X, xc, not cross_line)
Y_fitted_params, Y_fitted_data = fit(self.coor_Y_export, self.profiles_data_Y, yc, not cross_line)
qx_2_fit = self.coor_X_export
qz_2_fit = self.coor_Y_export
""" Plotting profiles """
self.profiles_ax1.cla()
self.profiles_ax2.cla()
self.profiles_ax1.format_coord = self.pro_format_coord
self.profiles_ax2.format_coord = self.pro_format_coord
#self.cursor_pro1 = Cursor(self.profiles_ax1, color='k', linewidth=1, useblit=True)
#self.cursor_pro2 = Cursor(self.profiles_ax2, color='k', linewidth=1, useblit=True)
self.profiles_ax1.plot(self.coor_Y_export, self.profiles_data_Y, color='blue', lw=3)
self.profiles_ax1.plot(qz_2_fit, Y_fitted_data, color='red', lw=1.5, alpha=0.8)
self.profiles_ax2.plot(self.coor_X_export, self.profiles_data_X, color='blue', lw=3)
self.profiles_ax2.plot(qx_2_fit, X_fitted_data, color='red', lw=1.5, alpha=0.8)
self.profiles_ax1.set_title("Qz profile", size=14)
self.profiles_ax2.set_title("Qx profile", size=14)
self.profiles_canvas.draw()
# Show the fitted results
self.Qz_fitted_y0.set_text("%.4f"%Y_fitted_params['y0'].value)
self.Qz_fitted_xc.set_text("%.4f"%Y_fitted_params['xc'].value)
self.Qz_fitted_A.set_text("%.4f"%Y_fitted_params['A'].value)
self.Qz_fitted_w.set_text("%.4f"%Y_fitted_params['w'].value)
self.Qz_fitted_mu.set_text("%.4f"%Y_fitted_params['mu'].value)
self.Qx_fitted_y0.set_text("%.4f"%X_fitted_params['y0'].value)
self.Qx_fitted_xc.set_text("%.4f"%X_fitted_params['xc'].value)
self.Qx_fitted_A.set_text("%.4f"%X_fitted_params['A'].value)
self.Qx_fitted_w.set_text("%.4f"%X_fitted_params['w'].value)
self.Qx_fitted_mu.set_text("%.4f"%X_fitted_params['mu'].value)
self.profiles_refresh()
self.canvas.draw()
def draw_pointed(self, x, y, finished=False):
#if len(self.lines)>0:
# self.clear_notes()
p=self.ax.plot(x,y,'ro')
self.points.append(p[0])
if finished:
l=self.ax.plot(self.arb_lines_X, self.arb_lines_Y, '--',linewidth=1.5, color='white')
self.lines.append(l[0])
self.canvas.draw()
def profiles_refresh(self):
""" """
if self.profiles_log_btn.get_active():
self.profiles_ax1.set_yscale('log')
self.profiles_ax2.set_yscale('log')
else:
self.profiles_ax1.set_yscale('linear')
self.profiles_ax2.set_yscale('linear')
self.profiles_canvas.draw()
#return
def profiles_update(self, widget):
self.profiles_refresh()
def profiles_export(self,widget):
""" Export X,Y profiles data in the same folder as the EDF image """
proX_fname = self.rsm.split(".")[0]+"_Qx_profile.dat"
proY_fname = self.rsm.split(".")[0]+"_Qz_profile.dat"
proX_export= np.vstack([self.coor_X_export, self.profiles_data_X])
proX_export=proX_export.T
proY_export= np.vstack([self.coor_Y_export, self.profiles_data_Y])
proY_export=proY_export.T
try:
np.savetxt(proX_fname, proX_export)
np.savetxt(proY_fname, proY_export)
self.popup_info('info','Data are successfully exported!')
except:
self.popup_info('error','ERROR! Data not exported!')
def on_press(self, event):
#******************** Plot X,Y cross profiles ***************************************************
if (event.inaxes == self.ax) and (event.button==3) and self.plotXYprofiles_btn.get_active():
x = event.xdata
y = event.ydata
xx=[]
yy=[]
xx.append(x)
yy.append(y)
self.clear_notes()
try:
self.largueur_int = float(self.int_range.get_text())
self.fitting_width = float(self.fitting_range.get_text())
self.plot_profiles(xx,yy,cross_line=True)
except:
self.popup_info("error","Please check that you have entered all the parameters correctly !")
#******************** Plot arbitrary profiles ***************************************************
elif (event.inaxes == self.ax) and (event.button==1) and (self.arbitrary_profiles_btn.get_active() or self.rectangle_profiles_btn.get_active()):
#self.clear_notes()
try:
self.largueur_int = float(self.int_range.get_text())
self.fitting_width = float(self.fitting_range.get_text())
except:
self.popup_info("error","Please check that you have entered all the parameters correctly !")
self.arb_line_points +=1
#print "Number of points clicked: ",self.arb_line_points
if self.arb_line_points>2:
self.clear_notes()
self.arb_line_points=1
x = event.xdata
y = event.ydata
self.arb_lines_X.append(x)
self.arb_lines_Y.append(y)
if len(self.arb_lines_X)<2:
finished=False
elif len(self.arb_lines_X)==2:
finished = True
self.draw_pointed(x,y,finished)#If finished clicking, connect the two points by a line
if finished:
self.plot_profiles(self.arb_lines_X, self.arb_lines_Y, cross_line=False)
self.arb_lines_X=[]
self.arb_lines_Y=[]
#self.canvas.draw()
#******************** Clear cross lines in the main image ****************************************
elif event.button==2:
self.clear_notes()
def profile_press(self, event):
""" Calculate thickness fringes """
if event.inaxes == self.profiles_ax1:
draw_fringes = True
ax = self.profiles_ax1
X_data = self.coor_Y_export
Y_data = self.profiles_data_Y
xlabel = r'$Q_z (nm^{-1})$'
title = "Linear regression of Qz fringes"
title_FFT = "Fast Fourier Transform of Qz profiles"
xlabel_FFT= "Period (nm)"
elif event.inaxes == self.profiles_ax2:
draw_fringes = True
ax = self.profiles_ax2
X_data = self.coor_X_export
Y_data = self.profiles_data_X
xlabel = r'$Q_x (nm^{-1})$'
title = "Linear regression of Qx fringes"
title_FFT = "Fast Fourier Transform of Qx profiles"
xlabel_FFT= "Period (nm)"
else:
draw_fringes = False
if draw_fringes and (event.button==1):
if len(self.profiles_fringes)>0:
self.profiles_fringes = np.asarray(self.profiles_fringes)
self.profiles_fringes = np.sort(self.profiles_fringes)
fringes_popup = PopUpFringes(self.profiles_fringes, xlabel, "Fringes order", title)
self.profiles_fringes=[]
self.clear_notes()
elif draw_fringes and (event.button == 3):
vline=ax.axvline(event.xdata, linewidth=2, color="green")
self.lines.append(vline)
self.profiles_fringes.append(event.xdata)
elif draw_fringes and event.button == 2:
XF,YF = Fourier(X_data, Y_data)
popup_window=PopUpImage(XF, YF, xlabel_FFT, "Normalized intensity", title_FFT)
self.profiles_canvas.draw()
#plt.clf()
def clear_notes(self):
"""
print "Number of notes: ",len(self.my_notes)
print "Number of lines: ",len(self.lines)
print "Number of points: ",len(self.points)
print "Number of polygons: ",len(self.polygons)
"""
if len(self.my_notes)>0:
for txt in self.my_notes:
txt.remove()
if len(self.lines)>0:
for line in self.lines:
line.remove()
if len(self.points)>0:
for p in self.points:
p.remove()
if len(self.polygons)>0:
for p in self.polygons:
p.remove()
self.canvas.draw()
self.my_notes = []
#self.profiles_notes = []
self.lines=[]
self.points=[]
self.polygons=[]
self.arb_lines_X=[]
self.arb_lines_Y=[]
self.arb_line_points = 0
def on_motion(self,event):
print "Mouse moved !"
if event.inaxes == self.ax and self.arbitrary_profiles_btn.get_active() and self.arb_line_points==1:
x = event.xdata
y = event.ydata
self.clear_notes()
line = self.ax.plot([self.arb_lines_X[0], x], [self.arb_lines_Y[0],y], 'ro-')
self.lines.append(line[0]) # keep the Line2D artist itself so clear_notes() can call remove() on it
self.canvas.draw()
def on_release(self, event):
if event.inaxes == self.ax:
if self.mouse_moved==True:
self.mouse_moved = False
def popup_info(self,info_type,text):
""" info_type = WARNING, INFO, QUESTION, ERROR """
if info_type.upper() == "WARNING":
mess_type = gtk.MESSAGE_WARNING
elif info_type.upper() == "INFO":
mess_type = gtk.MESSAGE_INFO
elif info_type.upper() == "ERROR":
mess_type = gtk.MESSAGE_ERROR
elif info_type.upper() == "QUESTION":
mess_type = gtk.MESSAGE_QUESTION
self.warning=gtk.MessageDialog(self, gtk.DIALOG_DESTROY_WITH_PARENT, mess_type, gtk.BUTTONS_CLOSE,text)
self.warning.run()
self.warning.destroy()
#********************************************************************
# Functions for the Spec-HDF5 data conversion
#********************************************************************
def select_file(self,widget,path,label):
dialog = gtk.FileChooserDialog("Select file",None,gtk.FILE_CHOOSER_ACTION_OPEN,(gtk.STOCK_CANCEL,gtk.RESPONSE_CANCEL, gtk.STOCK_OPEN, gtk.RESPONSE_OK))
dialog.set_current_folder(self.DATA_current_folder)
response = dialog.run()
if response == gtk.RESPONSE_OK:
file_choosen = dialog.get_filename()
path.set_text(file_choosen)
self.DATA_current_folder = os.path.dirname(file_choosen)
if label == "A":
self.attenuation_file = file_choosen.decode('utf8')
elif label == "S":
self.spec_file = file_choosen.decode('utf8')
elif label == "M":
self.mca_file = file_choosen.decode('utf8')
else:
pass
dialog.destroy()
def select_folder(self, widget, path, label):
dialog = gtk.FileChooserDialog(title="Select folder",action=gtk.FILE_CHOOSER_ACTION_SELECT_FOLDER, buttons = (gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL, gtk.STOCK_OPEN, gtk.RESPONSE_OK))
dialog.set_current_folder(self.DATA_current_folder)
response=dialog.run()
if response==gtk.RESPONSE_OK:
folder=dialog.get_filename()
path.set_text(folder)
self.DATA_current_folder = folder.decode('utf8')
if label == "D":
self.des_folder = folder.decode('utf8')
else:
pass
dialog.destroy()
def HKL2Q(self,H,K,L,a):
""" Q// est dans la direction [110], Qz // [001]"""
Qx = H*np.sqrt(2.)/a
Qy = K*np.sqrt(2.)/a
Qz = L/a
return [Qx, Qy, Qz]
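# Quick numerical sketch of HKL2Q (illustrative values only, not part of
# the GUI flow): for Si with a = 0.5431 nm, the symmetric (004) reflection
# gives
# Qx, Qy, Qz = self.HKL2Q(0, 0, 4, 0.5431) # -> [0.0, 0.0, ~7.365 nm^-1]
# consistent with Q// along [110] and Qz along [001] as noted above.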
def loadAmap(self,scanid,specfile,mapData,retard):
try:
psdSize = float(self.t1_entry.get_text())
Nchannels = int(self.t2_entry.get_text())
psdMin = int(self.t5_entry1.get_text())
psdMax = int(self.t5_entry2.get_text())
psd0 = float(self.t3_entry.get_text())
pixelSize = psdSize/Nchannels
pixelPerDeg = float(self.t4_entry.get_text())
distance = pixelSize * pixelPerDeg / np.tan(np.radians(1.0)) # sample-detector distance in mm
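# Geometry behind the line above: pixelPerDeg pixels span one degree on the
# detector, i.e. an arc of length pixelSize*pixelPerDeg, so the
# sample-detector radius is arc/tan(1 deg). Sketch values: pixelSize = 0.05 mm
# and pixelPerDeg = 350 give distance ~ 1003 mm.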
psdor = self.t6_entry.get_active() #psd orientation (up, down, in, out)
if psdor == 0:
psdor = 'z+'
elif psdor == 1:
psdor = 'z-'
else:
psdor = 'unknown'
energy = float(self.e2_entry.get_text())
filter_data = self.attenuation_file
monitor_col = self.e5_entry.get_text()
foil_col = self.e4_entry.get_text()
monitor_ref = float(self.e6_entry.get_text())
#****************** Calculation ************************
headers, scan_kappa = SP.ReadSpec(specfile,scanid)
Eta = scan_kappa['Eta']
print Eta.shape
tth = headers['P'][0]
omega = headers['P'][1]
tth = float(tth)
omega = float(omega)
print "Del: %.2f, Eta: %.2f"%(tth,omega)
#Si = xu.materials.Si
hxrd = xu.HXRD(self.substrate.Q(self.in_plane), self.substrate.Q(self.out_of_plane), en = energy)
hxrd.Ang2Q.init_linear(psdor,psd0, Nchannels, distance=distance, pixelwidth=pixelSize, chpdeg=pixelPerDeg)
HKL = hxrd.Ang2HKL(omega, tth)
HKL = np.asarray(HKL)
HKL = HKL.astype(int)
print "HKL = ",HKL
H=K=L=np.zeros(shape=(0,Nchannels))
for i in range(len(Eta)):
om=Eta[i]
q=hxrd.Ang2HKL(om,tth,mat=self.substrate,dettype='linear')
H = np.vstack((H,q[0]))
K = np.vstack((K,q[1]))
L = np.vstack((L,q[2]))
filtre_foil = scan_kappa[foil_col]
filtre = filtre_foil.copy()
monitor= scan_kappa[monitor_col]
foil_data = np.loadtxt(filter_data)
for f in xrange(foil_data.shape[0]):
coef = filtre_foil == f
filtre[coef] = foil_data[f,1]
#print filtre
mapData = mapData + 1e-6
if retard:
for i in range(len(filtre)-1):
mapData[i+1] = mapData[i+1]*filtre[i]
else:
for i in range(len(filtre)):
mapData[i] = mapData[i]*filtre[i]
for i in range(len(monitor)):
mapData[i] = mapData[i]*monitor_ref/monitor[i]
mapData = mapData[:,psdMin:psdMax]
H = H[:,psdMin:psdMax]
K = K[:,psdMin:psdMax]
L = L[:,psdMin:psdMax]
########## Offset correction ###############
x,y=np.unravel_index(np.argmax(mapData),mapData.shape)
H_sub = H[x,y]
K_sub = K[x,y]
L_sub = L[x,y]
H_offset = HKL[0] - H_sub
K_offset = HKL[1] - K_sub
L_offset = HKL[2] - L_sub
H = H + H_offset
K = K + K_offset
L = L + L_offset
a = self.substrate._geta1()[0] #in Angstrom
a = a/10.
Q = self.HKL2Q(H, K, L, a)
return Q,mapData
except:
self.popup_info("warning", "Please make sure that you have correctly entered the all parameters.")
return None,None
def gtk_waiting(self):
while gtk.events_pending():
gtk.main_iteration()
def Change_Lab_Instrument(self, widget):
self.choosen_instrument = self.Instrument.get_active_text()
print "I choose ",self.choosen_instrument
if self.choosen_instrument == "Bruker":
self.XRDML_xrdml_file_txt.set_text("Select RAW file: ")
self.XRDML_xrdml_file_browse.set_label("Browse RAW file")
elif self.choosen_instrument == "PANalytical":
self.XRDML_xrdml_file_txt.set_text("Select XRDML file: ")
self.XRDML_xrdml_file_browse.set_label("Browse XRDML file")
def Convert_Lab_Source(self, widget):
print "Instrument chosen: ",self.choosen_instrument
energy = self.XRDML_energy.get_text()
if energy == "":
energy = 8048
else:
energy = float(energy)
self.lam = xu.lam2en(energy)/10
HKL = self.XRDML_reflection.get_text()
if HKL == "":
self.offset_correction = False
else:
self.offset_correction = True
HKL = HKL.split()
HKL = np.asarray([int(i) for i in HKL])
self.HKL = HKL
substrate = self.XRDML_substrate.get_active_text()
if substrate == "-- other":
substrate = self.XRDML_substrate_other.get_text()
command = "self.substrate = xu.materials."+substrate
exec(command)
in_plane = self.XRDML_substrate_inplane.get_text()
out_of_plane = self.XRDML_substrate_outplane.get_text()
if in_plane != "" and out_of_plane != "":
in_plane = in_plane.split()
self.in_plane = np.asarray([int(i) for i in in_plane])
out_of_plane = out_of_plane.split()
self.out_of_plane = np.asarray([int(i) for i in out_of_plane])
self.has_orientation_matrix = True
self.experiment = xu.HXRD(self.substrate.Q(self.in_plane),self.substrate.Q(self.out_of_plane), en=energy)
else:
self.has_orientation_matrix = False
self.experiment = xu.HXRD(self.substrate.Q(1,1,0),self.substrate.Q(0,0,1), en=energy)
if self.choosen_instrument == "Bruker":
self.Bruker2HDF()
elif self.choosen_instrument == "PANalytical":
self.XRDML2HDF()
def XRDML2HDF(self):
try:
xrdml_file = self.spec_file
a = self.substrate._geta1()[0] #in Angstrom
a = a/10.
description = self.XRDML_description.get_text()
self.XRDML_show_info.set_text("Reading XRDML data ...")
self.gtk_waiting()
dataFile = xu.io.XRDMLFile(xrdml_file)
scan = dataFile.scan
omega_exp = scan['Omega']
tth_exp = scan['2Theta']
data = scan['detector']
if self.has_orientation_matrix:
omega,tth,psd = xu.io.getxrdml_map(xrdml_file)
[qx,qy,qz] = self.experiment.Ang2Q(omega, tth)
mapData = psd.reshape(data.shape)
H = qy.reshape(data.shape)
K = qy.reshape(data.shape)
L = qz.reshape(data.shape)
else:
mapData = data
psi = omega_exp - tth_exp/2.
Qmod= 2.*np.sin(np.radians(tth_exp/2.))/self.lam
Qx = Qmod * np.sin(np.radians(psi))
Qz = Qmod * np.cos(np.radians(psi))
H = K = Qx*a/np.sqrt(2.0)
L = Qz*a
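# Worked check of this angular branch (sketch values): for tth = 30 deg and
# lam = 0.15406 nm (Cu K-alpha), Qmod = 2*sin(15 deg)/0.15406 ~ 3.36 nm^-1;
# psi then splits Qmod into the in-plane (Qx) and out-of-plane (Qz) parts.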
########## Offset correction ###############
if self.offset_correction:
x,y=np.unravel_index(np.argmax(mapData),mapData.shape)
H_sub = H[x,y]
K_sub = K[x,y]
L_sub = L[x,y]
H_offset = self.HKL[0] - H_sub
K_offset = self.HKL[1] - K_sub
L_offset = self.HKL[2] - L_sub
H = H + H_offset
K = K + K_offset
L = L + L_offset
Q = self.HKL2Q(H, K, L, a)
self.XRDML_show_info.set_text("XRDML data are successfully loaded.")
self.gtk_waiting()
if description == "":
no_description = True
description = "XRDML_Map"
else:
no_description = False
h5file = description+".h5"
info = "\nSaving file: %s"%(h5file)
self.XRDML_show_info.set_text(info)
self.gtk_waiting()
h5file = join(self.des_folder,h5file)
if os.path.isfile(h5file):
os.remove(h5file) # portable, and safe for paths containing spaces
h5file = h5.File(h5file,"w")
s = h5file.create_group(description)
s.create_dataset('intensity', data=mapData, compression='gzip', compression_opts=9)
s.create_dataset('Qx', data=Q[0], compression='gzip', compression_opts=9)
s.create_dataset('Qy', data=Q[1], compression='gzip', compression_opts=9)
s.create_dataset('Qz', data=Q[2], compression='gzip', compression_opts=9)
s.create_dataset('description', data=description)
h5file.close()
self.popup_info("info","Data conversion completed!")
except:
exc_type, exc_value, exc_traceback = sys.exc_info()
self.popup_info("warning", "ERROR: %s"%str(exc_value))
def Bruker2HDF(self):
try:
raw_file = self.spec_file
from MCA_GUI.Bruker import convert_raw_to_uxd,get_Bruker
uxd_file = raw_file.split(".")[0]+".uxd"
convert_raw_to_uxd(raw_file, uxd_file)
description = self.XRDML_description.get_text()
self.XRDML_show_info.set_text("Reading Raw data ...")
self.gtk_waiting()
a = self.substrate._geta1()[0] #in Angstrom
a = a/10.
dataset = get_Bruker(uxd_file)
theta = dataset['omega']
dTheta = dataset['tth']
Qhkl = self.experiment.Ang2HKL(theta, dTheta)
Qx,Qy,Qz = Qhkl[0],Qhkl[1],Qhkl[2]
########## Offset correction ###############
if self.offset_correction:
x,y=np.unravel_index(np.argmax(dataset['data']),dataset['data'].shape)
Hsub = Qhkl[0][x,y]
Ksub = Qhkl[1][x,y]
Lsub = Qhkl[2][x,y]
Qx = Qhkl[0]+self.HKL[0]-Hsub
Qy = Qhkl[1]+self.HKL[1]-Ksub
Qz = Qhkl[2]+self.HKL[2]-Lsub
Q = self.HKL2Q(Qx, Qy, Qz, a)
self.XRDML_show_info.set_text("Raw data are successfully loaded.")
self.gtk_waiting()
if description == "":
no_description = True
description = "RSM"
else:
no_description = False
h5file = description+".h5"
info = "\nSaving file: %s"%(h5file)
self.XRDML_show_info.set_text(info)
self.gtk_waiting()
h5file = join(self.des_folder,h5file)
if os.path.isfile(h5file):
os.remove(h5file) # portable, and safe for paths containing spaces
h5file = h5.File(h5file,"w")
s = h5file.create_group(description)
s.create_dataset('intensity', data=dataset['data'], compression='gzip', compression_opts=9)
s.create_dataset('Qx', data=Q[0], compression='gzip', compression_opts=9)
s.create_dataset('Qy', data=Q[1], compression='gzip', compression_opts=9)
s.create_dataset('Qz', data=Q[2], compression='gzip', compression_opts=9)
s.create_dataset('description', data=description)
h5file.close()
self.popup_info("info","Data conversion completed!")
except:
exc_type, exc_value, exc_traceback = sys.exc_info()
self.popup_info("warning", "ERROR: %s"%str(exc_value))
def spec2HDF(self,widget):
try:
specfile = self.spec_file
mcafile = self.mca_file
scan_beg = int(self.c4_entry1.get_text())
scan_end = int(self.c4_entry2.get_text())
substrate = self.e1_entry.get_active_text()
if substrate == "-- other":
substrate = self.e1_entry_other.get_text()
command = "self.substrate = xu.materials."+substrate
exec(command)
scanid = range(scan_beg, scan_end+1)
self.show_info.set_text("Reading MCA data ...")
self.gtk_waiting()
allMaps = SP.ReadMCA2D_complete(mcafile)
description = self.c5_entry1.get_text()
retard = self.c6_entry.get_active()
total = len(allMaps)
total_maps_loaded = "Number of map(s) loaded: %d"%total
self.show_info.set_text(total_maps_loaded)
self.gtk_waiting()
if description == "":
no_description = True
else:
description = description.split(",")
no_description = False
for i in range(len(allMaps)):
scannumber = scanid[i]
scan_name = "Scan_%d"%scannumber
if no_description:
h5file = scan_name+".h5"
d = scan_name
else:
h5file = description[i].strip()+".h5"
d = description[i].strip()
info = "\nSaving file N# %d/%d: %s"%(i+1,total,h5file)
out_info = total_maps_loaded + info
self.show_info.set_text(out_info)
self.gtk_waiting()
h5file = join(self.des_folder,h5file)
if os.path.isfile(h5file):
os.remove(h5file) # portable, and safe for paths containing spaces
h5file = h5.File(h5file,"w")
Q,mapdata = self.loadAmap(scannumber, specfile, allMaps[i], retard)
s = h5file.create_group(scan_name)
s.create_dataset('intensity', data=mapdata, compression='gzip', compression_opts=9)
s.create_dataset('Qx', data=Q[0], compression='gzip', compression_opts=9)
s.create_dataset('Qy', data=Q[1], compression='gzip', compression_opts=9)
s.create_dataset('Qz', data=Q[2], compression='gzip', compression_opts=9)
s.create_dataset('description', data=d)
h5file.close()
self.popup_info("info","Data conversion completed!")
except:
exc_type, exc_value, exc_traceback = sys.exc_info()
self.popup_info("warning", "ERROR: %s"%str(exc_value))
def Export_HQ_Image(self, widget):
dialog = gtk.FileChooserDialog(title="Save image", action=gtk.FILE_CHOOSER_ACTION_SAVE, buttons = (gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL, gtk.STOCK_SAVE, gtk.RESPONSE_OK))
filename = self.rsm_choosen.split(".")[0] if self.rsm_choosen != "" else "Img"
dialog.set_current_name(filename+".png")
#dialog.set_filename(filename)
dialog.set_current_folder(self.GUI_current_folder)
filtre = gtk.FileFilter()
filtre.set_name("images")
filtre.add_pattern("*.png")
filtre.add_pattern("*.jpg")
filtre.add_pattern("*.pdf")
filtre.add_pattern("*.ps")
filtre.add_pattern("*.eps")
dialog.add_filter(filtre)
filtre = gtk.FileFilter()
filtre.set_name("Other")
filtre.add_pattern("*")
dialog.add_filter(filtre)
response = dialog.run()
if response==gtk.RESPONSE_OK:
#self.fig.savefig(dialog.get_filename())
xlabel = r'$Q_x (nm^{-1})$'
ylabel = r'$Q_z (nm^{-1})$'
fig = plt.figure(figsize=(10,8),dpi=100)
ax = fig.add_axes([0.12,0.2,0.7,0.7])
cax = fig.add_axes([0.85,0.2,0.03,0.7])
clabel = r'$Intensity\ (Counts\ per\ second)$'
fmt = "%d"
if self.linear_scale_btn.get_active():
clabel = r'$Log_{10}\ (Intensity)\ [arb.\ units]$'
fmt = "%.2f"
data = self.gridder.data.T
data = flat_data(data, self.vmin, self.vmax, self.linear_scale_btn.get_active())
img = ax.contourf(self.gridder.xaxis, self.gridder.yaxis, data, 100, vmin=self.vmin*1.1, vmax=self.vmax)
cb = fig.colorbar(img,cax=cax, format=fmt)
cb.set_label(clabel, fontsize=20)
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
ax.yaxis.label.set_size(20)
ax.xaxis.label.set_size(20)
ax.set_title(self.rsm_description,fontsize=20)
fig.savefig(dialog.get_filename())
plt.close()
dialog.destroy()
if __name__=="__main__":
MyMainWindow()
gtk.main()
| gpl-2.0 |
stylianos-kampakis/scikit-learn | sklearn/utils/testing.py | 71 | 26178 | """Testing utilities."""
# Copyright (c) 2011, 2012
# Authors: Pietro Berkes,
# Andreas Muller
# Mathieu Blondel
# Olivier Grisel
# Arnaud Joly
# Denis Engemann
# License: BSD 3 clause
import os
import inspect
import pkgutil
import warnings
import sys
import re
import platform
import scipy as sp
import scipy.io
from functools import wraps
try:
# Python 2
from urllib2 import urlopen
from urllib2 import HTTPError
except ImportError:
# Python 3+
from urllib.request import urlopen
from urllib.error import HTTPError
import tempfile
import shutil
import os.path as op
import atexit
# WindowsError only exist on Windows
try:
WindowsError
except NameError:
WindowsError = None
import sklearn
from sklearn.base import BaseEstimator
from sklearn.externals import joblib
# Conveniently import all assertions in one place.
from nose.tools import assert_equal
from nose.tools import assert_not_equal
from nose.tools import assert_true
from nose.tools import assert_false
from nose.tools import assert_raises
from nose.tools import raises
from nose import SkipTest
from nose import with_setup
from numpy.testing import assert_almost_equal
from numpy.testing import assert_array_equal
from numpy.testing import assert_array_almost_equal
from numpy.testing import assert_array_less
import numpy as np
from sklearn.base import (ClassifierMixin, RegressorMixin, TransformerMixin,
ClusterMixin)
__all__ = ["assert_equal", "assert_not_equal", "assert_raises",
"assert_raises_regexp", "raises", "with_setup", "assert_true",
"assert_false", "assert_almost_equal", "assert_array_equal",
"assert_array_almost_equal", "assert_array_less",
"assert_less", "assert_less_equal",
"assert_greater", "assert_greater_equal"]
try:
from nose.tools import assert_in, assert_not_in
except ImportError:
# Nose < 1.0.0
def assert_in(x, container):
assert_true(x in container, msg="%r in %r" % (x, container))
def assert_not_in(x, container):
assert_false(x in container, msg="%r in %r" % (x, container))
try:
from nose.tools import assert_raises_regex
except ImportError:
# for Python 2
def assert_raises_regex(expected_exception, expected_regexp,
callable_obj=None, *args, **kwargs):
"""Helper function to check for message patterns in exceptions"""
not_raised = False
try:
callable_obj(*args, **kwargs)
not_raised = True
except expected_exception as e:
error_message = str(e)
if not re.compile(expected_regexp).search(error_message):
raise AssertionError("Error message should match pattern "
"%r. %r does not." %
(expected_regexp, error_message))
if not_raised:
raise AssertionError("%s not raised by %s" %
(expected_exception.__name__,
callable_obj.__name__))
# assert_raises_regexp is deprecated in Python 3.4 in favor of
# assert_raises_regex but let's keep the backward compat in scikit-learn with
# the old name for now
assert_raises_regexp = assert_raises_regex
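# Example (sketch): check both the exception type and the message pattern;
# int("abc") raises ValueError("invalid literal for int() ..."):
# assert_raises_regex(ValueError, "invalid literal", int, "abc")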
def _assert_less(a, b, msg=None):
message = "%r is not lower than %r" % (a, b)
if msg is not None:
message += ": " + msg
assert a < b, message
def _assert_greater(a, b, msg=None):
message = "%r is not greater than %r" % (a, b)
if msg is not None:
message += ": " + msg
assert a > b, message
def assert_less_equal(a, b, msg=None):
message = "%r is not lower than or equal to %r" % (a, b)
if msg is not None:
message += ": " + msg
assert a <= b, message
def assert_greater_equal(a, b, msg=None):
message = "%r is not greater than or equal to %r" % (a, b)
if msg is not None:
message += ": " + msg
assert a >= b, message
def assert_warns(warning_class, func, *args, **kw):
"""Test that a certain warning occurs.
Parameters
----------
warning_class : the warning class
The class to test for, e.g. UserWarning.
func : callable
Callable object to trigger warnings.
*args : the positional arguments to `func`.
**kw : the keyword arguments to `func`
Returns
-------
result : the return value of `func`
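Examples
--------
A minimal sketch using the standard library warnings module:
>>> import warnings
>>> assert_warns(UserWarning, warnings.warn, "sketchy", UserWarning)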
"""
# very important to avoid uncontrolled state propagation
clean_warning_registry()
with warnings.catch_warnings(record=True) as w:
# Cause all warnings to always be triggered.
warnings.simplefilter("always")
# Trigger a warning.
result = func(*args, **kw)
if hasattr(np, 'VisibleDeprecationWarning'):
# Filter out numpy-specific warnings in numpy >= 1.9
w = [e for e in w
if e.category is not np.VisibleDeprecationWarning]
# Verify some things
if not len(w) > 0:
raise AssertionError("No warning raised when calling %s"
% func.__name__)
found = any(warning.category is warning_class for warning in w)
if not found:
raise AssertionError("%s did not give warning: %s( is %s)"
% (func.__name__, warning_class, w))
return result
def assert_warns_message(warning_class, message, func, *args, **kw):
"""Test that a certain warning occurs and with a certain message.
Parameters
----------
warning_class : the warning class
The class to test for, e.g. UserWarning.
message : str | callable
The entire message or a substring to test for. If callable,
it takes a string as argument and will trigger an assertion error
if it returns `False`.
func : callable
Callable object to trigger warnings.
*args : the positional arguments to `func`.
**kw : the keyword arguments to `func`.
Returns
-------
result : the return value of `func`
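Examples
--------
A minimal sketch checking both the category and a message substring:
>>> import warnings
>>> assert_warns_message(UserWarning, "beta", warnings.warn,
...                      "this API is beta", UserWarning)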
"""
# very important to avoid uncontrolled state propagation
clean_warning_registry()
with warnings.catch_warnings(record=True) as w:
# Cause all warnings to always be triggered.
warnings.simplefilter("always")
if hasattr(np, 'VisibleDeprecationWarning'):
# Let's not catch the numpy internal DeprecationWarnings
warnings.simplefilter('ignore', np.VisibleDeprecationWarning)
# Trigger a warning.
result = func(*args, **kw)
# Verify some things
if not len(w) > 0:
raise AssertionError("No warning raised when calling %s"
% func.__name__)
found = [issubclass(warning.category, warning_class) for warning in w]
if not any(found):
raise AssertionError("No warning raised for %s with class "
"%s"
% (func.__name__, warning_class))
message_found = False
# Checks the message of all warnings belong to warning_class
for index in [i for i, x in enumerate(found) if x]:
# substring will match, the entire message with typo won't
msg = w[index].message # For Python 3 compatibility
msg = str(msg.args[0] if hasattr(msg, 'args') else msg)
if callable(message): # add support for certain tests
check_in_message = message
else:
check_in_message = lambda msg: message in msg
if check_in_message(msg):
message_found = True
break
if not message_found:
raise AssertionError("Did not receive the message you expected "
"('%s') for <%s>, got: '%s'"
% (message, func.__name__, msg))
return result
# To remove when we support numpy 1.7
def assert_no_warnings(func, *args, **kw):
# XXX: once we may depend on python >= 2.6, this can be replaced by the
# warnings module context manager.
# very important to avoid uncontrolled state propagation
clean_warning_registry()
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
result = func(*args, **kw)
if hasattr(np, 'VisibleDeprecationWarning'):
# Filter out numpy-specific warnings in numpy >= 1.9
w = [e for e in w
if e.category is not np.VisibleDeprecationWarning]
if len(w) > 0:
raise AssertionError("Got warnings when calling %s: %s"
% (func.__name__, w))
return result
def ignore_warnings(obj=None):
""" Context manager and decorator to ignore warnings
Note. Using this (in both variants) will clear all warnings
from all python modules loaded. In case you need to test
cross-module-warning-logging this is not your tool of choice.
Examples
--------
>>> with ignore_warnings():
... warnings.warn('buhuhuhu')
>>> def nasty_warn():
... warnings.warn('buhuhuhu')
... print(42)
>>> ignore_warnings(nasty_warn)()
42
"""
if callable(obj):
return _ignore_warnings(obj)
else:
return _IgnoreWarnings()
def _ignore_warnings(fn):
"""Decorator to catch and hide warnings without visual nesting"""
@wraps(fn)
def wrapper(*args, **kwargs):
# very important to avoid uncontrolled state propagation
clean_warning_registry()
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
return fn(*args, **kwargs)
w[:] = []
return wrapper
class _IgnoreWarnings(object):
"""Improved and simplified Python warnings context manager
Copied from Python 2.7.5 and modified as required.
"""
def __init__(self):
"""
Parameters
==========
category : warning class
The category to filter. Defaults to Warning. If None,
all categories will be muted.
"""
self._record = True
self._module = sys.modules['warnings']
self._entered = False
self.log = []
def __repr__(self):
args = []
if self._record:
args.append("record=True")
if self._module is not sys.modules['warnings']:
args.append("module=%r" % self._module)
name = type(self).__name__
return "%s(%s)" % (name, ", ".join(args))
def __enter__(self):
clean_warning_registry() # be safe and not propagate state + chaos
warnings.simplefilter('always')
if self._entered:
raise RuntimeError("Cannot enter %r twice" % self)
self._entered = True
self._filters = self._module.filters
self._module.filters = self._filters[:]
self._showwarning = self._module.showwarning
if self._record:
self.log = []
def showwarning(*args, **kwargs):
self.log.append(warnings.WarningMessage(*args, **kwargs))
self._module.showwarning = showwarning
return self.log
else:
return None
def __exit__(self, *exc_info):
if not self._entered:
raise RuntimeError("Cannot exit %r without entering first" % self)
self._module.filters = self._filters
self._module.showwarning = self._showwarning
self.log[:] = []
clean_warning_registry() # be safe and not propagate state + chaos
try:
from nose.tools import assert_less
except ImportError:
assert_less = _assert_less
try:
from nose.tools import assert_greater
except ImportError:
assert_greater = _assert_greater
def _assert_allclose(actual, desired, rtol=1e-7, atol=0,
err_msg='', verbose=True):
actual, desired = np.asanyarray(actual), np.asanyarray(desired)
if np.allclose(actual, desired, rtol=rtol, atol=atol):
return
msg = ('Array not equal to tolerance rtol=%g, atol=%g: '
'actual %s, desired %s') % (rtol, atol, actual, desired)
raise AssertionError(msg)
if hasattr(np.testing, 'assert_allclose'):
assert_allclose = np.testing.assert_allclose
else:
assert_allclose = _assert_allclose
def assert_raise_message(exceptions, message, function, *args, **kwargs):
"""Helper function to test error messages in exceptions
Parameters
----------
exceptions : exception or tuple of exception
The expected exception class, or a tuple of acceptable exception classes
message : str
A substring expected to appear in the raised error message
function : callable
Callable object expected to raise the error
*args : the positional arguments to `function`.
**kw : the keyword arguments to `function`
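Examples
--------
A minimal sketch with a built-in callable:
>>> assert_raise_message(ValueError, "invalid literal", int, "abc")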
"""
try:
function(*args, **kwargs)
except exceptions as e:
error_message = str(e)
if message not in error_message:
raise AssertionError("Error message does not include the expected"
" string: %r. Observed error message: %r" %
(message, error_message))
else:
# concatenate exception names
if isinstance(exceptions, tuple):
names = " or ".join(e.__name__ for e in exceptions)
else:
names = exceptions.__name__
raise AssertionError("%s not raised by %s" %
(names, function.__name__))
def fake_mldata(columns_dict, dataname, matfile, ordering=None):
"""Create a fake mldata data set.
Parameters
----------
columns_dict : dict, keys=str, values=ndarray
Contains data as columns_dict[column_name] = array of data.
dataname : string
Name of data set.
matfile : string or file object
The file name string or the file-like object of the output file.
ordering : list, default None
List of column_names, determines the ordering in the data set.
Notes
-----
This function transposes all arrays, while fetch_mldata only transposes
'data', keep that into account in the tests.
"""
datasets = dict(columns_dict)
# transpose all variables
for name in datasets:
datasets[name] = datasets[name].T
if ordering is None:
ordering = sorted(list(datasets.keys()))
# NOTE: setting up this array is tricky, because of the way Matlab
# re-packages 1D arrays
datasets['mldata_descr_ordering'] = sp.empty((1, len(ordering)),
dtype='object')
for i, name in enumerate(ordering):
datasets['mldata_descr_ordering'][0, i] = name
scipy.io.savemat(matfile, datasets, oned_as='column')
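# Example (sketch): write a fake two-column data set to an in-memory file;
# BytesIO stands in for a real .mat file on disk:
# from io import BytesIO
# buf = BytesIO()
# fake_mldata({'data': np.arange(6).reshape(2, 3), 'label': np.arange(2)},
#             'datasets-fake', buf, ordering=['label', 'data'])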
class mock_mldata_urlopen(object):
def __init__(self, mock_datasets):
"""Object that mocks the urlopen function to fake requests to mldata.
`mock_datasets` is a dictionary of {dataset_name: data_dict}, or
{dataset_name: (data_dict, ordering)}.
`data_dict` itself is a dictionary of {column_name: data_array},
and `ordering` is a list of column_names to determine the ordering
in the data set (see `fake_mldata` for details).
When requesting a dataset with a name that is in mock_datasets,
this object creates a fake dataset in a StringIO object and
returns it. Otherwise, it raises an HTTPError.
"""
self.mock_datasets = mock_datasets
def __call__(self, urlname):
dataset_name = urlname.split('/')[-1]
if dataset_name in self.mock_datasets:
resource_name = '_' + dataset_name
from io import BytesIO
matfile = BytesIO()
dataset = self.mock_datasets[dataset_name]
ordering = None
if isinstance(dataset, tuple):
dataset, ordering = dataset
fake_mldata(dataset, resource_name, matfile, ordering)
matfile.seek(0)
return matfile
else:
raise HTTPError(urlname, 404, dataset_name + " is not available",
[], None)
def install_mldata_mock(mock_datasets):
# Lazy import to avoid mutually recursive imports
from sklearn import datasets
datasets.mldata.urlopen = mock_mldata_urlopen(mock_datasets)
def uninstall_mldata_mock():
# Lazy import to avoid mutually recursive imports
from sklearn import datasets
datasets.mldata.urlopen = urlopen
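# Example (sketch): fake the mldata download of a data set inside a test;
# 'some-dataset' and the arrays below are made-up placeholders:
# install_mldata_mock({'some-dataset': {'data': np.ones((5, 2)),
#                                       'label': np.arange(5)}})
# try:
#     ... # code under test calling sklearn.datasets.fetch_mldata('some-dataset')
# finally:
#     uninstall_mldata_mock()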
# Meta estimators need another estimator to be instantiated.
META_ESTIMATORS = ["OneVsOneClassifier",
"OutputCodeClassifier", "OneVsRestClassifier", "RFE",
"RFECV", "BaseEnsemble"]
# estimators that there is no way to default-construct sensibly
OTHER = ["Pipeline", "FeatureUnion", "GridSearchCV",
"RandomizedSearchCV"]
# some strange ones
DONT_TEST = ['SparseCoder', 'EllipticEnvelope', 'DictVectorizer',
'LabelBinarizer', 'LabelEncoder',
'MultiLabelBinarizer', 'TfidfTransformer',
'TfidfVectorizer', 'IsotonicRegression',
'OneHotEncoder', 'RandomTreesEmbedding',
'FeatureHasher', 'DummyClassifier', 'DummyRegressor',
'TruncatedSVD', 'PolynomialFeatures',
'GaussianRandomProjectionHash', 'HashingVectorizer',
'CheckingClassifier', 'PatchExtractor', 'CountVectorizer',
# GradientBoosting base estimators, maybe should
# exclude them in another way
'ZeroEstimator', 'ScaledLogOddsEstimator',
'QuantileEstimator', 'MeanEstimator',
'LogOddsEstimator', 'PriorProbabilityEstimator',
'_SigmoidCalibration', 'VotingClassifier']
def all_estimators(include_meta_estimators=False,
include_other=False, type_filter=None,
include_dont_test=False):
"""Get a list of all estimators from sklearn.
This function crawls the module and gets all classes that inherit
from BaseEstimator. Classes that are defined in test-modules are not
included.
By default meta_estimators such as GridSearchCV are also not included.
Parameters
----------
include_meta_estimators : boolean, default=False
Whether to include meta-estimators that can be constructed using
an estimator as their first argument. These are currently
BaseEnsemble, OneVsOneClassifier, OutputCodeClassifier,
OneVsRestClassifier, RFE, RFECV.
include_other : boolean, default=False
Whether to include meta-estimators that are somehow special and can
not be default-constructed sensibly. These are currently
Pipeline, FeatureUnion and GridSearchCV
include_dont_test : boolean, default=False
Whether to include "special" label estimator or test processors.
type_filter : string, list of string, or None, default=None
Which kind of estimators should be returned. If None, no filter is
applied and all estimators are returned. Possible values are
'classifier', 'regressor', 'cluster' and 'transformer' to get
estimators only of these specific types, or a list of these to
get the estimators that fit at least one of the types.
Returns
-------
estimators : list of tuples
List of (name, class), where ``name`` is the class name as string
and ``class`` is the actual type of the class.
"""
def is_abstract(c):
if not(hasattr(c, '__abstractmethods__')):
return False
if not len(c.__abstractmethods__):
return False
return True
all_classes = []
# get parent folder
path = sklearn.__path__
for importer, modname, ispkg in pkgutil.walk_packages(
path=path, prefix='sklearn.', onerror=lambda x: None):
if ".tests." in modname:
continue
module = __import__(modname, fromlist="dummy")
classes = inspect.getmembers(module, inspect.isclass)
all_classes.extend(classes)
all_classes = set(all_classes)
estimators = [c for c in all_classes
if (issubclass(c[1], BaseEstimator)
and c[0] != 'BaseEstimator')]
# get rid of abstract base classes
estimators = [c for c in estimators if not is_abstract(c[1])]
if not include_dont_test:
estimators = [c for c in estimators if not c[0] in DONT_TEST]
if not include_other:
estimators = [c for c in estimators if not c[0] in OTHER]
# possibly get rid of meta estimators
if not include_meta_estimators:
estimators = [c for c in estimators if not c[0] in META_ESTIMATORS]
if type_filter is not None:
if not isinstance(type_filter, list):
type_filter = [type_filter]
else:
type_filter = list(type_filter) # copy
filtered_estimators = []
filters = {'classifier': ClassifierMixin,
'regressor': RegressorMixin,
'transformer': TransformerMixin,
'cluster': ClusterMixin}
for name, mixin in filters.items():
if name in type_filter:
type_filter.remove(name)
filtered_estimators.extend([est for est in estimators
if issubclass(est[1], mixin)])
estimators = filtered_estimators
if type_filter:
raise ValueError("Parameter type_filter must be 'classifier', "
"'regressor', 'transformer', 'cluster' or None, got"
" %s." % repr(type_filter))
# drop duplicates, sort for reproducibility
return sorted(set(estimators))
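# Example (sketch): list only the classifier classes shipped with sklearn:
# classifiers = all_estimators(type_filter='classifier')
# names = [name for name, Est in classifiers]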
def set_random_state(estimator, random_state=0):
if "random_state" in estimator.get_params().keys():
estimator.set_params(random_state=random_state)
def if_matplotlib(func):
"""Test decorator that skips test if matplotlib not installed. """
@wraps(func)
def run_test(*args, **kwargs):
try:
import matplotlib
matplotlib.use('Agg', warn=False)
# this fails if no $DISPLAY specified
import matplotlib.pyplot as plt
plt.figure()
except ImportError:
raise SkipTest('Matplotlib not available.')
else:
return func(*args, **kwargs)
return run_test
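# Example (sketch): skip a plotting test gracefully when matplotlib (or a
# usable backend) is not available; test_some_plot is a made-up test name:
# @if_matplotlib
# def test_some_plot():
#     plot_something()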
def if_not_mac_os(versions=('10.7', '10.8', '10.9'),
message='Multi-process bug in Mac OS X >= 10.7 '
'(see issue #636)'):
"""Test decorator that skips test if OS is Mac OS X and its
major version is one of ``versions``.
"""
warnings.warn("if_not_mac_os is deprecated in 0.17 and will be removed"
" in 0.19: use the safer and more generic"
" if_safe_multiprocessing_with_blas instead",
DeprecationWarning)
mac_version, _, _ = platform.mac_ver()
skip = '.'.join(mac_version.split('.')[:2]) in versions
def decorator(func):
if skip:
@wraps(func)
def func(*args, **kwargs):
raise SkipTest(message)
return func
return decorator
def if_safe_multiprocessing_with_blas(func):
"""Decorator for tests involving both BLAS calls and multiprocessing
Under Python < 3.4 and POSIX (e.g. Linux or OSX), using multiprocessing in
conjunction with some implementation of BLAS (or other libraries that
manage an internal posix thread pool) can cause a crash or a freeze of the
Python process.
Under Python 3.4 and later, joblib uses the forkserver mode of
multiprocessing which does not trigger this problem.
In practice all known packaged distributions (from Linux distros or
Anaconda) of BLAS under Linux seem to be safe. So this problem seems to
only impact OSX users.
This wrapper makes it possible to skip tests that can possibly cause
this crash under OSX.
"""
@wraps(func)
def run_test(*args, **kwargs):
if sys.platform == 'darwin' and sys.version_info[:2] < (3, 4):
raise SkipTest(
"Possible multi-process bug with some BLAS under Python < 3.4")
return func(*args, **kwargs)
return run_test
def clean_warning_registry():
"""Safe way to reset warnings """
warnings.resetwarnings()
reg = "__warningregistry__"
for mod_name, mod in list(sys.modules.items()):
if 'six.moves' in mod_name:
continue
if hasattr(mod, reg):
getattr(mod, reg).clear()
def check_skip_network():
if int(os.environ.get('SKLEARN_SKIP_NETWORK_TESTS', 0)):
raise SkipTest("Text tutorial requires large dataset download")
def check_skip_travis():
"""Skip test if being run on Travis."""
if os.environ.get('TRAVIS') == "true":
raise SkipTest("This test needs to be skipped on Travis")
def _delete_folder(folder_path, warn=False):
"""Utility function to cleanup a temporary folder if still existing.
Copied from joblib.pool (for independence)"""
try:
if os.path.exists(folder_path):
# This can fail under windows,
# but will succeed when called by atexit
shutil.rmtree(folder_path)
except WindowsError:
if warn:
warnings.warn("Could not delete temporary folder %s" % folder_path)
class TempMemmap(object):
def __init__(self, data, mmap_mode='r'):
self.temp_folder = tempfile.mkdtemp(prefix='sklearn_testing_')
self.mmap_mode = mmap_mode
self.data = data
def __enter__(self):
fpath = op.join(self.temp_folder, 'data.pkl')
joblib.dump(self.data, fpath)
data_read_only = joblib.load(fpath, mmap_mode=self.mmap_mode)
atexit.register(lambda: _delete_folder(self.temp_folder, warn=True))
return data_read_only
def __exit__(self, exc_type, exc_val, exc_tb):
_delete_folder(self.temp_folder)
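# Example (sketch): hand a read-only memmapped view of an array to a test;
# mmap_mode='r' makes the loaded data non-writeable:
# with TempMemmap(np.arange(10)) as data:
#     assert_false(data.flags.writeable)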
with_network = with_setup(check_skip_network)
with_travis = with_setup(check_skip_travis)
| bsd-3-clause |
Oliver-Lab/snakemakelib-oliver | snakemakelib_oliver/workflows/qc/app/graphics.py | 2 | 1113 | # Copyright (C) 2015 by Per Unneberg
import pandas as pd
from math import log10
from snakemakelib.application import SampleApplication, PlatformUnitApplication
from snakemakelib.io import IOTarget, IOAggregateTarget
from snakemakelib.graphics import scatter, points, tooltips, facet_grid, colorbrewer, mlines, lines
from blaze import Data, append, odo, DataFrame
from snakemakelib_oliver.odo import fastqc
from snakemakelib.odo import cutadapt
from bokeh.charts import Scatter
from bokeh.plotting import figure, gridplot
__all__ = ['qc_cutadapt_plot_metrics',]
DEFAULT_TOOLS = "pan,wheel_zoom,box_zoom,box_select,reset,save,hover,resize"
# Plotting functions
def qc_cutadapt_plot_metrics(df, **kwargs):
df.set_index(['SM', 'PU', 'PlatformUnit', 'statistic'], inplace=True)
df.sortlevel(inplace=True)
df = df.loc[pd.IndexSlice[:, :, :, "Reads percent"], :].reset_index()
p = Scatter(df, x="PlatformUnit", y="value",
color="statistic", legend="top_right",
title="Cutadapt metrics", ylabel="% reads with adapter")
return p
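# Example (sketch): `df` is expected to be a DataFrame carrying 'SM', 'PU',
# 'PlatformUnit', 'statistic' and 'value' columns (e.g. aggregated cutadapt
# metrics); metrics_df below is a hypothetical such frame:
# p = qc_cutadapt_plot_metrics(metrics_df)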
| mit |
nguyentu1602/numpy | numpy/core/tests/test_multiarray.py | 5 | 221306 | from __future__ import division, absolute_import, print_function
import collections
import tempfile
import sys
import shutil
import warnings
import operator
import io
import itertools
if sys.version_info[0] >= 3:
import builtins
else:
import __builtin__ as builtins
from decimal import Decimal
import numpy as np
from nose import SkipTest
from numpy.compat import asbytes, getexception, strchar, unicode, sixu
from test_print import in_foreign_locale
from numpy.core.multiarray_tests import (
test_neighborhood_iterator, test_neighborhood_iterator_oob,
test_pydatamem_seteventhook_start, test_pydatamem_seteventhook_end,
test_inplace_increment, get_buffer_info, test_as_c_array
)
from numpy.testing import (
TestCase, run_module_suite, assert_, assert_raises,
assert_equal, assert_almost_equal, assert_array_equal,
assert_array_almost_equal, assert_allclose,
assert_array_less, runstring, dec
)
# Need to test an object that does not fully implement math interface
from datetime import timedelta
if sys.version_info[:2] > (3, 2):
# In Python 3.3 the representation of empty shape, strides and suboffsets
# is an empty tuple instead of None.
# http://docs.python.org/dev/whatsnew/3.3.html#api-changes
EMPTY = ()
else:
EMPTY = None
class TestFlags(TestCase):
def setUp(self):
self.a = np.arange(10)
def test_writeable(self):
mydict = locals()
self.a.flags.writeable = False
self.assertRaises(ValueError, runstring, 'self.a[0] = 3', mydict)
self.assertRaises(ValueError, runstring, 'self.a[0:1].itemset(3)', mydict)
self.a.flags.writeable = True
self.a[0] = 5
self.a[0] = 0
def test_otherflags(self):
assert_equal(self.a.flags.carray, True)
assert_equal(self.a.flags.farray, False)
assert_equal(self.a.flags.behaved, True)
assert_equal(self.a.flags.fnc, False)
assert_equal(self.a.flags.forc, True)
assert_equal(self.a.flags.owndata, True)
assert_equal(self.a.flags.writeable, True)
assert_equal(self.a.flags.aligned, True)
assert_equal(self.a.flags.updateifcopy, False)
def test_string_align(self):
a = np.zeros(4, dtype=np.dtype('|S4'))
assert_(a.flags.aligned)
# not power of two are accessed bytewise and thus considered aligned
a = np.zeros(5, dtype=np.dtype('|S4'))
assert_(a.flags.aligned)
def test_void_align(self):
a = np.zeros(4, dtype=np.dtype([("a", "i4"), ("b", "i4")]))
assert_(a.flags.aligned)
class TestHash(TestCase):
# see #3793
def test_int(self):
for st, ut, s in [(np.int8, np.uint8, 8),
(np.int16, np.uint16, 16),
(np.int32, np.uint32, 32),
(np.int64, np.uint64, 64)]:
for i in range(1, s):
assert_equal(hash(st(-2**i)), hash(-2**i),
err_msg="%r: -2**%d" % (st, i))
assert_equal(hash(st(2**(i - 1))), hash(2**(i - 1)),
err_msg="%r: 2**%d" % (st, i - 1))
assert_equal(hash(st(2**i - 1)), hash(2**i - 1),
err_msg="%r: 2**%d - 1" % (st, i))
i = max(i - 1, 1)
assert_equal(hash(ut(2**(i - 1))), hash(2**(i - 1)),
err_msg="%r: 2**%d" % (ut, i - 1))
assert_equal(hash(ut(2**i - 1)), hash(2**i - 1),
err_msg="%r: 2**%d - 1" % (ut, i))
class TestAttributes(TestCase):
def setUp(self):
self.one = np.arange(10)
self.two = np.arange(20).reshape(4, 5)
self.three = np.arange(60, dtype=np.float64).reshape(2, 5, 6)
def test_attributes(self):
assert_equal(self.one.shape, (10,))
assert_equal(self.two.shape, (4, 5))
assert_equal(self.three.shape, (2, 5, 6))
self.three.shape = (10, 3, 2)
assert_equal(self.three.shape, (10, 3, 2))
self.three.shape = (2, 5, 6)
assert_equal(self.one.strides, (self.one.itemsize,))
num = self.two.itemsize
assert_equal(self.two.strides, (5*num, num))
num = self.three.itemsize
assert_equal(self.three.strides, (30*num, 6*num, num))
assert_equal(self.one.ndim, 1)
assert_equal(self.two.ndim, 2)
assert_equal(self.three.ndim, 3)
num = self.two.itemsize
assert_equal(self.two.size, 20)
assert_equal(self.two.nbytes, 20*num)
assert_equal(self.two.itemsize, self.two.dtype.itemsize)
assert_equal(self.two.base, np.arange(20))
def test_dtypeattr(self):
assert_equal(self.one.dtype, np.dtype(np.int_))
assert_equal(self.three.dtype, np.dtype(np.float_))
assert_equal(self.one.dtype.char, 'l')
assert_equal(self.three.dtype.char, 'd')
self.assertTrue(self.three.dtype.str[0] in '<>')
assert_equal(self.one.dtype.str[1], 'i')
assert_equal(self.three.dtype.str[1], 'f')
def test_int_subclassing(self):
# Regression test for https://github.com/numpy/numpy/pull/3526
numpy_int = np.int_(0)
if sys.version_info[0] >= 3:
# On Py3k int_ should not inherit from int, because it's not fixed-width anymore
assert_equal(isinstance(numpy_int, int), False)
else:
# Otherwise, it should inherit from int...
assert_equal(isinstance(numpy_int, int), True)
# ... and fast-path checks on C-API level should also work
from numpy.core.multiarray_tests import test_int_subclass
assert_equal(test_int_subclass(numpy_int), True)
def test_stridesattr(self):
x = self.one
def make_array(size, offset, strides):
return np.ndarray(size, buffer=x, dtype=int,
offset=offset*x.itemsize,
strides=strides*x.itemsize)
assert_equal(make_array(4, 4, -1), np.array([4, 3, 2, 1]))
self.assertRaises(ValueError, make_array, 4, 4, -2)
self.assertRaises(ValueError, make_array, 4, 2, -1)
self.assertRaises(ValueError, make_array, 8, 3, 1)
assert_equal(make_array(8, 3, 0), np.array([3]*8))
# Check behavior reported in gh-2503:
self.assertRaises(ValueError, make_array, (2, 3), 5, np.array([-2, -3]))
make_array(0, 0, 10)
def test_set_stridesattr(self):
x = self.one
def make_array(size, offset, strides):
try:
r = np.ndarray([size], dtype=int, buffer=x, offset=offset*x.itemsize)
except:
raise RuntimeError(getexception())
r.strides = strides = strides*x.itemsize
return r
assert_equal(make_array(4, 4, -1), np.array([4, 3, 2, 1]))
assert_equal(make_array(7, 3, 1), np.array([3, 4, 5, 6, 7, 8, 9]))
self.assertRaises(ValueError, make_array, 4, 4, -2)
self.assertRaises(ValueError, make_array, 4, 2, -1)
self.assertRaises(RuntimeError, make_array, 8, 3, 1)
# Check that the true extent of the array is used.
# Test relies on as_strided base not exposing a buffer.
x = np.lib.stride_tricks.as_strided(np.arange(1), (10, 10), (0, 0))
def set_strides(arr, strides):
arr.strides = strides
self.assertRaises(ValueError, set_strides, x, (10*x.itemsize, x.itemsize))
# Test for offset calculations:
x = np.lib.stride_tricks.as_strided(np.arange(10, dtype=np.int8)[-1],
shape=(10,), strides=(-1,))
self.assertRaises(ValueError, set_strides, x[::-1], -1)
a = x[::-1]
a.strides = 1
a[::2].strides = 2
def test_fill(self):
for t in "?bhilqpBHILQPfdgFDGO":
x = np.empty((3, 2, 1), t)
y = np.empty((3, 2, 1), t)
x.fill(1)
y[...] = 1
assert_equal(x, y)
def test_fill_max_uint64(self):
x = np.empty((3, 2, 1), dtype=np.uint64)
y = np.empty((3, 2, 1), dtype=np.uint64)
value = 2**64 - 1
y[...] = value
x.fill(value)
assert_array_equal(x, y)
def test_fill_struct_array(self):
# Filling from a scalar
x = np.array([(0, 0.0), (1, 1.0)], dtype='i4,f8')
x.fill(x[0])
assert_equal(x['f1'][1], x['f1'][0])
# Filling from a tuple that can be converted
# to a scalar
x = np.zeros(2, dtype=[('a', 'f8'), ('b', 'i4')])
x.fill((3.5, -2))
assert_array_equal(x['a'], [3.5, 3.5])
assert_array_equal(x['b'], [-2, -2])
class TestArrayConstruction(TestCase):
def test_array(self):
d = np.ones(6)
r = np.array([d, d])
assert_equal(r, np.ones((2, 6)))
d = np.ones(6)
tgt = np.ones((2, 6))
r = np.array([d, d])
assert_equal(r, tgt)
tgt[1] = 2
r = np.array([d, d + 1])
assert_equal(r, tgt)
d = np.ones(6)
r = np.array([[d, d]])
assert_equal(r, np.ones((1, 2, 6)))
d = np.ones(6)
r = np.array([[d, d], [d, d]])
assert_equal(r, np.ones((2, 2, 6)))
d = np.ones((6, 6))
r = np.array([d, d])
assert_equal(r, np.ones((2, 6, 6)))
d = np.ones((6, ))
r = np.array([[d, d + 1], d + 2])
assert_equal(len(r), 2)
assert_equal(r[0], [d, d + 1])
assert_equal(r[1], d + 2)
tgt = np.ones((2, 3), dtype=np.bool)
tgt[0, 2] = False
tgt[1, 0:2] = False
r = np.array([[True, True, False], [False, False, True]])
assert_equal(r, tgt)
r = np.array([[True, False], [True, False], [False, True]])
assert_equal(r, tgt.T)
def test_array_empty(self):
assert_raises(TypeError, np.array)
def test_array_copy_false(self):
d = np.array([1, 2, 3])
e = np.array(d, copy=False)
d[1] = 3
assert_array_equal(e, [1, 3, 3])
e = np.array(d, copy=False, order='F')
d[1] = 4
assert_array_equal(e, [1, 4, 3])
e[2] = 7
assert_array_equal(d, [1, 4, 7])
def test_array_copy_true(self):
d = np.array([[1,2,3], [1, 2, 3]])
e = np.array(d, copy=True)
d[0, 1] = 3
e[0, 2] = -7
assert_array_equal(e, [[1, 2, -7], [1, 2, 3]])
assert_array_equal(d, [[1, 3, 3], [1, 2, 3]])
e = np.array(d, copy=True, order='F')
d[0, 1] = 5
e[0, 2] = 7
assert_array_equal(e, [[1, 3, 7], [1, 2, 3]])
assert_array_equal(d, [[1, 5, 3], [1,2,3]])
def test_array_cont(self):
d = np.ones(10)[::2]
assert_(np.ascontiguousarray(d).flags.c_contiguous)
assert_(np.ascontiguousarray(d).flags.f_contiguous)
assert_(np.asfortranarray(d).flags.c_contiguous)
assert_(np.asfortranarray(d).flags.f_contiguous)
d = np.ones((10, 10))[::2,::2]
assert_(np.ascontiguousarray(d).flags.c_contiguous)
assert_(np.asfortranarray(d).flags.f_contiguous)
class TestAssignment(TestCase):
def test_assignment_broadcasting(self):
a = np.arange(6).reshape(2, 3)
# Broadcasting the input to the output
a[...] = np.arange(3)
assert_equal(a, [[0, 1, 2], [0, 1, 2]])
a[...] = np.arange(2).reshape(2, 1)
assert_equal(a, [[0, 0, 0], [1, 1, 1]])
# For compatibility with <= 1.5, a limited version of broadcasting
# the output to the input.
#
# This behavior is inconsistent with NumPy broadcasting
# in general, because it only uses one of the two broadcasting
# rules (adding a new "1" dimension to the left of the shape),
# applied to the output instead of an input. In NumPy 2.0, this kind
# of broadcasting assignment will likely be disallowed.
a[...] = np.arange(6)[::-1].reshape(1, 2, 3)
assert_equal(a, [[5, 4, 3], [2, 1, 0]])
# The other type of broadcasting would require a reduction operation.
def assign(a, b):
a[...] = b
assert_raises(ValueError, assign, a, np.arange(12).reshape(2, 2, 3))
def test_assignment_errors(self):
# Address issue #2276
class C:
pass
a = np.zeros(1)
def assign(v):
a[0] = v
assert_raises((AttributeError, TypeError), assign, C())
assert_raises(ValueError, assign, [1])
class TestDtypedescr(TestCase):
def test_construction(self):
d1 = np.dtype('i4')
assert_equal(d1, np.dtype(np.int32))
d2 = np.dtype('f8')
assert_equal(d2, np.dtype(np.float64))
def test_byteorders(self):
self.assertNotEqual(np.dtype('<i4'), np.dtype('>i4'))
self.assertNotEqual(np.dtype([('a', '<i4')]), np.dtype([('a', '>i4')]))
class TestZeroRank(TestCase):
def setUp(self):
self.d = np.array(0), np.array('x', object)
def test_ellipsis_subscript(self):
a, b = self.d
self.assertEqual(a[...], 0)
self.assertEqual(b[...], 'x')
self.assertTrue(a[...].base is a) # `a[...] is a` in numpy <1.9.
self.assertTrue(b[...].base is b) # `b[...] is b` in numpy <1.9.
def test_empty_subscript(self):
a, b = self.d
self.assertEqual(a[()], 0)
self.assertEqual(b[()], 'x')
self.assertTrue(type(a[()]) is a.dtype.type)
self.assertTrue(type(b[()]) is str)
def test_invalid_subscript(self):
a, b = self.d
self.assertRaises(IndexError, lambda x: x[0], a)
self.assertRaises(IndexError, lambda x: x[0], b)
self.assertRaises(IndexError, lambda x: x[np.array([], int)], a)
self.assertRaises(IndexError, lambda x: x[np.array([], int)], b)
def test_ellipsis_subscript_assignment(self):
a, b = self.d
a[...] = 42
self.assertEqual(a, 42)
b[...] = ''
self.assertEqual(b.item(), '')
def test_empty_subscript_assignment(self):
a, b = self.d
a[()] = 42
self.assertEqual(a, 42)
b[()] = ''
self.assertEqual(b.item(), '')
def test_invalid_subscript_assignment(self):
a, b = self.d
def assign(x, i, v):
x[i] = v
self.assertRaises(IndexError, assign, a, 0, 42)
self.assertRaises(IndexError, assign, b, 0, '')
self.assertRaises(ValueError, assign, a, (), '')
def test_newaxis(self):
a, b = self.d
self.assertEqual(a[np.newaxis].shape, (1,))
self.assertEqual(a[..., np.newaxis].shape, (1,))
self.assertEqual(a[np.newaxis, ...].shape, (1,))
self.assertEqual(a[..., np.newaxis].shape, (1,))
self.assertEqual(a[np.newaxis, ..., np.newaxis].shape, (1, 1))
self.assertEqual(a[..., np.newaxis, np.newaxis].shape, (1, 1))
self.assertEqual(a[np.newaxis, np.newaxis, ...].shape, (1, 1))
self.assertEqual(a[(np.newaxis,)*10].shape, (1,)*10)
def test_invalid_newaxis(self):
a, b = self.d
def subscript(x, i):
x[i]
self.assertRaises(IndexError, subscript, a, (np.newaxis, 0))
self.assertRaises(IndexError, subscript, a, (np.newaxis,)*50)
def test_constructor(self):
x = np.ndarray(())
x[()] = 5
self.assertEqual(x[()], 5)
y = np.ndarray((), buffer=x)
y[()] = 6
self.assertEqual(x[()], 6)
def test_output(self):
x = np.array(2)
self.assertRaises(ValueError, np.add, x, [1], x)
class TestScalarIndexing(TestCase):
def setUp(self):
self.d = np.array([0, 1])[0]
def test_ellipsis_subscript(self):
a = self.d
self.assertEqual(a[...], 0)
self.assertEqual(a[...].shape, ())
def test_empty_subscript(self):
a = self.d
self.assertEqual(a[()], 0)
self.assertEqual(a[()].shape, ())
def test_invalid_subscript(self):
a = self.d
self.assertRaises(IndexError, lambda x: x[0], a)
self.assertRaises(IndexError, lambda x: x[np.array([], int)], a)
def test_invalid_subscript_assignment(self):
a = self.d
def assign(x, i, v):
x[i] = v
self.assertRaises(TypeError, assign, a, 0, 42)
def test_newaxis(self):
a = self.d
self.assertEqual(a[np.newaxis].shape, (1,))
self.assertEqual(a[..., np.newaxis].shape, (1,))
self.assertEqual(a[np.newaxis, ...].shape, (1,))
self.assertEqual(a[..., np.newaxis].shape, (1,))
self.assertEqual(a[np.newaxis, ..., np.newaxis].shape, (1, 1))
self.assertEqual(a[..., np.newaxis, np.newaxis].shape, (1, 1))
self.assertEqual(a[np.newaxis, np.newaxis, ...].shape, (1, 1))
self.assertEqual(a[(np.newaxis,)*10].shape, (1,)*10)
def test_invalid_newaxis(self):
a = self.d
def subscript(x, i):
x[i]
self.assertRaises(IndexError, subscript, a, (np.newaxis, 0))
self.assertRaises(IndexError, subscript, a, (np.newaxis,)*50)
def test_overlapping_assignment(self):
# With positive strides
a = np.arange(4)
a[:-1] = a[1:]
assert_equal(a, [1, 2, 3, 3])
a = np.arange(4)
a[1:] = a[:-1]
assert_equal(a, [0, 0, 1, 2])
# With positive and negative strides
a = np.arange(4)
a[:] = a[::-1]
assert_equal(a, [3, 2, 1, 0])
a = np.arange(6).reshape(2, 3)
a[::-1,:] = a[:, ::-1]
assert_equal(a, [[5, 4, 3], [2, 1, 0]])
a = np.arange(6).reshape(2, 3)
a[::-1, ::-1] = a[:, ::-1]
assert_equal(a, [[3, 4, 5], [0, 1, 2]])
# With just one element overlapping
a = np.arange(5)
a[:3] = a[2:]
assert_equal(a, [2, 3, 4, 3, 4])
a = np.arange(5)
a[2:] = a[:3]
assert_equal(a, [0, 1, 0, 1, 2])
a = np.arange(5)
a[2::-1] = a[2:]
assert_equal(a, [4, 3, 2, 3, 4])
a = np.arange(5)
a[2:] = a[2::-1]
assert_equal(a, [0, 1, 2, 1, 0])
a = np.arange(5)
a[2::-1] = a[:1:-1]
assert_equal(a, [2, 3, 4, 3, 4])
a = np.arange(5)
a[:1:-1] = a[2::-1]
assert_equal(a, [0, 1, 0, 1, 2])
class TestCreation(TestCase):
def test_from_attribute(self):
class x(object):
def __array__(self, dtype=None):
pass
self.assertRaises(ValueError, np.array, x())
def test_from_string(self):
types = np.typecodes['AllInteger'] + np.typecodes['Float']
nstr = ['123', '123']
result = np.array([123, 123], dtype=int)
for type in types:
msg = 'String conversion for %s' % type
assert_equal(np.array(nstr, dtype=type), result, err_msg=msg)
def test_void(self):
arr = np.array([], dtype='V')
assert_equal(arr.dtype.kind, 'V')
def test_zeros(self):
types = np.typecodes['AllInteger'] + np.typecodes['AllFloat']
for dt in types:
d = np.zeros((13,), dtype=dt)
assert_equal(np.count_nonzero(d), 0)
            # true for IEEE floats
assert_equal(d.sum(), 0)
assert_(not d.any())
d = np.zeros(2, dtype='(2,4)i4')
assert_equal(np.count_nonzero(d), 0)
assert_equal(d.sum(), 0)
assert_(not d.any())
d = np.zeros(2, dtype='4i4')
assert_equal(np.count_nonzero(d), 0)
assert_equal(d.sum(), 0)
assert_(not d.any())
d = np.zeros(2, dtype='(2,4)i4, (2,4)i4')
assert_equal(np.count_nonzero(d), 0)
@dec.slow
def test_zeros_big(self):
        # test big arrays, as they might be allocated differently by the system
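        # (allocations this large are typically served straight from mmap'ed zero pages)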
types = np.typecodes['AllInteger'] + np.typecodes['AllFloat']
for dt in types:
d = np.zeros((30 * 1024**2,), dtype=dt)
assert_(not d.any())
def test_zeros_obj(self):
# test initialization from PyLong(0)
d = np.zeros((13,), dtype=object)
assert_array_equal(d, [0] * 13)
assert_equal(np.count_nonzero(d), 0)
def test_zeros_obj_obj(self):
d = np.zeros(10, dtype=[('k', object, 2)])
assert_array_equal(d['k'], 0)
def test_zeros_like_like_zeros(self):
# test zeros_like returns the same as zeros
for c in np.typecodes['All']:
if c == 'V':
continue
d = np.zeros((3,3), dtype=c)
assert_array_equal(np.zeros_like(d), d)
assert_equal(np.zeros_like(d).dtype, d.dtype)
# explicitly check some special cases
d = np.zeros((3,3), dtype='S5')
assert_array_equal(np.zeros_like(d), d)
assert_equal(np.zeros_like(d).dtype, d.dtype)
d = np.zeros((3,3), dtype='U5')
assert_array_equal(np.zeros_like(d), d)
assert_equal(np.zeros_like(d).dtype, d.dtype)
d = np.zeros((3,3), dtype='<i4')
assert_array_equal(np.zeros_like(d), d)
assert_equal(np.zeros_like(d).dtype, d.dtype)
d = np.zeros((3,3), dtype='>i4')
assert_array_equal(np.zeros_like(d), d)
assert_equal(np.zeros_like(d).dtype, d.dtype)
d = np.zeros((3,3), dtype='<M8[s]')
assert_array_equal(np.zeros_like(d), d)
assert_equal(np.zeros_like(d).dtype, d.dtype)
d = np.zeros((3,3), dtype='>M8[s]')
assert_array_equal(np.zeros_like(d), d)
assert_equal(np.zeros_like(d).dtype, d.dtype)
d = np.zeros((3,3), dtype='f4,f4')
assert_array_equal(np.zeros_like(d), d)
assert_equal(np.zeros_like(d).dtype, d.dtype)
def test_empty_unicode(self):
# don't throw decode errors on garbage memory
for i in range(5, 100, 5):
d = np.empty(i, dtype='U')
str(d)
def test_sequence_non_homogenous(self):
assert_equal(np.array([4, 2**80]).dtype, np.object)
assert_equal(np.array([4, 2**80, 4]).dtype, np.object)
assert_equal(np.array([2**80, 4]).dtype, np.object)
assert_equal(np.array([2**80] * 3).dtype, np.object)
assert_equal(np.array([[1, 1],[1j, 1j]]).dtype, np.complex)
assert_equal(np.array([[1j, 1j],[1, 1]]).dtype, np.complex)
assert_equal(np.array([[1, 1, 1],[1, 1j, 1.], [1, 1, 1]]).dtype, np.complex)
@dec.skipif(sys.version_info[0] >= 3)
def test_sequence_long(self):
assert_equal(np.array([long(4), long(4)]).dtype, np.long)
assert_equal(np.array([long(4), 2**80]).dtype, np.object)
assert_equal(np.array([long(4), 2**80, long(4)]).dtype, np.object)
assert_equal(np.array([2**80, long(4)]).dtype, np.object)
def test_non_sequence_sequence(self):
"""Should not segfault.
Class Fail breaks the sequence protocol for new style classes, i.e.,
those derived from object. Class Map is a mapping type indicated by
        raising a KeyError. At some point we may raise a warning instead
of an error in the Fail case.
"""
class Fail(object):
def __len__(self):
return 1
def __getitem__(self, index):
raise ValueError()
class Map(object):
def __len__(self):
return 1
def __getitem__(self, index):
raise KeyError()
a = np.array([Map()])
assert_(a.shape == (1,))
assert_(a.dtype == np.dtype(object))
assert_raises(ValueError, np.array, [Fail()])
def test_no_len_object_type(self):
# gh-5100, want object array from iterable object without len()
class Point2:
def __init__(self):
pass
def __getitem__(self, ind):
if ind in [0, 1]:
return ind
else:
raise IndexError()
d = np.array([Point2(), Point2(), Point2()])
assert_equal(d.dtype, np.dtype(object))
class TestStructured(TestCase):
def test_subarray_field_access(self):
a = np.zeros((3, 5), dtype=[('a', ('i4', (2, 2)))])
a['a'] = np.arange(60).reshape(3, 5, 2, 2)
# Since the subarray is always in C-order, a transpose
# does not swap the subarray:
assert_array_equal(a.T['a'], a['a'].transpose(1, 0, 2, 3))
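        # e.g. a['a'].shape == (3, 5, 2, 2) while a.T['a'].shape == (5, 3, 2, 2):
        # only the array axes transpose, the trailing (2, 2) subarray axes stay put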
# In Fortran order, the subarray gets appended
# like in all other cases, not prepended as a special case
b = a.copy(order='F')
assert_equal(a['a'].shape, b['a'].shape)
assert_equal(a.T['a'].shape, a.T.copy()['a'].shape)
def test_subarray_comparison(self):
# Check that comparisons between record arrays with
# multi-dimensional field types work properly
a = np.rec.fromrecords(
[([1, 2, 3], 'a', [[1, 2], [3, 4]]), ([3, 3, 3], 'b', [[0, 0], [0, 0]])],
dtype=[('a', ('f4', 3)), ('b', np.object), ('c', ('i4', (2, 2)))])
b = a.copy()
assert_equal(a == b, [True, True])
assert_equal(a != b, [False, False])
b[1].b = 'c'
assert_equal(a == b, [True, False])
assert_equal(a != b, [False, True])
for i in range(3):
b[0].a = a[0].a
b[0].a[i] = 5
assert_equal(a == b, [False, False])
assert_equal(a != b, [True, True])
for i in range(2):
for j in range(2):
b = a.copy()
b[0].c[i, j] = 10
assert_equal(a == b, [False, True])
assert_equal(a != b, [True, False])
# Check that broadcasting with a subarray works
a = np.array([[(0,)], [(1,)]], dtype=[('a', 'f8')])
b = np.array([(0,), (0,), (1,)], dtype=[('a', 'f8')])
assert_equal(a == b, [[True, True, False], [False, False, True]])
assert_equal(b == a, [[True, True, False], [False, False, True]])
a = np.array([[(0,)], [(1,)]], dtype=[('a', 'f8', (1,))])
b = np.array([(0,), (0,), (1,)], dtype=[('a', 'f8', (1,))])
assert_equal(a == b, [[True, True, False], [False, False, True]])
assert_equal(b == a, [[True, True, False], [False, False, True]])
a = np.array([[([0, 0],)], [([1, 1],)]], dtype=[('a', 'f8', (2,))])
b = np.array([([0, 0],), ([0, 1],), ([1, 1],)], dtype=[('a', 'f8', (2,))])
assert_equal(a == b, [[True, False, False], [False, False, True]])
assert_equal(b == a, [[True, False, False], [False, False, True]])
        # Check that broadcasting Fortran-style arrays with a subarray works
a = np.array([[([0, 0],)], [([1, 1],)]], dtype=[('a', 'f8', (2,))], order='F')
b = np.array([([0, 0],), ([0, 1],), ([1, 1],)], dtype=[('a', 'f8', (2,))])
assert_equal(a == b, [[True, False, False], [False, False, True]])
assert_equal(b == a, [[True, False, False], [False, False, True]])
        # Check that incompatible sub-array shapes don't result in broadcasting
x = np.zeros((1,), dtype=[('a', ('f4', (1, 2))), ('b', 'i1')])
y = np.zeros((1,), dtype=[('a', ('f4', (2,))), ('b', 'i1')])
# This comparison invokes deprecated behaviour, and will probably
# start raising an error eventually. What we really care about in this
# test is just that it doesn't return True.
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=DeprecationWarning)
assert_equal(x == y, False)
x = np.zeros((1,), dtype=[('a', ('f4', (2, 1))), ('b', 'i1')])
y = np.zeros((1,), dtype=[('a', ('f4', (2,))), ('b', 'i1')])
# This comparison invokes deprecated behaviour, and will probably
# start raising an error eventually. What we really care about in this
# test is just that it doesn't return True.
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=DeprecationWarning)
assert_equal(x == y, False)
# Check that structured arrays that are different only in
# byte-order work
a = np.array([(5, 42), (10, 1)], dtype=[('a', '>i8'), ('b', '<f8')])
b = np.array([(5, 43), (10, 1)], dtype=[('a', '<i8'), ('b', '>f8')])
assert_equal(a == b, [False, True])
def test_casting(self):
# Check that casting a structured array to change its byte order
# works
a = np.array([(1,)], dtype=[('a', '<i4')])
assert_(np.can_cast(a.dtype, [('a', '>i4')], casting='unsafe'))
b = a.astype([('a', '>i4')])
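        # byteswap() flips the raw bytes and newbyteorder() flips the dtype's
        # declared order, so the logical values are unchanged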
assert_equal(b, a.byteswap().newbyteorder())
assert_equal(a['a'][0], b['a'][0])
# Check that equality comparison works on structured arrays if
# they are 'equiv'-castable
a = np.array([(5, 42), (10, 1)], dtype=[('a', '>i4'), ('b', '<f8')])
b = np.array([(42, 5), (1, 10)], dtype=[('b', '>f8'), ('a', '<i4')])
assert_(np.can_cast(a.dtype, b.dtype, casting='equiv'))
assert_equal(a == b, [True, True])
# Check that 'equiv' casting can reorder fields and change byte
# order
assert_(np.can_cast(a.dtype, b.dtype, casting='equiv'))
c = a.astype(b.dtype, casting='equiv')
assert_equal(a == c, [True, True])
# Check that 'safe' casting can change byte order and up-cast
# fields
t = [('a', '<i8'), ('b', '>f8')]
assert_(np.can_cast(a.dtype, t, casting='safe'))
c = a.astype(t, casting='safe')
assert_equal((c == np.array([(5, 42), (10, 1)], dtype=t)),
[True, True])
# Check that 'same_kind' casting can change byte order and
# change field widths within a "kind"
t = [('a', '<i4'), ('b', '>f4')]
assert_(np.can_cast(a.dtype, t, casting='same_kind'))
c = a.astype(t, casting='same_kind')
assert_equal((c == np.array([(5, 42), (10, 1)], dtype=t)),
[True, True])
# Check that casting fails if the casting rule should fail on
# any of the fields
t = [('a', '>i8'), ('b', '<f4')]
assert_(not np.can_cast(a.dtype, t, casting='safe'))
assert_raises(TypeError, a.astype, t, casting='safe')
t = [('a', '>i2'), ('b', '<f8')]
assert_(not np.can_cast(a.dtype, t, casting='equiv'))
assert_raises(TypeError, a.astype, t, casting='equiv')
t = [('a', '>i8'), ('b', '<i2')]
assert_(not np.can_cast(a.dtype, t, casting='same_kind'))
assert_raises(TypeError, a.astype, t, casting='same_kind')
assert_(not np.can_cast(a.dtype, b.dtype, casting='no'))
assert_raises(TypeError, a.astype, b.dtype, casting='no')
# Check that non-'unsafe' casting can't change the set of field names
for casting in ['no', 'safe', 'equiv', 'same_kind']:
t = [('a', '>i4')]
assert_(not np.can_cast(a.dtype, t, casting=casting))
t = [('a', '>i4'), ('b', '<f8'), ('c', 'i4')]
assert_(not np.can_cast(a.dtype, t, casting=casting))
def test_objview(self):
# https://github.com/numpy/numpy/issues/3286
a = np.array([], dtype=[('a', 'f'), ('b', 'f'), ('c', 'O')])
a[['a', 'b']] # TypeError?
# https://github.com/numpy/numpy/issues/3253
dat2 = np.zeros(3, [('A', 'i'), ('B', '|O')])
dat2[['B', 'A']] # TypeError?
def test_setfield(self):
# https://github.com/numpy/numpy/issues/3126
struct_dt = np.dtype([('elem', 'i4', 5),])
dt = np.dtype([('field', 'i4', 10),('struct', struct_dt)])
x = np.zeros(1, dt)
x[0]['field'] = np.ones(10, dtype='i4')
x[0]['struct'] = np.ones(1, dtype=struct_dt)
assert_equal(x[0]['field'], np.ones(10, dtype='i4'))
def test_setfield_object(self):
# make sure object field assignment with ndarray value
# on void scalar mimics setitem behavior
b = np.zeros(1, dtype=[('x', 'O')])
# next line should work identically to b['x'][0] = np.arange(3)
b[0]['x'] = np.arange(3)
assert_equal(b[0]['x'], np.arange(3))
        # check that the broadcasting check still works
c = np.zeros(1, dtype=[('x', 'O', 5)])
def testassign():
c[0]['x'] = np.arange(3)
assert_raises(ValueError, testassign)
class TestBool(TestCase):
def test_test_interning(self):
a0 = np.bool_(0)
b0 = np.bool_(False)
self.assertTrue(a0 is b0)
a1 = np.bool_(1)
b1 = np.bool_(True)
self.assertTrue(a1 is b1)
self.assertTrue(np.array([True])[0] is a1)
self.assertTrue(np.array(True)[()] is a1)
def test_sum(self):
d = np.ones(101, dtype=np.bool)
assert_equal(d.sum(), d.size)
assert_equal(d[::2].sum(), d[::2].size)
assert_equal(d[::-2].sum(), d[::-2].size)
d = np.frombuffer(b'\xff\xff' * 100, dtype=bool)
assert_equal(d.sum(), d.size)
assert_equal(d[::2].sum(), d[::2].size)
assert_equal(d[::-2].sum(), d[::-2].size)
def check_count_nonzero(self, power, length):
powers = [2 ** i for i in range(length)]
for i in range(2**power):
l = [(i & x) != 0 for x in powers]
a = np.array(l, dtype=np.bool)
c = builtins.sum(l)
self.assertEqual(np.count_nonzero(a), c)
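            # view the bool data as uint8 and plant byte values other than 0/1
            # to check that count_nonzero handles arbitrary nonzero bytes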
av = a.view(np.uint8)
av *= 3
self.assertEqual(np.count_nonzero(a), c)
av *= 4
self.assertEqual(np.count_nonzero(a), c)
av[av != 0] = 0xFF
self.assertEqual(np.count_nonzero(a), c)
def test_count_nonzero(self):
# check all 12 bit combinations in a length 17 array
# covers most cases of the 16 byte unrolled code
self.check_count_nonzero(12, 17)
@dec.slow
def test_count_nonzero_all(self):
# check all combinations in a length 17 array
# covers all cases of the 16 byte unrolled code
self.check_count_nonzero(17, 17)
def test_count_nonzero_unaligned(self):
# prevent mistakes as e.g. gh-4060
for o in range(7):
a = np.zeros((18,), dtype=np.bool)[o+1:]
a[:o] = True
self.assertEqual(np.count_nonzero(a), builtins.sum(a.tolist()))
a = np.ones((18,), dtype=np.bool)[o+1:]
a[:o] = False
self.assertEqual(np.count_nonzero(a), builtins.sum(a.tolist()))
class TestMethods(TestCase):
def test_round(self):
def check_round(arr, expected, *round_args):
assert_equal(arr.round(*round_args), expected)
# With output array
out = np.zeros_like(arr)
res = arr.round(*round_args, out=out)
assert_equal(out, expected)
assert_equal(out, res)
check_round(np.array([1.2, 1.5]), [1, 2])
check_round(np.array(1.5), 2)
check_round(np.array([12.2, 15.5]), [10, 20], -1)
check_round(np.array([12.15, 15.51]), [12.2, 15.5], 1)
# Complex rounding
check_round(np.array([4.5 + 1.5j]), [4 + 2j])
check_round(np.array([12.5 + 15.5j]), [10 + 20j], -1)
def test_transpose(self):
a = np.array([[1, 2], [3, 4]])
assert_equal(a.transpose(), [[1, 3], [2, 4]])
self.assertRaises(ValueError, lambda: a.transpose(0))
self.assertRaises(ValueError, lambda: a.transpose(0, 0))
self.assertRaises(ValueError, lambda: a.transpose(0, 1, 2))
def test_sort(self):
# test ordering for floats and complex containing nans. It is only
# necessary to check the lessthan comparison, so sorts that
# only follow the insertion sort path are sufficient. We only
# test doubles and complex doubles as the logic is the same.
# check doubles
msg = "Test real sort order with nans"
a = np.array([np.nan, 1, 0])
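        # nans sort to the end, so the sorted order [0, 1, nan] equals a[::-1]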
b = np.sort(a)
assert_equal(b, a[::-1], msg)
# check complex
msg = "Test complex sort order with nans"
a = np.zeros(9, dtype=np.complex128)
a.real += [np.nan, np.nan, np.nan, 1, 0, 1, 1, 0, 0]
a.imag += [np.nan, 1, 0, np.nan, np.nan, 1, 0, 1, 0]
b = np.sort(a)
assert_equal(b, a[::-1], msg)
# all c scalar sorts use the same code with different types
# so it suffices to run a quick check with one type. The number
# of sorted items must be greater than ~50 to check the actual
# algorithm because quick and merge sort fall over to insertion
# sort for small arrays.
a = np.arange(101)
b = a[::-1].copy()
for kind in ['q', 'm', 'h']:
msg = "scalar sort, kind=%s" % kind
c = a.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
c = b.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
# test complex sorts. These use the same code as the scalars
# but the compare function differs.
ai = a*1j + 1
bi = b*1j + 1
for kind in ['q', 'm', 'h']:
msg = "complex sort, real part == 1, kind=%s" % kind
c = ai.copy()
c.sort(kind=kind)
assert_equal(c, ai, msg)
c = bi.copy()
c.sort(kind=kind)
assert_equal(c, ai, msg)
ai = a + 1j
bi = b + 1j
for kind in ['q', 'm', 'h']:
msg = "complex sort, imag part == 1, kind=%s" % kind
c = ai.copy()
c.sort(kind=kind)
assert_equal(c, ai, msg)
c = bi.copy()
c.sort(kind=kind)
assert_equal(c, ai, msg)
# test sorting of complex arrays requiring byte-swapping, gh-5441
        for endianness in '<>':
            for dt in np.typecodes['Complex']:
                arr = np.array([1+3.j, 2+2.j, 3+1.j], dtype=endianness + dt)
c = arr.copy()
c.sort()
msg = 'byte-swapped complex sort, dtype={0}'.format(dt)
assert_equal(c, arr, msg)
# test string sorts.
s = 'aaaaaaaa'
a = np.array([s + chr(i) for i in range(101)])
b = a[::-1].copy()
for kind in ['q', 'm', 'h']:
msg = "string sort, kind=%s" % kind
c = a.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
c = b.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
# test unicode sorts.
s = 'aaaaaaaa'
a = np.array([s + chr(i) for i in range(101)], dtype=np.unicode)
b = a[::-1].copy()
for kind in ['q', 'm', 'h']:
msg = "unicode sort, kind=%s" % kind
c = a.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
c = b.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
# test object array sorts.
a = np.empty((101,), dtype=np.object)
a[:] = list(range(101))
b = a[::-1]
for kind in ['q', 'h', 'm']:
msg = "object sort, kind=%s" % kind
c = a.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
c = b.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
# test record array sorts.
dt = np.dtype([('f', float), ('i', int)])
a = np.array([(i, i) for i in range(101)], dtype=dt)
b = a[::-1]
for kind in ['q', 'h', 'm']:
msg = "object sort, kind=%s" % kind
c = a.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
c = b.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
# test datetime64 sorts.
a = np.arange(0, 101, dtype='datetime64[D]')
b = a[::-1]
for kind in ['q', 'h', 'm']:
msg = "datetime64 sort, kind=%s" % kind
c = a.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
c = b.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
# test timedelta64 sorts.
a = np.arange(0, 101, dtype='timedelta64[D]')
b = a[::-1]
for kind in ['q', 'h', 'm']:
msg = "timedelta64 sort, kind=%s" % kind
c = a.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
c = b.copy()
c.sort(kind=kind)
assert_equal(c, a, msg)
# check axis handling. This should be the same for all type
# specific sorts, so we only check it for one type and one kind
a = np.array([[3, 2], [1, 0]])
b = np.array([[1, 0], [3, 2]])
c = np.array([[2, 3], [0, 1]])
d = a.copy()
d.sort(axis=0)
assert_equal(d, b, "test sort with axis=0")
d = a.copy()
d.sort(axis=1)
assert_equal(d, c, "test sort with axis=1")
d = a.copy()
d.sort()
assert_equal(d, c, "test sort with default axis")
# check axis handling for multidimensional empty arrays
a = np.array([])
a.shape = (3, 2, 1, 0)
for axis in range(-a.ndim, a.ndim):
msg = 'test empty array sort with axis={0}'.format(axis)
assert_equal(np.sort(a, axis=axis), a, msg)
msg = 'test empty array sort with axis=None'
assert_equal(np.sort(a, axis=None), a.ravel(), msg)
def test_copy(self):
def assert_fortran(arr):
assert_(arr.flags.fortran)
assert_(arr.flags.f_contiguous)
assert_(not arr.flags.c_contiguous)
def assert_c(arr):
assert_(not arr.flags.fortran)
assert_(not arr.flags.f_contiguous)
assert_(arr.flags.c_contiguous)
a = np.empty((2, 2), order='F')
# Test copying a Fortran array
assert_c(a.copy())
assert_c(a.copy('C'))
assert_fortran(a.copy('F'))
assert_fortran(a.copy('A'))
# Now test starting with a C array.
a = np.empty((2, 2), order='C')
assert_c(a.copy())
assert_c(a.copy('C'))
assert_fortran(a.copy('F'))
assert_c(a.copy('A'))
def test_sort_order(self):
# Test sorting an array with fields
x1 = np.array([21, 32, 14])
x2 = np.array(['my', 'first', 'name'])
x3 = np.array([3.1, 4.5, 6.2])
r = np.rec.fromarrays([x1, x2, x3], names='id,word,number')
r.sort(order=['id'])
assert_equal(r.id, np.array([14, 21, 32]))
assert_equal(r.word, np.array(['name', 'my', 'first']))
assert_equal(r.number, np.array([6.2, 3.1, 4.5]))
r.sort(order=['word'])
assert_equal(r.id, np.array([32, 21, 14]))
assert_equal(r.word, np.array(['first', 'my', 'name']))
assert_equal(r.number, np.array([4.5, 3.1, 6.2]))
r.sort(order=['number'])
assert_equal(r.id, np.array([21, 32, 14]))
assert_equal(r.word, np.array(['my', 'first', 'name']))
assert_equal(r.number, np.array([3.1, 4.5, 6.2]))
if sys.byteorder == 'little':
strtype = '>i2'
else:
strtype = '<i2'
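        # the non-native byte order is picked on purpose, so the sort must go
        # through byte-swapped comparisons; strchar (presumably 'S' on py2 and
        # 'U' on py3, imported from numpy.compat) selects the string dtype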
mydtype = [('name', strchar + '5'), ('col2', strtype)]
r = np.array([('a', 1), ('b', 255), ('c', 3), ('d', 258)],
dtype=mydtype)
r.sort(order='col2')
assert_equal(r['col2'], [1, 3, 255, 258])
assert_equal(r, np.array([('a', 1), ('c', 3), ('b', 255), ('d', 258)],
dtype=mydtype))
def test_argsort(self):
# all c scalar argsorts use the same code with different types
# so it suffices to run a quick check with one type. The number
# of sorted items must be greater than ~50 to check the actual
# algorithm because quick and merge sort fall over to insertion
# sort for small arrays.
a = np.arange(101)
b = a[::-1].copy()
for kind in ['q', 'm', 'h']:
msg = "scalar argsort, kind=%s" % kind
assert_equal(a.copy().argsort(kind=kind), a, msg)
assert_equal(b.copy().argsort(kind=kind), b, msg)
# test complex argsorts. These use the same code as the scalars
        # but the compare function differs.
ai = a*1j + 1
bi = b*1j + 1
for kind in ['q', 'm', 'h']:
msg = "complex argsort, kind=%s" % kind
assert_equal(ai.copy().argsort(kind=kind), a, msg)
assert_equal(bi.copy().argsort(kind=kind), b, msg)
ai = a + 1j
bi = b + 1j
for kind in ['q', 'm', 'h']:
msg = "complex argsort, kind=%s" % kind
assert_equal(ai.copy().argsort(kind=kind), a, msg)
assert_equal(bi.copy().argsort(kind=kind), b, msg)
# test argsort of complex arrays requiring byte-swapping, gh-5441
        for endianness in '<>':
            for dt in np.typecodes['Complex']:
                arr = np.array([1+3.j, 2+2.j, 3+1.j], dtype=endianness + dt)
msg = 'byte-swapped complex argsort, dtype={0}'.format(dt)
assert_equal(arr.argsort(),
np.arange(len(arr), dtype=np.intp), msg)
# test string argsorts.
s = 'aaaaaaaa'
a = np.array([s + chr(i) for i in range(101)])
b = a[::-1].copy()
r = np.arange(101)
rr = r[::-1]
for kind in ['q', 'm', 'h']:
msg = "string argsort, kind=%s" % kind
assert_equal(a.copy().argsort(kind=kind), r, msg)
assert_equal(b.copy().argsort(kind=kind), rr, msg)
# test unicode argsorts.
s = 'aaaaaaaa'
a = np.array([s + chr(i) for i in range(101)], dtype=np.unicode)
b = a[::-1]
r = np.arange(101)
rr = r[::-1]
for kind in ['q', 'm', 'h']:
msg = "unicode argsort, kind=%s" % kind
assert_equal(a.copy().argsort(kind=kind), r, msg)
assert_equal(b.copy().argsort(kind=kind), rr, msg)
# test object array argsorts.
a = np.empty((101,), dtype=np.object)
a[:] = list(range(101))
b = a[::-1]
r = np.arange(101)
rr = r[::-1]
for kind in ['q', 'm', 'h']:
msg = "object argsort, kind=%s" % kind
assert_equal(a.copy().argsort(kind=kind), r, msg)
assert_equal(b.copy().argsort(kind=kind), rr, msg)
# test structured array argsorts.
dt = np.dtype([('f', float), ('i', int)])
a = np.array([(i, i) for i in range(101)], dtype=dt)
b = a[::-1]
r = np.arange(101)
rr = r[::-1]
for kind in ['q', 'm', 'h']:
msg = "structured array argsort, kind=%s" % kind
assert_equal(a.copy().argsort(kind=kind), r, msg)
assert_equal(b.copy().argsort(kind=kind), rr, msg)
# test datetime64 argsorts.
a = np.arange(0, 101, dtype='datetime64[D]')
b = a[::-1]
r = np.arange(101)
rr = r[::-1]
for kind in ['q', 'h', 'm']:
msg = "datetime64 argsort, kind=%s" % kind
assert_equal(a.copy().argsort(kind=kind), r, msg)
assert_equal(b.copy().argsort(kind=kind), rr, msg)
# test timedelta64 argsorts.
a = np.arange(0, 101, dtype='timedelta64[D]')
b = a[::-1]
r = np.arange(101)
rr = r[::-1]
for kind in ['q', 'h', 'm']:
msg = "timedelta64 argsort, kind=%s" % kind
assert_equal(a.copy().argsort(kind=kind), r, msg)
assert_equal(b.copy().argsort(kind=kind), rr, msg)
# check axis handling. This should be the same for all type
# specific argsorts, so we only check it for one type and one kind
a = np.array([[3, 2], [1, 0]])
b = np.array([[1, 1], [0, 0]])
c = np.array([[1, 0], [1, 0]])
assert_equal(a.copy().argsort(axis=0), b)
assert_equal(a.copy().argsort(axis=1), c)
assert_equal(a.copy().argsort(), c)
        # using axis=None is known to fail at this point
        # assert_equal(a.copy().argsort(axis=None), c)
# check axis handling for multidimensional empty arrays
a = np.array([])
a.shape = (3, 2, 1, 0)
for axis in range(-a.ndim, a.ndim):
msg = 'test empty array argsort with axis={0}'.format(axis)
assert_equal(np.argsort(a, axis=axis),
np.zeros_like(a, dtype=np.intp), msg)
msg = 'test empty array argsort with axis=None'
assert_equal(np.argsort(a, axis=None),
np.zeros_like(a.ravel(), dtype=np.intp), msg)
# check that stable argsorts are stable
r = np.arange(100)
# scalars
a = np.zeros(100)
assert_equal(a.argsort(kind='m'), r)
# complex
a = np.zeros(100, dtype=np.complex)
assert_equal(a.argsort(kind='m'), r)
# string
a = np.array(['aaaaaaaaa' for i in range(100)])
assert_equal(a.argsort(kind='m'), r)
# unicode
a = np.array(['aaaaaaaaa' for i in range(100)], dtype=np.unicode)
assert_equal(a.argsort(kind='m'), r)
def test_sort_unicode_kind(self):
d = np.arange(10)
k = b'\xc3\xa4'.decode("UTF8")
assert_raises(ValueError, d.sort, kind=k)
assert_raises(ValueError, d.argsort, kind=k)
def test_searchsorted(self):
# test for floats and complex containing nans. The logic is the
# same for all float types so only test double types for now.
# The search sorted routines use the compare functions for the
# array type, so this checks if that is consistent with the sort
# order.
# check double
a = np.array([0, 1, np.nan])
msg = "Test real searchsorted with nans, side='l'"
b = a.searchsorted(a, side='l')
assert_equal(b, np.arange(3), msg)
msg = "Test real searchsorted with nans, side='r'"
b = a.searchsorted(a, side='r')
assert_equal(b, np.arange(1, 4), msg)
# check double complex
a = np.zeros(9, dtype=np.complex128)
a.real += [0, 0, 1, 1, 0, 1, np.nan, np.nan, np.nan]
a.imag += [0, 1, 0, 1, np.nan, np.nan, 0, 1, np.nan]
msg = "Test complex searchsorted with nans, side='l'"
b = a.searchsorted(a, side='l')
assert_equal(b, np.arange(9), msg)
msg = "Test complex searchsorted with nans, side='r'"
b = a.searchsorted(a, side='r')
assert_equal(b, np.arange(1, 10), msg)
msg = "Test searchsorted with little endian, side='l'"
a = np.array([0, 128], dtype='<i4')
b = a.searchsorted(np.array(128, dtype='<i4'))
assert_equal(b, 1, msg)
msg = "Test searchsorted with big endian, side='l'"
a = np.array([0, 128], dtype='>i4')
b = a.searchsorted(np.array(128, dtype='>i4'))
assert_equal(b, 1, msg)
# Check 0 elements
a = np.ones(0)
b = a.searchsorted([0, 1, 2], 'l')
assert_equal(b, [0, 0, 0])
b = a.searchsorted([0, 1, 2], 'r')
assert_equal(b, [0, 0, 0])
a = np.ones(1)
# Check 1 element
b = a.searchsorted([0, 1, 2], 'l')
assert_equal(b, [0, 0, 1])
b = a.searchsorted([0, 1, 2], 'r')
assert_equal(b, [0, 1, 1])
# Check all elements equal
a = np.ones(2)
b = a.searchsorted([0, 1, 2], 'l')
assert_equal(b, [0, 0, 2])
b = a.searchsorted([0, 1, 2], 'r')
assert_equal(b, [0, 2, 2])
# Test searching unaligned array
a = np.arange(10)
aligned = np.empty(a.itemsize * a.size + 1, 'uint8')
unaligned = aligned[1:].view(a.dtype)
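        # viewing one byte into a uint8 buffer yields an array that is
        # misaligned for the element type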
unaligned[:] = a
        # Test searching in the unaligned array
b = unaligned.searchsorted(a, 'l')
assert_equal(b, a)
b = unaligned.searchsorted(a, 'r')
assert_equal(b, a + 1)
# Test searching for unaligned keys
b = a.searchsorted(unaligned, 'l')
assert_equal(b, a)
b = a.searchsorted(unaligned, 'r')
assert_equal(b, a + 1)
# Test smart resetting of binsearch indices
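        # (presumably: consecutive keys reuse the previous hit to narrow the
        # search window, so a key smaller than the last one must reset it)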
a = np.arange(5)
b = a.searchsorted([6, 5, 4], 'l')
assert_equal(b, [5, 5, 4])
b = a.searchsorted([6, 5, 4], 'r')
assert_equal(b, [5, 5, 5])
# Test all type specific binary search functions
types = ''.join((np.typecodes['AllInteger'], np.typecodes['AllFloat'],
np.typecodes['Datetime'], '?O'))
for dt in types:
if dt == 'M':
dt = 'M8[D]'
if dt == '?':
a = np.arange(2, dtype=dt)
out = np.arange(2)
else:
a = np.arange(0, 5, dtype=dt)
out = np.arange(5)
b = a.searchsorted(a, 'l')
assert_equal(b, out)
b = a.searchsorted(a, 'r')
assert_equal(b, out + 1)
def test_searchsorted_unicode(self):
# Test searchsorted on unicode strings.
# 1.6.1 contained a string length miscalculation in
# arraytypes.c.src:UNICODE_compare() which manifested as
# incorrect/inconsistent results from searchsorted.
a = np.array(['P:\\20x_dapi_cy3\\20x_dapi_cy3_20100185_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100186_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100187_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100189_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100190_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100191_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100192_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100193_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100194_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100195_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100196_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100197_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100198_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100199_1'],
dtype=np.unicode)
ind = np.arange(len(a))
assert_equal([a.searchsorted(v, 'left') for v in a], ind)
assert_equal([a.searchsorted(v, 'right') for v in a], ind + 1)
assert_equal([a.searchsorted(a[i], 'left') for i in ind], ind)
assert_equal([a.searchsorted(a[i], 'right') for i in ind], ind + 1)
def test_searchsorted_with_sorter(self):
a = np.array([5, 2, 1, 3, 4])
s = np.argsort(a)
assert_raises(TypeError, np.searchsorted, a, 0, sorter=(1, (2, 3)))
assert_raises(TypeError, np.searchsorted, a, 0, sorter=[1.1])
assert_raises(ValueError, np.searchsorted, a, 0, sorter=[1, 2, 3, 4])
assert_raises(ValueError, np.searchsorted, a, 0, sorter=[1, 2, 3, 4, 5, 6])
# bounds check
assert_raises(ValueError, np.searchsorted, a, 4, sorter=[0, 1, 2, 3, 5])
assert_raises(ValueError, np.searchsorted, a, 0, sorter=[-1, 0, 1, 2, 3])
assert_raises(ValueError, np.searchsorted, a, 0, sorter=[4, 0, -1, 2, 3])
a = np.random.rand(300)
s = a.argsort()
b = np.sort(a)
k = np.linspace(0, 1, 20)
assert_equal(b.searchsorted(k), a.searchsorted(k, sorter=s))
a = np.array([0, 1, 2, 3, 5]*20)
s = a.argsort()
k = [0, 1, 2, 3, 5]
expected = [0, 20, 40, 60, 80]
assert_equal(a.searchsorted(k, side='l', sorter=s), expected)
expected = [20, 40, 60, 80, 100]
assert_equal(a.searchsorted(k, side='r', sorter=s), expected)
# Test searching unaligned array
keys = np.arange(10)
a = keys.copy()
        np.random.shuffle(a)
s = a.argsort()
aligned = np.empty(a.itemsize * a.size + 1, 'uint8')
unaligned = aligned[1:].view(a.dtype)
        # Test searching in the unaligned array
unaligned[:] = a
b = unaligned.searchsorted(keys, 'l', s)
assert_equal(b, keys)
b = unaligned.searchsorted(keys, 'r', s)
assert_equal(b, keys + 1)
# Test searching for unaligned keys
unaligned[:] = keys
b = a.searchsorted(unaligned, 'l', s)
assert_equal(b, keys)
b = a.searchsorted(unaligned, 'r', s)
assert_equal(b, keys + 1)
# Test all type specific indirect binary search functions
types = ''.join((np.typecodes['AllInteger'], np.typecodes['AllFloat'],
np.typecodes['Datetime'], '?O'))
for dt in types:
if dt == 'M':
dt = 'M8[D]'
if dt == '?':
a = np.array([1, 0], dtype=dt)
# We want the sorter array to be of a type that is different
# from np.intp in all platforms, to check for #4698
s = np.array([1, 0], dtype=np.int16)
out = np.array([1, 0])
else:
a = np.array([3, 4, 1, 2, 0], dtype=dt)
# We want the sorter array to be of a type that is different
# from np.intp in all platforms, to check for #4698
s = np.array([4, 2, 3, 0, 1], dtype=np.int16)
out = np.array([3, 4, 1, 2, 0], dtype=np.intp)
b = a.searchsorted(a, 'l', s)
assert_equal(b, out)
b = a.searchsorted(a, 'r', s)
assert_equal(b, out + 1)
# Test non-contiguous sorter array
a = np.array([3, 4, 1, 2, 0])
srt = np.empty((10,), dtype=np.intp)
srt[1::2] = -1
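        # poison the gaps: a stride bug in the sorter handling would pick up
        # the -1 entries as indices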
srt[::2] = [4, 2, 3, 0, 1]
s = srt[::2]
out = np.array([3, 4, 1, 2, 0], dtype=np.intp)
b = a.searchsorted(a, 'l', s)
assert_equal(b, out)
b = a.searchsorted(a, 'r', s)
assert_equal(b, out + 1)
def test_searchsorted_return_type(self):
# Functions returning indices should always return base ndarrays
class A(np.ndarray):
pass
a = np.arange(5).view(A)
b = np.arange(1, 3).view(A)
s = np.arange(5).view(A)
assert_(not isinstance(a.searchsorted(b, 'l'), A))
assert_(not isinstance(a.searchsorted(b, 'r'), A))
assert_(not isinstance(a.searchsorted(b, 'l', s), A))
assert_(not isinstance(a.searchsorted(b, 'r', s), A))
def test_argpartition_out_of_range(self):
# Test out of range values in kth raise an error, gh-5469
d = np.arange(10)
assert_raises(ValueError, d.argpartition, 10)
assert_raises(ValueError, d.argpartition, -11)
# Test also for generic type argpartition, which uses sorting
# and used to not bound check kth
d_obj = np.arange(10, dtype=object)
assert_raises(ValueError, d_obj.argpartition, 10)
assert_raises(ValueError, d_obj.argpartition, -11)
def test_partition_out_of_range(self):
# Test out of range values in kth raise an error, gh-5469
d = np.arange(10)
assert_raises(ValueError, d.partition, 10)
assert_raises(ValueError, d.partition, -11)
# Test also for generic type partition, which uses sorting
# and used to not bound check kth
d_obj = np.arange(10, dtype=object)
assert_raises(ValueError, d_obj.partition, 10)
assert_raises(ValueError, d_obj.partition, -11)
def test_partition_empty_array(self):
# check axis handling for multidimensional empty arrays
a = np.array([])
a.shape = (3, 2, 1, 0)
for axis in range(-a.ndim, a.ndim):
msg = 'test empty array partition with axis={0}'.format(axis)
assert_equal(np.partition(a, 0, axis=axis), a, msg)
msg = 'test empty array partition with axis=None'
assert_equal(np.partition(a, 0, axis=None), a.ravel(), msg)
def test_argpartition_empty_array(self):
# check axis handling for multidimensional empty arrays
a = np.array([])
a.shape = (3, 2, 1, 0)
for axis in range(-a.ndim, a.ndim):
msg = 'test empty array argpartition with axis={0}'.format(axis)
            assert_equal(np.argpartition(a, 0, axis=axis),
                         np.zeros_like(a, dtype=np.intp), msg)
msg = 'test empty array argpartition with axis=None'
        assert_equal(np.argpartition(a, 0, axis=None),
                     np.zeros_like(a.ravel(), dtype=np.intp), msg)
def test_partition(self):
d = np.arange(10)
assert_raises(TypeError, np.partition, d, 2, kind=1)
assert_raises(ValueError, np.partition, d, 2, kind="nonsense")
assert_raises(ValueError, np.argpartition, d, 2, kind="nonsense")
assert_raises(ValueError, d.partition, 2, axis=0, kind="nonsense")
assert_raises(ValueError, d.argpartition, 2, axis=0, kind="nonsense")
for k in ("introselect",):
d = np.array([])
assert_array_equal(np.partition(d, 0, kind=k), d)
assert_array_equal(np.argpartition(d, 0, kind=k), d)
d = np.ones((1))
assert_array_equal(np.partition(d, 0, kind=k)[0], d)
assert_array_equal(d[np.argpartition(d, 0, kind=k)],
np.partition(d, 0, kind=k))
# kth not modified
kth = np.array([30, 15, 5])
okth = kth.copy()
np.partition(np.arange(40), kth)
assert_array_equal(kth, okth)
for r in ([2, 1], [1, 2], [1, 1]):
d = np.array(r)
tgt = np.sort(d)
assert_array_equal(np.partition(d, 0, kind=k)[0], tgt[0])
assert_array_equal(np.partition(d, 1, kind=k)[1], tgt[1])
assert_array_equal(d[np.argpartition(d, 0, kind=k)],
np.partition(d, 0, kind=k))
assert_array_equal(d[np.argpartition(d, 1, kind=k)],
np.partition(d, 1, kind=k))
for i in range(d.size):
d[i:].partition(0, kind=k)
assert_array_equal(d, tgt)
for r in ([3, 2, 1], [1, 2, 3], [2, 1, 3], [2, 3, 1],
[1, 1, 1], [1, 2, 2], [2, 2, 1], [1, 2, 1]):
d = np.array(r)
tgt = np.sort(d)
assert_array_equal(np.partition(d, 0, kind=k)[0], tgt[0])
assert_array_equal(np.partition(d, 1, kind=k)[1], tgt[1])
assert_array_equal(np.partition(d, 2, kind=k)[2], tgt[2])
assert_array_equal(d[np.argpartition(d, 0, kind=k)],
np.partition(d, 0, kind=k))
assert_array_equal(d[np.argpartition(d, 1, kind=k)],
np.partition(d, 1, kind=k))
assert_array_equal(d[np.argpartition(d, 2, kind=k)],
np.partition(d, 2, kind=k))
for i in range(d.size):
d[i:].partition(0, kind=k)
assert_array_equal(d, tgt)
d = np.ones((50))
assert_array_equal(np.partition(d, 0, kind=k), d)
assert_array_equal(d[np.argpartition(d, 0, kind=k)],
np.partition(d, 0, kind=k))
# sorted
d = np.arange((49))
self.assertEqual(np.partition(d, 5, kind=k)[5], 5)
self.assertEqual(np.partition(d, 15, kind=k)[15], 15)
assert_array_equal(d[np.argpartition(d, 5, kind=k)],
np.partition(d, 5, kind=k))
assert_array_equal(d[np.argpartition(d, 15, kind=k)],
np.partition(d, 15, kind=k))
# rsorted
d = np.arange((47))[::-1]
self.assertEqual(np.partition(d, 6, kind=k)[6], 6)
self.assertEqual(np.partition(d, 16, kind=k)[16], 16)
assert_array_equal(d[np.argpartition(d, 6, kind=k)],
np.partition(d, 6, kind=k))
assert_array_equal(d[np.argpartition(d, 16, kind=k)],
np.partition(d, 16, kind=k))
assert_array_equal(np.partition(d, -6, kind=k),
np.partition(d, 41, kind=k))
assert_array_equal(np.partition(d, -16, kind=k),
np.partition(d, 31, kind=k))
assert_array_equal(d[np.argpartition(d, -6, kind=k)],
np.partition(d, 41, kind=k))
        # median-of-3 killer sequence: O(n^2) for quickselect with a pure
        # median-of-3 pivot; exercises the median-of-median-of-5 code used
        # to keep the worst case O(n)
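        # (rolling the sorted range by half is presumably enough to defeat a
        # plain median-of-3 pivot choice)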
d = np.arange(1000000)
x = np.roll(d, d.size // 2)
mid = x.size // 2 + 1
assert_equal(np.partition(x, mid)[mid], mid)
d = np.arange(1000001)
x = np.roll(d, d.size // 2 + 1)
mid = x.size // 2 + 1
assert_equal(np.partition(x, mid)[mid], mid)
# max
d = np.ones(10)
d[1] = 4
assert_equal(np.partition(d, (2, -1))[-1], 4)
assert_equal(np.partition(d, (2, -1))[2], 1)
assert_equal(d[np.argpartition(d, (2, -1))][-1], 4)
assert_equal(d[np.argpartition(d, (2, -1))][2], 1)
d[1] = np.nan
assert_(np.isnan(d[np.argpartition(d, (2, -1))][-1]))
assert_(np.isnan(np.partition(d, (2, -1))[-1]))
# equal elements
d = np.arange((47)) % 7
tgt = np.sort(np.arange((47)) % 7)
np.random.shuffle(d)
for i in range(d.size):
self.assertEqual(np.partition(d, i, kind=k)[i], tgt[i])
assert_array_equal(d[np.argpartition(d, 6, kind=k)],
np.partition(d, 6, kind=k))
assert_array_equal(d[np.argpartition(d, 16, kind=k)],
np.partition(d, 16, kind=k))
for i in range(d.size):
d[i:].partition(0, kind=k)
assert_array_equal(d, tgt)
d = np.array([0, 1, 2, 3, 4, 5, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
7, 7, 7, 7, 7, 9])
kth = [0, 3, 19, 20]
assert_equal(np.partition(d, kth, kind=k)[kth], (0, 3, 7, 7))
assert_equal(d[np.argpartition(d, kth, kind=k)][kth], (0, 3, 7, 7))
d = np.array([2, 1])
d.partition(0, kind=k)
assert_raises(ValueError, d.partition, 2)
assert_raises(ValueError, d.partition, 3, axis=1)
assert_raises(ValueError, np.partition, d, 2)
assert_raises(ValueError, np.partition, d, 2, axis=1)
assert_raises(ValueError, d.argpartition, 2)
assert_raises(ValueError, d.argpartition, 3, axis=1)
assert_raises(ValueError, np.argpartition, d, 2)
assert_raises(ValueError, np.argpartition, d, 2, axis=1)
d = np.arange(10).reshape((2, 5))
d.partition(1, axis=0, kind=k)
d.partition(4, axis=1, kind=k)
np.partition(d, 1, axis=0, kind=k)
np.partition(d, 4, axis=1, kind=k)
np.partition(d, 1, axis=None, kind=k)
np.partition(d, 9, axis=None, kind=k)
d.argpartition(1, axis=0, kind=k)
d.argpartition(4, axis=1, kind=k)
np.argpartition(d, 1, axis=0, kind=k)
np.argpartition(d, 4, axis=1, kind=k)
np.argpartition(d, 1, axis=None, kind=k)
np.argpartition(d, 9, axis=None, kind=k)
assert_raises(ValueError, d.partition, 2, axis=0)
assert_raises(ValueError, d.partition, 11, axis=1)
assert_raises(TypeError, d.partition, 2, axis=None)
assert_raises(ValueError, np.partition, d, 9, axis=1)
assert_raises(ValueError, np.partition, d, 11, axis=None)
assert_raises(ValueError, d.argpartition, 2, axis=0)
assert_raises(ValueError, d.argpartition, 11, axis=1)
assert_raises(ValueError, np.argpartition, d, 9, axis=1)
assert_raises(ValueError, np.argpartition, d, 11, axis=None)
td = [(dt, s) for dt in [np.int32, np.float32, np.complex64]
for s in (9, 16)]
for dt, s in td:
aae = assert_array_equal
at = self.assertTrue
d = np.arange(s, dtype=dt)
np.random.shuffle(d)
d1 = np.tile(np.arange(s, dtype=dt), (4, 1))
            # shuffle each row in place; an explicit loop (unlike a bare map(),
            # which is lazy on Python 3) guarantees the shuffles actually run
            for row in d1:
                np.random.shuffle(row)
d0 = np.transpose(d1)
for i in range(d.size):
p = np.partition(d, i, kind=k)
self.assertEqual(p[i], i)
# all before are smaller
assert_array_less(p[:i], p[i])
# all after are larger
assert_array_less(p[i], p[i + 1:])
aae(p, d[np.argpartition(d, i, kind=k)])
p = np.partition(d1, i, axis=1, kind=k)
aae(p[:, i], np.array([i] * d1.shape[0], dtype=dt))
# array_less does not seem to work right
at((p[:, :i].T <= p[:, i]).all(),
msg="%d: %r <= %r" % (i, p[:, i], p[:, :i].T))
at((p[:, i + 1:].T > p[:, i]).all(),
msg="%d: %r < %r" % (i, p[:, i], p[:, i + 1:].T))
aae(p, d1[np.arange(d1.shape[0])[:, None],
np.argpartition(d1, i, axis=1, kind=k)])
p = np.partition(d0, i, axis=0, kind=k)
aae(p[i,:], np.array([i] * d1.shape[0],
dtype=dt))
# array_less does not seem to work right
at((p[:i,:] <= p[i,:]).all(),
msg="%d: %r <= %r" % (i, p[i,:], p[:i,:]))
at((p[i + 1:,:] > p[i,:]).all(),
msg="%d: %r < %r" % (i, p[i,:], p[:, i + 1:]))
aae(p, d0[np.argpartition(d0, i, axis=0, kind=k),
np.arange(d0.shape[1])[None,:]])
# check inplace
dc = d.copy()
dc.partition(i, kind=k)
assert_equal(dc, np.partition(d, i, kind=k))
dc = d0.copy()
dc.partition(i, axis=0, kind=k)
assert_equal(dc, np.partition(d0, i, axis=0, kind=k))
dc = d1.copy()
dc.partition(i, axis=1, kind=k)
assert_equal(dc, np.partition(d1, i, axis=1, kind=k))
def assert_partitioned(self, d, kth):
prev = 0
for k in np.sort(kth):
assert_array_less(d[prev:k], d[k], err_msg='kth %d' % k)
assert_((d[k:] >= d[k]).all(),
msg="kth %d, %r not greater equal %d" % (k, d[k:], d[k]))
prev = k + 1
def test_partition_iterative(self):
d = np.arange(17)
kth = (0, 1, 2, 429, 231)
assert_raises(ValueError, d.partition, kth)
assert_raises(ValueError, d.argpartition, kth)
d = np.arange(10).reshape((2, 5))
assert_raises(ValueError, d.partition, kth, axis=0)
assert_raises(ValueError, d.partition, kth, axis=1)
assert_raises(ValueError, np.partition, d, kth, axis=1)
assert_raises(ValueError, np.partition, d, kth, axis=None)
d = np.array([3, 4, 2, 1])
p = np.partition(d, (0, 3))
self.assert_partitioned(p, (0, 3))
self.assert_partitioned(d[np.argpartition(d, (0, 3))], (0, 3))
assert_array_equal(p, np.partition(d, (-3, -1)))
assert_array_equal(p, d[np.argpartition(d, (-3, -1))])
d = np.arange(17)
np.random.shuffle(d)
d.partition(range(d.size))
assert_array_equal(np.arange(17), d)
np.random.shuffle(d)
assert_array_equal(np.arange(17), d[d.argpartition(range(d.size))])
# test unsorted kth
d = np.arange(17)
np.random.shuffle(d)
keys = np.array([1, 3, 8, -2])
np.random.shuffle(d)
p = np.partition(d, keys)
self.assert_partitioned(p, keys)
p = d[np.argpartition(d, keys)]
self.assert_partitioned(p, keys)
np.random.shuffle(keys)
assert_array_equal(np.partition(d, keys), p)
assert_array_equal(d[np.argpartition(d, keys)], p)
# equal kth
d = np.arange(20)[::-1]
self.assert_partitioned(np.partition(d, [5]*4), [5])
self.assert_partitioned(np.partition(d, [5]*4 + [6, 13]),
[5]*4 + [6, 13])
self.assert_partitioned(d[np.argpartition(d, [5]*4)], [5])
self.assert_partitioned(d[np.argpartition(d, [5]*4 + [6, 13])],
[5]*4 + [6, 13])
d = np.arange(12)
np.random.shuffle(d)
d1 = np.tile(np.arange(12), (4, 1))
        # explicit loop so the shuffles also run on Python 3
        for row in d1:
            np.random.shuffle(row)
d0 = np.transpose(d1)
kth = (1, 6, 7, -1)
p = np.partition(d1, kth, axis=1)
pa = d1[np.arange(d1.shape[0])[:, None],
d1.argpartition(kth, axis=1)]
assert_array_equal(p, pa)
for i in range(d1.shape[0]):
self.assert_partitioned(p[i,:], kth)
p = np.partition(d0, kth, axis=0)
pa = d0[np.argpartition(d0, kth, axis=0),
np.arange(d0.shape[1])[None,:]]
assert_array_equal(p, pa)
for i in range(d0.shape[1]):
self.assert_partitioned(p[:, i], kth)
def test_partition_cdtype(self):
d = np.array([('Galahad', 1.7, 38), ('Arthur', 1.8, 41),
('Lancelot', 1.9, 38)],
dtype=[('name', '|S10'), ('height', '<f8'), ('age', '<i4')])
tgt = np.sort(d, order=['age', 'height'])
assert_array_equal(np.partition(d, range(d.size),
order=['age', 'height']),
tgt)
assert_array_equal(d[np.argpartition(d, range(d.size),
order=['age', 'height'])],
tgt)
for k in range(d.size):
assert_equal(np.partition(d, k, order=['age', 'height'])[k],
tgt[k])
assert_equal(d[np.argpartition(d, k, order=['age', 'height'])][k],
tgt[k])
d = np.array(['Galahad', 'Arthur', 'zebra', 'Lancelot'])
tgt = np.sort(d)
assert_array_equal(np.partition(d, range(d.size)), tgt)
for k in range(d.size):
assert_equal(np.partition(d, k)[k], tgt[k])
assert_equal(d[np.argpartition(d, k)][k], tgt[k])
def test_partition_unicode_kind(self):
d = np.arange(10)
k = b'\xc3\xa4'.decode("UTF8")
assert_raises(ValueError, d.partition, 2, kind=k)
assert_raises(ValueError, d.argpartition, 2, kind=k)
def test_partition_fuzz(self):
# a few rounds of random data testing
for j in range(10, 30):
for i in range(1, j - 2):
d = np.arange(j)
np.random.shuffle(d)
d = d % np.random.randint(2, 30)
idx = np.random.randint(d.size)
kth = [0, idx, i, i + 1]
tgt = np.sort(d)[kth]
assert_array_equal(np.partition(d, kth)[kth], tgt,
err_msg="data: %r\n kth: %r" % (d, kth))
def test_argpartition_gh5524(self):
# A test for functionality of argpartition on lists.
        d = [6, 7, 3, 2, 9, 0]
        p = np.argpartition(d, 1)
        self.assert_partitioned(np.array(d)[p], [1])
def test_flatten(self):
x0 = np.array([[1, 2, 3], [4, 5, 6]], np.int32)
x1 = np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]]], np.int32)
y0 = np.array([1, 2, 3, 4, 5, 6], np.int32)
y0f = np.array([1, 4, 2, 5, 3, 6], np.int32)
y1 = np.array([1, 2, 3, 4, 5, 6, 7, 8], np.int32)
y1f = np.array([1, 5, 3, 7, 2, 6, 4, 8], np.int32)
assert_equal(x0.flatten(), y0)
assert_equal(x0.flatten('F'), y0f)
assert_equal(x0.flatten('F'), x0.T.flatten())
assert_equal(x1.flatten(), y1)
assert_equal(x1.flatten('F'), y1f)
assert_equal(x1.flatten('F'), x1.T.flatten())
def test_dot(self):
a = np.array([[1, 0], [0, 1]])
b = np.array([[0, 1], [1, 0]])
c = np.array([[9, 1], [1, -9]])
assert_equal(np.dot(a, b), a.dot(b))
assert_equal(np.dot(np.dot(a, b), c), a.dot(b).dot(c))
# test passing in an output array
c = np.zeros_like(a)
a.dot(b, c)
assert_equal(c, np.dot(a, b))
# test keyword args
c = np.zeros_like(a)
a.dot(b=b, out=c)
assert_equal(c, np.dot(a, b))
def test_dot_override(self):
class A(object):
def __numpy_ufunc__(self, ufunc, method, pos, inputs, **kwargs):
return "A"
class B(object):
def __numpy_ufunc__(self, ufunc, method, pos, inputs, **kwargs):
return NotImplemented
a = A()
b = B()
c = np.array([[1]])
assert_equal(np.dot(a, b), "A")
assert_equal(c.dot(a), "A")
assert_raises(TypeError, np.dot, b, c)
assert_raises(TypeError, c.dot, b)
def test_diagonal(self):
a = np.arange(12).reshape((3, 4))
assert_equal(a.diagonal(), [0, 5, 10])
assert_equal(a.diagonal(0), [0, 5, 10])
assert_equal(a.diagonal(1), [1, 6, 11])
assert_equal(a.diagonal(-1), [4, 9])
b = np.arange(8).reshape((2, 2, 2))
assert_equal(b.diagonal(), [[0, 6], [1, 7]])
assert_equal(b.diagonal(0), [[0, 6], [1, 7]])
assert_equal(b.diagonal(1), [[2], [3]])
assert_equal(b.diagonal(-1), [[4], [5]])
assert_raises(ValueError, b.diagonal, axis1=0, axis2=0)
assert_equal(b.diagonal(0, 1, 2), [[0, 3], [4, 7]])
assert_equal(b.diagonal(0, 0, 1), [[0, 6], [1, 7]])
assert_equal(b.diagonal(offset=1, axis1=0, axis2=2), [[1], [3]])
# Order of axis argument doesn't matter:
assert_equal(b.diagonal(0, 2, 1), [[0, 3], [4, 7]])
def test_diagonal_view_notwriteable(self):
# this test is only for 1.9, the diagonal view will be
# writeable in 1.10.
a = np.eye(3).diagonal()
assert_(not a.flags.writeable)
assert_(not a.flags.owndata)
a = np.diagonal(np.eye(3))
assert_(not a.flags.writeable)
assert_(not a.flags.owndata)
a = np.diag(np.eye(3))
assert_(not a.flags.writeable)
assert_(not a.flags.owndata)
def test_diagonal_memleak(self):
# Regression test for a bug that crept in at one point
a = np.zeros((100, 100))
assert_(sys.getrefcount(a) < 50)
for i in range(100):
a.diagonal()
assert_(sys.getrefcount(a) < 50)
def test_put(self):
icodes = np.typecodes['AllInteger']
fcodes = np.typecodes['AllFloat']
for dt in icodes + fcodes + 'O':
tgt = np.array([0, 1, 0, 3, 0, 5], dtype=dt)
# test 1-d
a = np.zeros(6, dtype=dt)
a.put([1, 3, 5], [1, 3, 5])
assert_equal(a, tgt)
# test 2-d
a = np.zeros((2, 3), dtype=dt)
a.put([1, 3, 5], [1, 3, 5])
assert_equal(a, tgt.reshape(2, 3))
for dt in '?':
tgt = np.array([False, True, False, True, False, True], dtype=dt)
# test 1-d
a = np.zeros(6, dtype=dt)
a.put([1, 3, 5], [True]*3)
assert_equal(a, tgt)
# test 2-d
a = np.zeros((2, 3), dtype=dt)
a.put([1, 3, 5], [True]*3)
assert_equal(a, tgt.reshape(2, 3))
        # check that the array must be writeable
a = np.zeros(6)
a.flags.writeable = False
assert_raises(ValueError, a.put, [1, 3, 5], [1, 3, 5])
def test_ravel(self):
a = np.array([[0, 1], [2, 3]])
assert_equal(a.ravel(), [0, 1, 2, 3])
assert_(not a.ravel().flags.owndata)
assert_equal(a.ravel('F'), [0, 2, 1, 3])
assert_equal(a.ravel(order='C'), [0, 1, 2, 3])
assert_equal(a.ravel(order='F'), [0, 2, 1, 3])
assert_equal(a.ravel(order='A'), [0, 1, 2, 3])
assert_(not a.ravel(order='A').flags.owndata)
assert_equal(a.ravel(order='K'), [0, 1, 2, 3])
assert_(not a.ravel(order='K').flags.owndata)
assert_equal(a.ravel(), a.reshape(-1))
a = np.array([[0, 1], [2, 3]], order='F')
assert_equal(a.ravel(), [0, 1, 2, 3])
assert_equal(a.ravel(order='A'), [0, 2, 1, 3])
assert_equal(a.ravel(order='K'), [0, 2, 1, 3])
assert_(not a.ravel(order='A').flags.owndata)
assert_(not a.ravel(order='K').flags.owndata)
assert_equal(a.ravel(), a.reshape(-1))
assert_equal(a.ravel(order='A'), a.reshape(-1, order='A'))
a = np.array([[0, 1], [2, 3]])[::-1, :]
assert_equal(a.ravel(), [2, 3, 0, 1])
assert_equal(a.ravel(order='C'), [2, 3, 0, 1])
assert_equal(a.ravel(order='F'), [2, 0, 3, 1])
assert_equal(a.ravel(order='A'), [2, 3, 0, 1])
# 'K' doesn't reverse the axes of negative strides
assert_equal(a.ravel(order='K'), [2, 3, 0, 1])
assert_(a.ravel(order='K').flags.owndata)
        # Not contiguous and 1-sized axis with non-matching stride
a = np.arange(2**3 * 2)[::2]
a = a.reshape(2, 1, 2, 2).swapaxes(-1, -2)
strides = list(a.strides)
strides[1] = 123
a.strides = strides
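        # the second axis has length 1, so its stride never affects addressing;
        # ravel('K') must ignore strides of 1-sized axes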
assert_(np.may_share_memory(a.ravel(order='K'), a))
assert_equal(a.ravel('K'), np.arange(0, 15, 2))
# General case of possible ravel that is not contiguous but
# works and includes a 1-sized axis with non matching stride
a = a.swapaxes(-1, -2) # swap back to C-order
assert_(np.may_share_memory(a.ravel(order='C'), a))
assert_(np.may_share_memory(a.ravel(order='K'), a))
a = a.T # swap all to Fortran order
assert_(np.may_share_memory(a.ravel(order='F'), a))
assert_(np.may_share_memory(a.ravel(order='K'), a))
# Test negative strides:
a = np.arange(4)[::-1].reshape(2, 2)
assert_(np.may_share_memory(a.ravel(order='C'), a))
assert_(np.may_share_memory(a.ravel(order='K'), a))
assert_equal(a.ravel('C'), [3, 2, 1, 0])
assert_equal(a.ravel('K'), [3, 2, 1, 0])
# Test keeporder with weirdly strided 1-sized dims (1-d first stride)
a = np.arange(8)[::2].reshape(1, 2, 2, 1) # neither C, nor F order
strides = list(a.strides)
strides[0] = -12
strides[-1] = 0
a.strides = strides
assert_(np.may_share_memory(a.ravel(order='K'), a))
assert_equal(a.ravel('K'), a.ravel('C'))
# 1-element tidy strides test (NPY_RELAXED_STRIDES_CHECKING):
a = np.array([[1]])
a.strides = (123, 432)
# If the stride is not 8, NPY_RELAXED_STRIDES_CHECKING is messing
# them up on purpose:
if np.ones(1).strides == (8,):
assert_(np.may_share_memory(a.ravel('K'), a))
assert_equal(a.ravel('K').strides, (a.dtype.itemsize,))
for order in ('C', 'F', 'A', 'K'):
# 0-d corner case:
a = np.array(0)
assert_equal(a.ravel(order), [0])
assert_(np.may_share_memory(a.ravel(order), a))
        # Test that certain non-inplace ravels work right (mostly) for 'K':
b = np.arange(2**4 * 2)[::2].reshape(2, 2, 2, 2)
a = b[..., ::2]
assert_equal(a.ravel('K'), [0, 4, 8, 12, 16, 20, 24, 28])
assert_equal(a.ravel('C'), [0, 4, 8, 12, 16, 20, 24, 28])
assert_equal(a.ravel('A'), [0, 4, 8, 12, 16, 20, 24, 28])
assert_equal(a.ravel('F'), [0, 16, 8, 24, 4, 20, 12, 28])
a = b[::2, ...]
assert_equal(a.ravel('K'), [0, 2, 4, 6, 8, 10, 12, 14])
assert_equal(a.ravel('C'), [0, 2, 4, 6, 8, 10, 12, 14])
assert_equal(a.ravel('A'), [0, 2, 4, 6, 8, 10, 12, 14])
assert_equal(a.ravel('F'), [0, 8, 4, 12, 2, 10, 6, 14])
def test_swapaxes(self):
a = np.arange(1*2*3*4).reshape(1, 2, 3, 4).copy()
idx = np.indices(a.shape)
assert_(a.flags['OWNDATA'])
b = a.copy()
# check exceptions
assert_raises(ValueError, a.swapaxes, -5, 0)
assert_raises(ValueError, a.swapaxes, 4, 0)
assert_raises(ValueError, a.swapaxes, 0, -5)
assert_raises(ValueError, a.swapaxes, 0, 4)
for i in range(-4, 4):
for j in range(-4, 4):
for k, src in enumerate((a, b)):
c = src.swapaxes(i, j)
# check shape
shape = list(src.shape)
shape[i] = src.shape[j]
shape[j] = src.shape[i]
assert_equal(c.shape, shape, str((i, j, k)))
# check array contents
i0, i1, i2, i3 = [dim-1 for dim in c.shape]
j0, j1, j2, j3 = [dim-1 for dim in src.shape]
assert_equal(src[idx[j0], idx[j1], idx[j2], idx[j3]],
c[idx[i0], idx[i1], idx[i2], idx[i3]],
str((i, j, k)))
# check a view is always returned, gh-5260
assert_(not c.flags['OWNDATA'], str((i, j, k)))
# check on non-contiguous input array
if k == 1:
b = c
def test_conjugate(self):
a = np.array([1-1j, 1+1j, 23+23.0j])
ac = a.conj()
assert_equal(a.real, ac.real)
assert_equal(a.imag, -ac.imag)
assert_equal(ac, a.conjugate())
assert_equal(ac, np.conjugate(a))
a = np.array([1-1j, 1+1j, 23+23.0j], 'F')
ac = a.conj()
assert_equal(a.real, ac.real)
assert_equal(a.imag, -ac.imag)
assert_equal(ac, a.conjugate())
assert_equal(ac, np.conjugate(a))
a = np.array([1, 2, 3])
ac = a.conj()
assert_equal(a, ac)
assert_equal(ac, a.conjugate())
assert_equal(ac, np.conjugate(a))
a = np.array([1.0, 2.0, 3.0])
ac = a.conj()
assert_equal(a, ac)
assert_equal(ac, a.conjugate())
assert_equal(ac, np.conjugate(a))
a = np.array([1-1j, 1+1j, 1, 2.0], object)
ac = a.conj()
assert_equal(ac, [k.conjugate() for k in a])
assert_equal(ac, a.conjugate())
assert_equal(ac, np.conjugate(a))
a = np.array([1-1j, 1, 2.0, 'f'], object)
assert_raises(AttributeError, lambda: a.conj())
assert_raises(AttributeError, lambda: a.conjugate())
class TestBinop(object):
def test_inplace(self):
# test refcount 1 inplace conversion
assert_array_almost_equal(np.array([0.5]) * np.array([1.0, 2.0]),
[0.5, 1.0])
d = np.array([0.5, 0.5])[::2]
assert_array_almost_equal(d * (d * np.array([1.0, 2.0])),
[0.25, 0.5])
a = np.array([0.5])
b = np.array([0.5])
c = a + b
c = a - b
c = a * b
c = a / b
assert_equal(a, b)
assert_almost_equal(c, 1.)
c = a + b * 2. / b * a - a / b
assert_equal(a, b)
assert_equal(c, 0.5)
# true divide
a = np.array([5])
b = np.array([3])
c = (a * a) / b
assert_almost_equal(c, 25 / 3)
assert_equal(a, 5)
assert_equal(b, 3)
def test_extension_incref_elide(self):
# test extension (e.g. cython) calling PyNumber_* slots without
# increasing the reference counts
#
# def incref_elide(a):
        #    d = a.copy() # refcount 1
# return d, d + d # PyNumber_Add without increasing refcount
from numpy.core.multiarray_tests import incref_elide
d = np.ones(5)
orig, res = incref_elide(d)
        # the returned original must not have been modified by an elided
        # in-place operation
assert_array_equal(orig, d)
assert_array_equal(res, d + d)
def test_extension_incref_elide_stack(self):
        # scanning whether the refcount == 1 object is on the python stack, to
        # check that we are called directly from python, is flawed: the object
        # may still be above the stack pointer and we have no access to the
        # top of it
#
        # def incref_elide_l(l):
# return l[4] + l[4] # PyNumber_Add without increasing refcount
from numpy.core.multiarray_tests import incref_elide_l
        # padding with 1 makes sure the object on the stack is not overwritten
l = [1, 1, 1, 1, np.ones(5)]
res = incref_elide_l(l)
        # the returned original must not have been modified by an elided
        # in-place operation
assert_array_equal(l[4], np.ones(5))
assert_array_equal(res, l[4] + l[4])
def test_ufunc_override_rop_precedence(self):
# Check that __rmul__ and other right-hand operations have
# precedence over __numpy_ufunc__
ops = {
'__add__': ('__radd__', np.add, True),
'__sub__': ('__rsub__', np.subtract, True),
'__mul__': ('__rmul__', np.multiply, True),
'__truediv__': ('__rtruediv__', np.true_divide, True),
'__floordiv__': ('__rfloordiv__', np.floor_divide, True),
'__mod__': ('__rmod__', np.remainder, True),
'__divmod__': ('__rdivmod__', None, False),
'__pow__': ('__rpow__', np.power, True),
'__lshift__': ('__rlshift__', np.left_shift, True),
'__rshift__': ('__rrshift__', np.right_shift, True),
'__and__': ('__rand__', np.bitwise_and, True),
'__xor__': ('__rxor__', np.bitwise_xor, True),
'__or__': ('__ror__', np.bitwise_or, True),
'__ge__': ('__le__', np.less_equal, False),
'__gt__': ('__lt__', np.less, False),
'__le__': ('__ge__', np.greater_equal, False),
'__lt__': ('__gt__', np.greater, False),
'__eq__': ('__eq__', np.equal, False),
'__ne__': ('__ne__', np.not_equal, False),
}
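        # each entry maps op_name -> (reflected method name, matching ufunc or
        # None, whether an in-place variant like __iadd__ exists)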
class OtherNdarraySubclass(np.ndarray):
pass
class OtherNdarraySubclassWithOverride(np.ndarray):
def __numpy_ufunc__(self, *a, **kw):
raise AssertionError(("__numpy_ufunc__ %r %r shouldn't have "
"been called!") % (a, kw))
def check(op_name, ndsubclass):
rop_name, np_op, has_iop = ops[op_name]
if has_iop:
iop_name = '__i' + op_name[2:]
iop = getattr(operator, iop_name)
if op_name == "__divmod__":
op = divmod
else:
op = getattr(operator, op_name)
# Dummy class
def __init__(self, *a, **kw):
pass
def __numpy_ufunc__(self, *a, **kw):
raise AssertionError(("__numpy_ufunc__ %r %r shouldn't have "
"been called!") % (a, kw))
def __op__(self, *other):
return "op"
def __rop__(self, *other):
return "rop"
if ndsubclass:
bases = (np.ndarray,)
else:
bases = (object,)
dct = {'__init__': __init__,
'__numpy_ufunc__': __numpy_ufunc__,
op_name: __op__}
if op_name != rop_name:
dct[rop_name] = __rop__
cls = type("Rop" + rop_name, bases, dct)
# Check behavior against both bare ndarray objects and a
# ndarray subclasses with and without their own override
obj = cls((1,), buffer=np.ones(1,))
arr_objs = [np.array([1]),
np.array([2]).view(OtherNdarraySubclass),
np.array([3]).view(OtherNdarraySubclassWithOverride),
]
for arr in arr_objs:
err_msg = "%r %r" % (op_name, arr,)
# Check that ndarray op gives up if it sees a non-subclass
if not isinstance(obj, arr.__class__):
assert_equal(getattr(arr, op_name)(obj),
NotImplemented, err_msg=err_msg)
# Check that the Python binops have priority
assert_equal(op(obj, arr), "op", err_msg=err_msg)
if op_name == rop_name:
assert_equal(op(arr, obj), "op", err_msg=err_msg)
else:
assert_equal(op(arr, obj), "rop", err_msg=err_msg)
# Check that Python binops have priority also for in-place ops
if has_iop:
assert_equal(getattr(arr, iop_name)(obj),
NotImplemented, err_msg=err_msg)
if op_name != "__pow__":
                        # in-place pow is excluded: it seems to require the
                        # other operand to be integer-like
assert_equal(iop(arr, obj), "rop", err_msg=err_msg)
                # Check that ufuncs call __numpy_ufunc__ normally
if np_op is not None:
assert_raises(AssertionError, np_op, arr, obj,
err_msg=err_msg)
assert_raises(AssertionError, np_op, obj, arr,
err_msg=err_msg)
# Check all binary operations
for op_name in sorted(ops.keys()):
yield check, op_name, True
yield check, op_name, False
def test_ufunc_override_rop_simple(self):
# Check parts of the binary op overriding behavior in an
# explicit test case that is easier to understand.
class SomeClass(object):
def __numpy_ufunc__(self, *a, **kw):
return "ufunc"
def __mul__(self, other):
return 123
def __rmul__(self, other):
return 321
def __rsub__(self, other):
return "no subs for me"
def __gt__(self, other):
return "yep"
def __lt__(self, other):
return "nope"
class SomeClass2(SomeClass, np.ndarray):
def __numpy_ufunc__(self, ufunc, method, i, inputs, **kw):
if ufunc is np.multiply or ufunc is np.bitwise_and:
return "ufunc"
else:
inputs = list(inputs)
inputs[i] = np.asarray(self)
func = getattr(ufunc, method)
r = func(*inputs, **kw)
if 'out' in kw:
return r
else:
x = self.__class__(r.shape, dtype=r.dtype)
x[...] = r
return x
class SomeClass3(SomeClass2):
def __rsub__(self, other):
return "sub for me"
arr = np.array([0])
obj = SomeClass()
obj2 = SomeClass2((1,), dtype=np.int_)
obj2[0] = 9
obj3 = SomeClass3((1,), dtype=np.int_)
obj3[0] = 4
        # obj is first, so it should get to define the outcome.
assert_equal(obj * arr, 123)
# obj is second, but has __numpy_ufunc__ and defines __rmul__.
assert_equal(arr * obj, 321)
# obj is second, but has __numpy_ufunc__ and defines __rsub__.
assert_equal(arr - obj, "no subs for me")
# obj is second, but has __numpy_ufunc__ and defines __lt__.
assert_equal(arr > obj, "nope")
# obj is second, but has __numpy_ufunc__ and defines __gt__.
assert_equal(arr < obj, "yep")
# Called as a ufunc, obj.__numpy_ufunc__ is used.
assert_equal(np.multiply(arr, obj), "ufunc")
# obj is second, but has __numpy_ufunc__ and defines __rmul__.
arr *= obj
assert_equal(arr, 321)
        # obj2 is an ndarray subclass, so CPython applies the same rules.
assert_equal(obj2 * arr, 123)
assert_equal(arr * obj2, 321)
assert_equal(arr - obj2, "no subs for me")
assert_equal(arr > obj2, "nope")
assert_equal(arr < obj2, "yep")
# Called as a ufunc, obj2.__numpy_ufunc__ is called.
assert_equal(np.multiply(arr, obj2), "ufunc")
# Also when the method is not overridden.
assert_equal(arr & obj2, "ufunc")
arr *= obj2
assert_equal(arr, 321)
obj2 += 33
assert_equal(obj2[0], 42)
assert_equal(obj2.sum(), 42)
assert_(isinstance(obj2, SomeClass2))
        # obj3 is a subclass that defines __rsub__; CPython calls it.
assert_equal(arr - obj3, "sub for me")
assert_equal(obj2 - obj3, "sub for me")
# obj3 is a subclass that defines __rmul__. CPython calls it.
assert_equal(arr * obj3, 321)
# But not here, since obj3.__rmul__ is obj2.__rmul__.
assert_equal(obj2 * obj3, 123)
# And of course, here obj3.__mul__ should be called.
assert_equal(obj3 * obj2, 123)
# obj3 defines __numpy_ufunc__ but obj3.__radd__ is obj2.__radd__.
# (and both are just ndarray.__radd__); see #4815.
res = obj2 + obj3
assert_equal(res, 46)
assert_(isinstance(res, SomeClass2))
        # Since obj3 is a subclass, it should take precedence, just as
        # CPython would give it, even though obj2 has __numpy_ufunc__
        # and __radd__.  See gh-4815 and gh-5747.
res = obj3 + obj2
assert_equal(res, 46)
assert_(isinstance(res, SomeClass3))
def test_ufunc_override_normalize_signature(self):
# gh-5674
class SomeClass(object):
def __numpy_ufunc__(self, ufunc, method, i, inputs, **kw):
return kw
a = SomeClass()
kw = np.add(a, [1])
assert_('sig' not in kw and 'signature' not in kw)
kw = np.add(a, [1], sig='ii->i')
assert_('sig' not in kw and 'signature' in kw)
assert_equal(kw['signature'], 'ii->i')
kw = np.add(a, [1], signature='ii->i')
assert_('sig' not in kw and 'signature' in kw)
assert_equal(kw['signature'], 'ii->i')
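# Illustrative sketch (not part of the original suite): a minimal
# __numpy_ufunc__ hook receives the ufunc object, the method name
# ('__call__', 'reduce', ...), the operand's position among the inputs, and
# the inputs themselves; whatever it returns replaces the ufunc result.
# Under this protocol, np.add(_UfuncEcho(), [1]) returns ('add', '__call__', 0).
class _UfuncEcho(object):
    def __numpy_ufunc__(self, ufunc, method, i, inputs, **kw):
        return (ufunc.__name__, method, i)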
class TestCAPI(TestCase):
def test_IsPythonScalar(self):
from numpy.core.multiarray_tests import IsPythonScalar
assert_(IsPythonScalar(b'foobar'))
assert_(IsPythonScalar(1))
assert_(IsPythonScalar(2**80))
assert_(IsPythonScalar(2.))
assert_(IsPythonScalar("a"))
class TestSubscripting(TestCase):
def test_test_zero_rank(self):
x = np.array([1, 2, 3])
self.assertTrue(isinstance(x[0], np.int_))
if sys.version_info[0] < 3:
self.assertTrue(isinstance(x[0], int))
self.assertTrue(type(x[0, ...]) is np.ndarray)
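# Illustrative sketch (not part of the original suite): integer indexing of a
# 1-d array returns a numpy scalar, while indexing with an ellipsis keeps a
# zero-rank ndarray that is a view sharing memory with the original.
def _example_zero_rank_view():
    x = np.array([1, 2, 3])
    v = x[0, ...]          # 0-d ndarray view, not a scalar
    x[0] = 42
    assert_(v.shape == () and v == 42)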
class TestPickling(TestCase):
def test_roundtrip(self):
import pickle
carray = np.array([[2, 9], [7, 0], [3, 8]])
DATA = [
carray,
np.transpose(carray),
np.array([('xxx', 1, 2.0)], dtype=[('a', (str, 3)), ('b', int),
('c', float)])
]
for a in DATA:
assert_equal(a, pickle.loads(a.dumps()), err_msg="%r" % a)
def _loads(self, obj):
if sys.version_info[0] >= 3:
return np.loads(obj, encoding='latin1')
else:
return np.loads(obj)
    # version 0 pickles, written with pickle protocol 2
# version 0 doesn't have a version field
def test_version0_int8(self):
s = '\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x04\x85cnumpy\ndtype\nq\x04U\x02i1K\x00K\x01\x87Rq\x05(U\x01|NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89U\x04\x01\x02\x03\x04tb.'
a = np.array([1, 2, 3, 4], dtype=np.int8)
p = self._loads(asbytes(s))
assert_equal(a, p)
def test_version0_float32(self):
s = '\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x04\x85cnumpy\ndtype\nq\x04U\x02f4K\x00K\x01\x87Rq\x05(U\x01<NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89U\x10\x00\x00\x80?\x00\x00\x00@\x00\x00@@\x00\x00\x80@tb.'
a = np.array([1.0, 2.0, 3.0, 4.0], dtype=np.float32)
p = self._loads(asbytes(s))
assert_equal(a, p)
def test_version0_object(self):
s = '\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x02\x85cnumpy\ndtype\nq\x04U\x02O8K\x00K\x01\x87Rq\x05(U\x01|NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89]q\x06(}q\x07U\x01aK\x01s}q\x08U\x01bK\x02setb.'
a = np.array([{'a':1}, {'b':2}])
p = self._loads(asbytes(s))
assert_equal(a, p)
    # version 1 pickles, written with pickle protocol 2
def test_version1_int8(self):
s = '\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x01K\x04\x85cnumpy\ndtype\nq\x04U\x02i1K\x00K\x01\x87Rq\x05(K\x01U\x01|NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89U\x04\x01\x02\x03\x04tb.'
a = np.array([1, 2, 3, 4], dtype=np.int8)
p = self._loads(asbytes(s))
assert_equal(a, p)
def test_version1_float32(self):
s = '\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x01K\x04\x85cnumpy\ndtype\nq\x04U\x02f4K\x00K\x01\x87Rq\x05(K\x01U\x01<NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89U\x10\x00\x00\x80?\x00\x00\x00@\x00\x00@@\x00\x00\x80@tb.'
a = np.array([1.0, 2.0, 3.0, 4.0], dtype=np.float32)
p = self._loads(asbytes(s))
assert_equal(a, p)
def test_version1_object(self):
s = '\x80\x02cnumpy.core._internal\n_reconstruct\nq\x01cnumpy\nndarray\nq\x02K\x00\x85U\x01b\x87Rq\x03(K\x01K\x02\x85cnumpy\ndtype\nq\x04U\x02O8K\x00K\x01\x87Rq\x05(K\x01U\x01|NNJ\xff\xff\xff\xffJ\xff\xff\xff\xfftb\x89]q\x06(}q\x07U\x01aK\x01s}q\x08U\x01bK\x02setb.'
a = np.array([{'a':1}, {'b':2}])
p = self._loads(asbytes(s))
assert_equal(a, p)
def test_subarray_int_shape(self):
s = "cnumpy.core.multiarray\n_reconstruct\np0\n(cnumpy\nndarray\np1\n(I0\ntp2\nS'b'\np3\ntp4\nRp5\n(I1\n(I1\ntp6\ncnumpy\ndtype\np7\n(S'V6'\np8\nI0\nI1\ntp9\nRp10\n(I3\nS'|'\np11\nN(S'a'\np12\ng3\ntp13\n(dp14\ng12\n(g7\n(S'V4'\np15\nI0\nI1\ntp16\nRp17\n(I3\nS'|'\np18\n(g7\n(S'i1'\np19\nI0\nI1\ntp20\nRp21\n(I3\nS'|'\np22\nNNNI-1\nI-1\nI0\ntp23\nb(I2\nI2\ntp24\ntp25\nNNI4\nI1\nI0\ntp26\nbI0\ntp27\nsg3\n(g7\n(S'V2'\np28\nI0\nI1\ntp29\nRp30\n(I3\nS'|'\np31\n(g21\nI2\ntp32\nNNI2\nI1\nI0\ntp33\nbI4\ntp34\nsI6\nI1\nI0\ntp35\nbI00\nS'\\x01\\x01\\x01\\x01\\x01\\x02'\np36\ntp37\nb."
a = np.array([(1, (1, 2))], dtype=[('a', 'i1', (2, 2)), ('b', 'i1', 2)])
p = self._loads(asbytes(s))
assert_equal(a, p)
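# Illustrative sketch (not part of the original suite): ndarray.dumps() is
# shorthand for pickle.dumps(self); a roundtrip restores dtype, shape, and
# data, which is what test_roundtrip above relies on.
def _example_pickle_roundtrip():
    import pickle
    a = np.arange(6, dtype=np.float32).reshape(2, 3)
    b = pickle.loads(pickle.dumps(a, protocol=2))
    assert_equal(a, b)
    assert_(b.dtype == a.dtype)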
class TestFancyIndexing(TestCase):
def test_list(self):
x = np.ones((1, 1))
x[:, [0]] = 2.0
assert_array_equal(x, np.array([[2.0]]))
x = np.ones((1, 1, 1))
x[:,:, [0]] = 2.0
assert_array_equal(x, np.array([[[2.0]]]))
def test_tuple(self):
x = np.ones((1, 1))
x[:, (0,)] = 2.0
assert_array_equal(x, np.array([[2.0]]))
x = np.ones((1, 1, 1))
x[:,:, (0,)] = 2.0
assert_array_equal(x, np.array([[[2.0]]]))
def test_mask(self):
x = np.array([1, 2, 3, 4])
m = np.array([0, 1, 0, 0], bool)
assert_array_equal(x[m], np.array([2]))
def test_mask2(self):
x = np.array([[1, 2, 3, 4], [5, 6, 7, 8]])
m = np.array([0, 1], bool)
m2 = np.array([[0, 1, 0, 0], [1, 0, 0, 0]], bool)
m3 = np.array([[0, 1, 0, 0], [0, 0, 0, 0]], bool)
assert_array_equal(x[m], np.array([[5, 6, 7, 8]]))
assert_array_equal(x[m2], np.array([2, 5]))
assert_array_equal(x[m3], np.array([2]))
def test_assign_mask(self):
x = np.array([1, 2, 3, 4])
m = np.array([0, 1, 0, 0], bool)
x[m] = 5
assert_array_equal(x, np.array([1, 5, 3, 4]))
def test_assign_mask2(self):
xorig = np.array([[1, 2, 3, 4], [5, 6, 7, 8]])
m = np.array([0, 1], bool)
m2 = np.array([[0, 1, 0, 0], [1, 0, 0, 0]], bool)
m3 = np.array([[0, 1, 0, 0], [0, 0, 0, 0]], bool)
x = xorig.copy()
x[m] = 10
assert_array_equal(x, np.array([[1, 2, 3, 4], [10, 10, 10, 10]]))
x = xorig.copy()
x[m2] = 10
assert_array_equal(x, np.array([[1, 10, 3, 4], [10, 6, 7, 8]]))
x = xorig.copy()
x[m3] = 10
assert_array_equal(x, np.array([[1, 10, 3, 4], [5, 6, 7, 8]]))
class TestStringCompare(TestCase):
def test_string(self):
g1 = np.array(["This", "is", "example"])
g2 = np.array(["This", "was", "example"])
assert_array_equal(g1 == g2, [g1[i] == g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 != g2, [g1[i] != g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 <= g2, [g1[i] <= g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 >= g2, [g1[i] >= g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 < g2, [g1[i] < g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 > g2, [g1[i] > g2[i] for i in [0, 1, 2]])
def test_mixed(self):
g1 = np.array(["spam", "spa", "spammer", "and eggs"])
g2 = "spam"
assert_array_equal(g1 == g2, [x == g2 for x in g1])
assert_array_equal(g1 != g2, [x != g2 for x in g1])
assert_array_equal(g1 < g2, [x < g2 for x in g1])
assert_array_equal(g1 > g2, [x > g2 for x in g1])
assert_array_equal(g1 <= g2, [x <= g2 for x in g1])
assert_array_equal(g1 >= g2, [x >= g2 for x in g1])
def test_unicode(self):
g1 = np.array([sixu("This"), sixu("is"), sixu("example")])
g2 = np.array([sixu("This"), sixu("was"), sixu("example")])
assert_array_equal(g1 == g2, [g1[i] == g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 != g2, [g1[i] != g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 <= g2, [g1[i] <= g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 >= g2, [g1[i] >= g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 < g2, [g1[i] < g2[i] for i in [0, 1, 2]])
assert_array_equal(g1 > g2, [g1[i] > g2[i] for i in [0, 1, 2]])
class TestArgmax(TestCase):
nan_arr = [
([0, 1, 2, 3, np.nan], 4),
([0, 1, 2, np.nan, 3], 3),
([np.nan, 0, 1, 2, 3], 0),
([np.nan, 0, np.nan, 2, 3], 0),
([0, 1, 2, 3, complex(0, np.nan)], 4),
([0, 1, 2, 3, complex(np.nan, 0)], 4),
([0, 1, 2, complex(np.nan, 0), 3], 3),
([0, 1, 2, complex(0, np.nan), 3], 3),
([complex(0, np.nan), 0, 1, 2, 3], 0),
([complex(np.nan, np.nan), 0, 1, 2, 3], 0),
([complex(np.nan, 0), complex(np.nan, 2), complex(np.nan, 1)], 0),
([complex(np.nan, np.nan), complex(np.nan, 2), complex(np.nan, 1)], 0),
([complex(np.nan, 0), complex(np.nan, 2), complex(np.nan, np.nan)], 0),
([complex(0, 0), complex(0, 2), complex(0, 1)], 1),
([complex(1, 0), complex(0, 2), complex(0, 1)], 0),
([complex(1, 0), complex(0, 2), complex(1, 1)], 2),
([np.datetime64('1923-04-14T12:43:12'),
np.datetime64('1994-06-21T14:43:15'),
np.datetime64('2001-10-15T04:10:32'),
np.datetime64('1995-11-25T16:02:16'),
np.datetime64('2005-01-04T03:14:12'),
np.datetime64('2041-12-03T14:05:03')], 5),
([np.datetime64('1935-09-14T04:40:11'),
np.datetime64('1949-10-12T12:32:11'),
np.datetime64('2010-01-03T05:14:12'),
np.datetime64('2015-11-20T12:20:59'),
np.datetime64('1932-09-23T10:10:13'),
np.datetime64('2014-10-10T03:50:30')], 3),
# Assorted tests with NaTs
([np.datetime64('NaT'),
np.datetime64('NaT'),
np.datetime64('2010-01-03T05:14:12'),
np.datetime64('NaT'),
np.datetime64('2015-09-23T10:10:13'),
np.datetime64('1932-10-10T03:50:30')], 4),
([np.datetime64('2059-03-14T12:43:12'),
np.datetime64('1996-09-21T14:43:15'),
np.datetime64('NaT'),
np.datetime64('2022-12-25T16:02:16'),
np.datetime64('1963-10-04T03:14:12'),
np.datetime64('2013-05-08T18:15:23')], 0),
([np.timedelta64(2, 's'),
np.timedelta64(1, 's'),
np.timedelta64('NaT', 's'),
np.timedelta64(3, 's')], 3),
([np.timedelta64('NaT', 's')] * 3, 0),
([timedelta(days=5, seconds=14), timedelta(days=2, seconds=35),
timedelta(days=-1, seconds=23)], 0),
([timedelta(days=1, seconds=43), timedelta(days=10, seconds=5),
timedelta(days=5, seconds=14)], 1),
([timedelta(days=10, seconds=24), timedelta(days=10, seconds=5),
timedelta(days=10, seconds=43)], 2),
([False, False, False, False, True], 4),
([False, False, False, True, False], 3),
([True, False, False, False, False], 0),
([True, False, True, False, False], 0),
# Can't reduce a "flexible type"
#(['a', 'z', 'aa', 'zz'], 3),
#(['zz', 'a', 'aa', 'a'], 0),
#(['aa', 'z', 'zz', 'a'], 2),
]
def test_all(self):
a = np.random.normal(0, 1, (4, 5, 6, 7, 8))
for i in range(a.ndim):
amax = a.max(i)
aargmax = a.argmax(i)
axes = list(range(a.ndim))
axes.remove(i)
assert_(np.all(amax == aargmax.choose(*a.transpose(i,*axes))))
def test_combinations(self):
for arr, pos in self.nan_arr:
assert_equal(np.argmax(arr), pos, err_msg="%r" % arr)
assert_equal(arr[np.argmax(arr)], np.max(arr), err_msg="%r" % arr)
def test_output_shape(self):
# see also gh-616
a = np.ones((10, 5))
# Check some simple shape mismatches
out = np.ones(11, dtype=np.int_)
assert_raises(ValueError, a.argmax, -1, out)
out = np.ones((2, 5), dtype=np.int_)
assert_raises(ValueError, a.argmax, -1, out)
        # these could possibly be relaxed (the previous shapes used to be allowed)
out = np.ones((1, 10), dtype=np.int_)
assert_raises(ValueError, a.argmax, -1, np.ones((1, 10)))
out = np.ones(10, dtype=np.int_)
a.argmax(-1, out=out)
assert_equal(out, a.argmax(-1))
def test_argmax_unicode(self):
d = np.zeros(6031, dtype='<U9')
d[5942] = "as"
assert_equal(d.argmax(), 5942)
def test_np_vs_ndarray(self):
# make sure both ndarray.argmax and numpy.argmax support out/axis args
a = np.random.normal(size=(2,3))
        # check positional args
out1 = np.zeros(2, dtype=int)
out2 = np.zeros(2, dtype=int)
assert_equal(a.argmax(1, out1), np.argmax(a, 1, out2))
assert_equal(out1, out2)
        # check keyword args
out1 = np.zeros(3, dtype=int)
out2 = np.zeros(3, dtype=int)
assert_equal(a.argmax(out=out1, axis=0), np.argmax(a, out=out2, axis=0))
assert_equal(out1, out2)
class TestArgmin(TestCase):
nan_arr = [
([0, 1, 2, 3, np.nan], 4),
([0, 1, 2, np.nan, 3], 3),
([np.nan, 0, 1, 2, 3], 0),
([np.nan, 0, np.nan, 2, 3], 0),
([0, 1, 2, 3, complex(0, np.nan)], 4),
([0, 1, 2, 3, complex(np.nan, 0)], 4),
([0, 1, 2, complex(np.nan, 0), 3], 3),
([0, 1, 2, complex(0, np.nan), 3], 3),
([complex(0, np.nan), 0, 1, 2, 3], 0),
([complex(np.nan, np.nan), 0, 1, 2, 3], 0),
([complex(np.nan, 0), complex(np.nan, 2), complex(np.nan, 1)], 0),
([complex(np.nan, np.nan), complex(np.nan, 2), complex(np.nan, 1)], 0),
([complex(np.nan, 0), complex(np.nan, 2), complex(np.nan, np.nan)], 0),
([complex(0, 0), complex(0, 2), complex(0, 1)], 0),
([complex(1, 0), complex(0, 2), complex(0, 1)], 2),
([complex(1, 0), complex(0, 2), complex(1, 1)], 1),
([np.datetime64('1923-04-14T12:43:12'),
np.datetime64('1994-06-21T14:43:15'),
np.datetime64('2001-10-15T04:10:32'),
np.datetime64('1995-11-25T16:02:16'),
np.datetime64('2005-01-04T03:14:12'),
np.datetime64('2041-12-03T14:05:03')], 0),
([np.datetime64('1935-09-14T04:40:11'),
np.datetime64('1949-10-12T12:32:11'),
np.datetime64('2010-01-03T05:14:12'),
np.datetime64('2014-11-20T12:20:59'),
np.datetime64('2015-09-23T10:10:13'),
np.datetime64('1932-10-10T03:50:30')], 5),
# Assorted tests with NaTs
([np.datetime64('NaT'),
np.datetime64('NaT'),
np.datetime64('2010-01-03T05:14:12'),
np.datetime64('NaT'),
np.datetime64('2015-09-23T10:10:13'),
np.datetime64('1932-10-10T03:50:30')], 5),
([np.datetime64('2059-03-14T12:43:12'),
np.datetime64('1996-09-21T14:43:15'),
np.datetime64('NaT'),
np.datetime64('2022-12-25T16:02:16'),
np.datetime64('1963-10-04T03:14:12'),
np.datetime64('2013-05-08T18:15:23')], 4),
([np.timedelta64(2, 's'),
np.timedelta64(1, 's'),
np.timedelta64('NaT', 's'),
np.timedelta64(3, 's')], 1),
([np.timedelta64('NaT', 's')] * 3, 0),
([timedelta(days=5, seconds=14), timedelta(days=2, seconds=35),
timedelta(days=-1, seconds=23)], 2),
([timedelta(days=1, seconds=43), timedelta(days=10, seconds=5),
timedelta(days=5, seconds=14)], 0),
([timedelta(days=10, seconds=24), timedelta(days=10, seconds=5),
timedelta(days=10, seconds=43)], 1),
([True, True, True, True, False], 4),
([True, True, True, False, True], 3),
([False, True, True, True, True], 0),
([False, True, False, True, True], 0),
# Can't reduce a "flexible type"
#(['a', 'z', 'aa', 'zz'], 0),
#(['zz', 'a', 'aa', 'a'], 1),
#(['aa', 'z', 'zz', 'a'], 3),
]
def test_all(self):
a = np.random.normal(0, 1, (4, 5, 6, 7, 8))
for i in range(a.ndim):
amin = a.min(i)
aargmin = a.argmin(i)
axes = list(range(a.ndim))
axes.remove(i)
assert_(np.all(amin == aargmin.choose(*a.transpose(i,*axes))))
def test_combinations(self):
for arr, pos in self.nan_arr:
assert_equal(np.argmin(arr), pos, err_msg="%r" % arr)
assert_equal(arr[np.argmin(arr)], np.min(arr), err_msg="%r" % arr)
def test_minimum_signed_integers(self):
a = np.array([1, -2**7, -2**7 + 1], dtype=np.int8)
assert_equal(np.argmin(a), 1)
a = np.array([1, -2**15, -2**15 + 1], dtype=np.int16)
assert_equal(np.argmin(a), 1)
a = np.array([1, -2**31, -2**31 + 1], dtype=np.int32)
assert_equal(np.argmin(a), 1)
a = np.array([1, -2**63, -2**63 + 1], dtype=np.int64)
assert_equal(np.argmin(a), 1)
def test_output_shape(self):
# see also gh-616
a = np.ones((10, 5))
# Check some simple shape mismatches
out = np.ones(11, dtype=np.int_)
assert_raises(ValueError, a.argmin, -1, out)
out = np.ones((2, 5), dtype=np.int_)
assert_raises(ValueError, a.argmin, -1, out)
        # these could possibly be relaxed (the previous shapes used to be allowed)
out = np.ones((1, 10), dtype=np.int_)
assert_raises(ValueError, a.argmin, -1, np.ones((1, 10)))
out = np.ones(10, dtype=np.int_)
a.argmin(-1, out=out)
assert_equal(out, a.argmin(-1))
def test_argmin_unicode(self):
d = np.ones(6031, dtype='<U9')
d[6001] = "0"
assert_equal(d.argmin(), 6001)
def test_np_vs_ndarray(self):
# make sure both ndarray.argmin and numpy.argmin support out/axis args
a = np.random.normal(size=(2,3))
        # check positional args
out1 = np.zeros(2, dtype=int)
out2 = np.ones(2, dtype=int)
assert_equal(a.argmin(1, out1), np.argmin(a, 1, out2))
assert_equal(out1, out2)
        # check keyword args
out1 = np.zeros(3, dtype=int)
out2 = np.ones(3, dtype=int)
assert_equal(a.argmin(out=out1, axis=0), np.argmin(a, out=out2, axis=0))
assert_equal(out1, out2)
class TestMinMax(TestCase):
def test_scalar(self):
assert_raises(ValueError, np.amax, 1, 1)
assert_raises(ValueError, np.amin, 1, 1)
assert_equal(np.amax(1, axis=0), 1)
assert_equal(np.amin(1, axis=0), 1)
assert_equal(np.amax(1, axis=None), 1)
assert_equal(np.amin(1, axis=None), 1)
def test_axis(self):
assert_raises(ValueError, np.amax, [1, 2, 3], 1000)
assert_equal(np.amax([[1, 2, 3]], axis=1), 3)
def test_datetime(self):
# NaTs are ignored
for dtype in ('m8[s]', 'm8[Y]'):
a = np.arange(10).astype(dtype)
a[3] = 'NaT'
assert_equal(np.amin(a), a[0])
assert_equal(np.amax(a), a[9])
a[0] = 'NaT'
assert_equal(np.amin(a), a[1])
assert_equal(np.amax(a), a[9])
a.fill('NaT')
assert_equal(np.amin(a), a[0])
assert_equal(np.amax(a), a[0])
class TestNewaxis(TestCase):
def test_basic(self):
sk = np.array([0, -0.1, 0.1])
res = 250*sk[:, np.newaxis]
assert_almost_equal(res.ravel(), 250*sk)
class TestClip(TestCase):
def _check_range(self, x, cmin, cmax):
assert_(np.all(x >= cmin))
assert_(np.all(x <= cmax))
def _clip_type(self, type_group, array_max,
clip_min, clip_max, inplace=False,
expected_min=None, expected_max=None):
if expected_min is None:
expected_min = clip_min
if expected_max is None:
expected_max = clip_max
for T in np.sctypes[type_group]:
if sys.byteorder == 'little':
byte_orders = ['=', '>']
else:
byte_orders = ['<', '=']
for byteorder in byte_orders:
dtype = np.dtype(T).newbyteorder(byteorder)
x = (np.random.random(1000) * array_max).astype(dtype)
if inplace:
x.clip(clip_min, clip_max, x)
else:
x = x.clip(clip_min, clip_max)
byteorder = '='
if x.dtype.byteorder == '|':
byteorder = '|'
assert_equal(x.dtype.byteorder, byteorder)
self._check_range(x, expected_min, expected_max)
return x
def test_basic(self):
for inplace in [False, True]:
self._clip_type(
'float', 1024, -12.8, 100.2, inplace=inplace)
self._clip_type(
'float', 1024, 0, 0, inplace=inplace)
self._clip_type(
'int', 1024, -120, 100.5, inplace=inplace)
self._clip_type(
'int', 1024, 0, 0, inplace=inplace)
self._clip_type(
'uint', 1024, 0, 0, inplace=inplace)
self._clip_type(
'uint', 1024, -120, 100, inplace=inplace, expected_min=0)
def test_record_array(self):
rec = np.array([(-5, 2.0, 3.0), (5.0, 4.0, 3.0)],
dtype=[('x', '<f8'), ('y', '<f8'), ('z', '<f8')])
y = rec['x'].clip(-0.3, 0.5)
self._check_range(y, -0.3, 0.5)
def test_max_or_min(self):
val = np.array([0, 1, 2, 3, 4, 5, 6, 7])
x = val.clip(3)
assert_(np.all(x >= 3))
x = val.clip(min=3)
assert_(np.all(x >= 3))
x = val.clip(max=4)
assert_(np.all(x <= 4))
class TestPutmask(object):
def tst_basic(self, x, T, mask, val):
np.putmask(x, mask, val)
assert_(np.all(x[mask] == T(val)))
assert_(x.dtype == T)
def test_ip_types(self):
unchecked_types = [str, unicode, np.void, object]
x = np.random.random(1000)*100
mask = x < 40
for val in [-100, 0, 15]:
for types in np.sctypes.values():
for T in types:
if T not in unchecked_types:
yield self.tst_basic, x.copy().astype(T), T, mask, val
def test_mask_size(self):
assert_raises(ValueError, np.putmask, np.array([1, 2, 3]), [True], 5)
def tst_byteorder(self, dtype):
x = np.array([1, 2, 3], dtype)
np.putmask(x, [True, False, True], -1)
assert_array_equal(x, [-1, 2, -1])
def test_ip_byteorder(self):
for dtype in ('>i4', '<i4'):
yield self.tst_byteorder, dtype
def test_record_array(self):
# Note mixed byteorder.
rec = np.array([(-5, 2.0, 3.0), (5.0, 4.0, 3.0)],
dtype=[('x', '<f8'), ('y', '>f8'), ('z', '<f8')])
np.putmask(rec['x'], [True, False], 10)
assert_array_equal(rec['x'], [10, 5])
assert_array_equal(rec['y'], [2, 4])
assert_array_equal(rec['z'], [3, 3])
np.putmask(rec['y'], [True, False], 11)
assert_array_equal(rec['x'], [10, 5])
assert_array_equal(rec['y'], [11, 4])
assert_array_equal(rec['z'], [3, 3])
def test_masked_array(self):
## x = np.array([1,2,3])
## z = np.ma.array(x,mask=[True,False,False])
## np.putmask(z,[True,True,True],3)
pass
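# Illustrative sketch (not part of the original suite): np.putmask writes
# values[n] into a.flat[n] wherever mask.flat[n] is True; a shorter
# ``values`` is repeated cyclically over the *flat positions* of ``a``,
# not over the masked selection.
def _example_putmask_cycle():
    x = np.arange(6)
    np.putmask(x, x > 2, [-1, -2])  # positions 3, 4, 5 get values[n % 2]
    assert_array_equal(x, [0, 1, 2, -2, -1, -2])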
class TestTake(object):
def tst_basic(self, x):
ind = list(range(x.shape[0]))
assert_array_equal(x.take(ind, axis=0), x)
def test_ip_types(self):
unchecked_types = [str, unicode, np.void, object]
x = np.random.random(24)*100
x.shape = 2, 3, 4
for types in np.sctypes.values():
for T in types:
if T not in unchecked_types:
yield self.tst_basic, x.copy().astype(T)
def test_raise(self):
x = np.random.random(24)*100
x.shape = 2, 3, 4
assert_raises(IndexError, x.take, [0, 1, 2], axis=0)
assert_raises(IndexError, x.take, [-3], axis=0)
assert_array_equal(x.take([-1], axis=0)[0], x[1])
def test_clip(self):
x = np.random.random(24)*100
x.shape = 2, 3, 4
assert_array_equal(x.take([-1], axis=0, mode='clip')[0], x[0])
assert_array_equal(x.take([2], axis=0, mode='clip')[0], x[1])
def test_wrap(self):
x = np.random.random(24)*100
x.shape = 2, 3, 4
assert_array_equal(x.take([-1], axis=0, mode='wrap')[0], x[1])
assert_array_equal(x.take([2], axis=0, mode='wrap')[0], x[0])
assert_array_equal(x.take([3], axis=0, mode='wrap')[0], x[1])
def tst_byteorder(self, dtype):
x = np.array([1, 2, 3], dtype)
assert_array_equal(x.take([0, 2, 1]), [1, 3, 2])
def test_ip_byteorder(self):
for dtype in ('>i4', '<i4'):
yield self.tst_byteorder, dtype
def test_record_array(self):
# Note mixed byteorder.
rec = np.array([(-5, 2.0, 3.0), (5.0, 4.0, 3.0)],
dtype=[('x', '<f8'), ('y', '>f8'), ('z', '<f8')])
rec1 = rec.take([1])
assert_(rec1['x'] == 5.0 and rec1['y'] == 4.0)
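# Illustrative sketch (not part of the original suite): ``take`` handles
# out-of-bounds indices according to ``mode`` -- 'raise' (the default)
# raises IndexError, 'clip' pins the index to the valid range, and 'wrap'
# reduces it modulo the axis length.
def _example_take_modes():
    x = np.array([10, 20, 30])
    assert_equal(x.take([4], mode='clip')[0], 30)  # 4 is clipped to 2
    assert_equal(x.take([4], mode='wrap')[0], 20)  # 4 % 3 == 1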
class TestLexsort(TestCase):
def test_basic(self):
a = [1, 2, 1, 3, 1, 5]
b = [0, 4, 5, 6, 2, 3]
idx = np.lexsort((b, a))
expected_idx = np.array([0, 4, 2, 1, 3, 5])
assert_array_equal(idx, expected_idx)
x = np.vstack((b, a))
idx = np.lexsort(x)
assert_array_equal(idx, expected_idx)
assert_array_equal(x[1][idx], np.sort(x[1]))
def test_datetime(self):
a = np.array([0,0,0], dtype='datetime64[D]')
b = np.array([2,1,0], dtype='datetime64[D]')
idx = np.lexsort((b, a))
expected_idx = np.array([2, 1, 0])
assert_array_equal(idx, expected_idx)
a = np.array([0,0,0], dtype='timedelta64[D]')
b = np.array([2,1,0], dtype='timedelta64[D]')
idx = np.lexsort((b, a))
expected_idx = np.array([2, 1, 0])
assert_array_equal(idx, expected_idx)
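# Illustrative sketch (not part of the original suite): np.lexsort sorts by
# the *last* key first, so lexsort((secondary, primary)) orders by
# ``primary`` and breaks ties with ``secondary``.
def _example_lexsort_key_order():
    surnames = np.array(['Hertz', 'Galilei', 'Hertz'])
    first_names = np.array(['Heinrich', 'Galileo', 'Gustav'])
    order = np.lexsort((first_names, surnames))  # surnames is the primary key
    assert_array_equal(order, [1, 2, 0])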
class TestIO(object):
"""Test tofile, fromfile, tobytes, and fromstring"""
def setUp(self):
shape = (2, 4, 3)
rand = np.random.random
self.x = rand(shape) + rand(shape).astype(np.complex)*1j
self.x[0,:, 1] = [np.nan, np.inf, -np.inf, np.nan]
self.dtype = self.x.dtype
self.tempdir = tempfile.mkdtemp()
self.filename = tempfile.mktemp(dir=self.tempdir)
def tearDown(self):
shutil.rmtree(self.tempdir)
def test_bool_fromstring(self):
v = np.array([True, False, True, False], dtype=np.bool_)
y = np.fromstring('1 0 -2.3 0.0', sep=' ', dtype=np.bool_)
assert_array_equal(v, y)
def test_uint64_fromstring(self):
d = np.fromstring("9923372036854775807 104783749223640",
dtype=np.uint64, sep=' ')
e = np.array([9923372036854775807, 104783749223640], dtype=np.uint64)
assert_array_equal(d, e)
def test_int64_fromstring(self):
d = np.fromstring("-25041670086757 104783749223640",
dtype=np.int64, sep=' ')
e = np.array([-25041670086757, 104783749223640], dtype=np.int64)
assert_array_equal(d, e)
def test_empty_files_binary(self):
f = open(self.filename, 'w')
f.close()
y = np.fromfile(self.filename)
assert_(y.size == 0, "Array not empty")
def test_empty_files_text(self):
f = open(self.filename, 'w')
f.close()
y = np.fromfile(self.filename, sep=" ")
assert_(y.size == 0, "Array not empty")
def test_roundtrip_file(self):
f = open(self.filename, 'wb')
self.x.tofile(f)
f.close()
# NB. doesn't work with flush+seek, due to use of C stdio
f = open(self.filename, 'rb')
y = np.fromfile(f, dtype=self.dtype)
f.close()
assert_array_equal(y, self.x.flat)
def test_roundtrip_filename(self):
self.x.tofile(self.filename)
y = np.fromfile(self.filename, dtype=self.dtype)
assert_array_equal(y, self.x.flat)
def test_roundtrip_binary_str(self):
s = self.x.tobytes()
y = np.fromstring(s, dtype=self.dtype)
assert_array_equal(y, self.x.flat)
s = self.x.tobytes('F')
y = np.fromstring(s, dtype=self.dtype)
assert_array_equal(y, self.x.flatten('F'))
def test_roundtrip_str(self):
x = self.x.real.ravel()
s = "@".join(map(str, x))
y = np.fromstring(s, sep="@")
        # NB. str() yields less precision than repr(), hence the loose check
nan_mask = ~np.isfinite(x)
assert_array_equal(x[nan_mask], y[nan_mask])
assert_array_almost_equal(x[~nan_mask], y[~nan_mask], decimal=5)
def test_roundtrip_repr(self):
x = self.x.real.ravel()
s = "@".join(map(repr, x))
y = np.fromstring(s, sep="@")
assert_array_equal(x, y)
def test_file_position_after_fromfile(self):
# gh-4118
sizes = [io.DEFAULT_BUFFER_SIZE//8,
io.DEFAULT_BUFFER_SIZE,
io.DEFAULT_BUFFER_SIZE*8]
for size in sizes:
f = open(self.filename, 'wb')
f.seek(size-1)
f.write(b'\0')
f.close()
for mode in ['rb', 'r+b']:
err_msg = "%d %s" % (size, mode)
f = open(self.filename, mode)
f.read(2)
np.fromfile(f, dtype=np.float64, count=1)
pos = f.tell()
f.close()
assert_equal(pos, 10, err_msg=err_msg)
def test_file_position_after_tofile(self):
# gh-4118
sizes = [io.DEFAULT_BUFFER_SIZE//8,
io.DEFAULT_BUFFER_SIZE,
io.DEFAULT_BUFFER_SIZE*8]
for size in sizes:
err_msg = "%d" % (size,)
f = open(self.filename, 'wb')
f.seek(size-1)
f.write(b'\0')
f.seek(10)
f.write(b'12')
np.array([0], dtype=np.float64).tofile(f)
pos = f.tell()
f.close()
assert_equal(pos, 10 + 2 + 8, err_msg=err_msg)
f = open(self.filename, 'r+b')
f.read(2)
            f.seek(0, 1)  # ANSI C requires a seek (or flush) between a read and a write
np.array([0], dtype=np.float64).tofile(f)
pos = f.tell()
f.close()
assert_equal(pos, 10, err_msg=err_msg)
def _check_from(self, s, value, **kw):
y = np.fromstring(asbytes(s), **kw)
assert_array_equal(y, value)
f = open(self.filename, 'wb')
f.write(asbytes(s))
f.close()
y = np.fromfile(self.filename, **kw)
assert_array_equal(y, value)
def test_nan(self):
self._check_from(
"nan +nan -nan NaN nan(foo) +NaN(BAR) -NAN(q_u_u_x_)",
[np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan],
sep=' ')
def test_inf(self):
self._check_from(
"inf +inf -inf infinity -Infinity iNfInItY -inF",
[np.inf, np.inf, -np.inf, np.inf, -np.inf, np.inf, -np.inf],
sep=' ')
def test_numbers(self):
self._check_from("1.234 -1.234 .3 .3e55 -123133.1231e+133",
[1.234, -1.234, .3, .3e55, -123133.1231e+133], sep=' ')
def test_binary(self):
self._check_from('\x00\x00\x80?\x00\x00\x00@\x00\x00@@\x00\x00\x80@',
np.array([1, 2, 3, 4]),
dtype='<f4')
@dec.slow # takes > 1 minute on mechanical hard drive
def test_big_binary(self):
"""Test workarounds for 32-bit limited fwrite, fseek, and ftell
calls in windows. These normally would hang doing something like this.
See http://projects.scipy.org/numpy/ticket/1660"""
if sys.platform != 'win32':
return
try:
# before workarounds, only up to 2**32-1 worked
fourgbplus = 2**32 + 2**16
testbytes = np.arange(8, dtype=np.int8)
n = len(testbytes)
flike = tempfile.NamedTemporaryFile()
f = flike.file
np.tile(testbytes, fourgbplus // testbytes.nbytes).tofile(f)
flike.seek(0)
a = np.fromfile(f, dtype=np.int8)
flike.close()
assert_(len(a) == fourgbplus)
# check only start and end for speed:
assert_((a[:n] == testbytes).all())
assert_((a[-n:] == testbytes).all())
except (MemoryError, ValueError):
pass
def test_string(self):
self._check_from('1,2,3,4', [1., 2., 3., 4.], sep=',')
def test_counted_string(self):
self._check_from('1,2,3,4', [1., 2., 3., 4.], count=4, sep=',')
self._check_from('1,2,3,4', [1., 2., 3.], count=3, sep=',')
self._check_from('1,2,3,4', [1., 2., 3., 4.], count=-1, sep=',')
def test_string_with_ws(self):
self._check_from('1 2 3 4 ', [1, 2, 3, 4], dtype=int, sep=' ')
def test_counted_string_with_ws(self):
self._check_from('1 2 3 4 ', [1, 2, 3], count=3, dtype=int,
sep=' ')
def test_ascii(self):
self._check_from('1 , 2 , 3 , 4', [1., 2., 3., 4.], sep=',')
self._check_from('1,2,3,4', [1., 2., 3., 4.], dtype=float, sep=',')
def test_malformed(self):
self._check_from('1.234 1,234', [1.234, 1.], sep=' ')
def test_long_sep(self):
self._check_from('1_x_3_x_4_x_5', [1, 3, 4, 5], sep='_x_')
def test_dtype(self):
v = np.array([1, 2, 3, 4], dtype=np.int_)
self._check_from('1,2,3,4', v, sep=',', dtype=np.int_)
def test_dtype_bool(self):
# can't use _check_from because fromstring can't handle True/False
v = np.array([True, False, True, False], dtype=np.bool_)
s = '1,0,-2.3,0'
f = open(self.filename, 'wb')
f.write(asbytes(s))
f.close()
y = np.fromfile(self.filename, sep=',', dtype=np.bool_)
assert_(y.dtype == '?')
assert_array_equal(y, v)
def test_tofile_sep(self):
x = np.array([1.51, 2, 3.51, 4], dtype=float)
f = open(self.filename, 'w')
x.tofile(f, sep=',')
f.close()
f = open(self.filename, 'r')
s = f.read()
f.close()
assert_equal(s, '1.51,2.0,3.51,4.0')
def test_tofile_format(self):
x = np.array([1.51, 2, 3.51, 4], dtype=float)
f = open(self.filename, 'w')
x.tofile(f, sep=',', format='%.2f')
f.close()
f = open(self.filename, 'r')
s = f.read()
f.close()
assert_equal(s, '1.51,2.00,3.51,4.00')
def test_locale(self):
in_foreign_locale(self.test_numbers)()
in_foreign_locale(self.test_nan)()
in_foreign_locale(self.test_inf)()
in_foreign_locale(self.test_counted_string)()
in_foreign_locale(self.test_ascii)()
in_foreign_locale(self.test_malformed)()
in_foreign_locale(self.test_tofile_sep)()
in_foreign_locale(self.test_tofile_format)()
class TestFromBuffer(object):
def tst_basic(self, buffer, expected, kwargs):
assert_array_equal(np.frombuffer(buffer,**kwargs), expected)
def test_ip_basic(self):
for byteorder in ['<', '>']:
for dtype in [float, int, np.complex]:
dt = np.dtype(dtype).newbyteorder(byteorder)
x = (np.random.random((4, 7))*5).astype(dt)
buf = x.tobytes()
yield self.tst_basic, buf, x.flat, {'dtype':dt}
def test_empty(self):
yield self.tst_basic, asbytes(''), np.array([]), {}
class TestFlat(TestCase):
def setUp(self):
a0 = np.arange(20.0)
a = a0.reshape(4, 5)
a0.shape = (4, 5)
a.flags.writeable = False
self.a = a
self.b = a[::2, ::2]
self.a0 = a0
self.b0 = a0[::2, ::2]
def test_contiguous(self):
testpassed = False
try:
self.a.flat[12] = 100.0
except ValueError:
testpassed = True
assert testpassed
assert self.a.flat[12] == 12.0
def test_discontiguous(self):
testpassed = False
try:
self.b.flat[4] = 100.0
except ValueError:
testpassed = True
assert testpassed
assert self.b.flat[4] == 12.0
def test___array__(self):
c = self.a.flat.__array__()
d = self.b.flat.__array__()
e = self.a0.flat.__array__()
f = self.b0.flat.__array__()
assert c.flags.writeable is False
assert d.flags.writeable is False
assert e.flags.writeable is True
assert f.flags.writeable is True
assert c.flags.updateifcopy is False
assert d.flags.updateifcopy is False
assert e.flags.updateifcopy is False
assert f.flags.updateifcopy is True
assert f.base is self.b0
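# Illustrative sketch (not part of the original suite): ``flat.__array__()``
# on a non-contiguous array must materialize a contiguous copy; when the
# source is writeable, that copy is flagged UPDATEIFCOPY so modifications
# are written back to the base array when the copy is deallocated.
def _example_flat_updateifcopy():
    a = np.arange(20.0).reshape(4, 5)
    b = a[::2, ::2]             # writeable, non-contiguous view
    c = b.flat.__array__()      # contiguous copy tied back to b
    assert_(c.flags.updateifcopy)
    assert_(c.base is b)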
class TestResize(TestCase):
def test_basic(self):
x = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
x.resize((5, 5))
assert_array_equal(x.flat[:9],
np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]]).flat)
assert_array_equal(x[9:].flat, 0)
def test_check_reference(self):
x = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
y = x
self.assertRaises(ValueError, x.resize, (5, 1))
del y # avoid pyflakes unused variable warning.
def test_int_shape(self):
x = np.eye(3)
x.resize(3)
assert_array_equal(x, np.eye(3)[0,:])
def test_none_shape(self):
x = np.eye(3)
x.resize(None)
assert_array_equal(x, np.eye(3))
x.resize()
assert_array_equal(x, np.eye(3))
    def test_invalid_arguments(self):
self.assertRaises(TypeError, np.eye(3).resize, 'hi')
self.assertRaises(ValueError, np.eye(3).resize, -1)
self.assertRaises(TypeError, np.eye(3).resize, order=1)
self.assertRaises(TypeError, np.eye(3).resize, refcheck='hi')
def test_freeform_shape(self):
x = np.eye(3)
x.resize(3, 2, 1)
assert_(x.shape == (3, 2, 1))
def test_zeros_appended(self):
x = np.eye(3)
x.resize(2, 3, 3)
assert_array_equal(x[0], np.eye(3))
assert_array_equal(x[1], np.zeros((3, 3)))
def test_obj_obj(self):
# check memory is initialized on resize, gh-4857
a = np.ones(10, dtype=[('k', object, 2)])
a.resize(15,)
assert_equal(a.shape, (15,))
assert_array_equal(a['k'][-5:], 0)
assert_array_equal(a['k'][:-5], 1)
class TestRecord(TestCase):
def test_field_rename(self):
dt = np.dtype([('f', float), ('i', int)])
dt.names = ['p', 'q']
assert_equal(dt.names, ['p', 'q'])
if sys.version_info[0] >= 3:
def test_bytes_fields(self):
# Bytes are not allowed in field names and not recognized in titles
# on Py3
assert_raises(TypeError, np.dtype, [(asbytes('a'), int)])
assert_raises(TypeError, np.dtype, [(('b', asbytes('a')), int)])
dt = np.dtype([((asbytes('a'), 'b'), int)])
assert_raises(ValueError, dt.__getitem__, asbytes('a'))
x = np.array([(1,), (2,), (3,)], dtype=dt)
assert_raises(IndexError, x.__getitem__, asbytes('a'))
y = x[0]
assert_raises(IndexError, y.__getitem__, asbytes('a'))
else:
def test_unicode_field_titles(self):
# Unicode field titles are added to field dict on Py2
title = unicode('b')
dt = np.dtype([((title, 'a'), int)])
dt[title]
dt['a']
x = np.array([(1,), (2,), (3,)], dtype=dt)
x[title]
x['a']
y = x[0]
y[title]
y['a']
def test_unicode_field_names(self):
# Unicode field names are not allowed on Py2
title = unicode('b')
assert_raises(TypeError, np.dtype, [(title, int)])
assert_raises(TypeError, np.dtype, [(('a', title), int)])
def test_field_names(self):
# Test unicode and 8-bit / byte strings can be used
a = np.zeros((1,), dtype=[('f1', 'i4'),
('f2', 'i4'),
('f3', [('sf1', 'i4')])])
is_py3 = sys.version_info[0] >= 3
if is_py3:
funcs = (str,)
# byte string indexing fails gracefully
assert_raises(IndexError, a.__setitem__, asbytes('f1'), 1)
assert_raises(IndexError, a.__getitem__, asbytes('f1'))
assert_raises(IndexError, a['f1'].__setitem__, asbytes('sf1'), 1)
assert_raises(IndexError, a['f1'].__getitem__, asbytes('sf1'))
else:
funcs = (str, unicode)
for func in funcs:
b = a.copy()
fn1 = func('f1')
b[fn1] = 1
assert_equal(b[fn1], 1)
fnn = func('not at all')
assert_raises(ValueError, b.__setitem__, fnn, 1)
assert_raises(ValueError, b.__getitem__, fnn)
b[0][fn1] = 2
assert_equal(b[fn1], 2)
# Subfield
assert_raises(IndexError, b[0].__setitem__, fnn, 1)
assert_raises(IndexError, b[0].__getitem__, fnn)
# Subfield
fn3 = func('f3')
sfn1 = func('sf1')
b[fn3][sfn1] = 1
assert_equal(b[fn3][sfn1], 1)
assert_raises(ValueError, b[fn3].__setitem__, fnn, 1)
assert_raises(ValueError, b[fn3].__getitem__, fnn)
# multiple Subfields
fn2 = func('f2')
b[fn2] = 3
assert_equal(b[['f1', 'f2']][0].tolist(), (2, 3))
assert_equal(b[['f2', 'f1']][0].tolist(), (3, 2))
assert_equal(b[['f1', 'f3']][0].tolist(), (2, (1,)))
# view of subfield view/copy
assert_equal(b[['f1', 'f2']][0].view(('i4', 2)).tolist(), (2, 3))
assert_equal(b[['f2', 'f1']][0].view(('i4', 2)).tolist(), (3, 2))
view_dtype = [('f1', 'i4'), ('f3', [('', 'i4')])]
assert_equal(b[['f1', 'f3']][0].view(view_dtype).tolist(), (2, (1,)))
# non-ascii unicode field indexing is well behaved
if not is_py3:
raise SkipTest('non ascii unicode field indexing skipped; '
'raises segfault on python 2.x')
else:
assert_raises(ValueError, a.__setitem__, sixu('\u03e0'), 1)
assert_raises(ValueError, a.__getitem__, sixu('\u03e0'))
def test_field_names_deprecation(self):
def collect_warnings(f, *args, **kwargs):
with warnings.catch_warnings(record=True) as log:
warnings.simplefilter("always")
f(*args, **kwargs)
return [w.category for w in log]
a = np.zeros((1,), dtype=[('f1', 'i4'),
('f2', 'i4'),
('f3', [('sf1', 'i4')])])
a['f1'][0] = 1
a['f2'][0] = 2
a['f3'][0] = (3,)
b = np.zeros((1,), dtype=[('f1', 'i4'),
('f2', 'i4'),
('f3', [('sf1', 'i4')])])
b['f1'][0] = 1
b['f2'][0] = 2
b['f3'][0] = (3,)
# All the different functions raise a warning, but not an error, and
# 'a' is not modified:
assert_equal(collect_warnings(a[['f1', 'f2']].__setitem__, 0, (10, 20)),
[FutureWarning])
assert_equal(a, b)
# Views also warn
subset = a[['f1', 'f2']]
subset_view = subset.view()
assert_equal(collect_warnings(subset_view['f1'].__setitem__, 0, 10),
[FutureWarning])
# But the write goes through:
assert_equal(subset['f1'][0], 10)
# Only one warning per multiple field indexing, though (even if there
# are multiple views involved):
assert_equal(collect_warnings(subset['f1'].__setitem__, 0, 10), [])
def test_record_hash(self):
a = np.array([(1, 2), (1, 2)], dtype='i1,i2')
a.flags.writeable = False
b = np.array([(1, 2), (3, 4)], dtype=[('num1', 'i1'), ('num2', 'i2')])
b.flags.writeable = False
c = np.array([(1, 2), (3, 4)], dtype='i1,i2')
c.flags.writeable = False
self.assertTrue(hash(a[0]) == hash(a[1]))
self.assertTrue(hash(a[0]) == hash(b[0]))
self.assertTrue(hash(a[0]) != hash(b[1]))
self.assertTrue(hash(c[0]) == hash(a[0]) and c[0] == a[0])
def test_record_no_hash(self):
a = np.array([(1, 2), (1, 2)], dtype='i1,i2')
self.assertRaises(TypeError, hash, a[0])
def test_empty_structure_creation(self):
# make sure these do not raise errors (gh-5631)
np.array([()], dtype={'names': [], 'formats': [],
'offsets': [], 'itemsize': 12})
np.array([(), (), (), (), ()], dtype={'names': [], 'formats': [],
'offsets': [], 'itemsize': 12})
class TestView(TestCase):
def test_basic(self):
x = np.array([(1, 2, 3, 4), (5, 6, 7, 8)],
dtype=[('r', np.int8), ('g', np.int8),
('b', np.int8), ('a', np.int8)])
# We must be specific about the endianness here:
y = x.view(dtype='<i4')
# ... and again without the keyword.
z = x.view('<i4')
assert_array_equal(y, z)
assert_array_equal(y, [67305985, 134678021])
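# Illustrative sketch (not part of the original suite): a dtype view
# reinterprets the same buffer without copying; four little-endian int8
# fields holding (1, 2, 3, 4) read back as a single '<i4' give
# 0x04030201 == 67305985, matching test_basic above.
def _example_dtype_view():
    x = np.array([(1, 2, 3, 4)], dtype=[('r', np.int8), ('g', np.int8),
                                        ('b', np.int8), ('a', np.int8)])
    assert_equal(x.view('<i4')[0], 67305985)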
def _mean(a, **args):
return a.mean(**args)
def _var(a, **args):
return a.var(**args)
def _std(a, **args):
return a.std(**args)
class TestStats(TestCase):
funcs = [_mean, _var, _std]
def setUp(self):
np.random.seed(range(3))
self.rmat = np.random.random((4, 5))
self.cmat = self.rmat + 1j * self.rmat
self.omat = np.array([Decimal(repr(r)) for r in self.rmat.flat])
self.omat = self.omat.reshape(4, 5)
def test_keepdims(self):
mat = np.eye(3)
for f in self.funcs:
for axis in [0, 1]:
res = f(mat, axis=axis, keepdims=True)
assert_(res.ndim == mat.ndim)
assert_(res.shape[axis] == 1)
for axis in [None]:
res = f(mat, axis=axis, keepdims=True)
assert_(res.shape == (1, 1))
def test_out(self):
mat = np.eye(3)
for f in self.funcs:
out = np.zeros(3)
tgt = f(mat, axis=1)
res = f(mat, axis=1, out=out)
assert_almost_equal(res, out)
assert_almost_equal(res, tgt)
out = np.empty(2)
assert_raises(ValueError, f, mat, axis=1, out=out)
out = np.empty((2, 2))
assert_raises(ValueError, f, mat, axis=1, out=out)
def test_dtype_from_input(self):
icodes = np.typecodes['AllInteger']
fcodes = np.typecodes['AllFloat']
# object type
for f in self.funcs:
mat = np.array([[Decimal(1)]*3]*3)
tgt = mat.dtype.type
res = f(mat, axis=1).dtype.type
assert_(res is tgt)
# scalar case
res = type(f(mat, axis=None))
assert_(res is Decimal)
# integer types
for f in self.funcs:
for c in icodes:
mat = np.eye(3, dtype=c)
tgt = np.float64
res = f(mat, axis=1).dtype.type
assert_(res is tgt)
# scalar case
res = f(mat, axis=None).dtype.type
assert_(res is tgt)
# mean for float types
for f in [_mean]:
for c in fcodes:
mat = np.eye(3, dtype=c)
tgt = mat.dtype.type
res = f(mat, axis=1).dtype.type
assert_(res is tgt)
# scalar case
res = f(mat, axis=None).dtype.type
assert_(res is tgt)
# var, std for float types
for f in [_var, _std]:
for c in fcodes:
mat = np.eye(3, dtype=c)
# deal with complex types
tgt = mat.real.dtype.type
res = f(mat, axis=1).dtype.type
assert_(res is tgt)
# scalar case
res = f(mat, axis=None).dtype.type
assert_(res is tgt)
def test_dtype_from_dtype(self):
mat = np.eye(3)
# stats for integer types
        # FIXME:
        # this needs a definition, as there are lots of places along the
        # line where type casting may take place.
#for f in self.funcs:
# for c in np.typecodes['AllInteger']:
# tgt = np.dtype(c).type
# res = f(mat, axis=1, dtype=c).dtype.type
# assert_(res is tgt)
# # scalar case
# res = f(mat, axis=None, dtype=c).dtype.type
# assert_(res is tgt)
# stats for float types
for f in self.funcs:
for c in np.typecodes['AllFloat']:
tgt = np.dtype(c).type
res = f(mat, axis=1, dtype=c).dtype.type
assert_(res is tgt)
# scalar case
res = f(mat, axis=None, dtype=c).dtype.type
assert_(res is tgt)
    def test_ddof(self):
        for f in [_var]:
            for ddof in range(3):
                dim = self.rmat.shape[1]
                tgt = f(self.rmat, axis=1) * dim
                res = f(self.rmat, axis=1, ddof=ddof) * (dim - ddof)
                assert_almost_equal(res, tgt)
        for f in [_std]:
            for ddof in range(3):
                dim = self.rmat.shape[1]
                tgt = f(self.rmat, axis=1) * np.sqrt(dim)
                res = f(self.rmat, axis=1, ddof=ddof) * np.sqrt(dim - ddof)
                assert_almost_equal(res, tgt)
def test_ddof_too_big(self):
dim = self.rmat.shape[1]
for f in [_var, _std]:
for ddof in range(dim, dim + 2):
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
res = f(self.rmat, axis=1, ddof=ddof)
assert_(not (res < 0).any())
assert_(len(w) > 0)
assert_(issubclass(w[0].category, RuntimeWarning))
def test_empty(self):
A = np.zeros((0, 3))
for f in self.funcs:
for axis in [0, None]:
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
assert_(np.isnan(f(A, axis=axis)).all())
assert_(len(w) > 0)
assert_(issubclass(w[0].category, RuntimeWarning))
for axis in [1]:
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
assert_equal(f(A, axis=axis), np.zeros([]))
def test_mean_values(self):
for mat in [self.rmat, self.cmat, self.omat]:
for axis in [0, 1]:
tgt = mat.sum(axis=axis)
res = _mean(mat, axis=axis) * mat.shape[axis]
assert_almost_equal(res, tgt)
for axis in [None]:
tgt = mat.sum(axis=axis)
res = _mean(mat, axis=axis) * np.prod(mat.shape)
assert_almost_equal(res, tgt)
def test_var_values(self):
for mat in [self.rmat, self.cmat, self.omat]:
for axis in [0, 1, None]:
msqr = _mean(mat * mat.conj(), axis=axis)
mean = _mean(mat, axis=axis)
tgt = msqr - mean * mean.conjugate()
res = _var(mat, axis=axis)
assert_almost_equal(res, tgt)
def test_std_values(self):
for mat in [self.rmat, self.cmat, self.omat]:
for axis in [0, 1, None]:
tgt = np.sqrt(_var(mat, axis=axis))
res = _std(mat, axis=axis)
assert_almost_equal(res, tgt)
def test_subclass(self):
class TestArray(np.ndarray):
def __new__(cls, data, info):
result = np.array(data)
result = result.view(cls)
result.info = info
return result
def __array_finalize__(self, obj):
self.info = getattr(obj, "info", '')
dat = TestArray([[1, 2, 3, 4], [5, 6, 7, 8]], 'jubba')
res = dat.mean(1)
assert_(res.info == dat.info)
res = dat.std(1)
assert_(res.info == dat.info)
res = dat.var(1)
assert_(res.info == dat.info)
class TestVdot(TestCase):
def test_basic(self):
dt_numeric = np.typecodes['AllFloat'] + np.typecodes['AllInteger']
dt_complex = np.typecodes['Complex']
# test real
a = np.eye(3)
for dt in dt_numeric + 'O':
b = a.astype(dt)
res = np.vdot(b, b)
assert_(np.isscalar(res))
assert_equal(np.vdot(b, b), 3)
# test complex
a = np.eye(3) * 1j
for dt in dt_complex + 'O':
b = a.astype(dt)
res = np.vdot(b, b)
assert_(np.isscalar(res))
assert_equal(np.vdot(b, b), 3)
# test boolean
b = np.eye(3, dtype=np.bool)
res = np.vdot(b, b)
assert_(np.isscalar(res))
assert_equal(np.vdot(b, b), True)
def test_vdot_array_order(self):
a = np.array([[1, 2], [3, 4]], order='C')
b = np.array([[1, 2], [3, 4]], order='F')
res = np.vdot(a, a)
# integer arrays are exact
assert_equal(np.vdot(a, b), res)
assert_equal(np.vdot(b, a), res)
assert_equal(np.vdot(b, b), res)
class TestDot(TestCase):
def setUp(self):
np.random.seed(128)
self.A = np.random.rand(4, 2)
self.b1 = np.random.rand(2, 1)
self.b2 = np.random.rand(2)
self.b3 = np.random.rand(1, 2)
self.b4 = np.random.rand(4)
self.N = 7
def test_dotmatmat(self):
A = self.A
res = np.dot(A.transpose(), A)
tgt = np.array([[1.45046013, 0.86323640],
[0.86323640, 0.84934569]])
assert_almost_equal(res, tgt, decimal=self.N)
def test_dotmatvec(self):
A, b1 = self.A, self.b1
res = np.dot(A, b1)
tgt = np.array([[0.32114320], [0.04889721],
[0.15696029], [0.33612621]])
assert_almost_equal(res, tgt, decimal=self.N)
def test_dotmatvec2(self):
A, b2 = self.A, self.b2
res = np.dot(A, b2)
tgt = np.array([0.29677940, 0.04518649, 0.14468333, 0.31039293])
assert_almost_equal(res, tgt, decimal=self.N)
def test_dotvecmat(self):
A, b4 = self.A, self.b4
res = np.dot(b4, A)
tgt = np.array([1.23495091, 1.12222648])
assert_almost_equal(res, tgt, decimal=self.N)
def test_dotvecmat2(self):
b3, A = self.b3, self.A
res = np.dot(b3, A.transpose())
tgt = np.array([[0.58793804, 0.08957460, 0.30605758, 0.62716383]])
assert_almost_equal(res, tgt, decimal=self.N)
def test_dotvecmat3(self):
A, b4 = self.A, self.b4
res = np.dot(A.transpose(), b4)
tgt = np.array([1.23495091, 1.12222648])
assert_almost_equal(res, tgt, decimal=self.N)
def test_dotvecvecouter(self):
b1, b3 = self.b1, self.b3
res = np.dot(b1, b3)
tgt = np.array([[0.20128610, 0.08400440], [0.07190947, 0.03001058]])
assert_almost_equal(res, tgt, decimal=self.N)
def test_dotvecvecinner(self):
b1, b3 = self.b1, self.b3
res = np.dot(b3, b1)
tgt = np.array([[ 0.23129668]])
assert_almost_equal(res, tgt, decimal=self.N)
def test_dotcolumnvect1(self):
b1 = np.ones((3, 1))
b2 = [5.3]
res = np.dot(b1, b2)
tgt = np.array([5.3, 5.3, 5.3])
assert_almost_equal(res, tgt, decimal=self.N)
def test_dotcolumnvect2(self):
b1 = np.ones((3, 1)).transpose()
b2 = [6.2]
res = np.dot(b2, b1)
tgt = np.array([6.2, 6.2, 6.2])
assert_almost_equal(res, tgt, decimal=self.N)
def test_dotvecscalar(self):
np.random.seed(100)
b1 = np.random.rand(1, 1)
b2 = np.random.rand(1, 4)
res = np.dot(b1, b2)
tgt = np.array([[0.15126730, 0.23068496, 0.45905553, 0.00256425]])
assert_almost_equal(res, tgt, decimal=self.N)
def test_dotvecscalar2(self):
np.random.seed(100)
b1 = np.random.rand(4, 1)
b2 = np.random.rand(1, 1)
res = np.dot(b1, b2)
tgt = np.array([[0.00256425],[0.00131359],[0.00200324],[ 0.00398638]])
assert_almost_equal(res, tgt, decimal=self.N)
def test_all(self):
dims = [(), (1,), (1, 1)]
dout = [(), (1,), (1, 1), (1,), (), (1,), (1, 1), (1,), (1, 1)]
for dim, (dim1, dim2) in zip(dout, itertools.product(dims, dims)):
b1 = np.zeros(dim1)
b2 = np.zeros(dim2)
res = np.dot(b1, b2)
tgt = np.zeros(dim)
assert_(res.shape == tgt.shape)
assert_almost_equal(res, tgt, decimal=self.N)
def test_vecobject(self):
class Vec(object):
def __init__(self, sequence=None):
if sequence is None:
sequence = []
self.array = np.array(sequence)
def __add__(self, other):
out = Vec()
out.array = self.array + other.array
return out
def __sub__(self, other):
out = Vec()
out.array = self.array - other.array
return out
def __mul__(self, other): # with scalar
out = Vec(self.array.copy())
out.array *= other
return out
def __rmul__(self, other):
return self*other
U_non_cont = np.transpose([[1., 1.], [1., 2.]])
U_cont = np.ascontiguousarray(U_non_cont)
x = np.array([Vec([1., 0.]), Vec([0., 1.])])
zeros = np.array([Vec([0., 0.]), Vec([0., 0.])])
zeros_test = np.dot(U_cont, x) - np.dot(U_non_cont, x)
assert_equal(zeros[0].array, zeros_test[0].array)
assert_equal(zeros[1].array, zeros_test[1].array)
def test_dot_2args(self):
from numpy.core.multiarray import dot
a = np.array([[1, 2], [3, 4]], dtype=float)
b = np.array([[1, 0], [1, 1]], dtype=float)
c = np.array([[3, 2], [7, 4]], dtype=float)
d = dot(a, b)
assert_allclose(c, d)
def test_dot_3args(self):
from numpy.core.multiarray import dot
np.random.seed(22)
f = np.random.random_sample((1024, 16))
v = np.random.random_sample((16, 32))
r = np.empty((1024, 32))
for i in range(12):
dot(f, v, r)
assert_equal(sys.getrefcount(r), 2)
r2 = dot(f, v, out=None)
assert_array_equal(r2, r)
assert_(r is dot(f, v, out=r))
v = v[:, 0].copy() # v.shape == (16,)
r = r[:, 0].copy() # r.shape == (1024,)
r2 = dot(f, v)
assert_(r is dot(f, v, r))
assert_array_equal(r2, r)
def test_dot_3args_errors(self):
from numpy.core.multiarray import dot
np.random.seed(22)
f = np.random.random_sample((1024, 16))
v = np.random.random_sample((16, 32))
r = np.empty((1024, 31))
assert_raises(ValueError, dot, f, v, r)
r = np.empty((1024,))
assert_raises(ValueError, dot, f, v, r)
r = np.empty((32,))
assert_raises(ValueError, dot, f, v, r)
r = np.empty((32, 1024))
assert_raises(ValueError, dot, f, v, r)
assert_raises(ValueError, dot, f, v, r.T)
r = np.empty((1024, 64))
assert_raises(ValueError, dot, f, v, r[:, ::2])
assert_raises(ValueError, dot, f, v, r[:, :32])
r = np.empty((1024, 32), dtype=np.float32)
assert_raises(ValueError, dot, f, v, r)
r = np.empty((1024, 32), dtype=int)
assert_raises(ValueError, dot, f, v, r)
def test_dot_array_order(self):
a = np.array([[1, 2], [3, 4]], order='C')
b = np.array([[1, 2], [3, 4]], order='F')
res = np.dot(a, a)
# integer arrays are exact
assert_equal(np.dot(a, b), res)
assert_equal(np.dot(b, a), res)
assert_equal(np.dot(b, b), res)
def test_dot_scalar_and_matrix_of_objects(self):
# Ticket #2469
arr = np.matrix([1, 2], dtype=object)
desired = np.matrix([[3, 6]], dtype=object)
assert_equal(np.dot(arr, 3), desired)
assert_equal(np.dot(3, arr), desired)
def test_dot_override(self):
class A(object):
def __numpy_ufunc__(self, ufunc, method, pos, inputs, **kwargs):
return "A"
class B(object):
def __numpy_ufunc__(self, ufunc, method, pos, inputs, **kwargs):
return NotImplemented
a = A()
b = B()
c = np.array([[1]])
assert_equal(np.dot(a, b), "A")
assert_equal(c.dot(a), "A")
assert_raises(TypeError, np.dot, b, c)
assert_raises(TypeError, c.dot, b)
def test_accelerate_framework_sgemv_fix(self):
def aligned_array(shape, align, dtype, order='C'):
d = dtype(0)
N = np.prod(shape)
tmp = np.zeros(N * d.nbytes + align, dtype=np.uint8)
address = tmp.__array_interface__["data"][0]
for offset in range(align):
if (address + offset) % align == 0:
break
tmp = tmp[offset:offset+N*d.nbytes].view(dtype=dtype)
return tmp.reshape(shape, order=order)
def as_aligned(arr, align, dtype, order='C'):
aligned = aligned_array(arr.shape, align, dtype, order)
aligned[:] = arr[:]
return aligned
def assert_dot_close(A, X, desired):
assert_allclose(np.dot(A, X), desired, rtol=1e-5, atol=1e-7)
m = aligned_array(100, 15, np.float32)
s = aligned_array((100, 100), 15, np.float32)
np.dot(s, m) # this will always segfault if the bug is present
testdata = itertools.product((15,32), (10000,), (200,89), ('C','F'))
for align, m, n, a_order in testdata:
# Calculation in double precision
A_d = np.random.rand(m, n)
X_d = np.random.rand(n)
desired = np.dot(A_d, X_d)
# Calculation with aligned single precision
A_f = as_aligned(A_d, align, np.float32, order=a_order)
X_f = as_aligned(X_d, align, np.float32)
assert_dot_close(A_f, X_f, desired)
# Strided A rows
A_d_2 = A_d[::2]
desired = np.dot(A_d_2, X_d)
A_f_2 = A_f[::2]
assert_dot_close(A_f_2, X_f, desired)
# Strided A columns, strided X vector
A_d_22 = A_d_2[:, ::2]
X_d_2 = X_d[::2]
desired = np.dot(A_d_22, X_d_2)
A_f_22 = A_f_2[:, ::2]
X_f_2 = X_f[::2]
assert_dot_close(A_f_22, X_f_2, desired)
# Check the strides are as expected
if a_order == 'F':
assert_equal(A_f_22.strides, (8, 8 * m))
else:
assert_equal(A_f_22.strides, (8 * n, 8))
assert_equal(X_f_2.strides, (8,))
# Strides in A rows + cols only
X_f_2c = as_aligned(X_f_2, align, np.float32)
assert_dot_close(A_f_22, X_f_2c, desired)
# Strides just in A cols
A_d_12 = A_d[:, ::2]
desired = np.dot(A_d_12, X_d_2)
A_f_12 = A_f[:, ::2]
assert_dot_close(A_f_12, X_f_2c, desired)
# Strides in A cols and X
assert_dot_close(A_f_12, X_f_2, desired)
class MatmulCommon():
"""Common tests for '@' operator and numpy.matmul.
Do not derive from TestCase to avoid nose running it.
"""
# Should work with these types. Will want to add
# "O" at some point
types = "?bhilqBHILQefdgFDG"
def test_exceptions(self):
dims = [
((1,), (2,)), # mismatched vector vector
((2, 1,), (2,)), # mismatched matrix vector
((2,), (1, 2)), # mismatched vector matrix
((1, 2), (3, 1)), # mismatched matrix matrix
((1,), ()), # vector scalar
            ((), (1,)),  # scalar vector
((1, 1), ()), # matrix scalar
((), (1, 1)), # scalar matrix
((2, 2, 1), (3, 1, 2)), # cannot broadcast
]
for dt, (dm1, dm2) in itertools.product(self.types, dims):
a = np.ones(dm1, dtype=dt)
b = np.ones(dm2, dtype=dt)
assert_raises(ValueError, self.matmul, a, b)
def test_shapes(self):
dims = [
((1, 1), (2, 1, 1)), # broadcast first argument
((2, 1, 1), (1, 1)), # broadcast second argument
((2, 1, 1), (2, 1, 1)), # matrix stack sizes match
]
for dt, (dm1, dm2) in itertools.product(self.types, dims):
a = np.ones(dm1, dtype=dt)
b = np.ones(dm2, dtype=dt)
res = self.matmul(a, b)
assert_(res.shape == (2, 1, 1))
# vector vector returns scalars.
for dt in self.types:
a = np.ones((2,), dtype=dt)
b = np.ones((2,), dtype=dt)
c = self.matmul(a, b)
assert_(np.array(c).shape == ())
def test_result_types(self):
mat = np.ones((1,1))
vec = np.ones((1,))
for dt in self.types:
m = mat.astype(dt)
v = vec.astype(dt)
for arg in [(m, v), (v, m), (m, m)]:
res = self.matmul(*arg)
assert_(res.dtype == dt)
# vector vector returns scalars
res = self.matmul(v, v)
assert_(type(res) is np.dtype(dt).type)
def test_vector_vector_values(self):
vec = np.array([1, 2])
tgt = 5
for dt in self.types[1:]:
v1 = vec.astype(dt)
res = self.matmul(v1, v1)
assert_equal(res, tgt)
# boolean type
vec = np.array([True, True], dtype='?')
res = self.matmul(vec, vec)
assert_equal(res, True)
def test_vector_matrix_values(self):
vec = np.array([1, 2])
mat1 = np.array([[1, 2], [3, 4]])
mat2 = np.stack([mat1]*2, axis=0)
tgt1 = np.array([7, 10])
tgt2 = np.stack([tgt1]*2, axis=0)
for dt in self.types[1:]:
v = vec.astype(dt)
m1 = mat1.astype(dt)
m2 = mat2.astype(dt)
res = self.matmul(v, m1)
assert_equal(res, tgt1)
res = self.matmul(v, m2)
assert_equal(res, tgt2)
# boolean type
vec = np.array([True, False])
mat1 = np.array([[True, False], [False, True]])
mat2 = np.stack([mat1]*2, axis=0)
tgt1 = np.array([True, False])
tgt2 = np.stack([tgt1]*2, axis=0)
res = self.matmul(vec, mat1)
assert_equal(res, tgt1)
res = self.matmul(vec, mat2)
assert_equal(res, tgt2)
def test_matrix_vector_values(self):
vec = np.array([1, 2])
mat1 = np.array([[1, 2], [3, 4]])
mat2 = np.stack([mat1]*2, axis=0)
tgt1 = np.array([5, 11])
tgt2 = np.stack([tgt1]*2, axis=0)
for dt in self.types[1:]:
v = vec.astype(dt)
m1 = mat1.astype(dt)
m2 = mat2.astype(dt)
res = self.matmul(m1, v)
assert_equal(res, tgt1)
res = self.matmul(m2, v)
assert_equal(res, tgt2)
# boolean type
vec = np.array([True, False])
mat1 = np.array([[True, False], [False, True]])
mat2 = np.stack([mat1]*2, axis=0)
tgt1 = np.array([True, False])
tgt2 = np.stack([tgt1]*2, axis=0)
res = self.matmul(mat1, vec)
assert_equal(res, tgt1)
res = self.matmul(mat2, vec)
assert_equal(res, tgt2)
def test_matrix_matrix_values(self):
mat1 = np.array([[1, 2], [3, 4]])
mat2 = np.array([[1, 0], [1, 1]])
mat12 = np.stack([mat1, mat2], axis=0)
mat21 = np.stack([mat2, mat1], axis=0)
tgt11 = np.array([[7, 10], [15, 22]])
tgt12 = np.array([[3, 2], [7, 4]])
tgt21 = np.array([[1, 2], [4, 6]])
tgt12_21 = np.stack([tgt12, tgt21], axis=0)
tgt11_12 = np.stack((tgt11, tgt12), axis=0)
tgt11_21 = np.stack((tgt11, tgt21), axis=0)
for dt in self.types[1:]:
m1 = mat1.astype(dt)
m2 = mat2.astype(dt)
m12 = mat12.astype(dt)
m21 = mat21.astype(dt)
# matrix @ matrix
res = self.matmul(m1, m2)
assert_equal(res, tgt12)
res = self.matmul(m2, m1)
assert_equal(res, tgt21)
# stacked @ matrix
res = self.matmul(m12, m1)
assert_equal(res, tgt11_21)
# matrix @ stacked
res = self.matmul(m1, m12)
assert_equal(res, tgt11_12)
# stacked @ stacked
res = self.matmul(m12, m21)
assert_equal(res, tgt12_21)
# boolean type
m1 = np.array([[1, 1], [0, 0]], dtype=np.bool_)
m2 = np.array([[1, 0], [1, 1]], dtype=np.bool_)
m12 = np.stack([m1, m2], axis=0)
m21 = np.stack([m2, m1], axis=0)
tgt11 = m1
tgt12 = m1
tgt21 = np.array([[1, 1], [1, 1]], dtype=np.bool_)
tgt12_21 = np.stack([tgt12, tgt21], axis=0)
tgt11_12 = np.stack((tgt11, tgt12), axis=0)
tgt11_21 = np.stack((tgt11, tgt21), axis=0)
# matrix @ matrix
res = self.matmul(m1, m2)
assert_equal(res, tgt12)
res = self.matmul(m2, m1)
assert_equal(res, tgt21)
# stacked @ matrix
res = self.matmul(m12, m1)
assert_equal(res, tgt11_21)
# matrix @ stacked
res = self.matmul(m1, m12)
assert_equal(res, tgt11_12)
# stacked @ stacked
res = self.matmul(m12, m21)
assert_equal(res, tgt12_21)
def test_numpy_ufunc_override(self):
class A(np.ndarray):
def __new__(cls, *args, **kwargs):
return np.array(*args, **kwargs).view(cls)
def __numpy_ufunc__(self, ufunc, method, pos, inputs, **kwargs):
return "A"
class B(np.ndarray):
def __new__(cls, *args, **kwargs):
return np.array(*args, **kwargs).view(cls)
def __numpy_ufunc__(self, ufunc, method, pos, inputs, **kwargs):
return NotImplemented
a = A([1, 2])
b = B([1, 2])
c = np.ones(2)
assert_equal(self.matmul(a, b), "A")
assert_equal(self.matmul(b, a), "A")
assert_raises(TypeError, self.matmul, b, c)
class TestMatmul(MatmulCommon, TestCase):
matmul = np.matmul
def test_out_arg(self):
a = np.ones((2, 2), dtype=np.float)
b = np.ones((2, 2), dtype=np.float)
tgt = np.full((2,2), 2, dtype=np.float)
# test as positional argument
msg = "out positional argument"
out = np.zeros((2, 2), dtype=np.float)
self.matmul(a, b, out)
assert_array_equal(out, tgt, err_msg=msg)
# test as keyword argument
msg = "out keyword argument"
out = np.zeros((2, 2), dtype=np.float)
self.matmul(a, b, out=out)
assert_array_equal(out, tgt, err_msg=msg)
# test out with not allowed type cast (safe casting)
# einsum and cblas raise different error types, so
# use Exception.
msg = "out argument with illegal cast"
out = np.zeros((2, 2), dtype=np.int32)
assert_raises(Exception, self.matmul, a, b, out=out)
# skip following tests for now, cblas does not allow non-contiguous
# outputs and consistency with dot would require same type,
# dimensions, subtype, and c_contiguous.
# test out with allowed type cast
# msg = "out argument with allowed cast"
# out = np.zeros((2, 2), dtype=np.complex128)
# self.matmul(a, b, out=out)
# assert_array_equal(out, tgt, err_msg=msg)
# test out non-contiguous
# msg = "out argument with non-contiguous layout"
# c = np.zeros((2, 2, 2), dtype=np.float)
# self.matmul(a, b, out=c[..., 0])
# assert_array_equal(c, tgt, err_msg=msg)
if sys.version_info[:2] >= (3, 5):
class TestMatmulOperator(MatmulCommon, TestCase):
import operator
matmul = operator.matmul
def test_array_priority_override(self):
class A(object):
__array_priority__ = 1000
def __matmul__(self, other):
return "A"
def __rmatmul__(self, other):
return "A"
a = A()
b = np.ones(2)
assert_equal(self.matmul(a, b), "A")
assert_equal(self.matmul(b, a), "A")
def test_matmul_inplace():
# It would be nice to support in-place matmul eventually, but for now
# we don't have a working implementation, so better just to error out
# and nudge people to writing "a = a @ b".
a = np.eye(3)
b = np.eye(3)
assert_raises(TypeError, a.__imatmul__, b)
import operator
assert_raises(TypeError, operator.imatmul, a, b)
# we avoid writing the token `exec` so as not to crash python 2's
# parser
exec_ = getattr(builtins, "exec")
assert_raises(TypeError, exec_, "a @= b", globals(), locals())
class TestInner(TestCase):
def test_inner_scalar_and_matrix_of_objects(self):
# Ticket #4482
arr = np.matrix([1, 2], dtype=object)
desired = np.matrix([[3, 6]], dtype=object)
assert_equal(np.inner(arr, 3), desired)
assert_equal(np.inner(3, arr), desired)
def test_vecself(self):
# Ticket 844.
# Inner product of a vector with itself used to segfault or give a
# meaningless result
a = np.zeros(shape=(1, 80), dtype=np.float64)
p = np.inner(a, a)
assert_almost_equal(p, 0, decimal=14)
class TestSummarization(TestCase):
def test_1d(self):
A = np.arange(1001)
strA = '[   0    1    2 ...,  998  999 1000]'
assert_(str(A) == strA)
reprA = 'array([   0,    1,    2, ...,  998,  999, 1000])'
assert_(repr(A) == reprA)
def test_2d(self):
A = np.arange(1002).reshape(2, 501)
strA = '[[   0    1    2 ...,  498  499  500]\n' \
       ' [ 501  502  503 ...,  999 1000 1001]]'
assert_(str(A) == strA)
reprA = 'array([[   0,    1,    2, ...,  498,  499,  500],\n' \
        '       [ 501,  502,  503, ...,  999, 1000, 1001]])'
assert_(repr(A) == reprA)
class TestChoose(TestCase):
def setUp(self):
self.x = 2*np.ones((3,), dtype=int)
self.y = 3*np.ones((3,), dtype=int)
self.x2 = 2*np.ones((2, 3), dtype=int)
self.y2 = 3*np.ones((2, 3), dtype=int)
self.ind = [0, 0, 1]
def test_basic(self):
A = np.choose(self.ind, (self.x, self.y))
assert_equal(A, [2, 2, 3])
def test_broadcast1(self):
A = np.choose(self.ind, (self.x2, self.y2))
assert_equal(A, [[2, 2, 3], [2, 2, 3]])
def test_broadcast2(self):
A = np.choose(self.ind, (self.x, self.y2))
assert_equal(A, [[2, 2, 3], [2, 2, 3]])
# TODO: test for multidimensional
NEIGH_MODE = {'zero': 0, 'one': 1, 'constant': 2, 'circular': 3, 'mirror': 4}
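# Illustrative sketch (assumption for exposition, not part of the test suite):
# the padding modes above behave like np.pad followed by a sliding window,
# using np.pad's real 'constant', 'symmetric' and 'wrap' modes as stand-ins
# for the iterator's 'zero'/'constant', 'mirror' and 'circular' modes.
def _demo_neighborhood_padding(x, lo, hi):
    # pad by the window half-widths, then cut the window around each element
    pads = {'zero': np.pad(x, (-lo, hi), mode='constant', constant_values=0),
            'mirror': np.pad(x, (-lo, hi), mode='symmetric'),
            'circular': np.pad(x, (-lo, hi), mode='wrap')}
    width = hi - lo + 1
    return dict((name, [p[i:i + width] for i in range(len(x))])
                for name, p in pads.items())
# e.g. _demo_neighborhood_padding(np.arange(1, 6), -2, 2)['circular'][0]
# gives [4, 5, 1, 2, 3], matching the 'circular' expectations below.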
class TestNeighborhoodIter(TestCase):
# Simple, 2d tests
def _test_simple2d(self, dt):
# Test zero and one padding for simple data type
x = np.array([[0, 1], [2, 3]], dtype=dt)
r = [np.array([[0, 0, 0], [0, 0, 1]], dtype=dt),
np.array([[0, 0, 0], [0, 1, 0]], dtype=dt),
np.array([[0, 0, 1], [0, 2, 3]], dtype=dt),
np.array([[0, 1, 0], [2, 3, 0]], dtype=dt)]
l = test_neighborhood_iterator(x, [-1, 0, -1, 1], x[0],
NEIGH_MODE['zero'])
assert_array_equal(l, r)
r = [np.array([[1, 1, 1], [1, 0, 1]], dtype=dt),
np.array([[1, 1, 1], [0, 1, 1]], dtype=dt),
np.array([[1, 0, 1], [1, 2, 3]], dtype=dt),
np.array([[0, 1, 1], [2, 3, 1]], dtype=dt)]
l = test_neighborhood_iterator(x, [-1, 0, -1, 1], x[0],
NEIGH_MODE['one'])
assert_array_equal(l, r)
r = [np.array([[4, 4, 4], [4, 0, 1]], dtype=dt),
np.array([[4, 4, 4], [0, 1, 4]], dtype=dt),
np.array([[4, 0, 1], [4, 2, 3]], dtype=dt),
np.array([[0, 1, 4], [2, 3, 4]], dtype=dt)]
l = test_neighborhood_iterator(x, [-1, 0, -1, 1], 4,
NEIGH_MODE['constant'])
assert_array_equal(l, r)
def test_simple2d(self):
self._test_simple2d(np.float)
def test_simple2d_object(self):
self._test_simple2d(Decimal)
def _test_mirror2d(self, dt):
x = np.array([[0, 1], [2, 3]], dtype=dt)
r = [np.array([[0, 0, 1], [0, 0, 1]], dtype=dt),
np.array([[0, 1, 1], [0, 1, 1]], dtype=dt),
np.array([[0, 0, 1], [2, 2, 3]], dtype=dt),
np.array([[0, 1, 1], [2, 3, 3]], dtype=dt)]
l = test_neighborhood_iterator(x, [-1, 0, -1, 1], x[0],
NEIGH_MODE['mirror'])
assert_array_equal(l, r)
def test_mirror2d(self):
self._test_mirror2d(np.float)
def test_mirror2d_object(self):
self._test_mirror2d(Decimal)
# Simple, 1d tests
def _test_simple(self, dt):
# Test padding with constant values
x = np.linspace(1, 5, 5).astype(dt)
r = [[0, 1, 2], [1, 2, 3], [2, 3, 4], [3, 4, 5], [4, 5, 0]]
l = test_neighborhood_iterator(x, [-1, 1], x[0], NEIGH_MODE['zero'])
assert_array_equal(l, r)
r = [[1, 1, 2], [1, 2, 3], [2, 3, 4], [3, 4, 5], [4, 5, 1]]
l = test_neighborhood_iterator(x, [-1, 1], x[0], NEIGH_MODE['one'])
assert_array_equal(l, r)
r = [[x[4], 1, 2], [1, 2, 3], [2, 3, 4], [3, 4, 5], [4, 5, x[4]]]
l = test_neighborhood_iterator(x, [-1, 1], x[4], NEIGH_MODE['constant'])
assert_array_equal(l, r)
def test_simple_float(self):
self._test_simple(np.float)
def test_simple_object(self):
self._test_simple(Decimal)
# Test mirror modes
def _test_mirror(self, dt):
x = np.linspace(1, 5, 5).astype(dt)
r = np.array([[2, 1, 1, 2, 3], [1, 1, 2, 3, 4], [1, 2, 3, 4, 5],
[2, 3, 4, 5, 5], [3, 4, 5, 5, 4]], dtype=dt)
l = test_neighborhood_iterator(x, [-2, 2], x[1], NEIGH_MODE['mirror'])
self.assertTrue(all(i.dtype == dt for i in l))
assert_array_equal(l, r)
def test_mirror(self):
self._test_mirror(np.float)
def test_mirror_object(self):
self._test_mirror(Decimal)
# Circular mode
def _test_circular(self, dt):
x = np.linspace(1, 5, 5).astype(dt)
r = np.array([[4, 5, 1, 2, 3], [5, 1, 2, 3, 4], [1, 2, 3, 4, 5],
[2, 3, 4, 5, 1], [3, 4, 5, 1, 2]], dtype=dt)
l = test_neighborhood_iterator(x, [-2, 2], x[0], NEIGH_MODE['circular'])
assert_array_equal(l, r)
def test_circular(self):
self._test_circular(np.float)
def test_circular_object(self):
self._test_circular(Decimal)
# Test stacking neighborhood iterators
class TestStackedNeighborhoodIter(TestCase):
# Simple, 1d test: stacking 2 constant-padded neigh iterators
def test_simple_const(self):
dt = np.float64
# Test zero and one padding for simple data type
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([0], dtype=dt),
np.array([0], dtype=dt),
np.array([1], dtype=dt),
np.array([2], dtype=dt),
np.array([3], dtype=dt),
np.array([0], dtype=dt),
np.array([0], dtype=dt)]
l = test_neighborhood_iterator_oob(x, [-2, 4], NEIGH_MODE['zero'],
[0, 0], NEIGH_MODE['zero'])
assert_array_equal(l, r)
r = [np.array([1, 0, 1], dtype=dt),
np.array([0, 1, 2], dtype=dt),
np.array([1, 2, 3], dtype=dt),
np.array([2, 3, 0], dtype=dt),
np.array([3, 0, 1], dtype=dt)]
l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['zero'],
[-1, 1], NEIGH_MODE['one'])
assert_array_equal(l, r)
# 2nd simple, 1d test: stacking 2 neigh iterators, mixing const padding and
# mirror padding
def test_simple_mirror(self):
dt = np.float64
# Stacking zero on top of mirror
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([0, 1, 1], dtype=dt),
np.array([1, 1, 2], dtype=dt),
np.array([1, 2, 3], dtype=dt),
np.array([2, 3, 3], dtype=dt),
np.array([3, 3, 0], dtype=dt)]
l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['mirror'],
[-1, 1], NEIGH_MODE['zero'])
assert_array_equal(l, r)
# Stacking mirror on top of zero
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([1, 0, 0], dtype=dt),
np.array([0, 0, 1], dtype=dt),
np.array([0, 1, 2], dtype=dt),
np.array([1, 2, 3], dtype=dt),
np.array([2, 3, 0], dtype=dt)]
l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['zero'],
[-2, 0], NEIGH_MODE['mirror'])
assert_array_equal(l, r)
# Stacking mirror on top of zero: 2nd
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([0, 1, 2], dtype=dt),
np.array([1, 2, 3], dtype=dt),
np.array([2, 3, 0], dtype=dt),
np.array([3, 0, 0], dtype=dt),
np.array([0, 0, 3], dtype=dt)]
l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['zero'],
[0, 2], NEIGH_MODE['mirror'])
assert_array_equal(l, r)
# Stacking mirror on top of zero: 3rd
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([1, 0, 0, 1, 2], dtype=dt),
np.array([0, 0, 1, 2, 3], dtype=dt),
np.array([0, 1, 2, 3, 0], dtype=dt),
np.array([1, 2, 3, 0, 0], dtype=dt),
np.array([2, 3, 0, 0, 3], dtype=dt)]
l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['zero'],
[-2, 2], NEIGH_MODE['mirror'])
assert_array_equal(l, r)
# 3rd simple, 1d test: stacking 2 neigh iterators, mixing const padding and
# circular padding
def test_simple_circular(self):
dt = np.float64
# Stacking zero on top of circular
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([0, 3, 1], dtype=dt),
np.array([3, 1, 2], dtype=dt),
np.array([1, 2, 3], dtype=dt),
np.array([2, 3, 1], dtype=dt),
np.array([3, 1, 0], dtype=dt)]
l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['circular'],
[-1, 1], NEIGH_MODE['zero'])
assert_array_equal(l, r)
# Stacking circular on top of zero
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([3, 0, 0], dtype=dt),
np.array([0, 0, 1], dtype=dt),
np.array([0, 1, 2], dtype=dt),
np.array([1, 2, 3], dtype=dt),
np.array([2, 3, 0], dtype=dt)]
l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['zero'],
[-2, 0], NEIGH_MODE['circular'])
assert_array_equal(l, r)
# Stacking circular on top of zero: 2nd
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([0, 1, 2], dtype=dt),
np.array([1, 2, 3], dtype=dt),
np.array([2, 3, 0], dtype=dt),
np.array([3, 0, 0], dtype=dt),
np.array([0, 0, 1], dtype=dt)]
l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['zero'],
[0, 2], NEIGH_MODE['circular'])
assert_array_equal(l, r)
# Stacking circular on top of zero: 3rd
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([3, 0, 0, 1, 2], dtype=dt),
np.array([0, 0, 1, 2, 3], dtype=dt),
np.array([0, 1, 2, 3, 0], dtype=dt),
np.array([1, 2, 3, 0, 0], dtype=dt),
np.array([2, 3, 0, 0, 1], dtype=dt)]
l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['zero'],
[-2, 2], NEIGH_MODE['circular'])
assert_array_equal(l, r)
# 4th simple, 1d test: stacking 2 neigh iterators, but with lower iterator
# being strictly within the array
def test_simple_strict_within(self):
dt = np.float64
# Stacking zero on top of zero, first neighborhood strictly inside the
# array
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([1, 2, 3, 0], dtype=dt)]
l = test_neighborhood_iterator_oob(x, [1, 1], NEIGH_MODE['zero'],
[-1, 2], NEIGH_MODE['zero'])
assert_array_equal(l, r)
# Stacking mirror on top of zero, first neighborhood strictly inside the
# array
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([1, 2, 3, 3], dtype=dt)]
l = test_neighborhood_iterator_oob(x, [1, 1], NEIGH_MODE['zero'],
[-1, 2], NEIGH_MODE['mirror'])
assert_array_equal(l, r)
# Stacking circular on top of zero, first neighborhood strictly inside the
# array
x = np.array([1, 2, 3], dtype=dt)
r = [np.array([1, 2, 3, 1], dtype=dt)]
l = test_neighborhood_iterator_oob(x, [1, 1], NEIGH_MODE['zero'],
[-1, 2], NEIGH_MODE['circular'])
assert_array_equal(l, r)
class TestWarnings(object):
def test_complex_warning(self):
x = np.array([1, 2])
y = np.array([1-2j, 1+2j])
with warnings.catch_warnings():
warnings.simplefilter("error", np.ComplexWarning)
assert_raises(np.ComplexWarning, x.__setitem__, slice(None), y)
assert_equal(x, [1, 2])
class TestMinScalarType(object):
def test_unsigned_shortshort(self):
dt = np.min_scalar_type(2**8-1)
wanted = np.dtype('uint8')
assert_equal(wanted, dt)
def test_unsigned_short(self):
dt = np.min_scalar_type(2**16-1)
wanted = np.dtype('uint16')
assert_equal(wanted, dt)
def test_unsigned_int(self):
dt = np.min_scalar_type(2**32-1)
wanted = np.dtype('uint32')
assert_equal(wanted, dt)
def test_unsigned_longlong(self):
dt = np.min_scalar_type(2**63-1)
wanted = np.dtype('uint64')
assert_equal(wanted, dt)
def test_object(self):
dt = np.min_scalar_type(2**64)
wanted = np.dtype('O')
assert_equal(wanted, dt)
if sys.version_info[:2] == (2, 6):
from numpy.core.multiarray import memorysimpleview as memoryview
from numpy.core._internal import _dtype_from_pep3118
class TestPEP3118Dtype(object):
def _check(self, spec, wanted):
dt = np.dtype(wanted)
if isinstance(wanted, list) and isinstance(wanted[-1], tuple):
if wanted[-1][0] == '':
names = list(dt.names)
names[-1] = ''
dt.names = tuple(names)
assert_equal(_dtype_from_pep3118(spec), dt,
err_msg="spec %r != dtype %r" % (spec, wanted))
def test_native_padding(self):
align = np.dtype('i').alignment
for j in range(8):
if j == 0:
s = 'bi'
else:
s = 'b%dxi' % j
self._check('@'+s, {'f0': ('i1', 0),
'f1': ('i', align*(1 + j//align))})
self._check('='+s, {'f0': ('i1', 0),
'f1': ('i', 1+j)})
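# Worked instance of the offsets above (assuming align == 4): with j == 5
# padding bytes ('b5xi'), native '@' mode places 'f1' at 4*(1 + 5//4) == 8,
# while standard '=' mode packs it at 1 + 5 == 6.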
def test_native_padding_2(self):
# Native padding should work also for structs and sub-arrays
self._check('x3T{xi}', {'f0': (({'f0': ('i', 4)}, (3,)), 4)})
self._check('^x3T{xi}', {'f0': (({'f0': ('i', 1)}, (3,)), 1)})
def test_trailing_padding(self):
# Trailing padding should be included, *and*, the item size
# should match the alignment if in aligned mode
align = np.dtype('i').alignment
def VV(n):
return 'V%d' % (align*(1 + (n-1)//align))
self._check('ix', [('f0', 'i'), ('', VV(1))])
self._check('ixx', [('f0', 'i'), ('', VV(2))])
self._check('ixxx', [('f0', 'i'), ('', VV(3))])
self._check('ixxxx', [('f0', 'i'), ('', VV(4))])
self._check('i7x', [('f0', 'i'), ('', VV(7))])
self._check('^ix', [('f0', 'i'), ('', 'V1')])
self._check('^ixx', [('f0', 'i'), ('', 'V2')])
self._check('^ixxx', [('f0', 'i'), ('', 'V3')])
self._check('^ixxxx', [('f0', 'i'), ('', 'V4')])
self._check('^i7x', [('f0', 'i'), ('', 'V7')])
def test_native_padding_3(self):
dt = np.dtype(
[('a', 'b'), ('b', 'i'),
('sub', np.dtype('b,i')), ('c', 'i')],
align=True)
self._check("T{b:a:xxxi:b:T{b:f0:=i:f1:}:sub:xxxi:c:}", dt)
dt = np.dtype(
[('a', 'b'), ('b', 'i'), ('c', 'b'), ('d', 'b'),
('e', 'b'), ('sub', np.dtype('b,i', align=True))])
self._check("T{b:a:=i:b:b:c:b:d:b:e:T{b:f0:xxxi:f1:}:sub:}", dt)
def test_padding_with_array_inside_struct(self):
dt = np.dtype(
[('a', 'b'), ('b', 'i'), ('c', 'b', (3,)),
('d', 'i')],
align=True)
self._check("T{b:a:xxxi:b:3b:c:xi:d:}", dt)
def test_byteorder_inside_struct(self):
# The byte order after @T{^i} should be '^', not '@'.
# Check this by noting the absence of native alignment.
self._check('@T{^i}xi', {'f0': ({'f0': ('i', 0)}, 0),
'f1': ('i', 5)})
def test_intra_padding(self):
# Natively aligned sub-arrays may require some internal padding
align = np.dtype('i').alignment
def VV(n):
return 'V%d' % (align*(1 + (n-1)//align))
self._check('(3)T{ix}', ({'f0': ('i', 0), '': (VV(1), 4)}, (3,)))
class TestNewBufferProtocol(object):
def _check_roundtrip(self, obj):
obj = np.asarray(obj)
x = memoryview(obj)
y = np.asarray(x)
y2 = np.array(x)
assert_(not y.flags.owndata)
assert_(y2.flags.owndata)
assert_equal(y.dtype, obj.dtype)
assert_equal(y.shape, obj.shape)
assert_array_equal(obj, y)
assert_equal(y2.dtype, obj.dtype)
assert_equal(y2.shape, obj.shape)
assert_array_equal(obj, y2)
def test_roundtrip(self):
x = np.array([1, 2, 3, 4, 5], dtype='i4')
self._check_roundtrip(x)
x = np.array([[1, 2], [3, 4]], dtype=np.float64)
self._check_roundtrip(x)
x = np.zeros((3, 3, 3), dtype=np.float32)[:, 0,:]
self._check_roundtrip(x)
dt = [('a', 'b'),
('b', 'h'),
('c', 'i'),
('d', 'l'),
('dx', 'q'),
('e', 'B'),
('f', 'H'),
('g', 'I'),
('h', 'L'),
('hx', 'Q'),
('i', np.single),
('j', np.double),
('k', np.longdouble),
('ix', np.csingle),
('jx', np.cdouble),
('kx', np.clongdouble),
('l', 'S4'),
('m', 'U4'),
('n', 'V3'),
('o', '?'),
('p', np.half),
]
x = np.array(
[(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
asbytes('aaaa'), 'bbbb', asbytes('xxx'), True, 1.0)],
dtype=dt)
self._check_roundtrip(x)
x = np.array(([[1, 2], [3, 4]],), dtype=[('a', (int, (2, 2)))])
self._check_roundtrip(x)
x = np.array([1, 2, 3], dtype='>i2')
self._check_roundtrip(x)
x = np.array([1, 2, 3], dtype='<i2')
self._check_roundtrip(x)
x = np.array([1, 2, 3], dtype='>i4')
self._check_roundtrip(x)
x = np.array([1, 2, 3], dtype='<i4')
self._check_roundtrip(x)
# check long long can be represented as non-native
x = np.array([1, 2, 3], dtype='>q')
self._check_roundtrip(x)
# Native-only data types can be passed through the buffer interface
# only in native byte order
if sys.byteorder == 'little':
x = np.array([1, 2, 3], dtype='>g')
assert_raises(ValueError, self._check_roundtrip, x)
x = np.array([1, 2, 3], dtype='<g')
self._check_roundtrip(x)
else:
x = np.array([1, 2, 3], dtype='>g')
self._check_roundtrip(x)
x = np.array([1, 2, 3], dtype='<g')
assert_raises(ValueError, self._check_roundtrip, x)
def test_roundtrip_half(self):
half_list = [
1.0,
-2.0,
6.5504 * 10**4, # (max half precision)
2**-14, # ~= 6.10352 * 10**-5 (minimum positive normal)
2**-24, # ~= 5.96046 * 10**-8 (minimum strictly positive subnormal)
0.0,
-0.0,
float('+inf'),
float('-inf'),
0.333251953125, # ~= 1/3
]
x = np.array(half_list, dtype='>e')
self._check_roundtrip(x)
x = np.array(half_list, dtype='<e')
self._check_roundtrip(x)
def test_roundtrip_single_types(self):
for typ in np.typeDict.values():
dtype = np.dtype(typ)
if dtype.char in 'Mm':
# datetimes cannot be used in buffers
continue
if dtype.char == 'V':
# skip void
continue
x = np.zeros(4, dtype=dtype)
self._check_roundtrip(x)
if dtype.char not in 'qQgG':
dt = dtype.newbyteorder('<')
x = np.zeros(4, dtype=dt)
self._check_roundtrip(x)
dt = dtype.newbyteorder('>')
x = np.zeros(4, dtype=dt)
self._check_roundtrip(x)
def test_roundtrip_scalar(self):
# Issue #4015.
self._check_roundtrip(0)
def test_export_simple_1d(self):
x = np.array([1, 2, 3, 4, 5], dtype='i')
y = memoryview(x)
assert_equal(y.format, 'i')
assert_equal(y.shape, (5,))
assert_equal(y.ndim, 1)
assert_equal(y.strides, (4,))
assert_equal(y.suboffsets, EMPTY)
assert_equal(y.itemsize, 4)
def test_export_simple_nd(self):
x = np.array([[1, 2], [3, 4]], dtype=np.float64)
y = memoryview(x)
assert_equal(y.format, 'd')
assert_equal(y.shape, (2, 2))
assert_equal(y.ndim, 2)
assert_equal(y.strides, (16, 8))
assert_equal(y.suboffsets, EMPTY)
assert_equal(y.itemsize, 8)
def test_export_discontiguous(self):
x = np.zeros((3, 3, 3), dtype=np.float32)[:, 0,:]
y = memoryview(x)
assert_equal(y.format, 'f')
assert_equal(y.shape, (3, 3))
assert_equal(y.ndim, 2)
assert_equal(y.strides, (36, 4))
assert_equal(y.suboffsets, EMPTY)
assert_equal(y.itemsize, 4)
def test_export_record(self):
dt = [('a', 'b'),
('b', 'h'),
('c', 'i'),
('d', 'l'),
('dx', 'q'),
('e', 'B'),
('f', 'H'),
('g', 'I'),
('h', 'L'),
('hx', 'Q'),
('i', np.single),
('j', np.double),
('k', np.longdouble),
('ix', np.csingle),
('jx', np.cdouble),
('kx', np.clongdouble),
('l', 'S4'),
('m', 'U4'),
('n', 'V3'),
('o', '?'),
('p', np.half),
]
x = np.array(
[(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
asbytes('aaaa'), 'bbbb', asbytes('   '), True, 1.0)],
dtype=dt)
y = memoryview(x)
assert_equal(y.shape, (1,))
assert_equal(y.ndim, 1)
assert_equal(y.suboffsets, EMPTY)
sz = sum([np.dtype(b).itemsize for a, b in dt])
if np.dtype('l').itemsize == 4:
assert_equal(y.format, 'T{b:a:=h:b:i:c:l:d:q:dx:B:e:@H:f:=I:g:L:h:Q:hx:f:i:d:j:^g:k:=Zf:ix:Zd:jx:^Zg:kx:4s:l:=4w:m:3x:n:?:o:@e:p:}')
else:
assert_equal(y.format, 'T{b:a:=h:b:i:c:q:d:q:dx:B:e:@H:f:=I:g:Q:h:Q:hx:f:i:d:j:^g:k:=Zf:ix:Zd:jx:^Zg:kx:4s:l:=4w:m:3x:n:?:o:@e:p:}')
# Cannot test if NPY_RELAXED_STRIDES_CHECKING changes the strides
if not (np.ones(1).strides[0] == np.iinfo(np.intp).max):
assert_equal(y.strides, (sz,))
assert_equal(y.itemsize, sz)
def test_export_subarray(self):
x = np.array(([[1, 2], [3, 4]],), dtype=[('a', ('i', (2, 2)))])
y = memoryview(x)
assert_equal(y.format, 'T{(2,2)i:a:}')
assert_equal(y.shape, EMPTY)
assert_equal(y.ndim, 0)
assert_equal(y.strides, EMPTY)
assert_equal(y.suboffsets, EMPTY)
assert_equal(y.itemsize, 16)
def test_export_endian(self):
x = np.array([1, 2, 3], dtype='>i')
y = memoryview(x)
if sys.byteorder == 'little':
assert_equal(y.format, '>i')
else:
assert_equal(y.format, 'i')
x = np.array([1, 2, 3], dtype='<i')
y = memoryview(x)
if sys.byteorder == 'little':
assert_equal(y.format, 'i')
else:
assert_equal(y.format, '<i')
def test_export_flags(self):
# Check SIMPLE flag, see also gh-3613 (exception should be BufferError)
assert_raises(ValueError, get_buffer_info, np.arange(5)[::2], ('SIMPLE',))
def test_padding(self):
for j in range(8):
x = np.array([(1,), (2,)], dtype={'f0': (int, j)})
self._check_roundtrip(x)
def test_reference_leak(self):
count_1 = sys.getrefcount(np.core._internal)
a = np.zeros(4)
b = memoryview(a)
c = np.asarray(b)
count_2 = sys.getrefcount(np.core._internal)
assert_equal(count_1, count_2)
del c # avoid pyflakes unused variable warning.
def test_padded_struct_array(self):
dt1 = np.dtype(
[('a', 'b'), ('b', 'i'), ('sub', np.dtype('b,i')), ('c', 'i')],
align=True)
x1 = np.arange(dt1.itemsize, dtype=np.int8).view(dt1)
self._check_roundtrip(x1)
dt2 = np.dtype(
[('a', 'b'), ('b', 'i'), ('c', 'b', (3,)), ('d', 'i')],
align=True)
x2 = np.arange(dt2.itemsize, dtype=np.int8).view(dt2)
self._check_roundtrip(x2)
dt3 = np.dtype(
[('a', 'b'), ('b', 'i'), ('c', 'b'), ('d', 'b'),
('e', 'b'), ('sub', np.dtype('b,i', align=True))])
x3 = np.arange(dt3.itemsize, dtype=np.int8).view(dt3)
self._check_roundtrip(x3)
def test_relaxed_strides(self):
# Test that relaxed strides are converted to non-relaxed
c = np.ones((1, 10, 10), dtype='i8')
# Check for NPY_RELAXED_STRIDES_CHECKING:
if np.ones((10, 1), order="C").flags.f_contiguous:
c.strides = (-1, 80, 8)
assert memoryview(c).strides == (800, 80, 8)
# Writing C-contiguous data to a BytesIO buffer should work
fd = io.BytesIO()
fd.write(c.data)
fortran = c.T
assert memoryview(fortran).strides == (8, 80, 800)
arr = np.ones((1, 10))
if arr.flags.f_contiguous:
shape, strides = get_buffer_info(arr, ['F_CONTIGUOUS'])
assert_(strides[0] == 8)
arr = np.ones((10, 1), order='F')
shape, strides = get_buffer_info(arr, ['C_CONTIGUOUS'])
assert_(strides[-1] == 8)
class TestArrayAttributeDeletion(object):
def test_multiarray_writable_attributes_deletion(self):
"""ticket #2046, should not seqfault, raise AttributeError"""
a = np.ones(2)
attr = ['shape', 'strides', 'data', 'dtype', 'real', 'imag', 'flat']
for s in attr:
assert_raises(AttributeError, delattr, a, s)
def test_multiarray_not_writable_attributes_deletion(self):
a = np.ones(2)
attr = ["ndim", "flags", "itemsize", "size", "nbytes", "base",
"ctypes", "T", "__array_interface__", "__array_struct__",
"__array_priority__", "__array_finalize__"]
for s in attr:
assert_raises(AttributeError, delattr, a, s)
def test_multiarray_flags_writable_attribute_deletion(self):
a = np.ones(2).flags
attr = ['updateifcopy', 'aligned', 'writeable']
for s in attr:
assert_raises(AttributeError, delattr, a, s)
def test_multiarray_flags_not_writable_attribute_deletion(self):
a = np.ones(2).flags
attr = ["contiguous", "c_contiguous", "f_contiguous", "fortran",
"owndata", "fnc", "forc", "behaved", "carray", "farray",
"num"]
for s in attr:
assert_raises(AttributeError, delattr, a, s)
def test_array_interface():
# Test scalar coercion within the array interface
class Foo(object):
def __init__(self, value):
self.value = value
self.iface = {'typestr': '=f8'}
def __float__(self):
return float(self.value)
@property
def __array_interface__(self):
return self.iface
f = Foo(0.5)
assert_equal(np.array(f), 0.5)
assert_equal(np.array([f]), [0.5])
assert_equal(np.array([f, f]), [0.5, 0.5])
assert_equal(np.array(f).dtype, np.dtype('=f8'))
# Test various shape definitions
f.iface['shape'] = ()
assert_equal(np.array(f), 0.5)
f.iface['shape'] = None
assert_raises(TypeError, np.array, f)
f.iface['shape'] = (1, 1)
assert_equal(np.array(f), [[0.5]])
f.iface['shape'] = (2,)
assert_raises(ValueError, np.array, f)
# test scalar with no shape
class ArrayLike(object):
array = np.array(1)
__array_interface__ = array.__array_interface__
assert_equal(np.array(ArrayLike()), 1)
def test_flat_element_deletion():
it = np.ones(3).flat
try:
del it[1]
del it[1:2]
except TypeError:
pass
except:
raise AssertionError
def test_scalar_element_deletion():
a = np.zeros(2, dtype=[('x', 'int'), ('y', 'int')])
assert_raises(ValueError, a[0].__delitem__, 'x')
class TestMemEventHook(TestCase):
def test_mem_seteventhook(self):
# The actual tests are within the C code in
# multiarray/multiarray_tests.c.src
test_pydatamem_seteventhook_start()
# force an allocation and free of a numpy array
# needs to be larger than the limit of the small memory cache in ctors.c
a = np.zeros(1000)
del a
test_pydatamem_seteventhook_end()
class TestMapIter(TestCase):
def test_mapiter(self):
# The actual tests are within the C code in
# multiarray/multiarray_tests.c.src
a = np.arange(12).reshape((3, 4)).astype(float)
index = ([1, 1, 2, 0],
[0, 0, 2, 3])
vals = [50, 50, 30, 16]
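# Note: duplicate indices accumulate. The pair (1, 0) appears twice, so
# a[1, 0] goes from 4 to 104, unlike buffered `a[index] += vals`, which
# would apply the increment only once.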
test_inplace_increment(a, index, vals)
assert_equal(a, [[0.00, 1., 2.0, 19.],
[104., 5., 6.0, 7.0],
[8.00, 9., 40., 11.]])
b = np.arange(6).astype(float)
index = (np.array([1, 2, 0]),)
vals = [50, 4, 100.1]
test_inplace_increment(b, index, vals)
assert_equal(b, [100.1, 51., 6., 3., 4., 5.])
class TestAsCArray(TestCase):
def test_1darray(self):
array = np.arange(24, dtype=np.double)
from_c = test_as_c_array(array, 3)
assert_equal(array[3], from_c)
def test_2darray(self):
array = np.arange(24, dtype=np.double).reshape(3, 8)
from_c = test_as_c_array(array, 2, 4)
assert_equal(array[2, 4], from_c)
def test_3darray(self):
array = np.arange(24, dtype=np.double).reshape(2, 3, 4)
from_c = test_as_c_array(array, 1, 2, 3)
assert_equal(array[1, 2, 3], from_c)
class TestConversion(TestCase):
def test_array_scalar_relational_operation(self):
# All integer
for dt1 in np.typecodes['AllInteger']:
assert_(1 > np.array(0, dtype=dt1), "type %s failed" % (dt1,))
assert_(not 1 < np.array(0, dtype=dt1), "type %s failed" % (dt1,))
for dt2 in np.typecodes['AllInteger']:
assert_(np.array(1, dtype=dt1) > np.array(0, dtype=dt2),
"type %s and %s failed" % (dt1, dt2))
assert_(not np.array(1, dtype=dt1) < np.array(0, dtype=dt2),
"type %s and %s failed" % (dt1, dt2))
# Unsigned integers
for dt1 in 'BHILQP':
assert_(-1 < np.array(1, dtype=dt1), "type %s failed" % (dt1,))
assert_(not -1 > np.array(1, dtype=dt1), "type %s failed" % (dt1,))
assert_(-1 != np.array(1, dtype=dt1), "type %s failed" % (dt1,))
# unsigned vs signed
for dt2 in 'bhilqp':
assert_(np.array(1, dtype=dt1) > np.array(-1, dtype=dt2),
"type %s and %s failed" % (dt1, dt2))
assert_(not np.array(1, dtype=dt1) < np.array(-1, dtype=dt2),
"type %s and %s failed" % (dt1, dt2))
assert_(np.array(1, dtype=dt1) != np.array(-1, dtype=dt2),
"type %s and %s failed" % (dt1, dt2))
# Signed integers and floats
for dt1 in 'bhilqp' + np.typecodes['Float']:
assert_(1 > np.array(-1, dtype=dt1), "type %s failed" % (dt1,))
assert_(not 1 < np.array(-1, dtype=dt1), "type %s failed" % (dt1,))
assert_(-1 == np.array(-1, dtype=dt1), "type %s failed" % (dt1,))
for dt2 in 'bhilqp' + np.typecodes['Float']:
assert_(np.array(1, dtype=dt1) > np.array(-1, dtype=dt2),
"type %s and %s failed" % (dt1, dt2))
assert_(not np.array(1, dtype=dt1) < np.array(-1, dtype=dt2),
"type %s and %s failed" % (dt1, dt2))
assert_(np.array(-1, dtype=dt1) == np.array(-1, dtype=dt2),
"type %s and %s failed" % (dt1, dt2))
class TestWhere(TestCase):
def test_basic(self):
dts = [np.bool, np.int16, np.int32, np.int64, np.double, np.complex128,
np.longdouble, np.clongdouble]
for dt in dts:
c = np.ones(53, dtype=np.bool)
assert_equal(np.where( c, dt(0), dt(1)), dt(0))
assert_equal(np.where(~c, dt(0), dt(1)), dt(1))
assert_equal(np.where(True, dt(0), dt(1)), dt(0))
assert_equal(np.where(False, dt(0), dt(1)), dt(1))
d = np.ones_like(c).astype(dt)
e = np.zeros_like(d)
r = d.astype(dt)
c[7] = False
r[7] = e[7]
assert_equal(np.where(c, e, e), e)
assert_equal(np.where(c, d, e), r)
assert_equal(np.where(c, d, e[0]), r)
assert_equal(np.where(c, d[0], e), r)
assert_equal(np.where(c[::2], d[::2], e[::2]), r[::2])
assert_equal(np.where(c[1::2], d[1::2], e[1::2]), r[1::2])
assert_equal(np.where(c[::3], d[::3], e[::3]), r[::3])
assert_equal(np.where(c[1::3], d[1::3], e[1::3]), r[1::3])
assert_equal(np.where(c[::-2], d[::-2], e[::-2]), r[::-2])
assert_equal(np.where(c[::-3], d[::-3], e[::-3]), r[::-3])
assert_equal(np.where(c[1::-3], d[1::-3], e[1::-3]), r[1::-3])
def test_exotic(self):
# object
assert_array_equal(np.where(True, None, None), np.array(None))
# zero sized
m = np.array([], dtype=bool).reshape(0, 3)
b = np.array([], dtype=np.float64).reshape(0, 3)
assert_array_equal(np.where(m, 0, b), np.array([]).reshape(0, 3))
# object cast
d = np.array([-1.34, -0.16, -0.54, -0.31, -0.08, -0.95, 0.000, 0.313,
0.547, -0.18, 0.876, 0.236, 1.969, 0.310, 0.699, 1.013,
1.267, 0.229, -1.39, 0.487])
nan = float('NaN')
e = np.array(['5z', '0l', nan, 'Wz', nan, nan, 'Xq', 'cs', nan, nan,
'QN', nan, nan, 'Fd', nan, nan, 'kp', nan, '36', 'i1'],
dtype=object)
m = np.array([0,0,1,0,1,1,0,0,1,1,0,1,1,0,1,1,0,1,0,0], dtype=bool)
r = e[:]
r[np.where(m)] = d[np.where(m)]
assert_array_equal(np.where(m, d, e), r)
r = e[:]
r[np.where(~m)] = d[np.where(~m)]
assert_array_equal(np.where(m, e, d), r)
assert_array_equal(np.where(m, e, e), e)
# minimal dtype result with NaN scalar (e.g. required by pandas)
d = np.array([1., 2.], dtype=np.float32)
e = float('NaN')
assert_equal(np.where(True, d, e).dtype, np.float32)
e = float('Infinity')
assert_equal(np.where(True, d, e).dtype, np.float32)
e = float('-Infinity')
assert_equal(np.where(True, d, e).dtype, np.float32)
# also check upcast
e = float(1e150)
assert_equal(np.where(True, d, e).dtype, np.float64)
def test_ndim(self):
c = [True, False]
a = np.zeros((2, 25))
b = np.ones((2, 25))
r = np.where(np.array(c)[:,np.newaxis], a, b)
assert_array_equal(r[0], a[0])
assert_array_equal(r[1], b[0])
a = a.T
b = b.T
r = np.where(c, a, b)
assert_array_equal(r[:,0], a[:,0])
assert_array_equal(r[:,1], b[:,0])
def test_dtype_mix(self):
c = np.array([False, True, False, False, False, False, True, False,
False, False, True, False])
a = np.uint32(1)
b = np.array([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.],
dtype=np.float64)
r = np.array([5., 1., 3., 2., -1., -4., 1., -10., 10., 1., 1., 3.],
dtype=np.float64)
assert_equal(np.where(c, a, b), r)
a = a.astype(np.float32)
b = b.astype(np.int64)
assert_equal(np.where(c, a, b), r)
# non bool mask
c = c.astype(np.int)
c[c != 0] = 34242324
assert_equal(np.where(c, a, b), r)
# invert
tmpmask = c != 0
c[c == 0] = 41247212
c[tmpmask] = 0
assert_equal(np.where(c, b, a), r)
def test_foreign(self):
c = np.array([False, True, False, False, False, False, True, False,
False, False, True, False])
r = np.array([5., 1., 3., 2., -1., -4., 1., -10., 10., 1., 1., 3.],
dtype=np.float64)
a = np.ones(1, dtype='>i4')
b = np.array([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.],
dtype=np.float64)
assert_equal(np.where(c, a, b), r)
b = b.astype('>f8')
assert_equal(np.where(c, a, b), r)
a = a.astype('<i4')
assert_equal(np.where(c, a, b), r)
c = c.astype('>i4')
assert_equal(np.where(c, a, b), r)
def test_error(self):
c = [True, True]
a = np.ones((4, 5))
b = np.ones((5, 5))
assert_raises(ValueError, np.where, c, a, a)
assert_raises(ValueError, np.where, c[0], a, b)
def test_string(self):
# gh-4778 check strings are properly filled with nulls
a = np.array("abc")
b = np.array("x" * 753)
assert_equal(np.where(True, a, b), "abc")
assert_equal(np.where(False, b, a), "abc")
# check native datatype sized strings
a = np.array("abcd")
b = np.array("x" * 8)
assert_equal(np.where(True, a, b), "abcd")
assert_equal(np.where(False, b, a), "abcd")
class TestSizeOf(TestCase):
def test_empty_array(self):
x = np.array([])
assert_(sys.getsizeof(x) > 0)
def check_array(self, dtype):
elem_size = dtype(0).itemsize
for length in [10, 50, 100, 500]:
x = np.arange(length, dtype=dtype)
assert_(sys.getsizeof(x) > length * elem_size)
def test_array_int32(self):
self.check_array(np.int32)
def test_array_int64(self):
self.check_array(np.int64)
def test_array_float32(self):
self.check_array(np.float32)
def test_array_float64(self):
self.check_array(np.float64)
def test_view(self):
d = np.ones(100)
assert_(sys.getsizeof(d[...]) < sys.getsizeof(d))
def test_reshape(self):
d = np.ones(100)
assert_(sys.getsizeof(d) < sys.getsizeof(d.reshape(100, 1, 1).copy()))
def test_resize(self):
d = np.ones(100)
old = sys.getsizeof(d)
d.resize(50)
assert_(old > sys.getsizeof(d))
d.resize(150)
assert_(old < sys.getsizeof(d))
def test_error(self):
d = np.ones(100)
assert_raises(TypeError, d.__sizeof__, "a")
class TestHashing(TestCase):
def test_arrays_not_hashable(self):
x = np.ones(3)
assert_raises(TypeError, hash, x)
def test_collections_hashable(self):
x = np.array([])
self.assertFalse(isinstance(x, collections.Hashable))
from numpy.core._internal import _view_is_safe
class TestObjViewSafetyFuncs(TestCase):
def test_view_safety(self):
psize = np.dtype('p').itemsize
# creates a dtype, treating the extra character code '-' as a missing 'p' field
def mtype(s):
n, offset, fields = 0, 0, []
for c in s.split(','): # subarrays won't work
if c != '-':
fields.append(('f{0}'.format(n), c, offset))
n += 1
offset += np.dtype(c).itemsize if c != '-' else psize
names, formats, offsets = zip(*fields)
return np.dtype({'names': names, 'formats': formats,
'offsets': offsets, 'itemsize': offset})
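# For example (hypothetical spec), mtype('O,-,p') yields fields 'f0'
# ('O' at offset 0) and 'f1' ('p' at offset 2*psize), with a psize-wide
# hole in between and total itemsize 3*psize.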
# test nonequal itemsizes with objects:
# these should succeed:
_view_is_safe(np.dtype('O,p,O,p'), np.dtype('O,p,O,p,O,p'))
_view_is_safe(np.dtype('O,O'), np.dtype('O,O,O'))
# these should fail:
assert_raises(TypeError, _view_is_safe, np.dtype('O,O,p'), np.dtype('O,O'))
assert_raises(TypeError, _view_is_safe, np.dtype('O,O,p'), np.dtype('O,p'))
assert_raises(TypeError, _view_is_safe, np.dtype('O,O,p'), np.dtype('p,O'))
# test nonequal itemsizes with missing fields:
# these should succeed:
_view_is_safe(mtype('-,p,-,p'), mtype('-,p,-,p,-,p'))
_view_is_safe(np.dtype('p,p'), np.dtype('p,p,p'))
# these should fail:
assert_raises(TypeError, _view_is_safe, mtype('p,p,-'), mtype('p,p'))
assert_raises(TypeError, _view_is_safe, mtype('p,p,-'), mtype('p,-'))
assert_raises(TypeError, _view_is_safe, mtype('p,p,-'), mtype('-,p'))
# scans through positions at which we can view a type
def scanView(d1, otype):
goodpos = []
for shift in range(d1.itemsize - np.dtype(otype).itemsize+1):
d2 = np.dtype({'names': ['f0'], 'formats': [otype],
'offsets': [shift], 'itemsize': d1.itemsize})
try:
_view_is_safe(d1, d2)
except TypeError:
pass
else:
goodpos.append(shift)
return goodpos
# test partial overlap with object field
assert_equal(scanView(np.dtype('p,O,p,p,O,O'), 'p'),
[0] + list(range(2*psize, 3*psize+1)))
assert_equal(scanView(np.dtype('p,O,p,p,O,O'), 'O'),
[psize, 4*psize, 5*psize])
# test partial overlap with missing field
assert_equal(scanView(mtype('p,-,p,p,-,-'), 'p'),
[0] + list(range(2*psize, 3*psize+1)))
# test nested structures with objects:
nestedO = np.dtype([('f0', 'p'), ('f1', 'p,O,p')])
assert_equal(scanView(nestedO, 'p'), list(range(psize+1)) + [3*psize])
assert_equal(scanView(nestedO, 'O'), [2*psize])
# test nested structures with missing fields:
nestedM = np.dtype([('f0', 'p'), ('f1', mtype('p,-,p'))])
assert_equal(scanView(nestedM, 'p'), list(range(psize+1)) + [3*psize])
# test subarrays with objects
subarrayO = np.dtype('p,(2,3)O,p')
assert_equal(scanView(subarrayO, 'p'), [0, 7*psize])
assert_equal(scanView(subarrayO, 'O'),
list(range(psize, 6*psize+1, psize)))
# test dtype with overlapping fields
overlapped = np.dtype({'names': ['f0', 'f1', 'f2', 'f3'],
'formats': ['p', 'p', 'p', 'p'],
'offsets': [0, 1, 3*psize-1, 3*psize],
'itemsize': 4*psize})
assert_equal(scanView(overlapped, 'p'), [0, 1, 3*psize-1, 3*psize])
class TestArrayPriority(TestCase):
# This will go away when __array_priority__ is settled, meanwhile
# it serves to check unintended changes.
op = operator
binary_ops = [
op.pow, op.add, op.sub, op.mul, op.floordiv, op.truediv, op.mod,
op.and_, op.or_, op.xor, op.lshift, op.rshift, op.gt,
op.ge, op.lt, op.le, op.ne, op.eq
]
if sys.version_info[0] < 3:
binary_ops.append(op.div)
class Foo(np.ndarray):
__array_priority__ = 100.
def __new__(cls, *args, **kwargs):
return np.array(*args, **kwargs).view(cls)
class Bar(np.ndarray):
__array_priority__ = 101.
def __new__(cls, *args, **kwargs):
return np.array(*args, **kwargs).view(cls)
class Other(object):
__array_priority__ = 1000.
def _all(self, other):
return self.__class__()
__add__ = __radd__ = _all
__sub__ = __rsub__ = _all
__mul__ = __rmul__ = _all
__pow__ = __rpow__ = _all
__div__ = __rdiv__ = _all
__mod__ = __rmod__ = _all
__truediv__ = __rtruediv__ = _all
__floordiv__ = __rfloordiv__ = _all
__and__ = __rand__ = _all
__xor__ = __rxor__ = _all
__or__ = __ror__ = _all
__lshift__ = __rlshift__ = _all
__rshift__ = __rrshift__ = _all
__eq__ = _all
__ne__ = _all
__gt__ = _all
__ge__ = _all
__lt__ = _all
__le__ = _all
def test_ndarray_subclass(self):
a = np.array([1, 2])
b = self.Bar([1, 2])
for f in self.binary_ops:
msg = repr(f)
assert_(isinstance(f(a, b), self.Bar), msg)
assert_(isinstance(f(b, a), self.Bar), msg)
def test_ndarray_other(self):
a = np.array([1, 2])
b = self.Other()
for f in self.binary_ops:
msg = repr(f)
assert_(isinstance(f(a, b), self.Other), msg)
assert_(isinstance(f(b, a), self.Other), msg)
def test_subclass_subclass(self):
a = self.Foo([1, 2])
b = self.Bar([1, 2])
for f in self.binary_ops:
msg = repr(f)
assert_(isinstance(f(a, b), self.Bar), msg)
assert_(isinstance(f(b, a), self.Bar), msg)
def test_subclass_other(self):
a = self.Foo([1, 2])
b = self.Other()
for f in self.binary_ops:
msg = repr(f)
assert_(isinstance(f(a, b), self.Other), msg)
assert_(isinstance(f(b, a), self.Other), msg)
class TestBytestringArrayNonzero(TestCase):
def test_empty_bstring_array_is_falsey(self):
self.assertFalse(np.array([''], dtype=np.str))
def test_whitespace_bstring_array_is_falsey(self):
a = np.array(['spam'], dtype=np.str)
a[0] = ' \0\0'
self.assertFalse(a)
def test_all_null_bstring_array_is_falsey(self):
a = np.array(['spam'], dtype=np.str)
a[0] = '\0\0\0\0'
self.assertFalse(a)
def test_null_inside_bstring_array_is_truthy(self):
a = np.array(['spam'], dtype=np.str)
a[0] = ' \0 \0'
self.assertTrue(a)
class TestUnicodeArrayNonzero(TestCase):
def test_empty_ustring_array_is_falsey(self):
self.assertFalse(np.array([''], dtype=np.unicode))
def test_whitespace_ustring_array_is_falsey(self):
a = np.array(['eggs'], dtype=np.unicode)
a[0] = ' \0\0'
self.assertFalse(a)
def test_all_null_ustring_array_is_falsey(self):
a = np.array(['eggs'], dtype=np.unicode)
a[0] = '\0\0\0\0'
self.assertFalse(a)
def test_null_inside_ustring_array_is_truthy(self):
a = np.array(['eggs'], dtype=np.unicode)
a[0] = ' \0 \0'
self.assertTrue(a)
if __name__ == "__main__":
run_module_suite()
| bsd-3-clause |
ndingwall/scikit-learn | sklearn/linear_model/tests/test_sparse_coordinate_descent.py | 15 | 10815 | import numpy as np
import scipy.sparse as sp
from sklearn.utils._testing import assert_array_almost_equal
from sklearn.utils._testing import assert_almost_equal
from sklearn.utils._testing import ignore_warnings
from sklearn.utils._testing import assert_warns
from sklearn.exceptions import ConvergenceWarning
from sklearn.linear_model import Lasso, ElasticNet, LassoCV, ElasticNetCV
def test_sparse_coef():
# Check that the sparse_coef property works
clf = ElasticNet()
clf.coef_ = [1, 2, 3]
assert sp.isspmatrix(clf.sparse_coef_)
assert clf.sparse_coef_.toarray().tolist()[0] == clf.coef_
def test_normalize_option():
# Check that the normalize option in enet works, and that dense and
# sparse input give the same result
X = np.array([[-1.], [0.], [1.]])
y = [-1, 0, 1]
clf_dense = ElasticNet(normalize=True)
clf_sparse = ElasticNet(normalize=True)
clf_dense.fit(X, y)
X = sp.csc_matrix(X)
clf_sparse.fit(X, y)
assert_almost_equal(clf_dense.dual_gap_, 0)
assert_array_almost_equal(clf_dense.coef_, clf_sparse.coef_)
def test_lasso_zero():
# Check that the sparse lasso can handle zero data without crashing
X = sp.csc_matrix((3, 1))
y = [0, 0, 0]
T = np.array([[1], [2], [3]])
clf = Lasso().fit(X, y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0])
assert_array_almost_equal(pred, [0, 0, 0])
assert_almost_equal(clf.dual_gap_, 0)
def test_enet_toy_list_input():
# Test ElasticNet for various values of alpha and l1_ratio with list X
X = np.array([[-1], [0], [1]])
X = sp.csc_matrix(X)
Y = [-1, 0, 1] # just a straight line
T = np.array([[2], [3], [4]]) # test sample
# this should be the same as unregularized least squares
clf = ElasticNet(alpha=0, l1_ratio=1.0)
# catch warning about alpha=0.
# this is discouraged but should work.
ignore_warnings(clf.fit)(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [1])
assert_array_almost_equal(pred, [2, 3, 4])
assert_almost_equal(clf.dual_gap_, 0)
clf = ElasticNet(alpha=0.5, l1_ratio=0.3, max_iter=1000)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.50819], decimal=3)
assert_array_almost_equal(pred, [1.0163, 1.5245, 2.0327], decimal=3)
assert_almost_equal(clf.dual_gap_, 0)
clf = ElasticNet(alpha=0.5, l1_ratio=0.5)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.45454], 3)
assert_array_almost_equal(pred, [0.9090, 1.3636, 1.8181], 3)
assert_almost_equal(clf.dual_gap_, 0)
def test_enet_toy_explicit_sparse_input():
# Test ElasticNet for various values of alpha and l1_ratio with sparse X
f = ignore_warnings
# training samples
X = sp.lil_matrix((3, 1))
X[0, 0] = -1
# X[1, 0] = 0
X[2, 0] = 1
Y = [-1, 0, 1] # just a straight line (the identity function)
# test samples
T = sp.lil_matrix((3, 1))
T[0, 0] = 2
T[1, 0] = 3
T[2, 0] = 4
# this should be the same as lasso
clf = ElasticNet(alpha=0, l1_ratio=1.0)
f(clf.fit)(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [1])
assert_array_almost_equal(pred, [2, 3, 4])
assert_almost_equal(clf.dual_gap_, 0)
clf = ElasticNet(alpha=0.5, l1_ratio=0.3, max_iter=1000)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.50819], decimal=3)
assert_array_almost_equal(pred, [1.0163, 1.5245, 2.0327], decimal=3)
assert_almost_equal(clf.dual_gap_, 0)
clf = ElasticNet(alpha=0.5, l1_ratio=0.5)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.45454], 3)
assert_array_almost_equal(pred, [0.9090, 1.3636, 1.8181], 3)
assert_almost_equal(clf.dual_gap_, 0)
def make_sparse_data(n_samples=100, n_features=100, n_informative=10, seed=42,
positive=False, n_targets=1):
random_state = np.random.RandomState(seed)
# build an ill-posed linear regression problem with many noisy features and
# comparatively few samples
# generate a ground truth model
w = random_state.randn(n_features, n_targets)
w[n_informative:] = 0.0 # only the top features are impacting the model
if positive:
w = np.abs(w)
X = random_state.randn(n_samples, n_features)
rnd = random_state.uniform(size=(n_samples, n_features))
X[rnd > 0.5] = 0.0 # 50% of zeros in input signal
# generate training ground truth labels
y = np.dot(X, w)
X = sp.csc_matrix(X)
if n_targets == 1:
y = np.ravel(y)
return X, y
def _test_sparse_enet_not_as_toy_dataset(alpha, fit_intercept, positive):
n_samples, n_features, max_iter = 100, 100, 1000
n_informative = 10
X, y = make_sparse_data(n_samples, n_features, n_informative,
positive=positive)
X_train, X_test = X[n_samples // 2:], X[:n_samples // 2]
y_train, y_test = y[n_samples // 2:], y[:n_samples // 2]
s_clf = ElasticNet(alpha=alpha, l1_ratio=0.8, fit_intercept=fit_intercept,
max_iter=max_iter, tol=1e-7, positive=positive,
warm_start=True)
s_clf.fit(X_train, y_train)
assert_almost_equal(s_clf.dual_gap_, 0, 4)
assert s_clf.score(X_test, y_test) > 0.85
# check the convergence is the same as the dense version
d_clf = ElasticNet(alpha=alpha, l1_ratio=0.8, fit_intercept=fit_intercept,
max_iter=max_iter, tol=1e-7, positive=positive,
warm_start=True)
d_clf.fit(X_train.toarray(), y_train)
assert_almost_equal(d_clf.dual_gap_, 0, 4)
assert d_clf.score(X_test, y_test) > 0.85
assert_almost_equal(s_clf.coef_, d_clf.coef_, 5)
assert_almost_equal(s_clf.intercept_, d_clf.intercept_, 5)
# check that the coefs are sparse
assert np.sum(s_clf.coef_ != 0.0) < 2 * n_informative
def test_sparse_enet_not_as_toy_dataset():
_test_sparse_enet_not_as_toy_dataset(alpha=0.1, fit_intercept=False,
positive=False)
_test_sparse_enet_not_as_toy_dataset(alpha=0.1, fit_intercept=True,
positive=False)
_test_sparse_enet_not_as_toy_dataset(alpha=1e-3, fit_intercept=False,
positive=True)
_test_sparse_enet_not_as_toy_dataset(alpha=1e-3, fit_intercept=True,
positive=True)
def test_sparse_lasso_not_as_toy_dataset():
n_samples = 100
max_iter = 1000
n_informative = 10
X, y = make_sparse_data(n_samples=n_samples, n_informative=n_informative)
X_train, X_test = X[n_samples // 2:], X[:n_samples // 2]
y_train, y_test = y[n_samples // 2:], y[:n_samples // 2]
s_clf = Lasso(alpha=0.1, fit_intercept=False, max_iter=max_iter, tol=1e-7)
s_clf.fit(X_train, y_train)
assert_almost_equal(s_clf.dual_gap_, 0, 4)
assert s_clf.score(X_test, y_test) > 0.85
# check the convergence is the same as the dense version
d_clf = Lasso(alpha=0.1, fit_intercept=False, max_iter=max_iter, tol=1e-7)
d_clf.fit(X_train.toarray(), y_train)
assert_almost_equal(d_clf.dual_gap_, 0, 4)
assert d_clf.score(X_test, y_test) > 0.85
# check that the coefs are sparse
assert np.sum(s_clf.coef_ != 0.0) == n_informative
def test_enet_multitarget():
n_targets = 3
X, y = make_sparse_data(n_targets=n_targets)
estimator = ElasticNet(alpha=0.01, precompute=None)
# XXX: There is a bug when precompute is not None!
estimator.fit(X, y)
coef, intercept, dual_gap = (estimator.coef_,
estimator.intercept_,
estimator.dual_gap_)
for k in range(n_targets):
estimator.fit(X, y[:, k])
assert_array_almost_equal(coef[k, :], estimator.coef_)
assert_array_almost_equal(intercept[k], estimator.intercept_)
assert_array_almost_equal(dual_gap[k], estimator.dual_gap_)
def test_path_parameters():
X, y = make_sparse_data()
max_iter = 50
n_alphas = 10
clf = ElasticNetCV(n_alphas=n_alphas, eps=1e-3, max_iter=max_iter,
l1_ratio=0.5, fit_intercept=False)
ignore_warnings(clf.fit)(X, y) # new params
assert_almost_equal(0.5, clf.l1_ratio)
assert n_alphas == clf.n_alphas
assert n_alphas == len(clf.alphas_)
sparse_mse_path = clf.mse_path_
ignore_warnings(clf.fit)(X.toarray(), y) # compare with dense data
assert_almost_equal(clf.mse_path_, sparse_mse_path)
def test_same_output_sparse_dense_lasso_and_enet_cv():
X, y = make_sparse_data(n_samples=40, n_features=10)
for normalize in [True, False]:
clfs = ElasticNetCV(max_iter=100, normalize=normalize)
ignore_warnings(clfs.fit)(X, y)
clfd = ElasticNetCV(max_iter=100, normalize=normalize)
ignore_warnings(clfd.fit)(X.toarray(), y)
assert_almost_equal(clfs.alpha_, clfd.alpha_, 7)
assert_almost_equal(clfs.intercept_, clfd.intercept_, 7)
assert_array_almost_equal(clfs.mse_path_, clfd.mse_path_)
assert_array_almost_equal(clfs.alphas_, clfd.alphas_)
clfs = LassoCV(max_iter=100, cv=4, normalize=normalize)
ignore_warnings(clfs.fit)(X, y)
clfd = LassoCV(max_iter=100, cv=4, normalize=normalize)
ignore_warnings(clfd.fit)(X.toarray(), y)
assert_almost_equal(clfs.alpha_, clfd.alpha_, 7)
assert_almost_equal(clfs.intercept_, clfd.intercept_, 7)
assert_array_almost_equal(clfs.mse_path_, clfd.mse_path_)
assert_array_almost_equal(clfs.alphas_, clfd.alphas_)
def test_same_multiple_output_sparse_dense():
for normalize in [True, False]:
l = ElasticNet(normalize=normalize)
X = [[0, 1, 2, 3, 4],
[0, 2, 5, 8, 11],
[9, 10, 11, 12, 13],
[10, 11, 12, 13, 14]]
y = [[1, 2, 3, 4, 5],
[1, 3, 6, 9, 12],
[10, 11, 12, 13, 14],
[11, 12, 13, 14, 15]]
ignore_warnings(l.fit)(X, y)
sample = np.array([1, 2, 3, 4, 5]).reshape(1, -1)
predict_dense = l.predict(sample)
l_sp = ElasticNet(normalize=normalize)
X_sp = sp.coo_matrix(X)
ignore_warnings(l_sp.fit)(X_sp, y)
sample_sparse = sp.coo_matrix(sample)
predict_sparse = l_sp.predict(sample_sparse)
assert_array_almost_equal(predict_sparse, predict_dense)
def test_sparse_enet_coordinate_descent():
"""Test that a warning is issued if model does not converge"""
clf = Lasso(max_iter=2)
n_samples = 5
n_features = 2
X = sp.csc_matrix((n_samples, n_features)) * 1e50
y = np.ones(n_samples)
assert_warns(ConvergenceWarning, clf.fit, X, y)
| bsd-3-clause |
DonBeo/scikit-learn | sklearn/cross_validation.py | 5 | 57208 | """
The :mod:`sklearn.cross_validation` module includes utilities for cross-
validation and performance evaluation.
"""
# Author: Alexandre Gramfort <[email protected]>,
# Gael Varoquaux <[email protected]>,
# Olivier Grisel <[email protected]>
# License: BSD 3 clause
from __future__ import print_function
from __future__ import division
import warnings
from itertools import chain, combinations
from math import ceil, floor, factorial
import numbers
import time
from abc import ABCMeta, abstractmethod
import numpy as np
import scipy.sparse as sp
from .base import is_classifier, clone
from .utils import indexable, check_random_state, safe_indexing
from .utils.validation import (_is_arraylike, _num_samples,
check_array, column_or_1d)
from .utils.multiclass import type_of_target
from .externals.joblib import Parallel, delayed, logger
from .externals.six import with_metaclass
from .externals.six.moves import zip
from .metrics.scorer import check_scoring
from .utils.fixes import bincount
__all__ = ['KFold',
'LeaveOneLabelOut',
'LeaveOneOut',
'LeavePLabelOut',
'LeavePOut',
'ShuffleSplit',
'StratifiedKFold',
'StratifiedShuffleSplit',
'PredefinedSplit',
'check_cv',
'cross_val_score',
'cross_val_predict',
'permutation_test_score',
'train_test_split']
class _PartitionIterator(with_metaclass(ABCMeta)):
"""Base class for CV iterators where train_mask = ~test_mask
Implementations must define `_iter_test_masks` or `_iter_test_indices`.
Parameters
----------
n : int
Total number of elements in dataset.
"""
def __init__(self, n):
if abs(n - int(n)) >= np.finfo('f').eps:
raise ValueError("n must be an integer")
self.n = int(n)
def __iter__(self):
ind = np.arange(self.n)
for test_index in self._iter_test_masks():
train_index = np.logical_not(test_index)
train_index = ind[train_index]
test_index = ind[test_index]
yield train_index, test_index
# Since subclasses must implement either _iter_test_masks or
# _iter_test_indices, neither can be abstract.
def _iter_test_masks(self):
"""Generates boolean masks corresponding to test sets.
By default, delegates to _iter_test_indices()
"""
for test_index in self._iter_test_indices():
test_mask = self._empty_mask()
test_mask[test_index] = True
yield test_mask
def _iter_test_indices(self):
"""Generates integer indices corresponding to test sets."""
raise NotImplementedError
def _empty_mask(self):
return np.zeros(self.n, dtype=np.bool)
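# Minimal sketch of the subclassing contract described above (hypothetical
# example, not part of the public API): a CV iterator only has to provide
# _iter_test_indices; the base __iter__ derives the train/test index pairs.
class _FirstKOut(_PartitionIterator):
    """Use the first k samples as the single test fold (illustration only)."""
    def __init__(self, n, k):
        super(_FirstKOut, self).__init__(n)
        self.k = k
    def _iter_test_indices(self):
        yield np.arange(self.k)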
class LeaveOneOut(_PartitionIterator):
"""Leave-One-Out cross validation iterator.
Provides train/test indices to split data in train test sets. Each
sample is used once as a test set (singleton) while the remaining
samples form the training set.
Note: ``LeaveOneOut(n)`` is equivalent to ``KFold(n, n_folds=n)`` and
``LeavePOut(n, p=1)``.
Due to the high number of test sets (which is the same as the
number of samples) this cross validation method can be very costly.
For large datasets one should favor KFold, StratifiedKFold or
ShuffleSplit.
Parameters
----------
n : int
Total number of elements in dataset.
Examples
--------
>>> from sklearn import cross_validation
>>> X = np.array([[1, 2], [3, 4]])
>>> y = np.array([1, 2])
>>> loo = cross_validation.LeaveOneOut(2)
>>> len(loo)
2
>>> print(loo)
sklearn.cross_validation.LeaveOneOut(n=2)
>>> for train_index, test_index in loo:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
... print(X_train, X_test, y_train, y_test)
TRAIN: [1] TEST: [0]
[[3 4]] [[1 2]] [2] [1]
TRAIN: [0] TEST: [1]
[[1 2]] [[3 4]] [1] [2]
See also
--------
LeaveOneLabelOut for splitting the data according to explicit,
domain-specific stratification of the dataset.
"""
def _iter_test_indices(self):
return range(self.n)
def __repr__(self):
return '%s.%s(n=%i)' % (
self.__class__.__module__,
self.__class__.__name__,
self.n,
)
def __len__(self):
return self.n
class LeavePOut(_PartitionIterator):
"""Leave-P-Out cross validation iterator
Provides train/test indices to split data in train test sets. This results
in testing on all distinct samples of size p, while the remaining n - p
samples form the training set in each iteration.
Note: ``LeavePOut(n, p)`` is NOT equivalent to ``KFold(n, n_folds=n // p)``
which creates non-overlapping test sets.
Due to the high number of iterations, which grows combinatorially with the
number of samples, this cross validation method can be very costly. For
large datasets one should favor KFold, StratifiedKFold or ShuffleSplit.
Parameters
----------
n : int
Total number of elements in dataset.
p : int
Size of the test sets.
Examples
--------
>>> from sklearn import cross_validation
>>> X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
>>> y = np.array([1, 2, 3, 4])
>>> lpo = cross_validation.LeavePOut(4, 2)
>>> len(lpo)
6
>>> print(lpo)
sklearn.cross_validation.LeavePOut(n=4, p=2)
>>> for train_index, test_index in lpo:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [2 3] TEST: [0 1]
TRAIN: [1 3] TEST: [0 2]
TRAIN: [1 2] TEST: [0 3]
TRAIN: [0 3] TEST: [1 2]
TRAIN: [0 2] TEST: [1 3]
TRAIN: [0 1] TEST: [2 3]
"""
def __init__(self, n, p):
super(LeavePOut, self).__init__(n)
self.p = p
def _iter_test_indices(self):
for comb in combinations(range(self.n), self.p):
yield np.array(comb)
def __repr__(self):
return '%s.%s(n=%i, p=%i)' % (
self.__class__.__module__,
self.__class__.__name__,
self.n,
self.p,
)
def __len__(self):
return int(factorial(self.n) / factorial(self.n - self.p)
/ factorial(self.p))
class _BaseKFold(with_metaclass(ABCMeta, _PartitionIterator)):
"""Base class to validate KFold approaches"""
@abstractmethod
def __init__(self, n, n_folds, shuffle, random_state):
super(_BaseKFold, self).__init__(n)
if abs(n_folds - int(n_folds)) >= np.finfo('f').eps:
raise ValueError("n_folds must be an integer")
self.n_folds = n_folds = int(n_folds)
if n_folds <= 1:
raise ValueError(
"k-fold cross validation requires at least one"
" train / test split by setting n_folds=2 or more,"
" got n_folds={0}.".format(n_folds))
if n_folds > self.n:
raise ValueError(
("Cannot have number of folds n_folds={0} greater"
" than the number of samples: {1}.").format(n_folds, n))
if not isinstance(shuffle, bool):
raise TypeError("shuffle must be True or False;"
" got {0}".format(shuffle))
self.shuffle = shuffle
self.random_state = random_state
class KFold(_BaseKFold):
"""K-Folds cross validation iterator.
Provides train/test indices to split data in train test sets. Split
dataset into k consecutive folds (without shuffling).
    Each fold is then used as a validation set once while the k - 1 remaining
    folds form the training set.
Parameters
----------
n : int
Total number of elements.
n_folds : int, default=3
Number of folds. Must be at least 2.
shuffle : boolean, optional
Whether to shuffle the data before splitting into batches.
random_state : None, int or RandomState
Pseudo-random number generator state used for random
sampling. If None, use default numpy RNG for shuffling
Examples
--------
>>> from sklearn import cross_validation
>>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
>>> y = np.array([1, 2, 3, 4])
>>> kf = cross_validation.KFold(4, n_folds=2)
>>> len(kf)
2
>>> print(kf) # doctest: +NORMALIZE_WHITESPACE
sklearn.cross_validation.KFold(n=4, n_folds=2, shuffle=False,
random_state=None)
>>> for train_index, test_index in kf:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [2 3] TEST: [0 1]
TRAIN: [0 1] TEST: [2 3]
Notes
-----
The first n % n_folds folds have size n // n_folds + 1, other folds have
size n // n_folds.
See also
--------
StratifiedKFold: take label information into account to avoid building
folds with imbalanced class distributions (for binary or multiclass
classification tasks).
"""
def __init__(self, n, n_folds=3, shuffle=False,
random_state=None):
super(KFold, self).__init__(n, n_folds, shuffle, random_state)
self.idxs = np.arange(n)
if shuffle:
rng = check_random_state(self.random_state)
rng.shuffle(self.idxs)
def _iter_test_indices(self):
n = self.n
n_folds = self.n_folds
        fold_sizes = (n // n_folds) * np.ones(n_folds, dtype=int)
fold_sizes[:n % n_folds] += 1
current = 0
for fold_size in fold_sizes:
start, stop = current, current + fold_size
yield self.idxs[start:stop]
current = stop
def __repr__(self):
return '%s.%s(n=%i, n_folds=%i, shuffle=%s, random_state=%s)' % (
self.__class__.__module__,
self.__class__.__name__,
self.n,
self.n_folds,
self.shuffle,
self.random_state,
)
def __len__(self):
return self.n_folds
class StratifiedKFold(_BaseKFold):
"""Stratified K-Folds cross validation iterator
Provides train/test indices to split data in train test sets.
This cross-validation object is a variation of KFold that
returns stratified folds. The folds are made by preserving
the percentage of samples for each class.
Parameters
----------
y : array-like, [n_samples]
Samples to split in K folds.
n_folds : int, default=3
Number of folds. Must be at least 2.
shuffle : boolean, optional
Whether to shuffle each stratification of the data before splitting
into batches.
random_state : None, int or RandomState
Pseudo-random number generator state used for random
sampling. If None, use default numpy RNG for shuffling
Examples
--------
>>> from sklearn import cross_validation
>>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
>>> y = np.array([0, 0, 1, 1])
>>> skf = cross_validation.StratifiedKFold(y, n_folds=2)
>>> len(skf)
2
>>> print(skf) # doctest: +NORMALIZE_WHITESPACE
sklearn.cross_validation.StratifiedKFold(labels=[0 0 1 1], n_folds=2,
shuffle=False, random_state=None)
>>> for train_index, test_index in skf:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [1 3] TEST: [0 2]
TRAIN: [0 2] TEST: [1 3]
Notes
-----
    All the folds have size trunc(n_samples / n_folds); the last one has the
    complementary size.
"""
def __init__(self, y, n_folds=3, shuffle=False,
random_state=None):
super(StratifiedKFold, self).__init__(
len(y), n_folds, shuffle, random_state)
y = np.asarray(y)
n_samples = y.shape[0]
unique_labels, y_inversed = np.unique(y, return_inverse=True)
label_counts = bincount(y_inversed)
min_labels = np.min(label_counts)
if self.n_folds > min_labels:
warnings.warn(("The least populated class in y has only %d"
" members, which is too few. The minimum"
" number of labels for any class cannot"
" be less than n_folds=%d."
% (min_labels, self.n_folds)), Warning)
# don't want to use the same seed in each label's shuffle
if self.shuffle:
rng = check_random_state(self.random_state)
else:
rng = self.random_state
# pre-assign each sample to a test fold index using individual KFold
# splitting strategies for each label so as to respect the
# balance of labels
per_label_cvs = [
KFold(max(c, self.n_folds), self.n_folds, shuffle=self.shuffle,
random_state=rng) for c in label_counts]
        test_folds = np.zeros(n_samples, dtype=int)
for test_fold_idx, per_label_splits in enumerate(zip(*per_label_cvs)):
for label, (_, test_split) in zip(unique_labels, per_label_splits):
label_test_folds = test_folds[y == label]
# the test split can be too big because we used
# KFold(max(c, self.n_folds), self.n_folds) instead of
# KFold(c, self.n_folds) to make it possible to not crash even
# if the data is not 100% stratifiable for all the labels
# (we use a warning instead of raising an exception)
# If this is the case, let's trim it:
test_split = test_split[test_split < len(label_test_folds)]
label_test_folds[test_split] = test_fold_idx
test_folds[y == label] = label_test_folds
self.test_folds = test_folds
self.y = y
def _iter_test_masks(self):
for i in range(self.n_folds):
yield self.test_folds == i
def __repr__(self):
return '%s.%s(labels=%s, n_folds=%i, shuffle=%s, random_state=%s)' % (
self.__class__.__module__,
self.__class__.__name__,
self.y,
self.n_folds,
self.shuffle,
self.random_state,
)
def __len__(self):
return self.n_folds
class LeaveOneLabelOut(_PartitionIterator):
    """Leave-One-Label-Out cross-validation iterator
Provides train/test indices to split data according to a third-party
provided label. This label information can be used to encode arbitrary
domain specific stratifications of the samples as integers.
For instance the labels could be the year of collection of the samples
and thus allow for cross-validation against time-based splits.
Parameters
----------
labels : array-like of int with shape (n_samples,)
Arbitrary domain-specific stratification of the data to be used
to draw the splits.
Examples
--------
>>> from sklearn import cross_validation
>>> X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
>>> y = np.array([1, 2, 1, 2])
>>> labels = np.array([1, 1, 2, 2])
>>> lol = cross_validation.LeaveOneLabelOut(labels)
>>> len(lol)
2
>>> print(lol)
sklearn.cross_validation.LeaveOneLabelOut(labels=[1 1 2 2])
>>> for train_index, test_index in lol:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
... print(X_train, X_test, y_train, y_test)
TRAIN: [2 3] TEST: [0 1]
[[5 6]
[7 8]] [[1 2]
[3 4]] [1 2] [1 2]
TRAIN: [0 1] TEST: [2 3]
[[1 2]
[3 4]] [[5 6]
[7 8]] [1 2] [1 2]
"""
def __init__(self, labels):
super(LeaveOneLabelOut, self).__init__(len(labels))
# We make a copy of labels to avoid side-effects during iteration
self.labels = np.array(labels, copy=True)
self.unique_labels = np.unique(labels)
self.n_unique_labels = len(self.unique_labels)
def _iter_test_masks(self):
for i in self.unique_labels:
yield self.labels == i
def __repr__(self):
return '%s.%s(labels=%s)' % (
self.__class__.__module__,
self.__class__.__name__,
self.labels,
)
def __len__(self):
return self.n_unique_labels
class LeavePLabelOut(_PartitionIterator):
    """Leave-P-Label-Out cross-validation iterator
Provides train/test indices to split data according to a third-party
provided label. This label information can be used to encode arbitrary
domain specific stratifications of the samples as integers.
For instance the labels could be the year of collection of the samples
and thus allow for cross-validation against time-based splits.
The difference between LeavePLabelOut and LeaveOneLabelOut is that
the former builds the test sets with all the samples assigned to
``p`` different values of the labels while the latter uses samples
all assigned the same labels.
Parameters
----------
labels : array-like of int with shape (n_samples,)
Arbitrary domain-specific stratification of the data to be used
to draw the splits.
    p : int
        Number of labels to leave out in the test split.
Examples
--------
>>> from sklearn import cross_validation
>>> X = np.array([[1, 2], [3, 4], [5, 6]])
>>> y = np.array([1, 2, 1])
>>> labels = np.array([1, 2, 3])
>>> lpl = cross_validation.LeavePLabelOut(labels, p=2)
>>> len(lpl)
3
>>> print(lpl)
sklearn.cross_validation.LeavePLabelOut(labels=[1 2 3], p=2)
>>> for train_index, test_index in lpl:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
... print(X_train, X_test, y_train, y_test)
TRAIN: [2] TEST: [0 1]
[[5 6]] [[1 2]
[3 4]] [1] [1 2]
TRAIN: [1] TEST: [0 2]
[[3 4]] [[1 2]
[5 6]] [2] [1 1]
TRAIN: [0] TEST: [1 2]
[[1 2]] [[3 4]
[5 6]] [1] [2 1]
"""
def __init__(self, labels, p):
# We make a copy of labels to avoid side-effects during iteration
super(LeavePLabelOut, self).__init__(len(labels))
self.labels = np.array(labels, copy=True)
self.unique_labels = np.unique(labels)
self.n_unique_labels = len(self.unique_labels)
self.p = p
def _iter_test_masks(self):
comb = combinations(range(self.n_unique_labels), self.p)
for idx in comb:
test_index = self._empty_mask()
idx = np.array(idx)
for l in self.unique_labels[idx]:
test_index[self.labels == l] = True
yield test_index
def __repr__(self):
return '%s.%s(labels=%s, p=%s)' % (
self.__class__.__module__,
self.__class__.__name__,
self.labels,
self.p,
)
def __len__(self):
return int(factorial(self.n_unique_labels) /
factorial(self.n_unique_labels - self.p) /
factorial(self.p))
class BaseShuffleSplit(with_metaclass(ABCMeta)):
"""Base class for ShuffleSplit and StratifiedShuffleSplit"""
def __init__(self, n, n_iter=10, test_size=0.1, train_size=None,
random_state=None):
self.n = n
self.n_iter = n_iter
self.test_size = test_size
self.train_size = train_size
self.random_state = random_state
self.n_train, self.n_test = _validate_shuffle_split(n, test_size,
train_size)
def __iter__(self):
for train, test in self._iter_indices():
yield train, test
return
@abstractmethod
def _iter_indices(self):
"""Generate (train, test) indices"""
class ShuffleSplit(BaseShuffleSplit):
"""Random permutation cross-validation iterator.
Yields indices to split data into training and test sets.
Note: contrary to other cross-validation strategies, random splits
do not guarantee that all folds will be different, although this is
still very likely for sizeable datasets.
Parameters
----------
n : int
Total number of elements in the dataset.
n_iter : int (default 10)
Number of re-shuffling & splitting iterations.
test_size : float (default 0.1), int, or None
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the test split. If
int, represents the absolute number of test samples. If None,
the value is automatically set to the complement of the train size.
train_size : float, int, or None (default is None)
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the train split. If
int, represents the absolute number of train samples. If None,
the value is automatically set to the complement of the test size.
random_state : int or RandomState
Pseudo-random number generator state used for random sampling.
Examples
--------
>>> from sklearn import cross_validation
>>> rs = cross_validation.ShuffleSplit(4, n_iter=3,
... test_size=.25, random_state=0)
>>> len(rs)
3
>>> print(rs)
... # doctest: +ELLIPSIS
ShuffleSplit(4, n_iter=3, test_size=0.25, ...)
>>> for train_index, test_index in rs:
... print("TRAIN:", train_index, "TEST:", test_index)
...
TRAIN: [3 1 0] TEST: [2]
TRAIN: [2 1 3] TEST: [0]
TRAIN: [0 2 1] TEST: [3]
>>> rs = cross_validation.ShuffleSplit(4, n_iter=3,
... train_size=0.5, test_size=.25, random_state=0)
>>> for train_index, test_index in rs:
... print("TRAIN:", train_index, "TEST:", test_index)
...
TRAIN: [3 1] TEST: [2]
TRAIN: [2 1] TEST: [0]
TRAIN: [0 2] TEST: [3]
"""
def _iter_indices(self):
rng = check_random_state(self.random_state)
for i in range(self.n_iter):
# random partition
permutation = rng.permutation(self.n)
ind_test = permutation[:self.n_test]
ind_train = permutation[self.n_test:self.n_test + self.n_train]
yield ind_train, ind_test
def __repr__(self):
return ('%s(%d, n_iter=%d, test_size=%s, '
'random_state=%s)' % (
self.__class__.__name__,
self.n,
self.n_iter,
str(self.test_size),
self.random_state,
))
def __len__(self):
return self.n_iter
def _validate_shuffle_split(n, test_size, train_size):
if test_size is None and train_size is None:
raise ValueError(
'test_size and train_size can not both be None')
if test_size is not None:
if np.asarray(test_size).dtype.kind == 'f':
if test_size >= 1.:
raise ValueError(
'test_size=%f should be smaller '
'than 1.0 or be an integer' % test_size)
elif np.asarray(test_size).dtype.kind == 'i':
if test_size >= n:
raise ValueError(
'test_size=%d should be smaller '
'than the number of samples %d' % (test_size, n))
else:
raise ValueError("Invalid value for test_size: %r" % test_size)
if train_size is not None:
if np.asarray(train_size).dtype.kind == 'f':
if train_size >= 1.:
raise ValueError("train_size=%f should be smaller "
"than 1.0 or be an integer" % train_size)
elif np.asarray(test_size).dtype.kind == 'f' and \
train_size + test_size > 1.:
raise ValueError('The sum of test_size and train_size = %f, '
'should be smaller than 1.0. Reduce '
'test_size and/or train_size.' %
(train_size + test_size))
elif np.asarray(train_size).dtype.kind == 'i':
if train_size >= n:
raise ValueError("train_size=%d should be smaller "
"than the number of samples %d" %
(train_size, n))
else:
raise ValueError("Invalid value for train_size: %r" % train_size)
if np.asarray(test_size).dtype.kind == 'f':
n_test = ceil(test_size * n)
elif np.asarray(test_size).dtype.kind == 'i':
n_test = float(test_size)
if train_size is None:
n_train = n - n_test
else:
if np.asarray(train_size).dtype.kind == 'f':
n_train = floor(train_size * n)
else:
n_train = float(train_size)
if test_size is None:
n_test = n - n_train
if n_train + n_test > n:
raise ValueError('The sum of train_size and test_size = %d, '
'should be smaller than the number of '
'samples %d. Reduce test_size and/or '
'train_size.' % (n_train + n_test, n))
return int(n_train), int(n_test)
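# Usage sketch (not part of the upstream scikit-learn source), illustrating
# the helper above: with n=10 and test_size=0.2 the test set gets
# ceil(0.2 * 10) = 2 samples and training gets the remaining 8.
#
#     _validate_shuffle_split(10, 0.2, None)   # -> (8, 2)
#     _validate_shuffle_split(10, 3, 5)        # -> (5, 3)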
class StratifiedShuffleSplit(BaseShuffleSplit):
"""Stratified ShuffleSplit cross validation iterator
Provides train/test indices to split data in train test sets.
This cross-validation object is a merge of StratifiedKFold and
ShuffleSplit, which returns stratified randomized folds. The folds
are made by preserving the percentage of samples for each class.
Note: like the ShuffleSplit strategy, stratified random splits
do not guarantee that all folds will be different, although this is
still very likely for sizeable datasets.
Parameters
----------
y : array, [n_samples]
Labels of samples.
n_iter : int (default 10)
Number of re-shuffling & splitting iterations.
test_size : float (default 0.1), int, or None
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the test split. If
int, represents the absolute number of test samples. If None,
the value is automatically set to the complement of the train size.
train_size : float, int, or None (default is None)
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the train split. If
int, represents the absolute number of train samples. If None,
the value is automatically set to the complement of the test size.
random_state : int or RandomState
Pseudo-random number generator state used for random sampling.
Examples
--------
>>> from sklearn.cross_validation import StratifiedShuffleSplit
>>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
>>> y = np.array([0, 0, 1, 1])
>>> sss = StratifiedShuffleSplit(y, 3, test_size=0.5, random_state=0)
>>> len(sss)
3
>>> print(sss) # doctest: +ELLIPSIS
StratifiedShuffleSplit(labels=[0 0 1 1], n_iter=3, ...)
>>> for train_index, test_index in sss:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [1 2] TEST: [3 0]
TRAIN: [0 2] TEST: [1 3]
TRAIN: [0 2] TEST: [3 1]
"""
def __init__(self, y, n_iter=10, test_size=0.1, train_size=None,
random_state=None):
super(StratifiedShuffleSplit, self).__init__(
len(y), n_iter, test_size, train_size, random_state)
self.y = np.array(y)
self.classes, self.y_indices = np.unique(y, return_inverse=True)
n_cls = self.classes.shape[0]
if np.min(bincount(self.y_indices)) < 2:
raise ValueError("The least populated class in y has only 1"
" member, which is too few. The minimum"
" number of labels for any class cannot"
" be less than 2.")
if self.n_train < n_cls:
raise ValueError('The train_size = %d should be greater or '
'equal to the number of classes = %d' %
(self.n_train, n_cls))
if self.n_test < n_cls:
raise ValueError('The test_size = %d should be greater or '
'equal to the number of classes = %d' %
(self.n_test, n_cls))
def _iter_indices(self):
rng = check_random_state(self.random_state)
cls_count = bincount(self.y_indices)
p_i = cls_count / float(self.n)
n_i = np.round(self.n_train * p_i).astype(int)
t_i = np.minimum(cls_count - n_i,
np.round(self.n_test * p_i).astype(int))
for n in range(self.n_iter):
train = []
test = []
for i, cls in enumerate(self.classes):
permutation = rng.permutation(cls_count[i])
cls_i = np.where((self.y == cls))[0][permutation]
train.extend(cls_i[:n_i[i]])
test.extend(cls_i[n_i[i]:n_i[i] + t_i[i]])
# Because of rounding issues (as n_train and n_test are not
# dividers of the number of elements per class), we may end
# up here with less samples in train and test than asked for.
if len(train) < self.n_train or len(test) < self.n_test:
# We complete by affecting randomly the missing indexes
                missing_idx = np.where(bincount(train + test,
                                                minlength=len(self.y)) == 0)[0]
missing_idx = rng.permutation(missing_idx)
                # Compute the missing counts explicitly: a slice such as
                # missing_idx[-0:] would grab the whole array.
                n_missing_train = self.n_train - len(train)
                n_missing_test = self.n_test - len(test)
                train.extend(missing_idx[:n_missing_train])
                test.extend(missing_idx[n_missing_train:
                                        n_missing_train + n_missing_test])
train = rng.permutation(train)
test = rng.permutation(test)
yield train, test
def __repr__(self):
return ('%s(labels=%s, n_iter=%d, test_size=%s, '
'random_state=%s)' % (
self.__class__.__name__,
self.y,
self.n_iter,
str(self.test_size),
self.random_state,
))
def __len__(self):
return self.n_iter
class PredefinedSplit(_PartitionIterator):
"""Predefined split cross validation iterator
Splits the data into training/test set folds according to a predefined
scheme. Each sample can be assigned to at most one test set fold, as
specified by the user through the ``test_fold`` parameter.
Parameters
----------
    test_fold : array-like, shape (n_samples,)
test_fold[i] gives the test set fold of sample i. A value of -1
indicates that the corresponding sample is not part of any test set
folds, but will instead always be put into the training fold.
Examples
--------
>>> from sklearn.cross_validation import PredefinedSplit
>>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
>>> y = np.array([0, 0, 1, 1])
>>> ps = PredefinedSplit(test_fold=[0, 1, -1, 1])
>>> len(ps)
2
>>> print(ps) # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
sklearn.cross_validation.PredefinedSplit(test_fold=[ 0 1 -1 1])
>>> for train_index, test_index in ps:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [1 2 3] TEST: [0]
TRAIN: [0 2] TEST: [1 3]
"""
def __init__(self, test_fold):
super(PredefinedSplit, self).__init__(len(test_fold))
        self.test_fold = np.array(test_fold, dtype=int)
self.test_fold = column_or_1d(self.test_fold)
self.unique_folds = np.unique(self.test_fold)
self.unique_folds = self.unique_folds[self.unique_folds != -1]
def _iter_test_indices(self):
for f in self.unique_folds:
yield np.where(self.test_fold == f)[0]
def __repr__(self):
return '%s.%s(test_fold=%s)' % (
self.__class__.__module__,
self.__class__.__name__,
self.test_fold)
def __len__(self):
return len(self.unique_folds)
##############################################################################
def _index_param_value(X, v, indices):
"""Private helper function for parameter value indexing."""
if not _is_arraylike(v) or _num_samples(v) != _num_samples(X):
# pass through: skip indexing
return v
if sp.issparse(v):
v = v.tocsr()
return safe_indexing(v, indices)
def cross_val_predict(estimator, X, y=None, cv=None, n_jobs=1,
verbose=0, fit_params=None, pre_dispatch='2*n_jobs'):
"""Generate cross-validated estimates for each input data point
Parameters
----------
estimator : estimator object implementing 'fit' and 'predict'
The object to use to fit the data.
X : array-like
The data to fit. Can be, for example a list, or an array at least 2d.
y : array-like, optional, default: None
The target variable to try to predict in the case of
supervised learning.
cv : cross-validation generator or int, optional, default: None
A cross-validation generator to use. If int, determines
the number of folds in StratifiedKFold if y is binary
or multiclass and estimator is a classifier, or the number
of folds in KFold otherwise. If None, it is equivalent to cv=3.
        This cross-validation generator must place every element in a test
        set exactly once; otherwise, a ValueError is raised.
n_jobs : integer, optional
The number of CPUs to use to do the computation. -1 means
'all CPUs'.
verbose : integer, optional
The verbosity level.
fit_params : dict, optional
Parameters to pass to the fit method of the estimator.
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
Returns
-------
preds : ndarray
This is the result of calling 'predict'
"""
X, y = indexable(X, y)
cv = _check_cv(cv, X, y, classifier=is_classifier(estimator))
# We clone the estimator to make sure that all the folds are
# independent, and that it is pickle-able.
parallel = Parallel(n_jobs=n_jobs, verbose=verbose,
pre_dispatch=pre_dispatch)
preds_blocks = parallel(delayed(_fit_and_predict)(clone(estimator), X, y,
train, test, verbose,
fit_params)
for train, test in cv)
p = np.concatenate([p for p, _ in preds_blocks])
locs = np.concatenate([loc for _, loc in preds_blocks])
if not _check_is_partition(locs, X.shape[0]):
raise ValueError('cross_val_predict only works for partitions')
preds = p.copy()
preds[locs] = p
return preds
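# Usage sketch (not part of the upstream scikit-learn source): each entry of
# the returned array is the prediction obtained when that sample was in the
# held-out test fold.
#
#     from sklearn.linear_model import LinearRegression
#     X = np.arange(20).reshape(10, 2)
#     y = np.arange(10)
#     preds = cross_val_predict(LinearRegression(), X, y, cv=2)
#     preds.shape   # (10,)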
def _fit_and_predict(estimator, X, y, train, test, verbose, fit_params):
"""Fit estimator and predict values for a given dataset split.
Parameters
----------
estimator : estimator object implementing 'fit' and 'predict'
The object to use to fit the data.
X : array-like of shape at least 2D
The data to fit.
y : array-like, optional, default: None
The target variable to try to predict in the case of
supervised learning.
train : array-like, shape (n_train_samples,)
Indices of training samples.
test : array-like, shape (n_test_samples,)
Indices of test samples.
verbose : integer
The verbosity level.
fit_params : dict or None
Parameters that will be passed to ``estimator.fit``.
Returns
-------
preds : sequence
Result of calling 'estimator.predict'
test : array-like
This is the value of the test parameter
"""
# Adjust length of sample weights
fit_params = fit_params if fit_params is not None else {}
fit_params = dict([(k, _index_param_value(X, v, train))
for k, v in fit_params.items()])
X_train, y_train = _safe_split(estimator, X, y, train)
X_test, _ = _safe_split(estimator, X, y, test, train)
if y_train is None:
estimator.fit(X_train, **fit_params)
else:
estimator.fit(X_train, y_train, **fit_params)
preds = estimator.predict(X_test)
return preds, test
def _check_is_partition(locs, n):
"""Check whether locs is a reordering of the array np.arange(n)
Parameters
----------
locs : ndarray
integer array to test
n : int
number of expected elements
Returns
-------
is_partition : bool
True iff sorted(locs) is range(n)
"""
if len(locs) != n:
return False
hit = np.zeros(n, bool)
hit[locs] = True
if not np.all(hit):
return False
return True
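# Illustration (not part of the upstream scikit-learn source) of the helper
# above:
#
#     _check_is_partition(np.array([2, 0, 1]), 3)   # True: a permutation
#     _check_is_partition(np.array([0, 0, 1]), 3)   # False: index 2 missing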
def cross_val_score(estimator, X, y=None, scoring=None, cv=None, n_jobs=1,
verbose=0, fit_params=None, pre_dispatch='2*n_jobs'):
"""Evaluate a score by cross-validation
Parameters
----------
estimator : estimator object implementing 'fit'
The object to use to fit the data.
X : array-like
The data to fit. Can be, for example a list, or an array at least 2d.
y : array-like, optional, default: None
The target variable to try to predict in the case of
supervised learning.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
cv : cross-validation generator or int, optional, default: None
A cross-validation generator to use. If int, determines
the number of folds in StratifiedKFold if y is binary
or multiclass and estimator is a classifier, or the number
of folds in KFold otherwise. If None, it is equivalent to cv=3.
n_jobs : integer, optional
The number of CPUs to use to do the computation. -1 means
'all CPUs'.
verbose : integer, optional
The verbosity level.
fit_params : dict, optional
Parameters to pass to the fit method of the estimator.
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
Returns
-------
scores : array of float, shape=(len(list(cv)),)
Array of scores of the estimator for each run of the cross validation.
"""
X, y = indexable(X, y)
cv = _check_cv(cv, X, y, classifier=is_classifier(estimator))
scorer = check_scoring(estimator, scoring=scoring)
# We clone the estimator to make sure that all the folds are
# independent, and that it is pickle-able.
parallel = Parallel(n_jobs=n_jobs, verbose=verbose,
pre_dispatch=pre_dispatch)
scores = parallel(delayed(_fit_and_score)(clone(estimator), X, y, scorer,
train, test, verbose, None,
fit_params)
for train, test in cv)
return np.array(scores)[:, 0]
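# Usage sketch (not part of the upstream scikit-learn source): one score per
# fold is returned, so the mean and spread across folds are easy to report.
#
#     from sklearn.datasets import load_iris
#     from sklearn.linear_model import LogisticRegression
#     iris = load_iris()
#     scores = cross_val_score(LogisticRegression(), iris.data, iris.target, cv=5)
#     scores.shape   # (5,)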
class FitFailedWarning(RuntimeWarning):
pass
def _fit_and_score(estimator, X, y, scorer, train, test, verbose,
parameters, fit_params, return_train_score=False,
return_parameters=False, error_score='raise'):
"""Fit estimator and compute scores for a given dataset split.
Parameters
----------
estimator : estimator object implementing 'fit'
The object to use to fit the data.
X : array-like of shape at least 2D
The data to fit.
y : array-like, optional, default: None
The target variable to try to predict in the case of
supervised learning.
scorer : callable
A scorer callable object / function with signature
``scorer(estimator, X, y)``.
train : array-like, shape (n_train_samples,)
Indices of training samples.
test : array-like, shape (n_test_samples,)
Indices of test samples.
verbose : integer
The verbosity level.
    parameters : dict or None
        Parameters to be set on the estimator.
    fit_params : dict or None
        Parameters that will be passed to ``estimator.fit``.
    return_train_score : boolean, optional, default: False
        Compute and return score on training set.
    return_parameters : boolean, optional, default: False
        Return the parameters that have been used for the estimator.
    error_score : 'raise' (default) or numeric
        Value to assign to the score if an error occurs in estimator fitting.
        If set to 'raise', the error is raised. If a numeric value is given,
        FitFailedWarning is raised. This parameter does not affect the refit
        step, which will always raise the error.
Returns
-------
train_score : float, optional
Score on training set, returned only if `return_train_score` is `True`.
test_score : float
Score on test set.
n_test_samples : int
Number of test samples.
scoring_time : float
Time spent for fitting and scoring in seconds.
parameters : dict or None, optional
The parameters that have been evaluated.
"""
if verbose > 1:
if parameters is None:
msg = "no parameters to be set"
else:
msg = '%s' % (', '.join('%s=%s' % (k, v)
for k, v in parameters.items()))
print("[CV] %s %s" % (msg, (64 - len(msg)) * '.'))
# Adjust length of sample weights
fit_params = fit_params if fit_params is not None else {}
fit_params = dict([(k, _index_param_value(X, v, train))
for k, v in fit_params.items()])
if parameters is not None:
estimator.set_params(**parameters)
start_time = time.time()
X_train, y_train = _safe_split(estimator, X, y, train)
X_test, y_test = _safe_split(estimator, X, y, test, train)
try:
if y_train is None:
estimator.fit(X_train, **fit_params)
else:
estimator.fit(X_train, y_train, **fit_params)
except Exception as e:
if error_score == 'raise':
raise
elif isinstance(error_score, numbers.Number):
test_score = error_score
if return_train_score:
train_score = error_score
warnings.warn("Classifier fit failed. The score on this train-test"
" partition for these parameters will be set to %f. "
"Details: \n%r" % (error_score, e), FitFailedWarning)
else:
raise ValueError("error_score must be the string 'raise' or a"
" numeric value. (Hint: if using 'raise', please"
" make sure that it has been spelled correctly.)"
)
else:
test_score = _score(estimator, X_test, y_test, scorer)
if return_train_score:
train_score = _score(estimator, X_train, y_train, scorer)
scoring_time = time.time() - start_time
if verbose > 2:
msg += ", score=%f" % test_score
if verbose > 1:
end_msg = "%s -%s" % (msg, logger.short_format_time(scoring_time))
print("[CV] %s %s" % ((64 - len(end_msg)) * '.', end_msg))
ret = [train_score] if return_train_score else []
ret.extend([test_score, _num_samples(X_test), scoring_time])
if return_parameters:
ret.append(parameters)
return ret
def _safe_split(estimator, X, y, indices, train_indices=None):
"""Create subset of dataset and properly handle kernels."""
if hasattr(estimator, 'kernel') and callable(estimator.kernel):
# cannot compute the kernel values with custom function
raise ValueError("Cannot use a custom kernel function. "
"Precompute the kernel matrix instead.")
if not hasattr(X, "shape"):
if getattr(estimator, "_pairwise", False):
raise ValueError("Precomputed kernels or affinity matrices have "
"to be passed as arrays or sparse matrices.")
X_subset = [X[idx] for idx in indices]
else:
if getattr(estimator, "_pairwise", False):
# X is a precomputed square kernel matrix
if X.shape[0] != X.shape[1]:
raise ValueError("X should be a square kernel matrix")
if train_indices is None:
X_subset = X[np.ix_(indices, indices)]
else:
X_subset = X[np.ix_(indices, train_indices)]
else:
X_subset = safe_indexing(X, indices)
if y is not None:
y_subset = safe_indexing(y, indices)
else:
y_subset = None
return X_subset, y_subset
def _score(estimator, X_test, y_test, scorer):
"""Compute the score of an estimator on a given test set."""
if y_test is None:
score = scorer(estimator, X_test)
else:
score = scorer(estimator, X_test, y_test)
if not isinstance(score, numbers.Number):
raise ValueError("scoring must return a number, got %s (%s) instead."
% (str(score), type(score)))
return score
def _permutation_test_score(estimator, X, y, cv, scorer):
"""Auxiliary function for permutation_test_score"""
avg_score = []
for train, test in cv:
estimator.fit(X[train], y[train])
avg_score.append(scorer(estimator, X[test], y[test]))
return np.mean(avg_score)
def _shuffle(y, labels, random_state):
    """Return a shuffled copy of y; with labels, shuffle only within each label group."""
if labels is None:
ind = random_state.permutation(len(y))
else:
ind = np.arange(len(labels))
for label in np.unique(labels):
this_mask = (labels == label)
ind[this_mask] = random_state.permutation(ind[this_mask])
return y[ind]
def check_cv(cv, X=None, y=None, classifier=False):
"""Input checker utility for building a CV in a user friendly way.
Parameters
----------
cv : int, a cv generator instance, or None
The input specifying which cv generator to use. It can be an
integer, in which case it is the number of folds in a KFold,
None, in which case 3 fold is used, or another object, that
will then be used as a cv generator.
X : array-like
The data the cross-val object will be applied on.
y : array-like
The target variable for a supervised learning problem.
classifier : boolean optional
Whether the task is a classification task, in which case
stratified KFold will be used.
Returns
-------
checked_cv: a cross-validation generator instance.
The return value is guaranteed to be a cv generator instance, whatever
the input type.
"""
return _check_cv(cv, X=X, y=y, classifier=classifier)
def _check_cv(cv, X=None, y=None, classifier=False):
# This exists for internal use while indices is being deprecated.
is_sparse = sp.issparse(X)
if cv is None:
cv = 3
if isinstance(cv, numbers.Integral):
if classifier:
if type_of_target(y) in ['binary', 'multiclass']:
cv = StratifiedKFold(y, cv)
else:
cv = KFold(_num_samples(y), cv)
else:
if not is_sparse:
n_samples = len(X)
else:
n_samples = X.shape[0]
cv = KFold(n_samples, cv)
return cv
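# Illustration (not part of the upstream scikit-learn source) of the
# dispatch above: an integer cv becomes KFold for continuous targets and
# StratifiedKFold for classification targets.
#
#     X = np.ones((4, 1))
#     check_cv(2, X, y=np.array([0.1, 0.2, 0.3, 0.4]))           # KFold
#     check_cv(2, X, y=np.array([0, 1, 0, 1]), classifier=True)  # StratifiedKFold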
def permutation_test_score(estimator, X, y, cv=None,
n_permutations=100, n_jobs=1, labels=None,
random_state=0, verbose=0, scoring=None):
"""Evaluate the significance of a cross-validated score with permutations
Parameters
----------
estimator : estimator object implementing 'fit'
The object to use to fit the data.
X : array-like of shape at least 2D
The data to fit.
y : array-like
The target variable to try to predict in the case of
supervised learning.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
cv : integer or cross-validation generator, optional
If an integer is passed, it is the number of fold (default 3).
Specific cross-validation objects can be passed, see
sklearn.cross_validation module for the list of possible objects.
n_permutations : integer, optional
Number of times to permute ``y``.
n_jobs : integer, optional
The number of CPUs to use to do the computation. -1 means
'all CPUs'.
labels : array-like of shape [n_samples] (optional)
Labels constrain the permutation among groups of samples with
a same label.
random_state : RandomState or an int seed (0 by default)
A random number generator instance to define the state of the
random permutations generator.
verbose : integer, optional
The verbosity level.
Returns
-------
score : float
The true score without permuting targets.
permutation_scores : array, shape (n_permutations,)
The scores obtained for each permutations.
pvalue : float
The returned value equals p-value if `scoring` returns bigger
numbers for better scores (e.g., accuracy_score). If `scoring` is
rather a loss function (i.e. when lower is better such as with
`mean_squared_error`) then this is actually the complement of the
p-value: 1 - p-value.
Notes
-----
This function implements Test 1 in:
Ojala and Garriga. Permutation Tests for Studying Classifier
Performance. The Journal of Machine Learning Research (2010)
vol. 11
"""
X, y = indexable(X, y)
cv = _check_cv(cv, X, y, classifier=is_classifier(estimator))
scorer = check_scoring(estimator, scoring=scoring)
random_state = check_random_state(random_state)
# We clone the estimator to make sure that all the folds are
# independent, and that it is pickle-able.
score = _permutation_test_score(clone(estimator), X, y, cv, scorer)
permutation_scores = Parallel(n_jobs=n_jobs, verbose=verbose)(
delayed(_permutation_test_score)(
clone(estimator), X, _shuffle(y, labels, random_state), cv,
scorer)
for _ in range(n_permutations))
permutation_scores = np.array(permutation_scores)
pvalue = (np.sum(permutation_scores >= score) + 1.0) / (n_permutations + 1)
return score, permutation_scores, pvalue
permutation_test_score.__test__ = False  # to avoid a problem with nosetests
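# Usage sketch (not part of the upstream scikit-learn source): a small
# p-value indicates the observed score would be unlikely if features and
# labels were independent.
#
#     from sklearn.datasets import load_iris
#     from sklearn.svm import SVC
#     iris = load_iris()
#     score, perm_scores, pvalue = permutation_test_score(
#         SVC(), iris.data, iris.target, cv=3, n_permutations=30)
#     perm_scores.shape   # (30,)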
def train_test_split(*arrays, **options):
"""Split arrays or matrices into random train and test subsets
Quick utility that wraps input validation and
``next(iter(ShuffleSplit(n_samples)))`` and application to input
data into a single call for splitting (and optionally subsampling)
    data in a one-liner.
Parameters
----------
*arrays : sequence of arrays or scipy.sparse matrices with same shape[0]
Python lists or tuples occurring in arrays are converted to 1D numpy
arrays.
test_size : float, int, or None (default is None)
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the test split. If
int, represents the absolute number of test samples. If None,
the value is automatically set to the complement of the train size.
If train size is also None, test size is set to 0.25.
train_size : float, int, or None (default is None)
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the train split. If
int, represents the absolute number of train samples. If None,
the value is automatically set to the complement of the test size.
random_state : int or RandomState
Pseudo-random number generator state used for random sampling.
Returns
-------
splitting : list of arrays, length=2 * len(arrays)
List containing train-test split of input array.
Examples
--------
>>> import numpy as np
>>> from sklearn.cross_validation import train_test_split
>>> a, b = np.arange(10).reshape((5, 2)), range(5)
>>> a
array([[0, 1],
[2, 3],
[4, 5],
[6, 7],
[8, 9]])
>>> list(b)
[0, 1, 2, 3, 4]
>>> a_train, a_test, b_train, b_test = train_test_split(
... a, b, test_size=0.33, random_state=42)
...
>>> a_train
array([[4, 5],
[0, 1],
[6, 7]])
>>> b_train
[2, 0, 3]
>>> a_test
array([[2, 3],
[8, 9]])
>>> b_test
[1, 4]
"""
n_arrays = len(arrays)
if n_arrays == 0:
raise ValueError("At least one array required as input")
test_size = options.pop('test_size', None)
train_size = options.pop('train_size', None)
random_state = options.pop('random_state', None)
dtype = options.pop('dtype', None)
if dtype is not None:
warnings.warn("dtype option is ignored and will be removed in 0.18.",
DeprecationWarning)
allow_nd = options.pop('allow_nd', None)
allow_lists = options.pop('allow_lists', None)
if allow_lists is not None:
warnings.warn("The allow_lists option is deprecated and will be "
"assumed True in 0.18 and removed.", DeprecationWarning)
if options:
raise TypeError("Invalid parameters passed: %s" % str(options))
if allow_nd is not None:
warnings.warn("The allow_nd option is deprecated and will be "
"assumed True in 0.18 and removed.", DeprecationWarning)
if allow_lists is False or allow_nd is False:
arrays = [check_array(x, 'csr', allow_nd=allow_nd,
force_all_finite=False, ensure_2d=False)
if x is not None else x
for x in arrays]
if test_size is None and train_size is None:
test_size = 0.25
arrays = indexable(*arrays)
n_samples = _num_samples(arrays[0])
cv = ShuffleSplit(n_samples, test_size=test_size,
train_size=train_size,
random_state=random_state)
train, test = next(iter(cv))
return list(chain.from_iterable((safe_indexing(a, train),
safe_indexing(a, test)) for a in arrays))
train_test_split.__test__ = False  # to avoid a problem with nosetests
| bsd-3-clause |
danlewis85/pycno | pycno/pycno.py | 1 | 8270 | from __future__ import division, print_function
# https://github.com/danlewis85/pycno/blob/master/LICENSE
def pycno(gdf,value_field,cellsize,r=0.2,handle_null = True,converge=3,verbose=True):
"""Returns a smooth pycnophylactic interpolation raster for a given geodataframe
Args:
gdf (geopandas.geodataframe.GeoDataFrame): Input GeoDataFrame.
value_field (str): Field name of values to be used to produce pycnophylactic surface
cellsize (int): Pixel size of raster in planar units (i.e. metres, feet)
r (float, optional): Relaxation parameter, default of 0.2 is generally fine.
        handle_null (boolean, optional): If True, smooth with a null-aware (astropy) convolution so nodata cells do not erode the surface. Default True.
        converge (int, optional): Stopping exponent; iteration stops once the maximum change falls below max(value) * 10**-converge. Default 3 is generally fine.
verbose (boolean, optional): Print out progress at each iteration.
Returns:
        numpy.ndarray: Smooth pycnophylactic interpolation surface.
        rasterio.Affine: Geotransform of the output raster.
        The crs of the input GeoDataFrame, for reuse when saving.
"""
import rasterio
from rasterio.features import rasterize
from numpy import copy, absolute, round, unique, asarray, nanmax, pad, nan, power, convolve, nanmean, nansum, apply_along_axis
from numpy.ma import masked_invalid, masked_where
from pandas import DataFrame
from astropy.convolution import convolve as astro_convolve
# set nodata value
nodata = -9999
# work out raster rows and columns based on gdf extent and cellsize
xmin, ymin, xmax, ymax = gdf.total_bounds
xres = int((xmax - xmin) / cellsize)
yres = int((ymax - ymin) / cellsize)
# Work out transform so that we rasterize the area where the data are!
trans = rasterio.Affine.from_gdal(xmin,cellsize,0,ymax,0,-cellsize)
# First make a zone array
# NB using index values as ids can often be too large/alphanumeric. Limit is int32 polygon features.
# create a generator of geom, index pairs to use in rasterizing
shapes = ((geom,value) for geom, value in zip(gdf.geometry,gdf.index))
# burn the features into a raster array
feature_array = rasterize(shapes=shapes, fill=nodata, out_shape=(yres,xres),transform=trans)
# Get cell counts per index value (feature)
    unique_ids, counts = unique(feature_array, return_counts=True)
    cellcounts = asarray((unique_ids, counts)).T
# Lose the nodata counts
cellcounts = cellcounts[cellcounts[:,0]!=nodata,:]
# Adjust value totals by cells
# Make cell counts dataframe
celldf = DataFrame(cellcounts[:,1],index=cellcounts[:,0],columns=['cellcount'])
# Merge cell counts
gdf = gdf.merge(celldf,how='left',left_index=True, right_index=True)
# Calculate cell values
gdf['cellvalues'] = gdf[value_field]/gdf['cellcount']
# create a generator of geom, cellvalue pairs to use in rasterizing
shapes = ((geom,value) for geom, value in zip(gdf.geometry,gdf.cellvalues))
# Now burn the initial value raster
value_array = rasterize(shapes=shapes, fill=nodata, out_shape=(yres,xres),transform=trans)
# Set nodata in value array to np.nan
    value_array[value_array == nodata] = nan
# Set stopper value based on converge parameter
stopper = nanmax(value_array) * power(10.0,-converge)
# The basic numpy convolve function doesn't handle nulls.
def smooth2D(data):
# Create function that calls a 1 dimensionsal smoother.
s1d = lambda s: convolve(s,[0.5,0.0,0.5],mode='same')
# pad the data array with the mean value
padarray = pad(data,1,'constant',constant_values=nanmean(data))
# make nodata mask
mask = masked_invalid(padarray).mask
# set nodata as zero to avoid eroding the raster
padarray[mask] = 0.0
# Apply the convolution along each axis of the data and average
padarray = (apply_along_axis(s1d,1,padarray) + apply_along_axis(s1d,0,padarray))/2
# Reinstate nodata
padarray[mask] = nan
return padarray[1:-1,1:-1]
# The convolution function from astropy handles nulls.
def astroSmooth2d(data):
s1d = lambda s: astro_convolve(s,[0.5,0,0.5])
# pad the data array with the mean value
padarray = pad(data,1,'constant',constant_values=nanmean(data))
# Apply the convolution along each axis of the data and average
padarray = (apply_along_axis(s1d,1,padarray) + apply_along_axis(s1d,0,padarray))/2
return padarray[1:-1,1:-1]
def correct2Da(data):
        for idx, val in gdf[value_field].items():
# Create zone mask from feature_array
mask = masked_where(feature_array == idx,feature_array).mask
# Work out the correction factor
correct = (val - nansum(data[mask]))/mask.sum()
# Apply correction
data[mask] += correct
return data
def correct2Dm(data):
        for idx, val in gdf[value_field].items():
# Create zone mask from feature_array
mask = masked_where(feature_array == idx,feature_array).mask
# Work out the correction factor
correct = val / nansum(data[mask])
if correct != 0.0:
# Apply correction
data[mask] *= correct
return data
while True:
# Store the current iteration
old = copy(value_array)
# Smooth the value_array
if handle_null:
sm = astroSmooth2d(value_array)
else:
            sm = smooth2D(value_array)
# Relaxation to prevent overcompensation in the smoothing step
value_array = value_array*r + (1.0-r)*sm
# Perform correction
value_array = correct2Da(value_array)
# Reset any negative values to zero.
value_array[value_array<0] = 0.0
# Perform correction
value_array = correct2Dm(value_array)
if verbose:
print("Maximum Change: " + str(round(nanmax(absolute(old - value_array)),4)) + " - will stop at " + str(round(stopper,4)))
if nanmax(absolute(old - value_array)) < stopper:
break
return (value_array, trans, gdf.crs)
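# Usage sketch (not part of the original module); 'tracts.shp' and the
# 'population' field below are illustrative assumptions:
#
#     import geopandas as gpd
#     tracts = gpd.read_file('tracts.shp')  # source polygons with counts
#     surface, transform, crs = pycno(tracts, 'population', cellsize=100)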
def save_pycno(pycno_array,transform,crs,filestring,driver='GTiff'):
"""Saves a numpy array as a raster, largely a helper function for pycno
Args:
pycno_array (numpy array): 2D numpy array of pycnophylactic surface
transform (rasterio geotransform): Relevant transform from pycno()
crs (GeoPandas crs): Coordinate reference system of GeoDataFrame used in pycno()
filestring (str): File path to save raster
driver (str, optional): Format for output raster, default: geoTiff.
Returns:
None
"""
import rasterio
# Save raster
new_dataset = rasterio.open(filestring, 'w', driver=driver,height=pycno_array.shape[0], width=pycno_array.shape[1], count=1, dtype='float64',crs=crs, transform=transform)
new_dataset.write(pycno_array.astype('float64'),1)
new_dataset.close()
return None
def extract_values(pycno_array,gdf,transform,fieldname = 'Estimate'):
"""Extract raster value sums according to a provided polygon geodataframe
Args:
pycno_array (numpy array): 2D numpy array of pycnophylactic surface.
gdf (geopandas.geodataframe.GeoDataFrame): Target GeoDataFrame.
transform (rasterio geotransform): Relevant transform from pycno()
fieldname (str, optional): New gdf field to save estimates in. Default name: 'Estimate'.
Returns:
geopandas.geodataframe.GeoDataFrame: Target GeoDataFrame with appended estimates.
"""
from numpy import nansum
from rasterio.features import geometry_mask
out = gdf.copy()
estimates = []
# Iterate through geodataframe and extract values
    for idx, geom in gdf['geometry'].items():
mask = geometry_mask([geom],pycno_array.shape,transform=transform,invert=True)
estimates.append(nansum(pycno_array[mask]))
out[fieldname] = estimates
return out
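# Usage sketch (not part of the original module) chaining the three
# functions above; file and field names are illustrative assumptions:
#
#     surface, transform, crs = pycno(tracts, 'population', cellsize=100)
#     save_pycno(surface, transform, crs, 'population_surface.tif')
#     wards = gpd.read_file('wards.shp')  # different target zones
#     wards_est = extract_values(surface, wards, transform, fieldname='pop_est')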
| gpl-3.0 |
ryfeus/lambda-packs | LightGBM_sklearn_scipy_numpy/source/sklearn/ensemble/tests/test_voting_classifier.py | 5 | 17078 | """Testing for the VotingClassifier"""
import numpy as np
from sklearn.utils.testing import assert_almost_equal, assert_array_equal
from sklearn.utils.testing import assert_equal, assert_true, assert_false
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_warns_message
from sklearn.exceptions import NotFittedError
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import GaussianNB
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import VotingClassifier
from sklearn.model_selection import GridSearchCV
from sklearn import datasets
from sklearn.model_selection import cross_val_score
from sklearn.datasets import make_multilabel_classification
from sklearn.svm import SVC
from sklearn.multiclass import OneVsRestClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.base import BaseEstimator, ClassifierMixin
# Load the iris dataset and randomly permute it
iris = datasets.load_iris()
X, y = iris.data[:, 1:3], iris.target
def test_estimator_init():
eclf = VotingClassifier(estimators=[])
msg = ('Invalid `estimators` attribute, `estimators` should be'
' a list of (string, estimator) tuples')
assert_raise_message(AttributeError, msg, eclf.fit, X, y)
clf = LogisticRegression(random_state=1)
eclf = VotingClassifier(estimators=[('lr', clf)], voting='error')
msg = ('Voting must be \'soft\' or \'hard\'; got (voting=\'error\')')
assert_raise_message(ValueError, msg, eclf.fit, X, y)
eclf = VotingClassifier(estimators=[('lr', clf)], weights=[1, 2])
msg = ('Number of classifiers and weights must be equal'
'; got 2 weights, 1 estimators')
assert_raise_message(ValueError, msg, eclf.fit, X, y)
eclf = VotingClassifier(estimators=[('lr', clf), ('lr', clf)],
weights=[1, 2])
msg = "Names provided are not unique: ['lr', 'lr']"
assert_raise_message(ValueError, msg, eclf.fit, X, y)
eclf = VotingClassifier(estimators=[('lr__', clf)])
msg = "Estimator names must not contain __: got ['lr__']"
assert_raise_message(ValueError, msg, eclf.fit, X, y)
eclf = VotingClassifier(estimators=[('estimators', clf)])
msg = "Estimator names conflict with constructor arguments: ['estimators']"
assert_raise_message(ValueError, msg, eclf.fit, X, y)
def test_predictproba_hardvoting():
eclf = VotingClassifier(estimators=[('lr1', LogisticRegression()),
('lr2', LogisticRegression())],
voting='hard')
msg = "predict_proba is not available when voting='hard'"
assert_raise_message(AttributeError, msg, eclf.predict_proba, X)
def test_notfitted():
eclf = VotingClassifier(estimators=[('lr1', LogisticRegression()),
('lr2', LogisticRegression())],
voting='soft')
msg = ("This VotingClassifier instance is not fitted yet. Call \'fit\'"
" with appropriate arguments before using this method.")
assert_raise_message(NotFittedError, msg, eclf.predict_proba, X)
def test_majority_label_iris():
"""Check classification by majority label on dataset iris."""
clf1 = LogisticRegression(random_state=123)
clf2 = RandomForestClassifier(random_state=123)
clf3 = GaussianNB()
eclf = VotingClassifier(estimators=[
('lr', clf1), ('rf', clf2), ('gnb', clf3)],
voting='hard')
scores = cross_val_score(eclf, X, y, cv=5, scoring='accuracy')
assert_almost_equal(scores.mean(), 0.95, decimal=2)
def test_tie_situation():
"""Check voting classifier selects smaller class label in tie situation."""
clf1 = LogisticRegression(random_state=123)
clf2 = RandomForestClassifier(random_state=123)
eclf = VotingClassifier(estimators=[('lr', clf1), ('rf', clf2)],
voting='hard')
assert_equal(clf1.fit(X, y).predict(X)[73], 2)
assert_equal(clf2.fit(X, y).predict(X)[73], 1)
assert_equal(eclf.fit(X, y).predict(X)[73], 1)
def test_weights_iris():
"""Check classification by average probabilities on dataset iris."""
clf1 = LogisticRegression(random_state=123)
clf2 = RandomForestClassifier(random_state=123)
clf3 = GaussianNB()
eclf = VotingClassifier(estimators=[
('lr', clf1), ('rf', clf2), ('gnb', clf3)],
voting='soft',
weights=[1, 2, 10])
scores = cross_val_score(eclf, X, y, cv=5, scoring='accuracy')
assert_almost_equal(scores.mean(), 0.93, decimal=2)
def test_predict_on_toy_problem():
"""Manually check predicted class labels for toy dataset."""
clf1 = LogisticRegression(random_state=123)
clf2 = RandomForestClassifier(random_state=123)
clf3 = GaussianNB()
X = np.array([[-1.1, -1.5],
[-1.2, -1.4],
[-3.4, -2.2],
[1.1, 1.2],
[2.1, 1.4],
[3.1, 2.3]])
y = np.array([1, 1, 1, 2, 2, 2])
    assert_array_equal(clf1.fit(X, y).predict(X), [1, 1, 1, 2, 2, 2])
    assert_array_equal(clf2.fit(X, y).predict(X), [1, 1, 1, 2, 2, 2])
    assert_array_equal(clf3.fit(X, y).predict(X), [1, 1, 1, 2, 2, 2])
eclf = VotingClassifier(estimators=[
('lr', clf1), ('rf', clf2), ('gnb', clf3)],
voting='hard',
weights=[1, 1, 1])
    assert_array_equal(eclf.fit(X, y).predict(X), [1, 1, 1, 2, 2, 2])
eclf = VotingClassifier(estimators=[
('lr', clf1), ('rf', clf2), ('gnb', clf3)],
voting='soft',
weights=[1, 1, 1])
    assert_array_equal(eclf.fit(X, y).predict(X), [1, 1, 1, 2, 2, 2])
def test_predict_proba_on_toy_problem():
"""Calculate predicted probabilities on toy dataset."""
clf1 = LogisticRegression(random_state=123)
clf2 = RandomForestClassifier(random_state=123)
clf3 = GaussianNB()
X = np.array([[-1.1, -1.5], [-1.2, -1.4], [-3.4, -2.2], [1.1, 1.2]])
y = np.array([1, 1, 2, 2])
clf1_res = np.array([[0.59790391, 0.40209609],
[0.57622162, 0.42377838],
[0.50728456, 0.49271544],
[0.40241774, 0.59758226]])
clf2_res = np.array([[0.8, 0.2],
[0.8, 0.2],
[0.2, 0.8],
[0.3, 0.7]])
clf3_res = np.array([[0.9985082, 0.0014918],
[0.99845843, 0.00154157],
[0., 1.],
[0., 1.]])
t00 = (2*clf1_res[0][0] + clf2_res[0][0] + clf3_res[0][0]) / 4
t11 = (2*clf1_res[1][1] + clf2_res[1][1] + clf3_res[1][1]) / 4
t21 = (2*clf1_res[2][1] + clf2_res[2][1] + clf3_res[2][1]) / 4
t31 = (2*clf1_res[3][1] + clf2_res[3][1] + clf3_res[3][1]) / 4
eclf = VotingClassifier(estimators=[
('lr', clf1), ('rf', clf2), ('gnb', clf3)],
voting='soft',
weights=[2, 1, 1])
eclf_res = eclf.fit(X, y).predict_proba(X)
assert_almost_equal(t00, eclf_res[0][0], decimal=1)
assert_almost_equal(t11, eclf_res[1][1], decimal=1)
assert_almost_equal(t21, eclf_res[2][1], decimal=1)
assert_almost_equal(t31, eclf_res[3][1], decimal=1)
try:
eclf = VotingClassifier(estimators=[
('lr', clf1), ('rf', clf2), ('gnb', clf3)],
voting='hard')
eclf.fit(X, y).predict_proba(X)
except AttributeError:
pass
else:
raise AssertionError('AttributeError for voting == "hard"'
' and with predict_proba not raised')
def test_multilabel():
"""Check if error is raised for multilabel classification."""
X, y = make_multilabel_classification(n_classes=2, n_labels=1,
allow_unlabeled=False,
random_state=123)
clf = OneVsRestClassifier(SVC(kernel='linear'))
eclf = VotingClassifier(estimators=[('ovr', clf)], voting='hard')
try:
eclf.fit(X, y)
except NotImplementedError:
return
def test_gridsearch():
"""Check GridSearch support."""
clf1 = LogisticRegression(random_state=1)
clf2 = RandomForestClassifier(random_state=1)
clf3 = GaussianNB()
eclf = VotingClassifier(estimators=[
('lr', clf1), ('rf', clf2), ('gnb', clf3)],
voting='soft')
params = {'lr__C': [1.0, 100.0],
'voting': ['soft', 'hard'],
'weights': [[0.5, 0.5, 0.5], [1.0, 0.5, 0.5]]}
grid = GridSearchCV(estimator=eclf, param_grid=params, cv=5)
grid.fit(iris.data, iris.target)
def test_parallel_fit():
"""Check parallel backend of VotingClassifier on toy dataset."""
clf1 = LogisticRegression(random_state=123)
clf2 = RandomForestClassifier(random_state=123)
clf3 = GaussianNB()
X = np.array([[-1.1, -1.5], [-1.2, -1.4], [-3.4, -2.2], [1.1, 1.2]])
y = np.array([1, 1, 2, 2])
eclf1 = VotingClassifier(estimators=[
('lr', clf1), ('rf', clf2), ('gnb', clf3)],
voting='soft',
n_jobs=1).fit(X, y)
eclf2 = VotingClassifier(estimators=[
('lr', clf1), ('rf', clf2), ('gnb', clf3)],
voting='soft',
n_jobs=2).fit(X, y)
assert_array_equal(eclf1.predict(X), eclf2.predict(X))
assert_array_equal(eclf1.predict_proba(X), eclf2.predict_proba(X))
def test_sample_weight():
"""Tests sample_weight parameter of VotingClassifier"""
clf1 = LogisticRegression(random_state=123)
clf2 = RandomForestClassifier(random_state=123)
clf3 = SVC(probability=True, random_state=123)
eclf1 = VotingClassifier(estimators=[
('lr', clf1), ('rf', clf2), ('svc', clf3)],
voting='soft').fit(X, y, sample_weight=np.ones((len(y),)))
eclf2 = VotingClassifier(estimators=[
('lr', clf1), ('rf', clf2), ('svc', clf3)],
voting='soft').fit(X, y)
assert_array_equal(eclf1.predict(X), eclf2.predict(X))
assert_array_equal(eclf1.predict_proba(X), eclf2.predict_proba(X))
sample_weight = np.random.RandomState(123).uniform(size=(len(y),))
eclf3 = VotingClassifier(estimators=[('lr', clf1)], voting='soft')
eclf3.fit(X, y, sample_weight)
clf1.fit(X, y, sample_weight)
assert_array_equal(eclf3.predict(X), clf1.predict(X))
assert_array_equal(eclf3.predict_proba(X), clf1.predict_proba(X))
clf4 = KNeighborsClassifier()
eclf3 = VotingClassifier(estimators=[
('lr', clf1), ('svc', clf3), ('knn', clf4)],
voting='soft')
msg = ('Underlying estimator \'knn\' does not support sample weights.')
assert_raise_message(ValueError, msg, eclf3.fit, X, y, sample_weight)
def test_sample_weight_kwargs():
"""Check that VotingClassifier passes sample_weight as kwargs"""
class MockClassifier(BaseEstimator, ClassifierMixin):
"""Mock Classifier to check that sample_weight is received as kwargs"""
def fit(self, X, y, *args, **sample_weight):
assert_true('sample_weight' in sample_weight)
clf = MockClassifier()
eclf = VotingClassifier(estimators=[('mock', clf)], voting='soft')
# Should not raise an error.
eclf.fit(X, y, sample_weight=np.ones((len(y),)))
def test_set_params():
"""set_params should be able to set estimators"""
clf1 = LogisticRegression(random_state=123, C=1.0)
clf2 = RandomForestClassifier(random_state=123, max_depth=None)
clf3 = GaussianNB()
eclf1 = VotingClassifier([('lr', clf1), ('rf', clf2)], voting='soft',
weights=[1, 2])
eclf1.fit(X, y)
eclf2 = VotingClassifier([('lr', clf1), ('nb', clf3)], voting='soft',
weights=[1, 2])
eclf2.set_params(nb=clf2).fit(X, y)
assert_false(hasattr(eclf2, 'nb'))
assert_array_equal(eclf1.predict(X), eclf2.predict(X))
assert_array_equal(eclf1.predict_proba(X), eclf2.predict_proba(X))
assert_equal(eclf2.estimators[0][1].get_params(), clf1.get_params())
assert_equal(eclf2.estimators[1][1].get_params(), clf2.get_params())
eclf1.set_params(lr__C=10.0)
eclf2.set_params(nb__max_depth=5)
assert_true(eclf1.estimators[0][1].get_params()['C'] == 10.0)
assert_true(eclf2.estimators[1][1].get_params()['max_depth'] == 5)
assert_equal(eclf1.get_params()["lr__C"],
eclf1.get_params()["lr"].get_params()['C'])
def test_set_estimator_none():
"""VotingClassifier set_params should be able to set estimators as None"""
# Test predict
clf1 = LogisticRegression(random_state=123)
clf2 = RandomForestClassifier(random_state=123)
clf3 = GaussianNB()
eclf1 = VotingClassifier(estimators=[('lr', clf1), ('rf', clf2),
('nb', clf3)],
voting='hard', weights=[1, 0, 0.5]).fit(X, y)
eclf2 = VotingClassifier(estimators=[('lr', clf1), ('rf', clf2),
('nb', clf3)],
voting='hard', weights=[1, 1, 0.5])
eclf2.set_params(rf=None).fit(X, y)
assert_array_equal(eclf1.predict(X), eclf2.predict(X))
assert_true(dict(eclf2.estimators)["rf"] is None)
assert_true(len(eclf2.estimators_) == 2)
assert_true(all([not isinstance(est, RandomForestClassifier) for est in
eclf2.estimators_]))
assert_true(eclf2.get_params()["rf"] is None)
eclf1.set_params(voting='soft').fit(X, y)
eclf2.set_params(voting='soft').fit(X, y)
assert_array_equal(eclf1.predict(X), eclf2.predict(X))
assert_array_equal(eclf1.predict_proba(X), eclf2.predict_proba(X))
msg = ('All estimators are None. At least one is required'
' to be a classifier!')
assert_raise_message(
ValueError, msg, eclf2.set_params(lr=None, rf=None, nb=None).fit, X, y)
# Test soft voting transform
X1 = np.array([[1], [2]])
y1 = np.array([1, 2])
eclf1 = VotingClassifier(estimators=[('rf', clf2), ('nb', clf3)],
voting='soft', weights=[0, 0.5]).fit(X1, y1)
eclf2 = VotingClassifier(estimators=[('rf', clf2), ('nb', clf3)],
voting='soft', weights=[1, 0.5])
eclf2.set_params(rf=None).fit(X1, y1)
assert_array_equal(eclf1.transform(X1), np.array([[[0.7, 0.3], [0.3, 0.7]],
[[1., 0.], [0., 1.]]]))
assert_array_equal(eclf2.transform(X1), np.array([[[1., 0.], [0., 1.]]]))
eclf1.set_params(voting='hard')
eclf2.set_params(voting='hard')
assert_array_equal(eclf1.transform(X1), np.array([[0, 0], [1, 1]]))
assert_array_equal(eclf2.transform(X1), np.array([[0], [1]]))
def test_estimator_weights_format():
# Test estimator weights inputs as list and array
clf1 = LogisticRegression(random_state=123)
clf2 = RandomForestClassifier(random_state=123)
eclf1 = VotingClassifier(estimators=[
('lr', clf1), ('rf', clf2)],
weights=[1, 2],
voting='soft')
eclf2 = VotingClassifier(estimators=[
('lr', clf1), ('rf', clf2)],
weights=np.array((1, 2)),
voting='soft')
eclf1.fit(X, y)
eclf2.fit(X, y)
assert_array_equal(eclf1.predict_proba(X), eclf2.predict_proba(X))
def test_transform():
"""Check transform method of VotingClassifier on toy dataset."""
clf1 = LogisticRegression(random_state=123)
clf2 = RandomForestClassifier(random_state=123)
clf3 = GaussianNB()
X = np.array([[-1.1, -1.5], [-1.2, -1.4], [-3.4, -2.2], [1.1, 1.2]])
y = np.array([1, 1, 2, 2])
eclf1 = VotingClassifier(estimators=[
('lr', clf1), ('rf', clf2), ('gnb', clf3)],
voting='soft').fit(X, y)
eclf2 = VotingClassifier(estimators=[
('lr', clf1), ('rf', clf2), ('gnb', clf3)],
voting='soft',
flatten_transform=True).fit(X, y)
eclf3 = VotingClassifier(estimators=[
('lr', clf1), ('rf', clf2), ('gnb', clf3)],
voting='soft',
flatten_transform=False).fit(X, y)
warn_msg = ("'flatten_transform' default value will be "
"changed to True in 0.21."
"To silence this warning you may"
" explicitly set flatten_transform=False.")
res = assert_warns_message(DeprecationWarning, warn_msg,
eclf1.transform, X)
assert_array_equal(res.shape, (3, 4, 2))
assert_array_equal(eclf2.transform(X).shape, (4, 6))
assert_array_equal(eclf3.transform(X).shape, (3, 4, 2))
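# transform shapes: (n_classifiers, n_samples, n_classes) when not
# flattened, (n_samples, n_classifiers * n_classes) when flattened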
assert_array_equal(res.swapaxes(0, 1).reshape((4, 6)),
eclf2.transform(X))
assert_array_equal(eclf3.transform(X).swapaxes(0, 1).reshape((4, 6)),
eclf2.transform(X))
| mit |
peastman/msmbuilder | msmbuilder/project_templates/tica/tica-sample-coordinate-plot.py | 9 | 1174 | """Plot the result of sampling a tICA coordinate
{{header}}
"""
# ? include "plot_header.template"
# ? from "plot_macros.template" import xdg_open with context
import numpy as np
import seaborn as sns
from matplotlib import pyplot as plt
from msmbuilder.io import load_trajs, load_generic
sns.set_style('ticks')
colors = sns.color_palette()
## Load
meta, ttrajs = load_trajs('ttrajs')
txx = np.concatenate(list(ttrajs.values()))
inds = load_generic("tica-dimension-0-inds.pickl")
straj = []
for traj_i, frame_i in inds:
straj += [ttrajs[traj_i][frame_i, :]]
straj = np.asarray(straj)
## Overlay sampled trajectory on histogram
def plot_sampled_traj(ax):
ax.hexbin(txx[:, 0], txx[:, 1],
cmap='magma_r',
mincnt=1,
bins='log',
alpha=0.8,
)
ax.plot(straj[:, 0], straj[:, 1], 'o-', label='Sampled')
ax.set_xlabel("tIC 1", fontsize=16)
ax.set_ylabel("tIC 2", fontsize=16)
ax.legend(loc='best')
## Plot
fig, ax = plt.subplots(figsize=(7, 5))
plot_sampled_traj(ax)
fig.tight_layout()
fig.savefig('tica-dimension-0-heatmap.pdf')
# {{xdg_open('tica-dimension-0-heatmap.pdf')}}
| lgpl-2.1 |
tdent/pycbc | pycbc/io/live.py | 2 | 17300 | import logging
import os
import pycbc
import numpy
import lal
from six import u as unicode
from glue.ligolw import ligolw
from glue.ligolw import lsctables
from glue.ligolw import utils as ligolw_utils
from glue.ligolw.utils import process as ligolw_process
from glue.ligolw import param as ligolw_param
from pycbc import version as pycbc_version
from pycbc import pnutils
from pycbc.tmpltbank import return_empty_sngl
from pycbc.results import ifo_color
# FIXME: legacy helpers for building PSD XML documents; delete these once
# we move away from XML formats entirely
def _build_series(series, dim_names, comment, delta_name, delta_unit):
from glue.ligolw import array as ligolw_array
Attributes = ligolw.sax.xmlreader.AttributesImpl
elem = ligolw.LIGO_LW(
Attributes({u"Name": unicode(series.__class__.__name__)}))
if comment is not None:
elem.appendChild(ligolw.Comment()).pcdata = comment
elem.appendChild(ligolw.Time.from_gps(series.epoch, u"epoch"))
elem.appendChild(ligolw_param.Param.from_pyvalue(u"f0", series.f0,
unit=u"s^-1"))
delta = getattr(series, delta_name)
if numpy.iscomplexobj(series.data.data):
data = numpy.row_stack((numpy.arange(len(series.data.data)) * delta,
series.data.data.real, series.data.data.imag))
else:
data = numpy.row_stack((numpy.arange(len(series.data.data)) * delta,
series.data.data))
a = ligolw_array.Array.build(series.name, data, dim_names=dim_names)
a.Unit = str(series.sampleUnits)
dim0 = a.getElementsByTagName(ligolw.Dim.tagName)[0]
dim0.Unit = delta_unit
dim0.Start = series.f0
dim0.Scale = delta
elem.appendChild(a)
return elem
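# A minimal usage sketch for _build_series (assuming `fseries` is a lal
# REAL8FrequencySeries; the variable name is illustrative only):
#
#     elem = _build_series(fseries, (u"Frequency,Real", u"Frequency"),
#                          None, 'deltaF', 's^-1')
#
# The returned LIGO_LW element wraps the series as an Array child holding
# the abscissa and the (real, and imaginary if complex) sample values.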
def snr_series_to_xml(snr_series, document, sngl_inspiral_id):
"""Save an SNR time series into an XML document, in a format compatible
with BAYESTAR.
"""
snr_lal = snr_series.lal()
snr_lal.name = 'snr'
snr_lal.sampleUnits = ''
snr_xml = _build_series(snr_lal, (u'Time', u'Time,Real,Imaginary'), None,
'deltaT', 's')
snr_node = document.childNodes[-1].appendChild(snr_xml)
eid_param = ligolw_param.Param.build(u'event_id', u'ilwd:char',
sngl_inspiral_id)
snr_node.appendChild(eid_param)
def make_psd_xmldoc(psddict, xmldoc=None):
"""Add a set of PSDs to a LIGOLW XML document. If the document is not
given, a new one is created first.
"""
xmldoc = ligolw.Document() if xmldoc is None else xmldoc.childNodes[0]
# the PSDs must be children of a LIGO_LW with name "psd"
root_name = u"psd"
Attributes = ligolw.sax.xmlreader.AttributesImpl
lw = xmldoc.appendChild(
ligolw.LIGO_LW(Attributes({u"Name": root_name})))
for instrument, psd in psddict.items():
xmlseries = _build_series(psd, (u"Frequency,Real", u"Frequency"),
None, 'deltaF', 's^-1')
fs = lw.appendChild(xmlseries)
fs.appendChild(ligolw_param.Param.from_pyvalue(u"instrument",
instrument))
return xmldoc
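# A minimal sketch of how make_psd_xmldoc is used further below (the dict
# maps detector names to lal REAL8FrequencySeries PSDs; names are
# illustrative):
#
#     xmldoc = make_psd_xmldoc({'H1': h1_psd, 'L1': l1_psd})
#     ligolw_utils.write_filename(xmldoc, 'psd.xml.gz', gz=True)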
class SingleCoincForGraceDB(object):
"""Create xml files and submit them to gracedb from PyCBC Live"""
def __init__(self, ifos, coinc_results, **kwargs):
"""Initialize a ligolw xml representation of a zerolag trigger
for upload from pycbc live to gracedb.
Parameters
----------
ifos: list of strs
A list of the ifos participating in this trigger.
coinc_results: dict of values
A dictionary of values. The format is defined in
pycbc/events/coinc.py and matches the on disk representation
in the hdf file for this time.
psds: dict of FrequencySeries
Dictionary providing PSD estimates for all involved detectors.
low_frequency_cutoff: float
Minimum valid frequency for the PSD estimates.
followup_data: dict of dicts, optional
Dictionary providing SNR time series for each detector,
to be used in sky localization with BAYESTAR. The format should
be `followup_data['H1']['snr_series']`. More detectors can be
present than given in `ifos`. If so, the extra detectors will only
be used for sky localization.
channel_names: dict of strings, optional
Strain channel names for each detector.
Will be recorded in the sngl_inspiral table.
"""
self.template_id = coinc_results['foreground/%s/template_id' % ifos[0]]
self.coinc_results = coinc_results
self.ifos = ifos
# remember if this should be marked as HWINJ
self.is_hardware_injection = ('HWINJ' in coinc_results
and coinc_results['HWINJ'])
if 'followup_data' in kwargs:
fud = kwargs['followup_data']
assert len({fud[ifo]['snr_series'].delta_t for ifo in fud}) == 1, \
        "delta_t does not match across ifos"
self.snr_series = {ifo: fud[ifo]['snr_series'] for ifo in fud}
usable_ifos = fud.keys()
followup_ifos = list(set(usable_ifos) - set(ifos))
else:
self.snr_series = None
usable_ifos = ifos
followup_ifos = []
# Set up the bare structure of the xml document
outdoc = ligolw.Document()
outdoc.appendChild(ligolw.LIGO_LW())
proc_id = ligolw_process.register_to_xmldoc(
outdoc, 'pycbc', {}, ifos=usable_ifos, comment='',
version=pycbc_version.git_hash,
cvs_repository='pycbc/'+pycbc_version.git_branch,
cvs_entry_time=pycbc_version.date).process_id
# Set up coinc_definer table
coinc_def_table = lsctables.New(lsctables.CoincDefTable)
coinc_def_id = lsctables.CoincDefID(0)
coinc_def_row = lsctables.CoincDef()
coinc_def_row.search = "inspiral"
coinc_def_row.description = "sngl_inspiral<-->sngl_inspiral coincs"
coinc_def_row.coinc_def_id = coinc_def_id
coinc_def_row.search_coinc_type = 0
coinc_def_table.append(coinc_def_row)
outdoc.childNodes[0].appendChild(coinc_def_table)
# Set up coinc inspiral and coinc event tables
coinc_id = lsctables.CoincID(0)
coinc_event_table = lsctables.New(lsctables.CoincTable)
coinc_event_row = lsctables.Coinc()
coinc_event_row.coinc_def_id = coinc_def_id
coinc_event_row.nevents = len(usable_ifos)
coinc_event_row.instruments = ','.join(usable_ifos)
coinc_event_row.time_slide_id = lsctables.TimeSlideID(0)
coinc_event_row.process_id = proc_id
coinc_event_row.coinc_event_id = coinc_id
coinc_event_row.likelihood = 0.
coinc_event_table.append(coinc_event_row)
outdoc.childNodes[0].appendChild(coinc_event_table)
# Set up sngls
sngl_inspiral_table = lsctables.New(lsctables.SnglInspiralTable)
coinc_event_map_table = lsctables.New(lsctables.CoincMapTable)
sngl_populated = None
network_snrsq = 0
for sngl_id, ifo in enumerate(usable_ifos):
sngl = return_empty_sngl(nones=True)
sngl.event_id = lsctables.SnglInspiralID(sngl_id)
sngl.process_id = proc_id
sngl.ifo = ifo
names = [n.split('/')[-1] for n in coinc_results
if 'foreground/%s' % ifo in n]
for name in names:
val = coinc_results['foreground/%s/%s' % (ifo, name)]
if name == 'end_time':
sngl.set_end(lal.LIGOTimeGPS(val))
else:
try:
setattr(sngl, name, val)
except AttributeError:
pass
if sngl.mass1 and sngl.mass2:
sngl.mtotal, sngl.eta = pnutils.mass1_mass2_to_mtotal_eta(
sngl.mass1, sngl.mass2)
sngl.mchirp, _ = pnutils.mass1_mass2_to_mchirp_eta(
sngl.mass1, sngl.mass2)
sngl_populated = sngl
if sngl.snr:
sngl.eff_distance = (sngl.sigmasq)**0.5 / sngl.snr
network_snrsq += sngl.snr ** 2.0
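# eff_distance above follows sigma / SNR with sigma = sqrt(sigmasq);
# the network SNR is the quadrature sum of the single-detector SNRs,
# square-rooted when filling the coinc_inspiral row below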
if 'channel_names' in kwargs and ifo in kwargs['channel_names']:
sngl.channel = kwargs['channel_names'][ifo]
sngl_inspiral_table.append(sngl)
# Set up coinc_map entry
coinc_map_row = lsctables.CoincMap()
coinc_map_row.table_name = 'sngl_inspiral'
coinc_map_row.coinc_event_id = coinc_id
coinc_map_row.event_id = sngl.event_id
coinc_event_map_table.append(coinc_map_row)
if self.snr_series is not None:
snr_series_to_xml(self.snr_series[ifo], outdoc, sngl.event_id)
# for subthreshold detectors, respect BAYESTAR's assumptions and checks
bayestar_check_fields = ('mass1 mass2 mtotal mchirp eta spin1x '
'spin1y spin1z spin2x spin2y spin2z').split()
subthreshold_sngl_time = numpy.mean(
[coinc_results['foreground/{}/end_time'.format(ifo)]
for ifo in ifos])
for sngl in sngl_inspiral_table:
if sngl.ifo in followup_ifos:
for bcf in bayestar_check_fields:
setattr(sngl, bcf, getattr(sngl_populated, bcf))
sngl.set_end(lal.LIGOTimeGPS(subthreshold_sngl_time))
outdoc.childNodes[0].appendChild(coinc_event_map_table)
outdoc.childNodes[0].appendChild(sngl_inspiral_table)
# Set up the coinc inspiral table
coinc_inspiral_table = lsctables.New(lsctables.CoincInspiralTable)
coinc_inspiral_row = lsctables.CoincInspiral()
# This seems to be used as FAP, which should not be in gracedb
coinc_inspiral_row.false_alarm_rate = 0
coinc_inspiral_row.minimum_duration = 0.
coinc_inspiral_row.set_ifos(usable_ifos)
coinc_inspiral_row.coinc_event_id = coinc_id
coinc_inspiral_row.mchirp = sngl_populated.mchirp
coinc_inspiral_row.mass = sngl_populated.mtotal
coinc_inspiral_row.end_time = sngl_populated.end_time
coinc_inspiral_row.end_time_ns = sngl_populated.end_time_ns
coinc_inspiral_row.snr = network_snrsq ** 0.5
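# convert the inverse false-alarm rate (IFAR, reported in years)
# into a false-alarm rate in Hz for the coinc_inspiral table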
far = 1.0 / (lal.YRJUL_SI * coinc_results['foreground/ifar'])
coinc_inspiral_row.combined_far = far
coinc_inspiral_table.append(coinc_inspiral_row)
outdoc.childNodes[0].appendChild(coinc_inspiral_table)
# append the PSDs
self.psds = kwargs['psds']
psds_lal = {}
for ifo in self.psds:
psd = self.psds[ifo]
kmin = int(kwargs['low_frequency_cutoff'] / psd.delta_f)
fseries = lal.CreateREAL8FrequencySeries(
"psd", psd.epoch, kwargs['low_frequency_cutoff'], psd.delta_f,
lal.StrainUnit**2 / lal.HertzUnit, len(psd) - kmin)
fseries.data.data = psd.numpy()[kmin:] / pycbc.DYN_RANGE_FAC ** 2.0
psds_lal[ifo] = fseries
make_psd_xmldoc(psds_lal, outdoc)
self.outdoc = outdoc
self.time = sngl_populated.get_end()
def save(self, filename):
"""Write this trigger to gracedb compatible xml format
Parameters
----------
filename: str
Name of file to write to disk.
"""
gz = filename.endswith('.gz')
ligolw_utils.write_filename(self.outdoc, filename, gz=gz)
def upload(self, fname, gracedb_server=None, testing=True,
extra_strings=None):
"""Upload this trigger to gracedb
Parameters
----------
fname: str
The name to give the xml file associated with this trigger
gracedb_server: string, optional
URL to the GraceDB web API service for uploading the event.
If omitted, the default will be used.
testing: bool
Switch to determine if the upload should be sent to gracedb as a
test trigger (True) or a production trigger (False).
"""
from ligo.gracedb.rest import GraceDb
import matplotlib
matplotlib.use('Agg')
import pylab
# first of all, make sure the event is saved on disk
# as GraceDB operations can fail later
self.save(fname)
if self.snr_series is not None:
if fname.endswith('.xml.gz'):
snr_series_fname = fname.replace('.xml.gz', '.hdf')
else:
snr_series_fname = fname.replace('.xml', '.hdf')
snr_series_plot_fname = snr_series_fname.replace('.hdf',
'_snr.png')
psd_series_plot_fname = snr_series_fname.replace('.hdf',
'_psd.png')
pylab.figure()
for ifo in sorted(self.snr_series):
curr_snrs = self.snr_series[ifo]
curr_snrs.save(snr_series_fname, group='%s/snr' % ifo)
pylab.plot(curr_snrs.sample_times, abs(curr_snrs),
c=ifo_color(ifo), label=ifo)
if ifo in self.ifos:
snr = self.coinc_results['foreground/%s/%s' %
(ifo, 'snr')]
endt = self.coinc_results['foreground/%s/%s' %
(ifo, 'end_time')]
pylab.plot([endt], [snr], c=ifo_color(ifo), marker='x')
pylab.legend()
pylab.xlabel('GPS time (s)')
pylab.ylabel('SNR')
pylab.savefig(snr_series_plot_fname)
pylab.close()
pylab.figure()
for ifo in sorted(self.snr_series):
# Undo dynamic range factor
curr_psd = self.psds[ifo].astype(numpy.float64)
curr_psd /= pycbc.DYN_RANGE_FAC ** 2.0
curr_psd.save(snr_series_fname, group='%s/psd' % ifo)
# Can't plot log(0) so start from point 1
pylab.loglog(curr_psd.sample_frequencies[1:],
curr_psd[1:]**0.5, c=ifo_color(ifo), label=ifo)
pylab.legend()
pylab.xlim([10, 1300])
pylab.ylim([3E-24, 1E-20])
pylab.xlabel('Frequency (Hz)')
pylab.ylabel('ASD')
pylab.savefig(psd_series_plot_fname)
gid = None
try:
# try connecting to GraceDB
gracedb = GraceDb(gracedb_server) \
if gracedb_server is not None else GraceDb()
# create GraceDB event
group = 'Test' if testing else 'CBC'
r = gracedb.createEvent(group, "pycbc", fname, "AllSky").json()
gid = r["graceid"]
logging.info("Uploaded event %s", gid)
if self.is_hardware_injection:
gracedb.writeLabel(gid, 'INJ')
logging.info("Tagging event %s as an injection", gid)
# upload PSDs. Note that the PSDs are already stored in the
# original event file and we just upload a copy of that same file
# here. This keeps things as they were in O2 and can be removed
# after updating the follow-up infrastructure
psd_fname = 'psd.xml.gz' if fname.endswith('.gz') else 'psd.xml'
gracedb.writeLog(gid, "PyCBC PSD estimate from the time of event",
psd_fname, open(fname, "rb").read(), "psd")
logging.info("Uploaded PSDs for event %s", gid)
# add other tags and comments
gracedb.writeLog(
gid, "Using PyCBC code hash %s" % pycbc_version.git_hash)
extra_strings = [] if extra_strings is None else extra_strings
for text in extra_strings:
gracedb.writeLog(gid, text, tag_name=['analyst_comments'])
# upload SNR series in HDF format and plots
if self.snr_series is not None:
gracedb.writeLog(gid, 'SNR timeseries HDF file upload',
filename=snr_series_fname)
gracedb.writeLog(gid, 'SNR timeseries plot upload',
filename=snr_series_plot_fname,
tag_name=['background'],
displayName=['SNR timeseries'])
gracedb.writeLog(gid, 'PSD plot upload',
filename=psd_series_plot_fname,
tag_name=['psd'], displayName=['PSDs'])
except Exception as exc:
logging.error('Something failed during the upload/annotation of '
'event %s on GraceDB. The event may not have been '
'uploaded!', fname)
logging.error(str(exc))
return gid
__all__ = ['SingleCoincForGraceDB', 'make_psd_xmldoc', 'snr_series_to_xml']
| gpl-3.0 |
Joshmoss11/x-seq | deeptools/correlation_heatmap.py | 1 | 3479 | from matplotlib import use as mplt_use
mplt_use('Agg')
import matplotlib.pyplot as plt
import numpy as np
import scipy.cluster.hierarchy as sch
from matplotlib import rcParams
import matplotlib.colors as colors
def plot_correlation(corr_matrix, labels, plotFileName, vmax=None,
vmin=None, colormap='jet', image_format=None,
plot_numbers=False):
num_rows = corr_matrix.shape[0]
# set the font size according to the number of rows in the matrix
if num_rows < 6:
font_size = 14
elif num_rows > 40:
font_size = 5
else:
font_size = int(14 - 0.25*num_rows)
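# i.e. clamp at 14 for small matrices and 5 for very large ones, and
# decrease linearly (0.25 pt per row) in between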
rcParams.update({'font.size': font_size})
# set the minimum and maximum values
if vmax is None:
vmax = 1
if vmin is None:
vmin = 0 if corr_matrix.min() >= 0 else -1
# Compute and plot dendrogram.
fig = plt.figure(figsize=(11, 9.5))
axdendro = fig.add_axes([0.02, 0.12, 0.1, 0.66])
axdendro.set_axis_off()
y_var = sch.linkage(corr_matrix, method='complete')
z_var = sch.dendrogram(y_var, orientation='right',
link_color_func=lambda k: 'darkred')
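# z_var['leaves'] records the leaf order produced by the clustering; it is
# used below to reorder the rows/columns of the correlation matrix so the
# heatmap matches the dendrogram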
axdendro.set_xticks([])
axdendro.set_yticks([])
cmap = plt.get_cmap(colormap)
# make a new cmap based on the original colormap, clipped to the
# range 0.0-0.9. This avoids colors at the end of the range that
# are too dark to offer good contrast with the correlation
# numbers, which are plotted in black.
if plot_numbers:
cmap = cmap.from_list(colormap + "clipped",
cmap(np.linspace(0, 0.9, 10)))
cmap.set_under((0., 0., 1.))
# Plot distance matrix.
axmatrix = fig.add_axes([0.13, 0.1, 0.6, 0.7])
index = z_var['leaves']
corr_matrix = corr_matrix[index, :]
corr_matrix = corr_matrix[:, index]
img_mat = axmatrix.pcolormesh(corr_matrix,
edgecolors='black',
cmap=cmap,
vmax=vmax,
vmin=vmin)
axmatrix.set_xlim(0, num_rows)
axmatrix.set_ylim(0, num_rows)
axmatrix.yaxis.tick_right()
axmatrix.set_yticks(np.arange(corr_matrix.shape[0])+0.5)
axmatrix.set_yticklabels(np.array(labels).astype('str')[index])
# axmatrix.xaxis.set_label_position('top')
axmatrix.xaxis.set_tick_params(labeltop='on')
axmatrix.xaxis.set_tick_params(labelbottom='off')
axmatrix.set_xticks(np.arange(corr_matrix.shape[0])+0.5)
axmatrix.set_xticklabels(np.array(labels).astype('str')[index],
rotation=45,
ha='left')
axmatrix.tick_params(
    axis='x',
    which='both',
    bottom='off',
    top='off')
axmatrix.tick_params(
    axis='y',
    which='both',
    left='off',
    right='off')
# axmatrix.set_xticks([])
# Plot colorbar.
axcolor = fig.add_axes([0.13, 0.065, 0.6, 0.02])
cobar = plt.colorbar(img_mat, cax=axcolor, orientation='horizontal')
cobar.solids.set_edgecolor("face")
if plot_numbers:
for row in range(num_rows):
for col in range(num_rows):
axmatrix.text(row+0.5, col+0.5,
"{:.2f}".format(corr_matrix[row, col]),
ha='center', va='center')
fig.savefig(plotFileName, format=image_format)
| gpl-3.0 |
RomainBrault/scikit-learn | examples/feature_selection/plot_permutation_test_for_classification.py | 94 | 2264 | """
=================================================================
Test with permutations the significance of a classification score
=================================================================
In order to test if a classification score is significant, a common
technique consists in repeating the classification procedure after
randomizing (permuting) the labels. The p-value is then given by the
percentage of runs for which the score obtained is greater than the
classification score obtained in the first place.
"""
# Author: Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.svm import SVC
from sklearn.model_selection import StratifiedKFold
from sklearn.model_selection import permutation_test_score
from sklearn import datasets
##############################################################################
# Loading a dataset
iris = datasets.load_iris()
X = iris.data
y = iris.target
n_classes = np.unique(y).size
# Some noisy data, not correlated with the target
random = np.random.RandomState(seed=0)
E = random.normal(size=(len(X), 2200))
# Add noisy data to the informative features to make the task harder
X = np.c_[X, E]
svm = SVC(kernel='linear')
cv = StratifiedKFold(2)
score, permutation_scores, pvalue = permutation_test_score(
svm, X, y, scoring="accuracy", cv=cv, n_permutations=100, n_jobs=1)
print("Classification score %s (pvalue : %s)" % (score, pvalue))
###############################################################################
# View histogram of permutation scores
plt.hist(permutation_scores, 20, label='Permutation scores')
ylim = plt.ylim()
# BUG: vlines(..., linestyle='--') fails on older versions of matplotlib
#plt.vlines(score, ylim[0], ylim[1], linestyle='--',
# color='g', linewidth=3, label='Classification Score'
# ' (pvalue %s)' % pvalue)
#plt.vlines(1.0 / n_classes, ylim[0], ylim[1], linestyle='--',
# color='k', linewidth=3, label='Luck')
plt.plot(2 * [score], ylim, '--g', linewidth=3,
label='Classification Score'
' (pvalue %s)' % pvalue)
plt.plot(2 * [1. / n_classes], ylim, '--k', linewidth=3, label='Luck')
plt.ylim(ylim)
plt.legend()
plt.xlabel('Score')
plt.show()
| bsd-3-clause |
surgebiswas/poker | PokerBots_2017/Johnny/numpy/core/fromnumeric.py | 22 | 98126 | """Module containing non-deprecated functions borrowed from Numeric.
"""
from __future__ import division, absolute_import, print_function
import types
import warnings
import numpy as np
from .. import VisibleDeprecationWarning
from . import multiarray as mu
from . import umath as um
from . import numerictypes as nt
from .numeric import asarray, array, asanyarray, concatenate
from . import _methods
_dt_ = nt.sctype2char
# functions that are methods
__all__ = [
'alen', 'all', 'alltrue', 'amax', 'amin', 'any', 'argmax',
'argmin', 'argpartition', 'argsort', 'around', 'choose', 'clip',
'compress', 'cumprod', 'cumproduct', 'cumsum', 'diagonal', 'mean',
'ndim', 'nonzero', 'partition', 'prod', 'product', 'ptp', 'put',
'rank', 'ravel', 'repeat', 'reshape', 'resize', 'round_',
'searchsorted', 'shape', 'size', 'sometrue', 'sort', 'squeeze',
'std', 'sum', 'swapaxes', 'take', 'trace', 'transpose', 'var',
]
try:
_gentype = types.GeneratorType
except AttributeError:
_gentype = type(None)
# save away Python sum
_sum_ = sum
# functions that are now methods
def _wrapit(obj, method, *args, **kwds):
try:
wrap = obj.__array_wrap__
except AttributeError:
wrap = None
result = getattr(asarray(obj), method)(*args, **kwds)
if wrap:
if not isinstance(result, mu.ndarray):
result = asarray(result)
result = wrap(result)
return result
def _wrapfunc(obj, method, *args, **kwds):
try:
return getattr(obj, method)(*args, **kwds)
# An AttributeError occurs if the object does not have
# such a method in its class.
# A TypeError occurs if the object does have such a method
# in its class, but its signature is not identical to that
# of NumPy's. This situation has occurred in the case of
# a downstream library like 'pandas'.
except (AttributeError, TypeError):
return _wrapit(obj, method, *args, **kwds)
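# Dispatch sketch (hypothetical call): np.take(x, [0]) resolves to
# _wrapfunc(x, 'take', [0], ...). If `x` has a compatible `take` method it
# is called directly; otherwise _wrapit converts `x` with asarray, calls
# the ndarray method, and re-wraps the result via __array_wrap__ when the
# original object provides one.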
def take(a, indices, axis=None, out=None, mode='raise'):
"""
Take elements from an array along an axis.
This function does the same thing as "fancy" indexing (indexing arrays
using arrays); however, it can be easier to use if you need elements
along a given axis.
Parameters
----------
a : array_like
The source array.
indices : array_like
The indices of the values to extract.
.. versionadded:: 1.8.0
Also allow scalars for indices.
axis : int, optional
The axis over which to select values. By default, the flattened
input array is used.
out : ndarray, optional
If provided, the result will be placed in this array. It should
be of the appropriate shape and dtype.
mode : {'raise', 'wrap', 'clip'}, optional
Specifies how out-of-bounds indices will behave.
* 'raise' -- raise an error (default)
* 'wrap' -- wrap around
* 'clip' -- clip to the range
'clip' mode means that all indices that are too large are replaced
by the index that addresses the last element along that axis. Note
that this disables indexing with negative numbers.
Returns
-------
subarray : ndarray
The returned array has the same type as `a`.
See Also
--------
compress : Take elements using a boolean mask
ndarray.take : equivalent method
Examples
--------
>>> a = [4, 3, 5, 7, 6, 8]
>>> indices = [0, 1, 4]
>>> np.take(a, indices)
array([4, 3, 6])
In this example if `a` is an ndarray, "fancy" indexing can be used.
>>> a = np.array(a)
>>> a[indices]
array([4, 3, 6])
If `indices` is not one dimensional, the output also has these dimensions.
>>> np.take(a, [[0, 1], [2, 3]])
array([[4, 3],
[5, 7]])
"""
return _wrapfunc(a, 'take', indices, axis=axis, out=out, mode=mode)
# not deprecated --- copy if necessary, view otherwise
def reshape(a, newshape, order='C'):
"""
Gives a new shape to an array without changing its data.
Parameters
----------
a : array_like
Array to be reshaped.
newshape : int or tuple of ints
The new shape should be compatible with the original shape. If
an integer, then the result will be a 1-D array of that length.
One shape dimension can be -1. In this case, the value is
inferred from the length of the array and remaining dimensions.
order : {'C', 'F', 'A'}, optional
Read the elements of `a` using this index order, and place the
elements into the reshaped array using this index order. 'C'
means to read / write the elements using C-like index order,
with the last axis index changing fastest, back to the first
axis index changing slowest. 'F' means to read / write the
elements using Fortran-like index order, with the first index
changing fastest, and the last index changing slowest. Note that
the 'C' and 'F' options take no account of the memory layout of
the underlying array, and only refer to the order of indexing.
'A' means to read / write the elements in Fortran-like index
order if `a` is Fortran *contiguous* in memory, C-like order
otherwise.
Returns
-------
reshaped_array : ndarray
This will be a new view object if possible; otherwise, it will
be a copy. Note there is no guarantee of the *memory layout* (C- or
Fortran- contiguous) of the returned array.
See Also
--------
ndarray.reshape : Equivalent method.
Notes
-----
It is not always possible to change the shape of an array without
copying the data. If you want an error to be raised if the data is copied,
you should assign the new shape to the shape attribute of the array::
>>> a = np.zeros((10, 2))
# A transpose makes the array non-contiguous
>>> b = a.T
# Taking a view makes it possible to modify the shape without modifying
# the initial object.
>>> c = b.view()
>>> c.shape = (20)
AttributeError: incompatible shape for a non-contiguous array
The `order` keyword gives the index ordering both for *fetching* the values
from `a`, and then *placing* the values into the output array.
For example, let's say you have an array:
>>> a = np.arange(6).reshape((3, 2))
>>> a
array([[0, 1],
[2, 3],
[4, 5]])
You can think of reshaping as first raveling the array (using the given
index order), then inserting the elements from the raveled array into the
new array using the same kind of index ordering as was used for the
raveling.
>>> np.reshape(a, (2, 3)) # C-like index ordering
array([[0, 1, 2],
[3, 4, 5]])
>>> np.reshape(np.ravel(a), (2, 3)) # equivalent to C ravel then C reshape
array([[0, 1, 2],
[3, 4, 5]])
>>> np.reshape(a, (2, 3), order='F') # Fortran-like index ordering
array([[0, 4, 3],
[2, 1, 5]])
>>> np.reshape(np.ravel(a, order='F'), (2, 3), order='F')
array([[0, 4, 3],
[2, 1, 5]])
Examples
--------
>>> a = np.array([[1,2,3], [4,5,6]])
>>> np.reshape(a, 6)
array([1, 2, 3, 4, 5, 6])
>>> np.reshape(a, 6, order='F')
array([1, 4, 2, 5, 3, 6])
>>> np.reshape(a, (3,-1)) # the unspecified value is inferred to be 2
array([[1, 2],
[3, 4],
[5, 6]])
"""
return _wrapfunc(a, 'reshape', newshape, order=order)
def choose(a, choices, out=None, mode='raise'):
"""
Construct an array from an index array and a set of arrays to choose from.
First of all, if confused or uncertain, definitely look at the Examples -
in its full generality, this function is less simple than it might
seem from the following code description (below ndi =
`numpy.lib.index_tricks`):
``np.choose(a,c) == np.array([c[a[I]][I] for I in ndi.ndindex(a.shape)])``.
But this omits some subtleties. Here is a fully general summary:
Given an "index" array (`a`) of integers and a sequence of `n` arrays
(`choices`), `a` and each choice array are first broadcast, as necessary,
to arrays of a common shape; calling these *Ba* and *Bchoices[i], i =
0,...,n-1* we have that, necessarily, ``Ba.shape == Bchoices[i].shape``
for each `i`. Then, a new array with shape ``Ba.shape`` is created as
follows:
* if ``mode=raise`` (the default), then, first of all, each element of
`a` (and thus `Ba`) must be in the range `[0, n-1]`; now, suppose that
`i` (in that range) is the value at the `(j0, j1, ..., jm)` position
in `Ba` - then the value at the same position in the new array is the
value in `Bchoices[i]` at that same position;
* if ``mode=wrap``, values in `a` (and thus `Ba`) may be any (signed)
integer; modular arithmetic is used to map integers outside the range
`[0, n-1]` back into that range; and then the new array is constructed
as above;
* if ``mode=clip``, values in `a` (and thus `Ba`) may be any (signed)
integer; negative integers are mapped to 0; values greater than `n-1`
are mapped to `n-1`; and then the new array is constructed as above.
Parameters
----------
a : int array
This array must contain integers in `[0, n-1]`, where `n` is the number
of choices, unless ``mode=wrap`` or ``mode=clip``, in which cases any
integers are permissible.
choices : sequence of arrays
Choice arrays. `a` and all of the choices must be broadcastable to the
same shape. If `choices` is itself an array (not recommended), then
its outermost dimension (i.e., the one corresponding to
``choices.shape[0]``) is taken as defining the "sequence".
out : array, optional
If provided, the result will be inserted into this array. It should
be of the appropriate shape and dtype.
mode : {'raise' (default), 'wrap', 'clip'}, optional
Specifies how indices outside `[0, n-1]` will be treated:
* 'raise' : an exception is raised
* 'wrap' : value becomes value mod `n`
* 'clip' : values < 0 are mapped to 0, values > n-1 are mapped to n-1
Returns
-------
merged_array : array
The merged result.
Raises
------
ValueError: shape mismatch
If `a` and each choice array are not all broadcastable to the same
shape.
See Also
--------
ndarray.choose : equivalent method
Notes
-----
To reduce the chance of misinterpretation, even though the following
"abuse" is nominally supported, `choices` should neither be, nor be
thought of as, a single array, i.e., the outermost sequence-like container
should be either a list or a tuple.
Examples
--------
>>> choices = [[0, 1, 2, 3], [10, 11, 12, 13],
... [20, 21, 22, 23], [30, 31, 32, 33]]
>>> np.choose([2, 3, 1, 0], choices
... # the first element of the result will be the first element of the
... # third (2+1) "array" in choices, namely, 20; the second element
... # will be the second element of the fourth (3+1) choice array, i.e.,
... # 31, etc.
... )
array([20, 31, 12, 3])
>>> np.choose([2, 4, 1, 0], choices, mode='clip') # 4 goes to 3 (4-1)
array([20, 31, 12, 3])
>>> # because there are 4 choice arrays
>>> np.choose([2, 4, 1, 0], choices, mode='wrap') # 4 goes to (4 mod 4)
array([20, 1, 12, 3])
>>> # i.e., 0
A couple examples illustrating how choose broadcasts:
>>> a = [[1, 0, 1], [0, 1, 0], [1, 0, 1]]
>>> choices = [-10, 10]
>>> np.choose(a, choices)
array([[ 10, -10, 10],
[-10, 10, -10],
[ 10, -10, 10]])
>>> # With thanks to Anne Archibald
>>> a = np.array([0, 1]).reshape((2,1,1))
>>> c1 = np.array([1, 2, 3]).reshape((1,3,1))
>>> c2 = np.array([-1, -2, -3, -4, -5]).reshape((1,1,5))
>>> np.choose(a, (c1, c2)) # result is 2x3x5, res[0,:,:]=c1, res[1,:,:]=c2
array([[[ 1, 1, 1, 1, 1],
[ 2, 2, 2, 2, 2],
[ 3, 3, 3, 3, 3]],
[[-1, -2, -3, -4, -5],
[-1, -2, -3, -4, -5],
[-1, -2, -3, -4, -5]]])
"""
return _wrapfunc(a, 'choose', choices, out=out, mode=mode)
def repeat(a, repeats, axis=None):
"""
Repeat elements of an array.
Parameters
----------
a : array_like
Input array.
repeats : int or array of ints
The number of repetitions for each element. `repeats` is broadcasted
to fit the shape of the given axis.
axis : int, optional
The axis along which to repeat values. By default, use the
flattened input array, and return a flat output array.
Returns
-------
repeated_array : ndarray
Output array which has the same shape as `a`, except along
the given axis.
See Also
--------
tile : Tile an array.
Examples
--------
>>> np.repeat(3, 4)
array([3, 3, 3, 3])
>>> x = np.array([[1,2],[3,4]])
>>> np.repeat(x, 2)
array([1, 1, 2, 2, 3, 3, 4, 4])
>>> np.repeat(x, 3, axis=1)
array([[1, 1, 1, 2, 2, 2],
[3, 3, 3, 4, 4, 4]])
>>> np.repeat(x, [1, 2], axis=0)
array([[1, 2],
[3, 4],
[3, 4]])
"""
return _wrapfunc(a, 'repeat', repeats, axis=axis)
def put(a, ind, v, mode='raise'):
"""
Replaces specified elements of an array with given values.
The indexing works on the flattened target array. `put` is roughly
equivalent to:
::
a.flat[ind] = v
Parameters
----------
a : ndarray
Target array.
ind : array_like
Target indices, interpreted as integers.
v : array_like
Values to place in `a` at target indices. If `v` is shorter than
`ind` it will be repeated as necessary.
mode : {'raise', 'wrap', 'clip'}, optional
Specifies how out-of-bounds indices will behave.
* 'raise' -- raise an error (default)
* 'wrap' -- wrap around
* 'clip' -- clip to the range
'clip' mode means that all indices that are too large are replaced
by the index that addresses the last element along that axis. Note
that this disables indexing with negative numbers.
See Also
--------
putmask, place
Examples
--------
>>> a = np.arange(5)
>>> np.put(a, [0, 2], [-44, -55])
>>> a
array([-44, 1, -55, 3, 4])
>>> a = np.arange(5)
>>> np.put(a, 22, -5, mode='clip')
>>> a
array([ 0, 1, 2, 3, -5])
"""
try:
put = a.put
except AttributeError:
raise TypeError("argument 1 must be numpy.ndarray, "
"not {name}".format(name=type(a).__name__))
return put(ind, v, mode=mode)
def swapaxes(a, axis1, axis2):
"""
Interchange two axes of an array.
Parameters
----------
a : array_like
Input array.
axis1 : int
First axis.
axis2 : int
Second axis.
Returns
-------
a_swapped : ndarray
For NumPy >= 1.10.0, if `a` is an ndarray, then a view of `a` is
returned; otherwise a new array is created. For earlier NumPy
versions a view of `a` is returned only if the order of the
axes is changed, otherwise the input array is returned.
Examples
--------
>>> x = np.array([[1,2,3]])
>>> np.swapaxes(x,0,1)
array([[1],
[2],
[3]])
>>> x = np.array([[[0,1],[2,3]],[[4,5],[6,7]]])
>>> x
array([[[0, 1],
[2, 3]],
[[4, 5],
[6, 7]]])
>>> np.swapaxes(x,0,2)
array([[[0, 4],
[2, 6]],
[[1, 5],
[3, 7]]])
"""
return _wrapfunc(a, 'swapaxes', axis1, axis2)
def transpose(a, axes=None):
"""
Permute the dimensions of an array.
Parameters
----------
a : array_like
Input array.
axes : list of ints, optional
By default, reverse the dimensions, otherwise permute the axes
according to the values given.
Returns
-------
p : ndarray
`a` with its axes permuted. A view is returned whenever
possible.
See Also
--------
moveaxis
argsort
Notes
-----
Use `transpose(a, argsort(axes))` to invert the transposition of tensors
when using the `axes` keyword argument.
Transposing a 1-D array returns an unchanged view of the original array.
Examples
--------
>>> x = np.arange(4).reshape((2,2))
>>> x
array([[0, 1],
[2, 3]])
>>> np.transpose(x)
array([[0, 2],
[1, 3]])
>>> x = np.ones((1, 2, 3))
>>> np.transpose(x, (1, 0, 2)).shape
(2, 1, 3)
"""
return _wrapfunc(a, 'transpose', axes)
def partition(a, kth, axis=-1, kind='introselect', order=None):
"""
Return a partitioned copy of an array.
Creates a copy of the array with its elements rearranged in such a
way that the value of the element in k-th position is in the
position it would be in a sorted array. All elements smaller than
the k-th element are moved before this element and all equal or
greater are moved behind it. The ordering of the elements in the two
partitions is undefined.
.. versionadded:: 1.8.0
Parameters
----------
a : array_like
Array to be sorted.
kth : int or sequence of ints
Element index to partition by. The k-th element will be in its
final sorted position and all smaller elements will be moved
before it and all equal or greater elements behind it. The
order of all elements in the partitions is undefined. If
provided with a sequence of k-th values, it will partition all
elements indexed by them into their sorted position at once.
axis : int or None, optional
Axis along which to sort. If None, the array is flattened before
sorting. The default is -1, which sorts along the last axis.
kind : {'introselect'}, optional
Selection algorithm. Default is 'introselect'.
order : str or list of str, optional
When `a` is an array with fields defined, this argument
specifies which fields to compare first, second, etc. A single
field can be specified as a string. Not all fields need be
specified, but unspecified fields will still be used, in the
order in which they come up in the dtype, to break ties.
Returns
-------
partitioned_array : ndarray
Array of the same type and shape as `a`.
See Also
--------
ndarray.partition : Method to sort an array in-place.
argpartition : Indirect partition.
sort : Full sorting
Notes
-----
The various selection algorithms are characterized by their average
speed, worst case performance, work space size, and whether they are
stable. A stable sort keeps items with the same key in the same
relative order. The available algorithms have the following
properties:
================= ======= ============= ============ =======
kind speed worst case work space stable
================= ======= ============= ============ =======
'introselect' 1 O(n) 0 no
================= ======= ============= ============ =======
All the partition algorithms make temporary copies of the data when
partitioning along any but the last axis. Consequently,
partitioning along the last axis is faster and uses less space than
partitioning along any other axis.
The sort order for complex numbers is lexicographic. If both the
real and imaginary parts are non-nan then the order is determined by
the real parts except when they are equal, in which case the order
is determined by the imaginary parts.
Examples
--------
>>> a = np.array([3, 4, 2, 1])
>>> np.partition(a, 3)
array([2, 1, 3, 4])
>>> np.partition(a, (1, 3))
array([1, 2, 3, 4])
"""
if axis is None:
a = asanyarray(a).flatten()
axis = 0
else:
a = asanyarray(a).copy(order="K")
a.partition(kth, axis=axis, kind=kind, order=order)
return a
def argpartition(a, kth, axis=-1, kind='introselect', order=None):
"""
Perform an indirect partition along the given axis using the
algorithm specified by the `kind` keyword. It returns an array of
indices of the same shape as `a` that index data along the given
axis in partitioned order.
.. versionadded:: 1.8.0
Parameters
----------
a : array_like
Array to sort.
kth : int or sequence of ints
Element index to partition by. The k-th element will be in its
final sorted position and all smaller elements will be moved
before it and all larger elements behind it. The order of all
elements in the partitions is undefined. If provided with a
sequence of k-th values, it will partition all of them into their
sorted position at once.
axis : int or None, optional
Axis along which to sort. The default is -1 (the last axis). If
None, the flattened array is used.
kind : {'introselect'}, optional
Selection algorithm. Default is 'introselect'
order : str or list of str, optional
When `a` is an array with fields defined, this argument
specifies which fields to compare first, second, etc. A single
field can be specified as a string, and not all fields need be
specified, but unspecified fields will still be used, in the
order in which they come up in the dtype, to break ties.
Returns
-------
index_array : ndarray, int
Array of indices that partition `a` along the specified axis.
In other words, ``a[index_array]`` yields a partitioned `a`.
See Also
--------
partition : Describes partition algorithms used.
ndarray.partition : Inplace partition.
argsort : Full indirect sort
Notes
-----
See `partition` for notes on the different selection algorithms.
Examples
--------
One dimensional array:
>>> x = np.array([3, 4, 2, 1])
>>> x[np.argpartition(x, 3)]
array([2, 1, 3, 4])
>>> x[np.argpartition(x, (1, 3))]
array([1, 2, 3, 4])
>>> x = [3, 4, 2, 1]
>>> np.array(x)[np.argpartition(x, 3)]
array([2, 1, 3, 4])
"""
return _wrapfunc(a, 'argpartition', kth, axis=axis, kind=kind, order=order)
def sort(a, axis=-1, kind='quicksort', order=None):
"""
Return a sorted copy of an array.
Parameters
----------
a : array_like
Array to be sorted.
axis : int or None, optional
Axis along which to sort. If None, the array is flattened before
sorting. The default is -1, which sorts along the last axis.
kind : {'quicksort', 'mergesort', 'heapsort'}, optional
Sorting algorithm. Default is 'quicksort'.
order : str or list of str, optional
When `a` is an array with fields defined, this argument specifies
which fields to compare first, second, etc. A single field can
be specified as a string, and not all fields need be specified,
but unspecified fields will still be used, in the order in which
they come up in the dtype, to break ties.
Returns
-------
sorted_array : ndarray
Array of the same type and shape as `a`.
See Also
--------
ndarray.sort : Method to sort an array in-place.
argsort : Indirect sort.
lexsort : Indirect stable sort on multiple keys.
searchsorted : Find elements in a sorted array.
partition : Partial sort.
Notes
-----
The various sorting algorithms are characterized by their average speed,
worst case performance, work space size, and whether they are stable. A
stable sort keeps items with the same key in the same relative
order. The three available algorithms have the following
properties:
=========== ======= ============= ============ =======
kind speed worst case work space stable
=========== ======= ============= ============ =======
'quicksort' 1 O(n^2) 0 no
'mergesort' 2 O(n*log(n)) ~n/2 yes
'heapsort' 3 O(n*log(n)) 0 no
=========== ======= ============= ============ =======
All the sort algorithms make temporary copies of the data when
sorting along any but the last axis. Consequently, sorting along
the last axis is faster and uses less space than sorting along
any other axis.
The sort order for complex numbers is lexicographic. If both the real
and imaginary parts are non-nan then the order is determined by the
real parts except when they are equal, in which case the order is
determined by the imaginary parts.
Previous to numpy 1.4.0 sorting real and complex arrays containing nan
values led to undefined behaviour. In numpy versions >= 1.4.0 nan
values are sorted to the end. The extended sort order is:
* Real: [R, nan]
* Complex: [R + Rj, R + nanj, nan + Rj, nan + nanj]
where R is a non-nan real value. Complex values with the same nan
placements are sorted according to the non-nan part if it exists.
Non-nan values are sorted as before.
.. versionadded:: 1.12.0
quicksort has been changed to an introsort which will switch to
heapsort when it does not make enough progress. This makes its
worst case O(n*log(n)).
Examples
--------
>>> a = np.array([[1,4],[3,1]])
>>> np.sort(a) # sort along the last axis
array([[1, 4],
[1, 3]])
>>> np.sort(a, axis=None) # sort the flattened array
array([1, 1, 3, 4])
>>> np.sort(a, axis=0) # sort along the first axis
array([[1, 1],
[3, 4]])
Use the `order` keyword to specify a field to use when sorting a
structured array:
>>> dtype = [('name', 'S10'), ('height', float), ('age', int)]
>>> values = [('Arthur', 1.8, 41), ('Lancelot', 1.9, 38),
... ('Galahad', 1.7, 38)]
>>> a = np.array(values, dtype=dtype) # create a structured array
>>> np.sort(a, order='height') # doctest: +SKIP
array([('Galahad', 1.7, 38), ('Arthur', 1.8, 41),
('Lancelot', 1.8999999999999999, 38)],
dtype=[('name', '|S10'), ('height', '<f8'), ('age', '<i4')])
Sort by age, then height if ages are equal:
>>> np.sort(a, order=['age', 'height']) # doctest: +SKIP
array([('Galahad', 1.7, 38), ('Lancelot', 1.8999999999999999, 38),
('Arthur', 1.8, 41)],
dtype=[('name', '|S10'), ('height', '<f8'), ('age', '<i4')])
"""
if axis is None:
a = asanyarray(a).flatten()
axis = 0
else:
a = asanyarray(a).copy(order="K")
a.sort(axis=axis, kind=kind, order=order)
return a
def argsort(a, axis=-1, kind='quicksort', order=None):
"""
Returns the indices that would sort an array.
Perform an indirect sort along the given axis using the algorithm specified
by the `kind` keyword. It returns an array of indices of the same shape as
`a` that index data along the given axis in sorted order.
Parameters
----------
a : array_like
Array to sort.
axis : int or None, optional
Axis along which to sort. The default is -1 (the last axis). If None,
the flattened array is used.
kind : {'quicksort', 'mergesort', 'heapsort'}, optional
Sorting algorithm.
order : str or list of str, optional
When `a` is an array with fields defined, this argument specifies
which fields to compare first, second, etc. A single field can
be specified as a string, and not all fields need be specified,
but unspecified fields will still be used, in the order in which
they come up in the dtype, to break ties.
Returns
-------
index_array : ndarray, int
Array of indices that sort `a` along the specified axis.
If `a` is one-dimensional, ``a[index_array]`` yields a sorted `a`.
See Also
--------
sort : Describes sorting algorithms used.
lexsort : Indirect stable sort with multiple keys.
ndarray.sort : Inplace sort.
argpartition : Indirect partial sort.
Notes
-----
See `sort` for notes on the different sorting algorithms.
As of NumPy 1.4.0 `argsort` works with real/complex arrays containing
nan values. The enhanced sort order is documented in `sort`.
Examples
--------
One dimensional array:
>>> x = np.array([3, 1, 2])
>>> np.argsort(x)
array([1, 2, 0])
Two-dimensional array:
>>> x = np.array([[0, 3], [2, 2]])
>>> x
array([[0, 3],
[2, 2]])
>>> np.argsort(x, axis=0)
array([[0, 1],
[1, 0]])
>>> np.argsort(x, axis=1)
array([[0, 1],
[0, 1]])
Sorting with keys:
>>> x = np.array([(1, 0), (0, 1)], dtype=[('x', '<i4'), ('y', '<i4')])
>>> x
array([(1, 0), (0, 1)],
dtype=[('x', '<i4'), ('y', '<i4')])
>>> np.argsort(x, order=('x','y'))
array([1, 0])
>>> np.argsort(x, order=('y','x'))
array([0, 1])
"""
return _wrapfunc(a, 'argsort', axis=axis, kind=kind, order=order)
def argmax(a, axis=None, out=None):
"""
Returns the indices of the maximum values along an axis.
Parameters
----------
a : array_like
Input array.
axis : int, optional
By default, the index is into the flattened array, otherwise
along the specified axis.
out : array, optional
If provided, the result will be inserted into this array. It should
be of the appropriate shape and dtype.
Returns
-------
index_array : ndarray of ints
Array of indices into the array. It has the same shape as `a.shape`
with the dimension along `axis` removed.
See Also
--------
ndarray.argmax, argmin
amax : The maximum value along a given axis.
unravel_index : Convert a flat index into an index tuple.
Notes
-----
In case of multiple occurrences of the maximum values, the indices
corresponding to the first occurrence are returned.
Examples
--------
>>> a = np.arange(6).reshape(2,3)
>>> a
array([[0, 1, 2],
[3, 4, 5]])
>>> np.argmax(a)
5
>>> np.argmax(a, axis=0)
array([1, 1, 1])
>>> np.argmax(a, axis=1)
array([2, 2])
>>> b = np.arange(6)
>>> b[1] = 5
>>> b
array([0, 5, 2, 3, 4, 5])
>>> np.argmax(b) # Only the first occurrence is returned.
1
"""
return _wrapfunc(a, 'argmax', axis=axis, out=out)
def argmin(a, axis=None, out=None):
"""
Returns the indices of the minimum values along an axis.
Parameters
----------
a : array_like
Input array.
axis : int, optional
By default, the index is into the flattened array, otherwise
along the specified axis.
out : array, optional
If provided, the result will be inserted into this array. It should
be of the appropriate shape and dtype.
Returns
-------
index_array : ndarray of ints
Array of indices into the array. It has the same shape as `a.shape`
with the dimension along `axis` removed.
See Also
--------
ndarray.argmin, argmax
amin : The minimum value along a given axis.
unravel_index : Convert a flat index into an index tuple.
Notes
-----
In case of multiple occurrences of the minimum values, the indices
corresponding to the first occurrence are returned.
Examples
--------
>>> a = np.arange(6).reshape(2,3)
>>> a
array([[0, 1, 2],
[3, 4, 5]])
>>> np.argmin(a)
0
>>> np.argmin(a, axis=0)
array([0, 0, 0])
>>> np.argmin(a, axis=1)
array([0, 0])
>>> b = np.arange(6)
>>> b[4] = 0
>>> b
array([0, 1, 2, 3, 0, 5])
>>> np.argmin(b) # Only the first occurrence is returned.
0
"""
return _wrapfunc(a, 'argmin', axis=axis, out=out)
def searchsorted(a, v, side='left', sorter=None):
"""
Find indices where elements should be inserted to maintain order.
Find the indices into a sorted array `a` such that, if the
corresponding elements in `v` were inserted before the indices, the
order of `a` would be preserved.
Parameters
----------
a : 1-D array_like
Input array. If `sorter` is None, then it must be sorted in
ascending order, otherwise `sorter` must be an array of indices
that sort it.
v : array_like
Values to insert into `a`.
side : {'left', 'right'}, optional
If 'left', the index of the first suitable location found is given.
If 'right', return the last such index. If there is no suitable
index, return either 0 or N (where N is the length of `a`).
sorter : 1-D array_like, optional
Optional array of integer indices that sort array a into ascending
order. They are typically the result of argsort.
.. versionadded:: 1.7.0
Returns
-------
indices : array of ints
Array of insertion points with the same shape as `v`.
See Also
--------
sort : Return a sorted copy of an array.
histogram : Produce histogram from 1-D data.
Notes
-----
Binary search is used to find the required insertion points.
As of NumPy 1.4.0 `searchsorted` works with real/complex arrays containing
`nan` values. The enhanced sort order is documented in `sort`.
Examples
--------
>>> np.searchsorted([1,2,3,4,5], 3)
2
>>> np.searchsorted([1,2,3,4,5], 3, side='right')
3
>>> np.searchsorted([1,2,3,4,5], [-10, 10, 2, 3])
array([0, 5, 1, 2])
"""
return _wrapfunc(a, 'searchsorted', v, side=side, sorter=sorter)
def resize(a, new_shape):
"""
Return a new array with the specified shape.
If the new array is larger than the original array, then the new
array is filled with repeated copies of `a`. Note that this behavior
is different from a.resize(new_shape) which fills with zeros instead
of repeated copies of `a`.
Parameters
----------
a : array_like
Array to be resized.
new_shape : int or tuple of int
Shape of resized array.
Returns
-------
reshaped_array : ndarray
The new array is formed from the data in the old array, repeated
if necessary to fill out the required number of elements. The
data are repeated in the order that they are stored in memory.
See Also
--------
ndarray.resize : resize an array in-place.
Examples
--------
>>> a=np.array([[0,1],[2,3]])
>>> np.resize(a,(2,3))
array([[0, 1, 2],
[3, 0, 1]])
>>> np.resize(a,(1,4))
array([[0, 1, 2, 3]])
>>> np.resize(a,(2,4))
array([[0, 1, 2, 3],
[0, 1, 2, 3]])
"""
if isinstance(new_shape, (int, nt.integer)):
new_shape = (new_shape,)
a = ravel(a)
Na = len(a)
if not Na:
return mu.zeros(new_shape, a.dtype)
total_size = um.multiply.reduce(new_shape)
n_copies = int(total_size / Na)
extra = total_size % Na
if total_size == 0:
return a[:0]
if extra != 0:
n_copies = n_copies+1
extra = Na-extra
a = concatenate((a,)*n_copies)
if extra > 0:
a = a[:-extra]
return reshape(a, new_shape)
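# Worked trace of the cycle-and-trim logic above: resizing a length-4
# array to shape (2, 3) gives total_size = 6, n_copies = 1, extra = 2;
# since extra != 0, n_copies becomes 2 and extra becomes 4 - 2 = 2, so two
# concatenated copies (length 8) are trimmed by 2 before the final reshape.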
def squeeze(a, axis=None):
"""
Remove single-dimensional entries from the shape of an array.
Parameters
----------
a : array_like
Input data.
axis : None or int or tuple of ints, optional
.. versionadded:: 1.7.0
Selects a subset of the single-dimensional entries in the
shape. If an axis is selected with shape entry greater than
one, an error is raised.
Returns
-------
squeezed : ndarray
The input array, but with all or a subset of the
dimensions of length 1 removed. This is always `a` itself
or a view into `a`.
Examples
--------
>>> x = np.array([[[0], [1], [2]]])
>>> x.shape
(1, 3, 1)
>>> np.squeeze(x).shape
(3,)
>>> np.squeeze(x, axis=(2,)).shape
(1, 3)
"""
try:
squeeze = a.squeeze
except AttributeError:
return _wrapit(a, 'squeeze')
try:
# First try to use the new axis= parameter
return squeeze(axis=axis)
except TypeError:
# For backwards compatibility
return squeeze()
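# Illustration of the ``axis`` restriction (assumes the usual
# ``import numpy as np``): selecting an axis whose length is greater than
# one raises, rather than silently keeping the dimension.
#
#   >>> x = np.zeros((1, 3, 1))
#   >>> np.squeeze(x, axis=0).shape
#   (3, 1)
#   >>> np.squeeze(x, axis=1)                 # length-3 axis -> ValueError
#   Traceback (most recent call last):
#     ...
#   ValueError: cannot select an axis to squeeze out which has size not equal to one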
def diagonal(a, offset=0, axis1=0, axis2=1):
"""
Return specified diagonals.
If `a` is 2-D, returns the diagonal of `a` with the given offset,
i.e., the collection of elements of the form ``a[i, i+offset]``. If
`a` has more than two dimensions, then the axes specified by `axis1`
and `axis2` are used to determine the 2-D sub-array whose diagonal is
returned. The shape of the resulting array can be determined by
removing `axis1` and `axis2` and appending an index to the right equal
to the size of the resulting diagonals.
In versions of NumPy prior to 1.7, this function always returned a new,
independent array containing a copy of the values in the diagonal.
In NumPy 1.7 and 1.8, it continues to return a copy of the diagonal,
but depending on this fact is deprecated. Writing to the resulting
array continues to work as it used to, but a FutureWarning is issued.
Starting in NumPy 1.9 it returns a read-only view on the original array.
Attempting to write to the resulting array will produce an error.
In some future release, it will return a read/write view and writing to
the returned array will alter your original array. The returned array
will have the same type as the input array.
If you don't write to the array returned by this function, then you can
just ignore all of the above.
If you depend on the current behavior, then we suggest copying the
returned array explicitly, i.e., use ``np.diagonal(a).copy()`` instead
of just ``np.diagonal(a)``. This will work with both past and future
versions of NumPy.
Parameters
----------
a : array_like
Array from which the diagonals are taken.
offset : int, optional
Offset of the diagonal from the main diagonal. Can be positive or
negative. Defaults to main diagonal (0).
axis1 : int, optional
Axis to be used as the first axis of the 2-D sub-arrays from which
the diagonals should be taken. Defaults to first axis (0).
axis2 : int, optional
Axis to be used as the second axis of the 2-D sub-arrays from
which the diagonals should be taken. Defaults to second axis (1).
Returns
-------
array_of_diagonals : ndarray
If `a` is 2-D and not a matrix, a 1-D array of the same type as `a`
containing the diagonal is returned. If `a` is a matrix, a 1-D
array containing the diagonal is returned in order to maintain
backward compatibility. If the dimension of `a` is greater than
two, then an array of diagonals is returned, "packed" from
left-most dimension to right-most (e.g., if `a` is 3-D, then the
diagonals are "packed" along rows).
Raises
------
ValueError
If the dimension of `a` is less than 2.
See Also
--------
diag : MATLAB work-a-like for 1-D and 2-D arrays.
diagflat : Create diagonal arrays.
trace : Sum along diagonals.
Examples
--------
>>> a = np.arange(4).reshape(2,2)
>>> a
array([[0, 1],
[2, 3]])
>>> a.diagonal()
array([0, 3])
>>> a.diagonal(1)
array([1])
A 3-D example:
>>> a = np.arange(8).reshape(2,2,2); a
array([[[0, 1],
[2, 3]],
[[4, 5],
[6, 7]]])
>>> a.diagonal(0, # Main diagonals of two arrays created by skipping
... 0, # across the outer(left)-most axis last and
... 1) # the "middle" (row) axis first.
array([[0, 6],
[1, 7]])
The sub-arrays whose main diagonals we just obtained; note that each
corresponds to fixing the right-most (column) axis, and that the
diagonals are "packed" in rows.
>>> a[:,:,0] # main diagonal is [0 6]
array([[0, 2],
[4, 6]])
>>> a[:,:,1] # main diagonal is [1 7]
array([[1, 3],
[5, 7]])
"""
if isinstance(a, np.matrix):
# Make diagonal of matrix 1-D to preserve backward compatibility.
return asarray(a).diagonal(offset=offset, axis1=axis1, axis2=axis2)
else:
return asanyarray(a).diagonal(offset=offset, axis1=axis1, axis2=axis2)
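# Illustration of the defensive copy recommended in the docstring above
# (assumes the usual ``import numpy as np``): copying keeps the result
# safely writable across NumPy versions.
#
#   >>> a = np.arange(9).reshape(3, 3)
#   >>> d = np.diagonal(a, offset=1).copy()   # explicit, writable copy
#   >>> d[0] = 99
#   >>> a[0, 1]                               # the original is untouched
#   1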
def trace(a, offset=0, axis1=0, axis2=1, dtype=None, out=None):
"""
Return the sum along diagonals of the array.
If `a` is 2-D, the sum along its diagonal with the given offset
is returned, i.e., the sum of elements ``a[i,i+offset]`` for all i.
If `a` has more than two dimensions, then the axes specified by axis1 and
axis2 are used to determine the 2-D sub-arrays whose traces are returned.
The shape of the resulting array is the same as that of `a` with `axis1`
and `axis2` removed.
Parameters
----------
a : array_like
Input array, from which the diagonals are taken.
offset : int, optional
Offset of the diagonal from the main diagonal. Can be both positive
and negative. Defaults to 0.
axis1, axis2 : int, optional
Axes to be used as the first and second axis of the 2-D sub-arrays
from which the diagonals should be taken. Defaults are the first two
axes of `a`.
dtype : dtype, optional
Determines the data-type of the returned array and of the accumulator
where the elements are summed. If dtype has the value None and `a` is
of integer type of precision less than the default integer
precision, then the default integer precision is used. Otherwise,
the precision is the same as that of `a`.
out : ndarray, optional
Array into which the output is placed. Its type is preserved and
it must be of the right shape to hold the output.
Returns
-------
sum_along_diagonals : ndarray
If `a` is 2-D, the sum along the diagonal is returned. If `a` has
larger dimensions, then an array of sums along diagonals is returned.
See Also
--------
diag, diagonal, diagflat
Examples
--------
>>> np.trace(np.eye(3))
3.0
>>> a = np.arange(8).reshape((2,2,2))
>>> np.trace(a)
array([6, 8])
>>> a = np.arange(24).reshape((2,2,2,3))
>>> np.trace(a).shape
(2, 3)
"""
if isinstance(a, np.matrix):
# Get trace of matrix via an array to preserve backward compatibility.
return asarray(a).trace(offset=offset, axis1=axis1, axis2=axis2, dtype=dtype, out=out)
else:
return asanyarray(a).trace(offset=offset, axis1=axis1, axis2=axis2, dtype=dtype, out=out)
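# Illustration of ``offset`` (assumes the usual ``import numpy as np``):
# positive offsets walk the super-diagonals, negative ones the
# sub-diagonals.
#
#   >>> a = np.arange(9).reshape(3, 3)
#   >>> np.trace(a)                           # 0 + 4 + 8
#   12
#   >>> np.trace(a, offset=1)                 # 1 + 5
#   6
#   >>> np.trace(a, offset=-1)                # 3 + 7
#   10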
def ravel(a, order='C'):
"""Return a contiguous flattened array.
A 1-D array, containing the elements of the input, is returned. A copy is
made only if needed.
As of NumPy 1.10, the returned array will have the same type as the input
array. (for example, a masked array will be returned for a masked array
input)
Parameters
----------
a : array_like
Input array. The elements in `a` are read in the order specified by
`order`, and packed as a 1-D array.
order : {'C','F', 'A', 'K'}, optional
The elements of `a` are read using this index order. 'C' means
to index the elements in row-major, C-style order,
with the last axis index changing fastest, back to the first
axis index changing slowest. 'F' means to index the elements
in column-major, Fortran-style order, with the
first index changing fastest, and the last index changing
slowest. Note that the 'C' and 'F' options take no account of
the memory layout of the underlying array, and only refer to
the order of axis indexing. 'A' means to read the elements in
Fortran-like index order if `a` is Fortran *contiguous* in
memory, C-like order otherwise. 'K' means to read the
elements in the order they occur in memory, except for
reversing the data when strides are negative. By default, 'C'
index order is used.
Returns
-------
y : array_like
If `a` is a matrix, y is a 1-D ndarray, otherwise y is an array of
the same subtype as `a`. The shape of the returned array is
``(a.size,)``. Matrices are special cased for backward
compatibility.
See Also
--------
ndarray.flat : 1-D iterator over an array.
ndarray.flatten : 1-D array copy of the elements of an array
in row-major order.
ndarray.reshape : Change the shape of an array without changing its data.
Notes
-----
In row-major, C-style order, in two dimensions, the row index
varies the slowest, and the column index the quickest. This can
be generalized to multiple dimensions, where row-major order
implies that the index along the first axis varies slowest, and
the index along the last quickest. The opposite holds for
column-major, Fortran-style index ordering.
When a view is desired in as many cases as possible, ``arr.reshape(-1)``
may be preferable.
Examples
--------
It is equivalent to ``reshape(-1, order=order)``.
>>> x = np.array([[1, 2, 3], [4, 5, 6]])
>>> print(np.ravel(x))
[1 2 3 4 5 6]
>>> print(x.reshape(-1))
[1 2 3 4 5 6]
>>> print(np.ravel(x, order='F'))
[1 4 2 5 3 6]
When ``order`` is 'A', it will preserve the array's 'C' or 'F' ordering:
>>> print(np.ravel(x.T))
[1 4 2 5 3 6]
>>> print(np.ravel(x.T, order='A'))
[1 2 3 4 5 6]
When ``order`` is 'K', it will preserve orderings that are neither 'C'
nor 'F', but won't reverse axes:
>>> a = np.arange(3)[::-1]; a
array([2, 1, 0])
>>> a.ravel(order='C')
array([2, 1, 0])
>>> a.ravel(order='K')
array([2, 1, 0])
>>> a = np.arange(12).reshape(2,3,2).swapaxes(1,2); a
array([[[ 0, 2, 4],
[ 1, 3, 5]],
[[ 6, 8, 10],
[ 7, 9, 11]]])
>>> a.ravel(order='C')
array([ 0, 2, 4, 1, 3, 5, 6, 8, 10, 7, 9, 11])
>>> a.ravel(order='K')
array([ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11])
"""
if isinstance(a, np.matrix):
return asarray(a).ravel(order=order)
else:
return asanyarray(a).ravel(order=order)
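# Illustration of the view-vs-copy distinction (assumes the usual
# ``import numpy as np``): ``ravel`` returns a view when the memory layout
# allows, whereas ``flatten`` always copies.
#
#   >>> x = np.arange(6).reshape(2, 3)
#   >>> r = np.ravel(x)                       # view for C-contiguous input
#   >>> r[0] = 99
#   >>> x[0, 0]
#   99
#   >>> f = x.flatten()                       # always an independent copy
#   >>> f[0] = -1
#   >>> x[0, 0]
#   99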
def nonzero(a):
"""
Return the indices of the elements that are non-zero.
Returns a tuple of arrays, one for each dimension of `a`,
containing the indices of the non-zero elements in that
dimension. The values in `a` are always tested and returned in
row-major, C-style order. The corresponding non-zero
values can be obtained with::
a[nonzero(a)]
To group the indices by element, rather than dimension, use::
transpose(nonzero(a))
The result of this is always a 2-D array, with a row for
each non-zero element.
Parameters
----------
a : array_like
Input array.
Returns
-------
tuple_of_arrays : tuple
Indices of elements that are non-zero.
See Also
--------
flatnonzero :
Return indices that are non-zero in the flattened version of the input
array.
ndarray.nonzero :
Equivalent ndarray method.
count_nonzero :
Counts the number of non-zero elements in the input array.
Examples
--------
>>> x = np.eye(3)
>>> x
array([[ 1., 0., 0.],
[ 0., 1., 0.],
[ 0., 0., 1.]])
>>> np.nonzero(x)
(array([0, 1, 2]), array([0, 1, 2]))
>>> x[np.nonzero(x)]
array([ 1., 1., 1.])
>>> np.transpose(np.nonzero(x))
array([[0, 0],
[1, 1],
[2, 2]])
A common use for ``nonzero`` is to find the indices of an array, where
a condition is True. Given an array `a`, the condition `a` > 3 is a
boolean array and since False is interpreted as 0, np.nonzero(a > 3)
yields the indices of the `a` where the condition is true.
>>> a = np.array([[1,2,3],[4,5,6],[7,8,9]])
>>> a > 3
array([[False, False, False],
[ True, True, True],
[ True, True, True]], dtype=bool)
>>> np.nonzero(a > 3)
(array([1, 1, 1, 2, 2, 2]), array([0, 1, 2, 0, 1, 2]))
The ``nonzero`` method of the boolean array can also be called.
>>> (a > 3).nonzero()
(array([1, 1, 1, 2, 2, 2]), array([0, 1, 2, 0, 1, 2]))
"""
return _wrapfunc(a, 'nonzero')
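# Illustration of the grouped-index form (assumes the usual
# ``import numpy as np``): transposing the result lists one coordinate row
# per non-zero element, matching ``np.argwhere``.
#
#   >>> a = np.array([[0, 4], [3, 0]])
#   >>> np.transpose(np.nonzero(a))
#   array([[0, 1],
#          [1, 0]])
#   >>> np.argwhere(a)                        # same coordinates, same layout
#   array([[0, 1],
#          [1, 0]])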
def shape(a):
"""
Return the shape of an array.
Parameters
----------
a : array_like
Input array.
Returns
-------
shape : tuple of ints
The elements of the shape tuple give the lengths of the
corresponding array dimensions.
See Also
--------
alen
ndarray.shape : Equivalent array method.
Examples
--------
>>> np.shape(np.eye(3))
(3, 3)
>>> np.shape([[1, 2]])
(1, 2)
>>> np.shape([0])
(1,)
>>> np.shape(0)
()
>>> a = np.array([(1, 2), (3, 4)], dtype=[('x', 'i4'), ('y', 'i4')])
>>> np.shape(a)
(2,)
>>> a.shape
(2,)
"""
try:
result = a.shape
except AttributeError:
result = asarray(a).shape
return result
def compress(condition, a, axis=None, out=None):
"""
Return selected slices of an array along given axis.
When working along a given axis, a slice along that axis is returned in
`output` for each index where `condition` evaluates to True. When
working on a 1-D array, `compress` is equivalent to `extract`.
Parameters
----------
condition : 1-D array of bools
Array that selects which entries to return. If len(condition)
is less than the size of `a` along the given axis, then output is
truncated to the length of the condition array.
a : array_like
Array from which to extract a part.
axis : int, optional
Axis along which to take slices. If None (default), work on the
flattened array.
out : ndarray, optional
Output array. Its type is preserved and it must be of the right
shape to hold the output.
Returns
-------
compressed_array : ndarray
A copy of `a` without the slices along axis for which `condition`
is false.
See Also
--------
take, choose, diag, diagonal, select
ndarray.compress : Equivalent method in ndarray
np.extract: Equivalent method when working on 1-D arrays
numpy.doc.ufuncs : Section "Output arguments"
Examples
--------
>>> a = np.array([[1, 2], [3, 4], [5, 6]])
>>> a
array([[1, 2],
[3, 4],
[5, 6]])
>>> np.compress([0, 1], a, axis=0)
array([[3, 4]])
>>> np.compress([False, True, True], a, axis=0)
array([[3, 4],
[5, 6]])
>>> np.compress([False, True], a, axis=1)
array([[2],
[4],
[6]])
Working on the flattened array does not return slices along an axis but
selects elements.
>>> np.compress([False, True], a)
array([2])
"""
return _wrapfunc(a, 'compress', condition, axis=axis, out=out)
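# Illustration of the equivalence with boolean indexing (assumes the usual
# ``import numpy as np``): along an axis, ``compress`` matches fancy
# indexing with a mask of the same length.
#
#   >>> a = np.arange(6).reshape(3, 2)
#   >>> np.compress([True, False, True], a, axis=0)
#   array([[0, 1],
#          [4, 5]])
#   >>> a[np.array([True, False, True])]      # equivalent boolean indexing
#   array([[0, 1],
#          [4, 5]])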
def clip(a, a_min, a_max, out=None):
"""
Clip (limit) the values in an array.
Given an interval, values outside the interval are clipped to
the interval edges. For example, if an interval of ``[0, 1]``
is specified, values smaller than 0 become 0, and values larger
than 1 become 1.
Parameters
----------
a : array_like
Array containing elements to clip.
a_min : scalar or array_like
Minimum value.
a_max : scalar or array_like
Maximum value. If `a_min` or `a_max` are array_like, then they will
be broadcasted to the shape of `a`.
out : ndarray, optional
The results will be placed in this array. It may be the input
array for in-place clipping. `out` must be of the right shape
to hold the output. Its type is preserved.
Returns
-------
clipped_array : ndarray
An array with the elements of `a`, but where values
< `a_min` are replaced with `a_min`, and those > `a_max`
with `a_max`.
See Also
--------
numpy.doc.ufuncs : Section "Output arguments"
Examples
--------
>>> a = np.arange(10)
>>> np.clip(a, 1, 8)
array([1, 1, 2, 3, 4, 5, 6, 7, 8, 8])
>>> a
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
>>> np.clip(a, 3, 6, out=a)
array([3, 3, 3, 3, 4, 5, 6, 6, 6, 6])
>>> a = np.arange(10)
>>> a
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
>>> np.clip(a, [3,4,1,1,1,4,4,4,4,4], 8)
array([3, 4, 2, 3, 4, 5, 6, 7, 8, 8])
"""
return _wrapfunc(a, 'clip', a_min, a_max, out=out)
def sum(a, axis=None, dtype=None, out=None, keepdims=np._NoValue):
"""
Sum of array elements over a given axis.
Parameters
----------
a : array_like
Elements to sum.
axis : None or int or tuple of ints, optional
Axis or axes along which a sum is performed. The default,
axis=None, will sum all of the elements of the input array. If
axis is negative it counts from the last to the first axis.
.. versionadded:: 1.7.0
If axis is a tuple of ints, a sum is performed on all of the axes
specified in the tuple instead of a single axis or all the axes as
before.
dtype : dtype, optional
The type of the returned array and of the accumulator in which the
elements are summed. The dtype of `a` is used by default unless `a`
has an integer dtype of less precision than the default platform
integer. In that case, if `a` is signed then the platform integer
is used while if `a` is unsigned then an unsigned integer of the
same precision as the platform integer is used.
out : ndarray, optional
Alternative output array in which to place the result. It must have
the same shape as the expected output, but the type of the output
values will be cast if necessary.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
in the result as dimensions with size one. With this option,
the result will broadcast correctly against the input array.
If the default value is passed, then `keepdims` will not be
passed through to the `sum` method of sub-classes of
`ndarray`, however any non-default value will be. If the
        sub-class's `sum` method does not implement `keepdims`, any
exceptions will be raised.
Returns
-------
sum_along_axis : ndarray
An array with the same shape as `a`, with the specified
axis removed. If `a` is a 0-d array, or if `axis` is None, a scalar
is returned. If an output array is specified, a reference to
`out` is returned.
See Also
--------
ndarray.sum : Equivalent method.
cumsum : Cumulative sum of array elements.
trapz : Integration of array values using the composite trapezoidal rule.
mean, average
Notes
-----
Arithmetic is modular when using integer types, and no error is
raised on overflow.
The sum of an empty array is the neutral element 0:
>>> np.sum([])
0.0
Examples
--------
>>> np.sum([0.5, 1.5])
2.0
>>> np.sum([0.5, 0.7, 0.2, 1.5], dtype=np.int32)
1
>>> np.sum([[0, 1], [0, 5]])
6
>>> np.sum([[0, 1], [0, 5]], axis=0)
array([0, 6])
>>> np.sum([[0, 1], [0, 5]], axis=1)
array([1, 5])
If the accumulator is too small, overflow occurs:
>>> np.ones(128, dtype=np.int8).sum(dtype=np.int8)
-128
"""
kwargs = {}
if keepdims is not np._NoValue:
kwargs['keepdims'] = keepdims
if isinstance(a, _gentype):
res = _sum_(a)
if out is not None:
out[...] = res
return out
return res
if type(a) is not mu.ndarray:
try:
sum = a.sum
except AttributeError:
pass
else:
return sum(axis=axis, dtype=dtype, out=out, **kwargs)
return _methods._sum(a, axis=axis, dtype=dtype,
out=out, **kwargs)
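# Illustration of ``keepdims`` (assumes the usual ``import numpy as np``):
# keeping the reduced axis lets the result broadcast straight back against
# the input, here normalizing each row.
#
#   >>> x = np.arange(6.).reshape(2, 3)
#   >>> s = np.sum(x, axis=1, keepdims=True)  # shape (2, 1), not (2,)
#   >>> (x / s).sum(axis=1)                   # rows now sum to one
#   array([ 1.,  1.])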
def product(a, axis=None, dtype=None, out=None, keepdims=np._NoValue):
"""
Return the product of array elements over a given axis.
See Also
--------
prod : equivalent function; see for details.
"""
kwargs = {}
if keepdims is not np._NoValue:
kwargs['keepdims'] = keepdims
return um.multiply.reduce(a, axis=axis, dtype=dtype, out=out, **kwargs)
def sometrue(a, axis=None, out=None, keepdims=np._NoValue):
"""
Check whether some values are true.
Refer to `any` for full documentation.
See Also
--------
any : equivalent function
"""
arr = asanyarray(a)
kwargs = {}
if keepdims is not np._NoValue:
kwargs['keepdims'] = keepdims
return arr.any(axis=axis, out=out, **kwargs)
def alltrue(a, axis=None, out=None, keepdims=np._NoValue):
"""
Check if all elements of input array are true.
See Also
--------
numpy.all : Equivalent function; see for details.
"""
arr = asanyarray(a)
kwargs = {}
if keepdims is not np._NoValue:
kwargs['keepdims'] = keepdims
return arr.all(axis=axis, out=out, **kwargs)
def any(a, axis=None, out=None, keepdims=np._NoValue):
"""
Test whether any array element along a given axis evaluates to True.
    Returns a single boolean unless `axis` is not ``None``.
Parameters
----------
a : array_like
Input array or object that can be converted to an array.
axis : None or int or tuple of ints, optional
Axis or axes along which a logical OR reduction is performed.
The default (`axis` = `None`) is to perform a logical OR over all
the dimensions of the input array. `axis` may be negative, in
which case it counts from the last to the first axis.
.. versionadded:: 1.7.0
If this is a tuple of ints, a reduction is performed on multiple
axes, instead of a single axis or all the axes as before.
out : ndarray, optional
Alternate output array in which to place the result. It must have
the same shape as the expected output and its type is preserved
(e.g., if it is of type float, then it will remain so, returning
1.0 for True and 0.0 for False, regardless of the type of `a`).
See `doc.ufuncs` (Section "Output arguments") for details.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
in the result as dimensions with size one. With this option,
the result will broadcast correctly against the input array.
If the default value is passed, then `keepdims` will not be
passed through to the `any` method of sub-classes of
`ndarray`, however any non-default value will be. If the
        sub-class's `any` method does not implement `keepdims`, any
exceptions will be raised.
Returns
-------
any : bool or ndarray
A new boolean or `ndarray` is returned unless `out` is specified,
in which case a reference to `out` is returned.
See Also
--------
ndarray.any : equivalent method
all : Test whether all elements along a given axis evaluate to True.
Notes
-----
Not a Number (NaN), positive infinity and negative infinity evaluate
to `True` because these are not equal to zero.
Examples
--------
>>> np.any([[True, False], [True, True]])
True
>>> np.any([[True, False], [False, False]], axis=0)
array([ True, False], dtype=bool)
>>> np.any([-1, 0, 5])
True
>>> np.any(np.nan)
True
>>> o=np.array([False])
>>> z=np.any([-1, 4, 5], out=o)
>>> z, o
(array([ True], dtype=bool), array([ True], dtype=bool))
>>> # Check now that z is a reference to o
>>> z is o
True
>>> id(z), id(o) # identity of z and o # doctest: +SKIP
(191614240, 191614240)
"""
arr = asanyarray(a)
kwargs = {}
if keepdims is not np._NoValue:
kwargs['keepdims'] = keepdims
return arr.any(axis=axis, out=out, **kwargs)
def all(a, axis=None, out=None, keepdims=np._NoValue):
"""
Test whether all array elements along a given axis evaluate to True.
Parameters
----------
a : array_like
Input array or object that can be converted to an array.
axis : None or int or tuple of ints, optional
Axis or axes along which a logical AND reduction is performed.
The default (`axis` = `None`) is to perform a logical AND over all
the dimensions of the input array. `axis` may be negative, in
which case it counts from the last to the first axis.
.. versionadded:: 1.7.0
If this is a tuple of ints, a reduction is performed on multiple
axes, instead of a single axis or all the axes as before.
out : ndarray, optional
Alternate output array in which to place the result.
It must have the same shape as the expected output and its
type is preserved (e.g., if ``dtype(out)`` is float, the result
will consist of 0.0's and 1.0's). See `doc.ufuncs` (Section
"Output arguments") for more details.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
in the result as dimensions with size one. With this option,
the result will broadcast correctly against the input array.
If the default value is passed, then `keepdims` will not be
passed through to the `all` method of sub-classes of
`ndarray`, however any non-default value will be. If the
        sub-class's `all` method does not implement `keepdims`, any
exceptions will be raised.
Returns
-------
all : ndarray, bool
A new boolean or array is returned unless `out` is specified,
in which case a reference to `out` is returned.
See Also
--------
ndarray.all : equivalent method
any : Test whether any element along a given axis evaluates to True.
Notes
-----
Not a Number (NaN), positive infinity and negative infinity
evaluate to `True` because these are not equal to zero.
Examples
--------
>>> np.all([[True,False],[True,True]])
False
>>> np.all([[True,False],[True,True]], axis=0)
array([ True, False], dtype=bool)
>>> np.all([-1, 4, 5])
True
>>> np.all([1.0, np.nan])
True
>>> o=np.array([False])
>>> z=np.all([-1, 4, 5], out=o)
>>> id(z), id(o), z # doctest: +SKIP
(28293632, 28293632, array([ True], dtype=bool))
"""
arr = asanyarray(a)
kwargs = {}
if keepdims is not np._NoValue:
kwargs['keepdims'] = keepdims
return arr.all(axis=axis, out=out, **kwargs)
def cumsum(a, axis=None, dtype=None, out=None):
"""
Return the cumulative sum of the elements along a given axis.
Parameters
----------
a : array_like
Input array.
axis : int, optional
Axis along which the cumulative sum is computed. The default
(None) is to compute the cumsum over the flattened array.
dtype : dtype, optional
Type of the returned array and of the accumulator in which the
elements are summed. If `dtype` is not specified, it defaults
to the dtype of `a`, unless `a` has an integer dtype with a
precision less than that of the default platform integer. In
that case, the default platform integer is used.
out : ndarray, optional
Alternative output array in which to place the result. It must
have the same shape and buffer length as the expected output
but the type will be cast if necessary. See `doc.ufuncs`
(Section "Output arguments") for more details.
Returns
-------
cumsum_along_axis : ndarray.
A new array holding the result is returned unless `out` is
specified, in which case a reference to `out` is returned. The
result has the same size as `a`, and the same shape as `a` if
`axis` is not None or `a` is a 1-d array.
See Also
--------
sum : Sum array elements.
trapz : Integration of array values using the composite trapezoidal rule.
diff : Calculate the n-th discrete difference along given axis.
Notes
-----
Arithmetic is modular when using integer types, and no error is
raised on overflow.
Examples
--------
>>> a = np.array([[1,2,3], [4,5,6]])
>>> a
array([[1, 2, 3],
[4, 5, 6]])
>>> np.cumsum(a)
array([ 1, 3, 6, 10, 15, 21])
>>> np.cumsum(a, dtype=float) # specifies type of output value(s)
array([ 1., 3., 6., 10., 15., 21.])
>>> np.cumsum(a,axis=0) # sum over rows for each of the 3 columns
array([[1, 2, 3],
[5, 7, 9]])
>>> np.cumsum(a,axis=1) # sum over columns for each of the 2 rows
array([[ 1, 3, 6],
[ 4, 9, 15]])
"""
return _wrapfunc(a, 'cumsum', axis=axis, dtype=dtype, out=out)
def cumproduct(a, axis=None, dtype=None, out=None):
"""
Return the cumulative product over the given axis.
See Also
--------
cumprod : equivalent function; see for details.
"""
return _wrapfunc(a, 'cumprod', axis=axis, dtype=dtype, out=out)
def ptp(a, axis=None, out=None):
"""
Range of values (maximum - minimum) along an axis.
The name of the function comes from the acronym for 'peak to peak'.
Parameters
----------
a : array_like
Input values.
axis : int, optional
Axis along which to find the peaks. By default, flatten the
array.
out : array_like
Alternative output array in which to place the result. It must
have the same shape and buffer length as the expected output,
but the type of the output values will be cast if necessary.
Returns
-------
ptp : ndarray
A new array holding the result, unless `out` was
specified, in which case a reference to `out` is returned.
Examples
--------
>>> x = np.arange(4).reshape((2,2))
>>> x
array([[0, 1],
[2, 3]])
>>> np.ptp(x, axis=0)
array([2, 2])
>>> np.ptp(x, axis=1)
array([1, 1])
"""
return _wrapfunc(a, 'ptp', axis=axis, out=out)
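# Illustration of the flattened default (assumes the usual
# ``import numpy as np``): without ``axis``, the range is taken over all
# elements.
#
#   >>> x = np.array([[4, 9, 2], [3, 6, 7]])
#   >>> np.ptp(x)                             # 9 - 2
#   7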
def amax(a, axis=None, out=None, keepdims=np._NoValue):
"""
Return the maximum of an array or maximum along an axis.
Parameters
----------
a : array_like
Input data.
axis : None or int or tuple of ints, optional
Axis or axes along which to operate. By default, flattened input is
used.
.. versionadded:: 1.7.0
If this is a tuple of ints, the maximum is selected over multiple axes,
instead of a single axis or all the axes as before.
out : ndarray, optional
Alternative output array in which to place the result. Must
be of the same shape and buffer length as the expected output.
See `doc.ufuncs` (Section "Output arguments") for more details.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
in the result as dimensions with size one. With this option,
the result will broadcast correctly against the input array.
If the default value is passed, then `keepdims` will not be
passed through to the `amax` method of sub-classes of
`ndarray`, however any non-default value will be. If the
        sub-class's `amax` method does not implement `keepdims`, any
exceptions will be raised.
Returns
-------
amax : ndarray or scalar
Maximum of `a`. If `axis` is None, the result is a scalar value.
If `axis` is given, the result is an array of dimension
``a.ndim - 1``.
See Also
--------
amin :
The minimum value of an array along a given axis, propagating any NaNs.
nanmax :
The maximum value of an array along a given axis, ignoring any NaNs.
maximum :
Element-wise maximum of two arrays, propagating any NaNs.
fmax :
Element-wise maximum of two arrays, ignoring any NaNs.
argmax :
Return the indices of the maximum values.
nanmin, minimum, fmin
Notes
-----
NaN values are propagated, that is if at least one item is NaN, the
corresponding max value will be NaN as well. To ignore NaN values
(MATLAB behavior), please use nanmax.
Don't use `amax` for element-wise comparison of 2 arrays; when
``a.shape[0]`` is 2, ``maximum(a[0], a[1])`` is faster than
``amax(a, axis=0)``.
Examples
--------
>>> a = np.arange(4).reshape((2,2))
>>> a
array([[0, 1],
[2, 3]])
>>> np.amax(a) # Maximum of the flattened array
3
>>> np.amax(a, axis=0) # Maxima along the first axis
array([2, 3])
>>> np.amax(a, axis=1) # Maxima along the second axis
array([1, 3])
>>> b = np.arange(5, dtype=np.float)
>>> b[2] = np.NaN
>>> np.amax(b)
nan
>>> np.nanmax(b)
4.0
"""
kwargs = {}
if keepdims is not np._NoValue:
kwargs['keepdims'] = keepdims
if type(a) is not mu.ndarray:
try:
amax = a.max
except AttributeError:
pass
else:
return amax(axis=axis, out=out, **kwargs)
return _methods._amax(a, axis=axis,
out=out, **kwargs)
def amin(a, axis=None, out=None, keepdims=np._NoValue):
"""
Return the minimum of an array or minimum along an axis.
Parameters
----------
a : array_like
Input data.
axis : None or int or tuple of ints, optional
Axis or axes along which to operate. By default, flattened input is
used.
.. versionadded:: 1.7.0
If this is a tuple of ints, the minimum is selected over multiple axes,
instead of a single axis or all the axes as before.
out : ndarray, optional
Alternative output array in which to place the result. Must
be of the same shape and buffer length as the expected output.
See `doc.ufuncs` (Section "Output arguments") for more details.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
in the result as dimensions with size one. With this option,
the result will broadcast correctly against the input array.
If the default value is passed, then `keepdims` will not be
passed through to the `amin` method of sub-classes of
`ndarray`, however any non-default value will be. If the
        sub-class's `amin` method does not implement `keepdims`, any
exceptions will be raised.
Returns
-------
amin : ndarray or scalar
Minimum of `a`. If `axis` is None, the result is a scalar value.
If `axis` is given, the result is an array of dimension
``a.ndim - 1``.
See Also
--------
amax :
The maximum value of an array along a given axis, propagating any NaNs.
nanmin :
The minimum value of an array along a given axis, ignoring any NaNs.
minimum :
Element-wise minimum of two arrays, propagating any NaNs.
fmin :
Element-wise minimum of two arrays, ignoring any NaNs.
argmin :
Return the indices of the minimum values.
nanmax, maximum, fmax
Notes
-----
NaN values are propagated, that is if at least one item is NaN, the
corresponding min value will be NaN as well. To ignore NaN values
(MATLAB behavior), please use nanmin.
Don't use `amin` for element-wise comparison of 2 arrays; when
``a.shape[0]`` is 2, ``minimum(a[0], a[1])`` is faster than
``amin(a, axis=0)``.
Examples
--------
>>> a = np.arange(4).reshape((2,2))
>>> a
array([[0, 1],
[2, 3]])
>>> np.amin(a) # Minimum of the flattened array
0
>>> np.amin(a, axis=0) # Minima along the first axis
array([0, 1])
>>> np.amin(a, axis=1) # Minima along the second axis
array([0, 2])
>>> b = np.arange(5, dtype=np.float)
>>> b[2] = np.NaN
>>> np.amin(b)
nan
>>> np.nanmin(b)
0.0
"""
kwargs = {}
if keepdims is not np._NoValue:
kwargs['keepdims'] = keepdims
if type(a) is not mu.ndarray:
try:
amin = a.min
except AttributeError:
pass
else:
return amin(axis=axis, out=out, **kwargs)
return _methods._amin(a, axis=axis,
out=out, **kwargs)
def alen(a):
"""
Return the length of the first dimension of the input array.
Parameters
----------
a : array_like
Input array.
Returns
-------
alen : int
Length of the first dimension of `a`.
See Also
--------
shape, size
Examples
--------
>>> a = np.zeros((7,4,5))
>>> a.shape[0]
7
>>> np.alen(a)
7
"""
try:
return len(a)
except TypeError:
return len(array(a, ndmin=1))
def prod(a, axis=None, dtype=None, out=None, keepdims=np._NoValue):
"""
Return the product of array elements over a given axis.
Parameters
----------
a : array_like
Input data.
axis : None or int or tuple of ints, optional
Axis or axes along which a product is performed. The default,
axis=None, will calculate the product of all the elements in the
input array. If axis is negative it counts from the last to the
first axis.
.. versionadded:: 1.7.0
If axis is a tuple of ints, a product is performed on all of the
axes specified in the tuple instead of a single axis or all the
axes as before.
dtype : dtype, optional
The type of the returned array, as well as of the accumulator in
which the elements are multiplied. The dtype of `a` is used by
default unless `a` has an integer dtype of less precision than the
default platform integer. In that case, if `a` is signed then the
platform integer is used while if `a` is unsigned then an unsigned
integer of the same precision as the platform integer is used.
out : ndarray, optional
Alternative output array in which to place the result. It must have
the same shape as the expected output, but the type of the output
values will be cast if necessary.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left in the
result as dimensions with size one. With this option, the result
will broadcast correctly against the input array.
If the default value is passed, then `keepdims` will not be
passed through to the `prod` method of sub-classes of
`ndarray`, however any non-default value will be. If the
        sub-class's `prod` method does not implement `keepdims`, any
exceptions will be raised.
Returns
-------
product_along_axis : ndarray, see `dtype` parameter above.
An array shaped as `a` but with the specified axis removed.
Returns a reference to `out` if specified.
See Also
--------
ndarray.prod : equivalent method
numpy.doc.ufuncs : Section "Output arguments"
Notes
-----
Arithmetic is modular when using integer types, and no error is
raised on overflow. That means that, on a 32-bit platform:
>>> x = np.array([536870910, 536870910, 536870910, 536870910])
    >>> np.prod(x)  # random
16
The product of an empty array is the neutral element 1:
>>> np.prod([])
1.0
Examples
--------
By default, calculate the product of all elements:
>>> np.prod([1.,2.])
2.0
Even when the input array is two-dimensional:
>>> np.prod([[1.,2.],[3.,4.]])
24.0
But we can also specify the axis over which to multiply:
>>> np.prod([[1.,2.],[3.,4.]], axis=1)
array([ 2., 12.])
If the type of `x` is unsigned, then the output type is
the unsigned platform integer:
>>> x = np.array([1, 2, 3], dtype=np.uint8)
>>> np.prod(x).dtype == np.uint
True
If `x` is of a signed integer type, then the output type
is the default platform integer:
>>> x = np.array([1, 2, 3], dtype=np.int8)
>>> np.prod(x).dtype == np.int
True
"""
kwargs = {}
if keepdims is not np._NoValue:
kwargs['keepdims'] = keepdims
if type(a) is not mu.ndarray:
try:
prod = a.prod
except AttributeError:
pass
else:
return prod(axis=axis, dtype=dtype, out=out, **kwargs)
return _methods._prod(a, axis=axis, dtype=dtype,
out=out, **kwargs)
def cumprod(a, axis=None, dtype=None, out=None):
"""
Return the cumulative product of elements along a given axis.
Parameters
----------
a : array_like
Input array.
axis : int, optional
Axis along which the cumulative product is computed. By default
the input is flattened.
dtype : dtype, optional
Type of the returned array, as well as of the accumulator in which
the elements are multiplied. If *dtype* is not specified, it
defaults to the dtype of `a`, unless `a` has an integer dtype with
a precision less than that of the default platform integer. In
that case, the default platform integer is used instead.
out : ndarray, optional
Alternative output array in which to place the result. It must
have the same shape and buffer length as the expected output
but the type of the resulting values will be cast if necessary.
Returns
-------
cumprod : ndarray
A new array holding the result is returned unless `out` is
specified, in which case a reference to out is returned.
See Also
--------
numpy.doc.ufuncs : Section "Output arguments"
Notes
-----
Arithmetic is modular when using integer types, and no error is
raised on overflow.
Examples
--------
>>> a = np.array([1,2,3])
>>> np.cumprod(a) # intermediate results 1, 1*2
... # total product 1*2*3 = 6
array([1, 2, 6])
>>> a = np.array([[1, 2, 3], [4, 5, 6]])
>>> np.cumprod(a, dtype=float) # specify type of output
array([ 1., 2., 6., 24., 120., 720.])
The cumulative product for each column (i.e., over the rows) of `a`:
>>> np.cumprod(a, axis=0)
array([[ 1, 2, 3],
[ 4, 10, 18]])
The cumulative product for each row (i.e. over the columns) of `a`:
>>> np.cumprod(a,axis=1)
array([[ 1, 2, 6],
[ 4, 20, 120]])
"""
return _wrapfunc(a, 'cumprod', axis=axis, dtype=dtype, out=out)
def ndim(a):
"""
Return the number of dimensions of an array.
Parameters
----------
a : array_like
Input array. If it is not already an ndarray, a conversion is
attempted.
Returns
-------
number_of_dimensions : int
The number of dimensions in `a`. Scalars are zero-dimensional.
See Also
--------
ndarray.ndim : equivalent method
shape : dimensions of array
ndarray.shape : dimensions of array
Examples
--------
>>> np.ndim([[1,2,3],[4,5,6]])
2
>>> np.ndim(np.array([[1,2,3],[4,5,6]]))
2
>>> np.ndim(1)
0
"""
try:
return a.ndim
except AttributeError:
return asarray(a).ndim
def rank(a):
"""
Return the number of dimensions of an array.
If `a` is not already an array, a conversion is attempted.
Scalars are zero dimensional.
.. note::
This function is deprecated in NumPy 1.9 to avoid confusion with
`numpy.linalg.matrix_rank`. The ``ndim`` attribute or function
should be used instead.
Parameters
----------
a : array_like
Array whose number of dimensions is desired. If `a` is not an array,
a conversion is attempted.
Returns
-------
number_of_dimensions : int
The number of dimensions in the array.
See Also
--------
ndim : equivalent function
ndarray.ndim : equivalent property
shape : dimensions of array
ndarray.shape : dimensions of array
Notes
-----
In the old Numeric package, `rank` was the term used for the number of
dimensions, but in NumPy `ndim` is used instead.
Examples
--------
>>> np.rank([1,2,3])
1
>>> np.rank(np.array([[1,2,3],[4,5,6]]))
2
>>> np.rank(1)
0
"""
# 2014-04-12, 1.9
warnings.warn(
"`rank` is deprecated; use the `ndim` attribute or function instead. "
"To find the rank of a matrix see `numpy.linalg.matrix_rank`.",
VisibleDeprecationWarning, stacklevel=2)
try:
return a.ndim
except AttributeError:
return asarray(a).ndim
def size(a, axis=None):
"""
Return the number of elements along a given axis.
Parameters
----------
a : array_like
Input data.
axis : int, optional
Axis along which the elements are counted. By default, give
the total number of elements.
Returns
-------
element_count : int
Number of elements along the specified axis.
See Also
--------
shape : dimensions of array
ndarray.shape : dimensions of array
ndarray.size : number of elements in array
Examples
--------
>>> a = np.array([[1,2,3],[4,5,6]])
>>> np.size(a)
6
>>> np.size(a,1)
3
>>> np.size(a,0)
2
"""
if axis is None:
try:
return a.size
except AttributeError:
return asarray(a).size
else:
try:
return a.shape[axis]
except AttributeError:
return asarray(a).shape[axis]
def around(a, decimals=0, out=None):
"""
Evenly round to the given number of decimals.
Parameters
----------
a : array_like
Input data.
decimals : int, optional
Number of decimal places to round to (default: 0). If
decimals is negative, it specifies the number of positions to
the left of the decimal point.
out : ndarray, optional
Alternative output array in which to place the result. It must have
the same shape as the expected output, but the type of the output
values will be cast if necessary. See `doc.ufuncs` (Section
"Output arguments") for details.
Returns
-------
rounded_array : ndarray
An array of the same type as `a`, containing the rounded values.
Unless `out` was specified, a new array is created. A reference to
the result is returned.
The real and imaginary parts of complex numbers are rounded
separately. The result of rounding a float is a float.
See Also
--------
ndarray.round : equivalent method
ceil, fix, floor, rint, trunc
Notes
-----
For values exactly halfway between rounded decimal values, NumPy
rounds to the nearest even value. Thus 1.5 and 2.5 round to 2.0,
-0.5 and 0.5 round to 0.0, etc. Results may also be surprising due
to the inexact representation of decimal fractions in the IEEE
floating point standard [1]_ and errors introduced when scaling
by powers of ten.
References
----------
.. [1] "Lecture Notes on the Status of IEEE 754", William Kahan,
http://www.cs.berkeley.edu/~wkahan/ieee754status/IEEE754.PDF
.. [2] "How Futile are Mindless Assessments of
Roundoff in Floating-Point Computation?", William Kahan,
http://www.cs.berkeley.edu/~wkahan/Mindless.pdf
Examples
--------
>>> np.around([0.37, 1.64])
array([ 0., 2.])
>>> np.around([0.37, 1.64], decimals=1)
array([ 0.4, 1.6])
>>> np.around([.5, 1.5, 2.5, 3.5, 4.5]) # rounds to nearest even value
array([ 0., 2., 2., 4., 4.])
>>> np.around([1,2,3,11], decimals=1) # ndarray of ints is returned
array([ 1, 2, 3, 11])
>>> np.around([1,2,3,11], decimals=-1)
array([ 0, 0, 0, 10])
"""
return _wrapfunc(a, 'round', decimals=decimals, out=out)
def round_(a, decimals=0, out=None):
"""
Round an array to the given number of decimals.
Refer to `around` for full documentation.
See Also
--------
around : equivalent function
"""
return around(a, decimals=decimals, out=out)
def mean(a, axis=None, dtype=None, out=None, keepdims=np._NoValue):
"""
Compute the arithmetic mean along the specified axis.
Returns the average of the array elements. The average is taken over
the flattened array by default, otherwise over the specified axis.
`float64` intermediate and return values are used for integer inputs.
Parameters
----------
a : array_like
Array containing numbers whose mean is desired. If `a` is not an
array, a conversion is attempted.
axis : None or int or tuple of ints, optional
Axis or axes along which the means are computed. The default is to
compute the mean of the flattened array.
.. versionadded:: 1.7.0
If this is a tuple of ints, a mean is performed over multiple axes,
instead of a single axis or all the axes as before.
dtype : data-type, optional
Type to use in computing the mean. For integer inputs, the default
is `float64`; for floating point inputs, it is the same as the
input dtype.
out : ndarray, optional
Alternate output array in which to place the result. The default
is ``None``; if provided, it must have the same shape as the
expected output, but the type will be cast if necessary.
See `doc.ufuncs` for details.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
in the result as dimensions with size one. With this option,
the result will broadcast correctly against the input array.
If the default value is passed, then `keepdims` will not be
passed through to the `mean` method of sub-classes of
`ndarray`, however any non-default value will be. If the
        sub-class's `mean` method does not implement `keepdims`, any
exceptions will be raised.
Returns
-------
m : ndarray, see dtype parameter above
If `out=None`, returns a new array containing the mean values,
otherwise a reference to the output array is returned.
See Also
--------
average : Weighted average
std, var, nanmean, nanstd, nanvar
Notes
-----
The arithmetic mean is the sum of the elements along the axis divided
by the number of elements.
Note that for floating-point input, the mean is computed using the
same precision the input has. Depending on the input data, this can
cause the results to be inaccurate, especially for `float32` (see
example below). Specifying a higher-precision accumulator using the
`dtype` keyword can alleviate this issue.
By default, `float16` results are computed using `float32` intermediates
for extra precision.
Examples
--------
>>> a = np.array([[1, 2], [3, 4]])
>>> np.mean(a)
2.5
>>> np.mean(a, axis=0)
array([ 2., 3.])
>>> np.mean(a, axis=1)
array([ 1.5, 3.5])
In single precision, `mean` can be inaccurate:
>>> a = np.zeros((2, 512*512), dtype=np.float32)
>>> a[0, :] = 1.0
>>> a[1, :] = 0.1
>>> np.mean(a)
0.54999924
Computing the mean in float64 is more accurate:
>>> np.mean(a, dtype=np.float64)
0.55000000074505806
"""
kwargs = {}
if keepdims is not np._NoValue:
kwargs['keepdims'] = keepdims
if type(a) is not mu.ndarray:
try:
mean = a.mean
except AttributeError:
pass
else:
return mean(axis=axis, dtype=dtype, out=out, **kwargs)
return _methods._mean(a, axis=axis, dtype=dtype,
out=out, **kwargs)
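# Illustration of a common centering idiom (assumes the usual
# ``import numpy as np``): a kept axis lets the mean broadcast back,
# zero-centering each row in one expression.
#
#   >>> x = np.arange(6.).reshape(2, 3)
#   >>> x - x.mean(axis=1, keepdims=True)
#   array([[-1.,  0.,  1.],
#          [-1.,  0.,  1.]])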
def std(a, axis=None, dtype=None, out=None, ddof=0, keepdims=np._NoValue):
"""
Compute the standard deviation along the specified axis.
Returns the standard deviation, a measure of the spread of a distribution,
of the array elements. The standard deviation is computed for the
flattened array by default, otherwise over the specified axis.
Parameters
----------
a : array_like
Calculate the standard deviation of these values.
axis : None or int or tuple of ints, optional
Axis or axes along which the standard deviation is computed. The
default is to compute the standard deviation of the flattened array.
.. versionadded:: 1.7.0
If this is a tuple of ints, a standard deviation is performed over
multiple axes, instead of a single axis or all the axes as before.
dtype : dtype, optional
Type to use in computing the standard deviation. For arrays of
integer type the default is float64, for arrays of float types it is
the same as the array type.
out : ndarray, optional
Alternative output array in which to place the result. It must have
the same shape as the expected output but the type (of the calculated
values) will be cast if necessary.
ddof : int, optional
Means Delta Degrees of Freedom. The divisor used in calculations
is ``N - ddof``, where ``N`` represents the number of elements.
By default `ddof` is zero.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
in the result as dimensions with size one. With this option,
the result will broadcast correctly against the input array.
If the default value is passed, then `keepdims` will not be
passed through to the `std` method of sub-classes of
`ndarray`, however any non-default value will be. If the
        sub-class's `std` method does not implement `keepdims`, any
exceptions will be raised.
Returns
-------
standard_deviation : ndarray, see dtype parameter above.
If `out` is None, return a new array containing the standard deviation,
otherwise return a reference to the output array.
See Also
--------
var, mean, nanmean, nanstd, nanvar
numpy.doc.ufuncs : Section "Output arguments"
Notes
-----
The standard deviation is the square root of the average of the squared
deviations from the mean, i.e., ``std = sqrt(mean(abs(x - x.mean())**2))``.
The average squared deviation is normally calculated as
``x.sum() / N``, where ``N = len(x)``. If, however, `ddof` is specified,
the divisor ``N - ddof`` is used instead. In standard statistical
practice, ``ddof=1`` provides an unbiased estimator of the variance
of the infinite population. ``ddof=0`` provides a maximum likelihood
estimate of the variance for normally distributed variables. The
standard deviation computed in this function is the square root of
the estimated variance, so even with ``ddof=1``, it will not be an
unbiased estimate of the standard deviation per se.
Note that, for complex numbers, `std` takes the absolute
value before squaring, so that the result is always real and nonnegative.
For floating-point input, the *std* is computed using the same
precision the input has. Depending on the input data, this can cause
the results to be inaccurate, especially for float32 (see example below).
Specifying a higher-accuracy accumulator using the `dtype` keyword can
alleviate this issue.
Examples
--------
>>> a = np.array([[1, 2], [3, 4]])
>>> np.std(a)
1.1180339887498949
>>> np.std(a, axis=0)
array([ 1., 1.])
>>> np.std(a, axis=1)
array([ 0.5, 0.5])
In single precision, std() can be inaccurate:
>>> a = np.zeros((2, 512*512), dtype=np.float32)
>>> a[0, :] = 1.0
>>> a[1, :] = 0.1
>>> np.std(a)
0.45000005
Computing the standard deviation in float64 is more accurate:
>>> np.std(a, dtype=np.float64)
0.44999999925494177
"""
kwargs = {}
if keepdims is not np._NoValue:
kwargs['keepdims'] = keepdims
if type(a) is not mu.ndarray:
try:
std = a.std
except AttributeError:
pass
else:
return std(axis=axis, dtype=dtype, out=out, ddof=ddof, **kwargs)
return _methods._std(a, axis=axis, dtype=dtype, out=out, ddof=ddof,
**kwargs)
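# Illustration of ``ddof`` (assumes the usual ``import numpy as np``):
# ``ddof=0`` divides by N (population estimate), ``ddof=1`` by N - 1
# (sample estimate).
#
#   >>> x = np.array([1., 2., 3., 4.])
#   >>> np.std(x)                             # sqrt(5/4)
#   1.1180339887498949
#   >>> np.std(x, ddof=1)                     # sqrt(5/3)
#   1.2909944487358056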
def var(a, axis=None, dtype=None, out=None, ddof=0, keepdims=np._NoValue):
"""
Compute the variance along the specified axis.
Returns the variance of the array elements, a measure of the spread of a
distribution. The variance is computed for the flattened array by
default, otherwise over the specified axis.
Parameters
----------
a : array_like
Array containing numbers whose variance is desired. If `a` is not an
array, a conversion is attempted.
axis : None or int or tuple of ints, optional
Axis or axes along which the variance is computed. The default is to
compute the variance of the flattened array.
.. versionadded:: 1.7.0
If this is a tuple of ints, a variance is performed over multiple axes,
instead of a single axis or all the axes as before.
dtype : data-type, optional
Type to use in computing the variance. For arrays of integer type
        the default is `float64`; for arrays of float types it is the same as
the array type.
out : ndarray, optional
Alternate output array in which to place the result. It must have
the same shape as the expected output, but the type is cast if
necessary.
ddof : int, optional
"Delta Degrees of Freedom": the divisor used in the calculation is
``N - ddof``, where ``N`` represents the number of elements. By
default `ddof` is zero.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
in the result as dimensions with size one. With this option,
the result will broadcast correctly against the input array.
If the default value is passed, then `keepdims` will not be
passed through to the `var` method of sub-classes of
`ndarray`, however any non-default value will be. If the
        sub-class's `var` method does not implement `keepdims`, any
exceptions will be raised.
Returns
-------
variance : ndarray, see dtype parameter above
If ``out=None``, returns a new array containing the variance;
otherwise, a reference to the output array is returned.
See Also
--------
std , mean, nanmean, nanstd, nanvar
numpy.doc.ufuncs : Section "Output arguments"
Notes
-----
The variance is the average of the squared deviations from the mean,
i.e., ``var = mean(abs(x - x.mean())**2)``.
The mean is normally calculated as ``x.sum() / N``, where ``N = len(x)``.
If, however, `ddof` is specified, the divisor ``N - ddof`` is used
instead. In standard statistical practice, ``ddof=1`` provides an
unbiased estimator of the variance of a hypothetical infinite population.
``ddof=0`` provides a maximum likelihood estimate of the variance for
normally distributed variables.
Note that for complex numbers, the absolute value is taken before
squaring, so that the result is always real and nonnegative.
For floating-point input, the variance is computed using the same
precision the input has. Depending on the input data, this can cause
the results to be inaccurate, especially for `float32` (see example
below). Specifying a higher-accuracy accumulator using the ``dtype``
keyword can alleviate this issue.
Examples
--------
>>> a = np.array([[1, 2], [3, 4]])
>>> np.var(a)
1.25
>>> np.var(a, axis=0)
array([ 1., 1.])
>>> np.var(a, axis=1)
array([ 0.25, 0.25])
In single precision, var() can be inaccurate:
>>> a = np.zeros((2, 512*512), dtype=np.float32)
>>> a[0, :] = 1.0
>>> a[1, :] = 0.1
>>> np.var(a)
0.20250003
Computing the variance in float64 is more accurate:
>>> np.var(a, dtype=np.float64)
0.20249999932944759
>>> ((1-0.55)**2 + (0.1-0.55)**2)/2
0.2025
"""
kwargs = {}
if keepdims is not np._NoValue:
kwargs['keepdims'] = keepdims
if type(a) is not mu.ndarray:
try:
var = a.var
except AttributeError:
pass
else:
return var(axis=axis, dtype=dtype, out=out, ddof=ddof, **kwargs)
return _methods._var(a, axis=axis, dtype=dtype, out=out, ddof=ddof,
**kwargs)
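# Illustration of the relationship to ``std`` (assumes the usual
# ``import numpy as np``): the variance is the squared standard deviation,
# up to floating-point rounding.
#
#   >>> x = np.array([1., 2., 3., 4.])
#   >>> np.allclose(np.var(x), np.std(x) ** 2)
#   True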
| mit |
mhdella/scikit-learn | examples/cluster/plot_feature_agglomeration_vs_univariate_selection.py | 218 | 3893 | """
==============================================
Feature agglomeration vs. univariate selection
==============================================
This example compares two dimensionality reduction strategies:
- univariate feature selection with ANOVA
- feature agglomeration with Ward hierarchical clustering
Both methods are compared in a regression problem using
a BayesianRidge as the supervised estimator.
"""
# Author: Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
print(__doc__)
import shutil
import tempfile
import numpy as np
import matplotlib.pyplot as plt
from scipy import linalg, ndimage
from sklearn.feature_extraction.image import grid_to_graph
from sklearn import feature_selection
from sklearn.cluster import FeatureAgglomeration
from sklearn.linear_model import BayesianRidge
from sklearn.pipeline import Pipeline
from sklearn.grid_search import GridSearchCV
from sklearn.externals.joblib import Memory
from sklearn.cross_validation import KFold
###############################################################################
# Generate data
n_samples = 200
size = 40 # image size
roi_size = 15
snr = 5.
np.random.seed(0)
mask = np.ones([size, size], dtype=np.bool)
coef = np.zeros((size, size))
coef[0:roi_size, 0:roi_size] = -1.
coef[-roi_size:, -roi_size:] = 1.
X = np.random.randn(n_samples, size ** 2)
for x in X: # smooth data
x[:] = ndimage.gaussian_filter(x.reshape(size, size), sigma=1.0).ravel()
X -= X.mean(axis=0)
X /= X.std(axis=0)
y = np.dot(X, coef.ravel())
noise = np.random.randn(y.shape[0])
noise_coef = (linalg.norm(y, 2) / np.exp(snr / 20.)) / linalg.norm(noise, 2)
y += noise_coef * noise # add noise
###############################################################################
# Compute the coefs of a Bayesian Ridge with GridSearch
cv = KFold(len(y), 2) # cross-validation generator for model selection
ridge = BayesianRidge()
cachedir = tempfile.mkdtemp()
mem = Memory(cachedir=cachedir, verbose=1)
# Ward agglomeration followed by BayesianRidge
connectivity = grid_to_graph(n_x=size, n_y=size)
ward = FeatureAgglomeration(n_clusters=10, connectivity=connectivity,
memory=mem)
clf = Pipeline([('ward', ward), ('ridge', ridge)])
# Select the optimal number of parcels with grid search
clf = GridSearchCV(clf, {'ward__n_clusters': [10, 20, 30]}, n_jobs=1, cv=cv)
clf.fit(X, y) # set the best parameters
coef_ = clf.best_estimator_.steps[-1][1].coef_
coef_ = clf.best_estimator_.steps[0][1].inverse_transform(coef_)
coef_agglomeration_ = coef_.reshape(size, size)
# Anova univariate feature selection followed by BayesianRidge
f_regression = mem.cache(feature_selection.f_regression) # caching function
anova = feature_selection.SelectPercentile(f_regression)
clf = Pipeline([('anova', anova), ('ridge', ridge)])
# Select the optimal percentage of features with grid search
clf = GridSearchCV(clf, {'anova__percentile': [5, 10, 20]}, cv=cv)
clf.fit(X, y) # set the best parameters
coef_ = clf.best_estimator_.steps[-1][1].coef_
coef_ = clf.best_estimator_.steps[0][1].inverse_transform(coef_)
coef_selection_ = coef_.reshape(size, size)
###############################################################################
# Inverse the transformation to plot the results on an image
plt.close('all')
plt.figure(figsize=(7.3, 2.7))
plt.subplot(1, 3, 1)
plt.imshow(coef, interpolation="nearest", cmap=plt.cm.RdBu_r)
plt.title("True weights")
plt.subplot(1, 3, 2)
plt.imshow(coef_selection_, interpolation="nearest", cmap=plt.cm.RdBu_r)
plt.title("Feature Selection")
plt.subplot(1, 3, 3)
plt.imshow(coef_agglomeration_, interpolation="nearest", cmap=plt.cm.RdBu_r)
plt.title("Feature Agglomeration")
plt.subplots_adjust(0.04, 0.0, 0.98, 0.94, 0.16, 0.26)
plt.show()
# Attempt to remove the temporary cachedir, but don't worry if it fails
shutil.rmtree(cachedir, ignore_errors=True)
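# A brief inspection sketch (editorial example; the value shown is
# hypothetical): the winning hyper-parameters of either grid search can be
# read off the fitted object.
#
#   >>> clf.best_params_                      # doctest: +SKIP
#   {'anova__percentile': 20}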
| bsd-3-clause |
samuelshaner/openmc | tests/test_mgxs_library_no_nuclides/test_mgxs_library_no_nuclides.py | 1 | 2589 | #!/usr/bin/env python
import os
import sys
import glob
import hashlib
sys.path.insert(0, os.pardir)
from testing_harness import PyAPITestHarness
from input_set import PinCellInputSet
import openmc
import openmc.mgxs
class MGXSTestHarness(PyAPITestHarness):
def _build_inputs(self):
# Set the input set to use the pincell model
self._input_set = PinCellInputSet()
# Generate inputs using parent class routine
super(MGXSTestHarness, self)._build_inputs()
# Initialize a two-group structure
energy_groups = openmc.mgxs.EnergyGroups(group_edges=[0, 0.625, 20.e6])
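        # The group edges above are in eV: 0.625 eV is the conventional
        # thermal cutoff and 20 MeV the upper bound of the continuous-energy
        # data.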
# Initialize MGXS Library for a few cross section types
self.mgxs_lib = openmc.mgxs.Library(self._input_set.geometry)
self.mgxs_lib.by_nuclide = False
# Test all MGXS types
self.mgxs_lib.mgxs_types = openmc.mgxs.MGXS_TYPES + \
openmc.mgxs.MDGXS_TYPES
self.mgxs_lib.energy_groups = energy_groups
self.mgxs_lib.num_delayed_groups = 6
self.mgxs_lib.legendre_order = 3
self.mgxs_lib.domain_type = 'material'
self.mgxs_lib.build_library()
# Initialize a tallies file
self._input_set.tallies = openmc.Tallies()
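        # merge=False (below) keeps one tally per MGXS object rather than
        # merging compatible tallies, which makes the resulting statepoint
        # easier to introspect at the cost of a larger tally count.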
self.mgxs_lib.add_to_tallies_file(self._input_set.tallies, merge=False)
self._input_set.tallies.export_to_xml()
def _get_results(self, hash_output=False):
"""Digest info in the statepoint and return as a string."""
# Read the statepoint file.
statepoint = glob.glob(os.path.join(os.getcwd(), self._sp_name))[0]
sp = openmc.StatePoint(statepoint)
# Load the MGXS library from the statepoint
self.mgxs_lib.load_from_statepoint(sp)
# Build a string from Pandas Dataframe for each MGXS
outstr = ''
for domain in self.mgxs_lib.domains:
for mgxs_type in self.mgxs_lib.mgxs_types:
mgxs = self.mgxs_lib.get_mgxs(domain, mgxs_type)
df = mgxs.get_pandas_dataframe()
outstr += df.to_string() + '\n'
# Hash the results if necessary
if hash_output:
sha512 = hashlib.sha512()
sha512.update(outstr.encode('utf-8'))
outstr = sha512.hexdigest()
return outstr
def _cleanup(self):
super(MGXSTestHarness, self)._cleanup()
f = os.path.join(os.getcwd(), 'tallies.xml')
if os.path.exists(f): os.remove(f)
if __name__ == '__main__':
harness = MGXSTestHarness('statepoint.10.*', True)
harness.main()
| mit |
plotly/plotly.py | packages/python/plotly/plotly/graph_objs/scattergl/_error_y.py | 1 | 18744 | from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class ErrorY(_BaseTraceHierarchyType):
# class properties
# --------------------
_parent_path_str = "scattergl"
_path_str = "scattergl.error_y"
_valid_props = {
"array",
"arrayminus",
"arrayminussrc",
"arraysrc",
"color",
"symmetric",
"thickness",
"traceref",
"tracerefminus",
"type",
"value",
"valueminus",
"visible",
"width",
}
# array
# -----
@property
def array(self):
"""
        Sets the data corresponding to the length of each error bar.
Values are plotted relative to the underlying data.
The 'array' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["array"]
@array.setter
def array(self, val):
self["array"] = val
# arrayminus
# ----------
@property
def arrayminus(self):
"""
        Sets the data corresponding to the length of each error bar in
        the bottom (left) direction for vertical (horizontal) bars.
        Values are plotted relative to the underlying data.
The 'arrayminus' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["arrayminus"]
@arrayminus.setter
def arrayminus(self, val):
self["arrayminus"] = val
# arrayminussrc
# -------------
@property
def arrayminussrc(self):
"""
        Sets the source reference on Chart Studio Cloud for `arrayminus`.
The 'arrayminussrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["arrayminussrc"]
@arrayminussrc.setter
def arrayminussrc(self, val):
self["arrayminussrc"] = val
# arraysrc
# --------
@property
def arraysrc(self):
"""
        Sets the source reference on Chart Studio Cloud for `array`.
The 'arraysrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["arraysrc"]
@arraysrc.setter
def arraysrc(self, val):
self["arraysrc"] = val
# color
# -----
@property
def color(self):
"""
        Sets the stroke color of the error bars.
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
Returns
-------
str
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
# symmetric
# ---------
@property
def symmetric(self):
"""
        Determines whether or not the error bars have the same length
        in both directions (top/bottom for vertical bars, left/right
        for horizontal bars).
The 'symmetric' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["symmetric"]
@symmetric.setter
def symmetric(self, val):
self["symmetric"] = val
# thickness
# ---------
@property
def thickness(self):
"""
Sets the thickness (in px) of the error bars.
The 'thickness' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["thickness"]
@thickness.setter
def thickness(self, val):
self["thickness"] = val
# traceref
# --------
@property
def traceref(self):
"""
        The 'traceref' property is an integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [0, 9223372036854775807]
Returns
-------
int
"""
return self["traceref"]
@traceref.setter
def traceref(self, val):
self["traceref"] = val
# tracerefminus
# -------------
@property
def tracerefminus(self):
"""
        The 'tracerefminus' property is an integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [0, 9223372036854775807]
Returns
-------
int
"""
return self["tracerefminus"]
@tracerefminus.setter
def tracerefminus(self, val):
self["tracerefminus"] = val
# type
# ----
@property
def type(self):
"""
        Determines the rule used to generate the error bars. If
        "constant", the bar lengths are of a constant value. Set this
        constant in `value`. If "percent", the bar lengths correspond
        to a percentage of underlying data. Set this percentage in
        `value`. If "sqrt", the bar lengths correspond to the square
        root of the underlying data. If "data", the bar lengths are
        set with data set `array`.
The 'type' property is an enumeration that may be specified as:
- One of the following enumeration values:
['percent', 'constant', 'sqrt', 'data']
Returns
-------
Any
"""
return self["type"]
@type.setter
def type(self, val):
self["type"] = val
# value
# -----
@property
def value(self):
"""
Sets the value of either the percentage (if `type` is set to
"percent") or the constant (if `type` is set to "constant")
corresponding to the lengths of the error bars.
The 'value' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["value"]
@value.setter
def value(self, val):
self["value"] = val
# valueminus
# ----------
@property
def valueminus(self):
"""
Sets the value of either the percentage (if `type` is set to
"percent") or the constant (if `type` is set to "constant")
corresponding to the lengths of the error bars in the bottom
(left) direction for vertical (horizontal) bars
The 'valueminus' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["valueminus"]
@valueminus.setter
def valueminus(self, val):
self["valueminus"] = val
# visible
# -------
@property
def visible(self):
"""
Determines whether or not this set of error bars is visible.
The 'visible' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["visible"]
@visible.setter
def visible(self, val):
self["visible"] = val
# width
# -----
@property
def width(self):
"""
Sets the width (in px) of the cross-bar at both ends of the
error bars.
The 'width' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["width"]
@width.setter
def width(self, val):
self["width"] = val
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
array
            Sets the data corresponding to the length of each error
bar. Values are plotted relative to the underlying
data.
arrayminus
            Sets the data corresponding to the length of each error
            bar in the bottom (left) direction for vertical
            (horizontal) bars. Values are plotted relative to the
            underlying data.
arrayminussrc
Sets the source reference on Chart Studio Cloud for
            `arrayminus`.
arraysrc
Sets the source reference on Chart Studio Cloud for
            `array`.
color
            Sets the stroke color of the error bars.
symmetric
            Determines whether or not the error bars have the same
            length in both directions (top/bottom for vertical bars,
            left/right for horizontal bars).
thickness
Sets the thickness (in px) of the error bars.
traceref
tracerefminus
type
            Determines the rule used to generate the error bars. If
            "constant", the bar lengths are of a constant value. Set
            this constant in `value`. If "percent", the bar lengths
            correspond to a percentage of underlying data. Set this
            percentage in `value`. If "sqrt", the bar lengths
            correspond to the square root of the underlying data. If
            "data", the bar lengths are set with data set
            `array`.
value
Sets the value of either the percentage (if `type` is
set to "percent") or the constant (if `type` is set to
"constant") corresponding to the lengths of the error
bars.
valueminus
Sets the value of either the percentage (if `type` is
set to "percent") or the constant (if `type` is set to
"constant") corresponding to the lengths of the error
bars in the bottom (left) direction for vertical
(horizontal) bars
visible
Determines whether or not this set of error bars is
visible.
width
Sets the width (in px) of the cross-bar at both ends of
the error bars.
"""
def __init__(
self,
arg=None,
array=None,
arrayminus=None,
arrayminussrc=None,
arraysrc=None,
color=None,
symmetric=None,
thickness=None,
traceref=None,
tracerefminus=None,
type=None,
value=None,
valueminus=None,
visible=None,
width=None,
**kwargs
):
"""
Construct a new ErrorY object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.scattergl.ErrorY`
array
            Sets the data corresponding to the length of each error
bar. Values are plotted relative to the underlying
data.
arrayminus
            Sets the data corresponding to the length of each error
            bar in the bottom (left) direction for vertical
            (horizontal) bars. Values are plotted relative to the
            underlying data.
arrayminussrc
Sets the source reference on Chart Studio Cloud for
            `arrayminus`.
arraysrc
Sets the source reference on Chart Studio Cloud for
            `array`.
color
            Sets the stroke color of the error bars.
symmetric
            Determines whether or not the error bars have the same
            length in both directions (top/bottom for vertical bars,
            left/right for horizontal bars).
thickness
Sets the thickness (in px) of the error bars.
traceref
tracerefminus
type
            Determines the rule used to generate the error bars. If
            "constant", the bar lengths are of a constant value.
            Set this constant in `value`. If "percent", the bar
            lengths correspond to a percentage of underlying data.
            Set this percentage in `value`. If "sqrt", the bar
            lengths correspond to the square root of the underlying
            data. If "data", the bar lengths are set with data set
            `array`.
value
Sets the value of either the percentage (if `type` is
set to "percent") or the constant (if `type` is set to
"constant") corresponding to the lengths of the error
bars.
valueminus
Sets the value of either the percentage (if `type` is
set to "percent") or the constant (if `type` is set to
"constant") corresponding to the lengths of the error
bars in the bottom (left) direction for vertical
(horizontal) bars
visible
Determines whether or not this set of error bars is
visible.
width
Sets the width (in px) of the cross-bar at both ends of
the error bars.
Returns
-------
ErrorY
"""
super(ErrorY, self).__init__("error_y")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.scattergl.ErrorY
constructor must be a dict or
an instance of :class:`plotly.graph_objs.scattergl.ErrorY`"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("array", None)
_v = array if array is not None else _v
if _v is not None:
self["array"] = _v
_v = arg.pop("arrayminus", None)
_v = arrayminus if arrayminus is not None else _v
if _v is not None:
self["arrayminus"] = _v
_v = arg.pop("arrayminussrc", None)
_v = arrayminussrc if arrayminussrc is not None else _v
if _v is not None:
self["arrayminussrc"] = _v
_v = arg.pop("arraysrc", None)
_v = arraysrc if arraysrc is not None else _v
if _v is not None:
self["arraysrc"] = _v
_v = arg.pop("color", None)
_v = color if color is not None else _v
if _v is not None:
self["color"] = _v
_v = arg.pop("symmetric", None)
_v = symmetric if symmetric is not None else _v
if _v is not None:
self["symmetric"] = _v
_v = arg.pop("thickness", None)
_v = thickness if thickness is not None else _v
if _v is not None:
self["thickness"] = _v
_v = arg.pop("traceref", None)
_v = traceref if traceref is not None else _v
if _v is not None:
self["traceref"] = _v
_v = arg.pop("tracerefminus", None)
_v = tracerefminus if tracerefminus is not None else _v
if _v is not None:
self["tracerefminus"] = _v
_v = arg.pop("type", None)
_v = type if type is not None else _v
if _v is not None:
self["type"] = _v
_v = arg.pop("value", None)
_v = value if value is not None else _v
if _v is not None:
self["value"] = _v
_v = arg.pop("valueminus", None)
_v = valueminus if valueminus is not None else _v
if _v is not None:
self["valueminus"] = _v
_v = arg.pop("visible", None)
_v = visible if visible is not None else _v
if _v is not None:
self["visible"] = _v
_v = arg.pop("width", None)
_v = width if width is not None else _v
if _v is not None:
self["width"] = _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
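# A minimal usage sketch (hypothetical values), assuming the public plotly
# figure API; ErrorY objects are normally built implicitly from a dict passed
# to the trace constructor rather than instantiated directly:
#
#     import plotly.graph_objects as go
#     fig = go.Figure(go.Scattergl(
#         y=[1.0, 2.0, 3.0],
#         error_y=dict(type="percent", value=10, symmetric=False, valueminus=5),
#     ))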
| mit |
keflavich/APEX_CMZ_H2CO | analysis/constrain_parameters_4to3.py | 1 | 26715 | raise "Use https://github.com/keflavich/h2co_modeling/blob/master/h2co_modeling/constrain_parameters.py instead"
"""
Functions for fitting temperature (and density and column) from the line ratio
plus whatever other constraints are available
"""
import inspect
import time
import os
import collections
import warnings
import numpy as np
import matplotlib
from scipy.ndimage.interpolation import map_coordinates
from scipy import stats
from astropy import units as u
from astropy import log
import pylab as pl
from astropy.io import fits
from h2co_modeling import grid_fitter
from h2co_modeling.paraH2COmodel import generic_paraH2COmodel
def gpath(fn, gridpath='/Users/adam/work/h2co/radex/thermom/'):
return os.path.join(gridpath, fn)
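# `short_mapping` and `chi2_mapping` are used by the plotting methods below
# but defined elsewhere in the upstream package; the dictionaries here are
# assumptions reconstructed from how the names are used (axis keys to
# parconstraints keys, and chi2 components to legend labels).
short_mapping = {'dens': 'density',  # assumed mapping
                 'col': 'column',
                 'tem': 'temperature'}
chi2_mapping = {'X': 'Abundance',  # placeholder legend labels (assumed)
                'h2': 'H2 column',
                'ff1': 'Filling factor 303',
                'ff2': 'Filling factor 321',
                'dens': 'Minimum density',
                'r321303': 'Ratio 321/303',
                'r321322': 'Ratio 322/321',
                'r404303': 'Ratio 404/303',
                'r422404': 'Ratio 422/404',
                'r423404': 'Ratio 423/404'}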
class paraH2COmodel(generic_paraH2COmodel):
def __init__(self, tbackground=2.73, gridsize=[250.,101.,100.]):
t0 = time.time()
self.texgrid303 = texgrid303 = fits.getdata(gpath('fjdu_pH2CO_303_tex_5kms.fits'))
self.taugrid303 = taugrid303 = fits.getdata(gpath('fjdu_pH2CO_303_tau_5kms.fits'))
self.texgrid321 = texgrid321 = fits.getdata(gpath('fjdu_pH2CO_321_tex_5kms.fits'))
self.taugrid321 = taugrid321 = fits.getdata(gpath('fjdu_pH2CO_321_tau_5kms.fits'))
self.texgrid322 = texgrid322 = fits.getdata(gpath('fjdu_pH2CO_322_tex_5kms.fits'))
self.taugrid322 = taugrid322 = fits.getdata(gpath('fjdu_pH2CO_322_tau_5kms.fits'))
self.texgrid404 = texgrid404 = fits.getdata(gpath('fjdu_pH2CO_404_tex_5kms.fits'))
self.taugrid404 = taugrid404 = fits.getdata(gpath('fjdu_pH2CO_404_tau_5kms.fits'))
self.texgrid422 = texgrid422 = fits.getdata(gpath('fjdu_pH2CO_422_tex_5kms.fits'))
self.taugrid422 = taugrid422 = fits.getdata(gpath('fjdu_pH2CO_422_tau_5kms.fits'))
self.texgrid423 = texgrid423 = fits.getdata(gpath('fjdu_pH2CO_423_tex_5kms.fits'))
self.taugrid423 = taugrid423 = fits.getdata(gpath('fjdu_pH2CO_423_tau_5kms.fits'))
self.hdr = hdr = hdrb = fits.getheader(gpath('fjdu_pH2CO_303_tex_5kms.fits'))
t1 = time.time()
log.debug("Loading grids took {0:0.1f} seconds".format(t1-t0))
self.Tbackground = tbackground
self.tline303a = ((1.0-np.exp(-np.array(self.taugrid303))) *
(self.texgrid303-self.Tbackground))
self.tline321a = ((1.0-np.exp(-np.array(self.taugrid321))) *
(self.texgrid321-self.Tbackground))
self.tline322a = ((1.0-np.exp(-np.array(self.taugrid322))) *
(self.texgrid322-self.Tbackground))
self.tline404a = ((1.0-np.exp(-np.array(self.taugrid404))) *
(self.texgrid404-self.Tbackground))
self.tline423a = ((1.0-np.exp(-np.array(self.taugrid423))) *
(self.texgrid423-self.Tbackground))
self.tline422a = ((1.0-np.exp(-np.array(self.taugrid422))) *
(self.texgrid422-self.Tbackground))
zinds,yinds,xinds = np.indices(self.tline303a.shape)
upsample_factor = np.array([gridsize[0]/self.tline303a.shape[0], # temperature
gridsize[1]/self.tline303a.shape[1], # density
gridsize[2]/self.tline303a.shape[2]], # column
dtype='float')
uzinds,uyinds,uxinds = upsinds = np.indices([int(x*us)
for x,us in zip(self.tline303a.shape,
upsample_factor)],
dtype='float')
self.tline303 = map_coordinates(self.tline303a,
upsinds/upsample_factor[:,None,None,None],
mode='nearest')
self.tline321 = map_coordinates(self.tline321a,
upsinds/upsample_factor[:,None,None,None],
mode='nearest')
self.tline322 = map_coordinates(self.tline322a,
upsinds/upsample_factor[:,None,None,None],
mode='nearest')
self.tline404 = map_coordinates(self.tline404a,
upsinds/upsample_factor[:,None,None,None],
mode='nearest')
self.tline422 = map_coordinates(self.tline422a,
upsinds/upsample_factor[:,None,None,None],
mode='nearest')
self.tline423 = map_coordinates(self.tline423a,
upsinds/upsample_factor[:,None,None,None],
mode='nearest')
self.tline = {303: self.tline303,
321: self.tline321,
322: self.tline322,
422: self.tline422,
423: self.tline423,
404: self.tline404,
}
assert self.hdr['CTYPE2'].strip() == 'LOG-DENS'
assert self.hdr['CTYPE1'].strip() == 'LOG-COLU'
self.columnarr = ((uxinds + self.hdr['CRPIX1']-1)*self.hdr['CDELT1'] /
float(upsample_factor[2])+self.hdr['CRVAL1']) # log column
self.densityarr = ((uyinds + self.hdr['CRPIX2']-1)*self.hdr['CDELT2'] /
float(upsample_factor[1])+self.hdr['CRVAL2']) # log density
self.temparr = ((uzinds + self.hdr['CRPIX3']-1)*self.hdr['CDELT3'] /
float(upsample_factor[0])+self.hdr['CRVAL3']) # lin temperature
self.drange = [self.densityarr.min(), self.densityarr.max()]
self.crange = [self.columnarr.min(), self.columnarr.max()]
self.trange = [self.temparr.min(), self.temparr.max()]
self.darr = self.densityarr[0,:,0]
self.carr = self.columnarr[0,0,:]
self.tarr = self.temparr[:,0,0]
self.axes = {'dens': self.darr,
'col': self.carr,
'tem': self.tarr}
self.labels = {'dens': 'Density $n(\mathrm{H}_2)$ [log cm$^{-3}$]',
'col': 'p-H$_2$CO [log cm$^{-2}$/(km s$^{-1}$ pc)]',
'tem': 'Temperature (K)'}
# While the individual lines are subject to filling factor uncertainties, the
# ratio is not.
self.modelratio1 = self.tline321/self.tline303
self.modelratio2 = self.tline322/self.tline321
self.modelratio_423_404 = self.tline423/self.tline404
self.modelratio_422_404 = self.tline422/self.tline404
self.modelratio_404_303 = self.tline404/self.tline303
self.model_logabundance = np.log10(10**self.columnarr / u.pc.to(u.cm) /
10**self.densityarr)
t2 = time.time()
log.debug("Grid initialization took {0:0.1f} seconds total,"
" {1:0.1f} since loading grids.".format(t2-t0,t2-t1))
def grid_getmatch_321to303(self, ratio, eratio):
match,indbest,chi2r = grid_fitter.grid_getmatch(ratio, eratio,
self.modelratio1)
return chi2r
def grid_getmatch_404to303(self, ratio, eratio):
match,indbest,chi2r = grid_fitter.grid_getmatch(ratio, eratio,
self.modelratio_404_303)
return chi2r
def grid_getmatch_422to404(self, ratio, eratio):
match,indbest,chi2r = grid_fitter.grid_getmatch(ratio, eratio,
self.modelratio_422_404)
return chi2r
def grid_getmatch_423to404(self, ratio, eratio):
match,indbest,chi2r = grid_fitter.grid_getmatch(ratio, eratio,
self.modelratio_423_404)
return chi2r
def grid_getmatch_322to321(self, ratio, eratio):
match,indbest,chi2r = grid_fitter.grid_getmatch(ratio, eratio,
self.modelratio2)
return chi2r
def list_parameters():
raise NotImplementedError("Not implemented yet for 4-3")
return ['taline303', 'etaline303', 'taline321', 'etaline321',
'taline322', 'etaline322', 'logabundance', 'elogabundance',
'logh2column', 'elogh2column', 'ratio321303', 'eratio321303',
'ratio321322', 'eratio321322', 'linewidth']
def set_constraints_fromrow(self, row, **kwargs):
raise NotImplementedError("Not implemented yet for 4-3")
mapping = {'e321':'etaline321',
'Smean321':'taline321',
'Smean303':'taline303',
'er321303':'eratio321303',
'eratio321303':'eratio321303',
'e303':'etaline303',
'r321303':'ratio321303',
'ratio321303':'ratio321303',
'r321303':'ratio321303',
'er321303':'eratio321303',
'logabundance':'logabundance',
'elogabundance':'elogabundance',
'logh2column':'logh2column',
'elogh2column':'elogh2column',
'dustmindens':'linmindens',
'v_rms':'linewidth',
}
pars = {mapping[k]: row[k] for k in row.colnames if k in mapping}
pars.update(**kwargs)
self.set_constraints(**pars)
def set_constraints(self,
taline303=None, etaline303=None,
taline321=None, etaline321=None,
taline322=None, etaline322=None,
taline404=None, etaline404=None,
taline422=None, etaline422=None,
taline423=None, etaline423=None,
logabundance=None, elogabundance=None,
logh2column=None, elogh2column=None,
ratio321303=None, eratio321303=None,
ratio321322=None, eratio321322=None,
ratio404303=None, eratio404303=None,
ratio422404=None, eratio422404=None,
ratio423404=None, eratio423404=None,
linmindens=None,
mindens=None, emindens=0.2,
linewidth=None):
"""
Set parameter constraints from a variety of inputs. This will fill in
a variety of .chi2_[x] values.
All errors are 1-sigma Gaussian errors.
The ``taline`` parameters are only used as lower limits.
Logabundance and logh2column are both log_10 values, so the errorbars
are effectively lognormal 1-sigma errors.
The ratios are generally the most important constraints.
A minimum volume density, with 1-sigma lognormal one-sided error
``emindens``, can be included. ``mindens`` is logarithmic, but you can
use ``linmindens`` instead. ``linewidth`` also needs to be specified
in km/s.
"""
argspec=inspect.getargvalues(inspect.currentframe())
for arg in argspec.args:
if argspec.locals[arg] is not None:
setattr(self, arg, argspec.locals[arg])
self.chi2_X = (self.chi2_abundance(logabundance, elogabundance)
if not any(arg is None for arg in (logabundance,
elogabundance))
else 0)
self.chi2_h2 = (self.chi2_column(logh2column, elogh2column,
logabundance, linewidth)
if not
any(arg is None for arg in (logabundance, logh2column,
elogh2column, linewidth))
else 0)
self.chi2_ff1 = (self.chi2_fillingfactor(taline303, etaline303, 303)
if not any(arg is None for arg in (taline303,
etaline303))
else 0)
self.chi2_ff2 = (self.chi2_fillingfactor(taline321, etaline321, 321)
if not any(arg is None for arg in (taline321,
etaline321))
else 0)
self.chi2_r321303 = (self.grid_getmatch_321to303(ratio321303,
eratio321303)
if not any(arg is None for arg in (ratio321303,
eratio321303))
else 0)
if np.all(~np.isfinite(self.chi2_r321303)):
self.chi2_r321303 = 0
self.chi2_r423404 = (self.grid_getmatch_423to404(ratio423404,
eratio423404)
if not any(arg is None for arg in (ratio423404,
eratio423404))
else 0)
if np.all(~np.isfinite(self.chi2_r423404)):
self.chi2_r423404 = 0
self.chi2_r422404 = (self.grid_getmatch_422to404(ratio422404,
eratio422404)
if not any(arg is None for arg in (ratio422404,
eratio422404))
else 0)
if np.all(~np.isfinite(self.chi2_r422404)):
self.chi2_r422404 = 0
self.chi2_r404303 = (self.grid_getmatch_404to303(ratio404303,
eratio404303)
if not any(arg is None for arg in (ratio404303,
eratio404303))
else 0)
if np.all(~np.isfinite(self.chi2_r404303)):
self.chi2_r404303 = 0
self.chi2_r321322 = (self.grid_getmatch_322to321(ratio321322,
eratio321322)
if not any(arg is None for arg in (ratio321322,
eratio321322))
else 0)
if np.all(~np.isfinite(self.chi2_r321322)):
self.chi2_r321322 = 0
if linmindens is not None:
if mindens is not None:
raise ValueError("Both linmindens and logmindens were set.")
mindens = np.log10(linmindens)
if mindens is not None:
self.chi2_dens = (((self.densityarr - mindens)/emindens)**2
* (self.densityarr < (mindens-emindens)))
else:
self.chi2_dens = 0
self.compute_chi2_fromcomponents()
def compute_chi2_fromcomponents(self):
"""
Compute the total chi2 from the individual chi2 components
"""
self.chi2 = (self.chi2_X + self.chi2_h2 + self.chi2_ff1 + self.chi2_ff2
+ self.chi2_r321322 + self.chi2_r321303 + self.chi2_dens +
self.chi2_r404303 + self.chi2_r423404 + self.chi2_r422404)
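        # Each chi2_* term is -2 ln(likelihood) of an independent constraint,
        # so summing them multiplies the individual likelihoods; the
        # `individual_likelihoods` property below recovers each factor as
        # exp(-chi2/2).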
def denstemplot(self):
self.parplot('dens','tem')
def denscolplot(self):
self.parplot('col','dens')
def coltemplot(self):
self.parplot('col','tem')
def parplot(self, par1='col', par2='dens', nlevs=5, levels=None,
colors=[(0.5,0,0), (0.75,0,0), (1.0,0,0), (1.0,0.25,0), (0.75,0.5,0)],
colorsf=[0.0, 0.33, 0.66, 1.0, 'w']):
cdict = {x: [(0.0, 0.0, 0.0),
(1.0, 1.0, 1.0)]
for x in ('red','green','blue')}
cdict['blue'] = [(0.0, 1., 1.), (1.0, 1.0, 1.0)]
cm = matplotlib.colors.LinearSegmentedColormap('mycm', cdict)
colorsf = [cm(float(ii)) if isinstance(ii, (float,int))
else ii
for ii in colorsf]
xax = self.axes[par1]
yax = self.axes[par2]
xlabel = self.labels[par1]
ylabel = self.labels[par2]
amapping = {('col','dens'): 0,
('dens','tem'): 2,
('col','tem'): 1}
if (par1,par2) in amapping:
axis = amapping[(par1,par2)]
swaps = (0,0)
elif (par2,par1) in amapping:
axis = amapping[(par2,par1)]
swaps = (0,1)
if levels is None:
levels = ([0]+[(stats.norm.cdf(ii)-stats.norm.cdf(-ii))
for ii in range(1,nlevs)]+[1])
xmaxlike = self.parconstraints['{0}_chi2'.format(short_mapping[par1])]
ymaxlike = self.parconstraints['{0}_chi2'.format(short_mapping[par2])]
xexpect = self.parconstraints['expected_{0}'.format(short_mapping[par1])]
yexpect = self.parconstraints['expected_{0}'.format(short_mapping[par2])]
fig = pl.gcf()
fig.clf()
ax1 = pl.subplot(2,2,1)
if 'chi2_r321303' in self.individual_likelihoods:
like = (self.individual_likelihoods['chi2_r321303'])
pl.contourf(xax, yax, cdf_of_like(like.sum(axis=axis)).swapaxes(*swaps),
levels=levels, alpha=0.5, zorder=-5, colors=colorsf)
pl.contour(xax, yax,
cdf_of_like(self.likelihood.sum(axis=axis)).swapaxes(*swaps),
levels=levels, colors=colors, zorder=10)
pl.plot(xmaxlike, ymaxlike, 'o', markerfacecolor='none', markeredgecolor='k')
pl.plot(xexpect, yexpect, 'x', markerfacecolor='none', markeredgecolor='k')
        if hasattr(self.chi2_r321322, 'size'):  # avoid identity test against the literal 0
like = cdf_of_like(self.individual_likelihoods['chi2_r321322'])
pl.contour(xax, yax, like.sum(axis=axis).swapaxes(*swaps),
levels=levels,
cmap=pl.cm.bone)
pl.title("Ratio $3_{0,3}-2_{0,2}/3_{2,1}-2_{2,0}$")
ax4 = pl.subplot(2,2,2)
if hasattr(self.chi2_X, 'size'):
like = self.individual_likelihoods['chi2_X']
pl.contourf(xax, yax, cdf_of_like(like.sum(axis=axis)).swapaxes(*swaps),
levels=levels, alpha=0.5, zorder=-5, colors=colorsf)
pl.contour(xax, yax,
cdf_of_like(self.likelihood.sum(axis=axis)).swapaxes(*swaps),
levels=levels, colors=colors, zorder=10)
pl.plot(xmaxlike, ymaxlike, 'o', markerfacecolor='none', markeredgecolor='k')
pl.plot(xexpect, yexpect, 'x', markerfacecolor='none', markeredgecolor='k')
pl.title("log(p-H$_2$CO/H$_2$) "
"$= {0:0.1f}\pm{1:0.1f}$".format(self.logabundance,
self.elogabundance))
ax3 = pl.subplot(2,2,3)
if hasattr(self.chi2_h2, 'size'):
like = (self.individual_likelihoods['chi2_h2'])
pl.contourf(xax, yax, cdf_of_like(like.sum(axis=axis)).swapaxes(*swaps),
levels=levels, alpha=0.5, zorder=-5, colors=colorsf)
pl.contour(xax, yax,
cdf_of_like(self.likelihood.sum(axis=axis)).swapaxes(*swaps),
levels=levels, colors=colors, zorder=10)
pl.plot(xmaxlike, ymaxlike, 'o', markerfacecolor='none', markeredgecolor='k')
pl.plot(xexpect, yexpect, 'x', markerfacecolor='none', markeredgecolor='k')
pl.title("Total log$(N(\\mathrm{{H}}_2))$ ")
# "= {0:0.1f}\pm{1:0.1f}$".format(self.logh2column,
# self.elogh2column))
ax5 = pl.subplot(2,2,4)
if hasattr(self.chi2_ff1, 'size'):
cdict = {x: [(0.0, 0.5, 0.5),
(1.0, 0.0, 0.0)]
for x in ('red','green','blue')}
cdict['green'] = [(0, 0.5, 0.5), (1,1,1)]
cdict['red'] = [(0, 0.5, 0.5), (1,0.7,0.7)]
cdict['blue'] = [(0, 0.0, 0.0), (1,0,0)]
#cdict['alpha'] = [(0.0, 0.0, 0.0), (1.0, 0.3, 0.3)]
darker = matplotlib.colors.LinearSegmentedColormap('darker', cdict)
like = (self.individual_likelihoods['chi2_ff1'])
plim = cdf_of_like(like.sum(axis=axis)).swapaxes(*swaps)
pl.contour(xax, yax, plim, levels=levels,
cmap=darker, zorder=5)
if hasattr(self.chi2_dens, 'size'):
like = (self.individual_likelihoods['chi2_dens'])
pl.contourf(xax, yax, cdf_of_like(like.sum(axis=axis)).swapaxes(*swaps),
levels=levels, alpha=0.5, zorder=-5, colors=colorsf)
pl.contour(xax, yax,
cdf_of_like(self.likelihood.sum(axis=axis)).swapaxes(*swaps),
levels=levels, colors=colors, zorder=10)
#if hasattr(self, 'taline303'):
# ff1_mask = (self.tline303 < 10*self.taline303)
# pl.contour(xax, yax, ff1_mask.max(axis=axis).swapaxes(*swaps),
# levels=[0.5], colors='k')
pl.plot(xmaxlike, ymaxlike, 'o', markerfacecolor='none', markeredgecolor='k')
pl.plot(xexpect, yexpect, 'x', markerfacecolor='none', markeredgecolor='k')
#pl.contour(xax, yax, (tline303 < 100*par1).max(axis=axis).swapaxes(*swaps), levels=[0.5], colors='k')
#pl.contour(xax, yax, (tline321 < 10*par2).max(axis=axis).swapaxes(*swaps), levels=[0.5], colors='k', linestyles='--')
#pl.contour(xax, yax, (tline321 < 100*par2).max(axis=axis).swapaxes(*swaps), levels=[0.5], colors='k', linestyles='--')
#pl.title("Line Brightness + $ff\leq1$")
pl.title("Minimum Density & $ff$")
fig.text(0.05, 0.5, ylabel, horizontalalignment='center',
verticalalignment='center',
rotation='vertical', transform=fig.transFigure)
fig.text(0.5, 0.02, xlabel, horizontalalignment='center', transform=fig.transFigure)
if par1 == 'col':
for ss in range(1,5):
ax = pl.subplot(2,2,ss)
ax.xaxis.set_ticks(np.arange(self.carr.min(), self.carr.max()))
pl.subplots_adjust(wspace=0.25, hspace=0.45)
def parplot1d(self, par='col', levels=None, clf=True,
legend=True, legendfontsize=14):
xax = self.axes[par]
xlabel = self.labels[par]
amapping = {'col':(2,(0,1)),
'dens':(1,(0,2)),
'tem':(0,(1,2))}
axis,axes = amapping[par]
xmaxlike = self.parconstraints['{0}_chi2'.format(short_mapping[par])]
xexpect = self.parconstraints['expected_{0}'.format(short_mapping[par])]
like = self.likelihood.sum(axis=axes)
like /= like.sum()
inds_cdf = np.argsort(like)
cdf = like[inds_cdf]
fig = pl.gcf()
if clf:
fig.clf()
ax = fig.gca()
ax.plot(xax, like, 'k-', label='Posterior')
for key in self.individual_likelihoods:
if key in ('chi2','_chi2'):
continue # already done
ilike = self.individual_likelihoods[key].sum(axis=axes)
ilike /= ilike.sum()
ax.plot(xax, ilike, label=chi2_mapping[key.replace("chi2_","")])
ax.vlines((xmaxlike,), 0, like.max(), linestyle='--', color='r',
label='Maximum Likelihood')
ax.vlines((xexpect,), 0, like.max(), linestyle='--', color='b',
label='E[{0}]'.format(xlabel))
xexpect_v2 = (like*xax).sum()/like.sum()
ax.vlines((xexpect_v2,), 0, like.max(), linestyle='--', color='c',
zorder=-1)
print("par:{4} xmaxlike: {0}, xexpect: {1}, xexpect_v2: {2},"
"maxlike: {3}, diff:{5}"
.format(xmaxlike, xexpect, xexpect_v2, like.max(), par,
xexpect-xmaxlike))
if levels is not None:
if not isinstance(levels, collections.Iterable):
levels = [levels]
cdf_inds = np.argsort(like)
ppf = 1-like[cdf_inds].cumsum()
cutoff_likes = [like[cdf_inds[np.argmin(np.abs(ppf-lev))]]
for lev in levels]
for fillind,cutoff in enumerate(sorted(cutoff_likes)):
selection = like > cutoff
ax.fill_between(xax[selection], like[selection]*0,
like[selection], alpha=0.1, zorder=fillind-20)
if np.abs(like[selection].sum() - levels[0]) > 0.05:
# we want the sum of the likelihood to be right!
#import ipdb; ipdb.set_trace()
warnings.warn("Likelihood is not self-consistent.")
if legend:
ax.legend(loc='best', fontsize=legendfontsize)
ax.set_xlabel(xlabel)
ax.set_ylabel('$P(${0}$)$'.format(xlabel))
def parplot1d_all(self, legendfontsize=14, **kwargs):
fig = pl.gcf()
if not all(fig.get_size_inches() == [12,16]):
num = fig.number
pl.close(fig)
fig = pl.figure(num, figsize=(12,16))
for axindex,par in enumerate(('col','dens','tem')):
ax = fig.add_subplot(3,1,axindex+1)
self.parplot1d(par=par, clf=False, legend=False, **kwargs)
box = ax.get_position()
ax.set_position([box.x0, box.y0, box.width * 0.8, box.height])
if axindex == 1:
ax.legend(loc='center left', bbox_to_anchor=(1, 0.5),
fontsize=legendfontsize)
pl.subplots_adjust(hspace=0.45)
@property
def individual_likelihoods(self):
if hasattr(self, '_likelihoods') and self._likelihoods is not None:
return self._likelihoods
else:
self._likelihoods = {}
for key in self.__dict__:
                if 'chi2' in key and hasattr(getattr(self, key), 'size'):
self._likelihoods[key] = np.exp(-getattr(self,key)/2.)
self._likelihoods[key] /= self._likelihoods[key].sum()
return self._likelihoods
def cdf_of_like(like):
"""
There is probably an easier way to do this, BUT it works:
Turn a likelihood image into a CDF image
"""
like = like/like.sum()
order = np.argsort(like.flat)[::-1]
cdf = like.flat[order].cumsum()[np.argsort(order)].reshape(like.shape)
cdf[like == like.max()] = 0
return cdf
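# Worked example (sketch): for like = [0.6, 0.3, 0.1] the descending
# cumulative sums are [0.6, 0.9, 1.0]; scattered back and with the peak reset
# to 0 this gives cdf = [0.0, 0.9, 1.0], i.e. each cell holds the total
# probability of all cells at least as likely as itself, so contouring at
# level p encloses the highest-density region containing fraction p of the
# mass.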
def ppf_of_like(like):
return 1-cdf_of_like(like)
| bsd-3-clause |
xuewei4d/scikit-learn | examples/mixture/plot_gmm.py | 122 | 3265 | """
=================================
Gaussian Mixture Model Ellipsoids
=================================
Plot the confidence ellipsoids of a mixture of two Gaussians
obtained with Expectation Maximisation (``GaussianMixture`` class) and
Variational Inference (``BayesianGaussianMixture`` class models with
a Dirichlet process prior).
Both models have access to five components with which to fit the data. Note
that the Expectation Maximisation model will necessarily use all five
components while the Variational Inference model will effectively only use as
many as are needed for a good fit. Here we can see that the Expectation
Maximisation model splits some components arbitrarily, because it is trying to
fit too many components, while the Dirichlet process model adapts its number
of states automatically.
This example doesn't show it, as we're in a low-dimensional space, but
another advantage of the Dirichlet process model is that it can fit
full covariance matrices effectively even when there are fewer examples
per cluster than there are dimensions in the data, due to
regularization properties of the inference algorithm.
"""
import itertools
import numpy as np
from scipy import linalg
import matplotlib.pyplot as plt
import matplotlib as mpl
from sklearn import mixture
color_iter = itertools.cycle(['navy', 'c', 'cornflowerblue', 'gold',
'darkorange'])
def plot_results(X, Y_, means, covariances, index, title):
splot = plt.subplot(2, 1, 1 + index)
for i, (mean, covar, color) in enumerate(zip(
means, covariances, color_iter)):
v, w = linalg.eigh(covar)
v = 2. * np.sqrt(2.) * np.sqrt(v)
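        # The eigenvalues v of the covariance are the variances along the
        # principal axes; 2*sqrt(2*v) is the full axis length at Mahalanobis
        # radius sqrt(2), an ellipse enclosing about 63% of the component's
        # probability mass in 2D.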
u = w[0] / linalg.norm(w[0])
# as the DP will not use every component it has access to
# unless it needs it, we shouldn't plot the redundant
# components.
if not np.any(Y_ == i):
continue
plt.scatter(X[Y_ == i, 0], X[Y_ == i, 1], .8, color=color)
# Plot an ellipse to show the Gaussian component
angle = np.arctan(u[1] / u[0])
angle = 180. * angle / np.pi # convert to degrees
ell = mpl.patches.Ellipse(mean, v[0], v[1], 180. + angle, color=color)
ell.set_clip_box(splot.bbox)
ell.set_alpha(0.5)
splot.add_artist(ell)
plt.xlim(-9., 5.)
plt.ylim(-3., 6.)
plt.xticks(())
plt.yticks(())
plt.title(title)
# Number of samples per component
n_samples = 500
# Generate random sample, two components
np.random.seed(0)
C = np.array([[0., -0.1], [1.7, .4]])
X = np.r_[np.dot(np.random.randn(n_samples, 2), C),
.7 * np.random.randn(n_samples, 2) + np.array([-6, 3])]
# Fit a Gaussian mixture with EM using five components
gmm = mixture.GaussianMixture(n_components=5, covariance_type='full').fit(X)
plot_results(X, gmm.predict(X), gmm.means_, gmm.covariances_, 0,
'Gaussian Mixture')
# Fit a Dirichlet process Gaussian mixture using five components
dpgmm = mixture.BayesianGaussianMixture(n_components=5,
covariance_type='full').fit(X)
plot_results(X, dpgmm.predict(X), dpgmm.means_, dpgmm.covariances_, 1,
'Bayesian Gaussian Mixture with a Dirichlet process prior')
plt.show()
| bsd-3-clause |
Chancylin/specfem2d | utils/Visualization/plotIntegratedEnergyFile.py | 2 | 24376 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Created on Tue Mar 15 17:36:51 2016
Draw integrated energy plot
@author: bottero
"""
from __future__ import (absolute_import, division, print_function)
import numpy as np # NumPy (multidimensional arrays, linear algebra, ...)
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import matplotlib.mlab as mlab
import os,sys,glob,shutil
import argparse # To deal with arguments :
# https://docs.python.org/2/library/argparse.html
import scipy.ndimage
from scipy import interpolate
def representsInt(s):
try:
int(s)
return True
except ValueError:
return False
def representsFloat(s):
try:
float(s)
return True
except ValueError:
return False
class ParFile:
""" This class is used to store the data contained on a specfem2d
par_file"""
def __init__(self,pathToParFile):
""" Constructor (what happen when we do aParFile=ParFile(path)) """
self.path=pathToParFile
self.nt=''
self.dt=''
if os.path.exists(self.path):
self.readParFile()
else:
raise IOError('Impossible to read '+pathToParFile)
def readParFile(self):
""" Open a the par_file and read some values """
with open(self.path) as parFile:
for line in parFile:
if 'NSTEP=' in line.replace(" ", ""):
self.nt=line.split(" = ")[1].split("#")[0].strip()
                    if representsInt(self.nt): # Verify that the extracted string is an int
self.nt = int(self.nt)
else:
raise ValueError('Incorrect value of NSTEP read')
if 'DT=' in line.replace(" ", ""):
self.dt=line.split(" = ")[1].split("#")[0].strip()
                    self.dt=self.dt.replace("d","e").replace("D","E") # Convert to standard scientific notation
                    if representsFloat(self.dt): # Verify that the extracted string is a float
self.dt = float(self.dt)
else:
raise ValueError('Incorrect value of DT read')
def find_index(x,z,xil,zil):
"""Return the indices of the closest point in 2D array"""
idxX=np.searchsorted(xil,x)
idxZ=np.searchsorted(zil,z)
return idxX,idxZ
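# Note: np.searchsorted assumes xil and zil are sorted in ascending order,
# which holds here because both are built with np.linspace below.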
def interpolateValue(array,xil,zil,x,z):
"""Return the value of the 2D field described by array,xil and zil at (x,z)"""
idxX,idxZ = find_index(x,z,xil,zil)
xLine, zLine = np.array([idxX]), np.array([idxZ])
# Extract the values along the line, using cubic interpolation
if type(intEnergyi) == np.ndarray:
zi = scipy.ndimage.map_coordinates(array, np.vstack((zLine,xLine)),order=1)
else:
zi = scipy.ndimage.map_coordinates(array.filled(), np.vstack((zLine,xLine)),order=1)
return zi[0]
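# map_coordinates works in (fractional) index space, hence the find_index
# step above; order=1 requests bilinear interpolation between grid nodes.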
####################### PARSE ARGUMENTS #######################
# Here we read the argument given and we check them
parser = argparse.ArgumentParser(
description="This script plot the files total_integrated_energy_fieldXXXXX representing the energy that have crossed each point")
parser.add_argument("--input_directory","-d",type=str,default="./",
help="input_directory: directory where we can find the files total_integrated_energy_fieldXXXXX")
parser.add_argument("--par_file_directory",type=str,default="../",
help="par_file_directory: directory where we can find the Par_file of the run. Default: input_directory/../")
parser.add_argument("--name_of_files","-n",type=str,default="total_integrated_energy_field",
help="name_of_files: to plot something different than total_integrated_energy_fieldXXXXX")
parser.add_argument('-p','--profiles', action='store_true',
help='profiles: calculate energy profiles')
parser.add_argument('-nc','--no_concatenate_files', action='store_true',
                    help='no_concatenate_files: do not concatenate files at the beginning of the script')
parser.add_argument('-nl','--nolog', action='store_true',
help='nolog: no apply log')
parser.add_argument("--title","-t",type=str,default="",
help="title : title of the figures")
parser.add_argument("-nx",type=int,default=300,
help="nx: number of x points for interpolated field")
parser.add_argument("-nz",type=int,default=300,
help="nz: number of z points for interpolated field")
parser.add_argument('-w','--writeInterpolatedField', action='store_true',
help='writeInterpolatedField: write Interpolated field on file and integrated energy a a function of range if needed')
parser.add_argument('-sp','--saveOneProfile',nargs=4,
help='saveOneProfile: Save one X or Z profile to file (set it in source code). -sp x0 x1 z0 z1. Ex: -sp 0.0 10000.0 -300 -300')
parser.add_argument('-q','--quickDisplay', action='store_true',
help='quickDisplay: display after a simple linear interpolation')
parser.add_argument('--displayPoints', action='store_true',
help='displayPoints: plot the data points')
parser.add_argument('--clim',nargs=2,default=[],
help='clim: set limits of the color bar')
parser.add_argument('--xlim',nargs=2,default=[],
help='xlim: set the x limits of the plots')
parser.add_argument('--zlim',nargs=2,default=[],
help='zlim: set the z limits of the plots')
parser.add_argument('--ref',nargs=2,default=[300,-2500],
help='ref: Point that we set to 1')
parser.add_argument('--rgs', action='store_true',
help='rgs: Compensate geometrical speading by multiplying by r')
parser.add_argument('--nxnzProfiles',nargs=2,default=[10,10],
help='nxnz: set the number of x and z profiles to plot')
parser.add_argument('--noplot', action='store_true',
help='noplot: do not plot anything')
parser.add_argument("--substract","-s",type=str,default="",
help="substract : substract the field with the field given here")
parser.add_argument('-r','--reset', action='store_true',
help='reset: delete all field previously built')
parser.add_argument('-v','--verbose', action='store_true',
help='verbose: display more information')
args = parser.parse_args()
directory=args.input_directory
fontsize = 14
zminProfiles = -10000
zmaxProfiles = -100 #-200 # TODO (-650m for 0.5Hz, -300m for 2Hz...)
num = 2000 # Number of points to describe the profiles
# Check
if not os.path.isdir(directory):
print("Wrong directory! "+directory)
sys.exit(0)
if directory[-1] != '/': # If the given path doesn't end with a slash...
    directory=directory+'/' #... we append one
if args.par_file_directory:
    if args.par_file_directory[-1] != '/': # If the given path doesn't end with a slash...
        par_file_directory=args.par_file_directory+'/' #... we append one
else:
par_file_directory = args.par_file_directory
else:
par_file_directory = directory+"../../"
if args.name_of_files[0] == '/': # If the full path has been given
directory = ""
if not glob.glob(directory+args.name_of_files+"*"): # If we don't find any matching energy file...
print("No files "+directory+args.name_of_files+"* were found!")
sys.exit(0)
# Concatenate all the files (if specfem has been run on parallel each proc has created its own files)
if not os.path.isfile(directory+args.name_of_files+"All") or args.reset: # If the concatenation has not already been done
if args.verbose:
print("Concatenate files...")
if not args.no_concatenate_files:
with open(directory+args.name_of_files+"All", 'w') as outfile:
for infile in glob.glob(directory+args.name_of_files+"0*"): # !!Warning!! The 0 is important!! Otherwise infinite loop
shutil.copyfileobj(open(infile), outfile)
else:
print(directory+args.name_of_files+"All has been found!")
#### OPTION SUBSTRACT ####
if args.substract:
if not glob.glob(args.substract+"*"): # If we don't find any matching energy file...
print("No files "+args.substract+"* were found!")
sys.exit(0)
# Concatenate all the files (if specfem has been run on parallel each proc has created its own files)
if not os.path.isfile(args.substract+"All") or args.reset: # If the concatenation has not already been done
if args.verbose:
print("Concatenate files of substracted field...")
if not args.no_concatenate_files:
with open(args.substract+"All", 'w') as outfile:
for infile in glob.glob(args.substract+"0*"): # !!Warning!! The 0 is important!! Otherwise infinite loop
shutil.copyfileobj(open(infile), outfile)
else:
print(args.substract+"All has been found!")
##########################
if args.verbose:
print("Done")
plt.close('all')
# Load data
if args.verbose:
print("Load data in "+directory+args.name_of_files+"All")
x,z,intEnergy = np.loadtxt(directory+args.name_of_files+"All").T
#### OPTION SUBSTRACT ####
if args.substract:
if args.verbose:
print("Load data in "+args.substract+"All")
xSubstract,zSubstract,intEnergySubstract = np.loadtxt(args.substract+"All").T
##########################
#if args.verbose:
# print("Load seismograms "+directory+"AA.S0001.BXX.semv AA.S0001.BXZ.semv at 300m from the source")
#t,vx0=np.loadtxt(directory+"AA.S0001.BXX.semv").T
#t,vz0=np.loadtxt(directory+"AA.S0001.BXZ.semv").T
if args.verbose:
print("Done")
factorGs = 1.0 # Factor to compensate geometrical spreading if asked
factorGsSubstract = 1.0 # Factor to compensate geometrical spreading if asked
if args.rgs: # Remove geometrical spreading
factorGs = x
if args.substract:
factorGsSubstract = xSubstract
if "integrated" in args.name_of_files: # We have to multiply by dt
#scalingFactor = (vx0**2+vz0**2).sum()
if args.verbose:
print("Opening Par_file in ",par_file_directory,"...")
par_file=ParFile(par_file_directory+'Par_file') # Open and read the Par_file
intEnergy = intEnergy * par_file.dt * factorGs
#intEnergy = intEnergy/scalingFactor
if args.substract:
intEnergySubstract = intEnergySubstract * par_file.dt * factorGsSubstract
if "max" in args.name_of_files:
#scalingFactor = (vx0**2+vz0**2).max()
intEnergy = intEnergy * factorGs
#intEnergy = intEnergy/scalingFactor
if args.substract:
intEnergySubstract = intEnergySubstract * factorGsSubstract
mask0=~np.isinf(intEnergy)
intEnergy[~mask0]=min(intEnergy[mask0])
if args.substract:
mask0substract=~np.isinf(intEnergySubstract)
intEnergySubstract[~mask0substract]=min(intEnergySubstract[mask0substract])
nxProfiles = int(args.nxnzProfiles[0])
nzProfiles = int(args.nxnzProfiles[1])
# Color map to use:
cmap = cm.BuPu #cm.Greys #cm.BuPu
if args.clim:
climMin = float(args.clim[0])
climMax = float(args.clim[1])
# Display limits:
if args.xlim:
xmin=float(args.xlim[0])
xmax=float(args.xlim[1])
else:
xmin=x.min()
xmax=0.98*x.max()
if args.zlim:
zmin=float(args.zlim[0])
zmax=float(args.zlim[1])
else:
zmin=z.min()+0.001*(z.max()-z.min())
zmax=z.max()-0.001*(z.max()-z.min())
#print("zmin:",zmin,"zmax:",zmax)
if args.displayPoints:
if not args.noplot:
plt.plot(x,z,'o')
plt.show()
if args.quickDisplay: # Quick ways to plot the energy using the non homogeneous grid:
if not args.nolog:
intEnergy = 10.0*np.log10(intEnergy)
if not args.noplot:
#plt.tricontourf(x,z,intEnergy,20,shading='gouraud',extend="both",cmap=cmap)
plt.figure(figsize=(6,2))
plt.tripcolor(x,z,intEnergy,shading='gouraud',cmap=cmap)
plt.axis([xmin, xmax, zmin, zmax])
plt.colorbar()
if args.clim:
plt.clim(climMin,climMax)
plt.show()
sys.exit()
#cmap.set_bad('w',1.)
#%%
# Interpolation on a regular grid
# Size of regular grid
nx, nz = args.nx,args.nz
# Margins around the model
xmargin = (xmax - xmin) / 1000.0
zmargin = (zmax - zmin) / 1000.0
# Generate a regular grid to interpolate the data.
xil = np.linspace(xmin-xmargin, xmax+xmargin, nx)
zil = np.linspace(zmin-zmargin, zmax+zmargin, nz)
xi, zi = np.meshgrid(xil, zil)
#print("TODO max zil:",zil.max()," min zil:",zil.min())
if os.path.isfile(directory+args.name_of_files+"AllInterpolatedx"+str(nx)+"z"+str(nz)) and not args.reset: # If the interpolation has already been done and written
if args.verbose:
print("Interpolated field file has been found. Loading...")
intEnergyi = np.load(directory+args.name_of_files+"AllInterpolatedx"+str(nx)+"z"+str(nz))
if args.verbose:
print("Done")
else:
# Interpolate using delaunay triangularization:
if args.verbose:
print("Interpolation...")
intEnergyi = mlab.griddata(x,z,intEnergy,xi,zi,interp="linear")
if args.verbose:
print("Done")
if args.writeInterpolatedField:
if args.verbose:
print("Writing the interpolated field to file..."+directory+args.name_of_files+"AllInterpolatedx"+str(nx)+"z"+str(nz))
intEnergyi.dump(directory+args.name_of_files+"AllInterpolatedx"+str(nx)+"z"+str(nz))
if args.verbose:
print("Done")
#### OPTION SUBSTRACT ####
if args.substract:
if os.path.isfile(args.substract+"AllInterpolatedx"+str(nx)+"z"+str(nz)) and not args.reset: # If the interpolation has already been done and written
if args.verbose:
print("Interpolated substracted field file has been found. Loading...")
intEnergyiSubstract = np.load(args.substract+"AllInterpolatedx"+str(nx)+"z"+str(nz))
if args.verbose:
print("Done")
else:
# Interpolate using delaunay triangularization:
if args.verbose:
print("Interpolation of substracted file...")
intEnergyiSubstract = mlab.griddata(xSubstract,zSubstract,intEnergySubstract,xi,zi,interp="linear")
if args.verbose:
print("Done")
if args.writeInterpolatedField:
if args.verbose:
print("Writing the interpolated substracted field to file..."+args.substract+"AllInterpolatedx"+str(nx)+"z"+str(nz))
intEnergyiSubstract.dump(args.substract+"AllInterpolatedx"+str(nx)+"z"+str(nz))
if args.verbose:
print("Done")
intEnergyi = abs(intEnergyi - intEnergyiSubstract) # Substract field with given other field
##########################
intEnergyi = np.log10(intEnergyi)
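# From here on the interpolated field holds log10(energy); beware that the
# profile code below re-exponentiates with 10**(zi/10.), i.e. it treats the
# values as decibels (10*log10), a factor of 10 apart from plain log10.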
# Normalize
#if "max" in args.name_of_files or "integrated" in args.name_of_files:
# if not args.substract: # TODO maybe not needed but could create problem
# if args.verbose:
# print("Normalizing...")
# valueAtRef = interpolateValue(intEnergyi,xil,zil,float(args.ref[0]),float(args.ref[1]))
# if args.verbose:
# print("Value at reference point (",args.ref[0],",",args.ref[1],") is ",valueAtRef)
# intEnergyi = intEnergyi - valueAtRef
# valueAtRef = interpolateValue(intEnergyi,xil,zil,float(args.ref[0]),float(args.ref[1]))
# if args.verbose:
# print("Value at reference point (",args.ref[0],",",args.ref[1],") is ",valueAtRef)
# if args.verbose:
# print("Done")
#Plot:
if not args.noplot:
if args.verbose:
print("Plots...")
plt.figure(1,figsize=(15,6))
plt.pcolormesh(xi,zi,intEnergyi,shading='gouraud',cmap=cmap)
plt.colorbar()
plt.axis([xmin, xmax, zmin, zmax])
if args.clim:
plt.clim(climMin,climMax)
plt.title(args.title)
font = {'family' : 'serif','size':fontsize}
plt.rc('font', **font)
plt.xlabel("Range (m)",fontsize=fontsize+3)
plt.ylabel("Depth (m)",fontsize=fontsize+3)
#plt.rc('text', usetex=True)
if args.verbose:
print("Done")
if args.profiles:
# Plot energy profiles:
if args.verbose:
print("Print profiles...")
cmap2 = plt.get_cmap('prism')
if not args.noplot and nzProfiles > 1:
plt.figure(2)
zVector=np.linspace(zminProfiles-zmaxProfiles,zmaxProfiles,nzProfiles) # z coordinates of horizontal profiles
#zVector=np.arange(zmin/(nzProfiles + 1),zmin,zmin/(nzProfiles + 1)) # z coordinates of horizontal profiles
#print("zVector:",zVector,"zmin:",zmin,"nzProfiles:",nzProfiles)
colors = [cmap2(i) for i in np.linspace(0, 1, len(zVector))] # Color vector
xvect=np.linspace(xmin,xmax,num) # x vector
for i,zz in enumerate(zVector): # loop on the depths, plot all horizontal profiles in a figure (figure 2)
x0,z0=xmin,zz
x1,z1=xmax,zz
idxX0,idxZ0 = find_index(x0,z0,xil,zil) # indices of the closest point in the 2D grid
idxX1,idxZ1 = find_index(x1,z1,xil,zil) # indices of the closest point in the 2D grid
if not args.noplot and nzProfiles > 1:
plt.figure(1)
plt.hold(True)
xLine, zLine = np.linspace(idxX0,idxX1, num), np.linspace(idxZ0, idxZ1, num) # vector containing the indices
if args.verbose:
print("Profile 1 to be saved: (x0,z0) = (",x0,",",z0,") (x1,z1) = (",x1,",",z1,")")
#print("xmin:",xmin,"xmax:",xmax,"zz:",zz)
zi = intEnergyi[zLine.astype(np.int),xLine.astype(np.int)] # If you have got an error here try to choose a lower z1 or a bigger z0! 1
        # Extract the values along the line, using linear interpolation (order=1)
if type(intEnergyi) == np.ndarray:
zi = scipy.ndimage.map_coordinates(intEnergyi, np.vstack((zLine,xLine)),order=1)
else:
zi = scipy.ndimage.map_coordinates(intEnergyi.filled(), np.vstack((zLine,xLine)),order=1)
if not args.noplot and nzProfiles > 1:
plt.plot([x0, x1], [z0, z1], 'o-',color=colors[i])
plt.figure(2)
plt.plot(xvect,zi,color=colors[i])
plt.xlabel("Range (m)",fontsize=fontsize+3)
if not args.nolog:
plt.ylabel("Log of integrated energy",fontsize=fontsize+3)
else:
plt.ylabel("Integrated energy",fontsize=fontsize+3)
plt.title(args.title)
if not args.noplot and nzProfiles > 1:
plt.xlim([xmin,xmax])
if not args.noplot and nxProfiles > 1:
plt.figure(3)
xVector=np.arange(xmax/(nxProfiles + 1),xmax,xmax/(nxProfiles + 1))
colors = [cmap2(i) for i in np.linspace(0, 1, len(xVector))]
z0=zminProfiles
z1=zmaxProfiles # Be careful! This point can't be too close to zmax!
zvect=np.linspace(z0,z1,num)
depthIntegratedEnergy=np.zeros(len(xVector))
#depthIntegratedEnergy2=np.zeros(len(xVector))
for i,xx in enumerate(xVector): # Loop on the ranges, plot all vertical profiles in a figure.
x0=xx
x1=xx
idxX0,idxZ0 = find_index(x0,z0,xil,zil) # indices of the closest point in the 2D grid
idxX1,idxZ1 = find_index(x1,z1,xil,zil) # indices of the closest point in the 2D grid
if not args.noplot and nxProfiles > 1:
plt.figure(1)
plt.hold(True)
if args.verbose:
print("Profile 2 to be saved: (x0,z0) = (",x0,",",z0,") (x1,z1) = (",x1,",",z1,")")
xLine, zLine = np.linspace(idxX0,idxX1, num), np.linspace(idxZ0, idxZ1, num)
#print("xx:",xmin,"xil:",xil,"zil:",zil)
zi = intEnergyi[zLine.astype(np.int),xLine.astype(np.int)] # If you have got an error here try to choose a lower z1 or a bigger z0! 2
        # Extract the values along the line, using linear interpolation (order=1)
if type(intEnergyi) == np.ndarray:
zi = scipy.ndimage.interpolation.map_coordinates(intEnergyi, np.vstack((zLine,xLine)),order=1)
else:
zi = scipy.ndimage.interpolation.map_coordinates(intEnergyi.filled(), np.vstack((zLine,xLine)),order=1)
#depthIntegratedEnergy[i]=zi2.sum()
#depthIntegratedEnergy2[i]=zi.sum()
if not args.nolog:
depthIntegratedEnergy[i]=10*np.log10(np.power(10,zi/10.0).sum())
else:
depthIntegratedEnergy[i]=np.power(10,zi/10.0).sum()
if not args.noplot and nxProfiles > 1:
plt.plot([x0, x1], [z0, z1], 'o-',color=colors[i])
plt.figure(3)
plt.plot(zi,zvect,color=colors[i]) # Without filtering
plt.xlabel("Depth (m)",fontsize=fontsize+3)
if not args.nolog:
plt.ylabel("Log of integrated energy",fontsize=fontsize+3)
else:
plt.ylabel("Integrated energy",fontsize=fontsize+3)
#plt.plot(zi2,zvect,color=colors[i])
plt.ylim([z0,z1])
plt.title(args.title)
if not args.noplot and nxProfiles > 1:
plt.figure(4)
plt.plot(xVector,depthIntegratedEnergy,'o-')
#plt.plot(xVector,depthIntegratedEnergy2,'o-')
plt.xlabel("Range (m)",fontsize=fontsize+3)
if not args.nolog:
plt.ylabel("Log 10 of total energy in water",fontsize=fontsize+3)
else:
plt.ylabel("Total energy in water",fontsize=fontsize+3)
plt.title(args.title)
if args.verbose:
print("Done")
if args.writeInterpolatedField:
if args.verbose:
print("Saving energy vs range...")
np.savetxt(directory+args.name_of_files+"_energy_vs_range",np.dstack((xVector,depthIntegratedEnergy))[0])
print("File ",directory+args.name_of_files+"_energy_vs_range has been written")
### SAVE ONE PROFILE ###
if args.saveOneProfile:
if args.verbose:
print("Saving one profile...")
# properties of profile to be saved (if option --sp given):
x0profile = float(args.saveOneProfile[0])
x1profile = float(args.saveOneProfile[1])
z0profile = float(args.saveOneProfile[2])
z1profile = float(args.saveOneProfile[3])
x0,z0 = x0profile,z0profile
x1,z1 = x1profile,z1profile
if z0 == z1:
vect = np.linspace(x0,x1,num)
xlabel = "Range (m)"
elif x0 == x1:
vect = np.linspace(z0,z1,num)
xlabel = "Depth (m)"
else:
sys.exit("Tilted profiles are not handled for now!")
if args.verbose:
print("Profile to be saved: (x0,z0) = (",x0,",",z0,") (x1,z1) = (",x1,",",z1,")")
idxX0,idxZ0 = find_index(x0,z0,xil,zil)
idxX1,idxZ1 = find_index(x1,z1,xil,zil)
if not args.noplot:
plt.figure(1)
plt.hold(True)
xLine, zLine = np.linspace(idxX0,idxX1, num), np.linspace(idxZ0, idxZ1, num)
# Extract the values along the line: nearest-grid-point first, then via a cubic spline
zi1 = intEnergyi[zLine.astype(np.int),xLine.astype(np.int)] # If you get an error here, try to choose a lower z1 or a bigger z0! 3
if type(intEnergyi) == np.ndarray:
#zi2 = scipy.ndimage.map_coordinates(np.transpose(intEnergyi), np.vstack((xLine,zLine)),order=1)
sp = interpolate.RectBivariateSpline(zil,xil,intEnergyi, kx=3, ky=3, s=7)
else:
#zi2 = scipy.ndimage.map_coordinates(np.transpose(intEnergyi).filled(), np.vstack((xLine,zLine)),order=1)
sp = interpolate.RectBivariateSpline(zil,xil,intEnergyi.filled(), kx=3, ky=3, s=7)
if x0 == x1:
zi = [float(sp([vect[i]],[x0])) for i in range(num)]
if z0 == z1:
zi = [float(sp([z0],[vect[i]])) for i in range(num)]
#print(zi2,sp([140000.0],[-2000]),sp([140000.0],[-2500]))
#depthIntegratedEnergy2[i]=zi.sum()
#if not args.nolog:
# depthIntegratedEnergy[i]=np.power(10,zi/10.0).sum()
#else:
# depthIntegratedEnergy[i]=zi.sum()
if not args.noplot:
plt.hold(True)
plt.plot([x0, x1], [z0, z1], 'o-',color="black",linewidth=3)
plt.figure(2)
plt.plot(vect,zi,color="black",linewidth=3)
plt.plot(vect,zi1,color="green",linewidth=3)
#plt.plot(vect,zi2,color="red",linewidth=3)
plt.xlabel(xlabel,fontsize=fontsize+3)
if not args.nolog:
plt.ylabel("Log of integrated energy",fontsize=fontsize+3)
else:
plt.ylabel("Integrated energy",fontsize=fontsize+3)
if nzProfiles == 1:
plt.xlim([x0,x1])
plt.title(args.title)
i = 0
#while os.path.isfile(args.input_directory+args.name_of_files+"_profile_"+str(i)): # If a profile of this name has already been written
# i = i+1
np.savetxt(directory+args.name_of_files+"_profile_"+str(i),np.dstack((vect,zi))[0])
print("File ",directory+args.name_of_files+"_profile_"+str(i)," has been written")
if args.verbose:
print("Done")
if not args.noplot:
plt.show()
| gpl-2.0 |
kagayakidan/scikit-learn | examples/manifold/plot_lle_digits.py | 59 | 8576 | """
=============================================================================
Manifold learning on handwritten digits: Locally Linear Embedding, Isomap...
=============================================================================
An illustration of various embeddings on the digits dataset.
The RandomTreesEmbedding, from the :mod:`sklearn.ensemble` module, is not
technically a manifold embedding method, as it learns a high-dimensional
representation to which we apply a dimensionality reduction method.
However, it is often useful to cast a dataset into a representation in
which the classes are linearly-separable.
t-SNE will be initialized with the embedding that is generated by PCA in
this example, which is not the default setting. It ensures global stability
of the embedding, i.e., the embedding does not depend on random
initialization.
"""
# Authors: Fabian Pedregosa <[email protected]>
# Olivier Grisel <[email protected]>
# Mathieu Blondel <[email protected]>
# Gael Varoquaux
# License: BSD 3 clause (C) INRIA 2011
print(__doc__)
from time import time
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import offsetbox
from sklearn import (manifold, datasets, decomposition, ensemble,
discriminant_analysis, random_projection)
digits = datasets.load_digits(n_class=6)
X = digits.data
y = digits.target
n_samples, n_features = X.shape
n_neighbors = 30
#----------------------------------------------------------------------
# Scale and visualize the embedding vectors
def plot_embedding(X, title=None):
x_min, x_max = np.min(X, 0), np.max(X, 0)
X = (X - x_min) / (x_max - x_min)
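# the embedding is now min-max scaled into the unit square for display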
plt.figure()
ax = plt.subplot(111)
for i in range(X.shape[0]):
plt.text(X[i, 0], X[i, 1], str(digits.target[i]),
color=plt.cm.Set1(y[i] / 10.),
fontdict={'weight': 'bold', 'size': 9})
if hasattr(offsetbox, 'AnnotationBbox'):
# only print thumbnails with matplotlib > 1.0
shown_images = np.array([[1., 1.]]) # just something big
for i in range(digits.data.shape[0]):
dist = np.sum((X[i] - shown_images) ** 2, 1)
if np.min(dist) < 4e-3:
# don't show points that are too close
continue
shown_images = np.r_[shown_images, [X[i]]]
imagebox = offsetbox.AnnotationBbox(
offsetbox.OffsetImage(digits.images[i], cmap=plt.cm.gray_r),
X[i])
ax.add_artist(imagebox)
plt.xticks([]), plt.yticks([])
if title is not None:
plt.title(title)
#----------------------------------------------------------------------
# Plot images of the digits
n_img_per_row = 20
img = np.zeros((10 * n_img_per_row, 10 * n_img_per_row))
for i in range(n_img_per_row):
ix = 10 * i + 1
for j in range(n_img_per_row):
iy = 10 * j + 1
img[ix:ix + 8, iy:iy + 8] = X[i * n_img_per_row + j].reshape((8, 8))
plt.imshow(img, cmap=plt.cm.binary)
plt.xticks([])
plt.yticks([])
plt.title('A selection from the 64-dimensional digits dataset')
#----------------------------------------------------------------------
# Random 2D projection using a random unitary matrix
print("Computing random projection")
rp = random_projection.SparseRandomProjection(n_components=2, random_state=42)
X_projected = rp.fit_transform(X)
plot_embedding(X_projected, "Random Projection of the digits")
#----------------------------------------------------------------------
# Projection on to the first 2 principal components
print("Computing PCA projection")
t0 = time()
X_pca = decomposition.TruncatedSVD(n_components=2).fit_transform(X)
plot_embedding(X_pca,
"Principal Components projection of the digits (time %.2fs)" %
(time() - t0))
#----------------------------------------------------------------------
# Projection on to the first 2 linear discriminant components
print("Computing Linear Discriminant Analysis projection")
X2 = X.copy()
X2.flat[::X.shape[1] + 1] += 0.01 # Make X invertible
t0 = time()
X_lda = discriminant_analysis.LinearDiscriminantAnalysis(n_components=2).fit_transform(X2, y)
plot_embedding(X_lda,
"Linear Discriminant projection of the digits (time %.2fs)" %
(time() - t0))
#----------------------------------------------------------------------
# Isomap projection of the digits dataset
print("Computing Isomap embedding")
t0 = time()
X_iso = manifold.Isomap(n_neighbors, n_components=2).fit_transform(X)
print("Done.")
plot_embedding(X_iso,
"Isomap projection of the digits (time %.2fs)" %
(time() - t0))
#----------------------------------------------------------------------
# Locally linear embedding of the digits dataset
print("Computing LLE embedding")
clf = manifold.LocallyLinearEmbedding(n_neighbors, n_components=2,
method='standard')
t0 = time()
X_lle = clf.fit_transform(X)
print("Done. Reconstruction error: %g" % clf.reconstruction_error_)
plot_embedding(X_lle,
"Locally Linear Embedding of the digits (time %.2fs)" %
(time() - t0))
#----------------------------------------------------------------------
# Modified Locally linear embedding of the digits dataset
print("Computing modified LLE embedding")
clf = manifold.LocallyLinearEmbedding(n_neighbors, n_components=2,
method='modified')
t0 = time()
X_mlle = clf.fit_transform(X)
print("Done. Reconstruction error: %g" % clf.reconstruction_error_)
plot_embedding(X_mlle,
"Modified Locally Linear Embedding of the digits (time %.2fs)" %
(time() - t0))
#----------------------------------------------------------------------
# HLLE embedding of the digits dataset
print("Computing Hessian LLE embedding")
clf = manifold.LocallyLinearEmbedding(n_neighbors, n_components=2,
method='hessian')
t0 = time()
X_hlle = clf.fit_transform(X)
print("Done. Reconstruction error: %g" % clf.reconstruction_error_)
plot_embedding(X_hlle,
"Hessian Locally Linear Embedding of the digits (time %.2fs)" %
(time() - t0))
#----------------------------------------------------------------------
# LTSA embedding of the digits dataset
print("Computing LTSA embedding")
clf = manifold.LocallyLinearEmbedding(n_neighbors, n_components=2,
method='ltsa')
t0 = time()
X_ltsa = clf.fit_transform(X)
print("Done. Reconstruction error: %g" % clf.reconstruction_error_)
plot_embedding(X_ltsa,
"Local Tangent Space Alignment of the digits (time %.2fs)" %
(time() - t0))
#----------------------------------------------------------------------
# MDS embedding of the digits dataset
print("Computing MDS embedding")
clf = manifold.MDS(n_components=2, n_init=1, max_iter=100)
t0 = time()
X_mds = clf.fit_transform(X)
print("Done. Stress: %f" % clf.stress_)
plot_embedding(X_mds,
"MDS embedding of the digits (time %.2fs)" %
(time() - t0))
#----------------------------------------------------------------------
# Random Trees embedding of the digits dataset
print("Computing Totally Random Trees embedding")
hasher = ensemble.RandomTreesEmbedding(n_estimators=200, random_state=0,
max_depth=5)
t0 = time()
X_transformed = hasher.fit_transform(X)
pca = decomposition.TruncatedSVD(n_components=2)
X_reduced = pca.fit_transform(X_transformed)
plot_embedding(X_reduced,
"Random forest embedding of the digits (time %.2fs)" %
(time() - t0))
#----------------------------------------------------------------------
# Spectral embedding of the digits dataset
print("Computing Spectral embedding")
embedder = manifold.SpectralEmbedding(n_components=2, random_state=0,
eigen_solver="arpack")
t0 = time()
X_se = embedder.fit_transform(X)
plot_embedding(X_se,
"Spectral embedding of the digits (time %.2fs)" %
(time() - t0))
#----------------------------------------------------------------------
# t-SNE embedding of the digits dataset
print("Computing t-SNE embedding")
tsne = manifold.TSNE(n_components=2, init='pca', random_state=0)
t0 = time()
X_tsne = tsne.fit_transform(X)
plot_embedding(X_tsne,
"t-SNE embedding of the digits (time %.2fs)" %
(time() - t0))
plt.show()
| bsd-3-clause |
Jimmy-Morzaria/scikit-learn | examples/ensemble/plot_forest_importances_faces.py | 403 | 1519 | """
=================================================
Pixel importances with a parallel forest of trees
=================================================
This example shows the use of forests of trees to evaluate the importance
of the pixels in an image classification task (faces). The hotter the pixel,
the more important.
The code below also illustrates how the construction and the computation
of the predictions can be parallelized within multiple jobs.
"""
print(__doc__)
from time import time
import matplotlib.pyplot as plt
from sklearn.datasets import fetch_olivetti_faces
from sklearn.ensemble import ExtraTreesClassifier
# Number of cores to use to perform parallel fitting of the forest model
n_jobs = 1
# Load the faces dataset
data = fetch_olivetti_faces()
X = data.images.reshape((len(data.images), -1))
y = data.target
mask = y < 5 # Limit to 5 classes
X = X[mask]
y = y[mask]
# Build a forest and compute the pixel importances
print("Fitting ExtraTreesClassifier on faces data with %d cores..." % n_jobs)
t0 = time()
forest = ExtraTreesClassifier(n_estimators=1000,
max_features=128,
n_jobs=n_jobs,
random_state=0)
forest.fit(X, y)
print("done in %0.3fs" % (time() - t0))
importances = forest.feature_importances_
importances = importances.reshape(data.images[0].shape)
# Plot pixel importances
plt.matshow(importances, cmap=plt.cm.hot)
plt.title("Pixel importances with forests of trees")
plt.show()
| bsd-3-clause |
yipenggao/moose | python/MooseDocs/extensions/gchart.py | 4 | 11198 | #pylint: disable=missing-docstring
####################################################################################################
# DO NOT MODIFY THIS HEADER #
# MOOSE - Multiphysics Object Oriented Simulation Environment #
# #
# (c) 2010 Battelle Energy Alliance, LLC #
# ALL RIGHTS RESERVED #
# #
# Prepared by Battelle Energy Alliance, LLC #
# Under Contract No. DE-AC07-05ID14517 #
# With the U. S. Department of Energy #
# #
# See COPYRIGHT for full restrictions #
####################################################################################################
#pylint: enable=missing-docstring
import os
import logging
import pandas
import jinja2
from markdown.util import etree
from markdown.inlinepatterns import Pattern
import MooseDocs
from MooseDocs.common import nodes
from MooseMarkdownExtension import MooseMarkdownExtension
from MooseMarkdownCommon import MooseMarkdownCommon
LOG = logging.getLogger(__name__)
class GoogleChartExtension(MooseMarkdownExtension):
"""
Adds support for google charts.
"""
@staticmethod
def defaultConfig():
"""GoogleChartExtension configuration."""
config = MooseMarkdownExtension.defaultConfig()
return config
def extendMarkdown(self, md, md_globals):
"""
Adds google chart support for MOOSE flavored markdown.
"""
md.registerExtension(self)
config = self.getConfigs()
md.inlinePatterns.add('moose-line-chart',
LineChart(markdown_instance=md, **config),
'_begin')
md.inlinePatterns.add('moose-scatter-chart',
ScatterChart(markdown_instance=md, **config),
'_begin')
md.inlinePatterns.add('moose-diff-scatter-chart',
ScatterDiffChart(markdown_instance=md, **config),
'_begin')
def makeExtension(*args, **kwargs): #pylint: disable=invalid-name
"""Create GoogleChartExtension"""
return GoogleChartExtension(*args, **kwargs)
class GoogleChartBase(MooseMarkdownCommon, Pattern):
"""
Base class for !chart command.
"""
TEMPLATE = None
@staticmethod
def defaultSettings():
"""GoogleChartBase settings."""
settings = MooseMarkdownCommon.defaultSettings()
settings['caption'] = (None, "The caption to place after the float heading and number.")
settings['counter'] = ('figure', "The name of global counter to utilized for numbering.")
settings['csv'] = (None, "The name of the CSV file to load.")
return settings
def __init__(self, markdown_instance=None, **kwargs):
MooseMarkdownCommon.__init__(self, **kwargs)
regex = r'^!chart\s+(?P<template>{})(?:$|\s+)(?P<settings>.*)'.format(self.TEMPLATE)
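# matches e.g. "!chart line csv=data.csv ...": 'template' captures the chart
# type, 'settings' the remaining key=value pairs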
Pattern.__init__(self, regex, markdown_instance)
self._csv = dict() # CSV DataFrame cache
self._count = 0
self._status = None
def setStatus(self, message, *args):
"""
Set the error status message, this should be used in the arguments() and globals() methods.
"""
self._status = message.format(*args)
def clearStatus(self):
"""
Remove any existing error status messages.
"""
self._status = None
def arguments(self, settings):
"""
Method for modifying the template arguments to be applied to the jinja2 templates engine.
By default all the "settings" from the class are returned as template arguments.
Args:
settings[dict]: The class object settings.
"""
if settings['csv'] is None:
if isinstance(self.markdown.current, nodes.FileNodeBase):
self.setStatus("The 'csv' setting is required in {}.",
self.markdown.current.filename)
else:
self.setStatus("The 'csv' setting is required.")
settings['data_frame'] = pandas.DataFrame()
else:
settings['data_frame'] = self._readCSV(os.path.join(MooseDocs.ROOT_DIR,
settings['csv']))
return settings
def globals(self, env):
"""
Defines global template functions. (virtual)
Args:
env[jinja2.Environment]: Template object for adding global functions.
"""
pass
def handleMatch(self, match):
"""
Creates chart from a chart template.
"""
# Extract settings and template
template = match.group('template') + '.js'
settings = self.getSettings(match.group('settings'), legacy_style=False)
# Create a float element
div = self.createFloatElement(settings)
# Create 'chart_id' for linking JS with <div>
settings['chart_id'] = 'moose-google-{}-chart-{}'.format(self.TEMPLATE, int(self._count))
self._count += 1
# Paths to Google Chart template
paths = [os.path.join(MooseDocs.MOOSE_DIR, 'docs', 'templates', 'gchart'),
os.path.join(os.getcwd(), 'templates', 'gchart')]
# Apply the arguments to the template
self.clearStatus()
env = jinja2.Environment(loader=jinja2.FileSystemLoader(paths))
self.globals(env)
template = env.get_template(template)
complete = template.render(**self.arguments(settings))
if self._status is not None:
return self.createErrorElement(self._status, title="Google Chart Creation Error",
error=False)
# Create the <script> tag
script = etree.SubElement(div, 'script')
script.set('type', 'text/javascript')
script.text = self.markdown.htmlStash.store(complete, safe=True)
# Add the <div> to be replaced with the chart
el = etree.Element('div')
el.set('id', settings['chart_id'])
div.insert(0, el)
return div
def _readCSV(self, filename):
"""
Read the CSV data into a pandas DataFrame.
"""
if self._csv.get(filename, None) is None:
try:
self._csv[filename] = pandas.read_csv(filename)
except IOError:
if isinstance(self.markdown.current, nodes.FileNodeBase):
self.setStatus("Failed to read CSV file '{}' in chart command of {}.",
filename, self.markdown.current.filename)
else:
self.setStatus("Failed to read CSV file '{}' in chart command.", filename)
return pandas.DataFrame()
return self._csv[filename]
class ColumnChartBase(GoogleChartBase):
"""
Base class for column based chart types (e.g., 'line', 'scatter').
"""
@staticmethod
def defaultSettings():
"""LineChart settings."""
settings = GoogleChartBase.defaultSettings()
settings['columns'] = ('', "A comma separated list of names defining the columns from the "
"the CSV to extract for plotting in the chart.")
settings['column_names'] = ('', "A comma separated list of names to associate with each "
"column, the number of names must match the number of "
"columns.")
settings['title'] = ('', "The chart title.")
settings['subtitle'] = ('', "The chart sub-title.")
settings['chart_width'] = (900, "The Google chart width.")
settings['chart_height'] = (400, "The Google chart height.")
return settings
def arguments(self, settings):
"""
Define template arguments to pass to template.
"""
settings = super(ColumnChartBase, self).arguments(settings)
# Update the 'columns' and 'column_names'
settings['columns'] = [col.strip() for col in settings['columns'].split(',')]
if settings['column_names']:
settings['column_names'] = [col.strip() for col in settings['column_names'].split(',')]
else:
settings['column_names'] = settings['columns']
if len(settings['column_names']) != len(settings['columns']):
LOG.error("The 'column_names' list must be the same length as 'columns'.")
settings['column_names'] = settings['columns']
return settings
class LineChart(ColumnChartBase):
"""
Creates a Google line chart from CSV data.
"""
TEMPLATE = 'line'
class ScatterChart(ColumnChartBase):
"""
Creates a Google scatter chart from CSV data.
"""
TEMPLATE = 'scatter'
@staticmethod
def defaultSettings():
"""ScatterChart settings."""
settings = ColumnChartBase.defaultSettings()
settings['vaxis_title'] = ('y', "The vertical y-axis title.")
settings['haxis_title'] = ('x', "The horizontal x-axis title.")
settings['vaxis_ticks'] = (None, "The vertical x-axis tick marks (default: auto)")
settings['haxis_ticks'] = (None, "The vertical x-axis tick marks (default: auto)")
return settings
class ScatterDiffChart(ScatterChart):
"""
Creates a Google scatter diff chart from CSV data.
"""
TEMPLATE = 'diffscatter'
@staticmethod
def defaultSettings():
"""DiffScatterChart settings"""
settings = ScatterChart.defaultSettings()
settings['gold'] = ('', "The gold file to use for comparison, by default the file provided "
"in the 'csv' setting is used but with a gold directory prefix.")
return settings
def arguments(self, settings):
"""
Define template arguments for diff scatter chart.
"""
settings = super(ScatterDiffChart, self).arguments(settings)
if not settings['gold']:
base, name = os.path.split(settings['csv'])
settings['gold'] = os.path.join(base, 'gold', name)
settings['gold_data_frame'] = self._readCSV(os.path.join(MooseDocs.ROOT_DIR,
settings['gold']))
if settings['gold_data_frame'].empty:
self.setStatus("The gold file ({}) does not exist or does not contain data.",
settings['gold'])
return settings
| lgpl-2.1 |
rahul-c1/scikit-learn | sklearn/metrics/cluster/tests/test_supervised.py | 44 | 7663 | import numpy as np
from sklearn.metrics.cluster import adjusted_rand_score
from sklearn.metrics.cluster import homogeneity_score
from sklearn.metrics.cluster import completeness_score
from sklearn.metrics.cluster import v_measure_score
from sklearn.metrics.cluster import homogeneity_completeness_v_measure
from sklearn.metrics.cluster import adjusted_mutual_info_score
from sklearn.metrics.cluster import normalized_mutual_info_score
from sklearn.metrics.cluster import mutual_info_score
from sklearn.metrics.cluster import expected_mutual_information
from sklearn.metrics.cluster import contingency_matrix
from sklearn.metrics.cluster import entropy
from sklearn.utils.testing import assert_raise_message
from nose.tools import assert_almost_equal
from nose.tools import assert_equal
from numpy.testing import assert_array_almost_equal
score_funcs = [
adjusted_rand_score,
homogeneity_score,
completeness_score,
v_measure_score,
adjusted_mutual_info_score,
normalized_mutual_info_score,
]
def test_error_messages_on_wrong_input():
for score_func in score_funcs:
expected = ('labels_true and labels_pred must have same size,'
' got 2 and 3')
assert_raise_message(ValueError, expected, score_func,
[0, 1], [1, 1, 1])
expected = "labels_true must be 1D: shape is (2"
assert_raise_message(ValueError, expected, score_func,
[[0, 1], [1, 0]], [1, 1, 1])
expected = "labels_pred must be 1D: shape is (2"
assert_raise_message(ValueError, expected, score_func,
[0, 1, 0], [[1, 1], [0, 0]])
def test_perfect_matches():
for score_func in score_funcs:
assert_equal(score_func([], []), 1.0)
assert_equal(score_func([0], [1]), 1.0)
assert_equal(score_func([0, 0, 0], [0, 0, 0]), 1.0)
assert_equal(score_func([0, 1, 0], [42, 7, 42]), 1.0)
assert_equal(score_func([0., 1., 0.], [42., 7., 42.]), 1.0)
assert_equal(score_func([0., 1., 2.], [42., 7., 2.]), 1.0)
assert_equal(score_func([0, 1, 2], [42, 7, 2]), 1.0)
def test_homogeneous_but_not_complete_labeling():
# homogeneous but not complete clustering
h, c, v = homogeneity_completeness_v_measure(
[0, 0, 0, 1, 1, 1],
[0, 0, 0, 1, 2, 2])
assert_almost_equal(h, 1.00, 2)
assert_almost_equal(c, 0.69, 2)
assert_almost_equal(v, 0.81, 2)
def test_complete_but_not_homogeneous_labeling():
# complete but not homogeneous clustering
h, c, v = homogeneity_completeness_v_measure(
[0, 0, 1, 1, 2, 2],
[0, 0, 1, 1, 1, 1])
assert_almost_equal(h, 0.58, 2)
assert_almost_equal(c, 1.00, 2)
assert_almost_equal(v, 0.73, 2)
def test_not_complete_and_not_homogeneous_labeling():
# neither complete nor homogeneous but not so bad either
h, c, v = homogeneity_completeness_v_measure(
[0, 0, 0, 1, 1, 1],
[0, 1, 0, 1, 2, 2])
assert_almost_equal(h, 0.67, 2)
assert_almost_equal(c, 0.42, 2)
assert_almost_equal(v, 0.52, 2)
def test_non_consecutive_labels():
# regression tests for labels with gaps
h, c, v = homogeneity_completeness_v_measure(
[0, 0, 0, 2, 2, 2],
[0, 1, 0, 1, 2, 2])
assert_almost_equal(h, 0.67, 2)
assert_almost_equal(c, 0.42, 2)
assert_almost_equal(v, 0.52, 2)
h, c, v = homogeneity_completeness_v_measure(
[0, 0, 0, 1, 1, 1],
[0, 4, 0, 4, 2, 2])
assert_almost_equal(h, 0.67, 2)
assert_almost_equal(c, 0.42, 2)
assert_almost_equal(v, 0.52, 2)
ari_1 = adjusted_rand_score([0, 0, 0, 1, 1, 1], [0, 1, 0, 1, 2, 2])
ari_2 = adjusted_rand_score([0, 0, 0, 1, 1, 1], [0, 4, 0, 4, 2, 2])
assert_almost_equal(ari_1, 0.24, 2)
assert_almost_equal(ari_2, 0.24, 2)
def uniform_labelings_scores(score_func, n_samples, k_range, n_runs=10,
seed=42):
"""Compute score for random uniform cluster labelings"""
random_labels = np.random.RandomState(seed).random_integers
scores = np.zeros((len(k_range), n_runs))
for i, k in enumerate(k_range):
for j in range(n_runs):
labels_a = random_labels(low=0, high=k - 1, size=n_samples)
labels_b = random_labels(low=0, high=k - 1, size=n_samples)
scores[i, j] = score_func(labels_a, labels_b)
return scores
def test_adjustment_for_chance():
"""Check that adjusted scores are almost zero on random labels"""
n_clusters_range = [2, 10, 50, 90]
n_samples = 100
n_runs = 10
scores = uniform_labelings_scores(
adjusted_rand_score, n_samples, n_clusters_range, n_runs)
max_abs_scores = np.abs(scores).max(axis=1)
assert_array_almost_equal(max_abs_scores, [0.02, 0.03, 0.03, 0.02], 2)
def test_adjusted_mutual_info_score():
"""Compute the Adjusted Mutual Information and test against known values"""
labels_a = np.array([1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3])
labels_b = np.array([1, 1, 1, 1, 2, 1, 2, 2, 2, 2, 3, 1, 3, 3, 3, 2, 2])
# Mutual information
mi = mutual_info_score(labels_a, labels_b)
assert_almost_equal(mi, 0.41022, 5)
# Expected mutual information
C = contingency_matrix(labels_a, labels_b)
n_samples = np.sum(C)
emi = expected_mutual_information(C, n_samples)
assert_almost_equal(emi, 0.15042, 5)
# Adjusted mutual information
ami = adjusted_mutual_info_score(labels_a, labels_b)
assert_almost_equal(ami, 0.27502, 5)
ami = adjusted_mutual_info_score([1, 1, 2, 2], [2, 2, 3, 3])
assert_equal(ami, 1.0)
# Test with a very large array
a110 = np.array([list(labels_a) * 110]).flatten()
b110 = np.array([list(labels_b) * 110]).flatten()
ami = adjusted_mutual_info_score(a110, b110)
# This is not accurate to more than 2 places
assert_almost_equal(ami, 0.37, 2)
def test_entropy():
ent = entropy([0, 0, 42.])
assert_almost_equal(ent, 0.6365141, 5)
assert_almost_equal(entropy([]), 1)
def test_contingency_matrix():
labels_a = np.array([1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3])
labels_b = np.array([1, 1, 1, 1, 2, 1, 2, 2, 2, 2, 3, 1, 3, 3, 3, 2, 2])
C = contingency_matrix(labels_a, labels_b)
C2 = np.histogram2d(labels_a, labels_b,
bins=(np.arange(1, 5),
np.arange(1, 5)))[0]
assert_array_almost_equal(C, C2)
C = contingency_matrix(labels_a, labels_b, eps=.1)
assert_array_almost_equal(C, C2 + .1)
def test_exactly_zero_info_score():
"""Check numerical stability when information is exactly zero"""
for i in np.logspace(1, 4, 4).astype(np.int):
labels_a, labels_b = np.ones(i, dtype=np.int),\
np.arange(i, dtype=np.int)
assert_equal(normalized_mutual_info_score(labels_a, labels_b), 0.0)
assert_equal(v_measure_score(labels_a, labels_b), 0.0)
assert_equal(adjusted_mutual_info_score(labels_a, labels_b), 0.0)
assert_equal(normalized_mutual_info_score(labels_a, labels_b), 0.0)
def test_v_measure_and_mutual_information(seed=36):
"""Check relation between v_measure, entropy and mutual information"""
for i in np.logspace(1, 4, 4).astype(np.int):
random_state = np.random.RandomState(seed)
labels_a, labels_b = random_state.random_integers(0, 10, i),\
random_state.random_integers(0, 10, i)
assert_almost_equal(v_measure_score(labels_a, labels_b),
2.0 * mutual_info_score(labels_a, labels_b) /
(entropy(labels_a) + entropy(labels_b)), 0)
| bsd-3-clause |
edhuckle/statsmodels | statsmodels/examples/ex_kernel_test_functional.py | 34 | 2246 | # -*- coding: utf-8 -*-
"""
Created on Tue Jan 08 19:03:20 2013
Author: Josef Perktold
"""
from __future__ import print_function
if __name__ == '__main__':
import numpy as np
from statsmodels.regression.linear_model import OLS
#from statsmodels.nonparametric.api import KernelReg
import statsmodels.sandbox.nonparametric.kernel_extras as smke
seed = np.random.randint(999999)
#seed = 661176
print(seed)
np.random.seed(seed)
sig_e = 0.5 #0.1
nobs, k_vars = 200, 1
x = np.random.uniform(-2, 2, size=(nobs, k_vars))
x.sort()
order = 3
exog = x**np.arange(order + 1)
beta = np.array([1, 1, 0.1, 0.0])[:order+1] # 1. / np.arange(1, order + 2)
y_true = np.dot(exog, beta)
y = y_true + sig_e * np.random.normal(size=nobs)
endog = y
print('DGP')
print('nobs=%d, beta=%r, sig_e=%3.1f' % (nobs, beta, sig_e))
mod_ols = OLS(endog, exog[:,:2])
res_ols = mod_ols.fit()
# bandwidths tried: 'cv_ls' (cross-validated), [1000, 0.5], [0.01, 0.45]
tst = smke.TestFForm(endog, exog[:,:2], bw=[0.01, 0.45], var_type='cc',
fform=lambda x,p: mod_ols.predict(p,x),
estimator=lambda y,x: OLS(y,x).fit().params,
nboot=1000)
print('bw', tst.bw)
print('tst.test_stat', tst.test_stat)
print(tst.sig)
print('tst.boots_results mean, min, max', (tst.boots_results.mean(),
tst.boots_results.min(),
tst.boots_results.max()))
print('lower tail bootstrap p-value', (tst.boots_results < tst.test_stat).mean())
print('upper tail bootstrap p-value', (tst.boots_results >= tst.test_stat).mean())
from scipy import stats
print('asymp. normal p-value (2-sided)', stats.norm.sf(np.abs(tst.test_stat))*2)
print('asymp. normal p-value (upper)', stats.norm.sf(tst.test_stat))
do_plot=True
if do_plot:
import matplotlib.pyplot as plt
plt.figure()
plt.plot(x, y, '.')
plt.plot(x, res_ols.fittedvalues)
plt.title('OLS fit')
plt.figure()
plt.hist(tst.boots_results.ravel(), bins=20)
plt.title('bootstrap histogram or test statistic')
plt.show()
| bsd-3-clause |
celiafish/VisTrails | scripts/dist/mac/setup_itk.py | 3 | 1120 | """
This is a setup.py script generated by py2applet
Usage:
python setup.py py2app
"""
from setuptools import setup
import sys
VERSION = '1.2.1263'
plist = dict(
CFBundleName='VisTrails',
CFBundleShortVersionString=VERSION,
CFBundleGetInfoString=' '.join(['VisTrails', VERSION]),
CFBundleExecutable='vistrails',
CFBundleIdentifier='edu.utah.sci.vistrails',
)
sys.path.append('../..')
APP = ['../../vistrails/run.py']
#comma-separated list of additional data files and
#folders to include (not for code!)
#DATA_FILES = ['/usr/local/graphviz-2.12/bin/dot',]
OPTIONS = {'argv_emulation': True,
'iconfile': 'vistrails/resources/vistrails_icon.icns',
'includes': 'sip,pylab,xml,netCDF3,netCDF4_utils,netcdftime,\
libxml2,libxslt, Cookie, BaseHTTPServer, multifile, shelve,itk, itkBase, itkConfig, itkLazy, itkTypes, itkExtras',
'packages': 'PyQt4,vtk,MySQLdb,matplotlib,vistrails,numpy,ZSI,api',
'plist': plist,
}
setup(
app=APP,
# data_files=DATA_FILES,
options={'py2app': OPTIONS},
setup_requires=['py2app'],
)
| bsd-3-clause |
aborovin/trading-with-python | cookbook/reconstructVXX/reconstructVXX.py | 77 | 3574 | # -*- coding: utf-8 -*-
"""
Reconstructing VXX from futures data
author: Jev Kuznetsov
License : BSD
"""
from __future__ import division
from pandas import *
import numpy as np
import os
class Future(object):
""" vix future class, used to keep data structures simple """
def __init__(self,series,code=None):
""" code is optional, example '2010_01' """
self.series = series.dropna() # price data
self.settleDate = self.series.index[-1]
self.dt = len(self.series) # roll period (this is default, should be recalculated)
self.code = code # string code 'YYYY_MM'
def monthNr(self):
""" get month nr from the future code """
return int(self.code.split('_')[1])
def dr(self,date):
""" days remaining before settlement, on a given date """
return(sum(self.series.index>date))
def price(self,date):
""" price on a date """
return self.series.get_value(date)
def returns(df):
""" daily return """
return (df/df.shift(1)-1)
def reconstructVXX():
"""
calculate VXX returns
needs a previously preprocessed file vix_futures.csv
"""
dataDir = os.path.expanduser('~')+'/twpData'
X = DataFrame.from_csv(dataDir+'/vix_futures.csv') # raw data table
# build end dates list & futures classes
futures = []
codes = X.columns
endDates = []
for code in codes:
f = Future(X[code],code=code)
print code,':', f.settleDate
endDates.append(f.settleDate)
futures.append(f)
endDates = np.array(endDates)
# set roll period of each future
for i in range(1,len(futures)):
futures[i].dt = futures[i].dr(futures[i-1].settleDate)
# Y is the result table
idx = X.index
Y = DataFrame(index=idx, columns=['first','second','days_left','w1','w2',
'ret','30days_avg'])
# W is the weight matrix
W = DataFrame(data = np.zeros(X.values.shape),index=idx,columns = X.columns)
# for VXX calculation see http://www.ipathetn.com/static/pdf/vix-prospectus.pdf
# page PS-20
for date in idx:
i = np.nonzero(endDates>=date)[0][0] # find the first non-expired future
first = futures[i] # first month futures class
second = futures[i+1] # second month futures class
dr = first.dr(date) # number of days remaining in the first futures contract
dt = first.dt #number of business days in roll period
W.set_value(date,codes[i],100*dr/dt)
W.set_value(date,codes[i+1],100*(dt-dr)/dt)
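# the weights roll linearly over the roll period: on the first day dr == dt,
# so the front month carries ~100%, decreasing to 0% at its settlement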
# this is all just debug info
p1 = first.price(date)
p2 = second.price(date)
w1 = 100*dr/dt
w2 = 100*(dt-dr)/dt
Y.set_value(date,'first',p1)
Y.set_value(date,'second',p2)
Y.set_value(date,'days_left',first.dr(date))
Y.set_value(date,'w1',w1)
Y.set_value(date,'w2',w2)
Y.set_value(date,'30days_avg',(p1*w1+p2*w2)/100)
valCurr = (X*W.shift(1)).sum(axis=1) # value on day N
valYest = (X.shift(1)*W.shift(1)).sum(axis=1) # value on day N-1
Y['ret'] = valCurr/valYest-1 # index return on day N
return Y
##-------------------Main script---------------------------
if __name__=="__main__":
Y = reconstructVXX()
print Y.head(30)
Y.to_csv('reconstructedVXX.csv')
| bsd-3-clause |
danlorts/hootenanny | docs/filters/mpl/mplw.py | 2 | 8324 | #!/usr/bin/env python
import os, sys
from optparse import *
#from matplotlib.pyplot import barh, title, grid , savefig, yticks, xlabel
from matplotlib.pyplot import *
from numpy import *
import csv
import string
class EApp(Exception):
'''Application specific exception.'''
pass
class Application():
'''
OPTIONS
-s OUTFILE, --style=OUTFILE
plot style
-o OUTFILE, --outfile=OUTFILE
The file name of the output file. If not specified the output file is
named like INFILE but with a .png file name extension.
-v, --verbose
Verbosely print processing information to stderr.
'''
def __init__(self, argv=None):
if not argv:
argv = sys.argv
self.usage = '%prog [options] inputfile'
self.option_list = [
Option("-o", "--outfile", action="store",
dest="outfile",
help="Output file"),
Option("-s", "--style", action="store",
dest="style", default="asciidoc", type="choice",
choices=['asciidoc','none'],
help="Layout type. LAYOUT=<asciidoc>"),
Option("--debug", action="store_true",
dest="do_debug",
help=SUPPRESS_HELP),
Option("-v", "--verbose", action="store_true",
dest="do_verbose", default=False,
help="verbose output"),
]
self.parser = OptionParser( usage=self.usage,
option_list=self.option_list)
(self.options, self.args) = self.parser.parse_args()
if len(self.args) != 1:
self.parser.print_help()
sys.exit(1)
self.options.infile = self.args[0]
def systemcmd(self, cmd):
if self.options.do_verbose:
msg = 'Execute: %s' % cmd
sys.stderr.write(msg + os.linesep)
else:
cmd += ' 2>/dev/null'
if os.system(cmd):
raise EApp, 'failed command: %s' % cmd
def run_for_real(self, infile, outfile):
'''Convert Graphviz notation in file infile to PNG file named outfile.'''
outfile = os.path.abspath(outfile)
outdir = os.path.dirname(outfile)
if not os.path.isdir(outdir):
raise EApp, 'directory does not exist: %s' % outdir
basefile = os.path.splitext(outfile)[0]
saved_cwd = os.getcwd()
os.chdir(outdir)
try:
######################################################### lvv
#### check MPL version
str_ver=matplotlib.__version__.split('.')
ver=float(str_ver[0]) + float(str_ver[1])/1000
if ver < 0.098: # extra 0 after the dot: the divisor for the minor version is 1000
sys.stderr.write('mplw warning: for matplotlib version < 0.98 styles are disabled')
self.options.style = 'none'
#### READ PY CODE
eval_lines=''
embeded_data = False
for line in infile:
if line.startswith('___'):
embeded_data = True
break
eval_lines += line
#### READ DATA
if embeded_data:
m = [] # matrix
# TODO replace csv with http://matplotlib.sourceforge.net/api/mlab_api.html#matplotlib.mlab.csv2rec
# also see http://matplotlib.sourceforge.net/api/mlab_api.html
for row in csv.reader(infile, delimiter=',', quotechar="'", skipinitialspace=True):
if row: # if not blank line
m.append(row)
# convert to float if it looks like a number
for i in range(len(m[-1])):
if len(m[-1][i].translate(string.maketrans('',''),' +-0123456789eE.')) == 0:
m[-1][i] = float(m[-1][i])
c = [[row[i] for row in m] for i in range(len(m[0]))] # transpose
infile.close()
#### EVAL
exec eval_lines
if self.options.style == 'asciidoc':
auto_adjust(gcf())
grid(True, color='0.7')
### TODO GRIDS
#rcParams['grid.color'] = 'g' # does not work
#grid.color : black # grid color
#grid.linestyle : : # dotted
#grid.linewidth : 0.5 # in points
savefig(outfile, facecolor='0.95', edgecolor='0.8') # MPL bug? not all edges(borders) are drawn
# TODO axes.linewidth : 1.0 # edge linewidth
else:
savefig(outfile)
if self.options.style != 'none' :
sys.stderr.write('mplw warning: unknown style - ignored')
#########################################################
finally:
os.chdir(saved_cwd)
def run(self):
if self.options.infile == '-':
sys.stdout.write(' ') # To suppress asciidoc 'no output from filter' warnings.
if self.options.outfile is None:
sys.stderr.write('OUTFILE must be specified')
sys.exit(1)
infile = sys.stdin
else:
if not os.path.isfile(self.options.infile):
raise EApp, 'input file does not exist: %s' % self.options.infile
infile = open(self.options.infile)
if self.options.outfile is None:
outfile = os.path.splitext(self.options.infile)[0] + '.png'
else:
outfile = self.options.outfile
self.run_for_real(infile, outfile)
def benchmark(label, val, label_part=-1):
bar_width = 0.35
ytick_pos = arange(len(val))+.5
label.reverse()
val.reverse()
#
fontsize = rcParams['font.size']
fixed_part = fontsize/72 * 3
h = (len(val)+1.4)*bar_width + fixed_part
gcf().set_figheight(h)
#step = ytick_pos[1] - ytick_pos[0]
#gca().set_ylim(ytick_pos[0]-1, ytick_pos[-1]+1)
yticks(ytick_pos, label, fontsize='large')
barh(ytick_pos, val, align="center", height=0.6)
gca().set_ybound(lower=ytick_pos[0]-0.7, upper=ytick_pos[-1]+0.7)
gca().set_xbound(upper=max(val)*1.1)
rcParams['axes.labelsize'] = 'large'
def auto_adjust(fig):
axes = fig.get_axes()
h = fig.get_figheight() # inch
w = fig.get_figwidth() # inch
fontsize = rcParams['font.size'] # point
dpi = rcParams['savefig.dpi'] # dots / inch
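# point sizes below are converted to inches by dividing by 72 (72 points per inch)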
# top, title
top_space = 1.7 # em
if axes[0].get_title(): # if there is a title # FIXME: MPL bug? always true
title_fontsize = matplotlib.font_manager.font_scalings[rcParams['axes.titlesize']] * fontsize
top_adjust = 1.0 - title_fontsize/72 * top_space /h
fig.subplots_adjust(top=top_adjust)
# bottom, xlabel
bottom_space = 1.3 # em
xtick_fontsize = matplotlib.font_manager.font_scalings[rcParams['xtick.labelsize']] * fontsize
bottom_adjust = xtick_fontsize/72 /h * bottom_space
if len(axes[0].get_xlabel()) != 0: # xlabel
xlabel_fontsize = matplotlib.font_manager.font_scalings[rcParams['axes.labelsize']] * fontsize
bottom_adjust += xlabel_fontsize/72 /h
fig.subplots_adjust(bottom=bottom_adjust)
# left labels
char_width = 0.8 # em
current = gca().get_position().get_points()
ll = gca().get_yticklabels()
max_ytick_length = max([len(l.get_text()) for l in ll])
max_ytick_length = max(6, max_ytick_length)
ytick_fontsize = matplotlib.font_manager.font_scalings[rcParams['ytick.labelsize']] * fontsize
left_adjust = max_ytick_length * char_width * ytick_fontsize/72 /w
if len(axes[0].get_ylabel()) > 0: # ylable # FIXME: MPL bug? always true
ylabel_fontsize = matplotlib.font_manager.font_scalings[rcParams['axes.labelsize']] * fontsize
left_adjust += ylabel_fontsize/72 /w
fig.subplots_adjust(left=left_adjust)
# righ margin
right_margin = 1.5 # em
#fig.subplots_adjust(right=1.0-fontsize/72/w * right_margin, hspace=0.2)
if __name__ == "__main__":
app = Application()
app.run()
# vim:ts=4 et sw=4 ft=python:
| gpl-3.0 |
sampadsaha5/sympy | sympy/holonomic/holonomic.py | 3 | 90995 | """Holonomic Functions and Differential Operators"""
from __future__ import print_function, division
from sympy import (symbols, Symbol, diff, S, Dummy, Order, rf, meijerint, I,
solve, limit, Float, nsimplify, gamma)
from sympy.printing import sstr
from sympy.core.compatibility import range
from sympy.functions.combinatorial.factorials import binomial, factorial
from sympy.core.sympify import sympify
from sympy.simplify.hyperexpand import hyperexpand
from sympy.functions.special.hyper import hyper, meijerg
from sympy.core.numbers import NaN, Infinity, NegativeInfinity
from sympy.matrices import Matrix
from sympy.functions.elementary.exponential import exp_polar, exp
from .linearsolver import NewMatrix
from .recurrence import HolonomicSequence, RecurrenceOperator, RecurrenceOperators
from .holonomicerrors import NotPowerSeriesError, NotHyperSeriesError, SingularityError, NotHolonomicError
from sympy.polys.rings import PolyElement
from sympy.polys.fields import FracElement
from sympy.polys.domains import QQ, ZZ, RR
from sympy.polys.domains.pythonrational import PythonRational
from sympy.polys.polyclasses import DMF
from sympy.polys.polyroots import roots
def DifferentialOperators(base, generator):
"""
Returns an Algebra of Differential Operators and the operator for
differentiation i.e. the `Dx` operator.
The first argument needs to be the base polynomial ring for the algebra
and the second argument must be a generator which can be either a
noncommutative Symbol or a string.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy import symbols
>>> from sympy.holonomic.holonomic import DifferentialOperators
>>> x = symbols('x')
>>> R, Dx = DifferentialOperators(ZZ.old_poly_ring(x), 'Dx')
"""
ring = DifferentialOperatorAlgebra(base, generator)
return (ring, ring.derivative_operator)
class DifferentialOperatorAlgebra(object):
"""
An Ore Algebra is a set of noncommutative polynomials in the
indeterminate `Dx` and coefficients in a base ring A. It follows the
commutation rule:
Dx * a = sigma(a) * Dx + delta(a)
Where sigma: A --> A is an endomorphism and delta: A --> A is a
skew-derivation i.e. delta(ab) = delta(a) * b + sigma(a) * delta(b)
If one takes the sigma as identity map and delta as the standard derivation
then it becomes the algebra of Differential Operators also called
a Weyl Algebra i.e. an algebra whose elements are Differential Operators.
This class represents a Weyl Algebra and serves as the parent ring for
Differential Operators.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy import symbols
>>> from sympy.holonomic.holonomic import DifferentialOperators
>>> x = symbols('x')
>>> R, Dx = DifferentialOperators(ZZ.old_poly_ring(x), 'Dx')
>>> R
Univariate Differential Operator Algebra in indeterminate Dx over the base ring
ZZ[x]
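
The commutation rule `Dx*a = a*Dx + a'` can be verified directly on the
generators defined above (an illustrative check):

>>> Dx*x == x*Dx + 1
True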
See Also
========
DifferentialOperator
"""
def __init__(self, base, generator):
# the base polynomial ring for the algebra
self.base = base
# the operator representing differentiation i.e. `Dx`
self.derivative_operator = DifferentialOperator(
[base.zero, base.one], self)
if generator is None:
self.gen_symbol = symbols('Dx', commutative=False)
else:
if isinstance(generator, str):
self.gen_symbol = symbols(generator, commutative=False)
elif isinstance(generator, Symbol):
self.gen_symbol = generator
def __str__(self):
string = 'Univariate Differential Operator Algebra in indeterminate '\
+ sstr(self.gen_symbol) + ' over the base ring ' + \
(self.base).__str__()
return string
__repr__ = __str__
def __eq__(self, other):
if self.base == other.base and self.gen_symbol == other.gen_symbol:
return True
else:
return False
class DifferentialOperator(object):
"""
Differential Operators are elements of Weyl Algebra. The Operators
are defined by a list of polynomials in the base ring and the
parent ring of the Operator.
Takes a list of polynomials for each power of Dx and the
parent ring which must be an instance of DifferentialOperatorAlgebra.
A Differential Operator can be created easily using
the operator `Dx`. See examples below.
Examples
========
>>> from sympy.holonomic.holonomic import DifferentialOperator, DifferentialOperators
>>> from sympy.polys.domains import ZZ, QQ
>>> from sympy import symbols
>>> x = symbols('x')
>>> R, Dx = DifferentialOperators(ZZ.old_poly_ring(x),'Dx')
>>> DifferentialOperator([0, 1, x**2], R)
(1)Dx + (x**2)Dx**2
>>> (x*Dx*x + 1 - Dx**2)**2
(2*x**2 + 2*x + 1) + (4*x**3 + 2*x**2 - 4)Dx + (x**4 - 6*x - 2)Dx**2 + (-2*x**2)Dx**3 + (1)Dx**4
See Also
========
DifferentialOperatorAlgebra
"""
_op_priority = 20
def __init__(self, list_of_poly, parent):
# the parent ring for this operator
# must be an DifferentialOperatorAlgebra object
self.parent = parent
base = self.parent.base
self.x = base.gens[0] if isinstance(base.gens[0], Symbol) else base.gens[0][0]
# sequence of polynomials in x for each power of Dx
# the list should not have trailing zeroes
# represents the operator
# convert the expressions into ring elements using from_sympy
if isinstance(list_of_poly, list):
for i, j in enumerate(list_of_poly):
if not isinstance(j, base.dtype):
list_of_poly[i] = base.from_sympy(sympify(j))
elif isinstance(j, base.dtype):
list_of_poly[i] = base.from_sympy(base.to_sympy(j))
self.listofpoly = list_of_poly
# highest power of `Dx`
self.order = len(self.listofpoly) - 1
def __mul__(self, other):
"""
Multiplies two DifferentialOperator and returns another
DifferentialOperator instance using the commutation rule
Dx*a = a*Dx + a'
"""
listofself = self.listofpoly
if not isinstance(other, DifferentialOperator):
if not isinstance(other, self.parent.base.dtype):
listofother = [self.parent.base.from_sympy(sympify(other))]
else:
listofother = [other]
else:
listofother = other.listofpoly
# multiplies a polynomial `b` with a list of polynomials
def _mul_dmp_diffop(b, listofother):
if isinstance(listofother, list):
sol = []
for i in listofother:
sol.append(i * b)
return sol
else:
return [b * listofother]
sol = _mul_dmp_diffop(listofself[0], listofother)
# compute Dx^i * b
def _mul_Dxi_b(b):
sol1 = [self.parent.base.zero]
sol2 = []
if isinstance(b, list):
for i in b:
sol1.append(i)
sol2.append(i.diff())
else:
sol1.append(self.parent.base.from_sympy(b))
sol2.append(self.parent.base.from_sympy(b).diff())
return _add_lists(sol1, sol2)
for i in range(1, len(listofself)):
# find Dx^i * b in ith iteration
listofother = _mul_Dxi_b(listofother)
# solution = solution + listofself[i] * (Dx^i * b)
sol = _add_lists(sol, _mul_dmp_diffop(listofself[i], listofother))
return DifferentialOperator(sol, self.parent)
def __rmul__(self, other):
if not isinstance(other, DifferentialOperator):
if not isinstance(other, self.parent.base.dtype):
other = (self.parent.base).from_sympy(sympify(other))
sol = []
for j in self.listofpoly:
sol.append(other * j)
return DifferentialOperator(sol, self.parent)
def __add__(self, other):
if isinstance(other, DifferentialOperator):
sol = _add_lists(self.listofpoly, other.listofpoly)
return DifferentialOperator(sol, self.parent)
else:
list_self = self.listofpoly
if not isinstance(other, self.parent.base.dtype):
list_other = [((self.parent).base).from_sympy(sympify(other))]
else:
list_other = [other]
sol = []
sol.append(list_self[0] + list_other[0])
sol += list_self[1:]
return DifferentialOperator(sol, self.parent)
__radd__ = __add__
def __sub__(self, other):
return self + (-1) * other
def __rsub__(self, other):
return (-1) * self + other
def __neg__(self):
return -1 * self
def __div__(self, other):
return self * (S.One / other)
def __truediv__(self, other):
return self.__div__(other)
def __pow__(self, n):
if n == 1:
return self
if n == 0:
return DifferentialOperator([self.parent.base.one], self.parent)
# if self is `Dx`
if self.listofpoly == self.parent.derivative_operator.listofpoly:
sol = []
for i in range(0, n):
sol.append(self.parent.base.zero)
sol.append(self.parent.base.one)
return DifferentialOperator(sol, self.parent)
# the general case
else:
if n % 2 == 1:
powreduce = self**(n - 1)
return powreduce * self
elif n % 2 == 0:
powreduce = self**(n / 2)
return powreduce * powreduce
def __str__(self):
listofpoly = self.listofpoly
print_str = ''
for i, j in enumerate(listofpoly):
if j == self.parent.base.zero:
continue
if i == 0:
print_str += '(' + sstr(j) + ')'
continue
if print_str:
print_str += ' + '
if i == 1:
print_str += '(' + sstr(j) + ')Dx'
continue
print_str += '(' + sstr(j) + ')' + 'Dx**' + sstr(i)
return print_str
__repr__ = __str__
def __eq__(self, other):
if isinstance(other, DifferentialOperator):
if self.listofpoly == other.listofpoly and self.parent == other.parent:
return True
else:
return False
else:
if self.listofpoly[0] == other:
for i in self.listofpoly[1:]:
if i != self.parent.base.zero:
return False
return True
else:
return False
def is_singular(self, x0):
"""
Checks if the differential equation is singular at x0.
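
Examples
========

An illustrative check (the leading coefficient `x` vanishes at 0):

>>> from sympy.holonomic.holonomic import DifferentialOperators
>>> from sympy.polys.domains import QQ
>>> from sympy import symbols
>>> x = symbols('x')
>>> R, Dx = DifferentialOperators(QQ.old_poly_ring(x), 'Dx')
>>> (x*Dx + 5).is_singular(0)
True
>>> (x*Dx + 5).is_singular(1)
False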
"""
base = self.parent.base
return x0 in roots(base.to_sympy(self.listofpoly[-1]), self.x)
class HolonomicFunction(object):
"""
A Holonomic Function is a solution to a linear homogeneous ordinary
differential equation with polynomial coefficients. This differential
equation can also be represented by an annihilator i.e. a Differential
Operator L such that L.f = 0. For uniqueness of these functions,
initial conditions can also be provided along with the annihilator.
Holonomic functions have closure properties and thus form a ring.
Given two Holonomic Functions f and g, their sum, product,
integral and derivative are also Holonomic Functions.
For ordinary points initial condition should be a vector of values of
the derivatives i.e. [y(x0), y'(x0), y''(x0) ...].
For regular singular points initial conditions can also be provided in this
format:
{s0: [C_0, C_1, ...], s1: [C0_0, C0_1, ...], ...}
where s0, s1, ... are the roots of the indicial equation and the vectors
[C_0, C_1, ...], [C0_0, C0_1, ...], ... are the corresponding initial
terms of the associated power series. See Examples below.
To plot a Holonomic Function, one can use `.evalf()` for numerical
computation. Here's an example on `sin(x)**2/x` using numpy and matplotlib.
``
import sympy.holonomic
from sympy import var, sin
import matplotlib.pyplot as plt
import numpy as np
var("x")
r = np.linspace(1, 5, 100)
y = sympy.holonomic.expr_to_holonomic(sin(x)**2/x, x0=1).evalf(r)
plt.plot(r, y, label="holonomic function")
plt.show()
``
Examples
========
>>> from sympy.holonomic.holonomic import HolonomicFunction, DifferentialOperators
>>> from sympy.polys.domains import ZZ, QQ
>>> from sympy import symbols, S
>>> x = symbols('x')
>>> R, Dx = DifferentialOperators(QQ.old_poly_ring(x),'Dx')
>>> p = HolonomicFunction(Dx - 1, x, 0, [1]) # e^x
>>> q = HolonomicFunction(Dx**2 + 1, x, 0, [0, 1]) # sin(x)
>>> p + q # annihilator of e^x + sin(x)
HolonomicFunction((-1) + (1)Dx + (-1)Dx**2 + (1)Dx**3, x), f(0) = 1, f'(0) = 2, f''(0) = 1
>>> p * q # annihilator of e^x * sin(x)
HolonomicFunction((2) + (-2)Dx + (1)Dx**2, x), f(0) = 0, f'(0) = 1
# an example of initial conditions for regular singular points
# only one root `1/2` of the indicial equation. So ics is [(1/2, [1])]
>>> HolonomicFunction(-S(1)/2 + x*Dx, x, 0, {S(1)/2: [1]})
HolonomicFunction((-1/2) + (x)Dx, x), {1/2: [1]}
>>> HolonomicFunction(-S(1)/2 + x*Dx, x, 0, {S(1)/2: [1]}).to_expr()
sqrt(x)
"""
_op_priority = 20
def __init__(self, annihilator, x, x0=0, y0=None):
"""
Takes the annihilator and variable of the function.
`x0` is the point for which initial conditions are given and
`y0` is the initial condition.
For ordinary points `y0` should be a vector of initial values
y0 = [f(x0), f'(x0), f''(x0) ...].
To make the function unique, length of the vector `y0` must be equal to or
greater than the order of differential equation.
"""
# initial condition
self.y0 = y0
# the point for initial conditions, default is zero.
self.x0 = x0
# differential operator L such that L.f = 0
self.annihilator = annihilator
self.x = x
def __repr__(self):
str_sol = 'HolonomicFunction(%s, %s)' % ((self.annihilator).__repr__(), sstr(self.x))
if not self._have_init_cond():
return str_sol
# printing the singular initial condition
# in valid python
elif self.is_singularics():
str_sol += ', ' + sstr(self.y0)
return str_sol
# for ordinary initial conditions
else:
cond_str = ''
diff_str = ''
for i in self.y0:
cond_str += ', f%s(%s) = %s' % (diff_str, sstr(self.x0), sstr(i))
diff_str += "'"
sol = str_sol + cond_str
return sol
__str__ = __repr__
def unify(self, other):
"""
Unifies the ground domains of two given Holonomic
Functions.
"""
R1 = self.annihilator.parent.base
R2 = other.annihilator.parent.base
dom1 = R1.dom
dom2 = R2.dom
if R1 == R2:
return (self, other)
R = (dom1.unify(dom2)).old_poly_ring(self.x)
newparent, _ = DifferentialOperators(R, str(self.annihilator.parent.gen_symbol))
sol1 = [R1.to_sympy(i) for i in self.annihilator.listofpoly]
sol2 = [R2.to_sympy(i) for i in other.annihilator.listofpoly]
sol1 = DifferentialOperator(sol1, newparent)
sol2 = DifferentialOperator(sol2, newparent)
sol1 = HolonomicFunction(sol1, self.x, self.x0, self.y0)
sol2 = HolonomicFunction(sol2, other.x, other.x0, other.y0)
return (sol1, sol2)
def is_singularics(self):
"""
Returns True if the function has a singular initial condition
in the dictionary format.
Returns False if the function has an ordinary initial condition
in the list format.
Returns None for all other cases.
"""
if isinstance(self.y0, dict):
return True
elif isinstance(self.y0, list):
return False
def _have_init_cond(self):
"""
Checks if the function has an initial condition.
"""
return bool(self.y0)
def _singularics_to_ord(self):
"""
Converts a singular initial condition to ordinary if possible.
"""
a = list(self.y0)[0]
b = self.y0[a]
if len(self.y0) == 1 and a == int(a) and a > 0:
y0 = []
a = int(a)
for i in range(a):
y0.append(S(0))
y0 += [j * factorial(a + i) for i, j in enumerate(b)]
return HolonomicFunction(self.annihilator, self.x, self.x0, y0)
def __add__(self, other):
# if the ground domains are different
if self.annihilator.parent.base != other.annihilator.parent.base:
a, b = self.unify(other)
return a + b
deg1 = self.annihilator.order
deg2 = other.annihilator.order
dim = max(deg1, deg2)
R = self.annihilator.parent.base
K = R.get_field()
rowsself = [self.annihilator]
rowsother = [other.annihilator]
gen = self.annihilator.parent.derivative_operator
# constructing annihilators up to order dim
for i in range(dim - deg1):
diff1 = (gen * rowsself[-1])
rowsself.append(diff1)
for i in range(dim - deg2):
diff2 = (gen * rowsother[-1])
rowsother.append(diff2)
row = rowsself + rowsother
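# ansatz: find polynomials a_i, b_j (not all zero) such that
# sum(a_i * Dx**i * L1) + sum(b_j * Dx**j * L2) == 0; then
# M = sum(a_i * Dx**i * L1) is a common left multiple of both annihilators
# and therefore annihilates f + g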
# constructing the matrix of the ansatz
r = []
for expr in row:
p = []
for i in range(dim + 1):
if i >= len(expr.listofpoly):
p.append(0)
else:
p.append(K.new(expr.listofpoly[i].rep))
r.append(p)
r = NewMatrix(r).transpose()
homosys = [[S(0) for q in range(dim + 1)]]
homosys = NewMatrix(homosys).transpose()
# solving the linear system using gauss jordan solver
solcomp = r.gauss_jordan_solve(homosys)
sol = solcomp[0]
# if a solution is not obtained, increase the order by 1 in each
# iteration
while sol.is_zero:
dim += 1
diff1 = (gen * rowsself[-1])
rowsself.append(diff1)
diff2 = (gen * rowsother[-1])
rowsother.append(diff2)
row = rowsself + rowsother
r = []
for expr in row:
p = []
for i in range(dim + 1):
if i >= len(expr.listofpoly):
p.append(S(0))
else:
p.append(K.new(expr.listofpoly[i].rep))
r.append(p)
r = NewMatrix(r).transpose()
homosys = [[S(0) for q in range(dim + 1)]]
homosys = NewMatrix(homosys).transpose()
solcomp = r.gauss_jordan_solve(homosys)
sol = solcomp[0]
# taking only the coefficients needed to multiply with `self`
# can be also be done the other way by taking R.H.S and multiplying with
# `other`
sol = sol[:dim + 1 - deg1]
sol1 = _normalize(sol, self.annihilator.parent)
# annihilator of the solution
sol = sol1 * (self.annihilator)
sol = _normalize(sol.listofpoly, self.annihilator.parent, negative=False)
if not (self._have_init_cond() and other._have_init_cond()):
return HolonomicFunction(sol, self.x)
# both the functions have ordinary initial conditions
if self.is_singularics() == False and other.is_singularics() == False:
# directly add the corresponding value
if self.x0 == other.x0:
# try to extend the initial conditions
# using the annihilator
y1 = _extend_y0(self, sol.order)
y2 = _extend_y0(other, sol.order)
y0 = [a + b for a, b in zip(y1, y2)]
return HolonomicFunction(sol, self.x, self.x0, y0)
else:
# change the initial conditions to the same point
selfat0 = self.annihilator.is_singular(0)
otherat0 = other.annihilator.is_singular(0)
if self.x0 == 0 and not selfat0 and not otherat0:
return self + other.change_ics(0)
elif other.x0 == 0 and not selfat0 and not otherat0:
return self.change_ics(0) + other
else:
selfatx0 = self.annihilator.is_singular(self.x0)
otheratx0 = other.annihilator.is_singular(self.x0)
if not selfatx0 and not otheratx0:
return self + other.change_ics(self.x0)
else:
return self.change_ics(other.x0) + other
if self.x0 != other.x0:
return HolonomicFunction(sol, self.x)
# if the functions have singular_ics
y1 = None
y2 = None
if self.is_singularics() == False and other.is_singularics() == True:
_y0 = [j / factorial(i) for i, j in enumerate(self.y0)]
y1 = {S(0):_y0}
y2 = other.y0
elif self.is_singularics() == True and other.is_singularics() == False:
_y0 = [j / factorial(i) for i, j in enumerate(other.y0)]
y1 = self.y0
y2 = {S(0):_y0}
elif self.is_singularics() == True and other.is_singularics() == True:
y1 = self.y0
y2 = other.y0
# computing singular initial condition for the result
# taking union of the series terms of both functions
y0 = {}
for i in y1:
# add corresponding initial terms if the power
# on `x` is same
if i in y2:
y0[i] = [a + b for a, b in zip(y1[i], y2[i])]
else:
y0[i] = y1[i]
for i in y2:
if not i in y1:
y0[i] = y2[i]
return HolonomicFunction(sol, self.x, self.x0, y0)
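    # A hedged sketch of the addition defined above (comments only), assuming
    # `R, Dx = DifferentialOperators(QQ.old_poly_ring(x), 'Dx')` as in the
    # docstrings: the sum of sin(x) and cos(x) keeps the annihilator
    # Dx**2 + 1 and simply adds the corresponding initial conditions:
    # >>> s = HolonomicFunction(Dx**2 + 1, x, 0, [0, 1])  # sin(x)
    # >>> c = HolonomicFunction(Dx**2 + 1, x, 0, [1, 0])  # cos(x)
    # >>> (s + c).y0  # expected [1, 1], i.e. f(0) = 1, f'(0) = 1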
def integrate(self, limits, initcond=False):
"""
        Integrates the given holonomic function. Limits can be provided;
        initial conditions can only be computed when the limits are (x0, x).
Examples
========
>>> from sympy.holonomic.holonomic import HolonomicFunction, DifferentialOperators
>>> from sympy.polys.domains import ZZ, QQ
>>> from sympy import symbols
>>> x = symbols('x')
>>> R, Dx = DifferentialOperators(QQ.old_poly_ring(x),'Dx')
>>> HolonomicFunction(Dx - 1, x, 0, [1]).integrate((x, 0, x)) # e^x - 1
HolonomicFunction((-1)Dx + (1)Dx**2, x), f(0) = 0, f'(0) = 1
        # integrate(cos(x), (x, 0, x)) = sin(x)
>>> HolonomicFunction(Dx**2 + 1, x, 0, [1, 0]).integrate((x, 0, x))
HolonomicFunction((1)Dx + (1)Dx**3, x), f(0) = 0, f'(0) = 1, f''(0) = 0
"""
# to get the annihilator, just multiply by Dx from right
D = self.annihilator.parent.derivative_operator
        # if the function has initial conditions in the series format
if self.is_singularics() == True:
r = self._singularics_to_ord()
if r:
return r.integrate(limits, initcond=initcond)
# computing singular initial condition for the function
# produced after integration.
y0 = {}
for i in self.y0:
c = self.y0[i]
c2 = []
for j in range(len(c)):
if c[j] == 0:
c2.append(S(0))
# if power on `x` is -1, the integration becomes log(x)
# TODO: Implement this case
elif i + j + 1 == 0:
raise NotImplementedError("logarithmic terms in the series are not supported")
else:
c2.append(c[j] / S(i + j + 1))
y0[i + 1] = c2
if hasattr(limits, "__iter__"):
raise NotImplementedError("Definite integration for singular initial conditions")
return HolonomicFunction(self.annihilator * D, self.x, self.x0, y0)
# if no initial conditions are available for the function
if not self._have_init_cond():
if initcond:
return HolonomicFunction(self.annihilator * D, self.x, self.x0, [S(0)])
return HolonomicFunction(self.annihilator * D, self.x)
# definite integral
        # initial conditions for the answer will be stored at the point `a`,
        # where `a` is the lower limit of the integral
if hasattr(limits, "__iter__"):
if len(limits) == 3 and limits[0] == self.x:
x0 = self.x0
a = limits[1]
b = limits[2]
else:
x0 = self.x0
a = self.x0
b = self.x
if x0 == a:
y0 = [S(0)]
y0 += self.y0
# use evalf to get the values at `a`
else:
y0 = [S(0)]
tempy0 = self.change_ics(a).y0
y0 += tempy0
# if the upper limit is `x`, the answer will be a function
if b == self.x:
return HolonomicFunction(self.annihilator * D, self.x, a, y0)
        # if the upper limit is a Number, a numerical value will be returned
elif S(b).is_Number:
try:
s = HolonomicFunction(self.annihilator * D, self.x, a,\
y0).to_expr()
indefinite = s.subs(self.x, b)
if not isinstance(indefinite, NaN):
return indefinite
else:
return s.limit(self.x, b)
except (NotHyperSeriesError, NotPowerSeriesError):
return HolonomicFunction(self.annihilator * D, self.x, a, y0).evalf(b)
return HolonomicFunction(self.annihilator * D, self.x)
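    # A hedged sketch of definite integration (comments only, output not
    # verified here): integrating e^x over (0, 1) goes through the
    # to_expr()/subs branch above and should give E - 1 (about 1.71828):
    # >>> HolonomicFunction(Dx - 1, x, 0, [1]).integrate((x, 0, 1))
    # -1 + E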
def diff(self, *args):
"""
Differentiation of the given Holonomic function.
Examples
========
>>> from sympy.holonomic.holonomic import HolonomicFunction, DifferentialOperators
>>> from sympy.polys.domains import ZZ, QQ
>>> from sympy import symbols
>>> x = symbols('x')
>>> R, Dx = DifferentialOperators(ZZ.old_poly_ring(x),'Dx')
# derivative of sin(x)
>>> HolonomicFunction(Dx**2 + 1, x, 0, [0, 1]).diff().to_expr()
cos(x)
        # derivative of e^(2*x)
>>> HolonomicFunction(Dx - 2, x, 0, [1]).diff().to_expr()
2*exp(2*x)
See Also
        ========
.integrate()
"""
if args:
if args[0] != self.x:
return S(0)
elif len(args) == 2:
sol = self
for i in range(args[1]):
sol = sol.diff(args[0])
return sol
ann = self.annihilator
dx = ann.parent.derivative_operator
# if the function is constant.
if ann.listofpoly[0] == ann.parent.base.zero and ann.order == 1:
return S(0)
        # if the coefficient of y in the differential equation is zero,
        # a shift is done to compute the answer in this case.
elif ann.listofpoly[0] == ann.parent.base.zero:
sol = DifferentialOperator(ann.listofpoly[1:], ann.parent)
if self._have_init_cond():
# if ordinary initial condition
if self.is_singularics() == False:
return HolonomicFunction(sol, self.x, self.x0, self.y0[1:])
# TODO: support for singular initial condition
return HolonomicFunction(sol, self.x)
else:
return HolonomicFunction(sol, self.x)
# the general algorithm
R = ann.parent.base
K = R.get_field()
seq_dmf = [K.new(i.rep) for i in ann.listofpoly]
# -y = a1*y'/a0 + a2*y''/a0 ... + an*y^n/a0
rhs = [i / seq_dmf[0] for i in seq_dmf[1:]]
rhs.insert(0, K.zero)
# differentiate both lhs and rhs
sol = _derivate_diff_eq(rhs)
# add the term y' in lhs to rhs
sol = _add_lists(sol, [K.zero, K.one])
sol = _normalize(sol[1:], self.annihilator.parent, negative=False)
if not self._have_init_cond() or self.is_singularics() == True:
return HolonomicFunction(sol, self.x)
y0 = _extend_y0(self, sol.order + 1)[1:]
return HolonomicFunction(sol, self.x, self.x0, y0)
    def __eq__(self, other):
        if not (self.annihilator == other.annihilator and self.x == other.x):
            return False
        if self._have_init_cond() and other._have_init_cond():
            return self.x0 == other.x0 and self.y0 == other.y0
        return True
def __mul__(self, other):
ann_self = self.annihilator
if not isinstance(other, HolonomicFunction):
other = sympify(other)
if not other.is_constant():
raise NotImplementedError(" Can't multiply a HolonomicFunction and expressions/functions.")
if not self._have_init_cond():
return self
else:
y0 = _extend_y0(self, ann_self.order)
y1 = []
for j in y0:
y1.append(j * other)
return HolonomicFunction(ann_self, self.x, self.x0, y1)
if self.annihilator.parent.base != other.annihilator.parent.base:
a, b = self.unify(other)
return a * b
ann_other = other.annihilator
list_self = []
list_other = []
a = ann_self.order
b = ann_other.order
R = ann_self.parent.base
K = R.get_field()
for j in ann_self.listofpoly:
list_self.append(K.new(j.rep))
for j in ann_other.listofpoly:
list_other.append(K.new(j.rep))
# will be used to reduce the degree
self_red = [-list_self[i] / list_self[a] for i in range(a)]
other_red = [-list_other[i] / list_other[b] for i in range(b)]
        # coeff_mul[i][j] is the coefficient of Dx^i(f).Dx^j(g)
coeff_mul = [[S(0) for i in range(b + 1)] for j in range(a + 1)]
coeff_mul[0][0] = S(1)
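        # e.g. for h = f*g the first update below encodes the product rule
        # Dx(h) = Dx(f)*g + f*Dx(g), i.e. coeff_mul[1][0] = coeff_mul[0][1] = 1;
        # the terms Dx^a(f) and Dx^b(g) are then rewritten with self_red and
        # other_red using the two annihilators.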
# making the ansatz
lin_sys = [[coeff_mul[i][j] for i in range(a) for j in range(b)]]
homo_sys = [[S(0) for q in range(a * b)]]
homo_sys = NewMatrix(homo_sys).transpose()
sol = (NewMatrix(lin_sys).transpose()).gauss_jordan_solve(homo_sys)
        # until a non-trivial solution is found
while sol[0].is_zero:
            # updating the coefficients of Dx^i(f).Dx^j(g) for the next degree
for i in range(a - 1, -1, -1):
for j in range(b - 1, -1, -1):
coeff_mul[i][j + 1] += coeff_mul[i][j]
coeff_mul[i + 1][j] += coeff_mul[i][j]
if isinstance(coeff_mul[i][j], K.dtype):
coeff_mul[i][j] = DMFdiff(coeff_mul[i][j])
else:
coeff_mul[i][j] = coeff_mul[i][j].diff(self.x)
# reduce the terms to lower power using annihilators of f, g
for i in range(a + 1):
if not coeff_mul[i][b] == S(0):
for j in range(b):
coeff_mul[i][j] += other_red[j] * \
coeff_mul[i][b]
coeff_mul[i][b] = S(0)
            # not b + 1, as column b is already reduced in the previous loop
for j in range(b):
if not coeff_mul[a][j] == 0:
for i in range(a):
coeff_mul[i][j] += self_red[i] * \
coeff_mul[a][j]
coeff_mul[a][j] = S(0)
lin_sys.append([coeff_mul[i][j] for i in range(a)
for j in range(b)])
sol = (NewMatrix(lin_sys).transpose()).gauss_jordan_solve(homo_sys)
sol_ann = _normalize(sol[0][0:], self.annihilator.parent, negative=False)
if not (self._have_init_cond() and other._have_init_cond()):
return HolonomicFunction(sol_ann, self.x)
if self.is_singularics() == False and other.is_singularics() == False:
# if both the conditions are at same point
if self.x0 == other.x0:
                # try to find more initial conditions
y0_self = _extend_y0(self, sol_ann.order)
y0_other = _extend_y0(other, sol_ann.order)
# h(x0) = f(x0) * g(x0)
y0 = [y0_self[0] * y0_other[0]]
# coefficient of Dx^j(f)*Dx^i(g) in Dx^i(fg)
for i in range(1, min(len(y0_self), len(y0_other))):
coeff = [[0 for i in range(i + 1)] for j in range(i + 1)]
for j in range(i + 1):
for k in range(i + 1):
if j + k == i:
coeff[j][k] = binomial(i, j)
sol = 0
for j in range(i + 1):
for k in range(i + 1):
sol += coeff[j][k]* y0_self[j] * y0_other[k]
y0.append(sol)
return HolonomicFunction(sol_ann, self.x, self.x0, y0)
            # if the points are different, move both to a common point
else:
selfat0 = self.annihilator.is_singular(0)
otherat0 = other.annihilator.is_singular(0)
if self.x0 == 0 and not selfat0 and not otherat0:
return self * other.change_ics(0)
elif other.x0 == 0 and not selfat0 and not otherat0:
return self.change_ics(0) * other
else:
selfatx0 = self.annihilator.is_singular(self.x0)
otheratx0 = other.annihilator.is_singular(self.x0)
if not selfatx0 and not otheratx0:
return self * other.change_ics(self.x0)
else:
return self.change_ics(other.x0) * other
if self.x0 != other.x0:
return HolonomicFunction(sol_ann, self.x)
# if the functions have singular_ics
y1 = None
y2 = None
if self.is_singularics() == False and other.is_singularics() == True:
_y0 = [j / factorial(i) for i, j in enumerate(self.y0)]
y1 = {S(0):_y0}
y2 = other.y0
elif self.is_singularics() == True and other.is_singularics() == False:
_y0 = [j / factorial(i) for i, j in enumerate(other.y0)]
y1 = self.y0
y2 = {S(0):_y0}
elif self.is_singularics() == True and other.is_singularics() == True:
y1 = self.y0
y2 = other.y0
y0 = {}
# multiply every possible pair of the series terms
for i in y1:
for j in y2:
k = min(len(y1[i]), len(y2[j]))
c = []
for a in range(k):
s = S(0)
for b in range(a + 1):
s += y1[i][b] * y2[j][a - b]
c.append(s)
if not i + j in y0:
y0[i + j] = c
else:
y0[i + j] = [a + b for a, b in zip(c, y0[i + j])]
return HolonomicFunction(sol_ann, self.x, self.x0, y0)
__rmul__ = __mul__
def __sub__(self, other):
return self + other * -1
def __rsub__(self, other):
return self * -1 + other
def __neg__(self):
return -1 * self
def __div__(self, other):
return self * (S.One / other)
def __truediv__(self, other):
return self.__div__(other)
def __pow__(self, n):
if n < 0:
raise NotHolonomicError("Negative Power on a Holonomic Function")
if n == 0:
return S(1)
if n == 1:
return self
else:
if n % 2 == 1:
powreduce = self**(n - 1)
return powreduce * self
elif n % 2 == 0:
powreduce = self**(n / 2)
return powreduce * powreduce
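    # A hedged sketch of the squaring above (comments only): for f = sin(x),
    # the repeated multiplication yields a third order annihilator, consistent
    # with sin(x)**2 = (1 - cos(2*x))/2 satisfying y''' + 4*y' = 0:
    # >>> p = HolonomicFunction(Dx**2 + 1, x, 0, [0, 1])**2
    # >>> p.annihilator.order  # expected: 3
    # >>> p.y0  # expected: [0, 0, 2]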
def degree(self):
"""
Returns the highest power of `x` in the annihilator.
"""
sol = [i.degree() for i in self.annihilator.listofpoly]
return max(sol)
def composition(self, expr, *args, **kwargs):
"""
        Returns the annihilator after composition of a holonomic function with
        an algebraic function. Initial conditions for the annihilator after
        composition can also be provided to the function.
Examples
========
>>> from sympy.holonomic.holonomic import HolonomicFunction, DifferentialOperators
>>> from sympy.polys.domains import ZZ, QQ
>>> from sympy import symbols
>>> x = symbols('x')
>>> R, Dx = DifferentialOperators(QQ.old_poly_ring(x),'Dx')
>>> HolonomicFunction(Dx - 1, x).composition(x**2, 0, [1]) # e^(x**2)
HolonomicFunction((-2*x) + (1)Dx, x), f(0) = 1
>>> HolonomicFunction(Dx**2 + 1, x).composition(x**2 - 1, 1, [1, 0])
HolonomicFunction((4*x**3) + (-1)Dx + (x)Dx**2, x), f(1) = 1, f'(1) = 0
See Also
========
from_hyper
"""
R = self.annihilator.parent
a = self.annihilator.order
diff = expr.diff(self.x)
listofpoly = self.annihilator.listofpoly
for i, j in enumerate(listofpoly):
if isinstance(j, self.annihilator.parent.base.dtype):
listofpoly[i] = self.annihilator.parent.base.to_sympy(j)
r = listofpoly[a].subs({self.x:expr})
subs = [-listofpoly[i].subs({self.x:expr}) / r for i in range (a)]
coeffs = [S(0) for i in range(a)] # coeffs[i] == coeff of (D^i f)(a) in D^k (f(a))
coeffs[0] = S(1)
system = [coeffs]
homogeneous = Matrix([[S(0) for i in range(a)]]).transpose()
sol = S(0)
while sol.is_zero:
coeffs_next = [p.diff(self.x) for p in coeffs]
for i in range(a - 1):
coeffs_next[i + 1] += (coeffs[i] * diff)
for i in range(a):
coeffs_next[i] += (coeffs[-1] * subs[i] * diff)
coeffs = coeffs_next
# check for linear relations
system.append(coeffs)
sol_tuple = (Matrix(system).transpose()).gauss_jordan_solve(homogeneous)
sol = sol_tuple[0]
tau = sol.atoms(Dummy).pop()
sol = sol.subs(tau, 1)
sol = _normalize(sol[0:], R, negative=False)
# if initial conditions are given for the resulting function
if args:
return HolonomicFunction(sol, self.x, args[0], args[1])
return HolonomicFunction(sol, self.x)
def to_sequence(self, lb=True):
"""
        Finds the recurrence relation satisfied by the coefficients in the
        power series expansion of the function about `x0`, where `x0` is the
        point at which initial conditions are given.
        If the point `x0` is ordinary, a solution of the form [(R, n0)]
        is returned, where `R` is the recurrence relation and `n0` is the
        smallest `n` for which the recurrence holds true.
        If the point `x0` is regular singular, a list of tuples (R, p, n0)
        is returned, i.e. [(R, p, n0), ...]. Each tuple in this list represents
        a recurrence relation `R` associated with a root `p` of the indicial
        equation. Conditions of a different format can also be provided in
        this case, see the docstring of the class.
        If it's not possible to numerically compute an initial condition,
        it is returned as a symbol C_j, denoting the coefficient of (x - x0)^j
        in the power series about x0.
Examples
========
>>> from sympy.holonomic.holonomic import HolonomicFunction, DifferentialOperators
>>> from sympy.polys.domains import ZZ, QQ
>>> from sympy import symbols, S
>>> x = symbols('x')
>>> R, Dx = DifferentialOperators(QQ.old_poly_ring(x),'Dx')
# exp(x), the recurrence relation holds for n >= 0
>>> HolonomicFunction(Dx - 1, x, 0, [1]).to_sequence()
[(HolonomicSequence((-1) + (n + 1)Sn, n), u(0) = 1, 0)]
# log(1 + x), the recurrence relation holds for n >= 2
>>> HolonomicFunction((1 + x)*Dx**2 + Dx, x, 0, [0, 1]).to_sequence()
[(HolonomicSequence((n**2) + (n**2 + n)Sn, n), u(0) = 0, u(1) = 1, u(2) = -1/2, 2)]
>>> HolonomicFunction(-S(1)/2 + x*Dx, x, 0, {S(1)/2: [1]}).to_sequence()
[(HolonomicSequence((n), n), u(0) = 1, 1/2, 1)]
See Also
========
HolonomicFunction.series
References
==========
[1] hal.inria.fr/inria-00070025/document
[2] http://www.risc.jku.at/publications/download/risc_2244/DIPLFORM.pdf
"""
if self.x0 != 0:
return self.shift_x(self.x0).to_sequence()
# check whether a power series exists if the point is singular
if self.annihilator.is_singular(self.x0):
return self._frobenius(lb=lb)
dict1 = {}
n = symbols('n', integer=True)
dom = self.annihilator.parent.base.dom
R, _ = RecurrenceOperators(dom.old_poly_ring(n), 'Sn')
# substituting each term of the form `x^k Dx^j` in the
# annihilator, according to the formula below:
# x^k Dx^j = Sum(rf(n + 1 - k, j) * a(n + j - k) * x^n, (n, k, oo))
# for explanation see [2].
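        # e.g. for Dx - 1 (annihilator of exp(x)): the Dx term contributes
        # rf(n + 1, 1)*a(n + 1) = (n + 1)*a(n + 1) and the constant term -1
        # contributes -a(n), so the recurrence is (n + 1)*a(n + 1) = a(n),
        # matching the Taylor coefficients a(n) = 1/n! of exp(x).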
for i, j in enumerate(self.annihilator.listofpoly):
listofdmp = j.all_coeffs()
degree = len(listofdmp) - 1
for k in range(degree + 1):
coeff = listofdmp[degree - k]
if coeff == 0:
continue
if (i - k, k) in dict1:
dict1[(i - k, k)] += (dom.to_sympy(coeff) * rf(n - k + 1, i))
else:
dict1[(i - k, k)] = (dom.to_sympy(coeff) * rf(n - k + 1, i))
sol = []
keylist = [i[0] for i in dict1]
lower = min(keylist)
upper = max(keylist)
degree = self.degree()
# the recurrence relation holds for all values of
# n greater than smallest_n, i.e. n >= smallest_n
smallest_n = lower + degree
dummys = {}
eqs = []
unknowns = []
# an appropriate shift of the recurrence
for j in range(lower, upper + 1):
if j in keylist:
temp = S(0)
for k in dict1.keys():
if k[0] == j:
temp += dict1[k].subs(n, n - lower)
sol.append(temp)
else:
sol.append(S(0))
# the recurrence relation
sol = RecurrenceOperator(sol, R)
# computing the initial conditions for recurrence
order = sol.order
all_roots = roots(R.base.to_sympy(sol.listofpoly[-1]), n, filter='Z')
all_roots = all_roots.keys()
if all_roots:
max_root = max(all_roots) + 1
smallest_n = max(max_root, smallest_n)
order += smallest_n
y0 = _extend_y0(self, order)
u0 = []
# u(n) = y^n(0)/factorial(n)
for i, j in enumerate(y0):
u0.append(j / factorial(i))
# if sufficient conditions can't be computed then
# try to use the series method i.e.
# equate the coefficients of x^k in the equation formed by
# substituting the series in differential equation, to zero.
if len(u0) < order:
for i in range(degree):
eq = S(0)
for j in dict1:
if i + j[0] < 0:
dummys[i + j[0]] = S(0)
elif i + j[0] < len(u0):
dummys[i + j[0]] = u0[i + j[0]]
elif not i + j[0] in dummys:
dummys[i + j[0]] = Symbol('C_%s' %(i + j[0]))
unknowns.append(dummys[i + j[0]])
if j[1] <= i:
eq += dict1[j].subs(n, i) * dummys[i + j[0]]
eqs.append(eq)
# solve the system of equations formed
soleqs = solve(eqs, *unknowns)
if isinstance(soleqs, dict):
for i in range(len(u0), order):
if i not in dummys:
dummys[i] = Symbol('C_%s' %i)
if dummys[i] in soleqs:
u0.append(soleqs[dummys[i]])
else:
u0.append(dummys[i])
if lb:
return [(HolonomicSequence(sol, u0), smallest_n)]
return [HolonomicSequence(sol, u0)]
for i in range(len(u0), order):
if i not in dummys:
dummys[i] = Symbol('C_%s' %i)
s = False
for j in soleqs:
if dummys[i] in j:
u0.append(j[dummys[i]])
s = True
if not s:
u0.append(dummys[i])
if lb:
return [(HolonomicSequence(sol, u0), smallest_n)]
return [HolonomicSequence(sol, u0)]
def _frobenius(self, lb=True):
# compute the roots of indicial equation
indicialroots = self._indicial()
reals = []
compl = []
for i in indicialroots:
if i.is_real:
reals.extend([i] * indicialroots[i])
else:
a, b = i.as_real_imag()
compl.extend([(i, a, b)] * indicialroots[i])
# sort the roots for a fixed ordering of solution
compl.sort(key=lambda x : x[1])
compl.sort(key=lambda x : x[2])
reals.sort()
x = self.x
        # grouping the roots; roots that differ by an integer are put in the same group.
grp = []
for i in reals:
intdiff = False
if len(grp) == 0:
grp.append([i])
continue
for j in grp:
if int(j[0] - i) == j[0] - i:
j.append(i)
intdiff = True
break
if not intdiff:
grp.append([i])
        # True if none of the roots differ by an integer, i.e.
        # each group has only one member
        independent = all(len(i) == 1 for i in grp)
allpos = all(i >= 0 for i in reals)
allint = all(int(i) == i for i in reals)
# if initial conditions are provided
# then use them.
if self.is_singularics() == True:
rootstoconsider = []
for i in self.y0:
for j in indicialroots:
if j == i:
rootstoconsider.append(i)
elif allpos and allint:
rootstoconsider = [min(reals)]
elif independent:
rootstoconsider = [i[0] for i in grp] + [j[0] for j in compl]
elif not allint:
rootstoconsider = []
for i in reals:
if not int(i) == i:
rootstoconsider.append(i)
elif not allpos:
if not self._have_init_cond() or S(self.y0[0]).is_finite == False:
rootstoconsider = [min(reals)]
else:
posroots = []
for i in reals:
if i >= 0:
posroots.append(i)
rootstoconsider = [min(posroots)]
n = symbols('n', integer=True)
dom = self.annihilator.parent.base.dom
R, _ = RecurrenceOperators(dom.old_poly_ring(n), 'Sn')
finalsol = []
char = ord('C')
for p in rootstoconsider:
dict1 = {}
for i, j in enumerate(self.annihilator.listofpoly):
listofdmp = j.all_coeffs()
degree = len(listofdmp) - 1
for k in range(degree + 1):
coeff = listofdmp[degree - k]
if coeff == 0:
continue
if (i - k, k - i) in dict1:
dict1[(i - k, k - i)] += (dom.to_sympy(coeff) * rf(n - k + 1 + p, i))
else:
dict1[(i - k, k - i)] = (dom.to_sympy(coeff) * rf(n - k + 1 + p, i))
sol = []
keylist = [i[0] for i in dict1]
lower = min(keylist)
upper = max(keylist)
degree = max([i[1] for i in dict1])
degree2 = min([i[1] for i in dict1])
smallest_n = lower + degree
dummys = {}
eqs = []
unknowns = []
for j in range(lower, upper + 1):
if j in keylist:
temp = S(0)
for k in dict1.keys():
if k[0] == j:
temp += dict1[k].subs(n, n - lower)
sol.append(temp)
else:
sol.append(S(0))
# the recurrence relation
sol = RecurrenceOperator(sol, R)
# computing the initial conditions for recurrence
order = sol.order
all_roots = roots(R.base.to_sympy(sol.listofpoly[-1]), n, filter='Z')
all_roots = all_roots.keys()
if all_roots:
max_root = max(all_roots) + 1
smallest_n = max(max_root, smallest_n)
order += smallest_n
u0 = []
if self.is_singularics() == True:
u0 = self.y0[p]
elif self.is_singularics() == False and p >= 0 and int(p) == p and len(rootstoconsider) == 1:
y0 = _extend_y0(self, order + int(p))
# u(n) = y^n(0)/factorial(n)
if len(y0) > int(p):
for i in range(int(p), len(y0)):
u0.append(y0[i] / factorial(i))
if len(u0) < order:
for i in range(degree2, degree):
eq = S(0)
for j in dict1:
if i + j[0] < 0:
dummys[i + j[0]] = S(0)
elif i + j[0] < len(u0):
dummys[i + j[0]] = u0[i + j[0]]
elif not i + j[0] in dummys:
letter = chr(char) + '_%s' %(i + j[0])
dummys[i + j[0]] = Symbol(letter)
unknowns.append(dummys[i + j[0]])
if j[1] <= i:
eq += dict1[j].subs(n, i) * dummys[i + j[0]]
eqs.append(eq)
# solve the system of equations formed
soleqs = solve(eqs, *unknowns)
if isinstance(soleqs, dict):
for i in range(len(u0), order):
if i not in dummys:
letter = chr(char) + '_%s' %i
dummys[i] = Symbol(letter)
if dummys[i] in soleqs:
u0.append(soleqs[dummys[i]])
else:
u0.append(dummys[i])
if lb:
finalsol.append((HolonomicSequence(sol, u0), p, smallest_n))
continue
else:
finalsol.append((HolonomicSequence(sol, u0), p))
continue
for i in range(len(u0), order):
if i not in dummys:
letter = chr(char) + '_%s' %i
dummys[i] = Symbol(letter)
s = False
for j in soleqs:
if dummys[i] in j:
u0.append(j[dummys[i]])
s = True
if not s:
u0.append(dummys[i])
if lb:
finalsol.append((HolonomicSequence(sol, u0), p, smallest_n))
else:
finalsol.append((HolonomicSequence(sol, u0), p))
char += 1
return finalsol
def series(self, n=6, coefficient=False, order=True, _recur=None):
"""
        Finds the power series expansion of the given holonomic function
        about x0. A list of series might be returned if `x0` is a regular
        singular point with multiple roots of the indicial equation.
Examples
========
>>> from sympy.holonomic.holonomic import HolonomicFunction, DifferentialOperators
>>> from sympy.polys.domains import ZZ, QQ
>>> from sympy import symbols
>>> x = symbols('x')
>>> R, Dx = DifferentialOperators(QQ.old_poly_ring(x),'Dx')
>>> HolonomicFunction(Dx - 1, x, 0, [1]).series() # e^x
1 + x + x**2/2 + x**3/6 + x**4/24 + x**5/120 + O(x**6)
>>> HolonomicFunction(Dx**2 + 1, x, 0, [0, 1]).series(n=8) # sin(x)
x - x**3/6 + x**5/120 - x**7/5040 + O(x**8)
See Also
========
HolonomicFunction.to_sequence
"""
        if _recur is None:
recurrence = self.to_sequence()
else:
recurrence = _recur
if isinstance(recurrence, tuple) and len(recurrence) == 2:
recurrence = recurrence[0]
constantpower = 0
elif isinstance(recurrence, tuple) and len(recurrence) == 3:
constantpower = recurrence[1]
recurrence = recurrence[0]
elif len(recurrence) == 1 and len(recurrence[0]) == 2:
recurrence = recurrence[0][0]
constantpower = 0
elif len(recurrence) == 1 and len(recurrence[0]) == 3:
constantpower = recurrence[0][1]
recurrence = recurrence[0][0]
else:
sol = []
for i in recurrence:
sol.append(self.series(_recur=i))
return sol
n = n - int(constantpower)
l = len(recurrence.u0) - 1
k = recurrence.recurrence.order
x = self.x
x0 = self.x0
seq_dmp = recurrence.recurrence.listofpoly
R = recurrence.recurrence.parent.base
K = R.get_field()
seq = []
for i, j in enumerate(seq_dmp):
seq.append(K.new(j.rep))
sub = [-seq[i] / seq[k] for i in range(k)]
sol = [i for i in recurrence.u0]
if l + 1 >= n:
pass
else:
# use the initial conditions to find the next term
for i in range(l + 1 - k, n - k):
coeff = S(0)
for j in range(k):
if i + j >= 0:
coeff += DMFsubs(sub[j], i) * sol[i + j]
sol.append(coeff)
if coefficient:
return sol
ser = S(0)
for i, j in enumerate(sol):
ser += x**(i + constantpower) * j
if order:
ser += Order(x**(n + int(constantpower)), x)
if x0 != 0:
return ser.subs(x, x - x0)
return ser
def _indicial(self):
"""Computes the roots of Indicial equation.
"""
if self.x0 != 0:
return self.shift_x(self.x0)._indicial()
list_coeff = self.annihilator.listofpoly
R = self.annihilator.parent.base
x = self.x
s = R.zero
y = R.one
def _pole_degree(poly):
root_all = roots(R.to_sympy(poly), x, filter='Z')
if 0 in root_all.keys():
return root_all[0]
else:
return 0
degree = [j.degree() for j in list_coeff]
degree = max(degree)
inf = 10 * (max(1, degree) + max(1, self.annihilator.order))
deg = lambda q: inf if q.is_zero else _pole_degree(q)
b = deg(list_coeff[0])
for j in range(1, len(list_coeff)):
b = min(b, deg(list_coeff[j]) - j)
for i, j in enumerate(list_coeff):
listofdmp = j.all_coeffs()
degree = len(listofdmp) - 1
if - i - b <= 0 and degree - i - b >= 0:
s = s + listofdmp[degree - i - b] * y
y *= x - i
return roots(R.to_sympy(s), x)
def evalf(self, points, method='RK4', h=0.05, derivatives=False):
"""
        Finds the numerical value of a holonomic function using numerical
        methods (RK4 by default). A set of points (real or complex) must be
        provided; this will be the path for the numerical integration.
The path should be given as a list [x1, x2, ... xn]. The numerical
values will be computed at each point in this order x1 --> x2 --> x3
... --> xn.
Returns values of the function at x1, x2, ... xn in a list.
Examples
        ========
>>> from sympy.holonomic.holonomic import HolonomicFunction, DifferentialOperators
>>> from sympy.polys.domains import ZZ, QQ
>>> from sympy import symbols
>>> x = symbols('x')
>>> R, Dx = DifferentialOperators(QQ.old_poly_ring(x),'Dx')
        >>> # a straight line on the real axis from 0 to 1
>>> r = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1]
# using Runge-Kutta 4th order on e^x from 0.1 to 1.
# exact solution at 1 is 2.71828182845905
>>> HolonomicFunction(Dx - 1, x, 0, [1]).evalf(r)
[1.10517083333333, 1.22140257085069, 1.34985849706254, 1.49182424008069,
1.64872063859684, 1.82211796209193, 2.01375162659678, 2.22553956329232,
2.45960141378007, 2.71827974413517]
# using Euler's method for the same
>>> HolonomicFunction(Dx - 1, x, 0, [1]).evalf(r, method='Euler')
[1.1, 1.21, 1.331, 1.4641, 1.61051, 1.771561, 1.9487171, 2.14358881,
2.357947691, 2.5937424601]
        One can also observe that the value obtained using Runge-Kutta 4th order
        is much more accurate than the one obtained using Euler's method.
"""
from sympy.holonomic.numerical import _evalf
lp = False
# if a point `b` is given instead of a mesh
if not hasattr(points, "__iter__"):
lp = True
b = S(points)
if self.x0 == b:
return _evalf(self, [b], method=method, derivatives=derivatives)[-1]
if not b.is_Number:
raise NotImplementedError
a = self.x0
if a > b:
h = -h
n = int((b - a) / h)
points = [a + h]
for i in range(n - 1):
points.append(points[-1] + h)
for i in roots(self.annihilator.parent.base.to_sympy(self.annihilator.listofpoly[-1]), self.x):
if i == self.x0 or i in points:
raise SingularityError(self, i)
if lp:
return _evalf(self, points, method=method, derivatives=derivatives)[-1]
return _evalf(self, points, method=method, derivatives=derivatives)
def change_x(self, z):
"""
        Changes only the variable of the Holonomic Function, for internal
        purposes. For composition, use HolonomicFunction.composition().
"""
dom = self.annihilator.parent.base.dom
R = dom.old_poly_ring(z)
parent, _ = DifferentialOperators(R, 'Dx')
sol = []
for j in self.annihilator.listofpoly:
sol.append(R(j.rep))
sol = DifferentialOperator(sol, parent)
return HolonomicFunction(sol, z, self.x0, self.y0)
def shift_x(self, a):
"""
Substitute `x + a` for `x`.
"""
x = self.x
listaftershift = self.annihilator.listofpoly
base = self.annihilator.parent.base
sol = [base.from_sympy(base.to_sympy(i).subs(x, x + a)) for i in listaftershift]
sol = DifferentialOperator(sol, self.annihilator.parent)
x0 = self.x0 - a
if not self._have_init_cond():
return HolonomicFunction(sol, x)
return HolonomicFunction(sol, x, x0, self.y0)
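    # A hedged sketch of the shift above (comments only): for exp(x) with the
    # condition stored at x0 = 1, shifting by a = 1 leaves the constant
    # coefficient annihilator unchanged and moves the condition to the origin:
    # >>> HolonomicFunction(Dx - 1, x, 1, [exp(1)]).shift_x(1)
    # expected: annihilator Dx - 1 (i.e. exp(x + 1)) with x0 = 0, y0 = [E]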
def to_hyper(self, as_list=False, _recur=None):
"""
        Returns a hypergeometric function (or a linear combination of them)
representing the given holonomic function.
Returns an answer of the form:
a1 * x**b1 * hyper() + a2 * x**b2 * hyper() ...
This is very useful as one can now use `hyperexpand` to find the
symbolic expressions/functions.
Examples
========
>>> from sympy.holonomic.holonomic import HolonomicFunction, DifferentialOperators
>>> from sympy.polys.domains import ZZ, QQ
>>> from sympy import symbols
>>> x = symbols('x')
>>> R, Dx = DifferentialOperators(ZZ.old_poly_ring(x),'Dx')
# sin(x)
>>> HolonomicFunction(Dx**2 + 1, x, 0, [0, 1]).to_hyper()
x*hyper((), (3/2,), -x**2/4)
# exp(x)
>>> HolonomicFunction(Dx - 1, x, 0, [1]).to_hyper()
hyper((), (), x)
See Also
========
from_hyper, from_meijerg
"""
        if _recur is None:
recurrence = self.to_sequence()
else:
recurrence = _recur
if isinstance(recurrence, tuple) and len(recurrence) == 2:
smallest_n = recurrence[1]
recurrence = recurrence[0]
constantpower = 0
elif isinstance(recurrence, tuple) and len(recurrence) == 3:
smallest_n = recurrence[2]
constantpower = recurrence[1]
recurrence = recurrence[0]
elif len(recurrence) == 1 and len(recurrence[0]) == 2:
smallest_n = recurrence[0][1]
recurrence = recurrence[0][0]
constantpower = 0
elif len(recurrence) == 1 and len(recurrence[0]) == 3:
smallest_n = recurrence[0][2]
constantpower = recurrence[0][1]
recurrence = recurrence[0][0]
else:
sol = self.to_hyper(as_list=as_list, _recur=recurrence[0])
for i in recurrence[1:]:
sol += self.to_hyper(as_list=as_list, _recur=i)
return sol
u0 = recurrence.u0
r = recurrence.recurrence
x = self.x
x0 = self.x0
# order of the recurrence relation
m = r.order
        # when no recurrence exists, and the power series has finitely many terms
if m == 0:
nonzeroterms = roots(r.parent.base.to_sympy(r.listofpoly[0]), recurrence.n, filter='R')
sol = S(0)
for j, i in enumerate(nonzeroterms):
if i < 0 or int(i) != i:
continue
i = int(i)
if i < len(u0):
if isinstance(u0[i], (PolyElement, FracElement)):
u0[i] = u0[i].as_expr()
sol += u0[i] * x**i
else:
sol += Symbol('C_%s' %j) * x**i
if isinstance(sol, (PolyElement, FracElement)):
sol = sol.as_expr() * x**constantpower
else:
sol = sol * x**constantpower
if as_list:
if x0 != 0:
return [(sol.subs(x, x - x0), )]
return [(sol, )]
if x0 != 0:
return sol.subs(x, x - x0)
return sol
if smallest_n + m > len(u0):
raise NotImplementedError("Can't compute sufficient Initial Conditions")
# check if the recurrence represents a hypergeometric series
is_hyper = True
for i in range(1, len(r.listofpoly)-1):
if r.listofpoly[i] != r.parent.base.zero:
is_hyper = False
break
if not is_hyper:
raise NotHyperSeriesError(self, self.x0)
a = r.listofpoly[0]
b = r.listofpoly[-1]
        # the constant multiple of the argument of the hypergeometric function
if isinstance(a.rep[0], (PolyElement, FracElement)):
c = - (S(a.rep[0].as_expr()) * m**(a.degree())) / (S(b.rep[0].as_expr()) * m**(b.degree()))
else:
c = - (S(a.rep[0]) * m**(a.degree())) / (S(b.rep[0]) * m**(b.degree()))
sol = 0
arg1 = roots(r.parent.base.to_sympy(a), recurrence.n)
arg2 = roots(r.parent.base.to_sympy(b), recurrence.n)
        # iterate through the initial conditions to find
# the hypergeometric representation of the given
# function.
# The answer will be a linear combination
# of different hypergeometric series which satisfies
# the recurrence.
if as_list:
listofsol = []
for i in range(smallest_n + m):
# if the recurrence relation doesn't hold for `n = i`,
# then a Hypergeometric representation doesn't exist.
# add the algebraic term a * x**i to the solution,
# where a is u0[i]
if i < smallest_n:
if as_list:
listofsol.append(((S(u0[i]) * x**(i+constantpower)).subs(x, x-x0), ))
else:
sol += S(u0[i]) * x**i
continue
# if the coefficient u0[i] is zero, then the
            # independent hypergeometric series starting with
# x**i is not a part of the answer.
if S(u0[i]) == 0:
continue
ap = []
bq = []
# substitute m * n + i for n
for k in arg1:
ap.extend([nsimplify((i - k) / m)] * arg1[k])
for k in arg2:
bq.extend([nsimplify((i - k) / m)] * arg2[k])
# convention of (k + 1) in the denominator
if 1 in bq:
bq.remove(1)
else:
ap.append(1)
if as_list:
listofsol.append(((S(u0[i])*x**(i+constantpower)).subs(x, x-x0), (hyper(ap, bq, c*x**m)).subs(x, x-x0)))
else:
sol += S(u0[i]) * hyper(ap, bq, c * x**m) * x**i
if as_list:
return listofsol
sol = sol * x**constantpower
if x0 != 0:
return sol.subs(x, x - x0)
return sol
def to_expr(self):
"""
Converts a Holonomic Function back to elementary functions.
Examples
========
>>> from sympy.holonomic.holonomic import HolonomicFunction, DifferentialOperators
>>> from sympy.polys.domains import ZZ, QQ
>>> from sympy import symbols, S
>>> x = symbols('x')
>>> R, Dx = DifferentialOperators(ZZ.old_poly_ring(x),'Dx')
>>> HolonomicFunction(x**2*Dx**2 + x*Dx + (x**2 - 1), x, 0, [0, S(1)/2]).to_expr()
besselj(1, x)
>>> HolonomicFunction((1 + x)*Dx**3 + Dx**2, x, 0, [1, 1, 1]).to_expr()
x*log(x + 1) + log(x + 1) + 1
"""
return hyperexpand(self.to_hyper()).simplify()
def change_ics(self, b, lenics=None):
"""
Changes the point `x0` to `b` for initial conditions.
Examples
========
>>> from sympy.holonomic import expr_to_holonomic
>>> from sympy import symbols, sin, cos, exp
>>> x = symbols('x')
>>> expr_to_holonomic(sin(x)).change_ics(1)
HolonomicFunction((1) + (1)Dx**2, x), f(1) = sin(1), f'(1) = cos(1)
>>> expr_to_holonomic(exp(x)).change_ics(2)
HolonomicFunction((-1) + (1)Dx, x), f(2) = exp(2)
"""
symbolic = True
        if lenics is None and len(self.y0) > self.annihilator.order:
lenics = len(self.y0)
dom = self.annihilator.parent.base.domain
try:
sol = expr_to_holonomic(self.to_expr(), x=self.x, x0=b, lenics=lenics, domain=dom)
except (NotPowerSeriesError, NotHyperSeriesError):
symbolic = False
if symbolic and sol.x0 == b:
return sol
y0 = self.evalf(b, derivatives=True)
return HolonomicFunction(self.annihilator, self.x, b, y0)
def to_meijerg(self):
"""
Returns a linear combination of Meijer G-functions.
Examples
========
>>> from sympy.holonomic import expr_to_holonomic
>>> from sympy import sin, cos, hyperexpand, log, symbols
>>> x = symbols('x')
>>> hyperexpand(expr_to_holonomic(cos(x) + sin(x)).to_meijerg())
sin(x) + cos(x)
>>> hyperexpand(expr_to_holonomic(log(x)).to_meijerg()).simplify()
log(x)
See Also
========
to_hyper()
"""
# convert to hypergeometric first
rep = self.to_hyper(as_list=True)
sol = S(0)
for i in rep:
if len(i) == 1:
sol += i[0]
elif len(i) == 2:
sol += i[0] * _hyper_to_meijerg(i[1])
return sol
def from_hyper(func, x0=0, evalf=False):
"""
    Converts a Hypergeometric Function to Holonomic.
    func is the Hypergeometric Function and x0 is the point at
    which initial conditions are required.
Examples
    ========
>>> from sympy.holonomic.holonomic import from_hyper, DifferentialOperators
>>> from sympy import symbols, hyper, S
>>> x = symbols('x')
>>> from_hyper(hyper([], [S(3)/2], x**2/4))
HolonomicFunction((-x) + (2)Dx + (x)Dx**2, x), f(1) = sinh(1), f'(1) = -sinh(1) + cosh(1)
"""
a = func.ap
b = func.bq
z = func.args[2]
x = z.atoms(Symbol).pop()
R, Dx = DifferentialOperators(QQ.old_poly_ring(x), 'Dx')
# generalized hypergeometric differential equation
r1 = 1
for i in range(len(a)):
r1 = r1 * (x * Dx + a[i])
r2 = Dx
for i in range(len(b)):
r2 = r2 * (x * Dx + b[i] - 1)
sol = r1 - r2
simp = hyperexpand(func)
if isinstance(simp, Infinity) or isinstance(simp, NegativeInfinity):
return HolonomicFunction(sol, x).composition(z)
def _find_conditions(simp, x, x0, order, evalf=False):
y0 = []
for i in range(order):
if evalf:
val = simp.subs(x, x0).evalf()
else:
val = simp.subs(x, x0)
# return None if it is Infinite or NaN
if (val.is_finite is not None and not val.is_finite) or isinstance(val, NaN):
return None
y0.append(val)
simp = simp.diff(x)
return y0
# if the function is known symbolically
if not isinstance(simp, hyper):
y0 = _find_conditions(simp, x, x0, sol.order)
while not y0:
        # if values don't exist at 0, then try to find initial
        # conditions at 1. If they don't exist at 1 either,
        # try 2 and so on.
x0 += 1
y0 = _find_conditions(simp, x, x0, sol.order)
return HolonomicFunction(sol, x).composition(z, x0, y0)
if isinstance(simp, hyper):
x0 = 1
        # use evalf if the function can't be simplified
y0 = _find_conditions(simp, x, x0, sol.order, evalf)
while not y0:
x0 += 1
y0 = _find_conditions(simp, x, x0, sol.order, evalf)
return HolonomicFunction(sol, x).composition(z, x0, y0)
return HolonomicFunction(sol, x).composition(z)
def from_meijerg(func, x0=0, evalf=False, initcond=True, domain=QQ):
"""
Converts a Meijer G-function to Holonomic.
    func is the Meijer G-function and x0 is the point at
    which initial conditions are required.
Examples
    ========
>>> from sympy.holonomic.holonomic import from_meijerg, DifferentialOperators
>>> from sympy import symbols, meijerg, S
>>> x = symbols('x')
>>> from_meijerg(meijerg(([], []), ([S(1)/2], [0]), x**2/4))
HolonomicFunction((1) + (1)Dx**2, x), f(0) = 0, f'(0) = 1/sqrt(pi)
"""
a = func.ap
b = func.bq
n = len(func.an)
m = len(func.bm)
p = len(a)
z = func.args[2]
x = z.atoms(Symbol).pop()
R, Dx = DifferentialOperators(domain.old_poly_ring(x), 'Dx')
# compute the differential equation satisfied by the
# Meijer G-function.
mnp = (-1)**(m + n - p)
r1 = x * mnp
for i in range(len(a)):
r1 *= x * Dx + 1 - a[i]
r2 = 1
for i in range(len(b)):
r2 *= x * Dx - b[i]
sol = r1 - r2
if not initcond:
return HolonomicFunction(sol, x).composition(z)
simp = hyperexpand(func)
if isinstance(simp, Infinity) or isinstance(simp, NegativeInfinity):
return HolonomicFunction(sol, x).composition(z)
def _find_conditions(simp, x, x0, order, evalf=False):
y0 = []
for i in range(order):
if evalf:
val = simp.subs(x, x0).evalf()
else:
val = simp.subs(x, x0)
if (val.is_finite is not None and not val.is_finite) or isinstance(val, NaN):
return None
y0.append(val)
simp = simp.diff(x)
return y0
# computing initial conditions
if not isinstance(simp, meijerg):
y0 = _find_conditions(simp, x, x0, sol.order)
while not y0:
x0 += 1
y0 = _find_conditions(simp, x, x0, sol.order)
return HolonomicFunction(sol, x).composition(z, x0, y0)
if isinstance(simp, meijerg):
x0 = 1
y0 = _find_conditions(simp, x, x0, sol.order, evalf)
while not y0:
x0 += 1
y0 = _find_conditions(simp, x, x0, sol.order, evalf)
return HolonomicFunction(sol, x).composition(z, x0, y0)
return HolonomicFunction(sol, x).composition(z)
x_1 = Dummy('x_1')
_lookup_table = None
domain_for_table = None
from sympy.integrals.meijerint import _mytype
def expr_to_holonomic(func, x=None, x0=0, y0=None, lenics=None, domain=None, initcond=True):
"""
    Uses `meijerint._rewrite1` to convert the given expression to a `meijerg`
    representation and then eventually to a Holonomic Function. Only works
    when `meijerint._rewrite1` returns a `meijerg` representation of the
    function provided.
Examples
========
>>> from sympy.holonomic.holonomic import expr_to_holonomic
>>> from sympy import sin, exp, symbols
>>> x = symbols('x')
>>> expr_to_holonomic(sin(x))
HolonomicFunction((1) + (1)Dx**2, x), f(0) = 0, f'(0) = 1
>>> expr_to_holonomic(exp(x))
HolonomicFunction((-1) + (1)Dx, x), f(0) = 1
See Also
========
meijerint._rewrite1, _convert_poly_rat_alg, _create_table
"""
func = sympify(func)
syms = func.free_symbols
if not x:
if len(syms) == 1:
            x = syms.pop()
else:
raise ValueError("Specify the variable for the function")
elif x in syms:
syms.remove(x)
extra_syms = list(syms)
    if domain is None:
if func.has(Float):
domain = RR
else:
domain = QQ
if len(extra_syms) != 0:
domain = domain[extra_syms].get_field()
# try to convert if the function is polynomial or rational
solpoly = _convert_poly_rat_alg(func, x, x0=x0, y0=y0, lenics=lenics, domain=domain, initcond=initcond)
if solpoly:
return solpoly
# create the lookup table
global _lookup_table, domain_for_table
if not _lookup_table:
domain_for_table = domain
_lookup_table = {}
_create_table(_lookup_table, domain=domain)
elif domain != domain_for_table:
domain_for_table = domain
_lookup_table = {}
_create_table(_lookup_table, domain=domain)
# use the table directly to convert to Holonomic
if func.is_Function:
f = func.subs(x, x_1)
t = _mytype(f, x_1)
if t in _lookup_table:
l = _lookup_table[t]
sol = l[0][1].change_x(x)
else:
sol = _convert_meijerint(func, x, initcond=False, domain=domain)
if not sol:
raise NotImplementedError
if y0:
sol.y0 = y0
if y0 or not initcond:
sol.x0 = x0
return sol
if not lenics:
lenics = sol.annihilator.order
_y0 = _find_conditions(func, x, x0, lenics)
while not _y0:
x0 += 1
_y0 = _find_conditions(func, x, x0, lenics)
return HolonomicFunction(sol.annihilator, x, x0, _y0)
if y0 or not initcond:
sol = sol.composition(func.args[0])
if y0:
sol.y0 = y0
sol.x0 = x0
return sol
if not lenics:
lenics = sol.annihilator.order
_y0 = _find_conditions(func, x, x0, lenics)
while not _y0:
x0 += 1
_y0 = _find_conditions(func, x, x0, lenics)
return sol.composition(func.args[0], x0, _y0)
    # iterate through the expression recursively
args = func.args
f = func.func
from sympy.core import Add, Mul, Pow
sol = expr_to_holonomic(args[0], x=x, initcond=False, domain=domain)
if f is Add:
for i in range(1, len(args)):
sol += expr_to_holonomic(args[i], x=x, initcond=False, domain=domain)
elif f is Mul:
for i in range(1, len(args)):
sol *= expr_to_holonomic(args[i], x=x, initcond=False, domain=domain)
elif f is Pow:
sol = sol**args[1]
sol.x0 = x0
if not sol:
raise NotImplementedError
if y0:
sol.y0 = y0
if y0 or not initcond:
return sol
if sol.y0:
return sol
if not lenics:
lenics = sol.annihilator.order
if sol.annihilator.is_singular(x0):
r = sol._indicial()
l = list(r)
if len(r) == 1 and r[l[0]] == S(1):
r = l[0]
g = func / (x - x0)**r
singular_ics = _find_conditions(g, x, x0, lenics)
singular_ics = [j / factorial(i) for i, j in enumerate(singular_ics)]
y0 = {r:singular_ics}
return HolonomicFunction(sol.annihilator, x, x0, y0)
_y0 = _find_conditions(func, x, x0, lenics)
while not _y0:
x0 += 1
_y0 = _find_conditions(func, x, x0, lenics)
return HolonomicFunction(sol.annihilator, x, x0, _y0)
## Some helper functions ##
def _normalize(list_of, parent, negative=True):
"""
Normalize a given annihilator
"""
num = []
denom = []
base = parent.base
K = base.get_field()
lcm_denom = base.from_sympy(S(1))
list_of_coeff = []
# convert polynomials to the elements of associated
# fraction field
for i, j in enumerate(list_of):
if isinstance(j, base.dtype):
list_of_coeff.append(K.new(j.rep))
elif not isinstance(j, K.dtype):
list_of_coeff.append(K.from_sympy(sympify(j)))
else:
list_of_coeff.append(j)
# corresponding numerators of the sequence of polynomials
num.append(list_of_coeff[i].numer())
# corresponding denominators
denom.append(list_of_coeff[i].denom())
# lcm of denominators in the coefficients
for i in denom:
lcm_denom = i.lcm(lcm_denom)
if negative:
lcm_denom = -lcm_denom
lcm_denom = K.new(lcm_denom.rep)
# multiply the coefficients with lcm
for i, j in enumerate(list_of_coeff):
list_of_coeff[i] = j * lcm_denom
gcd_numer = base((list_of_coeff[-1].numer() / list_of_coeff[-1].denom()).rep)
# gcd of numerators in the coefficients
for i in num:
gcd_numer = i.gcd(gcd_numer)
gcd_numer = K.new(gcd_numer.rep)
# divide all the coefficients by the gcd
for i, j in enumerate(list_of_coeff):
frac_ans = j / gcd_numer
list_of_coeff[i] = base((frac_ans.numer() / frac_ans.denom()).rep)
return DifferentialOperator(list_of_coeff, parent)
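# A hedged worked instance of the normalization above (comments only): for
# the operator (1/x) + (x/2)*Dx with negative=False, the lcm of the
# denominators is 2*x, scaling the coefficients to [2, x**2]; the gcd of the
# numerators is 1, so the result is the polynomial operator (2) + (x**2)Dx,
# which has the same solution space.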
def _derivate_diff_eq(listofpoly):
"""
    Let a differential equation a0(x)y(x) + a1(x)y'(x) + ... = 0 be given,
    where a0, a1, ... are polynomials or rational functions. The function
    returns b0, b1, b2, ... such that the differential equation
    b0(x)y(x) + b1(x)y'(x) + ... = 0 is formed after differentiating the
    former equation.
"""
sol = []
a = len(listofpoly) - 1
sol.append(DMFdiff(listofpoly[0]))
for i, j in enumerate(listofpoly[1:]):
sol.append(DMFdiff(j) + listofpoly[i])
sol.append(listofpoly[a])
return sol
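# A hedged worked instance (comments only): differentiating
# a0*y + a1*y' = 0 gives a0'*y + (a0 + a1')*y' + a1*y'' = 0, which is
# exactly the list [DMFdiff(a0), DMFdiff(a1) + a0, a1] built above.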
def _hyper_to_meijerg(func):
"""
    Converts a `hyper` function to a `meijerg` function.
"""
ap = func.ap
bq = func.bq
p = len(ap)
q = len(bq)
ispoly = any(i <= 0 and int(i) == i for i in ap)
if ispoly:
return hyperexpand(func)
z = func.args[2]
    # parameters of the `meijerg` function.
an = (1 - i for i in ap)
anp = ()
bm = (S(0), )
bmq = (1 - i for i in bq)
k = S(1)
for i in bq:
k = k * gamma(i)
for i in ap:
k = k / gamma(i)
return k * meijerg(an, anp, bm, bmq, -z)
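# A hedged check of the identity used above (comments only, output not
# verified here): for hyper([], [], z) = exp(z) the parameters collapse to
# G^{1,0}_{0,1}(-z | ; 0) = exp(z), so one would expect
# >>> hyperexpand(_hyper_to_meijerg(hyper([], [], z)))
# exp(z)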
def _add_lists(list1, list2):
"""Takes polynomial sequences of two annihilators a and b and returns
the list of polynomials of sum of a and b.
"""
if len(list1) <= len(list2):
sol = [a + b for a, b in zip(list1, list2)] + list2[len(list1):]
else:
sol = [a + b for a, b in zip(list1, list2)] + list1[len(list2):]
return sol
def _extend_y0(Holonomic, n):
"""
Tries to find more initial conditions by substituting the initial
value point in the differential equation.
"""
if Holonomic.annihilator.is_singular(Holonomic.x0) or Holonomic.is_singularics() == True:
return Holonomic.y0
annihilator = Holonomic.annihilator
a = annihilator.order
x = Holonomic.x
listofpoly = []
y0 = Holonomic.y0
R = annihilator.parent.base
K = R.get_field()
for i, j in enumerate(annihilator.listofpoly):
if isinstance(j, annihilator.parent.base.dtype):
listofpoly.append(K.new(j.rep))
if len(y0) < a or n <= len(y0):
return y0
else:
list_red = [-listofpoly[i] / listofpoly[a]
for i in range(a)]
if len(y0) > a:
y1 = [y0[i] for i in range(a)]
else:
y1 = [i for i in y0]
for i in range(n - a):
sol = 0
for a, b in zip(y1, list_red):
r = DMFsubs(b, Holonomic.x0)
try:
if not r.is_finite:
return y0
except AttributeError:
pass
if isinstance(r, (PolyElement, FracElement)):
r = r.as_expr()
sol += a * r
y1.append(sol)
list_red = _derivate_diff_eq(list_red)
return y0 + y1[len(y0):]
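# A hedged worked instance of the extension above (comments only): for
# sin(x), i.e. annihilator Dx**2 + 1 with y0 = [0, 1], the relation
# y'' = -y propagates the conditions, so _extend_y0(f, 4) is expected to
# return [0, 1, 0, -1] -- the first four derivatives of sin at 0.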
def DMFdiff(frac):
# differentiate a DMF object represented as p/q
if not isinstance(frac, DMF):
return frac.diff()
K = frac.ring
p = K.numer(frac)
q = K.denom(frac)
sol_num = - p * q.diff() + q * p.diff()
sol_denom = q**2
return K((sol_num.rep, sol_denom.rep))
def DMFsubs(frac, x0, mpm=False):
# substitute the point x0 in DMF object of the form p/q
if not isinstance(frac, DMF):
return frac
p = frac.num
q = frac.den
sol_p = S(0)
sol_q = S(0)
if mpm:
from mpmath import mp
for i, j in enumerate(reversed(p)):
if mpm:
j = sympify(j)._to_mpmath(mp.prec)
sol_p += j * x0**i
for i, j in enumerate(reversed(q)):
if mpm:
j = sympify(j)._to_mpmath(mp.prec)
sol_q += j * x0**i
if isinstance(sol_p, (PolyElement, FracElement)):
sol_p = sol_p.as_expr()
if isinstance(sol_q, (PolyElement, FracElement)):
sol_q = sol_q.as_expr()
return sol_p / sol_q
def _convert_poly_rat_alg(func, x, x0=0, y0=None, lenics=None, domain=QQ, initcond=True):
"""Converts Polynomials and Rationals to Holonomic.
"""
ispoly = func.is_polynomial()
if not ispoly:
israt = func.is_rational_function()
else:
israt = True
if not (ispoly or israt):
basepoly, ratexp = func.as_base_exp()
if basepoly.is_polynomial() and ratexp.is_Number:
if isinstance(ratexp, Float):
ratexp = nsimplify(ratexp)
m, n = ratexp.p, ratexp.q
is_alg = True
else:
is_alg = False
else:
is_alg = True
if not (ispoly or israt or is_alg):
return None
R = domain.old_poly_ring(x)
_, Dx = DifferentialOperators(R, 'Dx')
# if the function is constant
if not func.has(x):
return HolonomicFunction(Dx, x, 0, [func])
if ispoly:
# differential equation satisfied by polynomial
sol = func * Dx - func.diff(x)
sol = _normalize(sol.listofpoly, sol.parent, negative=False)
is_singular = sol.is_singular(x0)
# try to compute the conditions for singular points
        if y0 is None and x0 == 0 and is_singular:
rep = R.from_sympy(func).rep
for i, j in enumerate(reversed(rep)):
if j == 0:
continue
else:
coeff = list(reversed(rep))[i:]
indicial = i
break
y0 = {indicial:coeff}
elif israt:
order = 1
p, q = func.as_numer_denom()
        # differential equation satisfied by a rational function
sol = p * q * Dx + p * q.diff(x) - q * p.diff(x)
sol = _normalize(sol.listofpoly, sol.parent, negative=False)
elif is_alg:
sol = n * (x / m) * Dx - 1
sol = HolonomicFunction(sol, x).composition(basepoly).annihilator
is_singular = sol.is_singular(x0)
# try to compute the conditions for singular points
        if y0 is None and x0 == 0 and is_singular:
rep = R.from_sympy(basepoly).rep
for i, j in enumerate(reversed(rep)):
if j == 0:
continue
else:
coeff = S(j)**ratexp
indicial = S(i) * ratexp
break
y0 = {indicial: [coeff]}
if y0 or not initcond:
return HolonomicFunction(sol, x, x0, y0)
if not lenics:
lenics = sol.order
if sol.is_singular(x0):
r = HolonomicFunction(sol, x, x0)._indicial()
l = list(r)
if len(r) == 1 and r[l[0]] == S(1):
r = l[0]
g = func / (x - x0)**r
singular_ics = _find_conditions(g, x, x0, lenics)
singular_ics = [j / factorial(i) for i, j in enumerate(singular_ics)]
y0 = {r:singular_ics}
return HolonomicFunction(sol, x, x0, y0)
y0 = _find_conditions(func, x, x0, lenics)
while not y0:
x0 += 1
y0 = _find_conditions(func, x, x0, lenics)
return HolonomicFunction(sol, x, x0, y0)
def _convert_meijerint(func, x, initcond=True, domain=QQ):
args = meijerint._rewrite1(func, x)
if args:
fac, po, g, _ = args
else:
return None
# lists for sum of meijerg functions
fac_list = [fac * i[0] for i in g]
t = po.as_base_exp()
s = t[1] if t[0] is x else S(0)
po_list = [s + i[1] for i in g]
G_list = [i[2] for i in g]
# finds meijerg representation of x**s * meijerg(a1 ... ap, b1 ... bq, z)
def _shift(func, s):
z = func.args[-1]
if z.has(I):
z = z.subs(exp_polar, exp)
d = z.collect(x, evaluate=False)
b = list(d)[0]
a = d[b]
t = b.as_base_exp()
b = t[1] if t[0] is x else S(0)
r = s / b
an = (i + r for i in func.args[0][0])
ap = (i + r for i in func.args[0][1])
bm = (i + r for i in func.args[1][0])
bq = (i + r for i in func.args[1][1])
return a**-r, meijerg((an, ap), (bm, bq), z)
coeff, m = _shift(G_list[0], po_list[0])
sol = fac_list[0] * coeff * from_meijerg(m, initcond=initcond, domain=domain)
# add all the meijerg functions after converting to holonomic
for i in range(1, len(G_list)):
coeff, m = _shift(G_list[i], po_list[i])
sol += fac_list[i] * coeff * from_meijerg(m, initcond=initcond, domain=domain)
return sol
def _create_table(table, domain=QQ):
"""
Creates the look-up table. For a similar implementation
see meijerint._create_lookup_table.
"""
def add(formula, annihilator, arg, x0=0, y0=[]):
"""
Adds a formula in the dictionary
"""
table.setdefault(_mytype(formula, x_1), []).append((formula,
HolonomicFunction(annihilator, arg, x0, y0)))
R = domain.old_poly_ring(x_1)
_, Dx = DifferentialOperators(R, 'Dx')
from sympy import (sin, cos, exp, log, erf, sqrt, pi,
sinh, cosh, sinc, erfc, Si, Ci, Shi, erfi)
# add some basic functions
add(sin(x_1), Dx**2 + 1, x_1, 0, [0, 1])
add(cos(x_1), Dx**2 + 1, x_1, 0, [1, 0])
add(exp(x_1), Dx - 1, x_1, 0, 1)
add(log(x_1), Dx + x_1*Dx**2, x_1, 1, [0, 1])
add(erf(x_1), 2*x_1*Dx + Dx**2, x_1, 0, [0, 2/sqrt(pi)])
add(erfc(x_1), 2*x_1*Dx + Dx**2, x_1, 0, [1, -2/sqrt(pi)])
add(erfi(x_1), -2*x_1*Dx + Dx**2, x_1, 0, [0, 2/sqrt(pi)])
add(sinh(x_1), Dx**2 - 1, x_1, 0, [0, 1])
add(cosh(x_1), Dx**2 - 1, x_1, 0, [1, 0])
add(sinc(x_1), x_1 + 2*Dx + x_1*Dx**2, x_1)
add(Si(x_1), x_1*Dx + 2*Dx**2 + x_1*Dx**3, x_1)
add(Ci(x_1), x_1*Dx + 2*Dx**2 + x_1*Dx**3, x_1)
add(Shi(x_1), -x_1*Dx + 2*Dx**2 + x_1*Dx**3, x_1)
def _find_conditions(func, x, x0, order):
y0 = []
for i in range(order):
val = func.subs(x, x0)
if isinstance(val, NaN):
val = limit(func, x, x0)
if (val.is_finite is not None and not val.is_finite) or isinstance(val, NaN):
return None
y0.append(val)
func = func.diff(x)
return y0
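# A hedged usage sketch (comments only): for func = sin(x), x0 = 0 and
# order = 2 the loop above evaluates sin(0) = 0 and cos(0) = 1 and returns
# [0, 1]; a None return signals a pole at x0, and callers retry with x0 + 1.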
| bsd-3-clause |
linebp/pandas | pandas/tests/indexing/test_panel.py | 7 | 7477 | import pytest
from warnings import catch_warnings
import numpy as np
from pandas.util import testing as tm
from pandas import Panel, date_range, DataFrame
class TestPanel(object):
def test_iloc_getitem_panel(self):
with catch_warnings(record=True):
# GH 7189
p = Panel(np.arange(4 * 3 * 2).reshape(4, 3, 2),
items=['A', 'B', 'C', 'D'],
major_axis=['a', 'b', 'c'],
minor_axis=['one', 'two'])
result = p.iloc[1]
expected = p.loc['B']
tm.assert_frame_equal(result, expected)
result = p.iloc[1, 1]
expected = p.loc['B', 'b']
tm.assert_series_equal(result, expected)
result = p.iloc[1, 1, 1]
expected = p.loc['B', 'b', 'two']
assert result == expected
# slice
result = p.iloc[1:3]
expected = p.loc[['B', 'C']]
tm.assert_panel_equal(result, expected)
result = p.iloc[:, 0:2]
expected = p.loc[:, ['a', 'b']]
tm.assert_panel_equal(result, expected)
# list of integers
result = p.iloc[[0, 2]]
expected = p.loc[['A', 'C']]
tm.assert_panel_equal(result, expected)
            # negative indices
result = p.iloc[[-1, 1], [-1, 1]]
expected = p.loc[['D', 'B'], ['c', 'b']]
tm.assert_panel_equal(result, expected)
            # duplicate indices
result = p.iloc[[-1, -1, 1], [-1, 1]]
expected = p.loc[['D', 'D', 'B'], ['c', 'b']]
tm.assert_panel_equal(result, expected)
# combined
result = p.iloc[0, [True, True], [0, 1]]
expected = p.loc['A', ['a', 'b'], ['one', 'two']]
tm.assert_frame_equal(result, expected)
# out-of-bounds exception
with pytest.raises(IndexError):
p.iloc[tuple([10, 5])]
def f():
p.iloc[0, [True, True], [0, 1, 2]]
pytest.raises(IndexError, f)
# trying to use a label
with pytest.raises(ValueError):
p.iloc[tuple(['j', 'D'])]
# GH
p = Panel(
np.random.rand(4, 3, 2), items=['A', 'B', 'C', 'D'],
major_axis=['U', 'V', 'W'], minor_axis=['X', 'Y'])
expected = p['A']
result = p.iloc[0, :, :]
tm.assert_frame_equal(result, expected)
result = p.iloc[0, [True, True, True], :]
tm.assert_frame_equal(result, expected)
result = p.iloc[0, [True, True, True], [0, 1]]
tm.assert_frame_equal(result, expected)
def f():
p.iloc[0, [True, True, True], [0, 1, 2]]
pytest.raises(IndexError, f)
def f():
p.iloc[0, [True, True, True], [2]]
pytest.raises(IndexError, f)
def test_iloc_panel_issue(self):
with catch_warnings(record=True):
# see gh-3617
p = Panel(np.random.randn(4, 4, 4))
assert p.iloc[:3, :3, :3].shape == (3, 3, 3)
assert p.iloc[1, :3, :3].shape == (3, 3)
assert p.iloc[:3, 1, :3].shape == (3, 3)
assert p.iloc[:3, :3, 1].shape == (3, 3)
assert p.iloc[1, 1, :3].shape == (3, )
assert p.iloc[1, :3, 1].shape == (3, )
assert p.iloc[:3, 1, 1].shape == (3, )
def test_panel_getitem(self):
with catch_warnings(record=True):
            # GH4016: date selection returns a frame when using a partial
            # string selection
ind = date_range(start="2000", freq="D", periods=1000)
df = DataFrame(
np.random.randn(
len(ind), 5), index=ind, columns=list('ABCDE'))
panel = Panel(dict([('frame_' + c, df) for c in list('ABC')]))
test2 = panel.loc[:, "2002":"2002-12-31"]
test1 = panel.loc[:, "2002"]
tm.assert_panel_equal(test1, test2)
# GH8710
# multi-element getting with a list
panel = tm.makePanel()
expected = panel.iloc[[0, 1]]
result = panel.loc[['ItemA', 'ItemB']]
tm.assert_panel_equal(result, expected)
result = panel.loc[['ItemA', 'ItemB'], :, :]
tm.assert_panel_equal(result, expected)
result = panel[['ItemA', 'ItemB']]
tm.assert_panel_equal(result, expected)
result = panel.loc['ItemA':'ItemB']
tm.assert_panel_equal(result, expected)
with catch_warnings(record=True):
result = panel.ix[['ItemA', 'ItemB']]
tm.assert_panel_equal(result, expected)
# with an object-like
# GH 9140
class TestObject:
def __str__(self):
return "TestObject"
obj = TestObject()
p = Panel(np.random.randn(1, 5, 4), items=[obj],
major_axis=date_range('1/1/2000', periods=5),
minor_axis=['A', 'B', 'C', 'D'])
expected = p.iloc[0]
result = p[obj]
tm.assert_frame_equal(result, expected)
def test_panel_setitem(self):
with catch_warnings(record=True):
# GH 7763
# loc and setitem have setting differences
np.random.seed(0)
index = range(3)
columns = list('abc')
panel = Panel({'A': DataFrame(np.random.randn(3, 3),
index=index, columns=columns),
'B': DataFrame(np.random.randn(3, 3),
index=index, columns=columns),
'C': DataFrame(np.random.randn(3, 3),
index=index, columns=columns)})
replace = DataFrame(np.eye(3, 3), index=range(3), columns=columns)
expected = Panel({'A': replace, 'B': replace, 'C': replace})
p = panel.copy()
for idx in list('ABC'):
p[idx] = replace
tm.assert_panel_equal(p, expected)
p = panel.copy()
for idx in list('ABC'):
p.loc[idx, :, :] = replace
tm.assert_panel_equal(p, expected)
def test_panel_assignment(self):
with catch_warnings(record=True):
# GH3777
wp = Panel(np.random.randn(2, 5, 4), items=['Item1', 'Item2'],
major_axis=date_range('1/1/2000', periods=5),
minor_axis=['A', 'B', 'C', 'D'])
wp2 = Panel(np.random.randn(2, 5, 4), items=['Item1', 'Item2'],
major_axis=date_range('1/1/2000', periods=5),
minor_axis=['A', 'B', 'C', 'D'])
# TODO: unused?
# expected = wp.loc[['Item1', 'Item2'], :, ['A', 'B']]
def f():
wp.loc[['Item1', 'Item2'], :, ['A', 'B']] = wp2.loc[
['Item1', 'Item2'], :, ['A', 'B']]
pytest.raises(NotImplementedError, f)
# to_assign = wp2.loc[['Item1', 'Item2'], :, ['A', 'B']]
# wp.loc[['Item1', 'Item2'], :, ['A', 'B']] = to_assign
# result = wp.loc[['Item1', 'Item2'], :, ['A', 'B']]
# tm.assert_panel_equal(result,expected)
| bsd-3-clause |
jakobworldpeace/scikit-learn | examples/cluster/plot_segmentation_toy.py | 91 | 3522 | """
===========================================
Spectral clustering for image segmentation
===========================================
In this example, an image with connected circles is generated and
spectral clustering is used to separate the circles.
In these settings, the :ref:`spectral_clustering` approach solves the problem
known as 'normalized graph cuts': the image is seen as a graph of
connected voxels, and the spectral clustering algorithm amounts to
choosing graph cuts defining regions while minimizing the ratio of the
gradient along the cut, and the volume of the region.
As the algorithm tries to balance the volume (i.e. balance the region
sizes), the segmentation fails if we take circles with different sizes.
In addition, as there is no useful information in the intensity of the image,
or its gradient, we choose to perform the spectral clustering on a graph
that is only weakly informed by the gradient. This is close to performing
a Voronoi partition of the graph.
In addition, we use the mask of the objects to restrict the graph to the
outline of the objects. In this example, we are interested in
separating the objects one from the other, and not from the background.
"""
print(__doc__)
# Authors: Emmanuelle Gouillart <[email protected]>
# Gael Varoquaux <[email protected]>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.feature_extraction import image
from sklearn.cluster import spectral_clustering
###############################################################################
l = 100
x, y = np.indices((l, l))
center1 = (28, 24)
center2 = (40, 50)
center3 = (67, 58)
center4 = (24, 70)
radius1, radius2, radius3, radius4 = 16, 14, 15, 14
circle1 = (x - center1[0]) ** 2 + (y - center1[1]) ** 2 < radius1 ** 2
circle2 = (x - center2[0]) ** 2 + (y - center2[1]) ** 2 < radius2 ** 2
circle3 = (x - center3[0]) ** 2 + (y - center3[1]) ** 2 < radius3 ** 2
circle4 = (x - center4[0]) ** 2 + (y - center4[1]) ** 2 < radius4 ** 2
###############################################################################
# 4 circles
img = circle1 + circle2 + circle3 + circle4
# We use a mask that limits to the foreground: the problem that we are
# interested in here is not separating the objects from the background,
# but separating them one from the other.
mask = img.astype(bool)
img = img.astype(float)
img += 1 + 0.2 * np.random.randn(*img.shape)
# Convert the image into a graph with the value of the gradient on the
# edges.
graph = image.img_to_graph(img, mask=mask)
# Take a decreasing function of the gradient: we make it only weakly
# dependent on the gradient, so the segmentation is close to a Voronoi partition
graph.data = np.exp(-graph.data / graph.data.std())
# Force the solver to be arpack, since amg is numerically
# unstable on this example
labels = spectral_clustering(graph, n_clusters=4, eigen_solver='arpack')
label_im = -np.ones(mask.shape)
label_im[mask] = labels
plt.matshow(img)
plt.matshow(label_im)
###############################################################################
# 2 circles
img = circle1 + circle2
mask = img.astype(bool)
img = img.astype(float)
img += 1 + 0.2 * np.random.randn(*img.shape)
graph = image.img_to_graph(img, mask=mask)
graph.data = np.exp(-graph.data / graph.data.std())
labels = spectral_clustering(graph, n_clusters=2, eigen_solver='arpack')
label_im = -np.ones(mask.shape)
label_im[mask] = labels
plt.matshow(img)
plt.matshow(label_im)
plt.show()
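###############################################################################
# Illustrative note (not part of the original example): 'graph' is a sparse
# similarity matrix over the masked pixels; assuming the scipy.sparse
# semantics of image.img_to_graph, these quick checks apply to the 2-circle
# graph built above.
print(graph.shape) # (n_masked_pixels, n_masked_pixels)
print(graph.nnz) # number of stored entries (pixel/neighbor edges)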
| bsd-3-clause |
cloud-fan/spark | python/pyspark/sql/tests/test_pandas_udf_typehints.py | 22 | 9603 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import unittest
import inspect
from typing import Union, Iterator, Tuple
from pyspark.sql.functions import mean, lit
from pyspark.testing.sqlutils import ReusedSQLTestCase, \
have_pandas, have_pyarrow, pandas_requirement_message, \
pyarrow_requirement_message
from pyspark.sql.pandas.typehints import infer_eval_type
from pyspark.sql.pandas.functions import pandas_udf, PandasUDFType
from pyspark.sql import Row
if have_pandas:
import pandas as pd
import numpy as np
from pandas.testing import assert_frame_equal
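# Illustrative note (not part of the original test file): the mapping the
# tests below assert is Series/DataFrame -> Series/DataFrame => SCALAR,
# Iterator[...] -> Iterator[...] => SCALAR_ITER, and Series/DataFrame ->
# scalar (str/int/float/Row) => GROUPED_AGG; anything else raises.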
@unittest.skipIf(
not have_pandas or not have_pyarrow,
pandas_requirement_message or pyarrow_requirement_message) # type: ignore[arg-type]
class PandasUDFTypeHintsTests(ReusedSQLTestCase):
def test_type_annotation_scalar(self):
def func(col: pd.Series) -> pd.Series:
pass
self.assertEqual(
infer_eval_type(inspect.signature(func)), PandasUDFType.SCALAR)
def func(col: pd.DataFrame, col1: pd.Series) -> pd.DataFrame:
pass
self.assertEqual(
infer_eval_type(inspect.signature(func)), PandasUDFType.SCALAR)
def func(col: pd.DataFrame, *args: pd.Series) -> pd.Series:
pass
self.assertEqual(
infer_eval_type(inspect.signature(func)), PandasUDFType.SCALAR)
def func(col: pd.Series, *args: pd.Series, **kwargs: pd.DataFrame) -> pd.Series:
pass
self.assertEqual(
infer_eval_type(inspect.signature(func)), PandasUDFType.SCALAR)
def func(col: pd.Series, *, col2: pd.DataFrame) -> pd.DataFrame:
pass
self.assertEqual(
infer_eval_type(inspect.signature(func)), PandasUDFType.SCALAR)
def func(col: Union[pd.Series, pd.DataFrame], *, col2: pd.DataFrame) -> pd.Series:
pass
self.assertEqual(
infer_eval_type(inspect.signature(func)), PandasUDFType.SCALAR)
def test_type_annotation_scalar_iter(self):
def func(iter: Iterator[pd.Series]) -> Iterator[pd.Series]:
pass
self.assertEqual(
infer_eval_type(inspect.signature(func)), PandasUDFType.SCALAR_ITER)
def func(iter: Iterator[Tuple[pd.DataFrame, pd.Series]]) -> Iterator[pd.DataFrame]:
pass
self.assertEqual(
infer_eval_type(inspect.signature(func)), PandasUDFType.SCALAR_ITER)
def func(iter: Iterator[Tuple[pd.DataFrame, ...]]) -> Iterator[pd.Series]:
pass
self.assertEqual(
infer_eval_type(inspect.signature(func)), PandasUDFType.SCALAR_ITER)
def func(
iter: Iterator[Tuple[Union[pd.DataFrame, pd.Series], ...]]
) -> Iterator[pd.Series]:
pass
self.assertEqual(
infer_eval_type(inspect.signature(func)), PandasUDFType.SCALAR_ITER)
def test_type_annotation_group_agg(self):
def func(col: pd.Series) -> str:
pass
self.assertEqual(
infer_eval_type(inspect.signature(func)), PandasUDFType.GROUPED_AGG)
def func(col: pd.DataFrame, col1: pd.Series) -> int:
pass
self.assertEqual(
infer_eval_type(inspect.signature(func)), PandasUDFType.GROUPED_AGG)
def func(col: pd.DataFrame, *args: pd.Series) -> Row:
pass
self.assertEqual(
infer_eval_type(inspect.signature(func)), PandasUDFType.GROUPED_AGG)
def func(col: pd.Series, *args: pd.Series, **kwargs: pd.DataFrame) -> str:
pass
self.assertEqual(
infer_eval_type(inspect.signature(func)), PandasUDFType.GROUPED_AGG)
def func(col: pd.Series, *, col2: pd.DataFrame) -> float:
pass
self.assertEqual(
infer_eval_type(inspect.signature(func)), PandasUDFType.GROUPED_AGG)
def func(col: Union[pd.Series, pd.DataFrame], *, col2: pd.DataFrame) -> float:
pass
self.assertEqual(
infer_eval_type(inspect.signature(func)), PandasUDFType.GROUPED_AGG)
def test_type_annotation_negative(self):
def func(col: str) -> pd.Series:
pass
self.assertRaisesRegex(
NotImplementedError,
"Unsupported signature.*str",
infer_eval_type, inspect.signature(func))
def func(col: pd.DataFrame, col1: int) -> pd.DataFrame:
pass
self.assertRaisesRegex(
NotImplementedError,
"Unsupported signature.*int",
infer_eval_type, inspect.signature(func))
def func(col: Union[pd.DataFrame, str], col1: int) -> pd.DataFrame:
pass
self.assertRaisesRegex(
NotImplementedError,
"Unsupported signature.*str",
infer_eval_type, inspect.signature(func))
def func(col: pd.Series) -> Tuple[pd.DataFrame]:
pass
self.assertRaisesRegex(
NotImplementedError,
"Unsupported signature.*Tuple",
infer_eval_type, inspect.signature(func))
def func(col, *args: pd.Series) -> pd.Series:
pass
self.assertRaisesRegex(
ValueError,
"should be specified.*Series",
infer_eval_type, inspect.signature(func))
def func(col: pd.Series, *args: pd.Series, **kwargs: pd.DataFrame):
pass
self.assertRaisesRegex(
ValueError,
"should be specified.*Series",
infer_eval_type, inspect.signature(func))
def func(col: pd.Series, *, col2) -> pd.DataFrame:
pass
self.assertRaisesRegex(
ValueError,
"should be specified.*Series",
infer_eval_type, inspect.signature(func))
def test_scalar_udf_type_hint(self):
df = self.spark.range(10).selectExpr("id", "id as v")
def plus_one(v: Union[pd.Series, pd.DataFrame]) -> pd.Series:
return v + 1
plus_one = pandas_udf("long")(plus_one)
actual = df.select(plus_one(df.v).alias("plus_one"))
expected = df.selectExpr("(v + 1) as plus_one")
assert_frame_equal(expected.toPandas(), actual.toPandas())
def test_scalar_iter_udf_type_hint(self):
df = self.spark.range(10).selectExpr("id", "id as v")
def plus_one(itr: Iterator[pd.Series]) -> Iterator[pd.Series]:
for s in itr:
yield s + 1
plus_one = pandas_udf("long")(plus_one)
actual = df.select(plus_one(df.v).alias("plus_one"))
expected = df.selectExpr("(v + 1) as plus_one")
assert_frame_equal(expected.toPandas(), actual.toPandas())
def test_group_agg_udf_type_hint(self):
df = self.spark.range(10).selectExpr("id", "id as v")
def weighted_mean(v: pd.Series, w: pd.Series) -> float:
return np.average(v, weights=w)
weighted_mean = pandas_udf("double")(weighted_mean)
actual = df.groupby('id').agg(weighted_mean(df.v, lit(1.0))).sort('id')
expected = df.groupby('id').agg(mean(df.v).alias('weighted_mean(v, 1.0)')).sort('id')
assert_frame_equal(expected.toPandas(), actual.toPandas())
def test_ignore_type_hint_in_group_apply_in_pandas(self):
df = self.spark.range(10)
def pandas_plus_one(v: pd.DataFrame) -> pd.DataFrame:
return v + 1
actual = df.groupby('id').applyInPandas(pandas_plus_one, schema=df.schema).sort('id')
expected = df.selectExpr("id + 1 as id")
assert_frame_equal(expected.toPandas(), actual.toPandas())
def test_ignore_type_hint_in_cogroup_apply_in_pandas(self):
df = self.spark.range(10)
def pandas_plus_one(left: pd.DataFrame, right: pd.DataFrame) -> pd.DataFrame:
return left + 1
actual = df.groupby('id').cogroup(
self.spark.range(10).groupby("id")
).applyInPandas(pandas_plus_one, schema=df.schema).sort('id')
expected = df.selectExpr("id + 1 as id")
assert_frame_equal(expected.toPandas(), actual.toPandas())
def test_ignore_type_hint_in_map_in_pandas(self):
df = self.spark.range(10)
def pandas_plus_one(iter: Iterator[pd.DataFrame]) -> Iterator[pd.DataFrame]:
return map(lambda v: v + 1, iter)
actual = df.mapInPandas(pandas_plus_one, schema=df.schema)
expected = df.selectExpr("id + 1 as id")
assert_frame_equal(expected.toPandas(), actual.toPandas())
if __name__ == "__main__":
from pyspark.sql.tests.test_pandas_udf_typehints import * # noqa: F401
try:
import xmlrunner # type: ignore[import]
testRunner = xmlrunner.XMLTestRunner(output='target/test-reports', verbosity=2)
except ImportError:
testRunner = None
unittest.main(testRunner=testRunner, verbosity=2)
| apache-2.0 |
FEPanalysis/alchemical-analysis-OLD | alchemical_analysis/alchemical_analysis.py | 1 | 58462 | #!/usr/bin/env python
######################################################################
# Alchemical Analysis: An open tool implementing some recommended practices for analyzing alchemical free energy calculations
# Copyright 2011-2015 UC Irvine and the Authors
#
# Authors: Pavel Klimovich, Michael Shirts and David Mobley
#
#This library is free software; you can redistribute it and/or
#modify it under the terms of the GNU Lesser General Public
#License as published by the Free Software Foundation; either
#version 2.1 of the License, or (at your option) any later version.
#
#This library is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
#Lesser General Public License for more details.
#
#You should have received a copy of the GNU Lesser General Public
#License along with this library; if not, see <http://www.gnu.org/licenses/>.
######################################################################
#===================================================================================================
# IMPORTS
#===================================================================================================
## Not a built-in module. Will be called from main, whenever needed. ##
## import pymbar Multistate Bennett Acceptance Ratio estimator. ##
import sys
import numpy
import pickle # for full-precision data storage
from optparse import OptionParser # for parsing command-line options
import os # for os interface
import time as ttt_time # for timing
import pdb # for debugging
#===================================================================================================
# INPUT OPTIONS
#===================================================================================================
parser = OptionParser()
parser.add_option('-a', '--software', dest = 'software', help = 'Package\'s name the data files come from: Gromacs, Sire, Desmond, or AMBER. Default: Gromacs.', default = 'Gromacs')
parser.add_option('-c', '--cfm', dest = 'bCFM', help = 'The Curve-Fitting-Method-based consistency inspector. Default: False.', default = False, action = 'store_true')
parser.add_option('-d', '--dir', dest = 'datafile_directory', help = 'Directory in which data files are stored. Default: Current directory.', default = '.')
parser.add_option('-f', '--forwrev', dest = 'bForwrev', help = 'Plotting the free energy change as a function of time in both directions. The number of time points (an integer) is to be followed the flag. Default: 0', default = 0, type=int)
parser.add_option('-g', '--breakdown', dest = 'breakdown', help = 'Plotting the free energy differences evaluated for each pair of adjacent states for all methods. Default: False.', default = False, action = 'store_true')
parser.add_option('-i', '--threshold', dest = 'uncorr_threshold', help = 'Proceed with correlated samples if the number of uncorrelated samples is found to be less than this number. If 0 is given, the time series analysis will not be performed at all. Default: 50.', default = 50, type=int)
parser.add_option('-k', '--koff', dest = 'bSkipLambdaIndex', help = 'Give a string of lambda indices separated by \'-\' and they will be removed from the analysis. (Another approach is to have only the files of interest present in the directory). Default: None.', default = '')
parser.add_option('-m', '--methods', dest = 'methods', help = 'A list of the methods to estimate the free energy with. Default: [TI, TI-CUBIC, DEXP, IEXP, BAR, MBAR]. To add/remove methods to the above list provide a string formed of the method strings preceded with +/-. For example, \'-ti_cubic+gdel\' will turn methods into [TI, DEXP, IEXP, BAR, MBAR, GDEL]. \'ti_cubic+gdel\', on the other hand, will call [TI-CUBIC, GDEL]. \'all\' calls the full list of supported methods [TI, TI-CUBIC, DEXP, IEXP, GINS, GDEL, BAR, UBAR, RBAR, MBAR].', default = '')
parser.add_option('-n', '--uncorr', dest = 'uncorr', help = 'The observable to be used for the autocorrelation analysis; either \'dhdl\' (default) or \'dE\'. In the latter case the energy differences dE_{i,i+1} (dE_{i,i-1} for the last lambda) are used.', default = 'dhdl')
parser.add_option('-o', '--out', dest = 'output_directory', help = 'Directory in which the output files produced by this script will be stored. Default: Same as datafile_directory.', default = '')
parser.add_option('-p', '--prefix', dest = 'prefix', help = 'Prefix for datafile sets, i.e.\'dhdl\' (default).', default = 'dhdl')
parser.add_option('-q', '--suffix', dest = 'suffix', help = 'Suffix for datafile sets, i.e. \'xvg\' (default).', default = 'xvg')
parser.add_option('-r', '--decimal', dest = 'decimal', help = 'The number of decimal places the free energies are to be reported with. No worries, this is for the text output only; the full-precision data will be stored in \'results.pickle\'. Default: 3.', default = 3, type=int)
parser.add_option('-s', '--skiptime', dest = 'equiltime', help = 'Discard data prior to this specified time as \'equilibration\' data. Units picoseconds. Default: 0 ps.', default = 0, type=float)
parser.add_option('-t', '--temperature', dest = 'temperature', help = "Temperature in K. Default: 298 K.", default = 298, type=float)
parser.add_option('-u', '--units', dest = 'units', help = 'Units to report energies: \'kJ\', \'kcal\', and \'kBT\'. Default: \'kJ\'', default = 'kJ')
parser.add_option('-v', '--verbose', dest = 'verbose', help = 'Verbose option. Default: False.', default = False, action = 'store_true')
parser.add_option('-w', '--overlap', dest = 'overlap', help = 'Print out and plot the overlap matrix. Default: False.', default = False, action = 'store_true')
parser.add_option('-x', '--ignoreWL', dest = 'bIgnoreWL', help = 'Do not check whether the WL weights are equilibrated. No log file needed as an accompanying input.', default = False, action = 'store_true')
parser.add_option('-y', '--tolerance', dest = 'relative_tolerance', help = "Convergence criterion for the energy estimates with BAR and MBAR. Default: 1e-10.", default = 1e-10, type=float)
parser.add_option('-z', '--initialize', dest = 'init_with', help = 'The initial MBAR free energy guess; either \'BAR\' or \'zeroes\'. Default: \'BAR\'.', default = 'BAR')
#===================================================================================================
# FUNCTIONS: Miscellanea.
#===================================================================================================
def getMethods(string):
"""Returns a list of the methods the free energy is to be estimated with."""
all_methods = ['TI','TI-CUBIC','DEXP','IEXP','GINS','GDEL','BAR','UBAR','RBAR','MBAR']
methods = ['TI','TI-CUBIC','DEXP','IEXP','BAR','MBAR']
if (numpy.array(['Sire']) == P.software.title()).any():
methods = ['TI','TI-CUBIC']
if not string:
return methods
def addRemove(string):
operation = string[0]
string = string[1:]+'+'
method = ''
for c in string:
if c.isalnum():
method += c
elif c=='_':
method += '-'
elif (c=='-' or c=='+'):
if method in all_methods:
if operation=='-':
if method in methods:
methods.remove(method)
else:
if not method in methods:
methods.append(method)
method = ''
operation = c
else:
parser.error("\nThere is no '%s' in the list of supported methods." % method)
else:
parser.error("\nUnknown character '%s' in the method string is found." % c)
return
if string=='ALL':
methods = all_methods
else:
primo = string[0]
if primo.isalpha():
methods = string.replace('+', ' ').replace('_', '-').split()
methods = [m for m in methods if m in all_methods]
elif primo=='+' or primo=='-':
addRemove(string)
else:
parser.error("\nUnknown character '%s' in the method string is found." % primo)
return methods
def checkUnitsAndMore(units):
kB = 1.3806488*6.02214129/1000.0 # Boltzmann's constant (kJ/mol/K).
beta = 1./(kB*P.temperature)
b_kcal = (numpy.array(['Sire', 'Amber']) == P.software.title()).any()
if units == 'kJ':
beta_report = beta/4.184**b_kcal
units = '(kJ/mol)'
elif units == 'kcal':
beta_report = 4.184**(not b_kcal)*beta
units = '(kcal/mol)'
elif units == 'kBT':
beta_report = 1
units = '(k_BT)'
else:
parser.error('\nI don\'t understand the unit type \'%s\': the only options are \'kJ\', \'kcal\', and \'kBT\'' % units)
if not P.output_directory:
P.output_directory = P.datafile_directory
if P.overlap:
if not 'MBAR' in P.methods:
parser.error("\nMBAR is not in 'methods'; can't plot the overlap matrix.")
return units, beta, beta_report
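# --- Illustrative note, not part of the original script --------------------
# Worked instance of the conversion above: at 298 K, beta ~ 0.4036 mol/kJ.
# For kcal-based input (Sire/Amber) with kJ output requested, beta_report =
# beta/4.184 ~ 0.0965, so dividing the reduced free energies by beta_report
# reports them in kJ/mol.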
def timeStatistics(stime):
etime = ttt_time.time()
tm = int((etime-stime)/60.)
th = int(tm/60.)
ts = '%.2f' % (etime-stime-60*(tm+60*th))
return th, tm, ts, ttt_time.asctime()
#===================================================================================================
# FUNCTIONS: The autocorrelation analysis.
#===================================================================================================
def uncorrelate(sta, fin, do_dhdl=False):
"""Identifies uncorrelated samples and updates the arrays of the reduced potential energy and dhdlt retaining data entries of these samples only.
'sta' and 'fin' are the starting and final snapshot positions to be read, both are arrays of dimension K."""
if not P.uncorr_threshold:
if P.software.title()=='Sire':
return dhdlt, nsnapshots, None
return dhdlt, nsnapshots, u_klt
import pymbar ## this is not a built-in module ##
u_kln = numpy.zeros([K,K,max(fin-sta)], numpy.float64) # u_kln[k,m,n] is the reduced potential energy of uncorrelated sample index n from state k evaluated at state m
N_k = numpy.zeros(K, int) # N_k[k] is the number of uncorrelated samples from state k
g = numpy.zeros(K,float) # autocorrelation times for the data
if do_dhdl:
dhdl = numpy.zeros([K,n_components,max(fin-sta)], float) #dhdl is value for dhdl for each component in the file at each time.
print "\n\nNumber of correlated and uncorrelated samples:\n\n%6s %12s %12s %12s\n" % ('State', 'N', 'N_k', 'N/N_k')
UNCORR_OBSERVABLE = {'Gromacs':P.uncorr, 'Amber':'dhdl', 'Sire':'dhdl', 'Desmond':'dE'}[P.software.title()]
if UNCORR_OBSERVABLE == 'dhdl':
#Uncorrelate based on dhdl values at a given lambda
for k in range(K):
# Sum up over the energy components; notice, that only the relevant data is being used in the third dimension.
dhdl_sum = numpy.sum(dhdlt[k,:,sta[k]:fin[k]], axis=0)
# Determine indices of uncorrelated samples from potential autocorrelation analysis at state k
# (alternatively, could use the energy differences -- here, we will use total dhdl).
g[k] = pymbar.timeseries.statisticalInefficiency(dhdl_sum)
indices = sta[k] + numpy.array(pymbar.timeseries.subsampleCorrelatedData(dhdl_sum, g=g[k])) # indices of uncorrelated samples
N = len(indices) # number of uncorrelated samples
# Handle case where we end up with too few.
if N < P.uncorr_threshold:
if do_dhdl:
print "WARNING: Only %s uncorrelated samples found at lambda number %s; proceeding with analysis using correlated samples..." % (N, k)
indices = sta[k] + numpy.arange(len(dhdl_sum))
N = len(indices)
N_k[k] = N # Store the number of uncorrelated samples from state k.
if not (u_klt is None):
for l in range(K):
u_kln[k,l,0:N] = u_klt[k,l,indices]
if do_dhdl:
print "%6s %12s %12s %12.2f" % (k, fin[k], N_k[k], g[k])
for n in range(n_components):
dhdl[k,n,0:N] = dhdlt[k,n,indices]
if UNCORR_OBSERVABLE == 'dE':
#Decorrelate based on energy differences between lambdas
for k in range(K):
# Sum up over the energy components as above using only the relevant data; here we use energy differences
# Determine indices of uncorrelated samples from potential autocorrelation analysis at state k
dE = u_klt[k,k+1,sta[k]:fin[k]] if not k==K-1 else u_klt[k,k-1,sta[k]:fin[k]]
g[k] = pymbar.timeseries.statisticalInefficiency(dE)
indices = sta[k] + numpy.array(pymbar.timeseries.subsampleCorrelatedData(dE, g=g[k])) # indices of uncorrelated samples
N = len(indices) # number of uncorrelated samples
# Handle case where we end up with too few.
if N < P.uncorr_threshold:
print "WARNING: Only %s uncorrelated samples found at lambda number %s; proceeding with analysis using correlated samples..." % (N, k)
indices = sta[k] + numpy.arange(len(dE))
N = len(indices)
N_k[k] = N # Store the number of uncorrelated samples from state k.
if not (u_klt is None):
for l in range(K):
u_kln[k,l,0:N] = u_klt[k,l,indices]
if do_dhdl:
return (dhdl, N_k, u_kln)
return (N_k, u_kln)
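# --- Illustrative sketch, not part of the original script ------------------
# A self-contained toy of the subsampling idea used in uncorrelate() above.
# It assumes the pymbar 3 API (timeseries.statisticalInefficiency and
# timeseries.subsampleCorrelatedData); the series below is synthetic data.
def _subsampling_sketch():
    import pymbar ## this is not a built-in module ##
    numpy.random.seed(1)
    # Build a correlated series by smoothing white noise.
    series = numpy.convolve(numpy.random.randn(5000), numpy.ones(20)/20., mode='same')
    g = pymbar.timeseries.statisticalInefficiency(series) # statistical inefficiency, roughly 2*tau+1
    indices = pymbar.timeseries.subsampleCorrelatedData(series, g=g) # indices of ~independent samples
    print "g = %.1f; kept %d of %d samples" % (g, len(indices), len(series))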
#===================================================================================================
# FUNCTIONS: The MBAR workhorse.
#===================================================================================================
def estimatewithMBAR(u_kln, N_k, reltol, regular_estimate=False):
"""Computes the MBAR free energy given the reduced potential and the number of relevant entries in it."""
def plotOverlapMatrix(O):
"""Plots the probability of observing a sample from state i (row) in state j (column).
For convenience, the neighboring state cells are fringed in bold."""
max_prob = O.max()
fig = pl.figure(figsize=(K/2.,K/2.))
fig.add_subplot(111, frameon=False, xticks=[], yticks=[])
for i in range(K):
if i!=0:
pl.axvline(x=i, ls='-', lw=0.5, color='k', alpha=0.25)
pl.axhline(y=i, ls='-', lw=0.5, color='k', alpha=0.25)
for j in range(K):
if O[j,i] < 0.005:
ii = ''
else:
ii = ("%.2f" % O[j,i])[1:]
alf = O[j,i]/max_prob
pl.fill_between([i,i+1], [K-j,K-j], [K-(j+1),K-(j+1)], color='k', alpha=alf)
pl.annotate(ii, xy=(i,j), xytext=(i+0.5,K-(j+0.5)), size=8, textcoords='data', va='center', ha='center', color=('k' if alf < 0.5 else 'w'))
if P.bSkipLambdaIndex:
ks = [int(l) for l in P.bSkipLambdaIndex.split('-')]
ks = numpy.delete(numpy.arange(K+len(ks)), ks)
else:
ks = range(K)
for i in range(K):
pl.annotate(ks[i], xy=(i+0.5, 1), xytext=(i+0.5, K+0.5), size=10, textcoords=('data', 'data'), va='center', ha='center', color='k')
# Note: 'j' below is left over from the loop above; since textcoords is
# given and no arrow is drawn, only the xytext coordinates matter here.
pl.annotate(ks[i], xy=(-0.5, K-(j+0.5)), xytext=(-0.5, K-(i+0.5)), size=10, textcoords=('data', 'data'), va='center', ha='center', color='k')
pl.annotate('$\lambda$', xy=(-0.5, K-(j+0.5)), xytext=(-0.5, K+0.5), size=10, textcoords=('data', 'data'), va='center', ha='center', color='k')
pl.plot([0,K], [0,0], 'k-', lw=4.0, solid_capstyle='butt')
pl.plot([K,K], [0,K], 'k-', lw=4.0, solid_capstyle='butt')
pl.plot([0,0], [0,K], 'k-', lw=2.0, solid_capstyle='butt')
pl.plot([0,K], [K,K], 'k-', lw=2.0, solid_capstyle='butt')
cx = sorted(2*range(K+1))
cy = sorted(2*range(K+1), reverse=True)
pl.plot(cx[2:-1], cy[1:-2], 'k-', lw=2.0)
pl.plot(numpy.array(cx[2:-3])+1, cy[1:-4], 'k-', lw=2.0)
pl.plot(cx[1:-2], numpy.array(cy[:-3])-1, 'k-', lw=2.0)
pl.plot(cx[1:-4], numpy.array(cy[:-5])-2, 'k-', lw=2.0)
pl.xlim(-1, K)
pl.ylim(0, K+1)
pl.savefig(os.path.join(P.output_directory, 'O_MBAR.pdf'), bbox_inches='tight', pad_inches=0.0)
pl.close(fig)
return
if regular_estimate:
print "\nEstimating the free energy change with MBAR..."
MBAR = pymbar.mbar.MBAR(u_kln, N_k, verbose = P.verbose, relative_tolerance = reltol, initialize = P.init_with)
# Get matrix of dimensionless free energy differences and uncertainty estimate.
(Deltaf_ij, dDeltaf_ij, theta_ij ) = MBAR.getFreeEnergyDifferences(uncertainty_method='svd-ew', return_theta = True)
if P.verbose:
print "Matrix of free energy differences\nDeltaf_ij:\n%s\ndDeltaf_ij:\n%s" % (Deltaf_ij, dDeltaf_ij)
if regular_estimate:
if P.overlap:
print "The overlap matrix is..."
O = MBAR.computeOverlap()[2]
for k in range(K):
line = ''
for l in range(K):
line += ' %5.2f ' % O[k, l]
print line
plotOverlapMatrix(O)
print "\nFor a nicer figure look at 'O_MBAR.pdf'"
return (Deltaf_ij, dDeltaf_ij)
return (Deltaf_ij[0,K-1]/P.beta_report, dDeltaf_ij[0,K-1]/P.beta_report)
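# --- Illustrative sketch, not part of the original script ------------------
# Driving the same pymbar 3 call directly on a hypothetical two-state
# reduced-potential array; the harmonic 'states' below are made up and have
# identical widths, so the exact free energy difference is zero.
def _mbar_sketch():
    import pymbar ## this is not a built-in module ##
    numpy.random.seed(2)
    N = 200
    u_kln = numpy.zeros([2, 2, N]) # u_kln[k,l,n]: sample n from state k evaluated at state l
    x0 = numpy.random.randn(N) # samples from state 0 (centered at 0)
    x1 = 1.0 + numpy.random.randn(N) # samples from state 1 (centered at 1)
    u_kln[0,0], u_kln[0,1] = 0.5*x0**2, 0.5*(x0-1.0)**2
    u_kln[1,0], u_kln[1,1] = 0.5*x1**2, 0.5*(x1-1.0)**2
    mbar = pymbar.mbar.MBAR(u_kln, numpy.array([N, N]))
    Deltaf_ij, dDeltaf_ij = mbar.getFreeEnergyDifferences()[:2]
    print "dF(0->1) = %.3f +- %.3f kT (exact: 0)" % (Deltaf_ij[0,1], dDeltaf_ij[0,1])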
#===================================================================================================
# FUNCTIONS: Thermodynamic integration.
#===================================================================================================
class naturalcubicspline:
def __init__(self, x):
# define some space
L = len(x)
H = numpy.zeros([L,L],float)
M = numpy.zeros([L,L],float)
BW = numpy.zeros([L,L],float)
AW = numpy.zeros([L,L],float)
DW = numpy.zeros([L,L],float)
h = x[1:L]-x[0:L-1]
ih = 1.0/h
# define the H and M matrix, from p. 371 "applied numerical methods with matlab, Chapra"
H[0,0] = 1
H[L-1,L-1] = 1
for i in range(1,L-1):
H[i,i] = 2*(h[i-1]+h[i])
H[i,i-1] = h[i-1]
H[i,i+1] = h[i]
M[i,i] = -3*(ih[i-1]+ih[i])
M[i,i-1] = 3*(ih[i-1])
M[i,i+1] = 3*(ih[i])
CW = numpy.dot(numpy.linalg.inv(H),M) # this is the matrix translating c to weights in f.
# each row corresponds to the weights for each c.
# from CW, define the other coefficient matrices
for i in range(0,L-1):
BW[i,:] = -(h[i]/3)*(2*CW[i,:]+CW[i+1,:])
BW[i,i] += -ih[i]
BW[i,i+1] += ih[i]
DW[i,:] = (ih[i]/3)*(CW[i+1,:]-CW[i,:])
AW[i,i] = 1
# Make copies of the arrays we'll be using in the future.
self.x = x.copy()
self.AW = AW.copy()
self.BW = BW.copy()
self.CW = CW.copy()
self.DW = DW.copy()
# find the integrating weights
self.wsum = numpy.zeros([L],float)
self.wk = numpy.zeros([L-1,L],float)
for k in range(0,L-1):
w = DW[k,:]*(h[k]**4)/4.0 + CW[k,:]*(h[k]**3)/3.0 + BW[k,:]*(h[k]**2)/2.0 + AW[k,:]*(h[k])
self.wk[k,:] = w
self.wsum += w
def interpolate(self,y,xnew):
if len(self.x) != len(y):
parser.error("\nThe length of 'y' should be consistent with that of 'self.x'. I cannot perform linear algebra operations.")
# get the array of actual coefficients by multiplying the coefficient matrix by the values
a = numpy.dot(self.AW,y)
b = numpy.dot(self.BW,y)
c = numpy.dot(self.CW,y)
d = numpy.dot(self.DW,y)
N = len(xnew)
ynew = numpy.zeros([N],float)
for i in range(N):
# Find the index that 'xnew[i]' would have in 'self.x'.
j = numpy.searchsorted(self.x, xnew[i]) - 1
lamw = xnew[i] - self.x[j]
ynew[i] = d[j]*lamw**3 + c[j]*lamw**2 + b[j]*lamw + a[j]
# Preserve the terminal points.
ynew[0] = y[0]
ynew[-1] = y[-1]
return ynew
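# --- Illustrative sketch, not part of the original script ------------------
# The 'wsum' weights of the class above turn integration over lambda into a
# dot product. For the made-up curve f(l) = 3*l**2 the exact integral over
# [0,1] is 1; the natural-spline quadrature should land close to it.
def _spline_integration_sketch():
    x = numpy.array([0.0, 0.25, 0.5, 0.75, 1.0])
    y = 3.0*x**2
    spl = naturalcubicspline(x)
    print "cubic-spline integral = %.4f (exact: 1.0)" % numpy.dot(spl.wsum, y)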
def TIprelim(lv):
# Lambda vectors spacing.
dlam = numpy.diff(lv, axis=0)
lchange = numpy.zeros([K,n_components],bool) # booleans for which lambdas are changing
for j in range(n_components):
# need to identify range over which lambda doesn't change, and not interpolate over that range.
for k in range(K-1):
if (lv[k+1,j]-lv[k,j] > 0):
lchange[k,j] = True
lchange[k+1,j] = True
if 'ave_dhdl' in globals() and 'std_dhdl' in globals():
return lchange, dlam, globals()['ave_dhdl'], globals()['std_dhdl']
# Compute <dhdl> and std(dhdl) for each component, for each lambda; multiply them by beta to make unitless.
ave_dhdl = numpy.zeros([K,n_components],float)
std_dhdl = numpy.zeros([K,n_components],float)
for k in range(K):
ave_dhdl[k,:] = P.beta*numpy.average(dhdl[k,:,0:N_k[k]],axis=1)
std_dhdl[k,:] = P.beta*numpy.std(dhdl[k,:,0:N_k[k]],axis=1)/numpy.sqrt(N_k[k]-1)
return lchange, dlam, ave_dhdl, std_dhdl
def getSplines(lchange):
# construct a map back to the original components
mapl = numpy.zeros([K,n_components],int) # map back to the original k from the components
for j in range(n_components):
incr = 0
for k in range(K):
if (lchange[k,j]):
mapl[k,j] += incr
incr +=1
# put together the spline weights for the different components
cubspl = list()
for j in range(n_components):
lv_lchange = lv[lchange[:,j],j]
if len(lv_lchange) == 0: # handle the all-zero lv column
cubspl.append(0)
else:
spl = naturalcubicspline(lv_lchange)
cubspl.append(spl)
return cubspl, mapl
#===================================================================================================
# FUNCTIONS: This one estimates dF and ddF for all pairs of adjacent states and stores them.
#===================================================================================================
def estimatePairs():
print ("Estimating the free energy change with %s..." % ', '.join(P.methods)).replace(', MBAR', '')
df_allk = list(); ddf_allk = list()
for k in range(K-1):
df = dict(); ddf = dict()
for name in P.methods:
if name == 'TI':
#===================================================================================================
# Estimate free energy difference with TI; interpolating with the trapezoidal rule.
#===================================================================================================
df['TI'] = 0.5*numpy.dot(dlam[k],(ave_dhdl[k]+ave_dhdl[k+1]))
ddf['TI'] = 0.5*numpy.sqrt(numpy.dot(dlam[k]**2,std_dhdl[k]**2+std_dhdl[k+1]**2))
if name == 'TI-CUBIC':
#===================================================================================================
# Estimate free energy difference with TI; interpolating with the natural cubic splines.
#===================================================================================================
df['TI-CUBIC'], ddf['TI-CUBIC'] = 0, 0
for j in range(n_components):
if dlam[k,j] > 0:
lj = lchange[:,j]
df['TI-CUBIC'] += numpy.dot(cubspl[j].wk[mapl[k,j]],ave_dhdl[lj,j])
ddf['TI-CUBIC'] += numpy.dot(cubspl[j].wk[mapl[k,j]]**2,std_dhdl[lj,j]**2)
ddf['TI-CUBIC'] = numpy.sqrt(ddf['TI-CUBIC'])
if any(name == m for m in ['DEXP', 'GDEL', 'BAR', 'UBAR', 'RBAR']):
w_F = u_kln[k,k+1,0:N_k[k]] - u_kln[k,k,0:N_k[k]]
if name == 'DEXP':
#===================================================================================================
# Estimate free energy difference with Forward-direction EXP (in this case, Deletion from solvent).
#===================================================================================================
(df['DEXP'], ddf['DEXP']) = pymbar.exp.EXP(w_F)
if name == 'GDEL':
#===================================================================================================
# Estimate free energy difference with a Gaussian estimate of EXP (in this case, deletion from solvent)
#===================================================================================================
(df['GDEL'], ddf['GDEL']) = pymbar.exp.EXPGauss(w_F)
if any(name == m for m in ['IEXP', 'GINS', 'BAR', 'UBAR', 'RBAR']):
w_R = u_kln[k+1,k,0:N_k[k+1]] - u_kln[k+1,k+1,0:N_k[k+1]]
if name == 'IEXP':
#===================================================================================================
# Estimate free energy difference with Reverse-direction EXP (in this case, insertion into solvent).
#===================================================================================================
(rdf,rddf) = pymbar.exp.EXP(w_R)
(df['IEXP'], ddf['IEXP']) = (-rdf,rddf)
if name == 'GINS':
#===================================================================================================
# Estimate free energy difference with a Gaussian estimate of EXP (in this case, insertion into solvent)
#===================================================================================================
(rdf,rddf) = pymbar.exp.EXPGauss(w_R)
(df['GINS'], ddf['GINS']) = (-rdf,rddf)
if name == 'BAR':
#===================================================================================================
# Estimate free energy difference with BAR; use w_F and w_R computed above.
#===================================================================================================
(df['BAR'], ddf['BAR']) = pymbar.bar.BAR(w_F, w_R, relative_tolerance=P.relative_tolerance, verbose = P.verbose)
if name == 'UBAR':
#===================================================================================================
# Estimate free energy difference with unoptimized BAR -- assume dF is zero, and just do one evaluation
#===================================================================================================
(df['UBAR'], ddf['UBAR']) = pymbar.bar.BAR(w_F, w_R, verbose = P.verbose,iterated_solution=False)
if name == 'RBAR':
#===================================================================================================
# Estimate free energy difference with Unoptimized BAR over range of free energy values, and choose the one
# that is self consistently best.
#===================================================================================================
min_diff = 1E6
best_udf = 0
for trial_udf in range(-10,10,1):
(udf, uddf) = pymbar.bar.BAR(w_F, w_R, DeltaF=trial_udf, iterated_solution=False, verbose=P.verbose)
diff = numpy.abs(udf - trial_udf)
if (diff < min_diff):
best_udf = udf
best_uddf = uddf
min_diff = diff
(df['RBAR'], ddf['RBAR']) = (best_udf,best_uddf)
if name == 'MBAR':
#===================================================================================================
# Store the MBAR free energy difference (already estimated above) properly, i.e. by state.
#===================================================================================================
(df['MBAR'], ddf['MBAR']) = Deltaf_ij[k,k+1], dDeltaf_ij[k,k+1]
df_allk = numpy.append(df_allk,df)
ddf_allk = numpy.append(ddf_allk,ddf)
return df_allk, ddf_allk
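# --- Illustrative sketch, not part of the original script ------------------
# The 'TI' branch above is plain trapezoidal quadrature between adjacent
# states. With hypothetical unitless averages 10.0 and 6.0 at lambdas 0.0
# and 0.5 (one component, dlam = 0.5): dF = 0.5*0.5*(10.0+6.0) = 4.0, and
# the standard errors combine in quadrature.
def _trapezoid_sketch():
    dlam_k = numpy.array([0.5])
    ave_k, ave_k1 = numpy.array([10.0]), numpy.array([6.0])
    std_k, std_k1 = numpy.array([0.2]), numpy.array([0.3])
    df = 0.5*numpy.dot(dlam_k, ave_k + ave_k1) # 4.0
    ddf = 0.5*numpy.sqrt(numpy.dot(dlam_k**2, std_k**2 + std_k1**2)) # ~0.090
    print "dF = %.3f +- %.3f (reduced units)" % (df, ddf)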
#===================================================================================================
# FUNCTIONS: All done with calculations; summarize and print stats.
#===================================================================================================
def totalEnergies():
# Count up the charging states.
startcoul = 0
endcoul = 0
startvdw = 0
endvdw = 0
for lv_n in ['vdw','coul','fep']:
if lv_n in P.lv_names:
_ndx_char = P.lv_names.index(lv_n)
lv_char = lv[:, _ndx_char]
if not (lv_char == lv_char[0]).all():
if lv_n == 'vdw':
endvdw = (lv_char != 1).sum()
if lv_n == 'fep':
endcoul = (lv_char != 1).sum()
ndx_char = P.lv_names.index(lv_n)
if lv_n == 'coul':
endcoul = (lv_char != 1).sum()
ndx_char = P.lv_names.index(lv_n)
# Figure out if coulomb section comes before or after vdw section
if endcoul > endvdw:
startcoul = endvdw
startvdw = 0
else:
startcoul = 0
startvdw = endcoul
segments = ['Coulomb' , 'vdWaals' , 'TOTAL']
segmentstarts = [startcoul , startvdw , 0 ]
segmentends = [endcoul , endvdw , K-1 ]
dFs = []
ddFs = []
# Perform the energy segmentation; be pedantic about the TI cumulative ddF's (see Section 3.1 of the paper).
for i in range(len(segments)):
segment = segments[i]; segstart = segmentstarts[i]; segend = segmentends[i]
dF = dict.fromkeys(P.methods, 0)
ddF = dict.fromkeys(P.methods, 0)
for name in P.methods:
if name == 'MBAR':
dF['MBAR'] = Deltaf_ij[segstart, segend]
ddF['MBAR'] = dDeltaf_ij[segstart, segend]
elif name[0:2] == 'TI':
for k in range(segstart, segend):
dF[name] += df_allk[k][name]
if segment == 'Coulomb':
jlist = [ndx_char] if endcoul>0 else []
elif segment == 'vdWaals':
jlist = []
elif segment == 'TOTAL':
jlist = range(n_components)
for j in jlist:
lj = lchange[:,j]
if not (lj == False).all(): # handle the all-zero lv column
if name == 'TI-CUBIC':
ddF[name] += numpy.dot((cubspl[j].wsum)**2,std_dhdl[lj,j]**2)
elif name == 'TI':
h = numpy.trim_zeros(dlam[:,j])
wsum = 0.5*(numpy.append(h,0) + numpy.append(0,h))
ddF[name] += numpy.dot(wsum**2,std_dhdl[lj,j]**2)
ddF[name] = numpy.sqrt(ddF[name])
else:
for k in range(segstart,segend):
dF[name] += df_allk[k][name]
ddF[name] += (ddf_allk[k][name])**2
ddF[name] = numpy.sqrt(ddF[name])
dFs.append(dF)
ddFs.append(ddF)
for name in P.methods: # 'vdWaals' = 'TOTAL' - 'Coulomb'
ddFs[1][name] = (ddFs[2][name]**2 - ddFs[0][name]**2)**0.5 # subtract in quadrature: the Coulomb leg is a subset of the total (exact for the TI-type sums, an approximation otherwise)
# Display results.
def printLine(str1, str2, d1=None, d2=None):
"""Fills out the results table linewise."""
print str1,
text = str1
for name in P.methods:
if d1 == 'plain':
print str2,
text += ' ' + str2
if d1 == 'name':
print str2 % (name, P.units),
text += ' ' + str2 % (name, P.units)
if d1 and d2:
print str2 % (d1[name]/P.beta_report, d2[name]/P.beta_report),
text += ' ' + str2 % (d1[name]/P.beta_report, d2[name]/P.beta_report)
print ''
outtext.append(text + '\n')
return
d = P.decimal
str_dash = (d+7 + 6 + d+2)*'-'
str_dat = ('X%d.%df +- X%d.%df' % (d+7, d, d+2, d)).replace('X', '%')
str_names = ('X%ds X-%ds' % (d+6, d+8)).replace('X', '%')
outtext = []
printLine(12*'-', str_dash, 'plain')
printLine('%-12s' % ' States', str_names, 'name')
printLine(12*'-', str_dash, 'plain')
for k in range(K-1):
printLine('%4d -- %-4d' % (k, k+1), str_dat, df_allk[k], ddf_allk[k])
printLine(12*'-', str_dash, 'plain')
remark = ["", "A remark on the energy components interpretation: ",
" 'vdWaals' is computed as 'TOTAL' - 'Coulomb', where ",
" 'Coulomb' is found as the free energy change between ",
" the states defined by the lambda vectors (0,0,...,0) ",
" and (1,0,...,0), the only varying vector component ",
" being either 'coul-lambda' or 'fep-lambda'. "]
w = 12 + (1+len(str_dash))*len(P.methods)
str_align = '{:I^%d}' % w
if len(P.lv_names)>1:
for i in range(len(segments)):
printLine('%9s: ' % segments[i], str_dat, dFs[i], ddFs[i])
for i in remark:
print str_align.replace('I', ' ').format(i)
else:
printLine('%9s: ' % segments[-1], str_dat, dFs[-1], ddFs[-1])
# Store results.
outfile = open(os.path.join(P.output_directory, 'results.txt'), 'w')
outfile.write('# Command line was: %s\n\n' % ' '.join(sys.argv) )
outfile.writelines(outtext)
outfile.close()
P.datafile_directory = os.getcwd()
P.when_analyzed = ttt_time.asctime()
P.ddf_allk = ddf_allk
P.df_allk = df_allk
P.ddFs = ddFs
P.dFs = dFs
outfile = open(os.path.join(P.output_directory, 'results.pickle'), 'w')
pickle.dump(P, outfile)
outfile.close()
print '\n'+w*'*'
for i in [" The above table has been stored in ", " "+P.output_directory+"/results.txt ",
" while the full-precision data ", " (along with the simulation profile) in ", " "+P.output_directory+"/results.pickle "]:
print str_align.format('{:^40}'.format(i))
print w*'*'
return
#===================================================================================================
# FUNCTIONS: Free energy change vs. simulation time. Called by the -f flag.
#===================================================================================================
def dF_t():
def plotdFvsTime(f_ts, r_ts, F_df, R_df, F_ddf, R_ddf):
"""Plots the free energy change computed using the equilibrated snapshots between the proper target time frames (f_ts and r_ts)
in both forward (data points are stored in F_df and F_ddf) and reverse (data points are stored in R_df and R_ddf) directions."""
fig = pl.figure(figsize=(8,6))
ax = fig.add_subplot(111)
pl.setp(ax.spines['bottom'], color='#D2B9D3', lw=3, zorder=-2)
pl.setp(ax.spines['left'], color='#D2B9D3', lw=3, zorder=-2)
for dire in ['top', 'right']:
ax.spines[dire].set_color('none')
ax.xaxis.set_ticks_position('bottom')
ax.yaxis.set_ticks_position('left')
max_fts = max(f_ts)
rr_ts = [aa/max_fts for aa in f_ts[::-1]]
f_ts = [aa/max_fts for aa in f_ts]
r_ts = [aa/max_fts for aa in r_ts]
line0 = pl.fill_between([r_ts[0], f_ts[-1]], R_df[0]-R_ddf[0], R_df[0]+R_ddf[0], color='#D2B9D3', zorder=-5)
for i in range(len(f_ts)):
line1 = pl.plot([f_ts[i]]*2, [F_df[i]-F_ddf[i], F_df[i]+F_ddf[i]], color='#736AFF', ls='-', lw=3, solid_capstyle='round', zorder=1)
line11 = pl.plot(f_ts, F_df, color='#736AFF', ls='-', lw=3, marker='o', mfc='w', mew=2.5, mec='#736AFF', ms=12, zorder=2)
for i in range(len(rr_ts)):
line2 = pl.plot([rr_ts[i]]*2, [R_df[i]-R_ddf[i], R_df[i]+R_ddf[i]], color='#C11B17', ls='-', lw=3, solid_capstyle='round', zorder=3)
line22 = pl.plot(rr_ts, R_df, color='#C11B17', ls='-', lw=3, marker='o', mfc='w', mew=2.5, mec='#C11B17', ms=12, zorder=4)
pl.xlim(r_ts[0], f_ts[-1])
pl.xticks(r_ts[::2] + f_ts[-1:], fontsize=10)
pl.yticks(fontsize=10)
leg = pl.legend((line1[0], line2[0]), (r'$Forward$', r'$Reverse$'), loc=9, prop=FP(size=18), frameon=False)
pl.xlabel(r'$\mathrm{Fraction\/of\/the\/simulation\/time}$', fontsize=16, color='#151B54')
pl.ylabel(r'$\mathrm{\Delta G\/%s}$' % P.units, fontsize=16, color='#151B54')
pl.xticks(f_ts, ['%.2f' % i for i in f_ts])
pl.tick_params(axis='x', color='#D2B9D3')
pl.tick_params(axis='y', color='#D2B9D3')
pl.savefig(os.path.join(P.output_directory, 'dF_t.pdf'))
pl.close(fig)
return
if not 'MBAR' in P.methods:
parser.error("\nCurrent version of the dF(t) analysis works with MBAR only and the method is not found in the list.")
if not (P.snap_size[0] == numpy.array(P.snap_size)).all(): # this could be circumvented
parser.error("\nThe snapshot size isn't the same for all the files; cannot perform the dF(t) analysis.")
# Define a list of bForwrev equidistant time frames at which the free energy is to be estimated; count up the snapshots bounded by the time frames.
n_tf = P.bForwrev + 1
nss_tf = numpy.zeros([n_tf, K], int)
increment = 1./(n_tf-1)
if P.bExpanded:
from collections import Counter # for counting elements in an array
tf = numpy.arange(0,1+increment,increment)*(numpy.sum(nsnapshots)-1)+1
tf[0] = 0
for i in range(n_tf-1):
nss = Counter(extract_states[tf[i]:tf[i+1]])
nss_tf[i+1] = numpy.array([nss[j] for j in range(K)])
else:
tf = numpy.arange(0,1+increment,increment)*(max(nsnapshots)-1)+1
tf[0] = 0
for i in range(n_tf-1):
nss_tf[i+1] = numpy.array([min(j, tf[i+1]) for j in nsnapshots]) - numpy.sum(nss_tf[:i+1],axis=0)
# Define the real time scale (in ps) rather than a snapshot sequence.
ts = ["%.1f" % ((i-(i!=tf[0]))*P.snap_size[0] + P.equiltime) for i in tf]
# Initialize arrays to store data points to be plotted.
F_df = numpy.zeros(n_tf-1, float)
F_ddf = numpy.zeros(n_tf-1, float)
R_df = numpy.zeros(n_tf-1, float)
R_ddf = numpy.zeros(n_tf-1, float)
# Store the MBAR energy that accounts for all the equilibrated snapshots (has already been computed in the previous section).
F_df[-1], F_ddf[-1] = (Deltaf_ij[0,K-1]/P.beta_report, dDeltaf_ij[0,K-1]/P.beta_report)
R_df[0], R_ddf[0] = (Deltaf_ij[0,K-1]/P.beta_report, dDeltaf_ij[0,K-1]/P.beta_report)
# Do the forward analysis.
print "Forward dF(t) analysis...\nEstimating the free energy change using the data up to"
sta = nss_tf[0]
for i in range(n_tf-2):
print "%60s ps..." % ts[i+1]
fin = numpy.sum(nss_tf[:i+2],axis=0)
N_k, u_kln = uncorrelate(nss_tf[0], numpy.sum(nss_tf[:i+2],axis=0))
F_df[i], F_ddf[i] = estimatewithMBAR(u_kln, N_k, P.relative_tolerance)
# Do the reverse analysis.
print "Reverse dF(t) analysis...\nUsing the data starting from"
fin = numpy.sum(nss_tf[:],axis=0)
for i in range(n_tf-2):
print "%34s ps..." % ts[i+1]
sta = numpy.sum(nss_tf[:i+2],axis=0)
N_k, u_kln = uncorrelate(sta, fin)
R_df[i+1], R_ddf[i+1] = estimatewithMBAR(u_kln, N_k, P.relative_tolerance)
print """\n The free energies %s evaluated by using the trajectory
snapshots corresponding to various time intervals for both the
reverse and forward (in parentheses) directions.\n""" % P.units
print "%s\n %20s %19s %20s\n%s" % (70*'-', 'Time interval, ps','Reverse', 'Forward', 70*'-')
print "%10s -- %s\n%10s -- %-10s %11.3f +- %5.3f %16s\n" % (ts[0], ts[-1], '('+ts[0], ts[0]+')', R_df[0], R_ddf[0], 'XXXXXX')
for i in range(1, len(ts)-1):
print "%10s -- %s\n%10s -- %-10s %11.3f +- %5.3f %11.3f +- %5.3f\n" % (ts[i], ts[-1], '('+ts[0], ts[i]+')', R_df[i], R_ddf[i], F_df[i-1], F_ddf[i-1])
print "%10s -- %s\n%10s -- %-10s %16s %15.3f +- %5.3f\n%s" % (ts[-1], ts[-1], '('+ts[0], ts[-1]+')', 'XXXXXX', F_df[-1], F_ddf[-1], 70*'-')
# Plot the forward and reverse dF(t); store the data points in the text file.
print "Plotting data to the file dF_t.pdf...\n\n"
plotdFvsTime([float(i) for i in ts[1:]], [float(i) for i in ts[:-1]], F_df, R_df, F_ddf, R_ddf)
outtext = ["%12s %10s %-10s %17s %10s %s\n" % ('Time (ps)', 'Forward', P.units, 'Time (ps)', 'Reverse', P.units)]
outtext+= ["%10s %11.3f +- %5.3f %18s %11.3f +- %5.3f\n" % (ts[1:][i], F_df[i], F_ddf[i], ts[:-1][i], R_df[i], R_ddf[i]) for i in range(len(F_df))]
outfile = open(os.path.join(P.output_directory, 'dF_t.txt'), 'w'); outfile.writelines(outtext); outfile.close()
return
#===================================================================================================
# FUNCTIONS: Free energy change breakdown (into lambda-pair dFs). Called by the -g flag.
#===================================================================================================
def plotdFvsLambda():
def plotdFvsLambda1():
"""Plots the free energy differences evaluated for each pair of adjacent states for all methods."""
x = numpy.arange(len(df_allk))
if x[-1]<8:
fig = pl.figure(figsize = (8,6))
else:
fig = pl.figure(figsize = (len(x),6))
width = 1./(len(P.methods)+1)
elw = 30*width
colors = {'TI':'#C45AEC', 'TI-CUBIC':'#33CC33', 'DEXP':'#F87431', 'IEXP':'#FF3030', 'GINS':'#EAC117', 'GDEL':'#347235', 'BAR':'#6698FF', 'UBAR':'#817339', 'RBAR':'#C11B17', 'MBAR':'#F9B7FF'}
lines = tuple()
for name in P.methods:
y = [df_allk[i][name]/P.beta_report for i in x]
ye = [ddf_allk[i][name]/P.beta_report for i in x]
line = pl.bar(x+len(lines)*width, y, width, color=colors[name], yerr=ye, lw=0.1*elw, error_kw=dict(elinewidth=elw, ecolor='black', capsize=0.5*elw))
lines += (line[0],)
pl.xlabel('States', fontsize=12, color='#151B54')
pl.ylabel('$\Delta G$ '+P.units, fontsize=12, color='#151B54')
pl.xticks(x+0.5*width*len(P.methods), tuple(['%d--%d' % (i, i+1) for i in x]), fontsize=8)
pl.yticks(fontsize=8)
pl.xlim(x[0], x[-1]+len(lines)*width)
ax = pl.gca()
for dir in ['right', 'top', 'bottom']:
ax.spines[dir].set_color('none')
ax.yaxis.set_ticks_position('left')
for tick in ax.get_xticklines():
tick.set_visible(False)
leg = pl.legend(lines, tuple(P.methods), loc=3, ncol=2, prop=FP(size=10), fancybox=True)
leg.get_frame().set_alpha(0.5)
pl.title('The free energy change breakdown', fontsize = 12)
pl.savefig(os.path.join(P.output_directory, 'dF_state_long.pdf'), bbox_inches='tight')
pl.close(fig)
return
def plotdFvsLambda2(nb=10):
"""Plots the free energy differences evaluated for each pair of adjacent states for all methods.
The layout is approximately 'nb' bars per subplot."""
x = numpy.arange(len(df_allk))
if len(x) < nb:
return
xs = numpy.array_split(x, len(x)/nb+1)
mnb = max([len(i) for i in xs])
fig = pl.figure(figsize = (8,6))
width = 1./(len(P.methods)+1)
elw = 30*width
colors = {'TI':'#C45AEC', 'TI-CUBIC':'#33CC33', 'DEXP':'#F87431', 'IEXP':'#FF3030', 'GINS':'#EAC117', 'GDEL':'#347235', 'BAR':'#6698FF', 'UBAR':'#817339', 'RBAR':'#C11B17', 'MBAR':'#F9B7FF'}
ndx = 1
for x in xs:
lines = tuple()
ax = pl.subplot(len(xs), 1, ndx)
for name in P.methods:
y = [df_allk[i][name]/P.beta_report for i in x]
ye = [ddf_allk[i][name]/P.beta_report for i in x]
line = pl.bar(x+len(lines)*width, y, width, color=colors[name], yerr=ye, lw=0.05*elw, error_kw=dict(elinewidth=elw, ecolor='black', capsize=0.5*elw))
lines += (line[0],)
for dir in ['left', 'right', 'top', 'bottom']:
if dir == 'left':
ax.yaxis.set_ticks_position(dir)
else:
ax.spines[dir].set_color('none')
pl.yticks(fontsize=10)
ax.xaxis.set_ticks([])
for i in x+0.5*width*len(P.methods):
ax.annotate('$\mathrm{%d-%d}$' % (i, i+1), xy=(i, 0), xycoords=('data', 'axes fraction'), xytext=(0, -2), size=10, textcoords='offset points', va='top', ha='center')
pl.xlim(x[0], x[-1]+len(lines)*width + (mnb - len(x)))
ndx += 1
leg = ax.legend(lines, tuple(P.methods), loc=0, ncol=2, prop=FP(size=8), title='$\mathrm{\Delta G\/%s\/}\mathit{vs.}\/\mathrm{lambda\/pair}$' % P.units, fancybox=True)
leg.get_frame().set_alpha(0.5)
pl.savefig(os.path.join(P.output_directory, 'dF_state.pdf'), bbox_inches='tight')
pl.close(fig)
return
def plotTI():
"""Plots the ave_dhdl array as a function of the lambda value.
If (TI and TI-CUBIC in methods) -- plots the TI integration area and the TI-CUBIC interpolation curve,
elif (only one of them in methods) -- plots the integration area of the method."""
min_dl = dlam[dlam != 0].min()
S = int(0.4/min_dl)
fig = pl.figure(figsize = (8,6))
ax = fig.add_subplot(1,1,1)
ax.spines['bottom'].set_position('zero')
ax.spines['top'].set_color('none')
ax.spines['right'].set_color('none')
ax.xaxis.set_ticks_position('bottom')
ax.yaxis.set_ticks_position('left')
for k, spine in ax.spines.items():
spine.set_zorder(12.2)
xs, ndx, dx = [0], 0, 0.001
colors = ['r', 'g', '#7F38EC', '#9F000F', 'b', 'y']
min_y, max_y = 0, 0
lines = tuple()
## lv_names2 = [r'$Coulomb$', r'$vdWaals$'] ## for the paper
lv_names2 = []
for j in range(n_components):
y = ave_dhdl[:,j]
if not (y == 0).all():
lv_names2.append(r'$%s$' % P.lv_names[j].capitalize())
for j in range(n_components):
y = ave_dhdl[:,j]
if not (y == 0).all():
#if not cubspl[j] == 0:
# Get the coordinates.
lj = lchange[:,j]
x = lv[:,j][lj]
y = y[lj]/P.beta_report
if 'TI' in P.methods:
# Plot the TI integration area.
ss = 'TI'
for i in range(len(x)-1):
min_y = min(y.min(), min_y)
max_y = max(y.max(), max_y)
#pl.plot(x,y)
if i%2==0:
pl.fill_between(x[i:i+2]+ndx, 0, y[i:i+2], color=colors[ndx], alpha=1.0)
else:
pl.fill_between(x[i:i+2]+ndx, 0, y[i:i+2], color=colors[ndx], alpha=0.5)
xlegend = [-100*wnum for wnum in range(len(lv_names2))]
pl.plot(xlegend, [0*wnum for wnum in xlegend], ls='-', color=colors[ndx], label=lv_names2[ndx]) ## for the paper
if 'TI-CUBIC' in P.methods:
# Plot the TI-CUBIC interpolation curve.
ss += ' and TI-CUBIC'
xnew = numpy.arange(0, 1+dx, dx)
ynew = cubspl[j].interpolate(y, xnew)
min_y = min(ynew.min(), min_y)
max_y = max(ynew.max(), max_y)
pl.plot(xnew+ndx, ynew, color='#B6B6B4', ls ='-', solid_capstyle='round', lw=3.0)
else:
# Plot the TI-CUBIC integration area.
ss = 'TI-CUBIC'
for i in range(len(x)-1):
xnew = numpy.arange(x[i], x[i+1]+dx, dx)
ynew = cubspl[j].interpolate(y, xnew)
ynew[0], ynew[-1] = y[i], y[i+1]
min_y = min(ynew.min(), min_y)
max_y = max(ynew.max(), max_y)
if i%2==0:
pl.fill_between(xnew+ndx, 0, ynew, color=colors[ndx], alpha=1.0)
else:
pl.fill_between(xnew+ndx, 0, ynew, color=colors[ndx], alpha=0.5)
# Store the abscissa values and update the subplot index.
xs += (x+ndx).tolist()[1:]
ndx += 1
# Make sure the tick labels are not overcrowded.
xs = numpy.array(xs)
dl_mat = numpy.array([xs-i for i in xs])
ri = range(len(xs))
def getInd(r=ri, z=[0]):
primo = r[0]
min_dl=ndx*0.02*2**(primo>10)
if dl_mat[primo].max()<min_dl:
return z
for i in r:
for j in range(len(xs)):
if dl_mat[i,j]>min_dl:
z.append(j)
return getInd(ri[j:], z)
xt = [i if (i in getInd()) else '' for i in range(K)]
pl.xticks(xs[1:], xt[1:], fontsize=10)
pl.yticks(fontsize=10)
#ax = pl.gca()
#for label in ax.get_xticklabels():
# label.set_bbox(dict(fc='w', ec='None', alpha=0.5))
# Remove the abscissa ticks and set up the axes limits.
for tick in ax.get_xticklines():
tick.set_visible(False)
pl.xlim(0, ndx)
min_y *= 1.01
max_y *= 1.01
pl.ylim(min_y, max_y)
for i,j in zip(xs[1:], xt[1:]):
pl.annotate(('%.2f' % (i-1.0 if i>1.0 else i) if not j=='' else ''), xy=(i, 0), xytext=(i, 0.01), size=10, rotation=90, textcoords=('data', 'axes fraction'), va='bottom', ha='center', color='#151B54')
if ndx>1:
lenticks = len(ax.get_ymajorticklabels()) - 1
if min_y<0: lenticks -= 1
if lenticks < 5:
from matplotlib.ticker import AutoMinorLocator as AML
ax.yaxis.set_minor_locator(AML())
pl.grid(which='both', color='w', lw=0.25, axis='y', zorder=12)
pl.ylabel(r'$\mathrm{\langle{\frac{ \partial U } { \partial \lambda }}\rangle_{\lambda}\/%s}$' % P.units, fontsize=20, color='#151B54')
pl.annotate('$\mathit{\lambda}$', xy=(0, 0), xytext=(0.5, -0.05), size=18, textcoords='axes fraction', va='top', ha='center', color='#151B54')
if not P.software.title()=='Sire':
lege = ax.legend(prop=FP(size=14), frameon=False, loc=1)
for l in lege.legendHandles:
l.set_linewidth(10)
pl.savefig(os.path.join(P.output_directory, 'dhdl_TI.pdf'))
pl.close(fig)
return
print "Plotting the free energy breakdown figure..."
plotdFvsLambda1()
plotdFvsLambda2()
if ('TI' in P.methods or 'TI-CUBIC' in P.methods):
print "Plotting the TI figure..."
plotTI()
#===================================================================================================
# FUNCTIONS: The Curve-Fitting Method. Called by the -c flag.
#===================================================================================================
def plotCFM(u_kln, N_k, num_bins=100):
"""A graphical representation of what Bennett calls 'Curve-Fitting Method'."""
print "Plotting the CFM figure..."
def leaveTicksOnlyOnThe(xdir, ydir, axis):
dirs = ['left', 'right', 'top', 'bottom']
axis.xaxis.set_ticks_position(xdir)
axis.yaxis.set_ticks_position(ydir)
return
def plotdg_vs_dU(yy, df_allk, ddf_allk):
sq = (len(yy))**0.5
h = int(sq)
w = h + 1 + 1*(sq-h>0.5)
scale = round(w/3., 1)+0.4 if len(yy)>13 else 1
sf = numpy.ceil(scale*3) if scale>1 else 0
fig = pl.figure(figsize = (8*scale,6*scale))
matplotlib.rc('axes', facecolor = '#E3E4FA')
matplotlib.rc('axes', edgecolor = 'white')
if P.bSkipLambdaIndex:
ks = [int(l) for l in P.bSkipLambdaIndex.split('-')]
ks = numpy.delete(numpy.arange(K+len(ks)), ks)
else:
ks = range(K)
for i, (xx_i, yy_i) in enumerate(yy):
ax = pl.subplot(h, w, i+1)
ax.plot(xx_i, yy_i, color='r', ls='-', lw=3, marker='o', mec='r')
leaveTicksOnlyOnThe('bottom', 'left', ax)
ax.locator_params(axis='x', nbins=5)
ax.locator_params(axis='y', nbins=6)
ax.fill_between(xx_i, df_allk[i]['BAR'] - ddf_allk[i]['BAR'], df_allk[i]['BAR'] + ddf_allk[i]['BAR'], color='#D2B9D3', zorder=-1)
ax.annotate(r'$\mathrm{%d-%d}$' % (ks[i], ks[i+1]), xy=(0.5, 0.9), xycoords=('axes fraction', 'axes fraction'), xytext=(0, -2), size=14, textcoords='offset points', va='top', ha='center', color='#151B54', bbox = dict(fc='w', ec='none', boxstyle='round', alpha=0.5))
pl.xlim(xx_i.min(), xx_i.max())
pl.annotate(r'$\mathrm{\Delta U_{i,i+1}\/(reduced\/units)}$', xy=(0.5, 0.03), xytext=(0.5, 0), xycoords=('figure fraction', 'figure fraction'), size=20+sf, textcoords='offset points', va='center', ha='center', color='#151B54')
pl.annotate(r'$\mathrm{\Delta g_{i+1,i}\/(reduced\/units)}$', xy=(0.06, 0.5), xytext=(0, 0.5), rotation=90, xycoords=('figure fraction', 'figure fraction'), size=20+sf, textcoords='offset points', va='center', ha='center', color='#151B54')
pl.savefig(os.path.join(P.output_directory, 'cfm.pdf'))
pl.close(fig)
return
def findOptimalMinMax(ar):
c = zip(*numpy.histogram(ar, bins=10))
thr = int(ar.size/8.)
mi, ma = ar.min(), ar.max()
for (i,j) in c:
if i>thr:
mi = j
break
for (i,j) in c[::-1]:
if i>thr:
ma = j
break
return mi, ma
def stripZeros(a, aa, b, bb):
z = numpy.array([a, aa[:-1], b, bb[:-1]])
til = 0
for i,j in enumerate(a):
if j>0:
til = i
break
z = z[:, til:]
til = 0
for i,j in enumerate(b[::-1]):
if j>0:
til = i
break
z = z[:, :len(a)+1-til]
a, aa, b, bb = z
return a, numpy.append(aa, 100), b, numpy.append(bb, 100)
K = len(u_kln)
yy = []
for k in range(0, K-1):
upto = min(N_k[k], N_k[k+1])
righ = -u_kln[k,k+1, : upto]
left = u_kln[k+1,k, : upto]
min1, max1 = findOptimalMinMax(righ)
min2, max2 = findOptimalMinMax(left)
mi = min(min1, min2)
ma = max(max1, max2)
(counts_l, xbins_l) = numpy.histogram(left, bins=num_bins, range=(mi, ma))
(counts_r, xbins_r) = numpy.histogram(righ, bins=num_bins, range=(mi, ma))
counts_l, xbins_l, counts_r, xbins_r = stripZeros(counts_l, xbins_l, counts_r, xbins_r)
counts_r, xbins_r, counts_l, xbins_l = stripZeros(counts_r, xbins_r, counts_l, xbins_l)
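      # Overlapping-distribution (Bennett) relation: shifting the forward and
      # reverse log-histograms by -dU/2 and +dU/2 makes their difference equal,
      # up to sampling noise, to the reduced free energy difference of the pair.
      # The 'diff' curve computed below should therefore be roughly flat and lie
      # inside the shaded BAR band whenever phase-space overlap is adequate.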
with numpy.errstate(divide='ignore', invalid='ignore'):
log_left = numpy.log(counts_l) - 0.5*xbins_l[:-1]
log_righ = numpy.log(counts_r) + 0.5*xbins_r[:-1]
diff = log_left - log_righ
yy.append((xbins_l[:-1], diff))
plotdg_vs_dU(yy, df_allk, ddf_allk)
return
#===================================================================================================
# MAIN
#===================================================================================================
def main():
global dhdlt
global u_klt
global P
global K
global n_components
global pymbar
global dhdl
global N_k
global lv
global dlam
global ave_dhdl
global std_dhdl
global lchange
global cubspl
global mapl
global u_kln
global Deltaf_ij
global dDeltaf_ij
global df_allk
global ddf_allk
global nsnapshots
global pl
global FP
global matplotlib
# Timing.
stime = ttt_time.time()
print "Started on %s" % ttt_time.asctime()
print 'Command line was: %s\n' % ' '.join(sys.argv)
# Simulation profile P (to be stored in 'results.pickle') will amass information about the simulation.
P = parser.parse_args()[0]
P.methods = getMethods(P.methods.upper())
P.units, P.beta, P.beta_report = checkUnitsAndMore(P.units)
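   # pymbar is needed only when some method other than TI/TI-CUBIC is
   # requested: stripping those names from the joined method list leaves an
   # empty (falsy) string when nothing else remains.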
if ''.join(P.methods).replace('TI-CUBIC', '').replace('TI', ''):
import pymbar ## this is not a built-in module ##
if (numpy.array([P.bForwrev, P.breakdown, P.bCFM, P.overlap]) != 0).any():
import matplotlib # 'matplotlib-1.1.0-1'; errors may pop up when using an earlier version
matplotlib.use('Agg')
import matplotlib.pyplot as pl
from matplotlib.font_manager import FontProperties as FP
if P.software.title() == 'Gromacs':
import parser_gromacs
nsnapshots, lv, dhdlt, u_klt = parser_gromacs.readDataGromacs(P)
elif P.software.title() == 'Sire':
import parser_sire
nsnapshots, lv, dhdlt, u_klt = parser_sire.readDataSire(P)
elif P.software.title() == 'Amber':
import parser_amber
nsnapshots, lv, dhdlt, u_klt = parser_amber.readDataAmber(P)
elif P.software.title() == 'Desmond':
import parser_desmond
#NML: Desmond FEP jobs will always output with these names
P.prefix='gibbs'
P.suffix='dE'
P.methods=['BAR'] #Given only dE of adj Lambdas, limiting to BAR only
nsnapshots, lv, u_klt = parser_desmond.readDataDesmond(P)
else:
from inspect import currentframe, getframeinfo
lineno = getframeinfo(currentframe()).lineno
print "\n\n%s\n Looks like there is no yet proper parser to process your files. \n Please modify lines %d and %d of this script.\n%s\n\n" % (78*"*", lineno+3, lineno+4, 78*"*")
#### LINES TO BE MODIFIED
#import YOUR_OWN_FILE_PARSER
#nsnapshots, lv, dhdlt, u_klt = YOUR_OWN_FILE_PARSER.yourDataParser(*args, **kwargs)
#### All the four are numpy arrays.
#### lv is the array of lambda vectors.
#### nsnapshots is the number of equilibrated snapshots per each state.
#### dhdlt[k,n,t] is the derivative of energy component n with respect to state k of snapshot t
#### u_klt[k,m,t] is the reduced potential energy of snapshot t of state k evaluated at state m
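      #### For reference, a hypothetical minimal skeleton (every name below is
      #### a placeholder, not part of this script):
      #def yourDataParser(P):
      #   K = ...                                     # number of lambda states
      #   lv = numpy.zeros([K, n_components], float)  # lambda vectors
      #   nsnapshots = numpy.zeros(K, int)            # equilibrated snapshots per state
      #   dhdlt = numpy.zeros([K, n_components, maxn], float)  # dH/dl timeseries
      #   u_klt = numpy.zeros([K, K, maxn], float)    # reduced potential energies
      #   # ... read the files and fill the arrays here ...
      #   return nsnapshots, lv, dhdlt, u_klt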
K, n_components = lv.shape
if (numpy.array(['Sire','Gromacs', 'Amber']) == P.software.title()).any():
dhdl, N_k, u_kln = uncorrelate(sta=numpy.zeros(K, int), fin=nsnapshots, do_dhdl=True)
elif P.software.title() == 'Desmond':
N_k, u_kln = uncorrelate(sta=numpy.zeros(K, int), fin=nsnapshots, do_dhdl=False)
# Estimate free energy difference with MBAR -- all states at once.
if 'MBAR' in P.methods:
Deltaf_ij, dDeltaf_ij = estimatewithMBAR(u_kln, N_k, P.relative_tolerance, regular_estimate=True)
# The TI preliminaries.
if ('TI' in P.methods or 'TI-CUBIC' in P.methods):
lchange, dlam, ave_dhdl, std_dhdl = TIprelim(lv)
if 'TI-CUBIC' in P.methods:
cubspl, mapl = getSplines(lchange)
# Call other methods. Print stats. Store results.
df_allk, ddf_allk = estimatePairs()
totalEnergies()
# Plot figures.
if P.bForwrev:
dF_t()
if P.breakdown:
plotdFvsLambda()
if P.bCFM:
if not (u_kln is None):
plotCFM(u_kln, N_k, 50)
print "\nTime spent: %s hours, %s minutes, and %s seconds.\nFinished on %s" % timeStatistics(stime)
if __name__ == "__main__":
main()
#===================================================================================================
# End of the script
#===================================================================================================
| lgpl-2.1 |
mxjl620/scikit-learn | examples/linear_model/plot_theilsen.py | 232 | 3615 | """
====================
Theil-Sen Regression
====================
Computes a Theil-Sen Regression on a synthetic dataset.
See :ref:`theil_sen_regression` for more information on the regressor.
Compared to the OLS (ordinary least squares) estimator, the Theil-Sen
estimator is robust against outliers. It has a breakdown point of about 29.3%
in the case of simple linear regression, which means that it can tolerate
arbitrarily corrupted data (outliers) making up as much as 29.3% of the sample
in the two-dimensional case.
The estimation of the model is done by calculating the slopes and intercepts
of a subpopulation of all possible combinations of p subsample points. If an
intercept is fitted, p must be greater than or equal to n_features + 1. The
final slope and intercept are then defined as the spatial median of these
slopes and intercepts.
In certain cases Theil-Sen performs better than :ref:`RANSAC
<ransac_regression>`, which is also a robust method. This is illustrated in the
second example below, where outliers with respect to the x-axis perturb RANSAC.
Tuning the ``residual_threshold`` parameter of RANSAC remedies this, but in
general a priori knowledge about the data and the nature of the outliers is
needed.
Due to the computational complexity of Theil-Sen, it is recommended to use it
only for small problems in terms of the number of samples and features. For
larger problems the ``max_subpopulation`` parameter restricts the magnitude of
all possible combinations of p subsample points to a randomly chosen subset,
thereby also limiting the runtime. Theil-Sen can therefore be applied to larger
problems, with the drawback of losing some of its mathematical properties,
since it then works on a random subset.
"""
# Author: Florian Wilhelm -- <[email protected]>
# License: BSD 3 clause
import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import LinearRegression, TheilSenRegressor
from sklearn.linear_model import RANSACRegressor
print(__doc__)
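##############################################################################
# For intuition, a minimal sketch (not part of the original example) of the
# Theil-Sen idea for simple 1D regression: the slope is the median of all
# pairwise slopes and the intercept the median of the residuals. The helper
# `naive_theil_sen` is hypothetical; sklearn's TheilSenRegressor generalizes
# this with spatial medians over subsets of p points.
import itertools

def naive_theil_sen(x, y):
    # median of all pairwise slopes (skipping pairs with equal x)
    slopes = [(y[j] - y[i]) / (x[j] - x[i])
              for i, j in itertools.combinations(range(len(x)), 2)
              if x[j] != x[i]]
    slope = np.median(slopes)
    # intercept as the median residual under the estimated slope
    intercept = np.median(y - slope * x)
    return slope, intercept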
estimators = [('OLS', LinearRegression()),
('Theil-Sen', TheilSenRegressor(random_state=42)),
('RANSAC', RANSACRegressor(random_state=42)), ]
##############################################################################
# Outliers only in the y direction
np.random.seed(0)
n_samples = 200
# Linear model y = 3*x + N(2, 0.1**2)
x = np.random.randn(n_samples)
w = 3.
c = 2.
noise = 0.1 * np.random.randn(n_samples)
y = w * x + c + noise
# 10% outliers
y[-20:] += -20 * x[-20:]
X = x[:, np.newaxis]
plt.plot(x, y, 'k+', mew=2, ms=8)
line_x = np.array([-3, 3])
for name, estimator in estimators:
t0 = time.time()
estimator.fit(X, y)
elapsed_time = time.time() - t0
y_pred = estimator.predict(line_x.reshape(2, 1))
plt.plot(line_x, y_pred,
label='%s (fit time: %.2fs)' % (name, elapsed_time))
plt.axis('tight')
plt.legend(loc='upper left')
##############################################################################
# Outliers in the X direction
np.random.seed(0)
# Linear model y = 3*x + N(2, 0.1**2)
x = np.random.randn(n_samples)
noise = 0.1 * np.random.randn(n_samples)
y = 3 * x + 2 + noise
# 10% outliers
x[-20:] = 9.9
y[-20:] += 22
X = x[:, np.newaxis]
plt.figure()
plt.plot(x, y, 'k+', mew=2, ms=8)
line_x = np.array([-3, 10])
for name, estimator in estimators:
t0 = time.time()
estimator.fit(X, y)
elapsed_time = time.time() - t0
y_pred = estimator.predict(line_x.reshape(2, 1))
plt.plot(line_x, y_pred,
label='%s (fit time: %.2fs)' % (name, elapsed_time))
plt.axis('tight')
plt.legend(loc='upper left')
plt.show()
| bsd-3-clause |
toobaz/pandas | pandas/tests/resample/test_resampler_grouper.py | 2 | 7932 | from textwrap import dedent
import numpy as np
import pandas as pd
from pandas import DataFrame, Series, Timestamp
from pandas.core.indexes.datetimes import date_range
import pandas.util.testing as tm
from pandas.util.testing import assert_frame_equal, assert_series_equal
test_frame = DataFrame(
{"A": [1] * 20 + [2] * 12 + [3] * 8, "B": np.arange(40)},
index=date_range("1/1/2000", freq="s", periods=40),
)
def test_tab_complete_ipython6_warning(ip):
from IPython.core.completer import provisionalcompleter
code = dedent(
"""\
import pandas.util.testing as tm
s = tm.makeTimeSeries()
rs = s.resample("D")
"""
)
ip.run_code(code)
with tm.assert_produces_warning(None):
with provisionalcompleter("ignore"):
list(ip.Completer.completions("rs.", 1))
def test_deferred_with_groupby():
# GH 12486
# support deferred resample ops with groupby
data = [
["2010-01-01", "A", 2],
["2010-01-02", "A", 3],
["2010-01-05", "A", 8],
["2010-01-10", "A", 7],
["2010-01-13", "A", 3],
["2010-01-01", "B", 5],
["2010-01-03", "B", 2],
["2010-01-04", "B", 1],
["2010-01-11", "B", 7],
["2010-01-14", "B", 3],
]
df = DataFrame(data, columns=["date", "id", "score"])
df.date = pd.to_datetime(df.date)
def f(x):
return x.set_index("date").resample("D").asfreq()
expected = df.groupby("id").apply(f)
result = df.set_index("date").groupby("id").resample("D").asfreq()
assert_frame_equal(result, expected)
df = DataFrame(
{
"date": pd.date_range(start="2016-01-01", periods=4, freq="W"),
"group": [1, 1, 2, 2],
"val": [5, 6, 7, 8],
}
).set_index("date")
def f(x):
return x.resample("1D").ffill()
expected = df.groupby("group").apply(f)
result = df.groupby("group").resample("1D").ffill()
assert_frame_equal(result, expected)
def test_getitem():
g = test_frame.groupby("A")
expected = g.B.apply(lambda x: x.resample("2s").mean())
result = g.resample("2s").B.mean()
assert_series_equal(result, expected)
result = g.B.resample("2s").mean()
assert_series_equal(result, expected)
result = g.resample("2s").mean().B
assert_series_equal(result, expected)
def test_getitem_multiple():
# GH 13174
# multiple calls after selection causing an issue with aliasing
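    # Calling .count() on the same selection twice must return identical
    # results; the repeated call at the end guards against state leaking
    # between invocations of the resampler.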
data = [{"id": 1, "buyer": "A"}, {"id": 2, "buyer": "B"}]
df = DataFrame(data, index=pd.date_range("2016-01-01", periods=2))
r = df.groupby("id").resample("1D")
result = r["buyer"].count()
expected = Series(
[1, 1],
index=pd.MultiIndex.from_tuples(
[(1, Timestamp("2016-01-01")), (2, Timestamp("2016-01-02"))],
names=["id", None],
),
name="buyer",
)
assert_series_equal(result, expected)
result = r["buyer"].count()
assert_series_equal(result, expected)
def test_groupby_resample_on_api_with_getitem():
# GH 17813
df = pd.DataFrame(
{"id": list("aabbb"), "date": pd.date_range("1-1-2016", periods=5), "data": 1}
)
exp = df.set_index("date").groupby("id").resample("2D")["data"].sum()
result = df.groupby("id").resample("2D", on="date")["data"].sum()
assert_series_equal(result, exp)
def test_nearest():
# GH 17496
# Resample nearest
index = pd.date_range("1/1/2000", periods=3, freq="T")
result = Series(range(3), index=index).resample("20s").nearest()
expected = Series(
[0, 0, 1, 1, 1, 2, 2],
index=pd.DatetimeIndex(
[
"2000-01-01 00:00:00",
"2000-01-01 00:00:20",
"2000-01-01 00:00:40",
"2000-01-01 00:01:00",
"2000-01-01 00:01:20",
"2000-01-01 00:01:40",
"2000-01-01 00:02:00",
],
dtype="datetime64[ns]",
freq="20S",
),
)
assert_series_equal(result, expected)
def test_methods():
g = test_frame.groupby("A")
r = g.resample("2s")
for f in ["first", "last", "median", "sem", "sum", "mean", "min", "max"]:
result = getattr(r, f)()
expected = g.apply(lambda x: getattr(x.resample("2s"), f)())
assert_frame_equal(result, expected)
for f in ["size"]:
result = getattr(r, f)()
expected = g.apply(lambda x: getattr(x.resample("2s"), f)())
assert_series_equal(result, expected)
for f in ["count"]:
result = getattr(r, f)()
expected = g.apply(lambda x: getattr(x.resample("2s"), f)())
assert_frame_equal(result, expected)
# series only
for f in ["nunique"]:
result = getattr(r.B, f)()
expected = g.B.apply(lambda x: getattr(x.resample("2s"), f)())
assert_series_equal(result, expected)
for f in ["nearest", "backfill", "ffill", "asfreq"]:
result = getattr(r, f)()
expected = g.apply(lambda x: getattr(x.resample("2s"), f)())
assert_frame_equal(result, expected)
result = r.ohlc()
expected = g.apply(lambda x: x.resample("2s").ohlc())
assert_frame_equal(result, expected)
for f in ["std", "var"]:
result = getattr(r, f)(ddof=1)
expected = g.apply(lambda x: getattr(x.resample("2s"), f)(ddof=1))
assert_frame_equal(result, expected)
def test_apply():
g = test_frame.groupby("A")
r = g.resample("2s")
# reduction
expected = g.resample("2s").sum()
def f(x):
return x.resample("2s").sum()
result = r.apply(f)
assert_frame_equal(result, expected)
def f(x):
return x.resample("2s").apply(lambda y: y.sum())
result = g.apply(f)
assert_frame_equal(result, expected)
def test_apply_with_mutated_index():
# GH 15169
index = pd.date_range("1-1-2015", "12-31-15", freq="D")
df = DataFrame(data={"col1": np.random.rand(len(index))}, index=index)
def f(x):
s = Series([1, 2], index=["a", "b"])
return s
expected = df.groupby(pd.Grouper(freq="M")).apply(f)
result = df.resample("M").apply(f)
assert_frame_equal(result, expected)
# A case for series
expected = df["col1"].groupby(pd.Grouper(freq="M")).apply(f)
result = df["col1"].resample("M").apply(f)
assert_series_equal(result, expected)
def test_resample_groupby_with_label():
# GH 13235
index = date_range("2000-01-01", freq="2D", periods=5)
df = DataFrame(index=index, data={"col0": [0, 0, 1, 1, 2], "col1": [1, 1, 1, 1, 1]})
result = df.groupby("col0").resample("1W", label="left").sum()
mi = [
np.array([0, 0, 1, 2]),
pd.to_datetime(
np.array(["1999-12-26", "2000-01-02", "2000-01-02", "2000-01-02"])
),
]
mindex = pd.MultiIndex.from_arrays(mi, names=["col0", None])
expected = DataFrame(
data={"col0": [0, 0, 2, 2], "col1": [1, 1, 2, 1]}, index=mindex
)
assert_frame_equal(result, expected)
def test_consistency_with_window():
# consistent return values with window
df = test_frame
expected = pd.Int64Index([1, 2, 3], name="A")
result = df.groupby("A").resample("2s").mean()
assert result.index.nlevels == 2
tm.assert_index_equal(result.index.levels[0], expected)
result = df.groupby("A").rolling(20).mean()
assert result.index.nlevels == 2
tm.assert_index_equal(result.index.levels[0], expected)
def test_median_duplicate_columns():
# GH 14233
df = DataFrame(
np.random.randn(20, 3),
columns=list("aaa"),
index=pd.date_range("2012-01-01", periods=20, freq="s"),
)
df2 = df.copy()
df2.columns = ["a", "b", "c"]
expected = df2.resample("5s").median()
result = df.resample("5s").median()
expected.columns = result.columns
assert_frame_equal(result, expected)
| bsd-3-clause |
elingg/tensorflow | tensorflow/contrib/labeled_tensor/python/ops/ops.py | 11 | 44167 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Non-core ops for LabeledTensor."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import types
import numpy as np
from six import string_types
from tensorflow.contrib.labeled_tensor.python.ops import _typecheck as tc
from tensorflow.contrib.labeled_tensor.python.ops import core
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import numerics
from tensorflow.python.ops import random_ops
from tensorflow.python.training import input # pylint: disable=redefined-builtin
@tc.returns(core.LabeledTensor)
@tc.accepts(core.LabeledTensor, ops.Tensor, core.Axis,
tc.Optional(string_types))
def _gather_1d_on_axis(labeled_tensor, indexer, axis, name=None):
with ops.name_scope(name, 'lt_take', [labeled_tensor]) as scope:
temp_axes = core.Axes(
[axis] + list(labeled_tensor.axes.remove(axis.name).values()))
transposed = core.transpose(labeled_tensor, temp_axes.keys())
indexed = core.LabeledTensor(array_ops.gather(transposed.tensor, indexer),
temp_axes)
return core.transpose(indexed, labeled_tensor.axes.keys(), name=scope)
@tc.returns(core.LabeledTensor)
@tc.accepts(core.LabeledTensorLike,
tc.Mapping(string_types, tc.Union(
slice, collections.Hashable, collections.Sequence)),
tc.Optional(string_types))
def select(labeled_tensor, selection, name=None):
"""Slice out a subset of the tensor.
Args:
labeled_tensor: The input tensor.
selection: A dictionary mapping an axis name to a scalar, slice or list of
values to select. Currently supports two types of selections:
(a) Any number of scalar and/or slice selections.
(b) Exactly one list selection, without any scalars or slices.
name: Optional op name.
Returns:
The selection as a `LabeledTensor`.
Raises:
ValueError: If the tensor doesn't have an axis in the selection or if
that axis lacks labels.
KeyError: If any labels in a selection are not found in the original axis.
NotImplementedError: If you attempt to combine a list selection with
scalar selection or another list selection.
"""
with ops.name_scope(name, 'lt_select', [labeled_tensor]) as scope:
labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor)
slices = {}
indexers = {}
for axis_name, value in selection.items():
if axis_name not in labeled_tensor.axes:
raise ValueError(
'The tensor does not have an axis named %s. Its axes are: %r' %
(axis_name, labeled_tensor.axes.keys()))
axis = labeled_tensor.axes[axis_name]
if axis.labels is None:
raise ValueError(
'The axis named %s does not have labels. The axis is: %r' %
(axis_name, axis))
if isinstance(value, slice):
# TODO(shoyer): consider deprecating using slices in favor of lists
if value.start is None:
start = None
else:
start = axis.index(value.start)
if value.stop is None:
stop = None
else:
# For now, follow the pandas convention of making labeled slices
# inclusive of both bounds.
stop = axis.index(value.stop) + 1
if value.step is not None:
raise NotImplementedError('slicing with a step is not yet supported')
slices[axis_name] = slice(start, stop)
else:
# We're allowing anything NumPy treats as a scalar or 1D array.
value = np.asarray(value)
if value.ndim == 0:
slices[axis_name] = axis.index(value.item())
elif value.ndim == 1:
if indexers:
raise NotImplementedError(
'select does not yet support more than one list selection at '
'the same time')
indexer = [axis.index(v) for v in value.tolist()]
indexers[axis_name] = ops.convert_to_tensor(
indexer, dtype=dtypes.int64)
else:
raise NotImplementedError(
'select does not yet support selections with more than one '
'dimension: %s on axis %r' % (value, axis_name))
if indexers and slices:
raise NotImplementedError(
'select does not yet support combined scalar and list selection')
# For now, handle array selection separately, because tf.gather_nd does
# not support gradients yet. Later, using gather_nd will let us combine
# these paths.
if indexers:
(axis_name, indexer), = indexers.items()
axis = core.Axis(axis_name, selection[axis_name])
return _gather_1d_on_axis(labeled_tensor, indexer, axis, name=scope)
else:
return core.slice_function(labeled_tensor, slices, name=scope)
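# An illustrative usage sketch (the axes and labels below are hypothetical):
#
#   lt = core.LabeledTensor(tensor, [('channel', ['red', 'green', 'blue']),
#                                    ('x', 3)])
#   select(lt, {'channel': 'red'})                  # scalar: axis is dropped
#   select(lt, {'channel': slice('red', 'green')})  # slice: inclusive of both bounds
#   select(lt, {'channel': ['red', 'blue']})        # list: axis kept, relabeled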
@tc.returns(core.LabeledTensor)
@tc.accepts(tc.Collection(core.LabeledTensorLike), string_types,
tc.Optional(string_types))
def concat(labeled_tensors, axis_name, name=None):
"""Concatenate tensors along a dimension.
See tf.concat.
Args:
labeled_tensors: A list of input LabeledTensors.
axis_name: The name of the axis along which to concatenate.
name: Optional op name.
Returns:
The concatenated tensor.
The coordinate labels for the concatenation dimension are also concatenated,
if they are available for every tensor.
Raises:
    ValueError: If fewer than one tensor input is provided, if the tensors
have incompatible axes, or if `axis_name` isn't the name of an axis.
"""
with ops.name_scope(name, 'lt_concat', labeled_tensors) as scope:
labeled_tensors = [core.convert_to_labeled_tensor(lt)
for lt in labeled_tensors]
if len(labeled_tensors) < 1:
raise ValueError('concat expects at least 1 tensor, but received %s' %
labeled_tensors)
# All tensors must have these axes.
axes_0 = labeled_tensors[0].axes
axis_names = list(axes_0.keys())
if axis_name not in axis_names:
raise ValueError('%s not in %s' % (axis_name, axis_names))
shared_axes = axes_0.remove(axis_name)
tensors = [labeled_tensors[0].tensor]
concat_axis_list = [axes_0[axis_name]]
for labeled_tensor in labeled_tensors[1:]:
current_shared_axes = labeled_tensor.axes.remove(axis_name)
if current_shared_axes != shared_axes:
# TODO(shoyer): add more specific checks about what went wrong,
# including raising AxisOrderError when appropriate
raise ValueError('Mismatched shared axes: the first tensor '
'had axes %r but this tensor has axes %r.' %
(shared_axes, current_shared_axes))
# Accumulate the axis labels, if they're available.
concat_axis_list.append(labeled_tensor.axes[axis_name])
tensors.append(labeled_tensor.tensor)
concat_axis = core.concat_axes(concat_axis_list)
concat_dimension = axis_names.index(axis_name)
concat_tensor = array_ops.concat(tensors, concat_dimension, name=scope)
values = list(axes_0.values())
concat_axes = (values[:concat_dimension] + [concat_axis] +
values[concat_dimension + 1:])
return core.LabeledTensor(concat_tensor, concat_axes)
# TODO(shoyer): rename pack/unpack to stack/unstack
@tc.returns(core.LabeledTensor)
@tc.accepts(
tc.Collection(core.LabeledTensorLike),
tc.Union(string_types, core.AxisLike),
int, tc.Optional(string_types))
def pack(labeled_tensors, new_axis, axis_position=0, name=None):
"""Pack tensors along a new axis.
See tf.pack.
Args:
labeled_tensors: The input tensors, which must have identical axes.
new_axis: The name of the new axis, or a tuple containing the name
and coordinate labels.
axis_position: Optional integer position at which to insert the new axis.
name: Optional op name.
Returns:
The packed tensors as a single LabeledTensor, with `new_axis` in the given
`axis_position`.
Raises:
    ValueError: If fewer than one input tensor is provided, or if the tensors
don't have identical axes.
"""
with ops.name_scope(name, 'lt_pack', labeled_tensors) as scope:
labeled_tensors = [core.convert_to_labeled_tensor(lt)
for lt in labeled_tensors]
if len(labeled_tensors) < 1:
      raise ValueError('pack expects at least 1 tensor, but received %s' %
labeled_tensors)
axes_0 = labeled_tensors[0].axes
for t in labeled_tensors:
if t.axes != axes_0:
raise ValueError('Non-identical axes. Expected %s but got %s' %
(axes_0, t.axes))
pack_op = array_ops.stack(
[t.tensor for t in labeled_tensors], axis=axis_position, name=scope)
axes = list(axes_0.values())
axes.insert(axis_position, new_axis)
return core.LabeledTensor(pack_op, axes)
@tc.returns(tc.List(core.LabeledTensor))
@tc.accepts(core.LabeledTensorLike, tc.Optional(string_types),
tc.Optional(string_types))
def unpack(labeled_tensor, axis_name=None, name=None):
"""Unpack the tensor.
See tf.unpack.
Args:
labeled_tensor: The input tensor.
axis_name: Optional name of axis to unpack. By default, the first axis is
used.
name: Optional op name.
Returns:
The list of unpacked LabeledTensors.
Raises:
ValueError: If `axis_name` is not an axis on the input.
"""
with ops.name_scope(name, 'lt_unpack', [labeled_tensor]) as scope:
labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor)
axis_names = list(labeled_tensor.axes.keys())
if axis_name is None:
axis_name = axis_names[0]
if axis_name not in axis_names:
raise ValueError('%s not in %s' % (axis_name, axis_names))
axis = axis_names.index(axis_name)
unpack_ops = array_ops.unstack(labeled_tensor.tensor, axis=axis, name=scope)
axes = [a for i, a in enumerate(labeled_tensor.axes.values())
if i != axis]
return [core.LabeledTensor(t, axes) for t in unpack_ops]
@tc.returns(core.LabeledTensor)
@tc.accepts(core.LabeledTensorLike, tc.Collection(string_types),
tc.Collection(tc.Union(string_types, core.AxisLike)),
tc.Optional(string_types))
def reshape(labeled_tensor, existing_axes, new_axes, name=None):
"""Reshape specific axes of a LabeledTensor.
Non-indicated axes remain in their original locations.
Args:
labeled_tensor: The input tensor.
existing_axes: List of axis names found on the input tensor. These must
appear sequentially in the list of axis names on the input. In other
words, they must be a valid slice of `list(labeled_tensor.axes.keys())`.
new_axes: List of strings, tuples of (axis_name, axis_value) or Axis objects
providing new axes with which to replace `existing_axes` in the reshaped
result. At most one element of `new_axes` may be a string, indicating an
axis with unknown size.
name: Optional op name.
Returns:
The reshaped LabeledTensor.
Raises:
ValueError: If `existing_axes` are not all axes on the input, or if more
than one of `new_axes` has unknown size.
AxisOrderError: If `existing_axes` are not a slice of axis names on the
input.
"""
with ops.name_scope(name, 'lt_reshape', [labeled_tensor]) as scope:
labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor)
original_axis_names = list(labeled_tensor.axes.keys())
existing_axes = list(existing_axes)
if not set(existing_axes) <= set(original_axis_names):
raise ValueError('existing_axes %r are not contained in the set of axis '
'names %r on the input labeled tensor' %
(existing_axes, original_axis_names))
start = original_axis_names.index(existing_axes[0])
stop = original_axis_names.index(existing_axes[-1]) + 1
if existing_axes != original_axis_names[start:stop]:
# We could support existing_axes that aren't a slice by using transpose,
# but that could lead to unpredictable performance consequences because
# transposes are not free in TensorFlow. If we did transpose
# automatically, the user might never realize that their data is being
      # produced with the wrong order. (The latter will occur with some frequency
      # because of how broadcasting automatically chooses axis order.)
# So for now we've taken the strict approach.
raise core.AxisOrderError(
'existing_axes %r are not a slice of axis names %r on the input '
'labeled tensor. Use `transpose` or `impose_axis_order` to reorder '
'axes on the input explicitly.' %
(existing_axes, original_axis_names))
if sum(isinstance(axis, string_types) for axis in new_axes) > 1:
raise ValueError(
'at most one axis in new_axes can have unknown size. All other '
'axes must have an indicated integer size or labels: %r' % new_axes)
original_values = list(labeled_tensor.axes.values())
axis_size = lambda axis: -1 if axis.size is None else axis.size
shape = [axis_size(axis) for axis in original_values[:start]]
for axis_ref in new_axes:
if isinstance(axis_ref, string_types):
shape.append(-1)
else:
axis = core.as_axis(axis_ref)
shape.append(axis_size(axis))
shape.extend(axis_size(axis) for axis in original_values[stop:])
reshaped_tensor = array_ops.reshape(
labeled_tensor.tensor, shape, name=scope)
axes = original_values[:start] + list(new_axes) + original_values[stop:]
return core.LabeledTensor(reshaped_tensor, axes)
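# An illustrative sketch (hypothetical axes): merging two trailing axes into a
# single unlabeled axis whose size is inferred, e.g. to flatten image data:
#
#   lt = core.LabeledTensor(tensor, ['batch', 'row', 'column'])
#   flat = reshape(lt, ['row', 'column'], ['pixel'])  # axes: batch, pixel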
@tc.returns(core.LabeledTensor)
@tc.accepts(core.LabeledTensorLike, string_types, string_types,
tc.Optional(string_types))
def rename_axis(labeled_tensor, existing_name, new_name, name=None):
"""Rename an axis of LabeledTensor.
Args:
labeled_tensor: The input tensor.
existing_name: Name for an existing axis on the input.
new_name: Desired replacement name.
name: Optional op name.
Returns:
LabeledTensor with renamed axis.
Raises:
ValueError: If `existing_name` is not an axis on the input.
"""
with ops.name_scope(name, 'lt_rename_axis', [labeled_tensor]) as scope:
if existing_name not in labeled_tensor.axes:
      raise ValueError('existing_name %r is not contained in the set of axis '
'names %r on the input labeled tensor' %
(existing_name, labeled_tensor.axes.keys()))
new_axis = core.Axis(new_name, labeled_tensor.axes[existing_name].value)
return reshape(labeled_tensor, [existing_name], [new_axis], name=scope)
@tc.returns(tc.List(core.LabeledTensor))
@tc.accepts(string_types, collections.Callable, int, bool,
tc.Collection(core.LabeledTensorLike), bool,
tc.Optional(string_types))
def _batch_helper(default_name,
batch_fn,
batch_size,
enqueue_many,
labeled_tensors,
allow_smaller_final_batch,
name=None):
with ops.name_scope(name, default_name, labeled_tensors) as scope:
labeled_tensors = [core.convert_to_labeled_tensor(lt)
for lt in labeled_tensors]
batch_ops = batch_fn([t.tensor for t in labeled_tensors], scope)
# TODO(shoyer): Remove this when they sanitize the TF API.
if not isinstance(batch_ops, list):
assert isinstance(batch_ops, ops.Tensor)
batch_ops = [batch_ops]
if allow_smaller_final_batch:
batch_size = None
@tc.returns(core.Axes)
@tc.accepts(core.Axes)
def output_axes(axes):
if enqueue_many:
if 'batch' not in axes or list(axes.keys()).index('batch') != 0:
raise ValueError(
'When enqueue_many is True, input tensors must have an axis '
'called "batch" as their first dimension, '
'but axes were %s' % axes)
culled_axes = axes.remove('batch')
return core.Axes([('batch', batch_size)] + list(culled_axes.values()))
else:
return core.Axes([('batch', batch_size)] + list(axes.values()))
output_labeled_tensors = []
for i, tensor in enumerate(batch_ops):
axes = output_axes(labeled_tensors[i].axes)
output_labeled_tensors.append(core.LabeledTensor(tensor, axes))
return output_labeled_tensors
@tc.returns(tc.List(core.LabeledTensor))
@tc.accepts(
tc.Collection(core.LabeledTensorLike), int, int, int, bool, bool,
tc.Optional(string_types))
def batch(labeled_tensors,
batch_size,
num_threads=1,
capacity=32,
enqueue_many=False,
allow_smaller_final_batch=False,
name=None):
"""Rebatch a tensor.
See tf.batch.
Args:
labeled_tensors: The input tensors.
batch_size: The output batch size.
num_threads: See tf.batch.
capacity: See tf.batch.
enqueue_many: If true, the input tensors must contain a 'batch' axis as
their first axis.
If false, the input tensors must not contain a 'batch' axis.
See tf.batch.
allow_smaller_final_batch: See tf.batch.
name: Optional op name.
Returns:
The rebatched tensors.
If enqueue_many is false, the output tensors will have a new 'batch' axis
as their first axis.
Raises:
ValueError: If enqueue_many is True and the first axis of the tensors
isn't "batch".
"""
def fn(tensors, scope):
return input.batch(tensors,
batch_size=batch_size,
num_threads=num_threads,
capacity=capacity,
enqueue_many=enqueue_many,
allow_smaller_final_batch=allow_smaller_final_batch,
name=scope)
return _batch_helper('lt_batch', fn, batch_size, enqueue_many,
labeled_tensors, allow_smaller_final_batch, name)
@tc.returns(tc.List(core.LabeledTensor))
@tc.accepts(
tc.Collection(core.LabeledTensorLike), int, int, int, bool, int,
tc.Optional(int), bool, tc.Optional(string_types))
def shuffle_batch(labeled_tensors,
batch_size,
num_threads=1,
capacity=32,
enqueue_many=False,
min_after_dequeue=0,
seed=None,
allow_smaller_final_batch=False,
name=None):
"""Rebatch a tensor, with shuffling.
See tf.batch.
Args:
labeled_tensors: The input tensors.
batch_size: The output batch size.
num_threads: See tf.batch.
capacity: See tf.batch.
enqueue_many: If true, the input tensors must contain a 'batch' axis as
their first axis.
If false, the input tensors must not contain a 'batch' axis.
See tf.batch.
min_after_dequeue: Minimum number of elements in the queue after a dequeue,
used to ensure mixing.
seed: Optional random seed.
allow_smaller_final_batch: See tf.batch.
name: Optional op name.
Returns:
The rebatched tensors.
If enqueue_many is false, the output tensors will have a new 'batch' axis
as their first axis.
Raises:
ValueError: If enqueue_many is True and the first axis of the tensors
isn't "batch".
"""
def fn(tensors, scope):
return input.shuffle_batch(
tensors,
batch_size=batch_size,
num_threads=num_threads,
capacity=capacity,
enqueue_many=enqueue_many,
min_after_dequeue=min_after_dequeue,
seed=seed,
allow_smaller_final_batch=allow_smaller_final_batch,
name=scope)
return _batch_helper('lt_shuffle_batch', fn, batch_size, enqueue_many,
labeled_tensors, allow_smaller_final_batch, name)
@tc.returns(core.LabeledTensor)
@tc.accepts(core.LabeledTensorLike, tc.Mapping(string_types, int),
tc.Optional(int), tc.Optional(string_types))
def random_crop(labeled_tensor, shape_map, seed=None, name=None):
"""Randomly crops a tensor to a given size.
See tf.random_crop.
Args:
labeled_tensor: The input tensor.
shape_map: A dictionary mapping axis names to the size of the random crop
for that dimension.
seed: An optional random seed.
name: An optional op name.
Returns:
A tensor of the same rank as `labeled_tensor`, cropped randomly in the
selected dimensions.
Raises:
ValueError: If the shape map contains an axis name not in the input tensor.
"""
with ops.name_scope(name, 'lt_random_crop', [labeled_tensor]) as scope:
labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor)
for axis_name in shape_map:
if axis_name not in labeled_tensor.axes:
raise ValueError('Selection axis %s not in axes %s' %
(axis_name, labeled_tensor.axes))
shape = []
axes = []
for axis in labeled_tensor.axes.values():
if axis.name in shape_map:
size = shape_map[axis.name]
shape.append(size)
# We lose labels for the axes we crop, leaving just the size.
axes.append((axis.name, size))
else:
shape.append(len(axis))
axes.append(axis)
crop_op = random_ops.random_crop(labeled_tensor.tensor,
shape,
seed=seed,
name=scope)
return core.LabeledTensor(crop_op, axes)
# TODO(shoyer): Allow the user to select the axis over which to map.
@tc.returns(core.LabeledTensor)
@tc.accepts(collections.Callable, core.LabeledTensorLike,
tc.Optional(string_types))
def map_fn(fn, labeled_tensor, name=None):
"""Map on the list of tensors unpacked from labeled_tensor.
See tf.map_fn.
Args:
fn: The function to apply to each unpacked LabeledTensor.
It should have type LabeledTensor -> LabeledTensor.
labeled_tensor: The input tensor.
name: Optional op name.
Returns:
A tensor that packs the results of applying fn to the list of tensors
unpacked from labeled_tensor.
"""
with ops.name_scope(name, 'lt_map_fn', [labeled_tensor]) as scope:
labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor)
unpack_lts = unpack(labeled_tensor)
map_lts = [fn(t) for t in unpack_lts]
return pack(map_lts, list(labeled_tensor.axes.values())[0], name=scope)
@tc.returns(core.LabeledTensor)
@tc.accepts(core.LabeledTensorLike, tc.Optional(tc.Collection(string_types)),
tc.Optional(string_types))
def squeeze(labeled_tensor, axis_names=None, name=None):
"""Remove size-1 dimensions.
See tf.squeeze.
Args:
labeled_tensor: The input tensor.
axis_names: The names of the dimensions to remove, or None to remove
all size-1 dimensions.
name: Optional op name.
Returns:
A tensor with the specified dimensions removed.
Raises:
ValueError: If the named axes are not in the tensor, or if they are
not size-1.
"""
with ops.name_scope(name, 'lt_squeeze', [labeled_tensor]) as scope:
labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor)
if axis_names is None:
axis_names = [a.name for a in labeled_tensor.axes.values() if len(a) == 1]
for axis_name in axis_names:
if axis_name not in labeled_tensor.axes:
raise ValueError('axis %s is not in tensor axes %s' %
(axis_name, labeled_tensor.axes))
elif len(labeled_tensor.axes[axis_name]) != 1:
raise ValueError(
'cannot squeeze axis with size greater than 1: (%s, %s)' %
(axis_name, labeled_tensor.axes[axis_name]))
squeeze_dimensions = []
axes = []
for i, axis in enumerate(labeled_tensor.axes.values()):
if axis.name in axis_names:
squeeze_dimensions.append(i)
else:
axes.append(axis)
if squeeze_dimensions:
squeeze_op = array_ops.squeeze(labeled_tensor.tensor,
squeeze_dimensions,
name=scope)
else:
squeeze_op = array_ops.identity(labeled_tensor.tensor, name=scope)
return core.LabeledTensor(squeeze_op, axes)
# pylint: disable=invalid-name
ReduceAxis = tc.Union(
string_types, tc.Tuple(string_types, collections.Hashable))
ReduceAxes = tc.Optional(tc.Union(ReduceAxis, tc.Collection(ReduceAxis)))
# pylint: enable=invalid-name
@tc.returns(core.LabeledTensor)
@tc.accepts(core.LabeledTensorLike, core.LabeledTensorLike,
tc.Optional(string_types))
def matmul(a, b, name=None):
"""Matrix multiply two tensors with rank 1 or 2.
If both tensors have rank 2, a matrix-matrix product is performed.
If one tensor has rank 1 and the other has rank 2, then a matrix-vector
product is performed.
If both tensors have rank 1, then a vector dot-product is performed.
(This behavior matches that of `numpy.dot`.)
Both tensors must share exactly one dimension in common, which is the
dimension the operation is summed along. The inputs will be automatically
transposed if necessary as part of the matmul op.
We intend to eventually support `matmul` on higher rank input, and also
eventually support summing over any number shared dimensions (via an `axis`
argument), but neither of these features has been implemented yet.
Args:
a: First LabeledTensor.
b: Second LabeledTensor.
name: Optional op name.
Returns:
LabeledTensor with the result of matrix multiplication. Axes are ordered by
    the current axis_order_scope, if set, or in order of appearance on the
inputs.
Raises:
NotImplementedError: If inputs have rank >2 or share multiple axes.
ValueError: If the inputs have rank 0 or do not share any axes.
"""
with ops.name_scope(name, 'lt_matmul', [a, b]) as scope:
a = core.convert_to_labeled_tensor(a)
b = core.convert_to_labeled_tensor(b)
if len(a.axes) > 2 or len(b.axes) > 2:
# We could pass batched inputs to tf.matmul to make this work, but we
# would also need to use tf.tile and/or tf.transpose. These are more
# expensive than doing reshapes, so it's not clear if it's a good idea to
# do this automatically.
raise NotImplementedError(
'matmul currently requires inputs with rank 2 or less, but '
'inputs have ranks %r and %r' % (len(a.axes), len(b.axes)))
if not a.axes or not b.axes:
raise ValueError(
'matmul currently requires inputs with at least rank 1, but '
'inputs have ranks %r and %r' % (len(a.axes), len(b.axes)))
shared_axes = set(a.axes) & set(b.axes)
if len(shared_axes) > 1:
raise NotImplementedError(
'matmul does not yet support summing over multiple shared axes: %r. '
'Use transpose and reshape to create a single shared axis to sum '
'over.' % shared_axes)
if not shared_axes:
      raise ValueError('there must be exactly one axis in common between '
                       'inputs to matmul: %r, %r' %
(a.axes.keys(), b.axes.keys()))
shared_axis, = shared_axes
if a.axes[shared_axis] != b.axes[shared_axis]:
raise ValueError('axis %r does not match on input arguments: %r vs %r' %
(shared_axis, a.axes[shared_axis].value,
b.axes[shared_axis].value))
result_axes = []
for axes in [a.axes, b.axes]:
for axis in axes.values():
if axis.name != shared_axis:
result_axes.append(axis)
axis_scope_order = core.get_axis_order()
if axis_scope_order is not None:
result_axis_names = [axis.name for axis in result_axes]
new_axis_names = [name for name in axis_scope_order
if name in result_axis_names]
if new_axis_names != result_axis_names:
# switch a and b
b, a = a, b
# result_axes is a list of length 1 or 2
result_axes = result_axes[::-1]
squeeze_dims = []
if len(a.axes) == 1:
a_tensor = array_ops.reshape(a.tensor, (1, -1))
squeeze_dims.append(0)
transpose_a = False
else:
a_tensor = a.tensor
transpose_a = list(a.axes.keys()).index(shared_axis) == 0
if len(b.axes) == 1:
b_tensor = array_ops.reshape(b.tensor, (-1, 1))
squeeze_dims.append(1)
transpose_b = False
else:
b_tensor = b.tensor
transpose_b = list(b.axes.keys()).index(shared_axis) == 1
result_op = math_ops.matmul(a_tensor,
b_tensor,
transpose_a=transpose_a,
transpose_b=transpose_b)
if squeeze_dims:
result_op = array_ops.squeeze(result_op, squeeze_dims)
result_op = array_ops.identity(result_op, name=scope)
return core.LabeledTensor(result_op, result_axes)
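# An illustrative sketch (hypothetical axes); the single shared axis is summed
# over, and inputs are transposed automatically when necessary:
#
#   a = core.LabeledTensor(a_op, [('x', 2), ('y', 3)])
#   b = core.LabeledTensor(b_op, [('y', 3)])
#   matmul(a, b)  # rank 2 x rank 1 -> axes ['x'], analogous to numpy.dot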
@tc.returns(types.FunctionType)
@tc.accepts(string_types, collections.Callable)
def define_reduce_op(op_name, reduce_fn):
"""Define a reduction op for labeled tensors.
Args:
op_name: string name of the TensorFlow op.
reduce_fn: function to call to evaluate the op on a tf.Tensor.
Returns:
Function defining the given reduction op that acts on a LabeledTensor.
"""
default_name = 'lt_%s' % op_name
@tc.returns(core.LabeledTensor)
@tc.accepts(core.LabeledTensorLike, ReduceAxes, tc.Optional(string_types))
def op(labeled_tensor, axes=None, name=None):
"""Computes the given reduction across the given axes of a LabeledTensor.
See `tf.{op_name}` for full details.
Args:
labeled_tensor: The input tensor.
axes: A set of axes or None.
If None, all axes will be reduced.
Axes must all be strings, in which case those dimensions will be
removed, or pairs of (name, None) or (name, label), in which case those
dimensions will be kept.
name: Optional op name.
Returns:
The reduced LabeledTensor.
Raises:
ValueError: if any of the axes to reduce over are not found on
`labeled_tensor`.
"""
with ops.name_scope(name, default_name, [labeled_tensor]) as scope:
labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor)
if axes is None:
axes = labeled_tensor.axes.keys()
if isinstance(axes, (string_types, tuple)):
axes = [axes]
reduction_axes = {}
axes_to_squeeze = []
for a in axes:
if isinstance(a, string_types):
# We squeeze out this axis.
reduction_axes[a] = a
axes_to_squeeze.append(a)
else:
# We keep this axis, with the user-provided labels.
(axis_name, label) = a
if label is not None:
# The input was a single label, so make it a list so it can be
# turned into an Axis.
label = [label]
reduction_axes[axis_name] = (axis_name, label)
for axis_name in reduction_axes:
if axis_name not in labeled_tensor.axes:
raise ValueError('Axis %s not in axes %s' %
(axis_name, labeled_tensor.axes))
intermediate_axes = []
reduction_dimensions = []
for i, axis in enumerate(labeled_tensor.axes.values()):
if axis.name in reduction_axes:
intermediate_axes.append(reduction_axes[axis.name])
reduction_dimensions.append(i)
else:
intermediate_axes.append(axis)
reduce_op = reduce_fn(labeled_tensor.tensor,
reduction_dimensions,
keep_dims=True)
reduce_lt = core.LabeledTensor(reduce_op, intermediate_axes)
return squeeze(reduce_lt, axes_to_squeeze, name=scope)
op.__doc__ = op.__doc__.format(op_name=op_name)
op.__name__ = op_name
return op
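# Each generated op accepts `axes` entries either as strings (the dimension is
# reduced and squeezed away) or as (name, label) pairs (the dimension is kept
# with size 1 and the given label). An illustrative sketch:
#
#   reduce_sum(lt)                        # reduce over all axes
#   reduce_sum(lt, ['channel'])           # drop the 'channel' axis
#   reduce_sum(lt, [('channel', 'sum')])  # keep 'channel' as a size-1 axis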
reduce_all = define_reduce_op('reduce_all', math_ops.reduce_all)
reduce_any = define_reduce_op('reduce_any', math_ops.reduce_any)
reduce_logsumexp = define_reduce_op('reduce_logsumexp',
math_ops.reduce_logsumexp)
reduce_max = define_reduce_op('reduce_max', math_ops.reduce_max)
reduce_mean = define_reduce_op('reduce_mean', math_ops.reduce_mean)
reduce_min = define_reduce_op('reduce_min', math_ops.reduce_min)
reduce_prod = define_reduce_op('reduce_prod', math_ops.reduce_prod)
reduce_sum = define_reduce_op('reduce_sum', math_ops.reduce_sum)
@tc.returns(core.LabeledTensor)
@tc.accepts(core.LabeledTensorLike, tc.Mapping(str, tc.Union(int, ops.Tensor)),
tc.Optional(string_types))
def tile(labeled_tensor, multiples, name=None):
"""Constructs a tensor by tiling a given tensor.
Only axes without tick-labels can be tiled. (Otherwise, axis labels on tiled
tensors would no longer be unique.)
  See tf.tile.
Args:
labeled_tensor: The input tensor.
multiples: A mapping where the keys are axis names and the values are the
      integer number of times to tile along that axis. Only axes with a multiple
      different from 1 need be included.
name: Optional op name.
Returns:
A tensor with the indicated axes tiled.
Raises:
ValueError: If the tiled axes are not axes in the input tensor, or if any
axes in multiples have tick labels.
"""
with ops.name_scope(name, 'lt_tile', [labeled_tensor]) as scope:
labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor)
if not set(multiples.keys()) <= set(labeled_tensor.axes.keys()):
raise ValueError('tile axes %r are not contained in the set of axis '
'names %r on the input labeled tensor' %
(multiples.keys(), labeled_tensor.axes))
labeled_axes = [name for name in multiples
if labeled_tensor.axes[name].labels is not None]
if labeled_axes:
raise ValueError('cannot tile axes with tick labels: %r' % labeled_axes)
multiples_list = [multiples.get(name, 1) for name in labeled_tensor.axes]
tile_op = array_ops.tile(labeled_tensor.tensor, multiples_list, name=scope)
new_axes = [axis.name if axis.labels is None else axis
for axis in labeled_tensor.axes.values()]
return core.LabeledTensor(tile_op, new_axes)
@tc.returns(core.LabeledTensor)
@tc.accepts(core.LabeledTensorLike,
tc.Mapping(str, tc.Tuple(core.AxisValue, core.AxisValue)),
string_types, tc.Optional(string_types))
def pad(labeled_tensor, paddings, mode='CONSTANT', name=None):
"""Pads a tensor.
See tf.pad.
Args:
labeled_tensor: The input tensor.
paddings: A mapping where the keys are axis names and the values are
tuples where the first element is the padding to insert at the beginning
of the axis and the second is the padding to insert at the end of the
axis.
mode: One of "CONSTANT", "REFLECT", or "SYMMETRIC".
name: Optional op name.
Returns:
A tensor with the indicated axes padded, optionally with those axes extended
with the provided labels.
Raises:
ValueError: If the padded axes are not axes in the input tensor.
"""
with ops.name_scope(name, 'lt_pad', [labeled_tensor]) as scope:
labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor)
if not set(paddings.keys()) <= set(labeled_tensor.axes.keys()):
raise ValueError('pad axes %r are not contained in the set of axis '
'names %r on the input labeled tensor' %
(paddings.keys(), labeled_tensor.axes))
new_axes = []
padding_pairs = []
for name, axis in labeled_tensor.axes.items():
if name in paddings:
padding_before, padding_after = paddings[name]
axis_before = core.Axis(name, padding_before)
axis_after = core.Axis(name, padding_after)
new_axes.append(core.concat_axes([axis_before, axis, axis_after]))
padding_pairs.append((len(axis_before), len(axis_after)))
else:
new_axes.append(axis)
padding_pairs.append((0, 0))
pad_op = array_ops.pad(
labeled_tensor.tensor, padding_pairs, mode, name=scope)
return core.LabeledTensor(pad_op, new_axes)
@tc.returns(core.LabeledTensor)
@tc.accepts(tc.Union(np.ndarray, list, tuple, core.Scalar),
tc.Optional(dtypes.DType),
tc.Optional(tc.Union(
core.Axes,
tc.Collection(tc.Union(string_types, core.AxisLike)))),
tc.Optional(string_types))
def constant(value, dtype=None, axes=None, name=None):
"""Creates a constant tensor.
If `axes` includes any strings, shape is inferred from `value`. Otherwise,
the sizes of the given `axes` are used to set `shape` for `tf.constant`.
See tf.constant for more details.
Args:
value: The input tensor.
dtype: The type of the returned tensor.
axes: Optional Axes, list of strings or list of objects coercible to Axis
objects. By default, axes are assumed to be an empty list (i.e., `value`
is treated as a scalar).
name: Optional op name.
Returns:
    The labeled tensor with the given constant `value`.
"""
with ops.name_scope(name, 'lt_constant', [value]) as scope:
if axes is None:
axes = []
if isinstance(axes, core.Axes):
axes = axes.values()
if any(isinstance(ax, string_types) for ax in axes):
# need to infer shape
shape = None
else:
# axes already indicate shape
axes = [core.as_axis(a) for a in axes]
shape = [a.size for a in axes]
op = array_ops.constant(value, dtype=dtype, shape=shape, name=scope)
return core.LabeledTensor(op, axes)
@tc.returns(core.LabeledTensor)
@tc.accepts(core.LabeledTensorLike, tc.Optional(dtypes.DType),
tc.Optional(string_types))
def zeros_like(labeled_tensor, dtype=None, name=None):
"""Creates an identical tensor with all elements set to zero.
Args:
labeled_tensor: The input tensor.
dtype: The type of the returned tensor.
name: Optional op name.
Returns:
The tensor with elements set to zero.
"""
with ops.name_scope(name, 'lt_zeros_like', [labeled_tensor]) as scope:
labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor)
op = array_ops.zeros_like(labeled_tensor.tensor, dtype=dtype, name=scope)
return core.LabeledTensor(op, labeled_tensor.axes)
@tc.returns(core.LabeledTensor)
@tc.accepts(core.LabeledTensorLike, tc.Optional(dtypes.DType),
tc.Optional(string_types))
def ones_like(labeled_tensor, dtype=None, name=None):
"""Creates an identical tensor with all elements set to one.
Args:
labeled_tensor: The input tensor.
dtype: The type of the returned tensor.
name: Optional op name.
Returns:
The tensor with elements set to one.
"""
with ops.name_scope(name, 'lt_ones_like', [labeled_tensor]) as scope:
labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor)
op = array_ops.ones_like(labeled_tensor.tensor, dtype=dtype, name=scope)
return core.LabeledTensor(op, labeled_tensor.axes)
@tc.returns(core.LabeledTensor)
@tc.accepts(core.LabeledTensorLike, tc.Optional(dtypes.DType),
tc.Optional(string_types))
def cast(labeled_tensor, dtype=None, name=None):
"""Casts a labeled tensor to a new type.
Args:
labeled_tensor: The input tensor.
dtype: The type of the returned tensor.
name: Optional op name.
Returns:
A labeled tensor with the new dtype.
"""
with ops.name_scope(name, 'lt_cast', [labeled_tensor]) as scope:
labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor)
op = math_ops.cast(labeled_tensor.tensor, dtype=dtype, name=scope)
return core.LabeledTensor(op, labeled_tensor.axes)
@tc.returns(core.LabeledTensor)
@tc.accepts(core.LabeledTensorLike, string_types, tc.Optional(string_types))
def verify_tensor_all_finite(labeled_tensor, message, name=None):
"""Asserts a tensor doesn't contain NaNs or Infs.
See tf.verify_tensor_all_finite.
Args:
labeled_tensor: The input tensor.
message: Message to log on failure.
name: Optional op name.
Returns:
The input tensor.
"""
with ops.name_scope(name, 'lt_verify_tensor_all_finite',
[labeled_tensor]) as scope:
labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor)
op = numerics.verify_tensor_all_finite(labeled_tensor.tensor,
msg=message,
name=scope)
return core.LabeledTensor(op, labeled_tensor.axes)
@tc.returns(core.LabeledTensor)
@tc.accepts(core.LabeledTensorLike, core.LabeledTensorLike,
tc.Optional(string_types))
def boolean_mask(labeled_tensor, mask, name=None):
"""Apply a boolean mask to a labeled tensor.
Unlike `tf.boolean_mask`, this currently only works on 1-dimensional masks.
The mask is applied to the first axis of `labeled_tensor`. Labels on the first
axis are removed, because True indices in `mask` may not be known dynamically.
Args:
labeled_tensor: The input tensor.
    mask: A rank-1 boolean LabeledTensor whose single axis matches the first
      axis of `labeled_tensor`.
name: Optional op name.
Returns:
The masked labeled tensor.
Raises:
    ValueError: if the first axis of the mask does not equal the first axis
      of `labeled_tensor`.
    NotImplementedError: if the mask has more than one axis.
"""
with ops.name_scope(name, 'lt_boolean_mask', [labeled_tensor, mask]) as scope:
labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor)
mask = core.convert_to_labeled_tensor(mask)
if len(mask.axes) > 1:
raise NotImplementedError(
"LabeledTensor's boolean_mask currently only supports 1D masks")
mask_axis = list(mask.axes.values())[0]
lt_axis = list(labeled_tensor.axes.values())[0]
if mask_axis != lt_axis:
raise ValueError('the first axis of the labeled tensor and the mask '
'are not equal:\n%r\n%r' % (lt_axis, mask_axis))
op = array_ops.boolean_mask(labeled_tensor.tensor, mask.tensor, name=scope)
# TODO(shoyer): attempt to infer labels for the masked values, by calling
# tf.contrib.util.constant_value on the mask?
axes = [lt_axis.name] + list(labeled_tensor.axes.values())[1:]
return core.LabeledTensor(op, axes)
@tc.returns(core.LabeledTensor)
@tc.accepts(core.LabeledTensorLike, core.LabeledTensorLike,
core.LabeledTensorLike, tc.Optional(string_types))
def where(condition, x, y, name=None):
"""Return elements from x or y depending on condition.
See `tf.where` for more details. This function currently only implements the
three argument version of where.
Args:
condition: LabeledTensor of type `bool`.
x: LabeledTensor for values where condition is true.
y: LabeledTensor for values where condition is false.
name: Optional op name.
Returns:
The labeled tensor with values according to condition.
Raises:
ValueError: if `x` and `y` have different axes, or if the axes of `x` do not
start with the axes of `condition`.
"""
with ops.name_scope(name, 'lt_where', [condition, x, y]) as scope:
condition = core.convert_to_labeled_tensor(condition)
x = core.convert_to_labeled_tensor(x)
y = core.convert_to_labeled_tensor(y)
if not condition.axes == x.axes == y.axes:
raise ValueError('all inputs to `where` must have equal axes')
op = array_ops.where(condition.tensor, x.tensor, y.tensor, name=scope)
return core.LabeledTensor(op, x.axes)
| apache-2.0 |
marcoscrcamargo/ic | opencv/classifiers/knn.py | 1 | 4679 | # import the necessary packages
from sklearn.neighbors import KNeighborsClassifier
# resolvendo problemas de compatibilidade
from sklearn.model_selection import train_test_split
from imutils import paths
import numpy as np
import argparse
import imutils
import cv2
import os
data_path = "DBIM/alldb"
neighbors = 5
jobs = -1
model_pxl = KNeighborsClassifier(n_neighbors=neighbors,
n_jobs=jobs)
model_hst = KNeighborsClassifier(n_neighbors=neighbors,
n_jobs=jobs)
def image_to_feature_vector(image, size=(32, 32)):
# resize the image to a fixed size, then flatten the image into
# a list of raw pixel intensities
return cv2.resize(image, size).flatten()
def extract_color_histogram(image, bins=(8, 8, 8)):
# extract a 3D color histogram from the HSV color space using
# the supplied number of `bins` per channel
hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
hist = cv2.calcHist([hsv], [0, 1, 2], None, bins,
[0, 180, 0, 256, 0, 256])
# handle normalizing the histogram if we are using OpenCV 2.4.X
if imutils.is_cv2():
hist = cv2.normalize(hist)
# otherwise, perform "in place" normalization in OpenCV 3 (I
# personally hate the way this is done
else:
cv2.normalize(hist, hist)
# return the flattened histogram as the feature vector
return hist.flatten()
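# With the defaults above, image_to_feature_vector yields a 32*32*3 = 3072-dim
# raw-pixel vector and extract_color_histogram an 8*8*8 = 512-bin HSV histogram;
# each representation is classified by its own KNN model (model_pxl/model_hst).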
def initializate(data_p="DBIM/alldb", neighbors=5, jobs=-1):
    # Rebind the module-level state; without the global declaration the
    # assignments below would only create locals (and the original body
    # referenced an undefined name `dbp` instead of the `data_p` argument).
    global data_path, model_pxl, model_hst
    data_path = data_p
    model_pxl = KNeighborsClassifier(n_neighbors=neighbors,
                                     n_jobs=jobs)
    model_hst = KNeighborsClassifier(n_neighbors=neighbors,
                                     n_jobs=jobs)
def fit(info=False):
# grab the list of images that we'll be describing
if(info):
print("[INFO] describing images...")
imagePaths = list(paths.list_images(data_path))
# initialize the raw pixel intensities matrix, the features matrix,
# and labels list
rawImages = []
features = []
labels = []
# loop over the input images
for (i, imagePath) in enumerate(imagePaths):
# load the image and extract the class label (assuming that our
# path as the format: /path/to/dataset/{class}/{image_num}.jpg
image = cv2.imread(imagePath)
label = imagePath.split(os.path.sep)[2]
# extract raw pixel intensity "features", followed by a color
# histogram to characterize the color distribution of the pixels
# in the image
pixels = image_to_feature_vector(image)
hist = extract_color_histogram(image)
	# update the raw images, features, and labels matrices,
# respectively
rawImages.append(pixels)
features.append(hist)
labels.append(label)
# show an update every 1,000 images
if i > 0 and i % 1000 == 0 and info:
print("[INFO] processed {}/{}".format(i, len(imagePaths)))
# show some information on the memory consumed by the raw images
# matrix and features matrix
rawImages = np.array(rawImages)
features = np.array(features)
labels = np.array(labels)
if(info):
print("[INFO] pixels matrix: {:.2f}MB".format(
rawImages.nbytes / (1024 * 1000.0)))
print("[INFO] features matrix: {:.2f}MB".format(
features.nbytes / (1024 * 1000.0)))
(trainRI, testRI, trainRL, testRL) = train_test_split(
rawImages, labels, test_size=0, random_state=42)
(trainFeat, testFeat, trainLabels, testLabels) = train_test_split(
features, labels, test_size=0, random_state=42)
model_pxl.fit(trainRI, trainRL)
model_hst.fit(trainFeat, trainLabels)
def get_predict_proba(model, input):
prob = model.predict_proba(input)
label = model.predict(input)[0]
return {'label':label, '0':prob[0][0] ,'1':prob[0][1], '2': prob[0][2] }
def print_proba(ret, full=False):
if(full):
print("KNN")
print("\n PIXEL")
print("Probability:")
print("label 0: " + str(ret['pxl']['0']) )
print("label 1: " + str(ret['pxl']['1']))
print("label 2: " + str(ret['pxl']['2']))
print("image label:" + str(ret['pxl']['label']))
print("")
print("\n HISTOGRAM")
print("Probability:")
print("label 0: " + str(ret['hst']['0']) )
print("label 1: " + str(ret['hst']['1']))
print("label 2: " + str(ret['hst']['2']))
print("image label:" + str(ret['hst']['label']))
print("")
else:
print("KNN\n")
print("Label: " + str(ret['pxl']['label']) +
" prob:" + str(ret['pxl'][str(ret['pxl']['label'])]))
print("Label: " + str(ret['hst']['label']) +
" prob:" + str(ret['hst'][str(ret['hst']['label'])]))
def classify(img_path, imshow=False):
img = cv2.imread(img_path)
if(imshow):
cv2.imshow('image',img)
cv2.waitKey(0)
cv2.destroyAllWindows()
pxl = image_to_feature_vector(np.array(img)).reshape(1,-1)
hst = extract_color_histogram(np.array(img)).reshape(1,-1)
pxl_c = get_predict_proba(model_pxl, pxl)
hst_c = get_predict_proba(model_hst, hst)
return {'pxl':pxl_c, 'hst':hst_c } | gpl-3.0 |
Garrett-R/scikit-learn | sklearn/cluster/__init__.py | 19 | 1215 | """
The :mod:`sklearn.cluster` module gathers popular unsupervised clustering
algorithms.
"""
from .spectral import spectral_clustering, SpectralClustering
from .mean_shift_ import mean_shift, MeanShift, estimate_bandwidth, \
get_bin_seeds
from .affinity_propagation_ import affinity_propagation, AffinityPropagation
from .hierarchical import (ward_tree, Ward, WardAgglomeration,
AgglomerativeClustering, linkage_tree, FeatureAgglomeration)
from .k_means_ import k_means, KMeans, MiniBatchKMeans
from .dbscan_ import dbscan, DBSCAN
from .bicluster import SpectralBiclustering, SpectralCoclustering
__all__ = ['AffinityPropagation',
'AgglomerativeClustering',
'DBSCAN',
'KMeans',
'FeatureAgglomeration',
'MeanShift',
'MiniBatchKMeans',
'SpectralClustering',
'Ward',
'WardAgglomeration',
'affinity_propagation',
'dbscan',
'estimate_bandwidth',
'get_bin_seeds',
'k_means',
'linkage_tree',
'mean_shift',
'spectral_clustering',
'ward_tree',
'SpectralBiclustering',
'SpectralCoclustering']
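# A minimal usage sketch of the re-exported estimators (hedged: X stands for
# any (n_samples, n_features) NumPy array, not a bundled dataset):
#
#   from sklearn.cluster import KMeans
#   labels = KMeans(n_clusters=3).fit_predict(X)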
| bsd-3-clause |
vshtanko/scikit-learn | examples/neighbors/plot_classification.py | 287 | 1790 | """
================================
Nearest Neighbors Classification
================================
Sample usage of Nearest Neighbors classification.
It will plot the decision boundaries for each class.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
from sklearn import neighbors, datasets
n_neighbors = 15
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features. We could
# avoid this ugly slicing by using a two-dim dataset
y = iris.target
h = .02 # step size in the mesh
# Create color maps
cmap_light = ListedColormap(['#FFAAAA', '#AAFFAA', '#AAAAFF'])
cmap_bold = ListedColormap(['#FF0000', '#00FF00', '#0000FF'])
for weights in ['uniform', 'distance']:
# we create an instance of Neighbours Classifier and fit the data.
clf = neighbors.KNeighborsClassifier(n_neighbors, weights=weights)
clf.fit(X, y)
# Plot the decision boundary. For that, we will assign a color to each
    # point in the mesh [x_min, x_max]x[y_min, y_max].
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.figure()
plt.pcolormesh(xx, yy, Z, cmap=cmap_light)
# Plot also the training points
plt.scatter(X[:, 0], X[:, 1], c=y, cmap=cmap_bold)
plt.xlim(xx.min(), xx.max())
plt.ylim(yy.min(), yy.max())
plt.title("3-Class classification (k = %i, weights = '%s')"
% (n_neighbors, weights))
plt.show()
| bsd-3-clause |
rs2/pandas | pandas/util/_print_versions.py | 2 | 4297 | import codecs
import json
import locale
import os
import platform
import struct
import sys
from typing import Dict, Optional, Union
from pandas._typing import JSONSerializable
from pandas.compat._optional import VERSIONS, _get_version, import_optional_dependency
def _get_commit_hash() -> Optional[str]:
"""
Use vendored versioneer code to get git hash, which handles
git worktree correctly.
"""
from pandas._version import get_versions
versions = get_versions()
return versions["full-revisionid"]
def _get_sys_info() -> Dict[str, JSONSerializable]:
"""
Returns system information as a JSON serializable dictionary.
"""
uname_result = platform.uname()
language_code, encoding = locale.getlocale()
return {
"commit": _get_commit_hash(),
"python": ".".join(str(i) for i in sys.version_info),
"python-bits": struct.calcsize("P") * 8,
"OS": uname_result.system,
"OS-release": uname_result.release,
"Version": uname_result.version,
"machine": uname_result.machine,
"processor": uname_result.processor,
"byteorder": sys.byteorder,
"LC_ALL": os.environ.get("LC_ALL"),
"LANG": os.environ.get("LANG"),
"LOCALE": {"language-code": language_code, "encoding": encoding},
}
def _get_dependency_info() -> Dict[str, JSONSerializable]:
"""
Returns dependency information as a JSON serializable dictionary.
"""
deps = [
"pandas",
# required
"numpy",
"pytz",
"dateutil",
# install / build,
"pip",
"setuptools",
"Cython",
# test
"pytest",
"hypothesis",
# docs
"sphinx",
# Other, need a min version
"blosc",
"feather",
"xlsxwriter",
"lxml.etree",
"html5lib",
"pymysql",
"psycopg2",
"jinja2",
# Other, not imported.
"IPython",
"pandas_datareader",
]
deps.extend(list(VERSIONS))
result: Dict[str, JSONSerializable] = {}
for modname in deps:
mod = import_optional_dependency(
modname, raise_on_missing=False, on_version="ignore"
)
result[modname] = _get_version(mod) if mod else None
return result
def show_versions(as_json: Union[str, bool] = False) -> None:
"""
Provide useful information, important for bug reports.
It comprises info about hosting operation system, pandas version,
and versions of other installed relative packages.
Parameters
----------
as_json : str or bool, default False
* If False, outputs info in a human readable form to the console.
* If str, it will be considered as a path to a file.
Info will be written to that file in JSON format.
* If True, outputs info in JSON format to the console.
"""
sys_info = _get_sys_info()
deps = _get_dependency_info()
if as_json:
j = dict(system=sys_info, dependencies=deps)
if as_json is True:
            print(json.dumps(j, indent=2))
else:
assert isinstance(as_json, str) # needed for mypy
with codecs.open(as_json, "wb", encoding="utf8") as f:
json.dump(j, f, indent=2)
else:
assert isinstance(sys_info["LOCALE"], dict) # needed for mypy
language_code = sys_info["LOCALE"]["language-code"]
encoding = sys_info["LOCALE"]["encoding"]
sys_info["LOCALE"] = f"{language_code}.{encoding}"
maxlen = max(len(x) for x in deps)
print("\nINSTALLED VERSIONS")
print("------------------")
for k, v in sys_info.items():
print(f"{k:<{maxlen}}: {v}")
print("")
for k, v in deps.items():
print(f"{k:<{maxlen}}: {v}")
def main() -> int:
from optparse import OptionParser
parser = OptionParser()
parser.add_option(
"-j",
"--json",
metavar="FILE",
nargs=1,
help="Save output as JSON into file, pass in '-' to output to stdout",
)
(options, args) = parser.parse_args()
if options.json == "-":
options.json = True
show_versions(as_json=options.json)
return 0
if __name__ == "__main__":
sys.exit(main())
| bsd-3-clause |
beneckart/future-robotics | MindMachine/spectrastrobe_test.py | 1 | 3563 | import numpy as np
import time
import matplotlib.pyplot as plt
import pyaudio
import simpleaudio as sa
A_STROBE_FREQ = 19200
REF_TONE_FREQ = 18200
REF_PANNING_FREQ = 30.0
R_FREQ = 18700
G_FREQ = 19200
B_FREQ = 19700
BYTES_PER_SAMPLE = 2
A_FREQ = 440
C_SHARP_FREQ = A_FREQ * 2 ** (4 / 12)
E_FREQ = A_FREQ * 2 ** (7 / 12)
SAMPLE_RATE = 44100 # 44100 samples per second
MAX_INT16 = 2**15 - 1
L_IX = 0
R_IX = 1
N_CHANNELS = 2
MAX_VOL = 1.0
ROUNDING_PARAM_DEFAULT = 0.1
FLASHING_FREQ = 1
def get_empty_buffer(duration):
n_samples = int(SAMPLE_RATE * duration)
return np.zeros((n_samples, N_CHANNELS))
def t_buffer(duration):
n_samples = int(SAMPLE_RATE * duration)
return np.linspace(0, duration, n_samples, False)
def sine_wave(duration, frequency, phase = 0.0):
t = t_buffer(duration)
return np.sin(2 * np.pi * frequency * t + phase)
def rounded_square_wave(duration, frequency, phase = 0.0, rounding_param = ROUNDING_PARAM_DEFAULT):
t = t_buffer(duration)
return np.arctan(np.sin(2.0 * np.pi * t * frequency + phase) / rounding_param)
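# The arctan squashing above interpolates between a sine (large rounding_param)
# and a +/-pi/2 square wave (rounding_param -> 0). Worked check with the
# default r = 0.1: arctan(sin(pi/2)/0.1) = arctan(10) ~= 1.471, already close
# to the pi/2 ~= 1.571 ceiling, so the wave is nearly square with softened edges.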
def sine_tone(frequency_l, frequency_r, vol_l, vol_r, duration):
tone = get_empty_buffer(duration)
tone[:,L_IX] = vol_l * sine_wave(duration, frequency_l)
tone[:,R_IX] = vol_r * sine_wave(duration, frequency_r)
return tone
def spectrastrobe_tone(duration, panning_freq = REF_PANNING_FREQ, tone_freq = REF_TONE_FREQ):
base_tone = sine_tone(tone_freq, tone_freq, MAX_VOL, MAX_VOL, duration)
# Create a square clipping function
clip_mask = get_empty_buffer(duration)
clip_mask[:,0] = np.maximum(0.0, rounded_square_wave(duration, panning_freq))
clip_mask[:,1] = np.maximum(0.0, rounded_square_wave(duration, panning_freq, np.pi))
return base_tone * clip_mask
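# Interpretation sketch (hedged reading of the SpectraStrobe convention, not a
# spec quote): the 18.2 kHz carrier, panned left/right at REF_PANNING_FREQ,
# tells the glasses a control track is present; color intensities then ride on
# the R/G/B carriers at 18.7/19.2/19.7 kHz. For example:
#
#   ref = spectrastrobe_tone(10.0)   # -> shape (441000, 2) at 44.1 kHz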
def plot_tone(tone, xlim = (0.0, 2.0)):
n_samples = tone.shape[0]
t = t_buffer(n_samples / SAMPLE_RATE)
plt.plot(t, tone[:,L_IX], 'g-')
plt.plot(t, tone[:,R_IX], 'b-')
plt.xlim(xlim)
plt.show()
def play_tone(tone):
# Transform float to int16
audio = tone * MAX_INT16 / np.max(np.abs(tone))
audio = audio.astype(np.int16)
# Start playback
play_obj = sa.play_buffer(audio, N_CHANNELS, BYTES_PER_SAMPLE, SAMPLE_RATE)
# Wait for playback to finish before exiting
play_obj.wait_done()
duration = 10.0
#toneAE = sine_tone(A_FREQ, E_FREQ, MAX_VOL, MAX_VOL, 2)
ref_tone = spectrastrobe_tone(duration)
gb_tone = sine_tone(A_FREQ, E_FREQ, MAX_VOL, MAX_VOL, duration)
flashing_mask = sine_wave(duration, FLASHING_FREQ)**2
gb_tone[:,0] *= flashing_mask
gb_tone[:,1] *= flashing_mask
#plot_tone(ref_tone, (0.0, 0.5))
#plot_tone(gb_tone)
# play_tone(ref_tone + gb_tone)
# play_tone(gb_tone)
# time.sleep(2.0)
# instantiate PyAudio (1)
p = pyaudio.PyAudio()
a_tone = sine_wave(duration, A_FREQ)
a_tone *= flashing_mask
tone = gb_tone
audio = tone * MAX_INT16 / np.max(np.abs(tone))
audio = audio.astype(np.int16)
i = 0
# define callback (2)
def callback(in_data, frame_count, time_info, status):
	global i
	audio_chunk = audio[i : i + frame_count]
	i = i + frame_count
	# report completion once the buffer runs out; returning a short chunk with
	# paContinue would make the stream underflow at the end of the audio
	flag = pyaudio.paContinue if len(audio_chunk) == frame_count else pyaudio.paComplete
	return (audio_chunk, flag)
# open stream using callback (3)
stream = p.open(format=pyaudio.paInt16,
channels=2,
rate=SAMPLE_RATE,
output=True,
stream_callback=callback)
# start the stream (4)
stream.start_stream()
# wait for stream to finish (5)
while stream.is_active():
time.sleep(0.1)
# stop stream (6)
stream.stop_stream()
stream.close()
# close PyAudio (7)
p.terminate()
| mit |
rseubert/scikit-learn | examples/classification/plot_classification_probability.py | 242 | 2624 | """
===============================
Plot classification probability
===============================
Plot the classification probability for different classifiers. We use a 3
class dataset, and we classify it with a Support Vector classifier, L1
and L2 penalized logistic regression with either a One-Vs-Rest or multinomial
setting.
The logistic regression is not a multiclass classifier out of the box. As
a result it can identify only the first class.
"""
print(__doc__)
# Author: Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
import matplotlib.pyplot as plt
import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC
from sklearn import datasets
iris = datasets.load_iris()
X = iris.data[:, 0:2] # we only take the first two features for visualization
y = iris.target
n_features = X.shape[1]
C = 1.0
# Create different classifiers. The logistic regression cannot do
# multiclass out of the box.
classifiers = {'L1 logistic': LogisticRegression(C=C, penalty='l1'),
'L2 logistic (OvR)': LogisticRegression(C=C, penalty='l2'),
'Linear SVC': SVC(kernel='linear', C=C, probability=True,
random_state=0),
'L2 logistic (Multinomial)': LogisticRegression(
C=C, solver='lbfgs', multi_class='multinomial'
)}
n_classifiers = len(classifiers)
plt.figure(figsize=(3 * 2, n_classifiers * 2))
plt.subplots_adjust(bottom=.2, top=.95)
xx = np.linspace(3, 9, 100)
yy = np.linspace(1, 5, 100).T
xx, yy = np.meshgrid(xx, yy)
Xfull = np.c_[xx.ravel(), yy.ravel()]
for index, (name, classifier) in enumerate(classifiers.items()):
classifier.fit(X, y)
y_pred = classifier.predict(X)
classif_rate = np.mean(y_pred.ravel() == y.ravel()) * 100
print("classif_rate for %s : %f " % (name, classif_rate))
    # View probabilities
probas = classifier.predict_proba(Xfull)
n_classes = np.unique(y_pred).size
for k in range(n_classes):
plt.subplot(n_classifiers, n_classes, index * n_classes + k + 1)
plt.title("Class %d" % k)
if k == 0:
plt.ylabel(name)
imshow_handle = plt.imshow(probas[:, k].reshape((100, 100)),
extent=(3, 9, 1, 5), origin='lower')
plt.xticks(())
plt.yticks(())
idx = (y_pred == k)
if idx.any():
plt.scatter(X[idx, 0], X[idx, 1], marker='o', c='k')
ax = plt.axes([0.15, 0.04, 0.7, 0.05])
plt.title("Probability")
plt.colorbar(imshow_handle, cax=ax, orientation='horizontal')
plt.show()
| bsd-3-clause |
devs1991/test_edx_docmode | venv/lib/python2.7/site-packages/sklearn/linear_model/tests/test_coordinate_descent.py | 2 | 9888 | # Authors: Olivier Grisel <[email protected]>
# Alexandre Gramfort <[email protected]>
# License: BSD Style.
import warnings
from sys import version_info
import numpy as np
from scipy import interpolate
from numpy.testing import assert_array_almost_equal, assert_almost_equal, \
assert_equal
from nose import SkipTest
from nose.tools import assert_true
from sklearn.utils.testing import assert_greater
from sklearn.linear_model.coordinate_descent import Lasso, \
LassoCV, ElasticNet, ElasticNetCV, MultiTaskLasso, MultiTaskElasticNet
from sklearn.linear_model import LassoLarsCV
def check_warnings():
if version_info < (2, 6):
raise SkipTest("Testing for warnings is not supported in versions \
older than Python 2.6")
def test_lasso_zero():
"""Check that the lasso can handle zero data without crashing"""
X = [[0], [0], [0]]
y = [0, 0, 0]
clf = Lasso(alpha=0.1).fit(X, y)
pred = clf.predict([[1], [2], [3]])
assert_array_almost_equal(clf.coef_, [0])
assert_array_almost_equal(pred, [0, 0, 0])
assert_almost_equal(clf.dual_gap_, 0)
def test_lasso_toy():
"""
Test Lasso on a toy example for various values of alpha.
    When validating this against glmnet, note that glmnet divides the
    objective by the number of observations (nobs).
"""
X = [[-1], [0], [1]]
Y = [-1, 0, 1] # just a straight line
T = [[2], [3], [4]] # test sample
clf = Lasso(alpha=1e-8)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [1])
assert_array_almost_equal(pred, [2, 3, 4])
assert_almost_equal(clf.dual_gap_, 0)
clf = Lasso(alpha=0.1)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [.85])
assert_array_almost_equal(pred, [1.7, 2.55, 3.4])
assert_almost_equal(clf.dual_gap_, 0)
clf = Lasso(alpha=0.5)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [.25])
assert_array_almost_equal(pred, [0.5, 0.75, 1.])
assert_almost_equal(clf.dual_gap_, 0)
clf = Lasso(alpha=1)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [.0])
assert_array_almost_equal(pred, [0, 0, 0])
assert_almost_equal(clf.dual_gap_, 0)
def test_enet_toy():
"""
Test ElasticNet for various parameters of alpha and rho.
    Actually, the parameter alpha = 0 should not be allowed. However,
we test it as a border case.
ElasticNet is tested with and without precomputed Gram matrix
"""
X = np.array([[-1.], [0.], [1.]])
Y = [-1, 0, 1] # just a straight line
T = [[2.], [3.], [4.]] # test sample
# this should be the same as lasso
clf = ElasticNet(alpha=1e-8, rho=1.0)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [1])
assert_array_almost_equal(pred, [2, 3, 4])
assert_almost_equal(clf.dual_gap_, 0)
clf = ElasticNet(alpha=0.5, rho=0.3, max_iter=100,
precompute=False)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.50819], decimal=3)
assert_array_almost_equal(pred, [1.0163, 1.5245, 2.0327], decimal=3)
assert_almost_equal(clf.dual_gap_, 0)
clf.set_params(max_iter=100, precompute=True)
clf.fit(X, Y) # with Gram
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.50819], decimal=3)
assert_array_almost_equal(pred, [1.0163, 1.5245, 2.0327], decimal=3)
assert_almost_equal(clf.dual_gap_, 0)
clf.set_params(max_iter=100, precompute=np.dot(X.T, X))
clf.fit(X, Y) # with Gram
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.50819], decimal=3)
assert_array_almost_equal(pred, [1.0163, 1.5245, 2.0327], decimal=3)
assert_almost_equal(clf.dual_gap_, 0)
clf = ElasticNet(alpha=0.5, rho=0.5)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.45454], 3)
assert_array_almost_equal(pred, [0.9090, 1.3636, 1.8181], 3)
assert_almost_equal(clf.dual_gap_, 0)
def build_dataset(n_samples=50, n_features=200, n_informative_features=10,
n_targets=1):
"""
build an ill-posed linear regression problem with many noisy features and
comparatively few samples
"""
random_state = np.random.RandomState(0)
if n_targets > 1:
w = random_state.randn(n_features, n_targets)
else:
w = random_state.randn(n_features)
w[n_informative_features:] = 0.0
X = random_state.randn(n_samples, n_features)
y = np.dot(X, w)
X_test = random_state.randn(n_samples, n_features)
y_test = np.dot(X_test, w)
return X, y, X_test, y_test
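# Shape note for the helper above (defaults): X is (50, 200) with only the
# first 10 columns informative and y is (50,), i.e. n_features >> n_samples,
# so the regression problem is deliberately ill-posed.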
def test_lasso_path():
X, y, X_test, y_test = build_dataset()
max_iter = 150
clf = LassoCV(n_alphas=10, eps=1e-3, max_iter=max_iter).fit(X, y)
assert_almost_equal(clf.alpha_, 0.026, 2)
clf = LassoCV(n_alphas=10, eps=1e-3, max_iter=max_iter, precompute=True)
clf.fit(X, y)
assert_almost_equal(clf.alpha_, 0.026, 2)
# Check that the lars and the coordinate descent implementation
# select a similar alpha
lars = LassoLarsCV(normalize=False, max_iter=30).fit(X, y)
# for this we check that they don't fall in the grid of
# clf.alphas further than 1
assert_true(np.abs(
np.searchsorted(clf.alphas_[::-1], lars.alpha_)
- np.searchsorted(clf.alphas_[::-1], clf.alpha_)
) <= 1)
# check that they also give a similar MSE
mse_lars = interpolate.interp1d(lars.cv_alphas_, lars.cv_mse_path_.T)
np.testing.assert_approx_equal(
mse_lars(clf.alphas_[5]).mean(),
clf.mse_path_[5].mean(),
significant=2)
# test set
assert_greater(clf.score(X_test, y_test), 0.99)
def test_enet_path():
X, y, X_test, y_test = build_dataset()
max_iter = 150
with warnings.catch_warnings():
# Here we have a small number of iterations, and thus the
# ElasticNet might not converge. This is to speed up tests
warnings.simplefilter("ignore", UserWarning)
clf = ElasticNetCV(n_alphas=5, eps=2e-3, rho=[0.9, 0.95], cv=3,
max_iter=max_iter)
clf.fit(X, y)
assert_almost_equal(clf.alpha_, 0.002, 2)
assert_equal(clf.rho_, 0.95)
clf = ElasticNetCV(n_alphas=5, eps=2e-3, rho=[0.9, 0.95], cv=3,
max_iter=max_iter, precompute=True)
clf.fit(X, y)
assert_almost_equal(clf.alpha_, 0.002, 2)
assert_equal(clf.rho_, 0.95)
# test set
assert_greater(clf.score(X_test, y_test), 0.99)
def test_path_parameters():
X, y, _, _ = build_dataset()
max_iter = 50
clf = ElasticNetCV(n_alphas=50, eps=1e-3, max_iter=max_iter,
rho=0.5)
clf.fit(X, y) # new params
assert_almost_equal(0.5, clf.rho)
assert_equal(50, clf.n_alphas)
assert_equal(50, len(clf.alphas_))
def test_warm_start():
X, y, _, _ = build_dataset()
# Test that explicit warm restart...
clf = ElasticNet(alpha=1.0, max_iter=50)
clf.fit(X, y)
clf2 = ElasticNet(alpha=0.1, max_iter=50)
clf2.fit(X, y, coef_init=clf.coef_.copy())
#... and implicit warm restart are equivalent.
clf3 = ElasticNet(alpha=1.0, max_iter=50, warm_start=True)
clf3.fit(X, y)
assert_array_almost_equal(clf3.coef_, clf.coef_)
clf3.set_params(alpha=0.1)
clf3.fit(X, y)
assert_array_almost_equal(clf3.coef_, clf2.coef_)
def test_lasso_alpha_warning():
check_warnings() # Skip if unsupported Python version
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
X = [[-1], [0], [1]]
Y = [-1, 0, 1] # just a straight line
clf = Lasso(alpha=0)
clf.fit(X, Y)
assert_greater(len(w), 0) # warnings should be raised
def test_lasso_positive_constraint():
X = [[-1], [0], [1]]
y = [1, 0, -1] # just a straight line with negative slope
lasso = Lasso(alpha=0.1, max_iter=1000, positive=True)
lasso.fit(X, y)
assert_true(min(lasso.coef_) >= 0)
lasso = Lasso(alpha=0.1, max_iter=1000, precompute=True, positive=True)
lasso.fit(X, y)
assert_true(min(lasso.coef_) >= 0)
def test_enet_positive_constraint():
X = [[-1], [0], [1]]
y = [1, 0, -1] # just a straight line with negative slope
enet = ElasticNet(alpha=0.1, max_iter=1000, positive=True)
enet.fit(X, y)
assert_true(min(enet.coef_) >= 0)
def test_multi_task_lasso_and_enet():
X, y, X_test, y_test = build_dataset()
Y = np.c_[y, y]
#Y_test = np.c_[y_test, y_test]
clf = MultiTaskLasso(alpha=1, tol=1e-8).fit(X, Y)
assert_true(0 < clf.dual_gap_ < 1e-5)
assert_array_almost_equal(clf.coef_[0], clf.coef_[1])
clf = MultiTaskElasticNet(alpha=1, tol=1e-8).fit(X, Y)
assert_true(0 < clf.dual_gap_ < 1e-5)
assert_array_almost_equal(clf.coef_[0], clf.coef_[1])
def test_enet_multitarget():
n_targets = 3
X, y, _, _ = build_dataset(n_samples=10, n_features=8,
n_informative_features=10, n_targets=n_targets)
estimator = ElasticNet(alpha=0.01, fit_intercept=True)
estimator.fit(X, y)
coef, intercept, dual_gap, eps = (estimator.coef_, estimator.intercept_,
estimator.dual_gap_, estimator.eps_)
for k in xrange(n_targets):
estimator.fit(X, y[:, k])
assert_array_almost_equal(coef[k, :], estimator.coef_)
assert_array_almost_equal(intercept[k], estimator.intercept_)
assert_array_almost_equal(dual_gap[k], estimator.dual_gap_)
assert_array_almost_equal(eps[k], estimator.eps_)
if __name__ == '__main__':
import nose
nose.runmodule()
| agpl-3.0 |
lancezlin/ml_template_py | lib/python2.7/site-packages/sklearn/neighbors/unsupervised.py | 11 | 4757 | """Unsupervised nearest neighbors learner"""
from .base import NeighborsBase
from .base import KNeighborsMixin
from .base import RadiusNeighborsMixin
from .base import UnsupervisedMixin
class NearestNeighbors(NeighborsBase, KNeighborsMixin,
RadiusNeighborsMixin, UnsupervisedMixin):
"""Unsupervised learner for implementing neighbor searches.
Read more in the :ref:`User Guide <unsupervised_neighbors>`.
Parameters
----------
n_neighbors : int, optional (default = 5)
Number of neighbors to use by default for :meth:`k_neighbors` queries.
radius : float, optional (default = 1.0)
Range of parameter space to use by default for :meth:`radius_neighbors`
queries.
algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional
Algorithm used to compute the nearest neighbors:
- 'ball_tree' will use :class:`BallTree`
- 'kd_tree' will use :class:`KDtree`
- 'brute' will use a brute-force search.
- 'auto' will attempt to decide the most appropriate algorithm
based on the values passed to :meth:`fit` method.
Note: fitting on sparse input will override the setting of
this parameter, using brute force.
leaf_size : int, optional (default = 30)
Leaf size passed to BallTree or KDTree. This can affect the
speed of the construction and query, as well as the memory
required to store the tree. The optimal value depends on the
nature of the problem.
p: integer, optional (default = 2)
Parameter for the Minkowski metric from
sklearn.metrics.pairwise.pairwise_distances. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
metric : string or callable, default 'minkowski'
metric to use for distance computation. Any metric from scikit-learn
or scipy.spatial.distance can be used.
If metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays as input and return one value indicating the
distance between them. This works for Scipy's metrics, but is less
efficient than passing the metric name as a string.
Distance matrices are not supported.
Valid values for metric are:
- from scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',
'manhattan']
- from scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',
'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski',
'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto',
'russellrao', 'seuclidean', 'sokalmichener', 'sokalsneath',
'sqeuclidean', 'yule']
See the documentation for scipy.spatial.distance for details on these
metrics.
metric_params : dict, optional (default = None)
Additional keyword arguments for the metric function.
n_jobs : int, optional (default = 1)
The number of parallel jobs to run for neighbors search.
If ``-1``, then the number of jobs is set to the number of CPU cores.
Affects only :meth:`k_neighbors` and :meth:`kneighbors_graph` methods.
Examples
--------
>>> import numpy as np
>>> from sklearn.neighbors import NearestNeighbors
>>> samples = [[0, 0, 2], [1, 0, 0], [0, 0, 1]]
>>> neigh = NearestNeighbors(2, 0.4)
>>> neigh.fit(samples) #doctest: +ELLIPSIS
NearestNeighbors(...)
>>> neigh.kneighbors([[0, 0, 1.3]], 2, return_distance=False)
... #doctest: +ELLIPSIS
array([[2, 0]]...)
>>> nbrs = neigh.radius_neighbors([[0, 0, 1.3]], 0.4, return_distance=False)
>>> np.asarray(nbrs[0][0])
array(2)
See also
--------
KNeighborsClassifier
RadiusNeighborsClassifier
KNeighborsRegressor
RadiusNeighborsRegressor
BallTree
Notes
-----
See :ref:`Nearest Neighbors <neighbors>` in the online documentation
for a discussion of the choice of ``algorithm`` and ``leaf_size``.
https://en.wikipedia.org/wiki/K-nearest_neighbor_algorithm
"""
def __init__(self, n_neighbors=5, radius=1.0,
algorithm='auto', leaf_size=30, metric='minkowski',
p=2, metric_params=None, n_jobs=1, **kwargs):
self._init_params(n_neighbors=n_neighbors,
radius=radius,
algorithm=algorithm,
leaf_size=leaf_size, metric=metric, p=p,
metric_params=metric_params, n_jobs=n_jobs, **kwargs)
| mit |
PatrickChrist/scikit-learn | examples/mixture/plot_gmm_classifier.py | 250 | 3918 | """
==================
GMM classification
==================
Demonstration of Gaussian mixture models for classification.
See :ref:`gmm` for more information on the estimator.
Plots predicted labels on both training and held out test data using a
variety of GMM classifiers on the iris dataset.
Compares GMMs with spherical, diagonal, full, and tied covariance
matrices in increasing order of performance. Although one would
expect full covariance to perform best in general, it is prone to
overfitting on small datasets and does not generalize well to held out
test data.
On the plots, train data is shown as dots, while test data is shown as
crosses. The iris dataset is four-dimensional. Only the first two
dimensions are shown here, and thus some points are separated in other
dimensions.
"""
print(__doc__)
# Author: Ron Weiss <[email protected]>, Gael Varoquaux
# License: BSD 3 clause
# $Id$
import matplotlib.pyplot as plt
import matplotlib as mpl
import numpy as np
from sklearn import datasets
from sklearn.cross_validation import StratifiedKFold
from sklearn.externals.six.moves import xrange
from sklearn.mixture import GMM
def make_ellipses(gmm, ax):
for n, color in enumerate('rgb'):
v, w = np.linalg.eigh(gmm._get_covars()[n][:2, :2])
u = w[0] / np.linalg.norm(w[0])
angle = np.arctan2(u[1], u[0])
angle = 180 * angle / np.pi # convert to degrees
v *= 9
ell = mpl.patches.Ellipse(gmm.means_[n, :2], v[0], v[1],
180 + angle, color=color)
ell.set_clip_box(ax.bbox)
ell.set_alpha(0.5)
ax.add_artist(ell)
iris = datasets.load_iris()
# Break up the dataset into non-overlapping training (75%) and testing
# (25%) sets.
skf = StratifiedKFold(iris.target, n_folds=4)
# Only take the first fold.
train_index, test_index = next(iter(skf))
X_train = iris.data[train_index]
y_train = iris.target[train_index]
X_test = iris.data[test_index]
y_test = iris.target[test_index]
n_classes = len(np.unique(y_train))
# Try GMMs using different types of covariances.
classifiers = dict((covar_type, GMM(n_components=n_classes,
covariance_type=covar_type, init_params='wc', n_iter=20))
for covar_type in ['spherical', 'diag', 'tied', 'full'])
n_classifiers = len(classifiers)
plt.figure(figsize=(3 * n_classifiers / 2, 6))
plt.subplots_adjust(bottom=.01, top=0.95, hspace=.15, wspace=.05,
left=.01, right=.99)
for index, (name, classifier) in enumerate(classifiers.items()):
# Since we have class labels for the training data, we can
# initialize the GMM parameters in a supervised manner.
classifier.means_ = np.array([X_train[y_train == i].mean(axis=0)
for i in xrange(n_classes)])
# Train the other parameters using the EM algorithm.
classifier.fit(X_train)
    h = plt.subplot(2, n_classifiers // 2, index + 1)
make_ellipses(classifier, h)
for n, color in enumerate('rgb'):
data = iris.data[iris.target == n]
plt.scatter(data[:, 0], data[:, 1], 0.8, color=color,
label=iris.target_names[n])
# Plot the test data with crosses
for n, color in enumerate('rgb'):
data = X_test[y_test == n]
plt.plot(data[:, 0], data[:, 1], 'x', color=color)
y_train_pred = classifier.predict(X_train)
train_accuracy = np.mean(y_train_pred.ravel() == y_train.ravel()) * 100
plt.text(0.05, 0.9, 'Train accuracy: %.1f' % train_accuracy,
transform=h.transAxes)
y_test_pred = classifier.predict(X_test)
test_accuracy = np.mean(y_test_pred.ravel() == y_test.ravel()) * 100
plt.text(0.05, 0.8, 'Test accuracy: %.1f' % test_accuracy,
transform=h.transAxes)
plt.xticks(())
plt.yticks(())
plt.title(name)
plt.legend(loc='lower right', prop=dict(size=12))
plt.show()
| bsd-3-clause |
mkuron/espresso | testsuite/scripts/importlib_wrapper.py | 1 | 12576 | # Copyright (C) 2019 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import re
import os
import sys
import unittest
import importlib
import espressomd
from unittest.mock import MagicMock
def _id(x):
return x
# global variable: if one import failed, all subsequent imports will be skipped,
# see skip_future_imports_dependency()
skip_future_imports = False
def configure_and_import(filepath,
gpu=False,
substitutions=lambda x: x,
cmd_arguments=None,
script_suffix=None,
move_to_script_dir=True,
random_seeds=True,
mock_visualizers=True,
**parameters):
"""
Copy a Python script to a new location and alter some lines of code:
- change global variables and local variables (up to 1 indentation level)
- pass command line arguments during import to emulate shell execution
- disable the OpenGL/Mayavi modules if they are not compiled
- disable the matplotlib GUI using a text-based backend
- use random seeds for the RNG in NumPy and ESPResSo
- temporarily move to the directory where the script is located
Parameters
----------
filepath : str
python script to import
gpu : bool
whether GPU is necessary or not
    substitutions : function
custom text replacement operation (useful to edit out calls to the
OpenGL or Mayavi visualizers' ``run()`` method)
cmd_arguments : list
command line arguments, i.e. sys.argv without the script path
script_suffix : str
suffix to append to the configured script (useful when a single
module is being tested by multiple tests in parallel)
random_seeds : bool
if ``True``, use random seeds in RNGs
mock_visualizers : bool
if ``True``, substitute ES visualizers with `Mock()` classes in case
of `ImportError()` (use ``False`` if an `ImportError()` is relevant
to your test)
move_to_script_dir : bool
if ``True``, move to the script's directory (useful when the script
needs to load files hardcoded as relative paths, or when files are
generated and need cleanup); this is enabled by default
\*\*parameters :
global variables to replace
"""
if skip_future_imports:
module = MagicMock()
skipIfMissingImport = skip_future_imports_dependency(filepath)
return module, skipIfMissingImport
if gpu and not espressomd.gpu_available():
skip_future_imports_dependency(filepath)
skipIfMissingGPU = unittest.skip("gpu not available, skipping test!")
module = MagicMock()
return module, skipIfMissingGPU
filepath = os.path.abspath(filepath)
# load original script
# read in binary mode, then decode as UTF-8 to avoid this python3.5 error:
# UnicodeDecodeError: 'ascii' codec can't decode byte 0xc3 in position 915:
# ordinal not in range(128)
with open(filepath, "rb") as f:
code = f.read().decode(encoding="utf-8")
# custom substitutions
code = substitutions(code)
assert code.strip()
# substitute global variables
code = substitute_variable_values(code, **parameters)
# substitute command line arguments
if cmd_arguments is not None:
code, old_sys_argv = set_cmd(code, filepath, cmd_arguments)
# disable matplotlib GUI using the Agg backend
code = disable_matplotlib_gui(code)
# disable OpenGL/Mayavi GUI using MagicMock()
if mock_visualizers:
code = mock_es_visualization(code)
# use random seeds for ES and NumPy RNGs
if random_seeds:
code = set_random_seeds(code)
# save changes to a new file
if script_suffix:
if script_suffix[0] != "_":
script_suffix = "_" + script_suffix
else:
script_suffix = ""
script_suffix += "_processed.py"
output_filepath = os.path.splitext(filepath)[0] + script_suffix
assert os.path.isfile(output_filepath) is False, \
"File {} already processed, cannot overwrite".format(output_filepath)
with open(output_filepath, "wb") as f:
f.write(code.encode(encoding="utf-8"))
# import
dirname, basename = os.path.split(output_filepath)
if move_to_script_dir:
os.chdir(dirname)
sys.path.insert(0, dirname)
module_name = os.path.splitext(basename)[0]
try:
module = importlib.import_module(module_name)
except espressomd.FeaturesError as err:
skip_future_imports_dependency(filepath)
skipIfMissingFeatures = unittest.skip(str(err) + ", skipping test!")
module = MagicMock()
else:
skipIfMissingFeatures = _id
if cmd_arguments is not None:
# restore original command line arguments
sys.argv = old_sys_argv
return module, skipIfMissingFeatures
def set_cmd(code, filepath, cmd_arguments):
assert isinstance(cmd_arguments, list) \
or isinstance(cmd_arguments, tuple)
sys_argv = list(map(str, cmd_arguments))
sys_argv.insert(0, os.path.basename(filepath))
re_import_sys = re.compile("^import[\t\ ]+sys[\t\ ]*$", re.M)
re_import_argparse = re.compile("^import[\t\ ]+argparse[\t\ ]*$", re.M)
if re_import_sys.search(code) is not None:
code = re_import_sys.sub("\g<0>\nsys.argv = " + str(sys_argv), code, 1)
elif re_import_argparse.search(code) is not None:
code = re_import_argparse.sub("\g<0>\nimport sys\nsys.argv = "
+ str(sys_argv), code, 1)
else:
raise AssertionError("module sys (or argparse) is not imported")
old_sys_argv = list(sys.argv)
return code, old_sys_argv
def substitute_variable_values(code, strings_as_is=False, keep_original=True,
**parameters):
"""
Substitute values of global variables.
Parameters
----------
code : str
Source code to edit.
strings_as_is : bool
If ``True``, consider all values in \*\*parameters are strings and
substitute them in-place without formatting by ``repr()``.
keep_original : bool
Keep the original value (e.g. ``N = 10; _N__original = 1000``), helps
with debugging. If ``False``, make sure the original value is not a
multiline statement, because removing its first line would lead to
a syntax error.
\*\*parameters :
Variable names and their new value.
"""
for variable, value in parameters.items():
assert variable in code, "variable {} not found".format(variable)
re_var = re.compile("^(\t|\ {,4})(" + variable + ")(?= *=[^=])", re.M)
assert re_var.search(code) is not None, \
"variable {} has no assignment".format(variable)
val = strings_as_is and value or repr(value)
code = re_var.sub(r"\g<1>\g<2> = " + val + r"; _\g<2>__original", code)
if not keep_original:
code = re.sub(r"; _" + variable + "__original.+", "", code)
return code
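# Worked example for the substitution above (doctest-style sketch):
#
#   >>> substitute_variable_values("n_steps = 1000\n", n_steps=10)
#   'n_steps = 10; _n_steps__original = 1000\n'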
def set_random_seeds(code):
# delete explicit ESPResSo seed
aliases = re.findall(r"([^\s;]+) *= *(?:espressomd\.)?System *\(", code)
pattern = r"(?<=[\s;]){}\.(?:seed|random_number_generator_state)(?= *=[^=])"
subst = "{}.set_random_state_PRNG(); _random_seed_es__original"
for varname in set(aliases):
code = re.sub(pattern.format(varname), subst.format(varname), code)
# delete explicit NumPy seed
code = re.sub(r"(?<=[\s;])(?:numpy|np)\.random\.seed *(?=\()",
"_random_seed_np = (lambda *args, **kwargs: None)", code)
return code
def disable_matplotlib_gui(code):
"""
Use the matplotlib Agg backend (no GUI).
"""
# find under which name matplotlib was imported
re_mpl_aliases = [
re.compile(r"^[\t\ ]*import[\t\ ]+(matplotlib)[\t\ ]*$", re.M),
re.compile(r"^[\t\ ]*import[\t\ ]+matplotlib[\t\ ]+as[\t\ ]+([^\s;]+)",
re.M)]
aliases = set(x for re_mpl in re_mpl_aliases for x in re_mpl.findall(code))
# remove any custom backend
for alias in aliases:
code = re.sub(r"^[\t\ ]*" + alias + r"\.use\(([\"']+).+?\1[\t\ ]*\)",
"", code, 0, re.M)
# use the Agg backend
code = re.sub(r"^([\t\ ]*)(?=(?:from|import)[\t\ ]+matplotlib[\.\s])",
r"\g<1>import matplotlib as _mpl;_mpl.use('Agg');",
code, 1, re.M)
return code
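# Worked example (sketch): a bare pyplot import gains the Agg backend prefix:
#
#   >>> disable_matplotlib_gui("import matplotlib.pyplot as plt\n")
#   "import matplotlib as _mpl;_mpl.use('Agg');import matplotlib.pyplot as plt\n"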
def mock_es_visualization(code):
"""
Replace `import espressomd.visualization_<backend>` by a `MagicMock()` when
the visualization module is not installed, by catching the `ImportError()`
exception. Please note that `espressomd.visualization` is deferring the
exception, thus requiring additional checks. Import aliases are supported,
however please don't use `from espressomd.visualization import *` because
it hides the namespace of classes to be mocked.
"""
# consider all legal import statements in Python3
# (the ordering follows regex precedence rules)
re_es_vis_import = re.compile(r"""
^from\ espressomd\ import\ (?:visualization(?:_opengl|_mayavi)?)\ as\ (\S+)
|^from\ espressomd\ import\ (visualization(?:_opengl|_mayavi)?)
|^from\ espressomd\.visualization(?:_opengl|_mayavi)?\ import\ ([^\n]+)
|^import\ espressomd\.visualization(?:_opengl|_mayavi)?\ as\ (\S+)
|^import\ (espressomd\.visualization(?:_opengl|_mayavi)?)
""".replace(r"\ ", r"[\t\ ]+"), re.VERBOSE | re.M)
# replacement template
r_es_vis_mock = r"""
try:
{0}{1}
except ImportError:
from unittest.mock import MagicMock
import espressomd
{2} = MagicMock()
""".lstrip()
# cannot handle "from espressomd.visualization import *"
re_es_vis_import_namespace = re.compile(
r"^from\ espressomd\.visualization(?:_opengl|_mayavi)?\ import\ \*"
.replace(r"\ ", r"[\t\ ]+"), re.M)
m = re_es_vis_import_namespace.search(code)
assert m is None, "cannot use MagicMock() at line '" + m.group(0) + "'"
def check_for_deferred_ImportError(line, alias):
if "_opengl" not in line and "_mayavi" not in line:
if "openGLLive" in line or "mayaviLive" in line:
return """
if hasattr({0}, 'deferred_ImportError'):
raise {0}.deferred_ImportError""".format(alias)
else:
return """
if hasattr({0}.mayaviLive, 'deferred_ImportError') or \\
hasattr({0}.openGLLive, 'deferred_ImportError'):
raise ImportError()""".format(alias)
else:
return ""
def substitution_es_vis_import(m):
aliases = [x for x in m.groups() if x is not None][0].split(',')
guards = []
for alias in aliases:
line = m.group(0)
if len(aliases) >= 2 and 'from espressomd.visualization' in line:
line = line.split('import')[0] + 'import ' + alias.strip()
if ' as ' in alias:
alias = alias.split(' as ')[1]
alias = alias.strip()
checks = check_for_deferred_ImportError(line, alias)
s = r_es_vis_mock.format(line, checks, alias)
guards.append(s)
return '\n'.join(guards)
# handle deferred ImportError
code = re_es_vis_import.sub(substitution_es_vis_import, code)
return code
def skip_future_imports_dependency(filepath):
"""
If an import failed, all subsequent imports will be skipped. The
fixture message provides the name of the module that failed.
"""
global skip_future_imports
if not skip_future_imports:
module_name = os.path.splitext(os.path.basename(filepath))[0]
assert module_name != ""
skip_future_imports = module_name
return unittest.skip("failed to import {}, skipping test!"
.format(skip_future_imports))
| gpl-3.0 |
epierson9/multiphenotype_methods | dimreducer.py | 1 | 15240 | import numpy as np
import scipy.linalg as slin
from multiphenotype_utils import (get_continuous_features_as_matrix, assert_zero_mean, add_id, remove_id_and_get_mat, make_age_bins, compute_column_means_with_incomplete_data, compute_correlation_matrix_with_incomplete_data, partition_dataframe_into_binary_and_continuous, divide_idxs_into_batches, cluster_and_plot_correlation_matrix, get_matrix_for_age_prediction)  # get_matrix_for_age_prediction is used by the age predictors below
from IPython import embed
from sklearn.linear_model import LinearRegression, LogisticRegression
import sklearn.decomposition as decomp
import pandas as pd
from sklearn.covariance import EmpiricalCovariance
from collections import Counter
import matplotlib.pyplot as plt
import seaborn as sns
import tensorflow as tf
import time, random, os
from scipy.special import expit
"""
This file contains classes to compute multi-phenotypes.
"""
class DimReducer(object):
"""
Base class.
"""
def __init__(self, **init_kwargs):
pass
def data_preprocessing_function(self, df):
"""
This function is applied to dataframes prior to applying fit or get_projections.
In general, it converts a dataframe to a matrix.
"""
print("Extracting continuous features as matrix.")
X, cols = get_continuous_features_as_matrix(df, return_cols = True)
self.feature_names = cols
return X
def fit(self, df, **fit_kwargs):
"""
fit a model using df.
"""
print("Fitting model using method %s." % self.__class__.__name__)
X = self.data_preprocessing_function(df)
if self.need_ages:
ages = df.loc[:, 'age_sex___age']
self._fit_from_processed_data(X, ages, **fit_kwargs)
else:
self._fit_from_processed_data(X, **fit_kwargs)
def get_projections(self, df, **projection_kwargs):
"""
use the fitted model to get projections for df.
"""
print("Getting projections using method %s." % self.__class__.__name__)
X = self.data_preprocessing_function(df)
Z = self._get_projections_from_processed_data(X, **projection_kwargs)
Z_df = add_id(Z, df) # Z_df will have the same index and individual id as df.
Z_df.columns = ['individual_id'] + ['z%s' % i for i in range(Z.shape[1])]
return Z_df
def reconstruct_data(self, Z_df):
raise NotImplementedError
def _fit_from_processed_data(self, X):
raise NotImplementedError
def _get_projections_from_processed_data(self, X):
raise NotImplementedError
class LinearDimReducer(DimReducer):
"""
Inherits from DimReducer: this is for the special case where we get directions and want to
compute projections on those directions.
"""
def __init__(self, k, plot_correlation_matrix = True):
self.k = k
self.need_ages = False
self.plot_correlation_matrix = plot_correlation_matrix
def data_preprocessing_function(self, df):
print("Extracting continuous features as matrix and zero-meaning.")
X, cols = get_continuous_features_as_matrix(df, return_cols = True)
self.feature_names = cols
X = X - compute_column_means_with_incomplete_data(X)
return X
def _get_projections_from_processed_data(self, X, project_onto_mean=None):
"""
U is a d x k matrix where k is the number of eigenvectors
Returns n x k matrix of projections
        project_onto_mean is a dummy variable that's needed for compatibility with stochastic models
"""
assert(X.shape[1] == self.U.shape[0])
assert(self.U.shape[1] == self.k)
assert(X.shape[1] == len(self.feature_names))
return X.dot(self.U)
def get_loading_matrix(self):
"""
Special method for this subclass: returns a dataframe L where L_ij is the loading of the ith feature, jth component.
index is feature names, column names are Z0, ... Z(k-1).
"""
loadings_df = pd.DataFrame(self.U)
loadings_df.columns = ['Z%i' % i for i in range(self.k)]
loadings_df.index = self.feature_names
return(loadings_df)
def get_sorted_loadings(self, z_idx):
"""
For a given z_idx, prints out the features contributing to that z in sorted order.
"""
u = self.U[:, z_idx]
sort_index = np.argsort(u)
u_sorted = u[sort_index]
feature_names_sorted = np.array(self.feature_names)[sort_index]
for feature_name, coef in zip(feature_names_sorted, u_sorted):
print("%6.3f %s" % (coef, feature_name))
return feature_names_sorted, u_sorted
def reconstruct_data(self, Z_df):
"""
Input: n x (k+1) data frame with ID column and k latent components
Output: n x (d+1) data frame with ID column and data projected into the original (post-processed) space
"""
Z = remove_id_and_get_mat(Z_df)
X = Z.dot(self.U.T)
df = add_id(Z=X, df_with_id=Z_df)
df.columns = ['individual_id'] + self.feature_names
return df
class PCA(LinearDimReducer):
def _fit_from_processed_data(self, X):
if np.isnan(X).sum() > 0:
print("Warning: X contains fraction %2.3f missing entries. Fitting PCA with incomplete data." % np.isnan(X).mean())
fit_with_incomplete_data = True
else:
fit_with_incomplete_data = False
if fit_with_incomplete_data:
X_zeroed = X - compute_column_means_with_incomplete_data(X)
cov, _ = compute_correlation_matrix_with_incomplete_data(X, correlation_type = 'covariance')
else:
X_zeroed = X - np.mean(X, axis=0)
cov = X_zeroed.T.dot(X_zeroed) / X_zeroed.shape[0]
if self.plot_correlation_matrix:
cluster_and_plot_correlation_matrix(cov, column_names = self.feature_names, how_to_sort = 'hierarchical')
s, U = np.linalg.eig(cov) # Returns eigenvalues s and eigenvectors U
idx = np.argsort(s)[::-1]
s = s[idx]
U = U[:, idx]
U = U[:, :self.k]
print('Distribution of eigenvalues:')
sns.distplot(s)
plt.show()
print('Taking eigenvalues: %s' % s[:self.k])
print('Total sum of eigenvalues : %.3f' % np.sum(s))
print('Total sum of eigenvalues taken : %.3f' % np.sum(s[:self.k]))
print('Total sum of eigenvalues not taken: %.3f' % np.sum(s[self.k:]))
self.U = U
self.s = s
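# Minimal usage sketch for the PCA reducer (hedged: `df` must follow the
# multiphenotype convention of continuous feature columns plus an
# 'individual_id' column; k=5 is illustrative):
#
#   pca = PCA(k=5)
#   pca.fit(df)
#   Z_df = pca.get_projections(df)      # columns: individual_id, z0..z4
#   loadings = pca.get_loading_matrix()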
class CPCA(LinearDimReducer):
"""
Requires dataframes passed in to have a column foreground and a column background.
"""
def __init__(self, k, alpha, make_diagnostic_plots=True):
self.k = k
self.alpha = alpha
self.need_ages = False
self.make_diagnostic_plots = make_diagnostic_plots
def _fit_from_processed_data(self, X, foreground, background, take_abs):
# Must pass in matrix X with a boolean column foreground and a boolean column background.
# Require both columns in case they are not mutually exhaustive (ie, there are some rows we don't want to use at all).
# Stores U = d x k matrix of k eigenvectors where U[:, 0] is first eigenvector
# and s = vector of eigenvalues
# take_abs is a boolean that determines whether we take the top k eigenvalues
# by absolute or signed value.
if np.isnan(X).sum() > 0:
print("Warning: X contains fraction %2.3f missing entries. Fitting CPCA with incomplete data." % np.isnan(X).mean())
fit_with_incomplete_data = True
else:
fit_with_incomplete_data = False
fg_mat = X[foreground,:]
bg_mat = X[background,:]
if fit_with_incomplete_data:
fg_mat = fg_mat - compute_column_means_with_incomplete_data(fg_mat)
bg_mat = bg_mat - compute_column_means_with_incomplete_data(bg_mat)
fg_cov, _ = compute_correlation_matrix_with_incomplete_data(fg_mat, correlation_type = 'covariance')
bg_cov, _ = compute_correlation_matrix_with_incomplete_data(bg_mat, correlation_type = 'covariance')
else:
fg_mat = fg_mat - np.mean(fg_mat, axis=0)
bg_mat = bg_mat - np.mean(bg_mat, axis=0)
fg_cov = fg_mat.T.dot(fg_mat) / fg_mat.shape[0]
bg_cov = bg_mat.T.dot(bg_mat) / bg_mat.shape[0]
assert fg_mat.shape[1] == bg_mat.shape[1]
diff_cov = fg_cov - self.alpha * bg_cov
if self.make_diagnostic_plots:
cluster_and_plot_correlation_matrix(diff_cov, column_names = self.feature_names, how_to_sort = 'hierarchical')
s, U = np.linalg.eig(diff_cov) # Returns eigenvalues s and eigenvectors U
if take_abs:
idx = np.argsort(np.abs(s))[::-1]
else:
idx = np.argsort(s)[::-1]
s = s[idx]
U = U[:, idx]
U = U[:, :self.k]
if self.make_diagnostic_plots:
print('Distribution of eigenvalues:')
sns.distplot(s)
plt.show()
print('Taking eigenvalues: %s' % s[:self.k])
print('Total sum of eigenvalues : %.3f' % np.sum(s))
print('Total sum of eigenvalues taken : %.3f' % np.sum(s[:self.k]))
print('Total sum of eigenvalues not taken: %.3f' % np.sum(s[self.k:]))
self.U = U
self.s = s
class TibshiraniMixedCriterion(LinearDimReducer):
"""
6.4 in https://web.stanford.edu/~hastie/Papers/spca_JASA.pdf
Compromise criterion: explain variance in X while also correlating with an external variable.
While we pass in age, this can also be used for eg a genetic matrix.
"""
def __init__(self, k, age_weighting):
self.k = k
self.age_weighting = age_weighting
assert(self.age_weighting >= 0)
assert(self.age_weighting <= 1)
self.need_ages = True
def _fit_from_processed_data(self, X, ages):
y = np.array(ages).reshape([len(ages), 1])
y = y / np.linalg.norm(y)
top_block = np.sqrt(1 - self.age_weighting) * X
bottom_block = np.sqrt(self.age_weighting) * (y.T).dot(X)
X_a = np.vstack([top_block, bottom_block])
u, s, v = np.linalg.svd(X_a, full_matrices = 0)
self.U = v[:self.k,].transpose()
for i in range(self.k):
assert(np.abs(np.linalg.norm(self.U[:, i]) - 1) < 1e-8)
class LinearAgePredictor(LinearDimReducer):
"""
Does a linear regression of age on phenotypes.
"""
def __init__(self):
self.k = 1
self.need_ages = True
def data_preprocessing_function(self, df):
X, self.feature_names = get_matrix_for_age_prediction(df, return_cols = True)
return X
def _fit_from_processed_data(self, X, ages):
self.linear_regression_model = LinearRegression(fit_intercept = True)
self.linear_regression_model.fit(X, ages)
self.U = self.linear_regression_model.coef_.reshape([-1, 1])
def _get_projections_from_processed_data(self, X):
return self.linear_regression_model.predict(X).reshape([len(X), 1])
class NeuralNetAgePredictor(DimReducer):
"""
Uses a neural net to predict age given phenotypes.
"""
def __init__(self, n_units_per_hidden_layer = 30, n_hidden_layers = 3):
self.n_units_per_hidden_layer = n_units_per_hidden_layer
self.n_hidden_layers = n_hidden_layers
self.max_epochs = 100
self.train_set_frac = .9
tf.logging.set_verbosity(tf.logging.INFO) # lots of annoying messages but this prints out loss.
self.k = 1
self.need_ages = True
def data_preprocessing_function(self, df):
X, self.feature_names = get_matrix_for_age_prediction(df, return_cols = True)
return X
def _fit_from_processed_data(self, X, ages):
t0 = time.time()
Y = np.array(ages)
feature_columns = [tf.feature_column.numeric_column('x', shape=np.array(X).shape[1:])]
hidden_unit_layers = [self.n_units_per_hidden_layer for layer in range(self.n_hidden_layers)]
# save checkpoints in a scratch dir so they don't fill up the disk.
tf_model_dir = '/scratch/tensorflow_model_checkpoints/'
os.system('rm -rf %s' % tf_model_dir)
self.model = tf.contrib.learn.DNNRegressor(feature_columns = feature_columns,
hidden_units = hidden_unit_layers,
model_dir = tf_model_dir,
optimizer=tf.train.AdamOptimizer(learning_rate=0.001),
config=tf.contrib.learn.RunConfig(save_checkpoints_secs=3))
# Train.
train_idxs = np.random.random(X.shape[0]) < self.train_set_frac # need a validation set to assess whether loss is improving.
train_input_fn = tf.estimator.inputs.numpy_input_fn(x={'x': X[train_idxs,]}, y=Y[train_idxs], batch_size = 100, num_epochs = self.max_epochs, shuffle = True)
validation_input_fn = tf.estimator.inputs.numpy_input_fn(x={'x': X[~train_idxs,]}, y=Y[~train_idxs], batch_size = 100, shuffle = False, num_epochs = 1)
validation_monitor = tf.contrib.learn.monitors.ValidationMonitor(input_fn = validation_input_fn, every_n_steps = 1000) # this doesn't actually stop us early; it just prints out a validation loss so we can make sure we're not undertraining.
self.model.fit(input_fn = train_input_fn, monitors = [validation_monitor])
print("Total time to train: %2.3f seconds" % (time.time() - t0))
def _get_projections_from_processed_data(self, X):
test_input_fn = tf.estimator.inputs.numpy_input_fn(x={'x': X}, y = None, batch_size=100, num_epochs = 1, shuffle=False)
predictions = self.model.predict_scores(input_fn = test_input_fn)
y_predicted = np.array([a for a in predictions])
return y_predicted.reshape([len(y_predicted), 1])
class MahalanobisDistance(DimReducer):
"""
Computes a person's Mahalanobis distance
using the mean and covariance estimated from a set of young people.
Uses sklearn; verified this matches up with the normal matrix computation.
"""
def __init__(self, age_lower, age_upper):
self.age_lower = age_lower
self.age_upper = age_upper
self.need_ages = True
self.k = 1
def _fit_from_processed_data(self, X, ages):
young_people = (ages >= self.age_lower) & (ages <= self.age_upper)
print("%i people between %s and %s used for mean/cov calculation" % (
young_people.sum(),
self.age_lower,
self.age_upper))
assert young_people.sum() > 1000
self.model = EmpiricalCovariance(assume_centered=False)
self.model.fit(X[young_people, :])
def _get_projections_from_processed_data(self, X):
md = np.sqrt(self.model.mahalanobis(X)).reshape([-1, 1])
return md
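# For reference, the distance returned above is the classical Mahalanobis
# distance d(x) = sqrt((x - mu)^T Sigma^{-1} (x - mu)), with mu and Sigma
# estimated from the young reference group only, so larger values flag
# phenotypes that sit far from the "young" distribution.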
| mit |
gotomypc/scikit-learn | examples/applications/topics_extraction_with_nmf_lda.py | 133 | 3517 | """
========================================================================================
Topics extraction with Non-Negative Matrix Factorization And Latent Dirichlet Allocation
========================================================================================
This is an example of applying Non-Negative Matrix Factorization
and Latent Dirichlet Allocation on a corpus of documents and
extracting additive models of the topic structure of the corpus.
The output is a list of topics, each represented as a list of terms
(weights are not shown).
The default parameters (n_samples / n_features / n_topics) should make
the example runnable in a couple of tens of seconds. You can try to
increase the dimensions of the problem, but be aware that the time
complexity is polynomial in NMF. In LDA, the time complexity is
proportional to (n_samples * iterations).
"""
# Author: Olivier Grisel <[email protected]>
# Lars Buitinck <[email protected]>
# Chyi-Kwei Yau <[email protected]>
# License: BSD 3 clause
from __future__ import print_function
from time import time
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
from sklearn.decomposition import NMF, LatentDirichletAllocation
from sklearn.datasets import fetch_20newsgroups
n_samples = 2000
n_features = 1000
n_topics = 10
n_top_words = 20
def print_top_words(model, feature_names, n_top_words):
for topic_idx, topic in enumerate(model.components_):
print("Topic #%d:" % topic_idx)
print(" ".join([feature_names[i] for i in topic.argsort()[:-n_top_words - 1:-1]]))
print()
# Load the 20 newsgroups dataset and vectorize it. We use a few heuristics
# to filter out useless terms early on: the posts are stripped of headers,
# footers and quoted replies, and common English words, words occurring in
# only one document or in at least 95% of the documents are removed.
t0 = time()
print("Loading dataset and extracting features...")
dataset = fetch_20newsgroups(shuffle=True, random_state=1,
remove=('headers', 'footers', 'quotes'))
data_samples = dataset.data[:n_samples]
# use tf-idf features for the NMF model
tfidf_vectorizer = TfidfVectorizer(max_df=0.95, min_df=2, max_features=n_features,
stop_words='english')
tfidf = tfidf_vectorizer.fit_transform(data_samples)
# use raw term-count (tf) features for the LDA model
tf_vectorizer = CountVectorizer(max_df=0.95, min_df=2, max_features=n_features,
stop_words='english')
tf = tf_vectorizer.fit_transform(data_samples)
print("done in %0.3fs." % (time() - t0))
# Fit the NMF model
print("Fitting the NMF model with tf-idf feature, n_samples=%d and n_features=%d..."
% (n_samples, n_features))
nmf = NMF(n_components=n_topics, random_state=1).fit(tfidf)
print("done in %0.3fs." % (time() - t0))
print("\nTopics in NMF model:")
tfidf_feature_names = tfidf_vectorizer.get_feature_names()
print_top_words(nmf, tfidf_feature_names, n_top_words)
print("\nFitting LDA models with tf feature, n_samples=%d and n_features=%d..."
% (n_samples, n_features))
lda = LatentDirichletAllocation(n_topics=n_topics, max_iter=5,
learning_method='online', learning_offset=50.,
random_state=0)
lda.fit(tf)
print("done in %0.3fs." % (time() - t0))
print("\nTopics in LDA model:")
tf_feature_names = tf_vectorizer.get_feature_names()
print_top_words(lda, tf_feature_names, n_top_words)
| bsd-3-clause |
Infatum/neural-networks | src/binary_classifier.py | 1 | 3465 | import time
import data_resolver
import feed_forward_neural_network as DNN
from enum import Enum
import matplotlib.pyplot as plt
import numpy as np
class Model_Type(Enum):
    Logistic_Regression = 0
    DNN = 1
    CNN = 2
class Binary_Classifier:
def __init__(self, layers_dimensions, model_type=Model_Type.Logistic_Regression, number_of_iterations=3000, learning_rate=0.0075, print_cost=True):
self._costs = []
self._model_type = model_type
self._learning_rate = learning_rate
self._numb_of_iter = number_of_iterations
self._print_cost = print_cost
        self._model = self.init_model(layers_dimensions, model_type)  # honor the requested model type
@property
def model(self):
return self._model
def init_model(self, layers_dims=0, model_type=Model_Type.Logistic_Regression):
"""
Initialize predictive model for a binary classification problem
        :param layers_dims: layer structure (list with the number of neurons in each layer)
        :param model_type: type of predictive model
        :return: the initialized model
"""
model = None
if model_type == Model_Type.DNN:
if layers_dims != 0 and len(layers_dims) > 0:
model = DNN.Feed_Forward_Neural_Network(layers_dims)
else:
raise ValueError('Please, provide a list with layers dimensions')
else:
            raise NotImplementedError("Haven't implemented this model type yet, sorry folks")
return model
def train_model(self, data_manager):
for i in range(0, self._numb_of_iter):
model_output = self._model.forward_propagation(data_manager.train_image_data)
cost = self._model.compute_cost(model_output, data_manager.train_label_data)
grads = self._model.backward_propagation(data_manager.train_label_data)
self._model.update_parameters(grads, self._learning_rate)
            # Print the cost every 100 training iterations
if self._print_cost and i % 100 == 0:
print("Cost after iteration %i: %f" % (i, cost))
if self._print_cost and i % 100 == 0:
self._costs.append(cost)
plt.plot(np.squeeze(self._costs))
plt.ylabel('cost')
plt.xlabel('iterations (per tens)')
plt.title('Learning rate =' + str(self._learning_rate))
def predict(self, data_resolver, dataset_type='train', print_results=True):
if dataset_type == 'train':
predictions, accuracy = self._model.predict(data_resolver.train_image_data, data_resolver.train_label_data)
if print_results:
print('Train accuracy: ', accuracy)
elif dataset_type == 'test':
predictions, accuracy = self._model.predict(data_resolver.test_image_data, data_resolver.test_label_data)
if print_results:
print('Test accuracy: ', accuracy)
else:
            raise ValueError('No development set available now. Please enter a correct value: train or test')
return predictions, accuracy
def main():
bin_classifier = Binary_Classifier((12288, 20, 7, 5, 1), Model_Type.DNN, 2500, print_cost=True)
data_manager = data_resolver.Data_Resolver(True)
bin_classifier.train_model(data_manager)
bin_classifier.predict(data_manager, 'train', True)
bin_classifier.predict(data_manager, 'test', True)
plt.show()
if __name__ == '__main__':
main() | mit |
harterj/moose | modules/stochastic_tools/examples/batch/execute.py | 9 | 2610 | #!/usr/bin/env python3
#* This file is part of the MOOSE framework
#* https://www.mooseframework.org
#*
#* All rights reserved, see COPYRIGHT for full restrictions
#* https://github.com/idaholab/moose/blob/master/COPYRIGHT
#*
#* Licensed under LGPL 2.1, please see LICENSE for details
#* https://www.gnu.org/licenses/lgpl-2.1.html
from __future__ import print_function
import time
import argparse
import pandas
import matplotlib.pyplot as plt
import multiprocessing
import mooseutils
def runner(infile, outfile, n_start, n_stop, mode, mpi):
data = dict(n_samples=[], total=[], per_proc=[], max_proc=[], time=[])
exe = mooseutils.find_moose_executable_recursive()
for n in range(n_start, n_stop+1):
n_samples = 2**n
exe_args = ['-i', infile, 'Samplers/mc/num_rows={}'.format(n_samples),
'MultiApps/runner/mode={}'.format(mode),
'Outputs/file_base={}'.format(mode)]
print('{} {}'.format(exe, ' '.join(exe_args)))
t = time.time()
out = mooseutils.run_executable(exe, *exe_args, mpi=mpi, suppress_output=True)
t = time.time() - t
local = pandas.read_csv('{}.csv'.format(mode))
data['n_samples'].append(n_samples)
data['total'].append(local['total'].iloc[-1])
data['per_proc'].append(local['per_proc'].iloc[-1])
data['max_proc'].append(local['max_proc'].iloc[-1])
data['time'].append(t)
df = pandas.DataFrame(data, columns=['n_samples', 'total', 'per_proc', 'max_proc', 'time'])
df.to_csv('{}_{}.csv'.format(outfile, mode), index=False)
def execute(infile, outfile, n_start, n_stop, modes, mpi=None):
"""Run input for memory data"""
if mpi:
for mode in modes:
runner(infile, outfile, n_start, n_stop, mode, mpi)
else:
jobs = []
for mode in modes:
p = multiprocessing.Process(target=runner, args=(infile, outfile, n_start, n_stop, mode, mpi))
p.start()
jobs.append(p)
for job in jobs:
job.join()
if __name__ == '__main__':
# This took about 8 hours on:
# Mac Pro (2019)
# 2.5 GHz 28-Core Intel Xeon W
# 240 GB 2933 MHz DDR4
execute('full_solve.i', 'full_solve_memory_serial', 0, 10, ['normal', 'batch-reset', 'batch-restore'])
execute('full_solve.i', 'full_solve_memory_mpi', 5, 16, ['normal', 'batch-reset', 'batch-restore'], 28)
execute('transient.i', 'transient_memory_serial', 0, 10, ['normal', 'batch-restore'])
execute('transient.i', 'transient_memory_mpi', 5, 16, ['normal', 'batch-restore'], 28)
| lgpl-2.1 |
TomAugspurger/pandas | pandas/tests/series/test_timeseries.py | 2 | 4799 | import numpy as np
import pytest
import pandas as pd
from pandas import DataFrame, DatetimeIndex, Series, date_range, timedelta_range
import pandas._testing as tm
class TestTimeSeries:
def test_timeseries_coercion(self):
idx = tm.makeDateIndex(10000)
ser = Series(np.random.randn(len(idx)), idx.astype(object))
assert ser.index.is_all_dates
assert isinstance(ser.index, DatetimeIndex)
def test_contiguous_boolean_preserve_freq(self):
rng = date_range("1/1/2000", "3/1/2000", freq="B")
mask = np.zeros(len(rng), dtype=bool)
mask[10:20] = True
masked = rng[mask]
expected = rng[10:20]
assert expected.freq == rng.freq
tm.assert_index_equal(masked, expected)
mask[22] = True
masked = rng[mask]
assert masked.freq is None
def test_promote_datetime_date(self):
rng = date_range("1/1/2000", periods=20)
ts = Series(np.random.randn(20), index=rng)
ts_slice = ts[5:]
ts2 = ts_slice.copy()
ts2.index = [x.date() for x in ts2.index]
result = ts + ts2
result2 = ts2 + ts
expected = ts + ts[5:]
expected.index = expected.index._with_freq(None)
tm.assert_series_equal(result, expected)
tm.assert_series_equal(result2, expected)
# test asfreq
result = ts2.asfreq("4H", method="ffill")
expected = ts[5:].asfreq("4H", method="ffill")
tm.assert_series_equal(result, expected)
result = rng.get_indexer(ts2.index)
expected = rng.get_indexer(ts_slice.index)
tm.assert_numpy_array_equal(result, expected)
def test_groupby_count_dateparseerror(self):
dr = date_range(start="1/1/2012", freq="5min", periods=10)
# BAD Example, datetimes first
s = Series(np.arange(10), index=[dr, np.arange(10)])
grouped = s.groupby(lambda x: x[1] % 2 == 0)
result = grouped.count()
s = Series(np.arange(10), index=[np.arange(10), dr])
grouped = s.groupby(lambda x: x[0] % 2 == 0)
expected = grouped.count()
tm.assert_series_equal(result, expected)
def test_series_map_box_timedelta(self):
# GH 11349
s = Series(timedelta_range("1 day 1 s", periods=5, freq="h"))
def f(x):
return x.total_seconds()
s.map(f)
s.apply(f)
DataFrame(s).applymap(f)
def test_asfreq_resample_set_correct_freq(self):
# GH5613
# we test if .asfreq() and .resample() set the correct value for .freq
df = pd.DataFrame(
{"date": ["2012-01-01", "2012-01-02", "2012-01-03"], "col": [1, 2, 3]}
)
df = df.set_index(pd.to_datetime(df.date))
# testing the settings before calling .asfreq() and .resample()
assert df.index.freq is None
assert df.index.inferred_freq == "D"
# does .asfreq() set .freq correctly?
assert df.asfreq("D").index.freq == "D"
# does .resample() set .freq correctly?
assert df.resample("D").asfreq().index.freq == "D"
def test_view_tz(self):
# GH#24024
ser = pd.Series(pd.date_range("2000", periods=4, tz="US/Central"))
result = ser.view("i8")
expected = pd.Series(
[
946706400000000000,
946792800000000000,
946879200000000000,
946965600000000000,
]
)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("tz", [None, "US/Central"])
def test_asarray_object_dt64(self, tz):
ser = pd.Series(pd.date_range("2000", periods=2, tz=tz))
with tm.assert_produces_warning(None):
# Future behavior (for tzaware case) with no warning
result = np.asarray(ser, dtype=object)
expected = np.array(
[pd.Timestamp("2000-01-01", tz=tz), pd.Timestamp("2000-01-02", tz=tz)]
)
tm.assert_numpy_array_equal(result, expected)
def test_asarray_tz_naive(self):
# This shouldn't produce a warning.
ser = pd.Series(pd.date_range("2000", periods=2))
expected = np.array(["2000-01-01", "2000-01-02"], dtype="M8[ns]")
result = np.asarray(ser)
tm.assert_numpy_array_equal(result, expected)
def test_asarray_tz_aware(self):
tz = "US/Central"
ser = pd.Series(pd.date_range("2000", periods=2, tz=tz))
expected = np.array(["2000-01-01T06", "2000-01-02T06"], dtype="M8[ns]")
result = np.asarray(ser, dtype="datetime64[ns]")
tm.assert_numpy_array_equal(result, expected)
# Old behavior with no warning
result = np.asarray(ser, dtype="M8[ns]")
tm.assert_numpy_array_equal(result, expected)
| bsd-3-clause |
alephu5/Soundbyte | environment/lib/python3.3/site-packages/matplotlib/backends/tkagg.py | 1 | 1026 |
from matplotlib.backends import _tkagg
import tkinter as Tk
def blit(photoimage, aggimage, bbox=None, colormode=1):
tk = photoimage.tk
if bbox is not None:
bbox_array = bbox.__array__()
else:
bbox_array = None
try:
tk.call("PyAggImagePhoto", photoimage, id(aggimage), colormode, id(bbox_array))
except Tk.TclError:
try:
try:
_tkagg.tkinit(tk.interpaddr(), 1)
except AttributeError:
_tkagg.tkinit(id(tk), 0)
tk.call("PyAggImagePhoto", photoimage, id(aggimage), colormode, id(bbox_array))
except (ImportError, AttributeError, Tk.TclError):
raise
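# A typical call site (added for reference; a sketch of how the Tk backend
# drives blit -- `canvas` here is a hypothetical FigureCanvasTkAgg instance):
#     blit(canvas._tkphoto, canvas.renderer._renderer, colormode=2)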
def test(aggimage):
import time
r = Tk.Tk()
c = Tk.Canvas(r, width=aggimage.width, height=aggimage.height)
c.pack()
p = Tk.PhotoImage(width=aggimage.width, height=aggimage.height)
blit(p, aggimage)
c.create_image(aggimage.width,aggimage.height,image=p)
blit(p, aggimage)
while 1: r.update_idletasks()
| gpl-3.0 |
trichter/yam | setup.py | 1 | 1890 | # Copyright 2017-2018 Tom Eulenfeld, GPLv3
import os.path
import re
from setuptools import find_packages, setup
def find_version(*paths):
fname = os.path.join(os.path.dirname(__file__), *paths)
with open(fname) as fp:
code = fp.read()
match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]", code, re.M)
if match:
return match.group(1)
raise RuntimeError("Unable to find version string.")
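# e.g. a line `__version__ = '1.0.0'` in yam/__init__.py makes
# find_version('yam', '__init__.py') return '1.0.0' (version value hypothetical).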
VERSION = find_version('yam', '__init__.py')
DESCRIPTION = (
'Yet another monitoring tool using correlations of '
'ambient noise (seismology)')
LONG_DESCRIPTION = (
'Please look at the project site for tutorials and information.')
ENTRY_POINTS = {
'console_scripts': ['yam-runtests = yam.tests:run',
'yam = yam.main:run_cmdline']}
REQUIRES = ['h5py', 'matplotlib', 'numpy', 'obspy>=1.1', 'obspyh5>=0.3',
'scipy>=0.18', 'setuptools', 'tqdm']
CLASSIFIERS = [
'Environment :: Console',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: GNU General Public License v3 (GPLv3)',
'Operating System :: OS Independent',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Topic :: Scientific/Engineering :: Physics'
]
setup(name='yam',
version=VERSION,
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
url='https://github.com/trichter/yam',
author='Tom Eulenfeld',
author_email='[email protected]',
license='MIT',
packages=find_packages(),
package_dir={'yam': 'yam'},
install_requires=REQUIRES,
entry_points=ENTRY_POINTS,
include_package_data=True,
zip_safe=False,
classifiers=CLASSIFIERS
)
| mit |
MJuddBooth/pandas | pandas/tests/series/test_replace.py | 2 | 9970 | # coding=utf-8
# pylint: disable-msg=E1101,W0612
import numpy as np
import pytest
import pandas as pd
import pandas.util.testing as tm
from .common import TestData
class TestSeriesReplace(TestData):
def test_replace(self):
N = 100
ser = pd.Series(np.random.randn(N))
ser[0:4] = np.nan
ser[6:10] = 0
# replace list with a single value
ser.replace([np.nan], -1, inplace=True)
exp = ser.fillna(-1)
tm.assert_series_equal(ser, exp)
rs = ser.replace(0., np.nan)
ser[ser == 0.] = np.nan
tm.assert_series_equal(rs, ser)
ser = pd.Series(np.fabs(np.random.randn(N)), tm.makeDateIndex(N),
dtype=object)
ser[:5] = np.nan
ser[6:10] = 'foo'
ser[20:30] = 'bar'
# replace list with a single value
rs = ser.replace([np.nan, 'foo', 'bar'], -1)
assert (rs[:5] == -1).all()
assert (rs[6:10] == -1).all()
assert (rs[20:30] == -1).all()
assert (pd.isna(ser[:5])).all()
# replace with different values
rs = ser.replace({np.nan: -1, 'foo': -2, 'bar': -3})
assert (rs[:5] == -1).all()
assert (rs[6:10] == -2).all()
assert (rs[20:30] == -3).all()
assert (pd.isna(ser[:5])).all()
# replace with different values with 2 lists
rs2 = ser.replace([np.nan, 'foo', 'bar'], [-1, -2, -3])
tm.assert_series_equal(rs, rs2)
# replace inplace
ser.replace([np.nan, 'foo', 'bar'], -1, inplace=True)
assert (ser[:5] == -1).all()
assert (ser[6:10] == -1).all()
assert (ser[20:30] == -1).all()
ser = pd.Series([np.nan, 0, np.inf])
tm.assert_series_equal(ser.replace(np.nan, 0), ser.fillna(0))
ser = pd.Series([np.nan, 0, 'foo', 'bar', np.inf, None, pd.NaT])
tm.assert_series_equal(ser.replace(np.nan, 0), ser.fillna(0))
filled = ser.copy()
filled[4] = 0
tm.assert_series_equal(ser.replace(np.inf, 0), filled)
ser = pd.Series(self.ts.index)
tm.assert_series_equal(ser.replace(np.nan, 0), ser.fillna(0))
# malformed
msg = r"Replacement lists must match in length\. Expecting 3 got 2"
with pytest.raises(ValueError, match=msg):
ser.replace([1, 2, 3], [np.nan, 0])
# make sure that we aren't just masking a TypeError because bools don't
# implement indexing
with pytest.raises(TypeError, match='Cannot compare types .+'):
ser.replace([1, 2], [np.nan, 0])
ser = pd.Series([0, 1, 2, 3, 4])
result = ser.replace([0, 1, 2, 3, 4], [4, 3, 2, 1, 0])
tm.assert_series_equal(result, pd.Series([4, 3, 2, 1, 0]))
def test_replace_gh5319(self):
# API change from 0.12?
# GH 5319
ser = pd.Series([0, np.nan, 2, 3, 4])
expected = ser.ffill()
result = ser.replace([np.nan])
tm.assert_series_equal(result, expected)
ser = pd.Series([0, np.nan, 2, 3, 4])
expected = ser.ffill()
result = ser.replace(np.nan)
tm.assert_series_equal(result, expected)
# GH 5797
ser = pd.Series(pd.date_range('20130101', periods=5))
expected = ser.copy()
expected.loc[2] = pd.Timestamp('20120101')
result = ser.replace({pd.Timestamp('20130103'):
pd.Timestamp('20120101')})
tm.assert_series_equal(result, expected)
result = ser.replace(pd.Timestamp('20130103'),
pd.Timestamp('20120101'))
tm.assert_series_equal(result, expected)
# GH 11792: Test with replacing NaT in a list with tz data
ts = pd.Timestamp('2015/01/01', tz='UTC')
s = pd.Series([pd.NaT, pd.Timestamp('2015/01/01', tz='UTC')])
result = s.replace([np.nan, pd.NaT], pd.Timestamp.min)
expected = pd.Series([pd.Timestamp.min, ts], dtype=object)
tm.assert_series_equal(expected, result)
def test_replace_with_single_list(self):
ser = pd.Series([0, 1, 2, 3, 4])
result = ser.replace([1, 2, 3])
tm.assert_series_equal(result, pd.Series([0, 0, 0, 0, 4]))
s = ser.copy()
s.replace([1, 2, 3], inplace=True)
tm.assert_series_equal(s, pd.Series([0, 0, 0, 0, 4]))
        # make sure things don't get corrupted when the fillna call fails
s = ser.copy()
msg = (r"Invalid fill method\. Expecting pad \(ffill\) or backfill"
r" \(bfill\)\. Got crash_cymbal")
with pytest.raises(ValueError, match=msg):
s.replace([1, 2, 3], inplace=True, method='crash_cymbal')
tm.assert_series_equal(s, ser)
def test_replace_with_empty_list(self):
# GH 21977
s = pd.Series([[1], [2, 3], [], np.nan, [4]])
expected = s
result = s.replace([], np.nan)
tm.assert_series_equal(result, expected)
# GH 19266
with pytest.raises(ValueError, match="cannot assign mismatch"):
s.replace({np.nan: []})
with pytest.raises(ValueError, match="cannot assign mismatch"):
s.replace({np.nan: ['dummy', 'alt']})
def test_replace_mixed_types(self):
s = pd.Series(np.arange(5), dtype='int64')
def check_replace(to_rep, val, expected):
sc = s.copy()
r = s.replace(to_rep, val)
sc.replace(to_rep, val, inplace=True)
tm.assert_series_equal(expected, r)
tm.assert_series_equal(expected, sc)
# MUST upcast to float
e = pd.Series([0., 1., 2., 3., 4.])
tr, v = [3], [3.0]
check_replace(tr, v, e)
# MUST upcast to float
e = pd.Series([0, 1, 2, 3.5, 4])
tr, v = [3], [3.5]
check_replace(tr, v, e)
# casts to object
e = pd.Series([0, 1, 2, 3.5, 'a'])
tr, v = [3, 4], [3.5, 'a']
check_replace(tr, v, e)
# again casts to object
e = pd.Series([0, 1, 2, 3.5, pd.Timestamp('20130101')])
tr, v = [3, 4], [3.5, pd.Timestamp('20130101')]
check_replace(tr, v, e)
# casts to object
e = pd.Series([0, 1, 2, 3.5, True], dtype='object')
tr, v = [3, 4], [3.5, True]
check_replace(tr, v, e)
# test an object with dates + floats + integers + strings
dr = pd.date_range('1/1/2001', '1/10/2001',
freq='D').to_series().reset_index(drop=True)
result = dr.astype(object).replace(
[dr[0], dr[1], dr[2]], [1.0, 2, 'a'])
expected = pd.Series([1.0, 2, 'a'] + dr[3:].tolist(), dtype=object)
tm.assert_series_equal(result, expected)
def test_replace_bool_with_string_no_op(self):
s = pd.Series([True, False, True])
result = s.replace('fun', 'in-the-sun')
tm.assert_series_equal(s, result)
def test_replace_bool_with_string(self):
# nonexistent elements
s = pd.Series([True, False, True])
result = s.replace(True, '2u')
expected = pd.Series(['2u', False, '2u'])
tm.assert_series_equal(expected, result)
def test_replace_bool_with_bool(self):
s = pd.Series([True, False, True])
result = s.replace(True, False)
expected = pd.Series([False] * len(s))
tm.assert_series_equal(expected, result)
def test_replace_with_dict_with_bool_keys(self):
s = pd.Series([True, False, True])
with pytest.raises(TypeError, match='Cannot compare types .+'):
s.replace({'asdf': 'asdb', True: 'yes'})
def test_replace2(self):
N = 100
ser = pd.Series(np.fabs(np.random.randn(N)), tm.makeDateIndex(N),
dtype=object)
ser[:5] = np.nan
ser[6:10] = 'foo'
ser[20:30] = 'bar'
# replace list with a single value
rs = ser.replace([np.nan, 'foo', 'bar'], -1)
assert (rs[:5] == -1).all()
assert (rs[6:10] == -1).all()
assert (rs[20:30] == -1).all()
assert (pd.isna(ser[:5])).all()
# replace with different values
rs = ser.replace({np.nan: -1, 'foo': -2, 'bar': -3})
assert (rs[:5] == -1).all()
assert (rs[6:10] == -2).all()
assert (rs[20:30] == -3).all()
assert (pd.isna(ser[:5])).all()
# replace with different values with 2 lists
rs2 = ser.replace([np.nan, 'foo', 'bar'], [-1, -2, -3])
tm.assert_series_equal(rs, rs2)
# replace inplace
ser.replace([np.nan, 'foo', 'bar'], -1, inplace=True)
assert (ser[:5] == -1).all()
assert (ser[6:10] == -1).all()
assert (ser[20:30] == -1).all()
def test_replace_with_empty_dictlike(self):
# GH 15289
s = pd.Series(list('abcd'))
tm.assert_series_equal(s, s.replace(dict()))
tm.assert_series_equal(s, s.replace(pd.Series([])))
def test_replace_string_with_number(self):
# GH 15743
s = pd.Series([1, 2, 3])
result = s.replace('2', np.nan)
expected = pd.Series([1, 2, 3])
tm.assert_series_equal(expected, result)
def test_replace_replacer_equals_replacement(self):
# GH 20656
# make sure all replacers are matching against original values
s = pd.Series(['a', 'b'])
expected = pd.Series(['b', 'a'])
result = s.replace({'a': 'b', 'b': 'a'})
tm.assert_series_equal(expected, result)
def test_replace_unicode_with_number(self):
# GH 15743
s = pd.Series([1, 2, 3])
result = s.replace(u'2', np.nan)
expected = pd.Series([1, 2, 3])
tm.assert_series_equal(expected, result)
def test_replace_mixed_types_with_string(self):
# Testing mixed
s = pd.Series([1, 2, 3, '4', 4, 5])
result = s.replace([2, '4'], np.nan)
expected = pd.Series([1, np.nan, 3, np.nan, 4, 5])
tm.assert_series_equal(expected, result)
| bsd-3-clause |
Connor-R/NSBL | ad_hoc/metric_comparison/NSBL_metric_comparison.py | 1 | 2546 | from py_db import db
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from scipy.stats import linregress
import argparse
import csv
import NSBL_helpers as helper
import os
# Investigating how well ZiPS projects BABIP onto the sim
db = db('NSBL')
def initiate():
path = os.getcwd()+'/'
for hb in ('hitters', 'pitchers'):
zips_list = []
observed_list = []
process(zips_list, observed_list, hb)
plot(zips_list, observed_list, path, 'zips', 'observed', 'babip', hb)
def process(zips_list, observed_list, hb):
if hb == 'hitters':
q = """SELECT YEAR, player_name,
n.babip AS sim_babip, z.babip AS zips_babip
FROM processed_compWAR_offensive n
JOIN zips_WAR_hitters_comp z USING (YEAR, player_name)
WHERE YEAR >= 2011
AND YEAR < 2019
AND n.pa > 400;
"""
else:
q = """SELECT YEAR, player_name,
n.babip AS sim_babip, z.babip AS zips_babip
FROM register_pitching_analytical n
JOIN zips_WAR_pitchers_comp z USING (YEAR, player_name)
WHERE YEAR >= 2011
AND YEAR < 2019
AND bip > 300;
"""
qry = q
res = db.query(qry)
for row in res:
year, player_name, sim, zips = row
zips_list.append(float(zips))
observed_list.append(float(sim))
def plot(x_list, y_list, path, x_name='x_title', y_name='y_title', val='babip', hb='hitters'):
size = len(x_list)
ay_min = min(min(x_list), min(y_list))
ay_max = max(max(x_list), max(y_list))
ax_min = min(min(x_list), min(y_list))
ax_max = max(max(x_list), max(y_list))
ylims = [ay_min,ay_max]
xlims = [ax_min,ax_max]
fit = linregress(x_list,y_list)
    label = '$slope = ' + str(fit.slope) + '$ \n $r^2 = ' + str(fit.rvalue**2) + '$'  # rvalue is r, so square it for r^2
data = pd.DataFrame(
{x_name:x_list,
y_name:y_list
})
ax = sns.regplot(x=x_name, y=y_name, data=data, ci=None)
title_str = x_name + ' vs ' + y_name + ': Sample Size = '
ax.set_title(title_str + str(size))
figtit = path+"NSBL_comparison_%s_%s_%s_vs_%s.png" % (val, hb, x_name, y_name)
ax.plot(xlims, ylims, linestyle='dashed', alpha=0.9, zorder=0, color='black')
ax.text(ax_min + ((ax_max-ax_min)/20), ay_max - ((ay_max-ay_min)/10), label, style='normal')
ax.set_xlim(xlims)
ax.set_ylim(ylims)
fig = ax.get_figure()
fig.savefig(figtit)
fig.clf()
if __name__ == "__main__":
initiate()
| mit |
pnedunuri/scikit-learn | sklearn/metrics/tests/test_ranking.py | 127 | 40813 | from __future__ import division, print_function
import numpy as np
from itertools import product
import warnings
from scipy.sparse import csr_matrix
from sklearn import datasets
from sklearn import svm
from sklearn import ensemble
from sklearn.datasets import make_multilabel_classification
from sklearn.random_projection import sparse_random_matrix
from sklearn.utils.validation import check_array, check_consistent_length
from sklearn.utils.validation import check_random_state
from sklearn.utils.testing import assert_raises, clean_warning_registry
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_warns
from sklearn.metrics import auc
from sklearn.metrics import average_precision_score
from sklearn.metrics import coverage_error
from sklearn.metrics import label_ranking_average_precision_score
from sklearn.metrics import precision_recall_curve
from sklearn.metrics import label_ranking_loss
from sklearn.metrics import roc_auc_score
from sklearn.metrics import roc_curve
from sklearn.metrics.base import UndefinedMetricWarning
###############################################################################
# Utilities for testing
def make_prediction(dataset=None, binary=False):
"""Make some classification predictions on a toy dataset using a SVC
    If binary is True, restrict to a binary classification problem instead of a
multiclass classification problem
"""
if dataset is None:
# import some data to play with
dataset = datasets.load_iris()
X = dataset.data
y = dataset.target
if binary:
# restrict to a binary classification task
X, y = X[y < 2], y[y < 2]
n_samples, n_features = X.shape
p = np.arange(n_samples)
rng = check_random_state(37)
rng.shuffle(p)
X, y = X[p], y[p]
half = int(n_samples / 2)
# add noisy features to make the problem harder and avoid perfect results
rng = np.random.RandomState(0)
X = np.c_[X, rng.randn(n_samples, 200 * n_features)]
# run classifier, get class probabilities and label predictions
clf = svm.SVC(kernel='linear', probability=True, random_state=0)
probas_pred = clf.fit(X[:half], y[:half]).predict_proba(X[half:])
if binary:
# only interested in probabilities of the positive case
# XXX: do we really want a special API for the binary case?
probas_pred = probas_pred[:, 1]
y_pred = clf.predict(X[half:])
y_true = y[half:]
return y_true, y_pred, probas_pred
###############################################################################
# Tests
def _auc(y_true, y_score):
"""Alternative implementation to check for correctness of
`roc_auc_score`."""
pos_label = np.unique(y_true)[1]
# Count the number of times positive samples are correctly ranked above
# negative samples.
pos = y_score[y_true == pos_label]
neg = y_score[y_true != pos_label]
diff_matrix = pos.reshape(1, -1) - neg.reshape(-1, 1)
n_correct = np.sum(diff_matrix > 0)
return n_correct / float(len(pos) * len(neg))
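# Illustrative check (added; values chosen by hand): for y_true = [0, 0, 1, 1]
# and y_score = [0.1, 0.4, 0.35, 0.8], three of the four (pos, neg) score pairs
# are ordered correctly, so _auc returns 3 / 4 = 0.75 -- the same value
# roc_auc_score gives.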
def _average_precision(y_true, y_score):
"""Alternative implementation to check for correctness of
`average_precision_score`."""
pos_label = np.unique(y_true)[1]
n_pos = np.sum(y_true == pos_label)
order = np.argsort(y_score)[::-1]
y_score = y_score[order]
y_true = y_true[order]
score = 0
for i in range(len(y_score)):
if y_true[i] == pos_label:
# Compute precision up to document i
# i.e, percentage of relevant documents up to document i.
prec = 0
for j in range(0, i + 1):
if y_true[j] == pos_label:
prec += 1.0
prec /= (i + 1.0)
score += prec
return score / n_pos
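# Illustrative check (added; same hand-picked values as above): sorting
# y_score = [0.1, 0.4, 0.35, 0.8] in decreasing order against
# y_true = [0, 0, 1, 1] gives precisions 1/1 and 2/3 at the two relevant
# documents, so _average_precision returns (1 + 2/3) / 2 = 0.8333...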
def test_roc_curve():
# Test Area under Receiver Operating Characteristic (ROC) curve
y_true, _, probas_pred = make_prediction(binary=True)
fpr, tpr, thresholds = roc_curve(y_true, probas_pred)
roc_auc = auc(fpr, tpr)
expected_auc = _auc(y_true, probas_pred)
assert_array_almost_equal(roc_auc, expected_auc, decimal=2)
assert_almost_equal(roc_auc, roc_auc_score(y_true, probas_pred))
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
def test_roc_curve_end_points():
    # Make sure that roc_curve returns a curve starting at 0 and ending at 1,
    # even in corner cases
rng = np.random.RandomState(0)
y_true = np.array([0] * 50 + [1] * 50)
y_pred = rng.randint(3, size=100)
fpr, tpr, thr = roc_curve(y_true, y_pred)
assert_equal(fpr[0], 0)
assert_equal(fpr[-1], 1)
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thr.shape)
def test_roc_returns_consistency():
# Test whether the returned threshold matches up with tpr
# make small toy dataset
y_true, _, probas_pred = make_prediction(binary=True)
fpr, tpr, thresholds = roc_curve(y_true, probas_pred)
# use the given thresholds to determine the tpr
tpr_correct = []
for t in thresholds:
tp = np.sum((probas_pred >= t) & y_true)
p = np.sum(y_true)
tpr_correct.append(1.0 * tp / p)
# compare tpr and tpr_correct to see if the thresholds' order was correct
assert_array_almost_equal(tpr, tpr_correct, decimal=2)
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
def test_roc_nonrepeating_thresholds():
# Test to ensure that we don't return spurious repeating thresholds.
# Duplicated thresholds can arise due to machine precision issues.
dataset = datasets.load_digits()
X = dataset['data']
y = dataset['target']
# This random forest classifier can only return probabilities
# significant to two decimal places
clf = ensemble.RandomForestClassifier(n_estimators=100, random_state=0)
# How well can the classifier predict whether a digit is less than 5?
# This task contributes floating point roundoff errors to the probabilities
train, test = slice(None, None, 2), slice(1, None, 2)
probas_pred = clf.fit(X[train], y[train]).predict_proba(X[test])
y_score = probas_pred[:, :5].sum(axis=1) # roundoff errors begin here
y_true = [yy < 5 for yy in y[test]]
# Check for repeating values in the thresholds
fpr, tpr, thresholds = roc_curve(y_true, y_score)
assert_equal(thresholds.size, np.unique(np.round(thresholds, 2)).size)
def test_roc_curve_multi():
# roc_curve not applicable for multi-class problems
y_true, _, probas_pred = make_prediction(binary=False)
assert_raises(ValueError, roc_curve, y_true, probas_pred)
def test_roc_curve_confidence():
# roc_curve for confidence scores
y_true, _, probas_pred = make_prediction(binary=True)
fpr, tpr, thresholds = roc_curve(y_true, probas_pred - 0.5)
roc_auc = auc(fpr, tpr)
assert_array_almost_equal(roc_auc, 0.90, decimal=2)
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
def test_roc_curve_hard():
# roc_curve for hard decisions
y_true, pred, probas_pred = make_prediction(binary=True)
# always predict one
trivial_pred = np.ones(y_true.shape)
fpr, tpr, thresholds = roc_curve(y_true, trivial_pred)
roc_auc = auc(fpr, tpr)
assert_array_almost_equal(roc_auc, 0.50, decimal=2)
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
# always predict zero
trivial_pred = np.zeros(y_true.shape)
fpr, tpr, thresholds = roc_curve(y_true, trivial_pred)
roc_auc = auc(fpr, tpr)
assert_array_almost_equal(roc_auc, 0.50, decimal=2)
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
# hard decisions
fpr, tpr, thresholds = roc_curve(y_true, pred)
roc_auc = auc(fpr, tpr)
assert_array_almost_equal(roc_auc, 0.78, decimal=2)
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
def test_roc_curve_one_label():
y_true = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
y_pred = [0, 1, 0, 1, 0, 1, 0, 1, 0, 1]
# assert there are warnings
w = UndefinedMetricWarning
fpr, tpr, thresholds = assert_warns(w, roc_curve, y_true, y_pred)
# all true labels, all fpr should be nan
assert_array_equal(fpr,
np.nan * np.ones(len(thresholds)))
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
# assert there are warnings
fpr, tpr, thresholds = assert_warns(w, roc_curve,
[1 - x for x in y_true],
y_pred)
# all negative labels, all tpr should be nan
assert_array_equal(tpr,
np.nan * np.ones(len(thresholds)))
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
def test_roc_curve_toydata():
# Binary classification
y_true = [0, 1]
y_score = [0, 1]
tpr, fpr, _ = roc_curve(y_true, y_score)
roc_auc = roc_auc_score(y_true, y_score)
assert_array_almost_equal(tpr, [0, 1])
assert_array_almost_equal(fpr, [1, 1])
assert_almost_equal(roc_auc, 1.)
y_true = [0, 1]
y_score = [1, 0]
tpr, fpr, _ = roc_curve(y_true, y_score)
roc_auc = roc_auc_score(y_true, y_score)
assert_array_almost_equal(tpr, [0, 1, 1])
assert_array_almost_equal(fpr, [0, 0, 1])
assert_almost_equal(roc_auc, 0.)
y_true = [1, 0]
y_score = [1, 1]
tpr, fpr, _ = roc_curve(y_true, y_score)
roc_auc = roc_auc_score(y_true, y_score)
assert_array_almost_equal(tpr, [0, 1])
assert_array_almost_equal(fpr, [0, 1])
assert_almost_equal(roc_auc, 0.5)
y_true = [1, 0]
y_score = [1, 0]
tpr, fpr, _ = roc_curve(y_true, y_score)
roc_auc = roc_auc_score(y_true, y_score)
assert_array_almost_equal(tpr, [0, 1])
assert_array_almost_equal(fpr, [1, 1])
assert_almost_equal(roc_auc, 1.)
y_true = [1, 0]
y_score = [0.5, 0.5]
tpr, fpr, _ = roc_curve(y_true, y_score)
roc_auc = roc_auc_score(y_true, y_score)
assert_array_almost_equal(tpr, [0, 1])
assert_array_almost_equal(fpr, [0, 1])
assert_almost_equal(roc_auc, .5)
y_true = [0, 0]
y_score = [0.25, 0.75]
tpr, fpr, _ = roc_curve(y_true, y_score)
assert_raises(ValueError, roc_auc_score, y_true, y_score)
assert_array_almost_equal(tpr, [0., 0.5, 1.])
assert_array_almost_equal(fpr, [np.nan, np.nan, np.nan])
y_true = [1, 1]
y_score = [0.25, 0.75]
tpr, fpr, _ = roc_curve(y_true, y_score)
assert_raises(ValueError, roc_auc_score, y_true, y_score)
assert_array_almost_equal(tpr, [np.nan, np.nan])
assert_array_almost_equal(fpr, [0.5, 1.])
# Multi-label classification task
y_true = np.array([[0, 1], [0, 1]])
y_score = np.array([[0, 1], [0, 1]])
assert_raises(ValueError, roc_auc_score, y_true, y_score, average="macro")
assert_raises(ValueError, roc_auc_score, y_true, y_score,
average="weighted")
assert_almost_equal(roc_auc_score(y_true, y_score, average="samples"), 1.)
assert_almost_equal(roc_auc_score(y_true, y_score, average="micro"), 1.)
y_true = np.array([[0, 1], [0, 1]])
y_score = np.array([[0, 1], [1, 0]])
assert_raises(ValueError, roc_auc_score, y_true, y_score, average="macro")
assert_raises(ValueError, roc_auc_score, y_true, y_score,
average="weighted")
assert_almost_equal(roc_auc_score(y_true, y_score, average="samples"), 0.5)
assert_almost_equal(roc_auc_score(y_true, y_score, average="micro"), 0.5)
y_true = np.array([[1, 0], [0, 1]])
y_score = np.array([[0, 1], [1, 0]])
assert_almost_equal(roc_auc_score(y_true, y_score, average="macro"), 0)
assert_almost_equal(roc_auc_score(y_true, y_score, average="weighted"), 0)
assert_almost_equal(roc_auc_score(y_true, y_score, average="samples"), 0)
assert_almost_equal(roc_auc_score(y_true, y_score, average="micro"), 0)
y_true = np.array([[1, 0], [0, 1]])
y_score = np.array([[0.5, 0.5], [0.5, 0.5]])
assert_almost_equal(roc_auc_score(y_true, y_score, average="macro"), .5)
assert_almost_equal(roc_auc_score(y_true, y_score, average="weighted"), .5)
assert_almost_equal(roc_auc_score(y_true, y_score, average="samples"), .5)
assert_almost_equal(roc_auc_score(y_true, y_score, average="micro"), .5)
def test_auc():
# Test Area Under Curve (AUC) computation
x = [0, 1]
y = [0, 1]
assert_array_almost_equal(auc(x, y), 0.5)
x = [1, 0]
y = [0, 1]
assert_array_almost_equal(auc(x, y), 0.5)
x = [1, 0, 0]
y = [0, 1, 1]
assert_array_almost_equal(auc(x, y), 0.5)
x = [0, 1]
y = [1, 1]
assert_array_almost_equal(auc(x, y), 1)
x = [0, 0.5, 1]
y = [0, 0.5, 1]
assert_array_almost_equal(auc(x, y), 0.5)
def test_auc_duplicate_values():
# Test Area Under Curve (AUC) computation with duplicate values
# auc() was previously sorting the x and y arrays according to the indices
# from numpy.argsort(x), which was reordering the tied 0's in this example
# and resulting in an incorrect area computation. This test detects the
# error.
x = [-2.0, 0.0, 0.0, 0.0, 1.0]
y1 = [2.0, 0.0, 0.5, 1.0, 1.0]
y2 = [2.0, 1.0, 0.0, 0.5, 1.0]
y3 = [2.0, 1.0, 0.5, 0.0, 1.0]
for y in (y1, y2, y3):
assert_array_almost_equal(auc(x, y, reorder=True), 3.0)
def test_auc_errors():
# Incompatible shapes
assert_raises(ValueError, auc, [0.0, 0.5, 1.0], [0.1, 0.2])
# Too few x values
assert_raises(ValueError, auc, [0.0], [0.1])
# x is not in order
assert_raises(ValueError, auc, [1.0, 0.0, 0.5], [0.0, 0.0, 0.0])
def test_auc_score_non_binary_class():
# Test that roc_auc_score function returns an error when trying
# to compute AUC for non-binary class values.
rng = check_random_state(404)
y_pred = rng.rand(10)
# y_true contains only one class value
y_true = np.zeros(10, dtype="int")
assert_raise_message(ValueError, "ROC AUC score is not defined",
roc_auc_score, y_true, y_pred)
y_true = np.ones(10, dtype="int")
assert_raise_message(ValueError, "ROC AUC score is not defined",
roc_auc_score, y_true, y_pred)
y_true = -np.ones(10, dtype="int")
assert_raise_message(ValueError, "ROC AUC score is not defined",
roc_auc_score, y_true, y_pred)
# y_true contains three different class values
y_true = rng.randint(0, 3, size=10)
assert_raise_message(ValueError, "multiclass format is not supported",
roc_auc_score, y_true, y_pred)
clean_warning_registry()
with warnings.catch_warnings(record=True):
rng = check_random_state(404)
y_pred = rng.rand(10)
# y_true contains only one class value
y_true = np.zeros(10, dtype="int")
assert_raise_message(ValueError, "ROC AUC score is not defined",
roc_auc_score, y_true, y_pred)
y_true = np.ones(10, dtype="int")
assert_raise_message(ValueError, "ROC AUC score is not defined",
roc_auc_score, y_true, y_pred)
y_true = -np.ones(10, dtype="int")
assert_raise_message(ValueError, "ROC AUC score is not defined",
roc_auc_score, y_true, y_pred)
# y_true contains three different class values
y_true = rng.randint(0, 3, size=10)
assert_raise_message(ValueError, "multiclass format is not supported",
roc_auc_score, y_true, y_pred)
def test_precision_recall_curve():
y_true, _, probas_pred = make_prediction(binary=True)
_test_precision_recall_curve(y_true, probas_pred)
# Use {-1, 1} for labels; make sure original labels aren't modified
y_true[np.where(y_true == 0)] = -1
y_true_copy = y_true.copy()
_test_precision_recall_curve(y_true, probas_pred)
assert_array_equal(y_true_copy, y_true)
labels = [1, 0, 0, 1]
predict_probas = [1, 2, 3, 4]
p, r, t = precision_recall_curve(labels, predict_probas)
assert_array_almost_equal(p, np.array([0.5, 0.33333333, 0.5, 1., 1.]))
assert_array_almost_equal(r, np.array([1., 0.5, 0.5, 0.5, 0.]))
assert_array_almost_equal(t, np.array([1, 2, 3, 4]))
assert_equal(p.size, r.size)
assert_equal(p.size, t.size + 1)
def test_precision_recall_curve_pos_label():
y_true, _, probas_pred = make_prediction(binary=False)
pos_label = 2
p, r, thresholds = precision_recall_curve(y_true,
probas_pred[:, pos_label],
pos_label=pos_label)
p2, r2, thresholds2 = precision_recall_curve(y_true == pos_label,
probas_pred[:, pos_label])
assert_array_almost_equal(p, p2)
assert_array_almost_equal(r, r2)
assert_array_almost_equal(thresholds, thresholds2)
assert_equal(p.size, r.size)
assert_equal(p.size, thresholds.size + 1)
def _test_precision_recall_curve(y_true, probas_pred):
    # Test Precision-Recall and area under PR curve
p, r, thresholds = precision_recall_curve(y_true, probas_pred)
precision_recall_auc = auc(r, p)
assert_array_almost_equal(precision_recall_auc, 0.85, 2)
assert_array_almost_equal(precision_recall_auc,
average_precision_score(y_true, probas_pred))
assert_almost_equal(_average_precision(y_true, probas_pred),
precision_recall_auc, 1)
assert_equal(p.size, r.size)
assert_equal(p.size, thresholds.size + 1)
# Smoke test in the case of proba having only one value
p, r, thresholds = precision_recall_curve(y_true,
np.zeros_like(probas_pred))
precision_recall_auc = auc(r, p)
assert_array_almost_equal(precision_recall_auc, 0.75, 3)
assert_equal(p.size, r.size)
assert_equal(p.size, thresholds.size + 1)
def test_precision_recall_curve_errors():
# Contains non-binary labels
assert_raises(ValueError, precision_recall_curve,
[0, 1, 2], [[0.0], [1.0], [1.0]])
def test_precision_recall_curve_toydata():
with np.errstate(all="raise"):
# Binary classification
y_true = [0, 1]
y_score = [0, 1]
p, r, _ = precision_recall_curve(y_true, y_score)
auc_prc = average_precision_score(y_true, y_score)
assert_array_almost_equal(p, [1, 1])
assert_array_almost_equal(r, [1, 0])
assert_almost_equal(auc_prc, 1.)
y_true = [0, 1]
y_score = [1, 0]
p, r, _ = precision_recall_curve(y_true, y_score)
auc_prc = average_precision_score(y_true, y_score)
assert_array_almost_equal(p, [0.5, 0., 1.])
assert_array_almost_equal(r, [1., 0., 0.])
assert_almost_equal(auc_prc, 0.25)
y_true = [1, 0]
y_score = [1, 1]
p, r, _ = precision_recall_curve(y_true, y_score)
auc_prc = average_precision_score(y_true, y_score)
assert_array_almost_equal(p, [0.5, 1])
assert_array_almost_equal(r, [1., 0])
assert_almost_equal(auc_prc, .75)
y_true = [1, 0]
y_score = [1, 0]
p, r, _ = precision_recall_curve(y_true, y_score)
auc_prc = average_precision_score(y_true, y_score)
assert_array_almost_equal(p, [1, 1])
assert_array_almost_equal(r, [1, 0])
assert_almost_equal(auc_prc, 1.)
y_true = [1, 0]
y_score = [0.5, 0.5]
p, r, _ = precision_recall_curve(y_true, y_score)
auc_prc = average_precision_score(y_true, y_score)
assert_array_almost_equal(p, [0.5, 1])
assert_array_almost_equal(r, [1, 0.])
assert_almost_equal(auc_prc, .75)
y_true = [0, 0]
y_score = [0.25, 0.75]
assert_raises(Exception, precision_recall_curve, y_true, y_score)
assert_raises(Exception, average_precision_score, y_true, y_score)
y_true = [1, 1]
y_score = [0.25, 0.75]
p, r, _ = precision_recall_curve(y_true, y_score)
assert_almost_equal(average_precision_score(y_true, y_score), 1.)
assert_array_almost_equal(p, [1., 1., 1.])
assert_array_almost_equal(r, [1, 0.5, 0.])
# Multi-label classification task
y_true = np.array([[0, 1], [0, 1]])
y_score = np.array([[0, 1], [0, 1]])
assert_raises(Exception, average_precision_score, y_true, y_score,
average="macro")
assert_raises(Exception, average_precision_score, y_true, y_score,
average="weighted")
assert_almost_equal(average_precision_score(y_true, y_score,
average="samples"), 1.)
assert_almost_equal(average_precision_score(y_true, y_score,
average="micro"), 1.)
y_true = np.array([[0, 1], [0, 1]])
y_score = np.array([[0, 1], [1, 0]])
assert_raises(Exception, average_precision_score, y_true, y_score,
average="macro")
assert_raises(Exception, average_precision_score, y_true, y_score,
average="weighted")
assert_almost_equal(average_precision_score(y_true, y_score,
average="samples"), 0.625)
assert_almost_equal(average_precision_score(y_true, y_score,
average="micro"), 0.625)
y_true = np.array([[1, 0], [0, 1]])
y_score = np.array([[0, 1], [1, 0]])
assert_almost_equal(average_precision_score(y_true, y_score,
average="macro"), 0.25)
assert_almost_equal(average_precision_score(y_true, y_score,
average="weighted"), 0.25)
assert_almost_equal(average_precision_score(y_true, y_score,
average="samples"), 0.25)
assert_almost_equal(average_precision_score(y_true, y_score,
average="micro"), 0.25)
y_true = np.array([[1, 0], [0, 1]])
y_score = np.array([[0.5, 0.5], [0.5, 0.5]])
assert_almost_equal(average_precision_score(y_true, y_score,
average="macro"), 0.75)
assert_almost_equal(average_precision_score(y_true, y_score,
average="weighted"), 0.75)
assert_almost_equal(average_precision_score(y_true, y_score,
average="samples"), 0.75)
assert_almost_equal(average_precision_score(y_true, y_score,
average="micro"), 0.75)
def test_score_scale_invariance():
# Test that average_precision_score and roc_auc_score are invariant by
# the scaling or shifting of probabilities
y_true, _, probas_pred = make_prediction(binary=True)
roc_auc = roc_auc_score(y_true, probas_pred)
roc_auc_scaled = roc_auc_score(y_true, 100 * probas_pred)
roc_auc_shifted = roc_auc_score(y_true, probas_pred - 10)
assert_equal(roc_auc, roc_auc_scaled)
assert_equal(roc_auc, roc_auc_shifted)
pr_auc = average_precision_score(y_true, probas_pred)
pr_auc_scaled = average_precision_score(y_true, 100 * probas_pred)
pr_auc_shifted = average_precision_score(y_true, probas_pred - 10)
assert_equal(pr_auc, pr_auc_scaled)
assert_equal(pr_auc, pr_auc_shifted)
def check_lrap_toy(lrap_score):
    # Check on several small examples that it works
assert_almost_equal(lrap_score([[0, 1]], [[0.25, 0.75]]), 1)
assert_almost_equal(lrap_score([[0, 1]], [[0.75, 0.25]]), 1 / 2)
assert_almost_equal(lrap_score([[1, 1]], [[0.75, 0.25]]), 1)
assert_almost_equal(lrap_score([[0, 0, 1]], [[0.25, 0.5, 0.75]]), 1)
assert_almost_equal(lrap_score([[0, 1, 0]], [[0.25, 0.5, 0.75]]), 1 / 2)
assert_almost_equal(lrap_score([[0, 1, 1]], [[0.25, 0.5, 0.75]]), 1)
assert_almost_equal(lrap_score([[1, 0, 0]], [[0.25, 0.5, 0.75]]), 1 / 3)
assert_almost_equal(lrap_score([[1, 0, 1]], [[0.25, 0.5, 0.75]]),
(2 / 3 + 1 / 1) / 2)
assert_almost_equal(lrap_score([[1, 1, 0]], [[0.25, 0.5, 0.75]]),
(2 / 3 + 1 / 2) / 2)
assert_almost_equal(lrap_score([[0, 0, 1]], [[0.75, 0.5, 0.25]]), 1 / 3)
assert_almost_equal(lrap_score([[0, 1, 0]], [[0.75, 0.5, 0.25]]), 1 / 2)
assert_almost_equal(lrap_score([[0, 1, 1]], [[0.75, 0.5, 0.25]]),
(1 / 2 + 2 / 3) / 2)
assert_almost_equal(lrap_score([[1, 0, 0]], [[0.75, 0.5, 0.25]]), 1)
assert_almost_equal(lrap_score([[1, 0, 1]], [[0.75, 0.5, 0.25]]),
(1 + 2 / 3) / 2)
assert_almost_equal(lrap_score([[1, 1, 0]], [[0.75, 0.5, 0.25]]), 1)
assert_almost_equal(lrap_score([[1, 1, 1]], [[0.75, 0.5, 0.25]]), 1)
assert_almost_equal(lrap_score([[0, 0, 1]], [[0.5, 0.75, 0.25]]), 1 / 3)
assert_almost_equal(lrap_score([[0, 1, 0]], [[0.5, 0.75, 0.25]]), 1)
assert_almost_equal(lrap_score([[0, 1, 1]], [[0.5, 0.75, 0.25]]),
(1 + 2 / 3) / 2)
assert_almost_equal(lrap_score([[1, 0, 0]], [[0.5, 0.75, 0.25]]), 1 / 2)
assert_almost_equal(lrap_score([[1, 0, 1]], [[0.5, 0.75, 0.25]]),
(1 / 2 + 2 / 3) / 2)
assert_almost_equal(lrap_score([[1, 1, 0]], [[0.5, 0.75, 0.25]]), 1)
assert_almost_equal(lrap_score([[1, 1, 1]], [[0.5, 0.75, 0.25]]), 1)
# Tie handling
assert_almost_equal(lrap_score([[1, 0]], [[0.5, 0.5]]), 0.5)
assert_almost_equal(lrap_score([[0, 1]], [[0.5, 0.5]]), 0.5)
assert_almost_equal(lrap_score([[1, 1]], [[0.5, 0.5]]), 1)
assert_almost_equal(lrap_score([[0, 0, 1]], [[0.25, 0.5, 0.5]]), 0.5)
assert_almost_equal(lrap_score([[0, 1, 0]], [[0.25, 0.5, 0.5]]), 0.5)
assert_almost_equal(lrap_score([[0, 1, 1]], [[0.25, 0.5, 0.5]]), 1)
assert_almost_equal(lrap_score([[1, 0, 0]], [[0.25, 0.5, 0.5]]), 1 / 3)
assert_almost_equal(lrap_score([[1, 0, 1]], [[0.25, 0.5, 0.5]]),
(2 / 3 + 1 / 2) / 2)
assert_almost_equal(lrap_score([[1, 1, 0]], [[0.25, 0.5, 0.5]]),
(2 / 3 + 1 / 2) / 2)
assert_almost_equal(lrap_score([[1, 1, 1]], [[0.25, 0.5, 0.5]]), 1)
assert_almost_equal(lrap_score([[1, 1, 0]], [[0.5, 0.5, 0.5]]), 2 / 3)
assert_almost_equal(lrap_score([[1, 1, 1, 0]], [[0.5, 0.5, 0.5, 0.5]]),
3 / 4)
def check_zero_or_all_relevant_labels(lrap_score):
random_state = check_random_state(0)
for n_labels in range(2, 5):
y_score = random_state.uniform(size=(1, n_labels))
y_score_ties = np.zeros_like(y_score)
# No relevant labels
y_true = np.zeros((1, n_labels))
assert_equal(lrap_score(y_true, y_score), 1.)
assert_equal(lrap_score(y_true, y_score_ties), 1.)
# Only relevant labels
y_true = np.ones((1, n_labels))
assert_equal(lrap_score(y_true, y_score), 1.)
assert_equal(lrap_score(y_true, y_score_ties), 1.)
# Degenerate case: only one label
assert_almost_equal(lrap_score([[1], [0], [1], [0]],
[[0.5], [0.5], [0.5], [0.5]]), 1.)
def check_lrap_error_raised(lrap_score):
    # Raise ValueError if the format is not appropriate
assert_raises(ValueError, lrap_score,
[0, 1, 0], [0.25, 0.3, 0.2])
assert_raises(ValueError, lrap_score, [0, 1, 2],
[[0.25, 0.75, 0.0], [0.7, 0.3, 0.0], [0.8, 0.2, 0.0]])
assert_raises(ValueError, lrap_score, [(0), (1), (2)],
[[0.25, 0.75, 0.0], [0.7, 0.3, 0.0], [0.8, 0.2, 0.0]])
    # Check that y_true.shape != y_score.shape raises the proper exception
assert_raises(ValueError, lrap_score, [[0, 1], [0, 1]], [0, 1])
assert_raises(ValueError, lrap_score, [[0, 1], [0, 1]], [[0, 1]])
assert_raises(ValueError, lrap_score, [[0, 1], [0, 1]], [[0], [1]])
assert_raises(ValueError, lrap_score, [[0, 1]], [[0, 1], [0, 1]])
assert_raises(ValueError, lrap_score, [[0], [1]], [[0, 1], [0, 1]])
assert_raises(ValueError, lrap_score, [[0, 1], [0, 1]], [[0], [1]])
def check_lrap_only_ties(lrap_score):
# Check tie handling in score
# Basic check with only ties and increasing label space
for n_labels in range(2, 10):
y_score = np.ones((1, n_labels))
        # Check for a growing number of consecutive relevant labels
for n_relevant in range(1, n_labels):
# Check for a bunch of positions
for pos in range(n_labels - n_relevant):
y_true = np.zeros((1, n_labels))
y_true[0, pos:pos + n_relevant] = 1
assert_almost_equal(lrap_score(y_true, y_score),
n_relevant / n_labels)
def check_lrap_without_tie_and_increasing_score(lrap_score):
# Check that Label ranking average precision works for various
# Basic check with increasing label space size and decreasing score
for n_labels in range(2, 10):
y_score = n_labels - (np.arange(n_labels).reshape((1, n_labels)) + 1)
# First and last
y_true = np.zeros((1, n_labels))
y_true[0, 0] = 1
y_true[0, -1] = 1
assert_almost_equal(lrap_score(y_true, y_score),
(2 / n_labels + 1) / 2)
        # Check for a growing number of consecutive relevant labels
for n_relevant in range(1, n_labels):
            # Check for a bunch of positions
for pos in range(n_labels - n_relevant):
y_true = np.zeros((1, n_labels))
y_true[0, pos:pos + n_relevant] = 1
assert_almost_equal(lrap_score(y_true, y_score),
sum((r + 1) / ((pos + r + 1) * n_relevant)
for r in range(n_relevant)))
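# Worked instance of the formula above (added): n_labels = 3, pos = 1,
# n_relevant = 1 gives y_true = [0, 1, 0] against the decreasing scores
# [2, 1, 0]; the single relevant label sits at rank 2, so the expected
# LRAP is (0 + 1) / ((1 + 0 + 1) * 1) = 1 / 2.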
def _my_lrap(y_true, y_score):
"""Simple implementation of label ranking average precision"""
check_consistent_length(y_true, y_score)
y_true = check_array(y_true)
y_score = check_array(y_score)
n_samples, n_labels = y_true.shape
score = np.empty((n_samples, ))
for i in range(n_samples):
        # The best rank corresponds to 1. Ranks higher than 1 are worse.
        # The best inverse ranking corresponds to n_labels.
unique_rank, inv_rank = np.unique(y_score[i], return_inverse=True)
n_ranks = unique_rank.size
rank = n_ranks - inv_rank
        # Ranks need to be corrected to take ties into account:
        # e.g. two labels tied at rank 1 both count as rank 2.
corr_rank = np.bincount(rank, minlength=n_ranks + 1).cumsum()
rank = corr_rank[rank]
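        # (added worked example) for y_score[i] = [0.5, 0.5, 0.2]:
        # unique_rank = [0.2, 0.5], inv_rank = [1, 1, 0], so rank = [1, 1, 2];
        # bincount -> [0, 2, 1], cumsum -> [0, 2, 3], hence rank = [2, 2, 3]:
        # the two tied top scores both get rank 2.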
relevant = y_true[i].nonzero()[0]
if relevant.size == 0 or relevant.size == n_labels:
score[i] = 1
continue
score[i] = 0.
for label in relevant:
            # Count the number of relevant labels ranked at least as high
            # (rank <= this label's rank), including the label itself.
n_ranked_above = sum(rank[r] <= rank[label] for r in relevant)
# Weight by the rank of the actual label
score[i] += n_ranked_above / rank[label]
score[i] /= relevant.size
return score.mean()
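# Quick sanity check for _my_lrap (added; mirrors the toy case tested above):
# y_true = [[1, 0, 1]] with y_score = [[0.25, 0.5, 0.75]] ranks the relevant
# labels 1st and 3rd, giving per-label precisions 1/1 and 2/3 and an LRAP of
# (1 + 2/3) / 2.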
def check_alternative_lrap_implementation(lrap_score, n_classes=5,
n_samples=20, random_state=0):
_, y_true = make_multilabel_classification(n_features=1,
allow_unlabeled=False,
random_state=random_state,
n_classes=n_classes,
n_samples=n_samples)
# Score with ties
y_score = sparse_random_matrix(n_components=y_true.shape[0],
n_features=y_true.shape[1],
random_state=random_state)
if hasattr(y_score, "toarray"):
y_score = y_score.toarray()
score_lrap = label_ranking_average_precision_score(y_true, y_score)
score_my_lrap = _my_lrap(y_true, y_score)
assert_almost_equal(score_lrap, score_my_lrap)
# Uniform score
random_state = check_random_state(random_state)
y_score = random_state.uniform(size=(n_samples, n_classes))
score_lrap = label_ranking_average_precision_score(y_true, y_score)
score_my_lrap = _my_lrap(y_true, y_score)
assert_almost_equal(score_lrap, score_my_lrap)
def test_label_ranking_avp():
for fn in [label_ranking_average_precision_score, _my_lrap]:
yield check_lrap_toy, fn
yield check_lrap_without_tie_and_increasing_score, fn
yield check_lrap_only_ties, fn
yield check_zero_or_all_relevant_labels, fn
yield check_lrap_error_raised, label_ranking_average_precision_score
for n_samples, n_classes, random_state in product((1, 2, 8, 20),
(2, 5, 10),
range(1)):
yield (check_alternative_lrap_implementation,
label_ranking_average_precision_score,
n_classes, n_samples, random_state)
def test_coverage_error():
# Toy case
assert_almost_equal(coverage_error([[0, 1]], [[0.25, 0.75]]), 1)
assert_almost_equal(coverage_error([[0, 1]], [[0.75, 0.25]]), 2)
assert_almost_equal(coverage_error([[1, 1]], [[0.75, 0.25]]), 2)
assert_almost_equal(coverage_error([[0, 0]], [[0.75, 0.25]]), 0)
assert_almost_equal(coverage_error([[0, 0, 0]], [[0.25, 0.5, 0.75]]), 0)
assert_almost_equal(coverage_error([[0, 0, 1]], [[0.25, 0.5, 0.75]]), 1)
assert_almost_equal(coverage_error([[0, 1, 0]], [[0.25, 0.5, 0.75]]), 2)
assert_almost_equal(coverage_error([[0, 1, 1]], [[0.25, 0.5, 0.75]]), 2)
assert_almost_equal(coverage_error([[1, 0, 0]], [[0.25, 0.5, 0.75]]), 3)
assert_almost_equal(coverage_error([[1, 0, 1]], [[0.25, 0.5, 0.75]]), 3)
assert_almost_equal(coverage_error([[1, 1, 0]], [[0.25, 0.5, 0.75]]), 3)
assert_almost_equal(coverage_error([[1, 1, 1]], [[0.25, 0.5, 0.75]]), 3)
assert_almost_equal(coverage_error([[0, 0, 0]], [[0.75, 0.5, 0.25]]), 0)
assert_almost_equal(coverage_error([[0, 0, 1]], [[0.75, 0.5, 0.25]]), 3)
assert_almost_equal(coverage_error([[0, 1, 0]], [[0.75, 0.5, 0.25]]), 2)
assert_almost_equal(coverage_error([[0, 1, 1]], [[0.75, 0.5, 0.25]]), 3)
assert_almost_equal(coverage_error([[1, 0, 0]], [[0.75, 0.5, 0.25]]), 1)
assert_almost_equal(coverage_error([[1, 0, 1]], [[0.75, 0.5, 0.25]]), 3)
assert_almost_equal(coverage_error([[1, 1, 0]], [[0.75, 0.5, 0.25]]), 2)
assert_almost_equal(coverage_error([[1, 1, 1]], [[0.75, 0.5, 0.25]]), 3)
assert_almost_equal(coverage_error([[0, 0, 0]], [[0.5, 0.75, 0.25]]), 0)
assert_almost_equal(coverage_error([[0, 0, 1]], [[0.5, 0.75, 0.25]]), 3)
assert_almost_equal(coverage_error([[0, 1, 0]], [[0.5, 0.75, 0.25]]), 1)
assert_almost_equal(coverage_error([[0, 1, 1]], [[0.5, 0.75, 0.25]]), 3)
assert_almost_equal(coverage_error([[1, 0, 0]], [[0.5, 0.75, 0.25]]), 2)
assert_almost_equal(coverage_error([[1, 0, 1]], [[0.5, 0.75, 0.25]]), 3)
assert_almost_equal(coverage_error([[1, 1, 0]], [[0.5, 0.75, 0.25]]), 2)
assert_almost_equal(coverage_error([[1, 1, 1]], [[0.5, 0.75, 0.25]]), 3)
    # Non-trivial case
assert_almost_equal(coverage_error([[0, 1, 0], [1, 1, 0]],
[[0.1, 10., -3], [0, 1, 3]]),
(1 + 3) / 2.)
assert_almost_equal(coverage_error([[0, 1, 0], [1, 1, 0], [0, 1, 1]],
[[0.1, 10, -3], [0, 1, 3], [0, 2, 0]]),
(1 + 3 + 3) / 3.)
assert_almost_equal(coverage_error([[0, 1, 0], [1, 1, 0], [0, 1, 1]],
[[0.1, 10, -3], [3, 1, 3], [0, 2, 0]]),
(1 + 3 + 3) / 3.)
def test_coverage_tie_handling():
assert_almost_equal(coverage_error([[0, 0]], [[0.5, 0.5]]), 0)
assert_almost_equal(coverage_error([[1, 0]], [[0.5, 0.5]]), 2)
assert_almost_equal(coverage_error([[0, 1]], [[0.5, 0.5]]), 2)
assert_almost_equal(coverage_error([[1, 1]], [[0.5, 0.5]]), 2)
assert_almost_equal(coverage_error([[0, 0, 0]], [[0.25, 0.5, 0.5]]), 0)
assert_almost_equal(coverage_error([[0, 0, 1]], [[0.25, 0.5, 0.5]]), 2)
assert_almost_equal(coverage_error([[0, 1, 0]], [[0.25, 0.5, 0.5]]), 2)
assert_almost_equal(coverage_error([[0, 1, 1]], [[0.25, 0.5, 0.5]]), 2)
assert_almost_equal(coverage_error([[1, 0, 0]], [[0.25, 0.5, 0.5]]), 3)
assert_almost_equal(coverage_error([[1, 0, 1]], [[0.25, 0.5, 0.5]]), 3)
assert_almost_equal(coverage_error([[1, 1, 0]], [[0.25, 0.5, 0.5]]), 3)
assert_almost_equal(coverage_error([[1, 1, 1]], [[0.25, 0.5, 0.5]]), 3)
def test_label_ranking_loss():
assert_almost_equal(label_ranking_loss([[0, 1]], [[0.25, 0.75]]), 0)
assert_almost_equal(label_ranking_loss([[0, 1]], [[0.75, 0.25]]), 1)
assert_almost_equal(label_ranking_loss([[0, 0, 1]], [[0.25, 0.5, 0.75]]),
0)
assert_almost_equal(label_ranking_loss([[0, 1, 0]], [[0.25, 0.5, 0.75]]),
1 / 2)
assert_almost_equal(label_ranking_loss([[0, 1, 1]], [[0.25, 0.5, 0.75]]),
0)
assert_almost_equal(label_ranking_loss([[1, 0, 0]], [[0.25, 0.5, 0.75]]),
2 / 2)
assert_almost_equal(label_ranking_loss([[1, 0, 1]], [[0.25, 0.5, 0.75]]),
1 / 2)
assert_almost_equal(label_ranking_loss([[1, 1, 0]], [[0.25, 0.5, 0.75]]),
2 / 2)
# Undefined metrics - the ranking doesn't matter
assert_almost_equal(label_ranking_loss([[0, 0]], [[0.75, 0.25]]), 0)
assert_almost_equal(label_ranking_loss([[1, 1]], [[0.75, 0.25]]), 0)
assert_almost_equal(label_ranking_loss([[0, 0]], [[0.5, 0.5]]), 0)
assert_almost_equal(label_ranking_loss([[1, 1]], [[0.5, 0.5]]), 0)
assert_almost_equal(label_ranking_loss([[0, 0, 0]], [[0.5, 0.75, 0.25]]),
0)
assert_almost_equal(label_ranking_loss([[1, 1, 1]], [[0.5, 0.75, 0.25]]),
0)
assert_almost_equal(label_ranking_loss([[0, 0, 0]], [[0.25, 0.5, 0.5]]),
0)
assert_almost_equal(label_ranking_loss([[1, 1, 1]], [[0.25, 0.5, 0.5]]), 0)
    # Non-trivial case
assert_almost_equal(label_ranking_loss([[0, 1, 0], [1, 1, 0]],
[[0.1, 10., -3], [0, 1, 3]]),
(0 + 2 / 2) / 2.)
assert_almost_equal(label_ranking_loss(
[[0, 1, 0], [1, 1, 0], [0, 1, 1]],
[[0.1, 10, -3], [0, 1, 3], [0, 2, 0]]),
(0 + 2 / 2 + 1 / 2) / 3.)
assert_almost_equal(label_ranking_loss(
[[0, 1, 0], [1, 1, 0], [0, 1, 1]],
[[0.1, 10, -3], [3, 1, 3], [0, 2, 0]]),
(0 + 2 / 2 + 1 / 2) / 3.)
# Sparse csr matrices
assert_almost_equal(label_ranking_loss(
csr_matrix(np.array([[0, 1, 0], [1, 1, 0]])),
[[0.1, 10, -3], [3, 1, 3]]),
(0 + 2 / 2) / 2.)
def test_ranking_appropriate_input_shape():
    # Check that y_true.shape != y_score.shape raises the proper exception
assert_raises(ValueError, label_ranking_loss, [[0, 1], [0, 1]], [0, 1])
assert_raises(ValueError, label_ranking_loss, [[0, 1], [0, 1]], [[0, 1]])
assert_raises(ValueError, label_ranking_loss,
[[0, 1], [0, 1]], [[0], [1]])
assert_raises(ValueError, label_ranking_loss, [[0, 1]], [[0, 1], [0, 1]])
assert_raises(ValueError, label_ranking_loss,
[[0], [1]], [[0, 1], [0, 1]])
assert_raises(ValueError, label_ranking_loss, [[0, 1], [0, 1]], [[0], [1]])
def test_ranking_loss_ties_handling():
# Tie handling
assert_almost_equal(label_ranking_loss([[1, 0]], [[0.5, 0.5]]), 1)
assert_almost_equal(label_ranking_loss([[0, 1]], [[0.5, 0.5]]), 1)
assert_almost_equal(label_ranking_loss([[0, 0, 1]], [[0.25, 0.5, 0.5]]),
1 / 2)
assert_almost_equal(label_ranking_loss([[0, 1, 0]], [[0.25, 0.5, 0.5]]),
1 / 2)
assert_almost_equal(label_ranking_loss([[0, 1, 1]], [[0.25, 0.5, 0.5]]), 0)
assert_almost_equal(label_ranking_loss([[1, 0, 0]], [[0.25, 0.5, 0.5]]), 1)
assert_almost_equal(label_ranking_loss([[1, 0, 1]], [[0.25, 0.5, 0.5]]), 1)
assert_almost_equal(label_ranking_loss([[1, 1, 0]], [[0.25, 0.5, 0.5]]), 1)
| bsd-3-clause |
kaiserroll14/301finalproject | main/pandas/tseries/offsets.py | 9 | 87012 | from datetime import date, datetime, timedelta
from pandas.compat import range
from pandas import compat
import numpy as np
from pandas.tseries.tools import to_datetime
from pandas.tseries.timedeltas import to_timedelta
from pandas.core.common import ABCSeries, ABCDatetimeIndex
# import after tools, dateutil check
from dateutil.relativedelta import relativedelta, weekday
from dateutil.easter import easter
import pandas.tslib as tslib
from pandas.tslib import Timestamp, OutOfBoundsDatetime, Timedelta
import functools
__all__ = ['Day', 'BusinessDay', 'BDay', 'CustomBusinessDay', 'CDay',
'CBMonthEnd','CBMonthBegin',
'MonthBegin', 'BMonthBegin', 'MonthEnd', 'BMonthEnd',
'BusinessHour',
'YearBegin', 'BYearBegin', 'YearEnd', 'BYearEnd',
'QuarterBegin', 'BQuarterBegin', 'QuarterEnd', 'BQuarterEnd',
'LastWeekOfMonth', 'FY5253Quarter', 'FY5253',
'Week', 'WeekOfMonth', 'Easter',
'Hour', 'Minute', 'Second', 'Milli', 'Micro', 'Nano',
'DateOffset']
# convert to/from datetime/timestamp to allow invalid Timestamp ranges to pass thru
def as_timestamp(obj):
if isinstance(obj, Timestamp):
return obj
try:
return Timestamp(obj)
except (OutOfBoundsDatetime):
pass
return obj
def as_datetime(obj):
f = getattr(obj,'to_pydatetime',None)
if f is not None:
obj = f()
return obj
def apply_wraps(func):
@functools.wraps(func)
def wrapper(self, other):
if other is tslib.NaT:
return tslib.NaT
elif isinstance(other, (timedelta, Tick, DateOffset)):
# timedelta path
return func(self, other)
elif isinstance(other, (np.datetime64, datetime, date)):
other = as_timestamp(other)
tz = getattr(other, 'tzinfo', None)
nano = getattr(other, 'nanosecond', 0)
try:
if self._adjust_dst and isinstance(other, Timestamp):
other = other.tz_localize(None)
result = func(self, other)
if self._adjust_dst:
result = tslib._localize_pydatetime(result, tz)
result = Timestamp(result)
if self.normalize:
result = result.normalize()
# nanosecond may be deleted depending on offset process
if not self.normalize and nano != 0:
if not isinstance(self, Nano) and result.nanosecond != nano:
if result.tz is not None:
# convert to UTC
value = tslib.tz_convert_single(result.value, 'UTC', result.tz)
else:
value = result.value
result = Timestamp(value + nano)
if tz is not None and result.tzinfo is None:
result = tslib._localize_pydatetime(result, tz)
except OutOfBoundsDatetime:
result = func(self, as_datetime(other))
if self.normalize:
# normalize_date returns normal datetime
result = normalize_date(result)
if tz is not None and result.tzinfo is None:
result = tslib._localize_pydatetime(result, tz)
return result
return wrapper
def apply_index_wraps(func):
@functools.wraps(func)
def wrapper(self, other):
result = func(self, other)
if self.normalize:
result = result.to_period('D').to_timestamp()
return result
return wrapper
def _is_normalized(dt):
if (dt.hour != 0 or dt.minute != 0 or dt.second != 0
or dt.microsecond != 0 or getattr(dt, 'nanosecond', 0) != 0):
return False
return True
#----------------------------------------------------------------------
# DateOffset
class ApplyTypeError(TypeError):
# sentinel class for catching the apply error to return NotImplemented
pass
class CacheableOffset(object):
_cacheable = True
class DateOffset(object):
"""
Standard kind of date increment used for a date range.
    Works exactly like relativedelta in terms of the keyword args you
    pass in. Use of the keyword n is discouraged, since you would be better
    off specifying n in the other keywords you use; regardless, it is
    there for you. n is needed for DateOffset subclasses.
    DateOffsets work as follows. Each offset specifies a set of dates
    that conform to the DateOffset. For example, Bday defines this
    set to be the set of dates that are weekdays (M-F). To test if a
    date is in the set of a DateOffset dateOffset, we can use the
    onOffset method: dateOffset.onOffset(date).
    If a date is not a valid date, the rollback and rollforward
methods can be used to roll the date to the nearest valid date
before/after the date.
DateOffsets can be created to move dates forward a given number of
valid dates. For example, Bday(2) can be added to a date to move
it two business days forward. If the date does not start on a
    valid date, first it is moved to a valid date. Thus the pseudocode
is:
def __add__(date):
date = rollback(date) # does nothing if date is valid
return date + <n number of periods>
    When a date offset is created for a negative number of periods,
    the date is first rolled forward. The pseudocode is:
    def __add__(date):
        date = rollforward(date) # does nothing if date is valid
return date + <n number of periods>
Zero presents a problem. Should it roll forward or back? We
arbitrarily have it rollforward:
date + BDay(0) == BDay.rollforward(date)
Since 0 is a bit weird, we suggest avoiding its use.
"""
_cacheable = False
_normalize_cache = True
_kwds_use_relativedelta = (
'years', 'months', 'weeks', 'days',
'year', 'month', 'week', 'day', 'weekday',
'hour', 'minute', 'second', 'microsecond'
)
_use_relativedelta = False
_adjust_dst = False
# default for prior pickles
normalize = False
def __init__(self, n=1, normalize=False, **kwds):
self.n = int(n)
self.normalize = normalize
self.kwds = kwds
self._offset, self._use_relativedelta = self._determine_offset()
def _determine_offset(self):
# timedelta is used for sub-daily plural offsets and all singular offsets
# relativedelta is used for plural offsets of daily length or more
# nanosecond(s) are handled by apply_wraps
kwds_no_nanos = dict(
(k, v) for k, v in self.kwds.items()
if k not in ('nanosecond', 'nanoseconds')
)
use_relativedelta = False
if len(kwds_no_nanos) > 0:
if any(k in self._kwds_use_relativedelta for k in kwds_no_nanos):
use_relativedelta = True
offset = relativedelta(**kwds_no_nanos)
else:
# sub-daily offset - use timedelta (tz-aware)
offset = timedelta(**kwds_no_nanos)
else:
offset = timedelta(1)
return offset, use_relativedelta
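    # e.g. (sketch): DateOffset(months=1) yields (relativedelta(months=+1), True),
    # while DateOffset(hours=2) yields (timedelta(hours=2), False), because the
    # plural 'hours' is not in _kwds_use_relativedelta.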
@apply_wraps
def apply(self, other):
if self._use_relativedelta:
other = as_datetime(other)
if len(self.kwds) > 0:
tzinfo = getattr(other, 'tzinfo', None)
if tzinfo is not None and self._use_relativedelta:
# perform calculation in UTC
other = other.replace(tzinfo=None)
if self.n > 0:
for i in range(self.n):
other = other + self._offset
else:
for i in range(-self.n):
other = other - self._offset
if tzinfo is not None and self._use_relativedelta:
# bring tz back from UTC calculation
other = tslib._localize_pydatetime(other, tzinfo)
return as_timestamp(other)
else:
return other + timedelta(self.n)
@apply_index_wraps
def apply_index(self, i):
"""
Vectorized apply of DateOffset to DatetimeIndex,
        raises NotImplementedError for offsets without a
vectorized implementation
.. versionadded:: 0.17.0
Parameters
----------
i : DatetimeIndex
Returns
-------
y : DatetimeIndex
"""
if not type(self) is DateOffset:
raise NotImplementedError("DateOffset subclass %s "
"does not have a vectorized "
"implementation"
% (self.__class__.__name__,))
relativedelta_fast = set(['years', 'months', 'weeks',
'days', 'hours', 'minutes',
'seconds', 'microseconds'])
# relativedelta/_offset path only valid for base DateOffset
if (self._use_relativedelta and
set(self.kwds).issubset(relativedelta_fast)):
months = ((self.kwds.get('years', 0) * 12
+ self.kwds.get('months', 0)) * self.n)
if months:
shifted = tslib.shift_months(i.asi8, months)
i = i._shallow_copy(shifted)
weeks = (self.kwds.get('weeks', 0)) * self.n
if weeks:
i = (i.to_period('W') + weeks).to_timestamp() + i.to_perioddelta('W')
timedelta_kwds = dict((k,v) for k,v in self.kwds.items()
if k in ['days','hours','minutes',
'seconds','microseconds'])
if timedelta_kwds:
delta = Timedelta(**timedelta_kwds)
i = i + (self.n * delta)
return i
elif not self._use_relativedelta and hasattr(self, '_offset'):
# timedelta
return i + (self._offset * self.n)
else:
# relativedelta with other keywords
raise NotImplementedError("DateOffset with relativedelta "
"keyword(s) %s not able to be "
"applied vectorized" %
(set(self.kwds) - relativedelta_fast),)
def isAnchored(self):
return (self.n == 1)
def copy(self):
return self.__class__(self.n, normalize=self.normalize, **self.kwds)
def _should_cache(self):
return self.isAnchored() and self._cacheable
def _params(self):
all_paras = dict(list(vars(self).items()) + list(self.kwds.items()))
if 'holidays' in all_paras and not all_paras['holidays']:
all_paras.pop('holidays')
exclude = ['kwds', 'name','normalize', 'calendar']
attrs = [(k, v) for k, v in all_paras.items() if (k not in exclude ) and (k[0] != '_')]
attrs = sorted(set(attrs))
params = tuple([str(self.__class__)] + attrs)
return params
def __repr__(self):
if hasattr(self, '_named'):
return self._named
className = getattr(self, '_outputName', type(self).__name__)
exclude = set(['n', 'inc', 'normalize'])
attrs = []
for attr in sorted(self.__dict__):
if ((attr == 'kwds' and len(self.kwds) == 0)
or attr.startswith('_')):
continue
elif attr == 'kwds':
kwds_new = {}
for key in self.kwds:
if not hasattr(self, key):
kwds_new[key] = self.kwds[key]
if len(kwds_new) > 0:
attrs.append('='.join((attr, repr(kwds_new))))
else:
if attr not in exclude:
attrs.append('='.join((attr, repr(getattr(self, attr)))))
if abs(self.n) != 1:
plural = 's'
else:
plural = ''
n_str = ""
if self.n != 1:
n_str = "%s * " % self.n
out = '<%s' % n_str + className + plural
if attrs:
out += ': ' + ', '.join(attrs)
out += '>'
return out
@property
def name(self):
if hasattr(self, '_named'):
return self._named
else:
return self.rule_code
def __eq__(self, other):
if other is None:
return False
if isinstance(other, compat.string_types):
from pandas.tseries.frequencies import to_offset
other = to_offset(other)
if not isinstance(other, DateOffset):
return False
return self._params() == other._params()
def __ne__(self, other):
return not self == other
def __hash__(self):
return hash(self._params())
def __call__(self, other):
return self.apply(other)
def __add__(self, other):
if isinstance(other, (ABCDatetimeIndex, ABCSeries)):
return other + self
try:
return self.apply(other)
except ApplyTypeError:
return NotImplemented
def __radd__(self, other):
return self.__add__(other)
def __sub__(self, other):
if isinstance(other, datetime):
raise TypeError('Cannot subtract datetime from offset.')
elif type(other) == type(self):
return self.__class__(self.n - other.n, normalize=self.normalize, **self.kwds)
else: # pragma: no cover
return NotImplemented
def __rsub__(self, other):
if isinstance(other, (ABCDatetimeIndex, ABCSeries)):
return other - self
return self.__class__(-self.n, normalize=self.normalize, **self.kwds) + other
def __mul__(self, someInt):
return self.__class__(n=someInt * self.n, normalize=self.normalize, **self.kwds)
def __rmul__(self, someInt):
return self.__mul__(someInt)
def __neg__(self):
return self.__class__(-self.n, normalize=self.normalize, **self.kwds)
def rollback(self, dt):
"""Roll provided date backward to next offset only if not on offset"""
dt = as_timestamp(dt)
if not self.onOffset(dt):
dt = dt - self.__class__(1, normalize=self.normalize, **self.kwds)
return dt
def rollforward(self, dt):
"""Roll provided date forward to next offset only if not on offset"""
dt = as_timestamp(dt)
if not self.onOffset(dt):
dt = dt + self.__class__(1, normalize=self.normalize, **self.kwds)
return dt
def onOffset(self, dt):
if self.normalize and not _is_normalized(dt):
return False
# XXX, see #1395
if type(self) == DateOffset or isinstance(self, Tick):
return True
# Default (slow) method for determining if some date is a member of the
# date range generated by this offset. Subclasses may have this
# re-implemented in a nicer way.
a = dt
b = ((dt + self) - self)
return a == b
# helpers for vectorized offsets
def _beg_apply_index(self, i, freq):
"""Offsets index to beginning of Period frequency"""
off = i.to_perioddelta('D')
from pandas.tseries.frequencies import get_freq_code
base, mult = get_freq_code(freq)
base_period = i.to_period(base)
if self.n < 0:
# when subtracting, dates on start roll to prior
roll = np.where(base_period.to_timestamp() == i - off,
self.n, self.n + 1)
else:
roll = self.n
base = (base_period + roll).to_timestamp()
return base + off
def _end_apply_index(self, i, freq):
"""Offsets index to end of Period frequency"""
off = i.to_perioddelta('D')
import pandas.tseries.frequencies as frequencies
from pandas.tseries.frequencies import get_freq_code
base, mult = get_freq_code(freq)
base_period = i.to_period(base)
if self.n > 0:
            # when adding, dates on end roll to next
roll = np.where(base_period.to_timestamp(how='end') == i - off,
self.n, self.n - 1)
else:
roll = self.n
base = (base_period + roll).to_timestamp(how='end')
return base + off
# way to get around weirdness with rule_code
@property
def _prefix(self):
raise NotImplementedError('Prefix not defined')
@property
def rule_code(self):
return self._prefix
@property
def freqstr(self):
try:
code = self.rule_code
except NotImplementedError:
return repr(self)
if self.n != 1:
fstr = '%d%s' % (self.n, code)
else:
fstr = code
return fstr
class SingleConstructorOffset(DateOffset):
@classmethod
def _from_name(cls, suffix=None):
# default _from_name calls cls with no args
if suffix:
raise ValueError("Bad freq suffix %s" % suffix)
return cls()
class BusinessMixin(object):
""" mixin to business types to provide related functions """
# TODO: Combine this with DateOffset by defining a whitelisted set of
# attributes on each object rather than the existing behavior of iterating
# over internal ``__dict__``
def __repr__(self):
if hasattr(self, '_named'):
return self._named
className = getattr(self, '_outputName', self.__class__.__name__)
if abs(self.n) != 1:
plural = 's'
else:
plural = ''
n_str = ""
if self.n != 1:
n_str = "%s * " % self.n
out = '<%s' % n_str + className + plural + self._repr_attrs() + '>'
return out
def _repr_attrs(self):
if self.offset:
attrs = ['offset=%s' % repr(self.offset)]
else:
attrs = None
out = ''
if attrs:
out += ': ' + ', '.join(attrs)
return out
class BusinessDay(BusinessMixin, SingleConstructorOffset):
"""
DateOffset subclass representing possibly n business days
"""
_prefix = 'B'
_adjust_dst = True
def __init__(self, n=1, normalize=False, **kwds):
self.n = int(n)
self.normalize = normalize
self.kwds = kwds
self.offset = kwds.get('offset', timedelta(0))
@property
def freqstr(self):
try:
code = self.rule_code
except NotImplementedError:
return repr(self)
if self.n != 1:
fstr = '%d%s' % (self.n, code)
else:
fstr = code
if self.offset:
fstr += self._offset_str()
return fstr
def _offset_str(self):
def get_str(td):
off_str = ''
if td.days > 0:
off_str += str(td.days) + 'D'
if td.seconds > 0:
s = td.seconds
hrs = int(s / 3600)
if hrs != 0:
off_str += str(hrs) + 'H'
s -= hrs * 3600
mts = int(s / 60)
if mts != 0:
off_str += str(mts) + 'Min'
s -= mts * 60
if s != 0:
off_str += str(s) + 's'
if td.microseconds > 0:
off_str += str(td.microseconds) + 'us'
return off_str
if isinstance(self.offset, timedelta):
zero = timedelta(0, 0, 0)
if self.offset >= zero:
off_str = '+' + get_str(self.offset)
else:
off_str = '-' + get_str(-self.offset)
return off_str
else:
return '+' + repr(self.offset)
def isAnchored(self):
return (self.n == 1)
@apply_wraps
def apply(self, other):
if isinstance(other, datetime):
n = self.n
if n == 0 and other.weekday() > 4:
n = 1
result = other
# avoid slowness below
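            # for |n| > 5, first jump k = n // 5 whole calendar weeks at once
            # (a 7 * k day step crosses exactly k weekends), then fix up the
            # remainder one day at a time in the loop below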
if abs(n) > 5:
k = n // 5
result = result + timedelta(7 * k)
if n < 0 and result.weekday() > 4:
n += 1
n -= 5 * k
if n == 0 and result.weekday() > 4:
n -= 1
while n != 0:
k = n // abs(n)
result = result + timedelta(k)
if result.weekday() < 5:
n -= k
if self.offset:
result = result + self.offset
return result
elif isinstance(other, (timedelta, Tick)):
return BDay(self.n, offset=self.offset + other,
normalize=self.normalize)
else:
raise ApplyTypeError('Only know how to combine business day with '
'datetime or timedelta.')
@apply_index_wraps
def apply_index(self, i):
time = i.to_perioddelta('D')
# to_period rolls forward to next BDay; track and
# reduce n where it does when rolling forward
shifted = (i.to_perioddelta('B') - time).asi8 != 0
if self.n > 0:
roll = np.where(shifted, self.n - 1, self.n)
else:
roll = self.n
return (i.to_period('B') + roll).to_timestamp() + time
def onOffset(self, dt):
if self.normalize and not _is_normalized(dt):
return False
return dt.weekday() < 5
class BusinessHour(BusinessMixin, SingleConstructorOffset):
"""
    DateOffset subclass representing possibly n business hours
    .. versionadded:: 0.16.1
"""
_prefix = 'BH'
_anchor = 0
def __init__(self, n=1, normalize=False, **kwds):
self.n = int(n)
self.normalize = normalize
# must be validated here to equality check
kwds['start'] = self._validate_time(kwds.get('start', '09:00'))
kwds['end'] = self._validate_time(kwds.get('end', '17:00'))
self.kwds = kwds
self.offset = kwds.get('offset', timedelta(0))
self.start = kwds.get('start', '09:00')
self.end = kwds.get('end', '17:00')
# used for moving to next businessday
if self.n >= 0:
self.next_bday = BusinessDay(n=1)
else:
self.next_bday = BusinessDay(n=-1)
def _validate_time(self, t_input):
from datetime import time as dt_time
import time
if isinstance(t_input, compat.string_types):
try:
t = time.strptime(t_input, '%H:%M')
return dt_time(hour=t.tm_hour, minute=t.tm_min)
except ValueError:
raise ValueError("time data must match '%H:%M' format")
elif isinstance(t_input, dt_time):
if t_input.second != 0 or t_input.microsecond != 0:
raise ValueError("time data must be specified only with hour and minute")
return t_input
else:
raise ValueError("time data must be string or datetime.time")
def _get_daytime_flag(self):
if self.start == self.end:
raise ValueError('start and end must not be the same')
elif self.start < self.end:
return True
else:
return False
def _repr_attrs(self):
out = super(BusinessHour, self)._repr_attrs()
attrs = ['BH=%s-%s' % (self.start.strftime('%H:%M'),
self.end.strftime('%H:%M'))]
out += ': ' + ', '.join(attrs)
return out
def _next_opening_time(self, other):
"""
        If n is positive, return tomorrow's business day's opening time.
        Otherwise yesterday's business day's opening time.
        The opening time always falls on a BusinessDay; the closing time
        may not if business hours extend over midnight.
"""
if not self.next_bday.onOffset(other):
other = other + self.next_bday
else:
if self.n >= 0 and self.start < other.time():
other = other + self.next_bday
elif self.n < 0 and other.time() < self.start:
other = other + self.next_bday
return datetime(other.year, other.month, other.day,
self.start.hour, self.start.minute)
def _prev_opening_time(self, other):
"""
        If n is positive, return yesterday's business day's opening time.
        Otherwise tomorrow's business day's opening time.
"""
if not self.next_bday.onOffset(other):
other = other - self.next_bday
else:
if self.n >= 0 and other.time() < self.start:
other = other - self.next_bday
elif self.n < 0 and other.time() > self.start:
other = other - self.next_bday
return datetime(other.year, other.month, other.day,
self.start.hour, self.start.minute)
def _get_business_hours_by_sec(self):
"""
Return business hours in a day by seconds.
"""
if self._get_daytime_flag():
            # create dummy datetime to calculate business hours in a day
dtstart = datetime(2014, 4, 1, self.start.hour, self.start.minute)
until = datetime(2014, 4, 1, self.end.hour, self.end.minute)
return tslib.tot_seconds(until - dtstart)
else:
self.daytime = False
dtstart = datetime(2014, 4, 1, self.start.hour, self.start.minute)
until = datetime(2014, 4, 2, self.end.hour, self.end.minute)
return tslib.tot_seconds(until - dtstart)
@apply_wraps
def rollback(self, dt):
"""Roll provided date backward to next offset only if not on offset"""
if not self.onOffset(dt):
businesshours = self._get_business_hours_by_sec()
if self.n >= 0:
dt = self._prev_opening_time(dt) + timedelta(seconds=businesshours)
else:
dt = self._next_opening_time(dt) + timedelta(seconds=businesshours)
return dt
@apply_wraps
def rollforward(self, dt):
"""Roll provided date forward to next offset only if not on offset"""
if not self.onOffset(dt):
if self.n >= 0:
return self._next_opening_time(dt)
else:
return self._prev_opening_time(dt)
return dt
@apply_wraps
def apply(self, other):
        # calculate here because offset is not immutable
daytime = self._get_daytime_flag()
businesshours = self._get_business_hours_by_sec()
bhdelta = timedelta(seconds=businesshours)
if isinstance(other, datetime):
# used for detecting edge condition
nanosecond = getattr(other, 'nanosecond', 0)
# reset timezone and nanosecond
# other may be a Timestamp, thus not use replace
other = datetime(other.year, other.month, other.day,
other.hour, other.minute,
other.second, other.microsecond)
n = self.n
if n >= 0:
if (other.time() == self.end or
not self._onOffset(other, businesshours)):
other = self._next_opening_time(other)
else:
if other.time() == self.start:
# adjustment to move to previous business day
other = other - timedelta(seconds=1)
if not self._onOffset(other, businesshours):
other = self._next_opening_time(other)
other = other + bhdelta
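            # split the requested n business hours into whole business days
            # (bd) and a remainder in minutes (r); businesshours // 60 is
            # the length of one business day in minutes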
bd, r = divmod(abs(n * 60), businesshours // 60)
if n < 0:
bd, r = -bd, -r
if bd != 0:
skip_bd = BusinessDay(n=bd)
                # midnight business hour may not be on BusinessDay
if not self.next_bday.onOffset(other):
remain = other - self._prev_opening_time(other)
other = self._next_opening_time(other + skip_bd) + remain
else:
other = other + skip_bd
hours, minutes = divmod(r, 60)
result = other + timedelta(hours=hours, minutes=minutes)
# because of previous adjustment, time will be larger than start
if ((daytime and (result.time() < self.start or self.end < result.time())) or
not daytime and (self.end < result.time() < self.start)):
if n >= 0:
bday_edge = self._prev_opening_time(other)
bday_edge = bday_edge + bhdelta
                    # calculate remainder
bday_remain = result - bday_edge
result = self._next_opening_time(other)
result += bday_remain
else:
bday_edge = self._next_opening_time(other)
bday_remain = result - bday_edge
result = self._next_opening_time(result) + bhdelta
result += bday_remain
# edge handling
if n >= 0:
if result.time() == self.end:
result = self._next_opening_time(result)
else:
if result.time() == self.start and nanosecond == 0:
# adjustment to move to previous business day
                    result = self._next_opening_time(result - timedelta(seconds=1)) + bhdelta
return result
else:
            raise ApplyTypeError('Only know how to combine business hour with datetime.')
def onOffset(self, dt):
if self.normalize and not _is_normalized(dt):
return False
if dt.tzinfo is not None:
dt = datetime(dt.year, dt.month, dt.day, dt.hour,
dt.minute, dt.second, dt.microsecond)
# Valid BH can be on the different BusinessDay during midnight
# Distinguish by the time spent from previous opening time
businesshours = self._get_business_hours_by_sec()
return self._onOffset(dt, businesshours)
def _onOffset(self, dt, businesshours):
"""
        Slight speedup using precalculated values
"""
# if self.normalize and not _is_normalized(dt):
# return False
# Valid BH can be on the different BusinessDay during midnight
# Distinguish by the time spent from previous opening time
if self.n >= 0:
op = self._prev_opening_time(dt)
else:
op = self._next_opening_time(dt)
span = tslib.tot_seconds(dt - op)
if span <= businesshours:
return True
else:
return False
class CustomBusinessDay(BusinessDay):
"""
**EXPERIMENTAL** DateOffset subclass representing possibly n business days
excluding holidays
.. warning:: EXPERIMENTAL
This class is not officially supported and the API is likely to change
in future versions. Use this at your own risk.
Parameters
----------
n : int, default 1
offset : timedelta, default timedelta(0)
normalize : bool, default False
Normalize start/end dates to midnight before generating date range
weekmask : str, Default 'Mon Tue Wed Thu Fri'
weekmask of valid business days, passed to ``numpy.busdaycalendar``
holidays : list
list/array of dates to exclude from the set of valid business days,
passed to ``numpy.busdaycalendar``
calendar : pd.HolidayCalendar or np.busdaycalendar
"""
_cacheable = False
_prefix = 'C'
def __init__(self, n=1, normalize=False, weekmask='Mon Tue Wed Thu Fri',
holidays=None, calendar=None, **kwds):
self.n = int(n)
self.normalize = normalize
self.kwds = kwds
self.offset = kwds.get('offset', timedelta(0))
calendar, holidays = self.get_calendar(weekmask=weekmask,
holidays=holidays,
calendar=calendar)
# CustomBusinessDay instances are identified by the
# following two attributes. See DateOffset._params()
# holidays, weekmask
self.kwds['weekmask'] = self.weekmask = weekmask
self.kwds['holidays'] = self.holidays = holidays
self.kwds['calendar'] = self.calendar = calendar
def get_calendar(self, weekmask, holidays, calendar):
'''Generate busdaycalendar'''
if isinstance(calendar, np.busdaycalendar):
if not holidays:
holidays = tuple(calendar.holidays)
elif not isinstance(holidays, tuple):
holidays = tuple(holidays)
else:
# trust that calendar.holidays and holidays are
# consistent
pass
return calendar, holidays
if holidays is None:
holidays = []
try:
holidays = holidays + calendar.holidays().tolist()
except AttributeError:
pass
holidays = [self._to_dt64(dt, dtype='datetime64[D]') for dt in
holidays]
holidays = tuple(sorted(holidays))
kwargs = {'weekmask': weekmask}
if holidays:
kwargs['holidays'] = holidays
try:
busdaycalendar = np.busdaycalendar(**kwargs)
except:
# Check we have the required numpy version
from distutils.version import LooseVersion
if LooseVersion(np.__version__) < '1.7.0':
raise NotImplementedError("CustomBusinessDay requires numpy >= "
"1.7.0. Current version: " +
np.__version__)
else:
raise
return busdaycalendar, holidays
def __getstate__(self):
"""Return a pickleable state"""
state = self.__dict__.copy()
del state['calendar']
# we don't want to actually pickle the calendar object
        # as it's an np.busdaycalendar; we recreate it on deserialization
try:
state['kwds'].pop('calendar')
except:
pass
return state
def __setstate__(self, state):
"""Reconstruct an instance from a pickled state"""
self.__dict__ = state
calendar, holidays = self.get_calendar(weekmask=self.weekmask,
holidays=self.holidays,
calendar=None)
self.kwds['calendar'] = self.calendar = calendar
self.kwds['holidays'] = self.holidays = holidays
self.kwds['weekmask'] = state['weekmask']
@apply_wraps
def apply(self, other):
if self.n <= 0:
roll = 'forward'
else:
roll = 'backward'
if isinstance(other, datetime):
date_in = other
np_dt = np.datetime64(date_in.date())
np_incr_dt = np.busday_offset(np_dt, self.n, roll=roll,
busdaycal=self.calendar)
dt_date = np_incr_dt.astype(datetime)
result = datetime.combine(dt_date, date_in.time())
if self.offset:
result = result + self.offset
return result
elif isinstance(other, (timedelta, Tick)):
return BDay(self.n, offset=self.offset + other,
normalize=self.normalize)
else:
raise ApplyTypeError('Only know how to combine trading day with '
'datetime, datetime64 or timedelta.')
def apply_index(self, i):
raise NotImplementedError
@staticmethod
def _to_dt64(dt, dtype='datetime64'):
# Currently
# > np.datetime64(dt.datetime(2013,5,1),dtype='datetime64[D]')
# numpy.datetime64('2013-05-01T02:00:00.000000+0200')
# Thus astype is needed to cast datetime to datetime64[D]
if getattr(dt, 'tzinfo', None) is not None:
i8 = tslib.pydt_to_i8(dt)
dt = tslib.tz_convert_single(i8, 'UTC', dt.tzinfo)
dt = Timestamp(dt)
dt = np.datetime64(dt)
if dt.dtype.name != dtype:
dt = dt.astype(dtype)
return dt
def onOffset(self, dt):
if self.normalize and not _is_normalized(dt):
return False
day64 = self._to_dt64(dt,'datetime64[D]')
return np.is_busday(day64, busdaycal=self.calendar)
class MonthOffset(SingleConstructorOffset):
_adjust_dst = True
@property
def name(self):
        if self.isAnchored():
return self.rule_code
else:
return "%s-%s" % (self.rule_code, _int_to_month[self.n])
class MonthEnd(MonthOffset):
"""DateOffset of one month end"""
@apply_wraps
def apply(self, other):
n = self.n
_, days_in_month = tslib.monthrange(other.year, other.month)
if other.day != days_in_month:
other = other + relativedelta(months=-1, day=31)
if n <= 0:
n = n + 1
other = other + relativedelta(months=n, day=31)
return other
@apply_index_wraps
def apply_index(self, i):
months = self.n - 1 if self.n >= 0 else self.n
shifted = tslib.shift_months(i.asi8, months, 'end')
return i._shallow_copy(shifted)
def onOffset(self, dt):
if self.normalize and not _is_normalized(dt):
return False
days_in_month = tslib.monthrange(dt.year, dt.month)[1]
return dt.day == days_in_month
_prefix = 'M'
class MonthBegin(MonthOffset):
"""DateOffset of one month at beginning"""
@apply_wraps
def apply(self, other):
n = self.n
if other.day > 1 and n <= 0: # then roll forward if n<=0
n += 1
return other + relativedelta(months=n, day=1)
@apply_index_wraps
def apply_index(self, i):
months = self.n + 1 if self.n < 0 else self.n
shifted = tslib.shift_months(i.asi8, months, 'start')
return i._shallow_copy(shifted)
def onOffset(self, dt):
if self.normalize and not _is_normalized(dt):
return False
return dt.day == 1
_prefix = 'MS'
class BusinessMonthEnd(MonthOffset):
"""DateOffset increments between business EOM dates"""
def isAnchored(self):
return (self.n == 1)
@apply_wraps
def apply(self, other):
n = self.n
wkday, days_in_month = tslib.monthrange(other.year, other.month)
lastBDay = days_in_month - max(((wkday + days_in_month - 1)
% 7) - 4, 0)
if n > 0 and not other.day >= lastBDay:
n = n - 1
elif n <= 0 and other.day > lastBDay:
n = n + 1
other = other + relativedelta(months=n, day=31)
if other.weekday() > 4:
other = other - BDay()
return other
_prefix = 'BM'
class BusinessMonthBegin(MonthOffset):
"""DateOffset of one business month at beginning"""
@apply_wraps
def apply(self, other):
n = self.n
wkday, _ = tslib.monthrange(other.year, other.month)
first = _get_firstbday(wkday)
if other.day > first and n <= 0:
# as if rolled forward already
n += 1
elif other.day < first and n > 0:
other = other + timedelta(days=first - other.day)
n -= 1
other = other + relativedelta(months=n)
wkday, _ = tslib.monthrange(other.year, other.month)
first = _get_firstbday(wkday)
result = datetime(other.year, other.month, first, other.hour, other.minute,
other.second, other.microsecond)
return result
def onOffset(self, dt):
if self.normalize and not _is_normalized(dt):
return False
first_weekday, _ = tslib.monthrange(dt.year, dt.month)
if first_weekday == 5:
return dt.day == 3
elif first_weekday == 6:
return dt.day == 2
else:
return dt.day == 1
_prefix = 'BMS'
class CustomBusinessMonthEnd(BusinessMixin, MonthOffset):
"""
**EXPERIMENTAL** DateOffset of one custom business month
.. warning:: EXPERIMENTAL
This class is not officially supported and the API is likely to change
in future versions. Use this at your own risk.
Parameters
----------
n : int, default 1
offset : timedelta, default timedelta(0)
normalize : bool, default False
Normalize start/end dates to midnight before generating date range
weekmask : str, Default 'Mon Tue Wed Thu Fri'
weekmask of valid business days, passed to ``numpy.busdaycalendar``
holidays : list
list/array of dates to exclude from the set of valid business days,
passed to ``numpy.busdaycalendar``
calendar : pd.HolidayCalendar or np.busdaycalendar
"""
_cacheable = False
_prefix = 'CBM'
def __init__(self, n=1, normalize=False, weekmask='Mon Tue Wed Thu Fri',
holidays=None, calendar=None, **kwds):
self.n = int(n)
self.normalize = normalize
self.kwds = kwds
self.offset = kwds.get('offset', timedelta(0))
self.cbday = CustomBusinessDay(n=self.n, normalize=normalize,
weekmask=weekmask, holidays=holidays,
calendar=calendar, **kwds)
self.m_offset = MonthEnd(n=1, normalize=normalize, **kwds)
self.kwds['calendar'] = self.cbday.calendar # cache numpy calendar
@apply_wraps
def apply(self,other):
n = self.n
# First move to month offset
cur_mend = self.m_offset.rollforward(other)
# Find this custom month offset
cur_cmend = self.cbday.rollback(cur_mend)
# handle zero case. arbitrarily rollforward
if n == 0 and other != cur_cmend:
n += 1
if other < cur_cmend and n >= 1:
n -= 1
elif other > cur_cmend and n <= -1:
n += 1
new = cur_mend + n * self.m_offset
result = self.cbday.rollback(new)
return result
class CustomBusinessMonthBegin(BusinessMixin, MonthOffset):
"""
**EXPERIMENTAL** DateOffset of one custom business month
.. warning:: EXPERIMENTAL
This class is not officially supported and the API is likely to change
in future versions. Use this at your own risk.
Parameters
----------
n : int, default 1
offset : timedelta, default timedelta(0)
normalize : bool, default False
Normalize start/end dates to midnight before generating date range
weekmask : str, Default 'Mon Tue Wed Thu Fri'
weekmask of valid business days, passed to ``numpy.busdaycalendar``
holidays : list
list/array of dates to exclude from the set of valid business days,
passed to ``numpy.busdaycalendar``
calendar : pd.HolidayCalendar or np.busdaycalendar
"""
_cacheable = False
_prefix = 'CBMS'
def __init__(self, n=1, normalize=False, weekmask='Mon Tue Wed Thu Fri',
holidays=None, calendar=None, **kwds):
self.n = int(n)
self.normalize = normalize
self.kwds = kwds
self.offset = kwds.get('offset', timedelta(0))
self.cbday = CustomBusinessDay(n=self.n, normalize=normalize,
weekmask=weekmask, holidays=holidays,
calendar=calendar, **kwds)
self.m_offset = MonthBegin(n=1, normalize=normalize, **kwds)
self.kwds['calendar'] = self.cbday.calendar # cache numpy calendar
@apply_wraps
def apply(self,other):
n = self.n
dt_in = other
# First move to month offset
cur_mbegin = self.m_offset.rollback(dt_in)
# Find this custom month offset
cur_cmbegin = self.cbday.rollforward(cur_mbegin)
# handle zero case. arbitrarily rollforward
if n == 0 and dt_in != cur_cmbegin:
n += 1
if dt_in > cur_cmbegin and n <= -1:
n += 1
elif dt_in < cur_cmbegin and n >= 1:
n -= 1
new = cur_mbegin + n * self.m_offset
result = self.cbday.rollforward(new)
return result
class Week(DateOffset):
"""
Weekly offset
Parameters
----------
weekday : int, default None
Always generate specific day of week. 0 for Monday
"""
_adjust_dst = True
def __init__(self, n=1, normalize=False, **kwds):
self.n = n
self.normalize = normalize
self.weekday = kwds.get('weekday', None)
if self.weekday is not None:
if self.weekday < 0 or self.weekday > 6:
raise ValueError('Day must be 0<=day<=6, got %d' %
self.weekday)
self._inc = timedelta(weeks=1)
self.kwds = kwds
def isAnchored(self):
return (self.n == 1 and self.weekday is not None)
@apply_wraps
def apply(self, other):
base = other
if self.weekday is None:
return other + self.n * self._inc
if self.n > 0:
k = self.n
otherDay = other.weekday()
if otherDay != self.weekday:
other = other + timedelta((self.weekday - otherDay) % 7)
k = k - 1
other = other
for i in range(k):
other = other + self._inc
else:
k = self.n
otherDay = other.weekday()
if otherDay != self.weekday:
other = other + timedelta((self.weekday - otherDay) % 7)
for i in range(-k):
other = other - self._inc
other = datetime(other.year, other.month, other.day,
base.hour, base.minute, base.second, base.microsecond)
return other
@apply_index_wraps
def apply_index(self, i):
if self.weekday is None:
return (i.to_period('W') + self.n).to_timestamp() + i.to_perioddelta('W')
else:
return self._end_apply_index(i, self.freqstr)
def onOffset(self, dt):
if self.normalize and not _is_normalized(dt):
return False
return dt.weekday() == self.weekday
_prefix = 'W'
@property
def rule_code(self):
suffix = ''
if self.weekday is not None:
suffix = '-%s' % (_int_to_weekday[self.weekday])
return self._prefix + suffix
@classmethod
def _from_name(cls, suffix=None):
if not suffix:
weekday = None
else:
weekday = _weekday_to_int[suffix]
return cls(weekday=weekday)
class WeekDay(object):
MON = 0
TUE = 1
WED = 2
THU = 3
FRI = 4
SAT = 5
SUN = 6
_int_to_weekday = {
WeekDay.MON: 'MON',
WeekDay.TUE: 'TUE',
WeekDay.WED: 'WED',
WeekDay.THU: 'THU',
WeekDay.FRI: 'FRI',
WeekDay.SAT: 'SAT',
WeekDay.SUN: 'SUN'
}
_weekday_to_int = dict((v, k) for k, v in _int_to_weekday.items())
class WeekOfMonth(DateOffset):
"""
Describes monthly dates like "the Tuesday of the 2nd week of each month"
Parameters
----------
n : int
week : {0, 1, 2, 3, ...}
0 is 1st week of month, 1 2nd week, etc.
weekday : {0, 1, ..., 6}
0: Mondays
1: Tuesdays
2: Wednesdays
3: Thursdays
4: Fridays
5: Saturdays
6: Sundays
"""
_adjust_dst = True
def __init__(self, n=1, normalize=False, **kwds):
self.n = n
self.normalize = normalize
self.weekday = kwds['weekday']
self.week = kwds['week']
if self.n == 0:
raise ValueError('N cannot be 0')
if self.weekday < 0 or self.weekday > 6:
raise ValueError('Day must be 0<=day<=6, got %d' %
self.weekday)
if self.week < 0 or self.week > 3:
            raise ValueError('Week must be 0<=week<=3, got %d' %
self.week)
self.kwds = kwds
@apply_wraps
def apply(self, other):
base = other
offsetOfMonth = self.getOffsetOfMonth(other)
if offsetOfMonth > other:
if self.n > 0:
months = self.n - 1
else:
months = self.n
elif offsetOfMonth == other:
months = self.n
else:
if self.n > 0:
months = self.n
else:
months = self.n + 1
other = self.getOffsetOfMonth(other + relativedelta(months=months, day=1))
other = datetime(other.year, other.month, other.day, base.hour,
base.minute, base.second, base.microsecond)
return other
def getOffsetOfMonth(self, dt):
w = Week(weekday=self.weekday)
d = datetime(dt.year, dt.month, 1, tzinfo=dt.tzinfo)
d = w.rollforward(d)
for i in range(self.week):
d = w.apply(d)
return d
def onOffset(self, dt):
if self.normalize and not _is_normalized(dt):
return False
d = datetime(dt.year, dt.month, dt.day, tzinfo=dt.tzinfo)
return d == self.getOffsetOfMonth(dt)
@property
def rule_code(self):
return '%s-%d%s' % (self._prefix, self.week + 1,
_int_to_weekday.get(self.weekday, ''))
_prefix = 'WOM'
@classmethod
def _from_name(cls, suffix=None):
if not suffix:
raise ValueError("Prefix %r requires a suffix." % (cls._prefix))
# TODO: handle n here...
# only one digit weeks (1 --> week 0, 2 --> week 1, etc.)
week = int(suffix[0]) - 1
weekday = _weekday_to_int[suffix[1:]]
return cls(week=week, weekday=weekday)
class LastWeekOfMonth(DateOffset):
"""
Describes monthly dates in last week of month like "the last Tuesday of each month"
Parameters
----------
n : int
weekday : {0, 1, ..., 6}
0: Mondays
1: Tuesdays
2: Wednesdays
3: Thursdays
4: Fridays
5: Saturdays
6: Sundays
"""
def __init__(self, n=1, normalize=False, **kwds):
self.n = n
self.normalize = normalize
self.weekday = kwds['weekday']
if self.n == 0:
raise ValueError('N cannot be 0')
if self.weekday < 0 or self.weekday > 6:
raise ValueError('Day must be 0<=day<=6, got %d' %
self.weekday)
self.kwds = kwds
@apply_wraps
def apply(self, other):
offsetOfMonth = self.getOffsetOfMonth(other)
if offsetOfMonth > other:
if self.n > 0:
months = self.n - 1
else:
months = self.n
elif offsetOfMonth == other:
months = self.n
else:
if self.n > 0:
months = self.n
else:
months = self.n + 1
return self.getOffsetOfMonth(other + relativedelta(months=months, day=1))
def getOffsetOfMonth(self, dt):
m = MonthEnd()
d = datetime(dt.year, dt.month, 1, dt.hour, dt.minute,
dt.second, dt.microsecond, tzinfo=dt.tzinfo)
eom = m.rollforward(d)
w = Week(weekday=self.weekday)
return w.rollback(eom)
def onOffset(self, dt):
if self.normalize and not _is_normalized(dt):
return False
return dt == self.getOffsetOfMonth(dt)
@property
def rule_code(self):
return '%s-%s' % (self._prefix, _int_to_weekday.get(self.weekday, ''))
_prefix = 'LWOM'
@classmethod
def _from_name(cls, suffix=None):
if not suffix:
raise ValueError("Prefix %r requires a suffix." % (cls._prefix))
# TODO: handle n here...
weekday = _weekday_to_int[suffix]
return cls(weekday=weekday)
class QuarterOffset(DateOffset):
"""Quarter representation - doesn't call super"""
#: default month for __init__
_default_startingMonth = None
#: default month in _from_name
_from_name_startingMonth = None
_adjust_dst = True
# TODO: Consider combining QuarterOffset and YearOffset __init__ at some
# point
def __init__(self, n=1, normalize=False, **kwds):
self.n = n
self.normalize = normalize
self.startingMonth = kwds.get('startingMonth',
self._default_startingMonth)
self.kwds = kwds
def isAnchored(self):
return (self.n == 1 and self.startingMonth is not None)
@classmethod
def _from_name(cls, suffix=None):
kwargs = {}
if suffix:
kwargs['startingMonth'] = _month_to_int[suffix]
else:
if cls._from_name_startingMonth is not None:
kwargs['startingMonth'] = cls._from_name_startingMonth
return cls(**kwargs)
@property
def rule_code(self):
return '%s-%s' % (self._prefix, _int_to_month[self.startingMonth])
class BQuarterEnd(QuarterOffset):
"""DateOffset increments between business Quarter dates
startingMonth = 1 corresponds to dates like 1/31/2007, 4/30/2007, ...
startingMonth = 2 corresponds to dates like 2/28/2007, 5/31/2007, ...
startingMonth = 3 corresponds to dates like 3/30/2007, 6/29/2007, ...
"""
_outputName = 'BusinessQuarterEnd'
_default_startingMonth = 3
# 'BQ'
_from_name_startingMonth = 12
_prefix = 'BQ'
@apply_wraps
def apply(self, other):
n = self.n
base = other
other = datetime(other.year, other.month, other.day,
other.hour, other.minute, other.second,
other.microsecond)
wkday, days_in_month = tslib.monthrange(other.year, other.month)
lastBDay = days_in_month - max(((wkday + days_in_month - 1)
% 7) - 4, 0)
monthsToGo = 3 - ((other.month - self.startingMonth) % 3)
if monthsToGo == 3:
monthsToGo = 0
if n > 0 and not (other.day >= lastBDay and monthsToGo == 0):
n = n - 1
elif n <= 0 and other.day > lastBDay and monthsToGo == 0:
n = n + 1
other = other + relativedelta(months=monthsToGo + 3 * n, day=31)
other = tslib._localize_pydatetime(other, base.tzinfo)
if other.weekday() > 4:
other = other - BDay()
return other
def onOffset(self, dt):
if self.normalize and not _is_normalized(dt):
return False
modMonth = (dt.month - self.startingMonth) % 3
return BMonthEnd().onOffset(dt) and modMonth == 0
_int_to_month = tslib._MONTH_ALIASES
_month_to_int = dict((v, k) for k, v in _int_to_month.items())
# TODO: This is basically the same as BQuarterEnd
class BQuarterBegin(QuarterOffset):
_outputName = "BusinessQuarterBegin"
# I suspect this is wrong for *all* of them.
_default_startingMonth = 3
_from_name_startingMonth = 1
_prefix = 'BQS'
@apply_wraps
def apply(self, other):
n = self.n
wkday, _ = tslib.monthrange(other.year, other.month)
first = _get_firstbday(wkday)
monthsSince = (other.month - self.startingMonth) % 3
if n <= 0 and monthsSince != 0: # make sure to roll forward so negate
monthsSince = monthsSince - 3
# roll forward if on same month later than first bday
if n <= 0 and (monthsSince == 0 and other.day > first):
n = n + 1
# pretend to roll back if on same month but before firstbday
elif n > 0 and (monthsSince == 0 and other.day < first):
n = n - 1
# get the first bday for result
other = other + relativedelta(months=3 * n - monthsSince)
wkday, _ = tslib.monthrange(other.year, other.month)
first = _get_firstbday(wkday)
result = datetime(other.year, other.month, first,
other.hour, other.minute, other.second,
other.microsecond)
return result
class QuarterEnd(QuarterOffset):
"""DateOffset increments between business Quarter dates
startingMonth = 1 corresponds to dates like 1/31/2007, 4/30/2007, ...
startingMonth = 2 corresponds to dates like 2/28/2007, 5/31/2007, ...
startingMonth = 3 corresponds to dates like 3/31/2007, 6/30/2007, ...
"""
_outputName = 'QuarterEnd'
_default_startingMonth = 3
_prefix = 'Q'
def __init__(self, n=1, normalize=False, **kwds):
self.n = n
self.normalize = normalize
self.startingMonth = kwds.get('startingMonth', 3)
self.kwds = kwds
def isAnchored(self):
return (self.n == 1 and self.startingMonth is not None)
@apply_wraps
def apply(self, other):
n = self.n
other = datetime(other.year, other.month, other.day,
other.hour, other.minute, other.second,
other.microsecond)
wkday, days_in_month = tslib.monthrange(other.year, other.month)
monthsToGo = 3 - ((other.month - self.startingMonth) % 3)
if monthsToGo == 3:
monthsToGo = 0
if n > 0 and not (other.day >= days_in_month and monthsToGo == 0):
n = n - 1
other = other + relativedelta(months=monthsToGo + 3 * n, day=31)
return other
@apply_index_wraps
def apply_index(self, i):
return self._end_apply_index(i, self.freqstr)
def onOffset(self, dt):
if self.normalize and not _is_normalized(dt):
return False
modMonth = (dt.month - self.startingMonth) % 3
return MonthEnd().onOffset(dt) and modMonth == 0
class QuarterBegin(QuarterOffset):
_outputName = 'QuarterBegin'
_default_startingMonth = 3
_from_name_startingMonth = 1
_prefix = 'QS'
def isAnchored(self):
return (self.n == 1 and self.startingMonth is not None)
@apply_wraps
def apply(self, other):
n = self.n
wkday, days_in_month = tslib.monthrange(other.year, other.month)
monthsSince = (other.month - self.startingMonth) % 3
if n <= 0 and monthsSince != 0:
# make sure you roll forward, so negate
monthsSince = monthsSince - 3
if n < 0 and (monthsSince == 0 and other.day > 1):
# after start, so come back an extra period as if rolled forward
n = n + 1
other = other + relativedelta(months=3 * n - monthsSince, day=1)
return other
@apply_index_wraps
def apply_index(self, i):
freq_month = 12 if self.startingMonth == 1 else self.startingMonth - 1
freqstr = 'Q-%s' % (_int_to_month[freq_month],)
return self._beg_apply_index(i, freqstr)
class YearOffset(DateOffset):
"""DateOffset that just needs a month"""
_adjust_dst = True
def __init__(self, n=1, normalize=False, **kwds):
self.month = kwds.get('month', self._default_month)
if self.month < 1 or self.month > 12:
raise ValueError('Month must go from 1 to 12')
DateOffset.__init__(self, n=n, normalize=normalize, **kwds)
@classmethod
def _from_name(cls, suffix=None):
kwargs = {}
if suffix:
kwargs['month'] = _month_to_int[suffix]
return cls(**kwargs)
@property
def rule_code(self):
return '%s-%s' % (self._prefix, _int_to_month[self.month])
class BYearEnd(YearOffset):
"""DateOffset increments between business EOM dates"""
_outputName = 'BusinessYearEnd'
_default_month = 12
_prefix = 'BA'
@apply_wraps
def apply(self, other):
n = self.n
wkday, days_in_month = tslib.monthrange(other.year, self.month)
lastBDay = (days_in_month -
max(((wkday + days_in_month - 1) % 7) - 4, 0))
years = n
if n > 0:
if (other.month < self.month or
(other.month == self.month and other.day < lastBDay)):
years -= 1
elif n <= 0:
if (other.month > self.month or
(other.month == self.month and other.day > lastBDay)):
years += 1
other = other + relativedelta(years=years)
_, days_in_month = tslib.monthrange(other.year, self.month)
result = datetime(other.year, self.month, days_in_month,
other.hour, other.minute, other.second,
other.microsecond)
if result.weekday() > 4:
result = result - BDay()
return result
class BYearBegin(YearOffset):
"""DateOffset increments between business year begin dates"""
_outputName = 'BusinessYearBegin'
_default_month = 1
_prefix = 'BAS'
@apply_wraps
def apply(self, other):
n = self.n
wkday, days_in_month = tslib.monthrange(other.year, self.month)
first = _get_firstbday(wkday)
years = n
if n > 0: # roll back first for positive n
if (other.month < self.month or
(other.month == self.month and other.day < first)):
years -= 1
elif n <= 0: # roll forward
if (other.month > self.month or
(other.month == self.month and other.day > first)):
years += 1
# set first bday for result
other = other + relativedelta(years=years)
wkday, days_in_month = tslib.monthrange(other.year, self.month)
first = _get_firstbday(wkday)
return datetime(other.year, self.month, first, other.hour,
other.minute, other.second, other.microsecond)
class YearEnd(YearOffset):
"""DateOffset increments between calendar year ends"""
_default_month = 12
_prefix = 'A'
@apply_wraps
def apply(self, other):
def _increment(date):
if date.month == self.month:
_, days_in_month = tslib.monthrange(date.year, self.month)
if date.day != days_in_month:
year = date.year
else:
year = date.year + 1
elif date.month < self.month:
year = date.year
else:
year = date.year + 1
_, days_in_month = tslib.monthrange(year, self.month)
return datetime(year, self.month, days_in_month,
date.hour, date.minute, date.second,
date.microsecond)
def _decrement(date):
year = date.year if date.month > self.month else date.year - 1
_, days_in_month = tslib.monthrange(year, self.month)
return datetime(year, self.month, days_in_month,
date.hour, date.minute, date.second,
date.microsecond)
def _rollf(date):
if date.month != self.month or\
date.day < tslib.monthrange(date.year, date.month)[1]:
date = _increment(date)
return date
n = self.n
result = other
if n > 0:
while n > 0:
result = _increment(result)
n -= 1
elif n < 0:
while n < 0:
result = _decrement(result)
n += 1
else:
# n == 0, roll forward
result = _rollf(result)
return result
@apply_index_wraps
def apply_index(self, i):
# convert month anchor to annual period tuple
return self._end_apply_index(i, self.freqstr)
def onOffset(self, dt):
if self.normalize and not _is_normalized(dt):
return False
wkday, days_in_month = tslib.monthrange(dt.year, self.month)
return self.month == dt.month and dt.day == days_in_month
class YearBegin(YearOffset):
"""DateOffset increments between calendar year begin dates"""
_default_month = 1
_prefix = 'AS'
@apply_wraps
def apply(self, other):
def _increment(date, n):
year = date.year + n - 1
if date.month >= self.month:
year += 1
return datetime(year, self.month, 1, date.hour, date.minute,
date.second, date.microsecond)
def _decrement(date, n):
year = date.year + n + 1
if date.month < self.month or (date.month == self.month and
date.day == 1):
year -= 1
return datetime(year, self.month, 1, date.hour, date.minute,
date.second, date.microsecond)
def _rollf(date):
if (date.month != self.month) or date.day > 1:
date = _increment(date, 1)
return date
n = self.n
result = other
if n > 0:
result = _increment(result, n)
elif n < 0:
result = _decrement(result, n)
else:
# n == 0, roll forward
result = _rollf(result)
return result
@apply_index_wraps
def apply_index(self, i):
freq_month = 12 if self.month == 1 else self.month - 1
freqstr = 'A-%s' % (_int_to_month[freq_month],)
return self._beg_apply_index(i, freqstr)
def onOffset(self, dt):
if self.normalize and not _is_normalized(dt):
return False
return dt.month == self.month and dt.day == 1
class FY5253(DateOffset):
"""
Describes 52-53 week fiscal year. This is also known as a 4-4-5 calendar.
It is used by companies that desire that their
fiscal year always end on the same day of the week.
It is a method of managing accounting periods.
It is a common calendar structure for some industries,
    such as retail, manufacturing and the parking industry.
For more information see:
http://en.wikipedia.org/wiki/4%E2%80%934%E2%80%935_calendar
The year may either:
- end on the last X day of the Y month.
- end on the last X day closest to the last day of the Y month.
X is a specific day of the week.
Y is a certain month of the year
Parameters
----------
n : int
weekday : {0, 1, ..., 6}
0: Mondays
1: Tuesdays
2: Wednesdays
3: Thursdays
4: Fridays
5: Saturdays
6: Sundays
startingMonth : The month in which fiscal years end. {1, 2, ... 12}
variation : str
{"nearest", "last"} for "LastOfMonth" or "NearestEndMonth"
"""
_prefix = 'RE'
_suffix_prefix_last = 'L'
_suffix_prefix_nearest = 'N'
_adjust_dst = True
def __init__(self, n=1, normalize=False, **kwds):
self.n = n
self.normalize = normalize
self.startingMonth = kwds['startingMonth']
self.weekday = kwds["weekday"]
self.variation = kwds["variation"]
self.kwds = kwds
if self.n == 0:
raise ValueError('N cannot be 0')
if self.variation not in ["nearest", "last"]:
raise ValueError('%s is not a valid variation' % self.variation)
if self.variation == "nearest":
weekday_offset = weekday(self.weekday)
self._rd_forward = relativedelta(weekday=weekday_offset)
self._rd_backward = relativedelta(weekday=weekday_offset(-1))
else:
self._offset_lwom = LastWeekOfMonth(n=1, weekday=self.weekday)
def isAnchored(self):
return self.n == 1 \
and self.startingMonth is not None \
and self.weekday is not None
def onOffset(self, dt):
if self.normalize and not _is_normalized(dt):
return False
dt = datetime(dt.year, dt.month, dt.day)
year_end = self.get_year_end(dt)
if self.variation == "nearest":
# We have to check the year end of "this" cal year AND the previous
return year_end == dt or \
self.get_year_end(dt - relativedelta(months=1)) == dt
else:
return year_end == dt
@apply_wraps
def apply(self, other):
n = self.n
prev_year = self.get_year_end(
datetime(other.year - 1, self.startingMonth, 1))
cur_year = self.get_year_end(
datetime(other.year, self.startingMonth, 1))
next_year = self.get_year_end(
datetime(other.year + 1, self.startingMonth, 1))
prev_year = tslib._localize_pydatetime(prev_year, other.tzinfo)
cur_year = tslib._localize_pydatetime(cur_year, other.tzinfo)
next_year = tslib._localize_pydatetime(next_year, other.tzinfo)
if n > 0:
if other == prev_year:
year = other.year - 1
elif other == cur_year:
year = other.year
elif other == next_year:
year = other.year + 1
elif other < prev_year:
year = other.year - 1
n -= 1
elif other < cur_year:
year = other.year
n -= 1
elif other < next_year:
year = other.year + 1
n -= 1
else:
assert False
result = self.get_year_end(datetime(year + n, self.startingMonth, 1))
result = datetime(result.year, result.month, result.day,
other.hour, other.minute, other.second, other.microsecond)
return result
else:
n = -n
if other == prev_year:
year = other.year - 1
elif other == cur_year:
year = other.year
elif other == next_year:
year = other.year + 1
elif other > next_year:
year = other.year + 1
n -= 1
elif other > cur_year:
year = other.year
n -= 1
elif other > prev_year:
year = other.year - 1
n -= 1
else:
assert False
result = self.get_year_end(datetime(year - n, self.startingMonth, 1))
result = datetime(result.year, result.month, result.day,
other.hour, other.minute, other.second, other.microsecond)
return result
def get_year_end(self, dt):
if self.variation == "nearest":
return self._get_year_end_nearest(dt)
else:
return self._get_year_end_last(dt)
def get_target_month_end(self, dt):
target_month = datetime(dt.year, self.startingMonth, 1, tzinfo=dt.tzinfo)
next_month_first_of = target_month + relativedelta(months=+1)
return next_month_first_of + relativedelta(days=-1)
def _get_year_end_nearest(self, dt):
target_date = self.get_target_month_end(dt)
if target_date.weekday() == self.weekday:
return target_date
else:
forward = target_date + self._rd_forward
backward = target_date + self._rd_backward
if forward - target_date < target_date - backward:
return forward
else:
return backward
def _get_year_end_last(self, dt):
current_year = datetime(dt.year, self.startingMonth, 1, tzinfo=dt.tzinfo)
return current_year + self._offset_lwom
@property
def rule_code(self):
suffix = self.get_rule_code_suffix()
return "%s-%s" % (self._get_prefix(), suffix)
def _get_prefix(self):
return self._prefix
def _get_suffix_prefix(self):
if self.variation == "nearest":
return self._suffix_prefix_nearest
else:
return self._suffix_prefix_last
def get_rule_code_suffix(self):
        return '%s-%s-%s' % (self._get_suffix_prefix(),
                             _int_to_month[self.startingMonth],
                             _int_to_weekday[self.weekday])
@classmethod
    def _parse_suffix(cls, variation_code, startingMonth_code, weekday_code):
        if variation_code == "N":
            variation = "nearest"
        elif variation_code == "L":
            variation = "last"
        else:
            raise ValueError(
                "Unable to parse variation_code: %s" % (variation_code,))
startingMonth = _month_to_int[startingMonth_code]
weekday = _weekday_to_int[weekday_code]
return {
"weekday": weekday,
"startingMonth": startingMonth,
"variation": variation,
}
@classmethod
def _from_name(cls, *args):
return cls(**cls._parse_suffix(*args))
class FY5253Quarter(DateOffset):
"""
DateOffset increments between business quarter dates
for 52-53 week fiscal year (also known as a 4-4-5 calendar).
It is used by companies that desire that their
fiscal year always end on the same day of the week.
It is a method of managing accounting periods.
    It is a common calendar structure for some industries,
    such as the retail, manufacturing and parking industries.
For more information see:
http://en.wikipedia.org/wiki/4%E2%80%934%E2%80%935_calendar
The year may either:
- end on the last X day of the Y month.
- end on the last X day closest to the last day of the Y month.
X is a specific day of the week.
Y is a certain month of the year
startingMonth = 1 corresponds to dates like 1/31/2007, 4/30/2007, ...
startingMonth = 2 corresponds to dates like 2/28/2007, 5/31/2007, ...
startingMonth = 3 corresponds to dates like 3/30/2007, 6/29/2007, ...
Parameters
----------
n : int
weekday : {0, 1, ..., 6}
0: Mondays
1: Tuesdays
2: Wednesdays
3: Thursdays
4: Fridays
5: Saturdays
6: Sundays
startingMonth : The month in which fiscal years end. {1, 2, ... 12}
    qtr_with_extra_week : The quarter number that has the leap
        (i.e. 14-week) quarter when needed. {1,2,3,4}
    variation : str
        {"nearest", "last"}: "nearest" for "NearestEndMonth",
        "last" for "LastOfMonth"
"""
_prefix = 'REQ'
_adjust_dst = True
def __init__(self, n=1, normalize=False, **kwds):
self.n = n
self.normalize = normalize
self.qtr_with_extra_week = kwds["qtr_with_extra_week"]
self.kwds = kwds
if self.n == 0:
raise ValueError('N cannot be 0')
        self._offset = FY5253(
            startingMonth=kwds['startingMonth'],
            weekday=kwds["weekday"],
            variation=kwds["variation"])
def isAnchored(self):
return self.n == 1 and self._offset.isAnchored()
@apply_wraps
def apply(self, other):
base = other
n = self.n
if n > 0:
while n > 0:
if not self._offset.onOffset(other):
qtr_lens = self.get_weeks(other)
start = other - self._offset
else:
start = other
qtr_lens = self.get_weeks(other + self._offset)
for weeks in qtr_lens:
start += relativedelta(weeks=weeks)
if start > other:
other = start
n -= 1
break
else:
n = -n
while n > 0:
if not self._offset.onOffset(other):
qtr_lens = self.get_weeks(other)
end = other + self._offset
else:
end = other
qtr_lens = self.get_weeks(other)
for weeks in reversed(qtr_lens):
end -= relativedelta(weeks=weeks)
if end < other:
other = end
n -= 1
break
other = datetime(other.year, other.month, other.day,
base.hour, base.minute, base.second, base.microsecond)
return other
def get_weeks(self, dt):
ret = [13] * 4
year_has_extra_week = self.year_has_extra_week(dt)
if year_has_extra_week:
ret[self.qtr_with_extra_week - 1] = 14
return ret
def year_has_extra_week(self, dt):
if self._offset.onOffset(dt):
prev_year_end = dt - self._offset
next_year_end = dt
else:
next_year_end = dt + self._offset
prev_year_end = dt - self._offset
        week_in_year = (next_year_end - prev_year_end).days // 7
return week_in_year == 53
def onOffset(self, dt):
if self.normalize and not _is_normalized(dt):
return False
if self._offset.onOffset(dt):
return True
        prev_year_end = dt - self._offset
        qtr_lens = self.get_weeks(dt)
        current = prev_year_end
for qtr_len in qtr_lens[0:4]:
current += relativedelta(weeks=qtr_len)
if dt == current:
return True
return False
@property
def rule_code(self):
suffix = self._offset.get_rule_code_suffix()
return "%s-%s" % (self._prefix,
"%s-%d" % (suffix, self.qtr_with_extra_week))
@classmethod
def _from_name(cls, *args):
return cls(**dict(FY5253._parse_suffix(*args[:-1]),
qtr_with_extra_week=int(args[-1])))
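# Illustrative usage (not part of the original module): a fiscal year ending
# on the last Saturday (weekday=5) of December, with the 14-week quarter,
# when one occurs, placed in Q1. The sample date is an assumption chosen
# only for this demo.
def _demo_fy5253quarter():  # pragma: no cover
    offset = FY5253Quarter(weekday=5, startingMonth=12,
                           qtr_with_extra_week=1, variation="last")
    return datetime(2011, 1, 2) + offset  # rolls forward to next quarter end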
class Easter(DateOffset):
'''
DateOffset for the Easter holiday using
logic defined in dateutil. Right now uses
the revised method which is valid in years
1583-4099.
'''
_adjust_dst = True
def __init__(self, n=1, **kwds):
super(Easter, self).__init__(n, **kwds)
@apply_wraps
def apply(self, other):
currentEaster = easter(other.year)
currentEaster = datetime(currentEaster.year, currentEaster.month, currentEaster.day)
currentEaster = tslib._localize_pydatetime(currentEaster, other.tzinfo)
# NOTE: easter returns a datetime.date so we have to convert to type of other
if self.n >= 0:
if other >= currentEaster:
new = easter(other.year + self.n)
else:
new = easter(other.year + self.n - 1)
else:
if other > currentEaster:
new = easter(other.year + self.n + 1)
else:
new = easter(other.year + self.n)
new = datetime(new.year, new.month, new.day, other.hour,
other.minute, other.second, other.microsecond)
return new
def onOffset(self, dt):
if self.normalize and not _is_normalized(dt):
return False
return date(dt.year, dt.month, dt.day) == easter(dt.year)
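# Illustrative usage (not part of the original module): shifting a date
# forward to the next Easter Sunday; dateutil's easter() is the same helper
# already used by Easter.apply above.
def _demo_easter():  # pragma: no cover
    return datetime(2011, 1, 1) + Easter()  # -> 2011-04-24, Easter Sunday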
#----------------------------------------------------------------------
# Ticks
import operator
def _tick_comp(op):
def f(self, other):
return op(self.delta, other.delta)
return f
class Tick(SingleConstructorOffset):
_inc = Timedelta(microseconds=1000)
__gt__ = _tick_comp(operator.gt)
__ge__ = _tick_comp(operator.ge)
__lt__ = _tick_comp(operator.lt)
__le__ = _tick_comp(operator.le)
__eq__ = _tick_comp(operator.eq)
__ne__ = _tick_comp(operator.ne)
def __add__(self, other):
if isinstance(other, Tick):
if type(self) == type(other):
return type(self)(self.n + other.n)
else:
return _delta_to_tick(self.delta + other.delta)
try:
return self.apply(other)
except ApplyTypeError:
return NotImplemented
def __eq__(self, other):
if isinstance(other, compat.string_types):
from pandas.tseries.frequencies import to_offset
other = to_offset(other)
if isinstance(other, Tick):
return self.delta == other.delta
else:
return DateOffset.__eq__(self, other)
# This is identical to DateOffset.__hash__, but has to be redefined here
# for Python 3, because we've redefined __eq__.
def __hash__(self):
return hash(self._params())
def __ne__(self, other):
if isinstance(other, compat.string_types):
from pandas.tseries.frequencies import to_offset
other = to_offset(other)
if isinstance(other, Tick):
return self.delta != other.delta
else:
return DateOffset.__ne__(self, other)
@property
def delta(self):
return self.n * self._inc
@property
def nanos(self):
return _delta_to_nanoseconds(self.delta)
def apply(self, other):
# Timestamp can handle tz and nano sec, thus no need to use apply_wraps
if isinstance(other, (datetime, np.datetime64, date)):
return as_timestamp(other) + self
if isinstance(other, timedelta):
return other + self.delta
elif isinstance(other, type(self)):
return type(self)(self.n + other.n)
else:
raise ApplyTypeError('Unhandled type: %s' % type(other).__name__)
_prefix = 'undefined'
def isAnchored(self):
return False
def _delta_to_tick(delta):
if delta.microseconds == 0:
if delta.seconds == 0:
return Day(delta.days)
else:
seconds = delta.days * 86400 + delta.seconds
if seconds % 3600 == 0:
return Hour(seconds / 3600)
elif seconds % 60 == 0:
return Minute(seconds / 60)
else:
return Second(seconds)
else:
nanos = _delta_to_nanoseconds(delta)
if nanos % 1000000 == 0:
return Milli(nanos // 1000000)
elif nanos % 1000 == 0:
return Micro(nanos // 1000)
else: # pragma: no cover
return Nano(nanos)
_delta_to_nanoseconds = tslib._delta_to_nanoseconds
class Day(Tick):
_inc = Timedelta(days=1)
_prefix = 'D'
class Hour(Tick):
_inc = Timedelta(hours=1)
_prefix = 'H'
class Minute(Tick):
_inc = Timedelta(minutes=1)
_prefix = 'T'
class Second(Tick):
_inc = Timedelta(seconds=1)
_prefix = 'S'
class Milli(Tick):
_inc = Timedelta(milliseconds=1)
_prefix = 'L'
class Micro(Tick):
_inc = Timedelta(microseconds=1)
_prefix = 'U'
class Nano(Tick):
_inc = Timedelta(nanoseconds=1)
_prefix = 'N'
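# Illustrative usage (not part of the original module): _delta_to_tick picks
# the coarsest Tick subclass that divides the delta evenly, and mixed-type
# Tick addition routes through the same conversion. This assumes the
# datetime/timedelta imports at the top of this module, as used elsewhere
# in the file.
def _demo_tick_arithmetic():  # pragma: no cover
    assert isinstance(_delta_to_tick(timedelta(hours=2)), Hour)
    assert isinstance(_delta_to_tick(timedelta(seconds=90)), Second)
    assert (Day(2) + Day(3)).n == 5
    assert Day(1) + Hour(1) == Hour(25)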
BDay = BusinessDay
BMonthEnd = BusinessMonthEnd
BMonthBegin = BusinessMonthBegin
CBMonthEnd = CustomBusinessMonthEnd
CBMonthBegin = CustomBusinessMonthBegin
CDay = CustomBusinessDay
def _get_firstbday(wkday):
"""
wkday is the result of monthrange(year, month)
If it's a saturday or sunday, increment first business day to reflect this
"""
first = 1
if wkday == 5: # on Saturday
first = 3
elif wkday == 6: # on Sunday
first = 2
return first
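# Illustrative check (not part of the original module): June 2014 starts on
# a Sunday (monthrange(2014, 6)[0] == 6), so the first business day is the
# 2nd. The year/month are assumptions for the demo.
def _demo_get_firstbday():  # pragma: no cover
    from calendar import monthrange
    return _get_firstbday(monthrange(2014, 6)[0])  # -> 2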
def generate_range(start=None, end=None, periods=None,
offset=BDay(), time_rule=None):
"""
Generates a sequence of dates corresponding to the specified time
offset. Similar to dateutil.rrule except uses pandas DateOffset
objects to represent time increments
Parameters
----------
start : datetime (default None)
end : datetime (default None)
periods : int, optional
time_rule : (legacy) name of DateOffset object to be used, optional
Corresponds with names expected by tseries.frequencies.get_offset
Notes
-----
* This method is faster for generating weekdays than dateutil.rrule
* At least two of (start, end, periods) must be specified.
* If both start and end are specified, the returned dates will
satisfy start <= date <= end.
* If both time_rule and offset are specified, time_rule supersedes offset.
Returns
-------
dates : generator object
"""
if time_rule is not None:
from pandas.tseries.frequencies import get_offset
offset = get_offset(time_rule)
start = to_datetime(start)
end = to_datetime(end)
if start and not offset.onOffset(start):
start = offset.rollforward(start)
elif end and not offset.onOffset(end):
end = offset.rollback(end)
if periods is None and end < start:
end = None
periods = 0
if end is None:
end = start + (periods - 1) * offset
if start is None:
start = end - (periods - 1) * offset
cur = start
if offset.n >= 0:
while cur <= end:
yield cur
# faster than cur + offset
next_date = offset.apply(cur)
if next_date <= cur:
raise ValueError('Offset %s did not increment date' % offset)
cur = next_date
else:
while cur >= end:
yield cur
# faster than cur + offset
next_date = offset.apply(cur)
if next_date >= cur:
raise ValueError('Offset %s did not decrement date' % offset)
cur = next_date
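# Illustrative usage (not part of the original module): a minimal sketch that
# drains the generator over one business week with the default BDay offset.
# The concrete dates are assumptions chosen only for this demo.
def _demo_generate_range():  # pragma: no cover
    dates = list(generate_range(start=datetime(2014, 1, 6),
                                end=datetime(2014, 1, 10)))
    return dates  # the five weekdays 2014-01-06 (Mon) .. 2014-01-10 (Fri)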
prefix_mapping = dict((offset._prefix, offset) for offset in [
YearBegin, # 'AS'
YearEnd, # 'A'
BYearBegin, # 'BAS'
BYearEnd, # 'BA'
BusinessDay, # 'B'
BusinessMonthBegin, # 'BMS'
BusinessMonthEnd, # 'BM'
BQuarterEnd, # 'BQ'
BQuarterBegin, # 'BQS'
BusinessHour, # 'BH'
CustomBusinessDay, # 'C'
CustomBusinessMonthEnd, # 'CBM'
CustomBusinessMonthBegin, # 'CBMS'
MonthEnd, # 'M'
MonthBegin, # 'MS'
Week, # 'W'
Second, # 'S'
Minute, # 'T'
Micro, # 'U'
QuarterEnd, # 'Q'
QuarterBegin, # 'QS'
Milli, # 'L'
Hour, # 'H'
Day, # 'D'
WeekOfMonth, # 'WOM'
FY5253,
FY5253Quarter,
])
prefix_mapping['N'] = Nano
def _make_offset(key):
"""Gets offset based on key. KeyError if prefix is bad, ValueError if
suffix is bad. All handled by `get_offset` in tseries/frequencies. Not
public."""
if key is None:
return None
split = key.split('-')
klass = prefix_mapping[split[0]]
# handles case where there's no suffix (and will TypeError if too many '-')
obj = klass._from_name(*split[1:])
obj._named = key
return obj
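# Illustrative usage (not part of the original module): resolving a frequency
# key via prefix_mapping; 'W-MON' looks up the Week class and hands the
# 'MON' suffix to Week._from_name.
def _demo_make_offset():  # pragma: no cover
    offset = _make_offset('W-MON')
    return offset._named  # -> 'W-MON'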
| gpl-3.0 |
erikgrinaker/BOUT-dev | tools/pylib/boututils/showdata.py | 1 | 19987 | """
Visualisation and animation routines
Written by Luke Easy
[email protected]
Last Updated 19/3/2015
Additional functionality by George Breyiannis 26/12/2014
"""
from __future__ import print_function
from __future__ import division
try:
from builtins import str
from builtins import chr
from builtins import range
except:
pass
#import numpy as np
from mpl_toolkits.mplot3d import axes3d
from matplotlib import pyplot as plt
from matplotlib import animation
from numpy import linspace, meshgrid, array, min, max, floor, pi
from boutdata.collect import collect
####################################################################
# Specify manually ffmpeg path
#plt.rcParams['animation.ffmpeg_path'] = '/usr/bin/ffmpeg'
FFwriter = animation.FFMpegWriter()
####################################################################
###################
#http://stackoverflow.com/questions/16732379/stop-start-pause-in-python-matplotlib-animation
#
j=-2
pause = False
###################
def showdata(vars, titles=None, legendlabels=None, surf=None, polar=None, tslice=0, movie=0, intv=1, Ncolors=25, x=None, y=None, global_colors=False, symmetric_colors=False, hold_aspect=False):
"""
    A function to animate time-dependent data from BOUT++
    Requires the numpy, mpl_toolkits, matplotlib and boutdata libraries.
To animate multiple variables on different axes:
showdata([var1, var2, var3])
To animate more than one line on a single axes:
showdata([[var1, var2, var3]])
The default graph types are:
2D (time + 1 spatial dimension) arrays = animated line plot
3D (time + 2 spatial dimensions) arrays = animated contour plot.
To use surface or polar plots:
showdata(var, surf = 1)
showdata(var, polar = 1)
Can plot different graph types on different axes. Default graph types will be used depending on the dimensions of the input arrays. To specify polar/surface plots on different axes:
showdata([var1,var2], surf = [1,0], polar = [0,1])
Movies require FFmpeg to be installed.
The tslice variable is used to control the time value that is printed on each
frame of the animation. If the input data matches the time values found within
BOUT++'s dmp data files, then these time values will be used. Otherwise, an
integer counter is used.
During animation click once to stop in the current frame. Click again to continue.
    global_colors = True: if "vars" is a list, the color levels are determined from the maximum of the maxima and the minimum of the minima over all fields in vars.
    symmetric_colors = True: color levels are symmetric about zero.
"""
    # Guard against the mutable-default-argument pitfall: create fresh lists
    # on every call so repeated invocations do not accumulate state.
    if titles is None: titles = []
    if legendlabels is None: legendlabels = []
    if surf is None: surf = []
    if polar is None: polar = []
    if x is None: x = []
    if y is None: y = []
    plt.ioff()
# Check to see whether vars is a list or not.
if isinstance(vars, list):
Nvar = len(vars)
else:
vars = [vars]
Nvar = len(vars)
if Nvar < 1:
raise ValueError("No data supplied")
# Check to see whether each variable is a list - used for line plots only
Nlines = []
for i in range(0, Nvar):
if isinstance(vars[i], list):
Nlines.append(len(vars[i]))
else:
Nlines.append(1)
vars[i] = [vars[i]]
# Sort out titles
if len(titles) == 0:
for i in range(0,Nvar):
titles.append(('Var' + str(i+1)))
elif len(titles) != Nvar:
raise ValueError('The length of the titles input list must match the length of the vars list.')
# Sort out legend labels
if len(legendlabels) == 0:
for i in range(0,Nvar):
legendlabels.append([])
for j in range(0,Nlines[i]):
legendlabels[i].append(chr(97+j))
elif (isinstance(legendlabels[0], list) != 1):
if Nvar != 1:
check = 0
for i in range(0,Nvar):
if len(legendlabels) != Nlines[i]:
check = check+1
if check == 0:
print("Warning, the legendlabels list does not contain a sublist for each variable, but it's length matches the number of lines on each plot. Will apply labels to each plot")
legendlabelsdummy = []
for i in range(0, Nvar):
legendlabelsdummy.append([])
for j in range(0,Nlines[i]):
legendlabelsdummy[i].append(legendlabels[j])
legendlabels = legendlabelsdummy
else:
print("Warning, the legendlabels list does not contain a sublist for each variable, and it's length does not match the number of lines on each plot. Will default apply labels to each plot")
legendlabels = []
for i in range(0,Nvar):
legendlabels.append([])
for j in range(0,Nlines[i]):
legendlabels[i].append(chr(97+j))
else:
if (Nlines[0] == len(legendlabels)):
legendlabels = [legendlabels]
elif len(legendlabels) != Nvar:
print("Warning, the length of the legendlabels list does not match the length of the vars list, will continue with default values")
legendlabels = []
for i in range(0,Nvar):
legendlabels.append([])
for j in range(0,Nlines[i]):
legendlabels[i].append(chr(97+j))
else:
for i in range(0,Nvar):
if isinstance(legendlabels[i], list):
if len(legendlabels[i]) != Nlines[i]:
print('Warning, the length of the legendlabel (sub)list for each plot does not match the number of datasets for each plot. Will continue with default values')
legendlabels[i] = []
for j in range(0,Nlines[i]):
legendlabels[i].append(chr(97+j))
else:
legendlabels[i] = [legendlabels[i]]
if len(legendlabels[i]) != Nlines[i]:
print('Warning, the length of the legendlabel (sub)list for each plot does not match the number of datasets for each plot. Will continue with default values')
legendlabels[i] = []
for j in range(0,Nlines[i]):
legendlabels[i].append(chr(97+j))
# Sort out surf list
if isinstance(surf, list):
if (len(surf) == Nvar):
for i in range(0, Nvar):
if surf[i] >= 1:
surf[i] = 1
else:
surf[i] = 0
elif (len(surf) == 1):
if surf[0] >= 1:
surf[0] = 1
else:
surf[0] = 0
if (Nvar > 1):
for i in range(1,Nvar):
surf.append(surf[0])
elif (len(surf) == 0):
for i in range(0,Nvar):
surf.append(0)
else:
            print('Warning, length of surf list does not match number of variables. Will default to no surface plots')
for i in range(0,Nvar):
surf.append(0)
else:
surf = [surf]
if surf[0] >= 1:
surf[0] = 1
else:
surf[0] = 0
if (Nvar > 1):
for i in range(1,Nvar):
surf.append(surf[0])
# Sort out polar list
if isinstance(polar, list):
if (len(polar) == Nvar):
for i in range(0, Nvar):
if polar[i] >= 1:
polar[i] = 1
else:
polar[i] = 0
elif (len(polar) == 1):
if polar[0] >= 1:
polar[0] = 1
else:
polar[0] = 0
if (Nvar > 1):
for i in range(1,Nvar):
polar.append(polar[0])
elif (len(polar) == 0):
for i in range(0,Nvar):
polar.append(0)
else:
print('Warning, length of polar list does not match number of variables. Will default to no polar plots')
for i in range(0,Nvar):
polar.append(0)
else:
polar = [polar]
if polar[0] >= 1:
polar[0] = 1
else:
polar[0] = 0
if (Nvar > 1):
for i in range(1,Nvar):
polar.append(polar[0])
# Determine shapes of arrays
dims = []
Ndims = []
lineplot = []
contour = []
for i in range(0,Nvar):
dims.append([])
Ndims.append([])
for j in range(0, Nlines[i]):
dims[i].append(array((vars[i][j].shape)))
Ndims[i].append(dims[i][j].shape[0])
# Perform check to make sure that data is either 2D or 3D
if (Ndims[i][j] < 2):
raise ValueError('data must be either 2 or 3 dimensional. Exiting')
if (Ndims[i][j] > 3):
raise ValueError('data must be either 2 or 3 dimensional. Exiting')
if ((Ndims[i][j] == 2) & (polar[i] != 0)):
print('Warning, data must be 3 dimensional (time, r, theta) for polar plots. Will plot lineplot instead')
if ((Ndims[i][j] == 2) & (surf[i] != 0)):
print('Warning, data must be 3 dimensional (time, x, y) for surface plots. Will plot lineplot instead')
if ((Ndims[i][j] == 3) & (Nlines[i] != 1)):
raise ValueError('cannot have multiple sets of 3D (time + 2 spatial dimensions) on each subplot')
if ((Ndims[i][j] != Ndims[i][0])):
raise ValueError('Error, Number of dimensions must be the same for all variables on each plot.')
if (Ndims[i][0] == 2): # Set polar and surf list entries to 0
polar[i] = 0
surf[i] = 0
lineplot.append(1)
contour.append(0)
else:
if ((polar[i] == 1) & (surf[i] == 1)):
print('Warning - cannot do polar and surface plots at the same time. Default to contour plot')
contour.append(1)
lineplot.append(0)
polar[i] = 0
surf[i] = 0
elif (polar[i] == 1) | (surf[i] == 1):
contour.append(0)
lineplot.append(0)
else:
contour.append(1)
lineplot.append(0)
# Obtain size of data arrays
Nt = []
Nx = []
Ny = []
for i in range(0, Nvar):
Nt.append([])
Nx.append([])
Ny.append([])
for j in range(0, Nlines[i]):
Nt[i].append(vars[i][j].shape[0])
Nx[i].append(vars[i][j].shape[1])
if (Nt[i][j] != Nt[0][0]):
raise ValueError('time dimensions must be the same for all variables.')
#if (Nx[i][j] != Nx[i][0]):
# raise ValueError('Dimensions must be the same for all variables on each plot.')
if (Ndims[i][j] == 3):
Ny[i].append(vars[i][j].shape[2])
#if (Ny[i][j] != Ny[i][0]):
# raise ValueError('Dimensions must be the same for all variables.')
# Collect time data from file
if (tslice == 0): # Only wish to collect time data if it matches
try:
t = collect('t_array')
            if t is None:
raise ValueError("t_array is None")
if len(t) != Nt[0][0]:
raise ValueError("t_array is wrong size")
except:
t = linspace(0,Nt[0][0], Nt[0][0])
# Obtain number of frames
Nframes = int(Nt[0][0]/intv)
# Generate grids for plotting
# Try to use provided grids where possible
xnew = []
ynew = []
for i in range(0,Nvar):
xnew.append([])
try:
xnew[i].append(x[i])
except:
for j in range(0, Nlines[i]):
xnew[i].append(linspace(0,Nx[i][j]-1, Nx[i][j]))
#x.append(linspace(0,Nx[i][0]-1, Nx[i][0]))
if (Ndims[i][0] == 3):
try:
ynew.append(y[i])
except:
ynew.append(linspace(0, Ny[i][0]-1, Ny[i][0]))
else:
ynew.append(0)
x = xnew
y = ynew
# Determine range of data. Used to ensure constant colour map and
# to set y scale of line plot.
fmax = []
fmin = []
xmax = []
dummymax = []
dummymin = []
clevels = []
for i in range(0,Nvar):
dummymax.append([])
dummymin.append([])
for j in range(0,Nlines[i]):
dummymax[i].append(max(vars[i][j]))
dummymin[i].append(min(vars[i][j]))
fmax.append(max(dummymax[i]))
fmin.append(min(dummymin[i]))
if(symmetric_colors):
absmax = max(abs(fmax[i]),abs(fmin[i]))
fmax[i] = absmax
fmin[i] = -absmax
for j in range(0,Nlines[i]):
dummymax[i][j] = max(x[i][j])
xmax.append(max(dummymax[i]))
if not (global_colors):
clevels.append(linspace(fmin[i], fmax[i], Ncolors))
if(global_colors):
fmaxglobal = max(fmax)
fminglobal = min(fmin)
for i in range(0,Nvar):
fmax[i] = fmaxglobal
fmin[i] = fminglobal
clevels.append(linspace(fmin[i], fmax[i], Ncolors))
# Create figures for animation plotting
if (Nvar < 2):
row = 1
col = 1
h = 6.0
w = 8.0
elif (Nvar <3):
row = 1
col = 2
h = 6.0
w = 12.0
elif (Nvar < 5):
row = 2
col = 2
h = 8.0
w = 12.0
elif (Nvar < 7):
row = 2
col = 3
h = 8.0
w = 14.0
elif (Nvar < 10) :
row = 3
col = 3
h = 12.0
w = 14.0
else:
raise ValueError('too many variables...')
fig = plt.figure(figsize=(w,h))
title = fig.suptitle(r' ', fontsize=14 )
# Initiate all list variables required for plotting here
ax = []
lines = []
plots = []
cbars = []
xstride = []
ystride = []
r = []
theta = []
# Initiate figure frame
for i in range(0,Nvar):
lines.append([])
if (lineplot[i] == 1):
ax.append(fig.add_subplot(row,col,i+1))
ax[i].set_xlim((0,xmax[i]))
ax[i].set_ylim((fmin[i], fmax[i]))
for j in range(0,Nlines[i]):
lines[i].append(ax[i].plot([],[],lw=2, label = legendlabels[i][j])[0])
#Need the [0] to 'unpack' the line object from tuple. Alternatively:
#lines[i], = lines[i]
ax[i].set_xlabel(r'x')
ax[i].set_ylabel(titles[i])
if (Nlines[i] != 1):
legendneeded = 1
for k in range(0,i):
if (Nlines[i] == Nlines[k]):
legendneeded = 0
if (legendneeded == 1):
plt.axes(ax[i])
plt.legend(loc = 0)
# Pad out unused list variables with zeros
plots.append(0)
cbars.append(0)
xstride.append(0)
ystride.append(0)
r.append(0)
theta.append(0)
elif (contour[i] == 1):
ax.append(fig.add_subplot(row,col,i+1))
#ax[i].set_xlim((0,Nx[i][0]-1))
#ax[i].set_ylim((0,Ny[i][0]-1))
ax[i].set_xlim(min(x[i]),max(x[i]))
ax[i].set_ylim(min(y[i]),max(y[i]))
ax[i].set_xlabel(r'x')
ax[i].set_ylabel(r'y')
ax[i].set_title(titles[i])
if hold_aspect:
ax[i].set_aspect('equal')
plots.append(ax[i].contourf(x[i][0],y[i],vars[i][0][0,:,:].T, Ncolors, lw=0, levels=clevels[i] ))
plt.axes(ax[i])
cbars.append(fig.colorbar(plots[i], format='%1.1e'))
# Pad out unused list variables with zeros
lines[i].append(0)
xstride.append(0)
ystride.append(0)
r.append(0)
theta.append(0)
elif (surf[i] == 1):
x[i][0],y[i] = meshgrid(x[i][0],y[i])
if (Nx[i][0]<= 20):
xstride.append(1)
else:
xstride.append(int(floor(Nx[i][0]/20)))
if (Ny[i][0]<=20):
ystride.append(1)
else:
ystride.append(int(floor(Ny[i][0]/20)))
ax.append(fig.add_subplot(row,col,i+1, projection='3d'))
plots.append(ax[i].plot_wireframe(x[i][0], y[i], vars[i][0][0,:,:].T, rstride=ystride[i], cstride=xstride[i]))
title = fig.suptitle(r'', fontsize=14 )
ax[i].set_xlabel(r'x')
ax[i].set_ylabel(r'y')
ax[i].set_zlabel(titles[i])
# Pad out unused list variables with zeros
lines[i].append(0)
cbars.append(0)
r.append(0)
theta.append(0)
elif (polar[i] == 1):
r.append(linspace(1,Nx[i][0], Nx[i][0]))
theta.append(linspace(0,2*pi, Ny[i][0]))
r[i],theta[i] = meshgrid(r[i], theta[i])
ax.append(fig.add_subplot(row,col,i+1, projection='polar'))
plots.append(ax[i].contourf(theta[i], r[i], vars[i][0][0,:,:].T, levels=clevels[i]))
plt.axes(ax[i])
cbars.append(fig.colorbar(plots[i], format='%1.1e'))
ax[i].set_rmax(Nx[i][0]-1)
ax[i].set_title(titles[i])
# Pad out unused list variables with zeros
lines[i].append(0)
xstride.append(0)
ystride.append(0)
def onClick(event):
global pause
pause ^= True
def control():
global j, pause
        if j == Nframes - 1: j = -1
if not pause:
j=j+1
return j
# Animation function
def animate(i):
j=control()
index = j*intv
for j in range(0,Nvar):
if (lineplot[j] == 1):
for k in range(0,Nlines[j]):
lines[j][k].set_data(x[j][k], vars[j][k][index,:])
elif (contour[j] == 1):
plots[j] = ax[j].contourf(x[j][0],y[j],vars[j][0][index,:,:].T, Ncolors, lw=0, levels=clevels[j])
elif (surf[j] == 1):
ax[j] = fig.add_subplot(row,col,j+1, projection='3d')
plots[j] = ax[j].plot_wireframe(x[j][0], y[j], vars[j][0][index,:,:].T, rstride=ystride[j], cstride=xstride[j])
ax[j].set_zlim(fmin[j],fmax[j])
ax[j].set_xlabel(r'x')
ax[j].set_ylabel(r'y')
ax[j].set_title(titles[j])
elif (polar[j] == 1):
plots[j] = ax[j].contourf(theta[j], r[j], vars[j][0][index,:,:].T, levels=clevels[j])
ax[j].set_rmax(Nx[j][0]-1)
if (tslice == 0):
title.set_text('t = %1.2e' % t[index])
else:
title.set_text('t = %i' % index)
return plots
def init():
global j, pause
j=-2
pause = False
return animate(0)
# Call Animation function
fig.canvas.mpl_connect('button_press_event', onClick)
anim = animation.FuncAnimation(fig, animate, init_func=init, frames=Nframes)
# Save movie with given name
if ((isinstance(movie,str)==1)):
try:
anim.save(movie+'.mp4',writer = FFwriter, fps=30, extra_args=['-vcodec', 'libx264'])
except Exception:
print("Save failed: Check ffmpeg path")
# Save movie with default name
if ((isinstance(movie,str)==0)):
if (movie != 0):
try:
anim.save('animation.mp4',writer = FFwriter, fps=28, extra_args=['-vcodec', 'libx264'])
except Exception:
print("Save failed: Check ffmpeg path")
# Show animation
if (movie == 0):
plt.show()
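# Illustrative usage (not part of the original file): a minimal sketch that
# animates two synthetic 2D (time + x) travelling waves side by side. The
# data and titles are assumptions made up for this demo only.
def _showdata_demo():  # pragma: no cover
    from numpy import sin, newaxis
    t = linspace(0, 2 * pi, 40)
    xgrid = linspace(0, 1, 100)
    var1 = sin(t[:, newaxis] + 2 * pi * xgrid[newaxis, :])
    var2 = sin(t[:, newaxis] - 2 * pi * xgrid[newaxis, :])
    showdata([var1, var2], titles=['forward wave', 'backward wave'])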
"""
To do list
1. Speed up animations ????
2. Look at theta in polar plots - periodic?!?
3. Log axes, colorbars
4. Figureplot
"""
| gpl-3.0 |
aldian/tensorflow | tensorflow/contrib/learn/python/learn/learn_io/io_test.py | 137 | 5063 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""tf.learn IO operation tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import random
# pylint: disable=wildcard-import
from tensorflow.contrib.learn.python import learn
from tensorflow.contrib.learn.python.learn import datasets
from tensorflow.contrib.learn.python.learn.estimators._sklearn import accuracy_score
from tensorflow.contrib.learn.python.learn.learn_io import *
from tensorflow.python.platform import test
# pylint: enable=wildcard-import
class IOTest(test.TestCase):
# pylint: disable=undefined-variable
"""tf.learn IO operation tests."""
def test_pandas_dataframe(self):
if HAS_PANDAS:
import pandas as pd # pylint: disable=g-import-not-at-top
random.seed(42)
iris = datasets.load_iris()
data = pd.DataFrame(iris.data)
labels = pd.DataFrame(iris.target)
classifier = learn.LinearClassifier(
feature_columns=learn.infer_real_valued_columns_from_input(data),
n_classes=3)
classifier.fit(data, labels, steps=100)
score = accuracy_score(labels[0], list(classifier.predict_classes(data)))
self.assertGreater(score, 0.5, "Failed with score = {0}".format(score))
else:
print("No pandas installed. pandas-related tests are skipped.")
def test_pandas_series(self):
if HAS_PANDAS:
import pandas as pd # pylint: disable=g-import-not-at-top
random.seed(42)
iris = datasets.load_iris()
data = pd.DataFrame(iris.data)
labels = pd.Series(iris.target)
classifier = learn.LinearClassifier(
feature_columns=learn.infer_real_valued_columns_from_input(data),
n_classes=3)
classifier.fit(data, labels, steps=100)
score = accuracy_score(labels, list(classifier.predict_classes(data)))
self.assertGreater(score, 0.5, "Failed with score = {0}".format(score))
def test_string_data_formats(self):
if HAS_PANDAS:
import pandas as pd # pylint: disable=g-import-not-at-top
with self.assertRaises(ValueError):
learn.io.extract_pandas_data(pd.DataFrame({"Test": ["A", "B"]}))
with self.assertRaises(ValueError):
learn.io.extract_pandas_labels(pd.DataFrame({"Test": ["A", "B"]}))
def test_dask_io(self):
if HAS_DASK and HAS_PANDAS:
import pandas as pd # pylint: disable=g-import-not-at-top
import dask.dataframe as dd # pylint: disable=g-import-not-at-top
# test dask.dataframe
df = pd.DataFrame(
dict(
a=list("aabbcc"), b=list(range(6))),
index=pd.date_range(
start="20100101", periods=6))
ddf = dd.from_pandas(df, npartitions=3)
extracted_ddf = extract_dask_data(ddf)
self.assertEqual(
extracted_ddf.divisions, (0, 2, 4, 6),
"Failed with divisions = {0}".format(extracted_ddf.divisions))
self.assertEqual(
extracted_ddf.columns.tolist(), ["a", "b"],
"Failed with columns = {0}".format(extracted_ddf.columns))
# test dask.series
labels = ddf["a"]
extracted_labels = extract_dask_labels(labels)
self.assertEqual(
extracted_labels.divisions, (0, 2, 4, 6),
"Failed with divisions = {0}".format(extracted_labels.divisions))
# labels should only have one column
with self.assertRaises(ValueError):
extract_dask_labels(ddf)
else:
print("No dask installed. dask-related tests are skipped.")
def test_dask_iris_classification(self):
if HAS_DASK and HAS_PANDAS:
import pandas as pd # pylint: disable=g-import-not-at-top
import dask.dataframe as dd # pylint: disable=g-import-not-at-top
random.seed(42)
iris = datasets.load_iris()
data = pd.DataFrame(iris.data)
data = dd.from_pandas(data, npartitions=2)
labels = pd.DataFrame(iris.target)
labels = dd.from_pandas(labels, npartitions=2)
classifier = learn.LinearClassifier(
feature_columns=learn.infer_real_valued_columns_from_input(data),
n_classes=3)
classifier.fit(data, labels, steps=100)
predictions = data.map_partitions(classifier.predict).compute()
score = accuracy_score(labels.compute(), predictions)
self.assertGreater(score, 0.5, "Failed with score = {0}".format(score))
if __name__ == "__main__":
test.main()
| apache-2.0 |
mbayon/TFG-MachineLearning | venv/lib/python3.6/site-packages/pandas/tests/io/formats/test_style.py | 5 | 36923 | import copy
import textwrap
import re
import pytest
import numpy as np
import pandas as pd
from pandas import DataFrame
import pandas.util.testing as tm
jinja2 = pytest.importorskip('jinja2')
from pandas.io.formats.style import Styler, _get_level_lengths # noqa
class TestStyler(object):
def setup_method(self, method):
np.random.seed(24)
self.s = DataFrame({'A': np.random.permutation(range(6))})
self.df = DataFrame({'A': [0, 1], 'B': np.random.randn(2)})
self.f = lambda x: x
self.g = lambda x: x
def h(x, foo='bar'):
return pd.Series(['color: %s' % foo], index=x.index, name=x.name)
self.h = h
self.styler = Styler(self.df)
self.attrs = pd.DataFrame({'A': ['color: red', 'color: blue']})
self.dataframes = [
self.df,
pd.DataFrame({'f': [1., 2.], 'o': ['a', 'b'],
'c': pd.Categorical(['a', 'b'])})
]
def test_init_non_pandas(self):
with pytest.raises(TypeError):
Styler([1, 2, 3])
def test_init_series(self):
result = Styler(pd.Series([1, 2]))
assert result.data.ndim == 2
def test_repr_html_ok(self):
self.styler._repr_html_()
def test_update_ctx(self):
self.styler._update_ctx(self.attrs)
expected = {(0, 0): ['color: red'],
(1, 0): ['color: blue']}
assert self.styler.ctx == expected
def test_update_ctx_flatten_multi(self):
attrs = DataFrame({"A": ['color: red; foo: bar',
'color: blue; foo: baz']})
self.styler._update_ctx(attrs)
expected = {(0, 0): ['color: red', ' foo: bar'],
(1, 0): ['color: blue', ' foo: baz']}
assert self.styler.ctx == expected
    def test_update_ctx_flatten_multi_trailing_semi(self):
attrs = DataFrame({"A": ['color: red; foo: bar;',
'color: blue; foo: baz;']})
self.styler._update_ctx(attrs)
expected = {(0, 0): ['color: red', ' foo: bar'],
(1, 0): ['color: blue', ' foo: baz']}
assert self.styler.ctx == expected
def test_copy(self):
s2 = copy.copy(self.styler)
assert self.styler is not s2
assert self.styler.ctx is s2.ctx # shallow
assert self.styler._todo is s2._todo
self.styler._update_ctx(self.attrs)
self.styler.highlight_max()
assert self.styler.ctx == s2.ctx
assert self.styler._todo == s2._todo
def test_deepcopy(self):
s2 = copy.deepcopy(self.styler)
assert self.styler is not s2
assert self.styler.ctx is not s2.ctx
assert self.styler._todo is not s2._todo
self.styler._update_ctx(self.attrs)
self.styler.highlight_max()
assert self.styler.ctx != s2.ctx
assert s2._todo == []
assert self.styler._todo != s2._todo
def test_clear(self):
s = self.df.style.highlight_max()._compute()
assert len(s.ctx) > 0
assert len(s._todo) > 0
s.clear()
assert len(s.ctx) == 0
assert len(s._todo) == 0
def test_render(self):
df = pd.DataFrame({"A": [0, 1]})
style = lambda x: pd.Series(["color: red", "color: blue"], name=x.name)
s = Styler(df, uuid='AB').apply(style)
s.render()
# it worked?
def test_render_empty_dfs(self):
empty_df = DataFrame()
es = Styler(empty_df)
es.render()
# An index but no columns
DataFrame(columns=['a']).style.render()
# A column but no index
DataFrame(index=['a']).style.render()
# No IndexError raised?
def test_render_double(self):
df = pd.DataFrame({"A": [0, 1]})
style = lambda x: pd.Series(["color: red; border: 1px",
"color: blue; border: 2px"], name=x.name)
s = Styler(df, uuid='AB').apply(style)
s.render()
# it worked?
def test_set_properties(self):
df = pd.DataFrame({"A": [0, 1]})
result = df.style.set_properties(color='white',
size='10px')._compute().ctx
# order is deterministic
v = ["color: white", "size: 10px"]
expected = {(0, 0): v, (1, 0): v}
assert result.keys() == expected.keys()
for v1, v2 in zip(result.values(), expected.values()):
assert sorted(v1) == sorted(v2)
def test_set_properties_subset(self):
df = pd.DataFrame({'A': [0, 1]})
result = df.style.set_properties(subset=pd.IndexSlice[0, 'A'],
color='white')._compute().ctx
expected = {(0, 0): ['color: white']}
assert result == expected
def test_empty_index_name_doesnt_display(self):
# https://github.com/pandas-dev/pandas/pull/12090#issuecomment-180695902
df = pd.DataFrame({'A': [1, 2], 'B': [3, 4], 'C': [5, 6]})
result = df.style._translate()
expected = [[{'class': 'blank level0', 'type': 'th', 'value': '',
'is_visible': True, 'display_value': ''},
{'class': 'col_heading level0 col0',
'display_value': 'A',
'type': 'th',
'value': 'A',
'is_visible': True,
},
{'class': 'col_heading level0 col1',
'display_value': 'B',
'type': 'th',
'value': 'B',
'is_visible': True,
},
{'class': 'col_heading level0 col2',
'display_value': 'C',
'type': 'th',
'value': 'C',
'is_visible': True,
}]]
assert result['head'] == expected
def test_index_name(self):
# https://github.com/pandas-dev/pandas/issues/11655
df = pd.DataFrame({'A': [1, 2], 'B': [3, 4], 'C': [5, 6]})
result = df.set_index('A').style._translate()
expected = [[{'class': 'blank level0', 'type': 'th', 'value': '',
'display_value': '', 'is_visible': True},
{'class': 'col_heading level0 col0', 'type': 'th',
'value': 'B', 'display_value': 'B', 'is_visible': True},
{'class': 'col_heading level0 col1', 'type': 'th',
'value': 'C', 'display_value': 'C', 'is_visible': True}],
[{'class': 'index_name level0', 'type': 'th',
'value': 'A'},
{'class': 'blank', 'type': 'th', 'value': ''},
{'class': 'blank', 'type': 'th', 'value': ''}]]
assert result['head'] == expected
def test_multiindex_name(self):
# https://github.com/pandas-dev/pandas/issues/11655
df = pd.DataFrame({'A': [1, 2], 'B': [3, 4], 'C': [5, 6]})
result = df.set_index(['A', 'B']).style._translate()
expected = [[
{'class': 'blank', 'type': 'th', 'value': '',
'display_value': '', 'is_visible': True},
{'class': 'blank level0', 'type': 'th', 'value': '',
'display_value': '', 'is_visible': True},
{'class': 'col_heading level0 col0', 'type': 'th',
'value': 'C', 'display_value': 'C', 'is_visible': True}],
[{'class': 'index_name level0', 'type': 'th',
'value': 'A'},
{'class': 'index_name level1', 'type': 'th',
'value': 'B'},
{'class': 'blank', 'type': 'th', 'value': ''}]]
assert result['head'] == expected
def test_numeric_columns(self):
# https://github.com/pandas-dev/pandas/issues/12125
# smoke test for _translate
df = pd.DataFrame({0: [1, 2, 3]})
df.style._translate()
def test_apply_axis(self):
df = pd.DataFrame({'A': [0, 0], 'B': [1, 1]})
f = lambda x: ['val: %s' % x.max() for v in x]
result = df.style.apply(f, axis=1)
assert len(result._todo) == 1
assert len(result.ctx) == 0
result._compute()
expected = {(0, 0): ['val: 1'], (0, 1): ['val: 1'],
(1, 0): ['val: 1'], (1, 1): ['val: 1']}
assert result.ctx == expected
result = df.style.apply(f, axis=0)
expected = {(0, 0): ['val: 0'], (0, 1): ['val: 1'],
(1, 0): ['val: 0'], (1, 1): ['val: 1']}
result._compute()
assert result.ctx == expected
result = df.style.apply(f) # default
result._compute()
assert result.ctx == expected
def test_apply_subset(self):
axes = [0, 1]
slices = [pd.IndexSlice[:], pd.IndexSlice[:, ['A']],
pd.IndexSlice[[1], :], pd.IndexSlice[[1], ['A']],
pd.IndexSlice[:2, ['A', 'B']]]
for ax in axes:
for slice_ in slices:
result = self.df.style.apply(self.h, axis=ax, subset=slice_,
foo='baz')._compute().ctx
expected = dict(((r, c), ['color: baz'])
for r, row in enumerate(self.df.index)
for c, col in enumerate(self.df.columns)
if row in self.df.loc[slice_].index and
col in self.df.loc[slice_].columns)
assert result == expected
def test_applymap_subset(self):
def f(x):
return 'foo: bar'
slices = [pd.IndexSlice[:], pd.IndexSlice[:, ['A']],
pd.IndexSlice[[1], :], pd.IndexSlice[[1], ['A']],
pd.IndexSlice[:2, ['A', 'B']]]
for slice_ in slices:
result = self.df.style.applymap(f, subset=slice_)._compute().ctx
expected = dict(((r, c), ['foo: bar'])
for r, row in enumerate(self.df.index)
for c, col in enumerate(self.df.columns)
if row in self.df.loc[slice_].index and
col in self.df.loc[slice_].columns)
assert result == expected
def test_empty(self):
df = pd.DataFrame({'A': [1, 0]})
s = df.style
s.ctx = {(0, 0): ['color: red'],
(1, 0): ['']}
result = s._translate()['cellstyle']
expected = [{'props': [['color', ' red']], 'selector': 'row0_col0'},
{'props': [['', '']], 'selector': 'row1_col0'}]
assert result == expected
def test_bar_align_left(self):
df = pd.DataFrame({'A': [0, 1, 2]})
result = df.style.bar()._compute().ctx
expected = {
(0, 0): ['width: 10em', ' height: 80%'],
(1, 0): ['width: 10em', ' height: 80%',
'background: linear-gradient('
'90deg,#d65f5f 50.0%, transparent 0%)'],
(2, 0): ['width: 10em', ' height: 80%',
'background: linear-gradient('
'90deg,#d65f5f 100.0%, transparent 0%)']
}
assert result == expected
result = df.style.bar(color='red', width=50)._compute().ctx
expected = {
(0, 0): ['width: 10em', ' height: 80%'],
(1, 0): ['width: 10em', ' height: 80%',
'background: linear-gradient('
'90deg,red 25.0%, transparent 0%)'],
(2, 0): ['width: 10em', ' height: 80%',
'background: linear-gradient('
'90deg,red 50.0%, transparent 0%)']
}
assert result == expected
df['C'] = ['a'] * len(df)
result = df.style.bar(color='red', width=50)._compute().ctx
assert result == expected
df['C'] = df['C'].astype('category')
result = df.style.bar(color='red', width=50)._compute().ctx
assert result == expected
def test_bar_align_left_0points(self):
df = pd.DataFrame([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
result = df.style.bar()._compute().ctx
expected = {(0, 0): ['width: 10em', ' height: 80%'],
(0, 1): ['width: 10em', ' height: 80%'],
(0, 2): ['width: 10em', ' height: 80%'],
(1, 0): ['width: 10em', ' height: 80%',
'background: linear-gradient(90deg,#d65f5f 50.0%,'
' transparent 0%)'],
(1, 1): ['width: 10em', ' height: 80%',
'background: linear-gradient(90deg,#d65f5f 50.0%,'
' transparent 0%)'],
(1, 2): ['width: 10em', ' height: 80%',
'background: linear-gradient(90deg,#d65f5f 50.0%,'
' transparent 0%)'],
(2, 0): ['width: 10em', ' height: 80%',
'background: linear-gradient(90deg,#d65f5f 100.0%'
', transparent 0%)'],
(2, 1): ['width: 10em', ' height: 80%',
'background: linear-gradient(90deg,#d65f5f 100.0%'
', transparent 0%)'],
(2, 2): ['width: 10em', ' height: 80%',
'background: linear-gradient(90deg,#d65f5f 100.0%'
', transparent 0%)']}
assert result == expected
result = df.style.bar(axis=1)._compute().ctx
expected = {(0, 0): ['width: 10em', ' height: 80%'],
(0, 1): ['width: 10em', ' height: 80%',
'background: linear-gradient(90deg,#d65f5f 50.0%,'
' transparent 0%)'],
(0, 2): ['width: 10em', ' height: 80%',
'background: linear-gradient(90deg,#d65f5f 100.0%'
', transparent 0%)'],
(1, 0): ['width: 10em', ' height: 80%'],
(1, 1): ['width: 10em', ' height: 80%',
'background: linear-gradient(90deg,#d65f5f 50.0%'
', transparent 0%)'],
(1, 2): ['width: 10em', ' height: 80%',
'background: linear-gradient(90deg,#d65f5f 100.0%'
', transparent 0%)'],
(2, 0): ['width: 10em', ' height: 80%'],
(2, 1): ['width: 10em', ' height: 80%',
'background: linear-gradient(90deg,#d65f5f 50.0%'
', transparent 0%)'],
(2, 2): ['width: 10em', ' height: 80%',
'background: linear-gradient(90deg,#d65f5f 100.0%'
', transparent 0%)']}
assert result == expected
def test_bar_align_mid_pos_and_neg(self):
df = pd.DataFrame({'A': [-10, 0, 20, 90]})
result = df.style.bar(align='mid', color=[
'#d65f5f', '#5fba7d'])._compute().ctx
expected = {(0, 0): ['width: 10em', ' height: 80%',
'background: linear-gradient(90deg, '
'transparent 0%, transparent 0.0%, #d65f5f 0.0%, '
'#d65f5f 10.0%, transparent 10.0%)'],
(1, 0): ['width: 10em', ' height: 80%',
'background: linear-gradient(90deg, '
'transparent 0%, transparent 10.0%, '
'#d65f5f 10.0%, #d65f5f 10.0%, '
'transparent 10.0%)'],
(2, 0): ['width: 10em', ' height: 80%',
'background: linear-gradient(90deg, '
'transparent 0%, transparent 10.0%, #5fba7d 10.0%'
', #5fba7d 30.0%, transparent 30.0%)'],
(3, 0): ['width: 10em', ' height: 80%',
'background: linear-gradient(90deg, '
'transparent 0%, transparent 10.0%, '
'#5fba7d 10.0%, #5fba7d 100.0%, '
'transparent 100.0%)']}
assert result == expected
def test_bar_align_mid_all_pos(self):
df = pd.DataFrame({'A': [10, 20, 50, 100]})
result = df.style.bar(align='mid', color=[
'#d65f5f', '#5fba7d'])._compute().ctx
expected = {(0, 0): ['width: 10em', ' height: 80%',
'background: linear-gradient(90deg, '
'transparent 0%, transparent 0.0%, #5fba7d 0.0%, '
'#5fba7d 10.0%, transparent 10.0%)'],
(1, 0): ['width: 10em', ' height: 80%',
'background: linear-gradient(90deg, '
'transparent 0%, transparent 0.0%, #5fba7d 0.0%, '
'#5fba7d 20.0%, transparent 20.0%)'],
(2, 0): ['width: 10em', ' height: 80%',
'background: linear-gradient(90deg, '
'transparent 0%, transparent 0.0%, #5fba7d 0.0%, '
'#5fba7d 50.0%, transparent 50.0%)'],
(3, 0): ['width: 10em', ' height: 80%',
'background: linear-gradient(90deg, '
'transparent 0%, transparent 0.0%, #5fba7d 0.0%, '
'#5fba7d 100.0%, transparent 100.0%)']}
assert result == expected
def test_bar_align_mid_all_neg(self):
df = pd.DataFrame({'A': [-100, -60, -30, -20]})
result = df.style.bar(align='mid', color=[
'#d65f5f', '#5fba7d'])._compute().ctx
expected = {(0, 0): ['width: 10em', ' height: 80%',
'background: linear-gradient(90deg, '
'transparent 0%, transparent 0.0%, '
'#d65f5f 0.0%, #d65f5f 100.0%, '
'transparent 100.0%)'],
(1, 0): ['width: 10em', ' height: 80%',
'background: linear-gradient(90deg, '
'transparent 0%, transparent 40.0%, '
'#d65f5f 40.0%, #d65f5f 100.0%, '
'transparent 100.0%)'],
(2, 0): ['width: 10em', ' height: 80%',
'background: linear-gradient(90deg, '
'transparent 0%, transparent 70.0%, '
'#d65f5f 70.0%, #d65f5f 100.0%, '
'transparent 100.0%)'],
(3, 0): ['width: 10em', ' height: 80%',
'background: linear-gradient(90deg, '
'transparent 0%, transparent 80.0%, '
'#d65f5f 80.0%, #d65f5f 100.0%, '
'transparent 100.0%)']}
assert result == expected
def test_bar_align_zero_pos_and_neg(self):
# See https://github.com/pandas-dev/pandas/pull/14757
df = pd.DataFrame({'A': [-10, 0, 20, 90]})
result = df.style.bar(align='zero', color=[
'#d65f5f', '#5fba7d'], width=90)._compute().ctx
expected = {(0, 0): ['width: 10em', ' height: 80%',
'background: linear-gradient(90deg, '
'transparent 0%, transparent 45.0%, '
'#d65f5f 45.0%, #d65f5f 50%, '
'transparent 50%)'],
(1, 0): ['width: 10em', ' height: 80%',
'background: linear-gradient(90deg, '
'transparent 0%, transparent 50%, '
'#5fba7d 50%, #5fba7d 50.0%, '
'transparent 50.0%)'],
(2, 0): ['width: 10em', ' height: 80%',
'background: linear-gradient(90deg, '
'transparent 0%, transparent 50%, #5fba7d 50%, '
'#5fba7d 60.0%, transparent 60.0%)'],
(3, 0): ['width: 10em', ' height: 80%',
'background: linear-gradient(90deg, '
'transparent 0%, transparent 50%, #5fba7d 50%, '
'#5fba7d 95.0%, transparent 95.0%)']}
assert result == expected
def test_bar_bad_align_raises(self):
df = pd.DataFrame({'A': [-100, -60, -30, -20]})
with pytest.raises(ValueError):
df.style.bar(align='poorly', color=['#d65f5f', '#5fba7d'])
def test_highlight_null(self, null_color='red'):
df = pd.DataFrame({'A': [0, np.nan]})
result = df.style.highlight_null()._compute().ctx
expected = {(0, 0): [''],
(1, 0): ['background-color: red']}
assert result == expected
def test_nonunique_raises(self):
df = pd.DataFrame([[1, 2]], columns=['A', 'A'])
with pytest.raises(ValueError):
df.style
with pytest.raises(ValueError):
Styler(df)
def test_caption(self):
styler = Styler(self.df, caption='foo')
result = styler.render()
assert all(['caption' in result, 'foo' in result])
styler = self.df.style
result = styler.set_caption('baz')
assert styler is result
assert styler.caption == 'baz'
def test_uuid(self):
styler = Styler(self.df, uuid='abc123')
result = styler.render()
assert 'abc123' in result
styler = self.df.style
result = styler.set_uuid('aaa')
assert result is styler
assert result.uuid == 'aaa'
def test_unique_id(self):
# See https://github.com/pandas-dev/pandas/issues/16780
df = pd.DataFrame({'a': [1, 3, 5, 6], 'b': [2, 4, 12, 21]})
result = df.style.render(uuid='test')
assert 'test' in result
ids = re.findall('id="(.*?)"', result)
assert np.unique(ids).size == len(ids)
def test_table_styles(self):
style = [{'selector': 'th', 'props': [('foo', 'bar')]}]
styler = Styler(self.df, table_styles=style)
result = ' '.join(styler.render().split())
assert 'th { foo: bar; }' in result
styler = self.df.style
result = styler.set_table_styles(style)
assert styler is result
assert styler.table_styles == style
def test_table_attributes(self):
attributes = 'class="foo" data-bar'
styler = Styler(self.df, table_attributes=attributes)
result = styler.render()
assert 'class="foo" data-bar' in result
result = self.df.style.set_table_attributes(attributes).render()
assert 'class="foo" data-bar' in result
def test_precision(self):
with pd.option_context('display.precision', 10):
s = Styler(self.df)
assert s.precision == 10
s = Styler(self.df, precision=2)
assert s.precision == 2
s2 = s.set_precision(4)
assert s is s2
assert s.precision == 4
def test_apply_none(self):
def f(x):
return pd.DataFrame(np.where(x == x.max(), 'color: red', ''),
index=x.index, columns=x.columns)
result = (pd.DataFrame([[1, 2], [3, 4]])
.style.apply(f, axis=None)._compute().ctx)
assert result[(1, 1)] == ['color: red']
def test_trim(self):
result = self.df.style.render() # trim=True
assert result.count('#') == 0
result = self.df.style.highlight_max().render()
assert result.count('#') == len(self.df.columns)
def test_highlight_max(self):
df = pd.DataFrame([[1, 2], [3, 4]], columns=['A', 'B'])
# max(df) = min(-df)
for max_ in [True, False]:
if max_:
attr = 'highlight_max'
else:
df = -df
attr = 'highlight_min'
result = getattr(df.style, attr)()._compute().ctx
assert result[(1, 1)] == ['background-color: yellow']
result = getattr(df.style, attr)(color='green')._compute().ctx
assert result[(1, 1)] == ['background-color: green']
result = getattr(df.style, attr)(subset='A')._compute().ctx
assert result[(1, 0)] == ['background-color: yellow']
result = getattr(df.style, attr)(axis=0)._compute().ctx
expected = {(1, 0): ['background-color: yellow'],
(1, 1): ['background-color: yellow'],
(0, 1): [''], (0, 0): ['']}
assert result == expected
result = getattr(df.style, attr)(axis=1)._compute().ctx
expected = {(0, 1): ['background-color: yellow'],
(1, 1): ['background-color: yellow'],
(0, 0): [''], (1, 0): ['']}
assert result == expected
        # separate since we can't negate the strs
df['C'] = ['a', 'b']
result = df.style.highlight_max()._compute().ctx
expected = {(1, 1): ['background-color: yellow']}
result = df.style.highlight_min()._compute().ctx
expected = {(0, 0): ['background-color: yellow']}
def test_export(self):
f = lambda x: 'color: red' if x > 0 else 'color: blue'
g = lambda x, y, z: 'color: %s' if x > 0 else 'color: %s' % z
style1 = self.styler
style1.applymap(f)\
.applymap(g, y='a', z='b')\
.highlight_max()
result = style1.export()
style2 = self.df.style
style2.use(result)
assert style1._todo == style2._todo
style2.render()
def test_display_format(self):
df = pd.DataFrame(np.random.random(size=(2, 2)))
ctx = df.style.format("{:0.1f}")._translate()
assert all(['display_value' in c for c in row] for row in ctx['body'])
assert (all([len(c['display_value']) <= 3 for c in row[1:]]
for row in ctx['body']))
assert len(ctx['body'][0][1]['display_value'].lstrip('-')) <= 3
def test_display_format_raises(self):
df = pd.DataFrame(np.random.randn(2, 2))
with pytest.raises(TypeError):
df.style.format(5)
with pytest.raises(TypeError):
df.style.format(True)
def test_display_subset(self):
df = pd.DataFrame([[.1234, .1234], [1.1234, 1.1234]],
columns=['a', 'b'])
ctx = df.style.format({"a": "{:0.1f}", "b": "{0:.2%}"},
subset=pd.IndexSlice[0, :])._translate()
expected = '0.1'
assert ctx['body'][0][1]['display_value'] == expected
assert ctx['body'][1][1]['display_value'] == '1.1234'
assert ctx['body'][0][2]['display_value'] == '12.34%'
raw_11 = '1.1234'
ctx = df.style.format("{:0.1f}",
subset=pd.IndexSlice[0, :])._translate()
assert ctx['body'][0][1]['display_value'] == expected
assert ctx['body'][1][1]['display_value'] == raw_11
ctx = df.style.format("{:0.1f}",
subset=pd.IndexSlice[0, :])._translate()
assert ctx['body'][0][1]['display_value'] == expected
assert ctx['body'][1][1]['display_value'] == raw_11
ctx = df.style.format("{:0.1f}",
subset=pd.IndexSlice['a'])._translate()
assert ctx['body'][0][1]['display_value'] == expected
assert ctx['body'][0][2]['display_value'] == '0.1234'
ctx = df.style.format("{:0.1f}",
subset=pd.IndexSlice[0, 'a'])._translate()
assert ctx['body'][0][1]['display_value'] == expected
assert ctx['body'][1][1]['display_value'] == raw_11
ctx = df.style.format("{:0.1f}",
subset=pd.IndexSlice[[0, 1], ['a']])._translate()
assert ctx['body'][0][1]['display_value'] == expected
assert ctx['body'][1][1]['display_value'] == '1.1'
assert ctx['body'][0][2]['display_value'] == '0.1234'
assert ctx['body'][1][2]['display_value'] == '1.1234'
def test_display_dict(self):
df = pd.DataFrame([[.1234, .1234], [1.1234, 1.1234]],
columns=['a', 'b'])
ctx = df.style.format({"a": "{:0.1f}", "b": "{0:.2%}"})._translate()
assert ctx['body'][0][1]['display_value'] == '0.1'
assert ctx['body'][0][2]['display_value'] == '12.34%'
df['c'] = ['aaa', 'bbb']
ctx = df.style.format({"a": "{:0.1f}", "c": str.upper})._translate()
assert ctx['body'][0][1]['display_value'] == '0.1'
assert ctx['body'][0][3]['display_value'] == 'AAA'
def test_bad_apply_shape(self):
df = pd.DataFrame([[1, 2], [3, 4]])
with pytest.raises(ValueError):
df.style._apply(lambda x: 'x', subset=pd.IndexSlice[[0, 1], :])
with pytest.raises(ValueError):
df.style._apply(lambda x: [''], subset=pd.IndexSlice[[0, 1], :])
with pytest.raises(ValueError):
df.style._apply(lambda x: ['', '', '', ''])
with pytest.raises(ValueError):
df.style._apply(lambda x: ['', '', ''], subset=1)
with pytest.raises(ValueError):
df.style._apply(lambda x: ['', '', ''], axis=1)
def test_apply_bad_return(self):
def f(x):
return ''
df = pd.DataFrame([[1, 2], [3, 4]])
with pytest.raises(TypeError):
df.style._apply(f, axis=None)
def test_apply_bad_labels(self):
def f(x):
return pd.DataFrame(index=[1, 2], columns=['a', 'b'])
df = pd.DataFrame([[1, 2], [3, 4]])
with pytest.raises(ValueError):
df.style._apply(f, axis=None)
def test_get_level_lengths(self):
index = pd.MultiIndex.from_product([['a', 'b'], [0, 1, 2]])
expected = {(0, 0): 3, (0, 3): 3, (1, 0): 1, (1, 1): 1, (1, 2): 1,
(1, 3): 1, (1, 4): 1, (1, 5): 1}
result = _get_level_lengths(index)
tm.assert_dict_equal(result, expected)
def test_get_level_lengths_un_sorted(self):
index = pd.MultiIndex.from_arrays([
[1, 1, 2, 1],
['a', 'b', 'b', 'd']
])
expected = {(0, 0): 2, (0, 2): 1, (0, 3): 1,
(1, 0): 1, (1, 1): 1, (1, 2): 1, (1, 3): 1}
result = _get_level_lengths(index)
tm.assert_dict_equal(result, expected)
def test_mi_sparse(self):
df = pd.DataFrame({'A': [1, 2]},
index=pd.MultiIndex.from_arrays([['a', 'a'],
[0, 1]]))
result = df.style._translate()
body_0 = result['body'][0][0]
expected_0 = {
"value": "a", "display_value": "a", "is_visible": True,
"type": "th", "attributes": ["rowspan=2"],
"class": "row_heading level0 row0", "id": "level0_row0"
}
tm.assert_dict_equal(body_0, expected_0)
body_1 = result['body'][0][1]
expected_1 = {
"value": 0, "display_value": 0, "is_visible": True,
"type": "th", "class": "row_heading level1 row0",
"id": "level1_row0"
}
tm.assert_dict_equal(body_1, expected_1)
body_10 = result['body'][1][0]
expected_10 = {
"value": 'a', "display_value": 'a', "is_visible": False,
"type": "th", "class": "row_heading level0 row1",
"id": "level0_row1"
}
tm.assert_dict_equal(body_10, expected_10)
head = result['head'][0]
expected = [
{'type': 'th', 'class': 'blank', 'value': '',
'is_visible': True, "display_value": ''},
{'type': 'th', 'class': 'blank level0', 'value': '',
'is_visible': True, 'display_value': ''},
{'type': 'th', 'class': 'col_heading level0 col0', 'value': 'A',
'is_visible': True, 'display_value': 'A'}]
assert head == expected
def test_mi_sparse_disabled(self):
with pd.option_context('display.multi_sparse', False):
df = pd.DataFrame({'A': [1, 2]},
index=pd.MultiIndex.from_arrays([['a', 'a'],
[0, 1]]))
result = df.style._translate()
body = result['body']
for row in body:
assert 'attributes' not in row[0]
def test_mi_sparse_index_names(self):
df = pd.DataFrame({'A': [1, 2]}, index=pd.MultiIndex.from_arrays(
[['a', 'a'], [0, 1]],
names=['idx_level_0', 'idx_level_1'])
)
result = df.style._translate()
head = result['head'][1]
expected = [{
'class': 'index_name level0', 'value': 'idx_level_0',
'type': 'th'},
{'class': 'index_name level1', 'value': 'idx_level_1',
'type': 'th'},
{'class': 'blank', 'value': '', 'type': 'th'}]
assert head == expected
def test_mi_sparse_column_names(self):
df = pd.DataFrame(
np.arange(16).reshape(4, 4),
index=pd.MultiIndex.from_arrays(
[['a', 'a', 'b', 'a'], [0, 1, 1, 2]],
names=['idx_level_0', 'idx_level_1']),
columns=pd.MultiIndex.from_arrays(
[['C1', 'C1', 'C2', 'C2'], [1, 0, 1, 0]],
names=['col_0', 'col_1']
)
)
result = df.style._translate()
head = result['head'][1]
expected = [
{'class': 'blank', 'value': '', 'display_value': '',
'type': 'th', 'is_visible': True},
{'class': 'index_name level1', 'value': 'col_1',
'display_value': 'col_1', 'is_visible': True, 'type': 'th'},
{'class': 'col_heading level1 col0',
'display_value': 1,
'is_visible': True,
'type': 'th',
'value': 1},
{'class': 'col_heading level1 col1',
'display_value': 0,
'is_visible': True,
'type': 'th',
'value': 0},
{'class': 'col_heading level1 col2',
'display_value': 1,
'is_visible': True,
'type': 'th',
'value': 1},
{'class': 'col_heading level1 col3',
'display_value': 0,
'is_visible': True,
'type': 'th',
'value': 0},
]
assert head == expected
class TestStylerMatplotlibDep(object):
def test_background_gradient(self):
tm._skip_if_no_mpl()
df = pd.DataFrame([[1, 2], [2, 4]], columns=['A', 'B'])
for c_map in [None, 'YlOrRd']:
result = df.style.background_gradient(cmap=c_map)._compute().ctx
assert all("#" in x[0] for x in result.values())
assert result[(0, 0)] == result[(0, 1)]
assert result[(1, 0)] == result[(1, 1)]
result = df.style.background_gradient(
subset=pd.IndexSlice[1, 'A'])._compute().ctx
assert result[(1, 0)] == ['background-color: #fff7fb']
def test_block_names():
# catch accidental removal of a block
expected = {
'before_style', 'style', 'table_styles', 'before_cellstyle',
'cellstyle', 'before_table', 'table', 'caption', 'thead', 'tbody',
'after_table', 'before_head_rows', 'head_tr', 'after_head_rows',
'before_rows', 'tr', 'after_rows',
}
result = set(Styler.template.blocks)
assert result == expected
def test_from_custom_template(tmpdir):
p = tmpdir.mkdir("templates").join("myhtml.tpl")
p.write(textwrap.dedent("""\
{% extends "html.tpl" %}
{% block table %}
<h1>{{ table_title|default("My Table") }}</h1>
{{ super() }}
{% endblock table %}"""))
result = Styler.from_custom_template(str(tmpdir.join('templates')),
'myhtml.tpl')
assert issubclass(result, Styler)
assert result.env is not Styler.env
assert result.template is not Styler.template
styler = result(pd.DataFrame({"A": [1, 2]}))
assert styler.render()
def test_shim():
# https://github.com/pandas-dev/pandas/pull/16059
# Remove in 0.21
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
from pandas.formats.style import Styler as _styler # noqa
| mit |
ankurankan/scikit-learn | sklearn/covariance/graph_lasso_.py | 17 | 23130 | """GraphLasso: sparse inverse covariance estimation with an l1-penalized
estimator.
"""
# Author: Gael Varoquaux <[email protected]>
# License: BSD 3 clause
# Copyright: INRIA
import warnings
import operator
import sys
import time
import numpy as np
from scipy import linalg
from .empirical_covariance_ import (empirical_covariance, EmpiricalCovariance,
log_likelihood)
from ..utils import ConvergenceWarning
from ..utils.extmath import pinvh
from ..utils.validation import check_random_state
from ..linear_model import lars_path
from ..linear_model import cd_fast
from ..cross_validation import _check_cv as check_cv, cross_val_score
from ..externals.joblib import Parallel, delayed
import collections
# Helper functions to compute the objective and dual objective functions
# of the l1-penalized estimator
def _objective(mle, precision_, alpha):
"""Evaluation of the graph-lasso objective function
The objective function is made of a shifted, scaled version of the
normalized log-likelihood (i.e. its empirical mean over the samples) and a
penalisation term to promote sparsity
"""
p = precision_.shape[0]
cost = - 2. * log_likelihood(mle, precision_) + p * np.log(2 * np.pi)
cost += alpha * (np.abs(precision_).sum()
- np.abs(np.diag(precision_)).sum())
return cost
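# In symbols (a sketch; S is the empirical covariance `mle`, Theta the
# precision matrix, p = n_features): up to additive constants, this is
#   cost(Theta) ~ tr(S Theta) - log det(Theta)
#                 + alpha * sum_{i != j} |Theta_ij|
# i.e. the negative penalized log-likelihood, with the diagonal of Theta
# excluded from the l1 penalty.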
def _dual_gap(emp_cov, precision_, alpha):
"""Expression of the dual gap convergence criterion
The specific definition is given in Duchi "Projected Subgradient Methods
for Learning Sparse Gaussians".
"""
gap = np.sum(emp_cov * precision_)
gap -= precision_.shape[0]
gap += alpha * (np.abs(precision_).sum()
- np.abs(np.diag(precision_)).sum())
return gap
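# Written out, the quantity computed above is
#   gap = tr(S Theta) - p + alpha * sum_{i != j} |Theta_ij|
# which should vanish at the optimum; the main loop stops once
# abs(gap) < tol.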
def alpha_max(emp_cov):
"""Find the maximum alpha for which there are some non-zeros off-diagonal.
Parameters
----------
emp_cov : 2D array, (n_features, n_features)
The sample covariance matrix
Notes
-----
This results from the bound for all the Lasso problems that are solved
in GraphLasso: each time, the row of cov corresponds to Xy. As the
bound for alpha is given by `max(abs(Xy))`, the result follows.
"""
A = np.copy(emp_cov)
A.flat[::A.shape[0] + 1] = 0
return np.max(np.abs(A))
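# Quick illustrative check (values chosen purely for demonstration):
# >>> alpha_max(np.array([[2.0, 0.5], [0.5, 1.0]]))
# 0.5
# i.e. the largest absolute off-diagonal entry of the covariance.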
# The g-lasso algorithm
def graph_lasso(emp_cov, alpha, cov_init=None, mode='cd', tol=1e-4,
max_iter=100, verbose=False, return_costs=False,
eps=np.finfo(np.float).eps, return_n_iter=False):
"""l1-penalized covariance estimator
Parameters
----------
emp_cov : 2D ndarray, shape (n_features, n_features)
Empirical covariance from which to compute the covariance estimate.
alpha : positive float
The regularization parameter: the higher alpha, the more
regularization, the sparser the inverse covariance.
cov_init : 2D array (n_features, n_features), optional
The initial guess for the covariance.
mode : {'cd', 'lars'}
The Lasso solver to use: coordinate descent or LARS. Use LARS for
very sparse underlying graphs, where p > n. Elsewhere prefer cd
which is more numerically stable.
tol : positive float, optional
The tolerance to declare convergence: if the dual gap goes below
this value, iterations are stopped.
max_iter : integer, optional
The maximum number of iterations.
verbose : boolean, optional
If verbose is True, the objective function and dual gap are
printed at each iteration.
return_costs : boolean, optional
If return_costs is True, the objective function and dual gap
at each iteration are returned.
eps : float, optional
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems.
return_n_iter : bool, optional
Whether or not to return the number of iterations.
Returns
-------
covariance : 2D ndarray, shape (n_features, n_features)
The estimated covariance matrix.
precision : 2D ndarray, shape (n_features, n_features)
The estimated (sparse) precision matrix.
costs : list of (objective, dual_gap) pairs
The list of values of the objective function and the dual gap at
each iteration. Returned only if return_costs is True.
n_iter : int
Number of iterations. Returned only if `return_n_iter` is set to True.
See Also
--------
GraphLasso, GraphLassoCV
Notes
-----
The algorithm employed to solve this problem is the GLasso algorithm,
from the Friedman 2008 Biostatistics paper. It is the same algorithm
as in the R `glasso` package.
One possible difference with the `glasso` R package is that the
diagonal coefficients are not penalized.
"""
_, n_features = emp_cov.shape
if alpha == 0:
if return_costs:
precision_ = linalg.inv(emp_cov)
cost = - 2. * log_likelihood(emp_cov, precision_)
cost += n_features * np.log(2 * np.pi)
d_gap = np.sum(emp_cov * precision_) - n_features
if return_n_iter:
return emp_cov, precision_, (cost, d_gap), 0
else:
return emp_cov, precision_, (cost, d_gap)
else:
if return_n_iter:
return emp_cov, linalg.inv(emp_cov), 0
else:
return emp_cov, linalg.inv(emp_cov)
if cov_init is None:
covariance_ = emp_cov.copy()
else:
covariance_ = cov_init.copy()
# As a trivial regularization (Tikhonov-like), we scale down the
# off-diagonal coefficients of our starting point: this is needed, as
# in the cross-validation the cov_init can easily be
# ill-conditioned, and the CV loop blows up. Besides, this takes a
# conservative standpoint on the initial conditions, and it tends to
# make the convergence go faster.
covariance_ *= 0.95
diagonal = emp_cov.flat[::n_features + 1]
covariance_.flat[::n_features + 1] = diagonal
precision_ = pinvh(covariance_)
indices = np.arange(n_features)
costs = list()
# The different l1 regression solvers have different numerical errors
if mode == 'cd':
errors = dict(over='raise', invalid='ignore')
else:
errors = dict(invalid='raise')
try:
for i in range(max_iter):
for idx in range(n_features):
sub_covariance = covariance_[indices != idx].T[indices != idx]
row = emp_cov[idx, indices != idx]
with np.errstate(**errors):
if mode == 'cd':
# Use coordinate descent
coefs = -(precision_[indices != idx, idx]
/ (precision_[idx, idx] + 1000 * eps))
coefs, _, _, _ = cd_fast.enet_coordinate_descent_gram(
coefs, alpha, 0, sub_covariance, row, row,
max_iter, tol, check_random_state(None), False)
else:
# Use LARS
_, _, coefs = lars_path(
sub_covariance, row, Xy=row, Gram=sub_covariance,
alpha_min=alpha / (n_features - 1), copy_Gram=True,
method='lars')
coefs = coefs[:, -1]
# Update the precision matrix
precision_[idx, idx] = (
1. / (covariance_[idx, idx]
- np.dot(covariance_[indices != idx, idx], coefs)))
precision_[indices != idx, idx] = (- precision_[idx, idx]
* coefs)
precision_[idx, indices != idx] = (- precision_[idx, idx]
* coefs)
coefs = np.dot(sub_covariance, coefs)
covariance_[idx, indices != idx] = coefs
covariance_[indices != idx, idx] = coefs
d_gap = _dual_gap(emp_cov, precision_, alpha)
cost = _objective(emp_cov, precision_, alpha)
if verbose:
print(
'[graph_lasso] Iteration % 3i, cost % 3.2e, dual gap %.3e'
% (i, cost, d_gap))
if return_costs:
costs.append((cost, d_gap))
if np.abs(d_gap) < tol:
break
if not np.isfinite(cost) and i > 0:
raise FloatingPointError('Non SPD result: the system is '
'too ill-conditioned for this solver')
else:
warnings.warn('graph_lasso: did not converge after %i iterations:'
' dual gap: %.3e' % (max_iter, d_gap),
ConvergenceWarning)
except FloatingPointError as e:
e.args = (e.args[0]
+ '. The system is too ill-conditioned for this solver',)
raise e
if return_costs:
if return_n_iter:
return covariance_, precision_, costs, i + 1
else:
return covariance_, precision_, costs
else:
if return_n_iter:
return covariance_, precision_, i + 1
else:
return covariance_, precision_
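# Minimal usage sketch of this functional interface (assuming `X` is an
# (n_samples, n_features) data array; variable names are illustrative):
# emp_cov = empirical_covariance(X)
# cov_, prec_ = graph_lasso(emp_cov, alpha=0.05)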
class GraphLasso(EmpiricalCovariance):
"""Sparse inverse covariance estimation with an l1-penalized estimator.
Parameters
----------
alpha : positive float, optional
The regularization parameter: the higher alpha, the more
regularization, the sparser the inverse covariance.
cov_init : 2D array (n_features, n_features), optional
The initial guess for the covariance.
mode : {'cd', 'lars'}
The Lasso solver to use: coordinate descent or LARS. Use LARS for
very sparse underlying graphs, where p > n. Elsewhere prefer cd
which is more numerically stable.
tol : positive float, optional
The tolerance to declare convergence: if the dual gap goes below
this value, iterations are stopped.
max_iter : integer, optional
The maximum number of iterations.
verbose : boolean, optional
If verbose is True, the objective function and dual gap are
printed at each iteration.
Attributes
----------
covariance_ : array-like, shape (n_features, n_features)
Estimated covariance matrix
precision_ : array-like, shape (n_features, n_features)
Estimated pseudo inverse matrix.
n_iter_ : int
Number of iterations run.
See Also
--------
graph_lasso, GraphLassoCV
"""
def __init__(self, alpha=.01, mode='cd', tol=1e-4, max_iter=100,
verbose=False, assume_centered=False):
self.alpha = alpha
self.mode = mode
self.tol = tol
self.max_iter = max_iter
self.verbose = verbose
self.assume_centered = assume_centered
# The base class needs this for the score method
self.store_precision = True
def fit(self, X, y=None):
X = np.asarray(X)
if self.assume_centered:
self.location_ = np.zeros(X.shape[1])
else:
self.location_ = X.mean(0)
emp_cov = empirical_covariance(
X, assume_centered=self.assume_centered)
self.covariance_, self.precision_, self.n_iter_ = graph_lasso(
emp_cov, alpha=self.alpha, mode=self.mode, tol=self.tol,
max_iter=self.max_iter, verbose=self.verbose,
return_n_iter=True)
return self
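# Minimal usage sketch (assuming `X` is an (n_samples, n_features) array;
# the attributes are those documented in the class docstring above):
# model = GraphLasso(alpha=0.05).fit(X)
# sparse_precision = model.precision_
# estimated_cov = model.covariance_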
# Cross-validation with GraphLasso
def graph_lasso_path(X, alphas, cov_init=None, X_test=None, mode='cd',
tol=1e-4, max_iter=100, verbose=False):
"""l1-penalized covariance estimator along a path of decreasing alphas
Parameters
----------
X : 2D ndarray, shape (n_samples, n_features)
Data from which to compute the covariance estimate.
alphas : list of positive floats
The list of regularization parameters, decreasing order.
X_test : 2D array, shape (n_test_samples, n_features), optional
Optional test matrix to measure generalisation error.
mode : {'cd', 'lars'}
The Lasso solver to use: coordinate descent or LARS. Use LARS for
very sparse underlying graphs, where p > n. Elsewhere prefer cd
which is more numerically stable.
tol : positive float, optional
The tolerance to declare convergence: if the dual gap goes below
this value, iterations are stopped.
max_iter : integer, optional
The maximum number of iterations.
verbose : integer, optional
The higher the verbosity flag, the more information is printed
during the fitting.
Returns
-------
covariances_ : List of 2D ndarray, shape (n_features, n_features)
The estimated covariance matrices.
precisions_ : List of 2D ndarray, shape (n_features, n_features)
The estimated (sparse) precision matrices.
scores_ : List of float
The generalisation error (log-likelihood) on the test data.
Returned only if test data is passed.
"""
inner_verbose = max(0, verbose - 1)
emp_cov = empirical_covariance(X)
if cov_init is None:
covariance_ = emp_cov.copy()
else:
covariance_ = cov_init
covariances_ = list()
precisions_ = list()
scores_ = list()
if X_test is not None:
test_emp_cov = empirical_covariance(X_test)
for alpha in alphas:
try:
# Capture the errors, and move on
covariance_, precision_ = graph_lasso(
emp_cov, alpha=alpha, cov_init=covariance_, mode=mode, tol=tol,
max_iter=max_iter, verbose=inner_verbose)
covariances_.append(covariance_)
precisions_.append(precision_)
if X_test is not None:
this_score = log_likelihood(test_emp_cov, precision_)
except FloatingPointError:
this_score = -np.inf
covariances_.append(np.nan)
precisions_.append(np.nan)
if X_test is not None:
if not np.isfinite(this_score):
this_score = -np.inf
scores_.append(this_score)
if verbose == 1:
sys.stderr.write('.')
elif verbose > 1:
if X_test is not None:
print('[graph_lasso_path] alpha: %.2e, score: %.2e'
% (alpha, this_score))
else:
print('[graph_lasso_path] alpha: %.2e' % alpha)
if X_test is not None:
return covariances_, precisions_, scores_
return covariances_, precisions_
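# Sketch of a typical call (illustrative names; pass X_test to also get
# the per-alpha log-likelihood scores on held-out data):
# covs, precs, scores = graph_lasso_path(X_train, alphas=[0.1, 0.05, 0.01],
#                                        X_test=X_test)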
class GraphLassoCV(GraphLasso):
"""Sparse inverse covariance w/ cross-validated choice of the l1 penalty
Parameters
----------
alphas : integer, or list positive float, optional
If an integer is given, it fixes the number of points on the
grids of alpha to be used. If a list is given, it gives the
grid to be used. See the notes in the class docstring for
more details.
n_refinements: strictly positive integer
The number of times the grid is refined. Not used if explicit
values of alphas are passed.
cv : cross-validation generator, optional
see sklearn.cross_validation module. If None is passed, defaults to
a 3-fold strategy
tol: positive float, optional
The tolerance to declare convergence: if the dual gap goes below
this value, iterations are stopped.
max_iter: integer, optional
Maximum number of iterations.
mode: {'cd', 'lars'}
The Lasso solver to use: coordinate descent or LARS. Use LARS for
very sparse underlying graphs, where number of features is greater
than number of samples. Elsewhere prefer cd which is more numerically
stable.
n_jobs: int, optional
number of jobs to run in parallel (default 1).
verbose: boolean, optional
If verbose is True, the objective function and duality gap are
printed at each iteration.
Attributes
----------
covariance_ : numpy.ndarray, shape (n_features, n_features)
Estimated covariance matrix.
precision_ : numpy.ndarray, shape (n_features, n_features)
Estimated precision matrix (inverse covariance).
alpha_ : float
Penalization parameter selected.
cv_alphas_ : list of float
All penalization parameters explored.
`grid_scores`: 2D numpy.ndarray (n_alphas, n_folds)
Log-likelihood score on left-out data across folds.
n_iter_ : int
Number of iterations run for the optimal alpha.
See Also
--------
graph_lasso, GraphLasso
Notes
-----
The search for the optimal penalization parameter (alpha) is done on an
iteratively refined grid: first the cross-validated scores on a grid are
computed, then a new refined grid is centered around the maximum, and so
on.
One of the challenges which is faced here is that the solvers can
fail to converge to a well-conditioned estimate. The corresponding
values of alpha then come out as missing values, but the optimum may
be close to these missing values.
"""
def __init__(self, alphas=4, n_refinements=4, cv=None, tol=1e-4,
max_iter=100, mode='cd', n_jobs=1, verbose=False,
assume_centered=False):
self.alphas = alphas
self.n_refinements = n_refinements
self.mode = mode
self.tol = tol
self.max_iter = max_iter
self.verbose = verbose
self.cv = cv
self.n_jobs = n_jobs
self.assume_centered = assume_centered
# The base class needs this for the score method
self.store_precision = True
def fit(self, X, y=None):
X = np.asarray(X)
if self.assume_centered:
self.location_ = np.zeros(X.shape[1])
else:
self.location_ = X.mean(0)
emp_cov = empirical_covariance(
X, assume_centered=self.assume_centered)
cv = check_cv(self.cv, X, y, classifier=False)
# List of (alpha, scores, covs)
path = list()
n_alphas = self.alphas
inner_verbose = max(0, self.verbose - 1)
if isinstance(n_alphas, collections.Sequence):
alphas = self.alphas
n_refinements = 1
else:
n_refinements = self.n_refinements
alpha_1 = alpha_max(emp_cov)
alpha_0 = 1e-2 * alpha_1
alphas = np.logspace(np.log10(alpha_0), np.log10(alpha_1),
n_alphas)[::-1]
t0 = time.time()
for i in range(n_refinements):
with warnings.catch_warnings():
# No need to see the convergence warnings on this grid:
# they will always be points that will not converge
# during the cross-validation
warnings.simplefilter('ignore', ConvergenceWarning)
# Compute the cross-validated loss on the current grid
# NOTE: Warm-restarting graph_lasso_path has been tried, and
# this did not provide any gain (same execution time with or
# without).
this_path = Parallel(
n_jobs=self.n_jobs,
verbose=self.verbose
)(
delayed(graph_lasso_path)(
X[train], alphas=alphas,
X_test=X[test], mode=self.mode,
tol=self.tol,
max_iter=int(.1 * self.max_iter),
verbose=inner_verbose)
for train, test in cv)
# Little dance to transform the list into what we need
covs, _, scores = zip(*this_path)
covs = zip(*covs)
scores = zip(*scores)
path.extend(zip(alphas, scores, covs))
path = sorted(path, key=operator.itemgetter(0), reverse=True)
# Find the maximum (avoid using built in 'max' function to
# have a fully-reproducible selection of the smallest alpha
# in case of equality)
best_score = -np.inf
last_finite_idx = 0
for index, (alpha, scores, _) in enumerate(path):
this_score = np.mean(scores)
if this_score >= .1 / np.finfo(np.float).eps:
this_score = np.nan
if np.isfinite(this_score):
last_finite_idx = index
if this_score >= best_score:
best_score = this_score
best_index = index
# Refine the grid
if best_index == 0:
# We do not need to go back: we have chosen
# the highest value of alpha for which there are
# non-zero coefficients
alpha_1 = path[0][0]
alpha_0 = path[1][0]
elif (best_index == last_finite_idx
and not best_index == len(path) - 1):
# We have non-converged models on the upper bound of the
# grid, we need to refine the grid there
alpha_1 = path[best_index][0]
alpha_0 = path[best_index + 1][0]
elif best_index == len(path) - 1:
alpha_1 = path[best_index][0]
alpha_0 = 0.01 * path[best_index][0]
else:
alpha_1 = path[best_index - 1][0]
alpha_0 = path[best_index + 1][0]
if not isinstance(n_alphas, collections.Sequence):
alphas = np.logspace(np.log10(alpha_1), np.log10(alpha_0),
n_alphas + 2)
alphas = alphas[1:-1]
if self.verbose and n_refinements > 1:
print('[GraphLassoCV] Done refinement % 2i out of %i: % 3is'
% (i + 1, n_refinements, time.time() - t0))
path = list(zip(*path))
grid_scores = list(path[1])
alphas = list(path[0])
# Finally, compute the score with alpha = 0
alphas.append(0)
grid_scores.append(cross_val_score(EmpiricalCovariance(), X,
cv=cv, n_jobs=self.n_jobs,
verbose=inner_verbose))
self.grid_scores = np.array(grid_scores)
best_alpha = alphas[best_index]
self.alpha_ = best_alpha
self.cv_alphas_ = alphas
# Finally fit the model with the selected alpha
self.covariance_, self.precision_, self.n_iter_ = graph_lasso(
emp_cov, alpha=best_alpha, mode=self.mode, tol=self.tol,
max_iter=self.max_iter, verbose=inner_verbose,
return_n_iter=True)
return self
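# Minimal usage sketch (assuming `X` is an (n_samples, n_features) array):
# model = GraphLassoCV(alphas=4, n_refinements=4).fit(X)
# model.alpha_ # penalty selected by cross-validation
# model.precision_ # sparse precision estimated at the selected alpha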
| bsd-3-clause |
wanggang3333/scikit-learn | sklearn/feature_extraction/tests/test_text.py | 110 | 34127 | from __future__ import unicode_literals
import warnings
from sklearn.feature_extraction.text import strip_tags
from sklearn.feature_extraction.text import strip_accents_unicode
from sklearn.feature_extraction.text import strip_accents_ascii
from sklearn.feature_extraction.text import HashingVectorizer
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction.text import ENGLISH_STOP_WORDS
from sklearn.cross_validation import train_test_split
from sklearn.cross_validation import cross_val_score
from sklearn.grid_search import GridSearchCV
from sklearn.pipeline import Pipeline
from sklearn.svm import LinearSVC
from sklearn.base import clone
import numpy as np
from nose import SkipTest
from nose.tools import assert_equal
from nose.tools import assert_false
from nose.tools import assert_not_equal
from nose.tools import assert_true
from nose.tools import assert_almost_equal
from numpy.testing import assert_array_almost_equal
from numpy.testing import assert_array_equal
from numpy.testing import assert_raises
from sklearn.utils.testing import (assert_in, assert_less, assert_greater,
assert_warns_message, assert_raise_message,
clean_warning_registry)
from collections import defaultdict, Mapping
from functools import partial
import pickle
from io import StringIO
JUNK_FOOD_DOCS = (
"the pizza pizza beer copyright",
"the pizza burger beer copyright",
"the the pizza beer beer copyright",
"the burger beer beer copyright",
"the coke burger coke copyright",
"the coke burger burger",
)
NOTJUNK_FOOD_DOCS = (
"the salad celeri copyright",
"the salad salad sparkling water copyright",
"the the celeri celeri copyright",
"the tomato tomato salad water",
"the tomato salad water copyright",
)
ALL_FOOD_DOCS = JUNK_FOOD_DOCS + NOTJUNK_FOOD_DOCS
def uppercase(s):
return strip_accents_unicode(s).upper()
def strip_eacute(s):
return s.replace('\xe9', 'e')
def split_tokenize(s):
return s.split()
def lazy_analyze(s):
return ['the_ultimate_feature']
def test_strip_accents():
# check some classical latin accentuated symbols
a = '\xe0\xe1\xe2\xe3\xe4\xe5\xe7\xe8\xe9\xea\xeb'
expected = 'aaaaaaceeee'
assert_equal(strip_accents_unicode(a), expected)
a = '\xec\xed\xee\xef\xf1\xf2\xf3\xf4\xf5\xf6\xf9\xfa\xfb\xfc\xfd'
expected = 'iiiinooooouuuuy'
assert_equal(strip_accents_unicode(a), expected)
# check some arabic
a = '\u0625' # halef with a hamza below
expected = '\u0627' # simple halef
assert_equal(strip_accents_unicode(a), expected)
# mix letters accentuated and not
a = "this is \xe0 test"
expected = 'this is a test'
assert_equal(strip_accents_unicode(a), expected)
def test_to_ascii():
# check some classical latin accentuated symbols
a = '\xe0\xe1\xe2\xe3\xe4\xe5\xe7\xe8\xe9\xea\xeb'
expected = 'aaaaaaceeee'
assert_equal(strip_accents_ascii(a), expected)
a = '\xec\xed\xee\xef\xf1\xf2\xf3\xf4\xf5\xf6\xf9\xfa\xfb\xfc\xfd'
expected = 'iiiinooooouuuuy'
assert_equal(strip_accents_ascii(a), expected)
# check some arabic
a = '\u0625' # halef with a hamza below
expected = '' # halef has no direct ascii match
assert_equal(strip_accents_ascii(a), expected)
# mix letters accentuated and not
a = "this is \xe0 test"
expected = 'this is a test'
assert_equal(strip_accents_ascii(a), expected)
def test_word_analyzer_unigrams():
for Vectorizer in (CountVectorizer, HashingVectorizer):
wa = Vectorizer(strip_accents='ascii').build_analyzer()
text = ("J'ai mang\xe9 du kangourou ce midi, "
"c'\xe9tait pas tr\xeas bon.")
expected = ['ai', 'mange', 'du', 'kangourou', 'ce', 'midi',
'etait', 'pas', 'tres', 'bon']
assert_equal(wa(text), expected)
text = "This is a test, really.\n\n I met Harry yesterday."
expected = ['this', 'is', 'test', 'really', 'met', 'harry',
'yesterday']
assert_equal(wa(text), expected)
wa = Vectorizer(input='file').build_analyzer()
text = StringIO("This is a test with a file-like object!")
expected = ['this', 'is', 'test', 'with', 'file', 'like',
'object']
assert_equal(wa(text), expected)
# with custom preprocessor
wa = Vectorizer(preprocessor=uppercase).build_analyzer()
text = ("J'ai mang\xe9 du kangourou ce midi, "
" c'\xe9tait pas tr\xeas bon.")
expected = ['AI', 'MANGE', 'DU', 'KANGOUROU', 'CE', 'MIDI',
'ETAIT', 'PAS', 'TRES', 'BON']
assert_equal(wa(text), expected)
# with custom tokenizer
wa = Vectorizer(tokenizer=split_tokenize,
strip_accents='ascii').build_analyzer()
text = ("J'ai mang\xe9 du kangourou ce midi, "
"c'\xe9tait pas tr\xeas bon.")
expected = ["j'ai", 'mange', 'du', 'kangourou', 'ce', 'midi,',
"c'etait", 'pas', 'tres', 'bon.']
assert_equal(wa(text), expected)
def test_word_analyzer_unigrams_and_bigrams():
wa = CountVectorizer(analyzer="word", strip_accents='unicode',
ngram_range=(1, 2)).build_analyzer()
text = "J'ai mang\xe9 du kangourou ce midi, c'\xe9tait pas tr\xeas bon."
expected = ['ai', 'mange', 'du', 'kangourou', 'ce', 'midi',
'etait', 'pas', 'tres', 'bon', 'ai mange', 'mange du',
'du kangourou', 'kangourou ce', 'ce midi', 'midi etait',
'etait pas', 'pas tres', 'tres bon']
assert_equal(wa(text), expected)
def test_unicode_decode_error():
# decode_error defaults to strict, so this should fail
# First, encode (as bytes) a unicode string.
text = "J'ai mang\xe9 du kangourou ce midi, c'\xe9tait pas tr\xeas bon."
text_bytes = text.encode('utf-8')
# Then let the Analyzer try to decode it as ascii. It should fail,
# because we have given it an incorrect encoding.
wa = CountVectorizer(ngram_range=(1, 2), encoding='ascii').build_analyzer()
assert_raises(UnicodeDecodeError, wa, text_bytes)
ca = CountVectorizer(analyzer='char', ngram_range=(3, 6),
encoding='ascii').build_analyzer()
assert_raises(UnicodeDecodeError, ca, text_bytes)
def test_char_ngram_analyzer():
cnga = CountVectorizer(analyzer='char', strip_accents='unicode',
ngram_range=(3, 6)).build_analyzer()
text = "J'ai mang\xe9 du kangourou ce midi, c'\xe9tait pas tr\xeas bon"
expected = ["j'a", "'ai", 'ai ', 'i m', ' ma']
assert_equal(cnga(text)[:5], expected)
expected = ['s tres', ' tres ', 'tres b', 'res bo', 'es bon']
assert_equal(cnga(text)[-5:], expected)
text = "This \n\tis a test, really.\n\n I met Harry yesterday"
expected = ['thi', 'his', 'is ', 's i', ' is']
assert_equal(cnga(text)[:5], expected)
expected = [' yeste', 'yester', 'esterd', 'sterda', 'terday']
assert_equal(cnga(text)[-5:], expected)
cnga = CountVectorizer(input='file', analyzer='char',
ngram_range=(3, 6)).build_analyzer()
text = StringIO("This is a test with a file-like object!")
expected = ['thi', 'his', 'is ', 's i', ' is']
assert_equal(cnga(text)[:5], expected)
def test_char_wb_ngram_analyzer():
cnga = CountVectorizer(analyzer='char_wb', strip_accents='unicode',
ngram_range=(3, 6)).build_analyzer()
text = "This \n\tis a test, really.\n\n I met Harry yesterday"
expected = [' th', 'thi', 'his', 'is ', ' thi']
assert_equal(cnga(text)[:5], expected)
expected = ['yester', 'esterd', 'sterda', 'terday', 'erday ']
assert_equal(cnga(text)[-5:], expected)
cnga = CountVectorizer(input='file', analyzer='char_wb',
ngram_range=(3, 6)).build_analyzer()
text = StringIO("A test with a file-like object!")
expected = [' a ', ' te', 'tes', 'est', 'st ', ' tes']
assert_equal(cnga(text)[:6], expected)
def test_countvectorizer_custom_vocabulary():
vocab = {"pizza": 0, "beer": 1}
terms = set(vocab.keys())
# Try a few of the supported types.
for typ in [dict, list, iter, partial(defaultdict, int)]:
v = typ(vocab)
vect = CountVectorizer(vocabulary=v)
vect.fit(JUNK_FOOD_DOCS)
if isinstance(v, Mapping):
assert_equal(vect.vocabulary_, vocab)
else:
assert_equal(set(vect.vocabulary_), terms)
X = vect.transform(JUNK_FOOD_DOCS)
assert_equal(X.shape[1], len(terms))
def test_countvectorizer_custom_vocabulary_pipeline():
what_we_like = ["pizza", "beer"]
pipe = Pipeline([
('count', CountVectorizer(vocabulary=what_we_like)),
('tfidf', TfidfTransformer())])
X = pipe.fit_transform(ALL_FOOD_DOCS)
assert_equal(set(pipe.named_steps['count'].vocabulary_),
set(what_we_like))
assert_equal(X.shape[1], len(what_we_like))
def test_countvectorizer_custom_vocabulary_repeated_indeces():
vocab = {"pizza": 0, "beer": 0}
try:
CountVectorizer(vocabulary=vocab)
except ValueError as e:
assert_in("vocabulary contains repeated indices", str(e).lower())
def test_countvectorizer_custom_vocabulary_gap_index():
vocab = {"pizza": 1, "beer": 2}
try:
CountVectorizer(vocabulary=vocab)
except ValueError as e:
assert_in("doesn't contain index", str(e).lower())
def test_countvectorizer_stop_words():
cv = CountVectorizer()
cv.set_params(stop_words='english')
assert_equal(cv.get_stop_words(), ENGLISH_STOP_WORDS)
cv.set_params(stop_words='_bad_str_stop_')
assert_raises(ValueError, cv.get_stop_words)
cv.set_params(stop_words='_bad_unicode_stop_')
assert_raises(ValueError, cv.get_stop_words)
stoplist = ['some', 'other', 'words']
cv.set_params(stop_words=stoplist)
assert_equal(cv.get_stop_words(), set(stoplist))
def test_countvectorizer_empty_vocabulary():
try:
vect = CountVectorizer(vocabulary=[])
vect.fit(["foo"])
assert False, "we shouldn't get here"
except ValueError as e:
assert_in("empty vocabulary", str(e).lower())
try:
v = CountVectorizer(max_df=1.0, stop_words="english")
# fit on stopwords only
v.fit(["to be or not to be", "and me too", "and so do you"])
assert False, "we shouldn't get here"
except ValueError as e:
assert_in("empty vocabulary", str(e).lower())
def test_fit_countvectorizer_twice():
cv = CountVectorizer()
X1 = cv.fit_transform(ALL_FOOD_DOCS[:5])
X2 = cv.fit_transform(ALL_FOOD_DOCS[5:])
assert_not_equal(X1.shape[1], X2.shape[1])
def test_tf_idf_smoothing():
X = [[1, 1, 1],
[1, 1, 0],
[1, 0, 0]]
tr = TfidfTransformer(smooth_idf=True, norm='l2')
tfidf = tr.fit_transform(X).toarray()
assert_true((tfidf >= 0).all())
# check normalization
assert_array_almost_equal((tfidf ** 2).sum(axis=1), [1., 1., 1.])
# this is robust to features with only zeros
X = [[1, 1, 0],
[1, 1, 0],
[1, 0, 0]]
tr = TfidfTransformer(smooth_idf=True, norm='l2')
tfidf = tr.fit_transform(X).toarray()
assert_true((tfidf >= 0).all())
def test_tfidf_no_smoothing():
X = [[1, 1, 1],
[1, 1, 0],
[1, 0, 0]]
tr = TfidfTransformer(smooth_idf=False, norm='l2')
tfidf = tr.fit_transform(X).toarray()
assert_true((tfidf >= 0).all())
# check normalization
assert_array_almost_equal((tfidf ** 2).sum(axis=1), [1., 1., 1.])
# the lack of smoothing make IDF fragile in the presence of feature with
# only zeros
X = [[1, 1, 0],
[1, 1, 0],
[1, 0, 0]]
tr = TfidfTransformer(smooth_idf=False, norm='l2')
clean_warning_registry()
with warnings.catch_warnings(record=True) as w:
1. / np.array([0.])
numpy_provides_div0_warning = len(w) == 1
in_warning_message = 'divide by zero'
tfidf = assert_warns_message(RuntimeWarning, in_warning_message,
tr.fit_transform, X).toarray()
if not numpy_provides_div0_warning:
raise SkipTest("Numpy does not provide div 0 warnings.")
def test_sublinear_tf():
X = [[1], [2], [3]]
tr = TfidfTransformer(sublinear_tf=True, use_idf=False, norm=None)
tfidf = tr.fit_transform(X).toarray()
assert_equal(tfidf[0], 1)
assert_greater(tfidf[1], tfidf[0])
assert_greater(tfidf[2], tfidf[1])
assert_less(tfidf[1], 2)
assert_less(tfidf[2], 3)
def test_vectorizer():
# raw documents as an iterator
train_data = iter(ALL_FOOD_DOCS[:-1])
test_data = [ALL_FOOD_DOCS[-1]]
n_train = len(ALL_FOOD_DOCS) - 1
# test without vocabulary
v1 = CountVectorizer(max_df=0.5)
counts_train = v1.fit_transform(train_data)
if hasattr(counts_train, 'tocsr'):
counts_train = counts_train.tocsr()
assert_equal(counts_train[0, v1.vocabulary_["pizza"]], 2)
# build a vectorizer v2 with the same vocabulary as the one fitted by v1
v2 = CountVectorizer(vocabulary=v1.vocabulary_)
# check that the two vectorizers give the same output on the test sample
for v in (v1, v2):
counts_test = v.transform(test_data)
if hasattr(counts_test, 'tocsr'):
counts_test = counts_test.tocsr()
vocabulary = v.vocabulary_
assert_equal(counts_test[0, vocabulary["salad"]], 1)
assert_equal(counts_test[0, vocabulary["tomato"]], 1)
assert_equal(counts_test[0, vocabulary["water"]], 1)
# stop word from the fixed list
assert_false("the" in vocabulary)
# stop word found automatically by the vectorizer DF thresholding
# words that are highly frequent across the complete corpus are likely
# to be uninformative (either real stop words or extraction
# artifacts)
assert_false("copyright" in vocabulary)
# not present in the sample
assert_equal(counts_test[0, vocabulary["coke"]], 0)
assert_equal(counts_test[0, vocabulary["burger"]], 0)
assert_equal(counts_test[0, vocabulary["beer"]], 0)
assert_equal(counts_test[0, vocabulary["pizza"]], 0)
# test tf-idf
t1 = TfidfTransformer(norm='l1')
tfidf = t1.fit(counts_train).transform(counts_train).toarray()
assert_equal(len(t1.idf_), len(v1.vocabulary_))
assert_equal(tfidf.shape, (n_train, len(v1.vocabulary_)))
# test tf-idf with new data
tfidf_test = t1.transform(counts_test).toarray()
assert_equal(tfidf_test.shape, (len(test_data), len(v1.vocabulary_)))
# test tf alone
t2 = TfidfTransformer(norm='l1', use_idf=False)
tf = t2.fit(counts_train).transform(counts_train).toarray()
assert_equal(t2.idf_, None)
# test idf transform with unlearned idf vector
t3 = TfidfTransformer(use_idf=True)
assert_raises(ValueError, t3.transform, counts_train)
# test idf transform with incompatible n_features
X = [[1, 1, 5],
[1, 1, 0]]
t3.fit(X)
X_incompt = [[1, 3],
[1, 3]]
assert_raises(ValueError, t3.transform, X_incompt)
# L1-normalized term frequencies sum to one
assert_array_almost_equal(np.sum(tf, axis=1), [1.0] * n_train)
# test the direct tfidf vectorizer
# (equivalent to term count vectorizer + tfidf transformer)
train_data = iter(ALL_FOOD_DOCS[:-1])
tv = TfidfVectorizer(norm='l1')
tv.max_df = v1.max_df
tfidf2 = tv.fit_transform(train_data).toarray()
assert_false(tv.fixed_vocabulary_)
assert_array_almost_equal(tfidf, tfidf2)
# test the direct tfidf vectorizer with new data
tfidf_test2 = tv.transform(test_data).toarray()
assert_array_almost_equal(tfidf_test, tfidf_test2)
# test transform on unfitted vectorizer with empty vocabulary
v3 = CountVectorizer(vocabulary=None)
assert_raises(ValueError, v3.transform, train_data)
# ascii preprocessor?
v3.set_params(strip_accents='ascii', lowercase=False)
assert_equal(v3.build_preprocessor(), strip_accents_ascii)
# error on bad strip_accents param
v3.set_params(strip_accents='_gabbledegook_', preprocessor=None)
assert_raises(ValueError, v3.build_preprocessor)
# error with bad analyzer type
v3.set_params(analyzer='_invalid_analyzer_type_')
assert_raises(ValueError, v3.build_analyzer)
def test_tfidf_vectorizer_setters():
tv = TfidfVectorizer(norm='l2', use_idf=False, smooth_idf=False,
sublinear_tf=False)
tv.norm = 'l1'
assert_equal(tv._tfidf.norm, 'l1')
tv.use_idf = True
assert_true(tv._tfidf.use_idf)
tv.smooth_idf = True
assert_true(tv._tfidf.smooth_idf)
tv.sublinear_tf = True
assert_true(tv._tfidf.sublinear_tf)
def test_hashing_vectorizer():
v = HashingVectorizer()
X = v.transform(ALL_FOOD_DOCS)
token_nnz = X.nnz
assert_equal(X.shape, (len(ALL_FOOD_DOCS), v.n_features))
assert_equal(X.dtype, v.dtype)
# By default the hashed values receive a random sign and l2 normalization
# makes the feature values bounded
assert_true(np.min(X.data) > -1)
assert_true(np.min(X.data) < 0)
assert_true(np.max(X.data) > 0)
assert_true(np.max(X.data) < 1)
# Check that the rows are normalized
for i in range(X.shape[0]):
assert_almost_equal(np.linalg.norm(X[0].data, 2), 1.0)
# Check vectorization with some non-default parameters
v = HashingVectorizer(ngram_range=(1, 2), non_negative=True, norm='l1')
X = v.transform(ALL_FOOD_DOCS)
assert_equal(X.shape, (len(ALL_FOOD_DOCS), v.n_features))
assert_equal(X.dtype, v.dtype)
# ngrams generate more non zeros
ngrams_nnz = X.nnz
assert_true(ngrams_nnz > token_nnz)
assert_true(ngrams_nnz < 2 * token_nnz)
# makes the feature values bounded
assert_true(np.min(X.data) > 0)
assert_true(np.max(X.data) < 1)
# Check that the rows are normalized
for i in range(X.shape[0]):
assert_almost_equal(np.linalg.norm(X[0].data, 1), 1.0)
def test_feature_names():
cv = CountVectorizer(max_df=0.5)
# test for Value error on unfitted/empty vocabulary
assert_raises(ValueError, cv.get_feature_names)
X = cv.fit_transform(ALL_FOOD_DOCS)
n_samples, n_features = X.shape
assert_equal(len(cv.vocabulary_), n_features)
feature_names = cv.get_feature_names()
assert_equal(len(feature_names), n_features)
assert_array_equal(['beer', 'burger', 'celeri', 'coke', 'pizza',
'salad', 'sparkling', 'tomato', 'water'],
feature_names)
for idx, name in enumerate(feature_names):
assert_equal(idx, cv.vocabulary_.get(name))
def test_vectorizer_max_features():
vec_factories = (
CountVectorizer,
TfidfVectorizer,
)
expected_vocabulary = set(['burger', 'beer', 'salad', 'pizza'])
expected_stop_words = set([u'celeri', u'tomato', u'copyright', u'coke',
u'sparkling', u'water', u'the'])
for vec_factory in vec_factories:
# test bounded number of extracted features
vectorizer = vec_factory(max_df=0.6, max_features=4)
vectorizer.fit(ALL_FOOD_DOCS)
assert_equal(set(vectorizer.vocabulary_), expected_vocabulary)
assert_equal(vectorizer.stop_words_, expected_stop_words)
def test_count_vectorizer_max_features():
# Regression test: max_features didn't work correctly in 0.14.
cv_1 = CountVectorizer(max_features=1)
cv_3 = CountVectorizer(max_features=3)
cv_None = CountVectorizer(max_features=None)
counts_1 = cv_1.fit_transform(JUNK_FOOD_DOCS).sum(axis=0)
counts_3 = cv_3.fit_transform(JUNK_FOOD_DOCS).sum(axis=0)
counts_None = cv_None.fit_transform(JUNK_FOOD_DOCS).sum(axis=0)
features_1 = cv_1.get_feature_names()
features_3 = cv_3.get_feature_names()
features_None = cv_None.get_feature_names()
# The most common feature is "the", with frequency 7.
assert_equal(7, counts_1.max())
assert_equal(7, counts_3.max())
assert_equal(7, counts_None.max())
# The most common feature should be the same
assert_equal("the", features_1[np.argmax(counts_1)])
assert_equal("the", features_3[np.argmax(counts_3)])
assert_equal("the", features_None[np.argmax(counts_None)])
def test_vectorizer_max_df():
test_data = ['abc', 'dea', 'eat']
vect = CountVectorizer(analyzer='char', max_df=1.0)
vect.fit(test_data)
assert_true('a' in vect.vocabulary_.keys())
assert_equal(len(vect.vocabulary_.keys()), 6)
assert_equal(len(vect.stop_words_), 0)
vect.max_df = 0.5 # 0.5 * 3 documents -> max_doc_count == 1.5
vect.fit(test_data)
assert_true('a' not in vect.vocabulary_.keys()) # {ae} ignored
assert_equal(len(vect.vocabulary_.keys()), 4) # {bcdt} remain
assert_true('a' in vect.stop_words_)
assert_equal(len(vect.stop_words_), 2)
vect.max_df = 1
vect.fit(test_data)
assert_true('a' not in vect.vocabulary_.keys()) # {ae} ignored
assert_equal(len(vect.vocabulary_.keys()), 4) # {bcdt} remain
assert_true('a' in vect.stop_words_)
assert_equal(len(vect.stop_words_), 2)
def test_vectorizer_min_df():
test_data = ['abc', 'dea', 'eat']
vect = CountVectorizer(analyzer='char', min_df=1)
vect.fit(test_data)
assert_true('a' in vect.vocabulary_.keys())
assert_equal(len(vect.vocabulary_.keys()), 6)
assert_equal(len(vect.stop_words_), 0)
vect.min_df = 2
vect.fit(test_data)
assert_true('c' not in vect.vocabulary_.keys()) # {bcdt} ignored
assert_equal(len(vect.vocabulary_.keys()), 2) # {ae} remain
assert_true('c' in vect.stop_words_)
assert_equal(len(vect.stop_words_), 4)
vect.min_df = 0.8 # 0.8 * 3 documents -> min_doc_count == 2.4
vect.fit(test_data)
assert_true('c' not in vect.vocabulary_.keys()) # {bcdet} ignored
assert_equal(len(vect.vocabulary_.keys()), 1) # {a} remains
assert_true('c' in vect.stop_words_)
assert_equal(len(vect.stop_words_), 5)
def test_count_binary_occurrences():
# by default multiple occurrences are counted as longs
test_data = ['aaabc', 'abbde']
vect = CountVectorizer(analyzer='char', max_df=1.0)
X = vect.fit_transform(test_data).toarray()
assert_array_equal(['a', 'b', 'c', 'd', 'e'], vect.get_feature_names())
assert_array_equal([[3, 1, 1, 0, 0],
[1, 2, 0, 1, 1]], X)
# using boolean features, we can fetch the binary occurrence info
# instead.
vect = CountVectorizer(analyzer='char', max_df=1.0, binary=True)
X = vect.fit_transform(test_data).toarray()
assert_array_equal([[1, 1, 1, 0, 0],
[1, 1, 0, 1, 1]], X)
# check the ability to change the dtype
vect = CountVectorizer(analyzer='char', max_df=1.0,
binary=True, dtype=np.float32)
X_sparse = vect.fit_transform(test_data)
assert_equal(X_sparse.dtype, np.float32)
def test_hashed_binary_occurrences():
# by default multiple occurrences are counted as longs
test_data = ['aaabc', 'abbde']
vect = HashingVectorizer(analyzer='char', non_negative=True,
norm=None)
X = vect.transform(test_data)
assert_equal(np.max(X[0:1].data), 3)
assert_equal(np.max(X[1:2].data), 2)
assert_equal(X.dtype, np.float64)
# using boolean features, we can fetch the binary occurrence info
# instead.
vect = HashingVectorizer(analyzer='char', non_negative=True, binary=True,
norm=None)
X = vect.transform(test_data)
assert_equal(np.max(X.data), 1)
assert_equal(X.dtype, np.float64)
# check the ability to change the dtype
vect = HashingVectorizer(analyzer='char', non_negative=True, binary=True,
norm=None, dtype=np.float64)
X = vect.transform(test_data)
assert_equal(X.dtype, np.float64)
def test_vectorizer_inverse_transform():
# raw documents
data = ALL_FOOD_DOCS
for vectorizer in (TfidfVectorizer(), CountVectorizer()):
transformed_data = vectorizer.fit_transform(data)
inversed_data = vectorizer.inverse_transform(transformed_data)
analyze = vectorizer.build_analyzer()
for doc, inversed_terms in zip(data, inversed_data):
terms = np.sort(np.unique(analyze(doc)))
inversed_terms = np.sort(np.unique(inversed_terms))
assert_array_equal(terms, inversed_terms)
# Test that inverse_transform also works with numpy arrays
transformed_data = transformed_data.toarray()
inversed_data2 = vectorizer.inverse_transform(transformed_data)
for terms, terms2 in zip(inversed_data, inversed_data2):
assert_array_equal(np.sort(terms), np.sort(terms2))
def test_count_vectorizer_pipeline_grid_selection():
# raw documents
data = JUNK_FOOD_DOCS + NOTJUNK_FOOD_DOCS
# label junk food as -1, the others as +1
target = [-1] * len(JUNK_FOOD_DOCS) + [1] * len(NOTJUNK_FOOD_DOCS)
# split the dataset for model development and final evaluation
train_data, test_data, target_train, target_test = train_test_split(
data, target, test_size=.2, random_state=0)
pipeline = Pipeline([('vect', CountVectorizer()),
('svc', LinearSVC())])
parameters = {
'vect__ngram_range': [(1, 1), (1, 2)],
'svc__loss': ('hinge', 'squared_hinge')
}
# find the best parameters for both the feature extraction and the
# classifier
grid_search = GridSearchCV(pipeline, parameters, n_jobs=1)
# Check that the best model found by grid search is 100% correct on the
# held out evaluation set.
pred = grid_search.fit(train_data, target_train).predict(test_data)
assert_array_equal(pred, target_test)
# on this toy dataset bigram representation which is used in the last of
# the grid_search is considered the best estimator since they all converge
# to 100% accuracy models
assert_equal(grid_search.best_score_, 1.0)
best_vectorizer = grid_search.best_estimator_.named_steps['vect']
assert_equal(best_vectorizer.ngram_range, (1, 1))
def test_vectorizer_pipeline_grid_selection():
# raw documents
data = JUNK_FOOD_DOCS + NOTJUNK_FOOD_DOCS
# label junk food as -1, the others as +1
target = [-1] * len(JUNK_FOOD_DOCS) + [1] * len(NOTJUNK_FOOD_DOCS)
# split the dataset for model development and final evaluation
train_data, test_data, target_train, target_test = train_test_split(
data, target, test_size=.1, random_state=0)
pipeline = Pipeline([('vect', TfidfVectorizer()),
('svc', LinearSVC())])
parameters = {
'vect__ngram_range': [(1, 1), (1, 2)],
'vect__norm': ('l1', 'l2'),
'svc__loss': ('hinge', 'squared_hinge'),
}
# find the best parameters for both the feature extraction and the
# classifier
grid_search = GridSearchCV(pipeline, parameters, n_jobs=1)
# Check that the best model found by grid search is 100% correct on the
# held out evaluation set.
pred = grid_search.fit(train_data, target_train).predict(test_data)
assert_array_equal(pred, target_test)
# on this toy dataset bigram representation which is used in the last of
# the grid_search is considered the best estimator since they all converge
# to 100% accuracy models
assert_equal(grid_search.best_score_, 1.0)
best_vectorizer = grid_search.best_estimator_.named_steps['vect']
assert_equal(best_vectorizer.ngram_range, (1, 1))
assert_equal(best_vectorizer.norm, 'l2')
assert_false(best_vectorizer.fixed_vocabulary_)
def test_vectorizer_pipeline_cross_validation():
# raw documents
data = JUNK_FOOD_DOCS + NOTJUNK_FOOD_DOCS
# label junk food as -1, the others as +1
target = [-1] * len(JUNK_FOOD_DOCS) + [1] * len(NOTJUNK_FOOD_DOCS)
pipeline = Pipeline([('vect', TfidfVectorizer()),
('svc', LinearSVC())])
cv_scores = cross_val_score(pipeline, data, target, cv=3)
assert_array_equal(cv_scores, [1., 1., 1.])
def test_vectorizer_unicode():
# tests that the count vectorizer works with Cyrillic text.
document = (
"\xd0\x9c\xd0\xb0\xd1\x88\xd0\xb8\xd0\xbd\xd0\xbd\xd0\xbe\xd0"
"\xb5 \xd0\xbe\xd0\xb1\xd1\x83\xd1\x87\xd0\xb5\xd0\xbd\xd0\xb8\xd0"
"\xb5 \xe2\x80\x94 \xd0\xbe\xd0\xb1\xd1\x88\xd0\xb8\xd1\x80\xd0\xbd"
"\xd1\x8b\xd0\xb9 \xd0\xbf\xd0\xbe\xd0\xb4\xd1\x80\xd0\xb0\xd0\xb7"
"\xd0\xb4\xd0\xb5\xd0\xbb \xd0\xb8\xd1\x81\xd0\xba\xd1\x83\xd1\x81"
"\xd1\x81\xd1\x82\xd0\xb2\xd0\xb5\xd0\xbd\xd0\xbd\xd0\xbe\xd0\xb3"
"\xd0\xbe \xd0\xb8\xd0\xbd\xd1\x82\xd0\xb5\xd0\xbb\xd0\xbb\xd0"
"\xb5\xd0\xba\xd1\x82\xd0\xb0, \xd0\xb8\xd0\xb7\xd1\x83\xd1\x87"
"\xd0\xb0\xd1\x8e\xd1\x89\xd0\xb8\xd0\xb9 \xd0\xbc\xd0\xb5\xd1\x82"
"\xd0\xbe\xd0\xb4\xd1\x8b \xd0\xbf\xd0\xbe\xd1\x81\xd1\x82\xd1\x80"
"\xd0\xbe\xd0\xb5\xd0\xbd\xd0\xb8\xd1\x8f \xd0\xb0\xd0\xbb\xd0\xb3"
"\xd0\xbe\xd1\x80\xd0\xb8\xd1\x82\xd0\xbc\xd0\xbe\xd0\xb2, \xd1\x81"
"\xd0\xbf\xd0\xbe\xd1\x81\xd0\xbe\xd0\xb1\xd0\xbd\xd1\x8b\xd1\x85 "
"\xd0\xbe\xd0\xb1\xd1\x83\xd1\x87\xd0\xb0\xd1\x82\xd1\x8c\xd1\x81\xd1"
"\x8f.")
vect = CountVectorizer()
X_counted = vect.fit_transform([document])
assert_equal(X_counted.shape, (1, 15))
vect = HashingVectorizer(norm=None, non_negative=True)
X_hashed = vect.transform([document])
assert_equal(X_hashed.shape, (1, 2 ** 20))
# No collisions on such a small dataset
assert_equal(X_counted.nnz, X_hashed.nnz)
# When norm is None and non_negative, the tokens are counted up to
# collisions
assert_array_equal(np.sort(X_counted.data), np.sort(X_hashed.data))
def test_tfidf_vectorizer_with_fixed_vocabulary():
# non regression smoke test for inheritance issues
vocabulary = ['pizza', 'celeri']
vect = TfidfVectorizer(vocabulary=vocabulary)
X_1 = vect.fit_transform(ALL_FOOD_DOCS)
X_2 = vect.transform(ALL_FOOD_DOCS)
assert_array_almost_equal(X_1.toarray(), X_2.toarray())
assert_true(vect.fixed_vocabulary_)
def test_pickling_vectorizer():
instances = [
HashingVectorizer(),
HashingVectorizer(norm='l1'),
HashingVectorizer(binary=True),
HashingVectorizer(ngram_range=(1, 2)),
CountVectorizer(),
CountVectorizer(preprocessor=strip_tags),
CountVectorizer(analyzer=lazy_analyze),
CountVectorizer(preprocessor=strip_tags).fit(JUNK_FOOD_DOCS),
CountVectorizer(strip_accents=strip_eacute).fit(JUNK_FOOD_DOCS),
TfidfVectorizer(),
TfidfVectorizer(analyzer=lazy_analyze),
TfidfVectorizer().fit(JUNK_FOOD_DOCS),
]
for orig in instances:
s = pickle.dumps(orig)
copy = pickle.loads(s)
assert_equal(type(copy), orig.__class__)
assert_equal(copy.get_params(), orig.get_params())
assert_array_equal(
copy.fit_transform(JUNK_FOOD_DOCS).toarray(),
orig.fit_transform(JUNK_FOOD_DOCS).toarray())
def test_stop_words_removal():
# Ensure that deleting the stop_words_ attribute doesn't affect transform
fitted_vectorizers = (
TfidfVectorizer().fit(JUNK_FOOD_DOCS),
CountVectorizer(preprocessor=strip_tags).fit(JUNK_FOOD_DOCS),
CountVectorizer(strip_accents=strip_eacute).fit(JUNK_FOOD_DOCS)
)
for vect in fitted_vectorizers:
vect_transform = vect.transform(JUNK_FOOD_DOCS).toarray()
vect.stop_words_ = None
stop_None_transform = vect.transform(JUNK_FOOD_DOCS).toarray()
delattr(vect, 'stop_words_')
stop_del_transform = vect.transform(JUNK_FOOD_DOCS).toarray()
assert_array_equal(stop_None_transform, vect_transform)
assert_array_equal(stop_del_transform, vect_transform)
def test_pickling_transformer():
X = CountVectorizer().fit_transform(JUNK_FOOD_DOCS)
orig = TfidfTransformer().fit(X)
s = pickle.dumps(orig)
copy = pickle.loads(s)
assert_equal(type(copy), orig.__class__)
assert_array_equal(
copy.fit_transform(X).toarray(),
orig.fit_transform(X).toarray())
def test_non_unique_vocab():
vocab = ['a', 'b', 'c', 'a', 'a']
vect = CountVectorizer(vocabulary=vocab)
assert_raises(ValueError, vect.fit, [])
def test_hashingvectorizer_nan_in_docs():
# np.nan can appear when using pandas to load text fields from a csv file
# with missing values.
message = "np.nan is an invalid document, expected byte or unicode string."
exception = ValueError
def func():
hv = HashingVectorizer()
hv.fit_transform(['hello world', np.nan, 'hello hello'])
assert_raise_message(exception, message, func)
def test_tfidfvectorizer_binary():
# Non-regression test: TfidfVectorizer used to ignore its "binary" param.
v = TfidfVectorizer(binary=True, use_idf=False, norm=None)
assert_true(v.binary)
X = v.fit_transform(['hello world', 'hello hello']).toarray()
assert_array_equal(X.ravel(), [1, 1, 1, 0])
X2 = v.transform(['hello world', 'hello hello']).toarray()
assert_array_equal(X2.ravel(), [1, 1, 1, 0])
def test_tfidfvectorizer_export_idf():
vect = TfidfVectorizer(use_idf=True)
vect.fit(JUNK_FOOD_DOCS)
assert_array_almost_equal(vect.idf_, vect._tfidf.idf_)
def test_vectorizer_vocab_clone():
vect_vocab = TfidfVectorizer(vocabulary=["the"])
vect_vocab_clone = clone(vect_vocab)
vect_vocab.fit(ALL_FOOD_DOCS)
vect_vocab_clone.fit(ALL_FOOD_DOCS)
assert_equal(vect_vocab_clone.vocabulary_, vect_vocab.vocabulary_)
| bsd-3-clause |
sonnyhu/scikit-learn | sklearn/metrics/cluster/tests/test_unsupervised.py | 7 | 5186 | import numpy as np
import scipy.sparse as sp
from scipy.sparse import csr_matrix
from sklearn import datasets
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises_regexp
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_greater
from sklearn.metrics.cluster import silhouette_score
from sklearn.metrics.cluster import calinski_harabaz_score
from sklearn.metrics import pairwise_distances
def test_silhouette():
# Tests the Silhouette Coefficient.
dataset = datasets.load_iris()
X_dense = dataset.data
X_csr = csr_matrix(X_dense)
X_dok = sp.dok_matrix(X_dense)
X_lil = sp.lil_matrix(X_dense)
y = dataset.target
for X in [X_dense, X_csr, X_dok, X_lil]:
D = pairwise_distances(X, metric='euclidean')
# Given that the actual labels are used, we can assume that S would be
# positive.
score_precomputed = silhouette_score(D, y, metric='precomputed')
assert_greater(score_precomputed, 0)
# Test without calculating D
score_euclidean = silhouette_score(X, y, metric='euclidean')
assert_almost_equal(score_precomputed, score_euclidean)
if X is X_dense:
score_dense_without_sampling = score_precomputed
else:
assert_almost_equal(score_euclidean,
score_dense_without_sampling)
# Test with sampling
score_precomputed = silhouette_score(D, y, metric='precomputed',
sample_size=int(X.shape[0] / 2),
random_state=0)
score_euclidean = silhouette_score(X, y, metric='euclidean',
sample_size=int(X.shape[0] / 2),
random_state=0)
assert_greater(score_precomputed, 0)
assert_greater(score_euclidean, 0)
assert_almost_equal(score_euclidean, score_precomputed)
if X is X_dense:
score_dense_with_sampling = score_precomputed
else:
assert_almost_equal(score_euclidean, score_dense_with_sampling)
def test_no_nan():
# Assert Silhouette Coefficient != nan when there is 1 sample in a class.
# This tests for the condition that caused issue 960.
# Note that there is only one sample in cluster 0. This used to cause the
# silhouette_score to return nan (see bug #960).
labels = np.array([1, 0, 1, 1, 1])
# The distance matrix doesn't actually matter.
D = np.random.RandomState(0).rand(len(labels), len(labels))
silhouette = silhouette_score(D, labels, metric='precomputed')
assert_false(np.isnan(silhouette))
def test_correct_labelsize():
# Assert 1 < n_labels < n_samples
dataset = datasets.load_iris()
X = dataset.data
# n_labels = n_samples
y = np.arange(X.shape[0])
assert_raises_regexp(ValueError,
'Number of labels is %d\. Valid values are 2 '
'to n_samples - 1 \(inclusive\)' % len(np.unique(y)),
silhouette_score, X, y)
# n_labels = 1
y = np.zeros(X.shape[0])
assert_raises_regexp(ValueError,
'Number of labels is %d\. Valid values are 2 '
'to n_samples - 1 \(inclusive\)' % len(np.unique(y)),
silhouette_score, X, y)
def test_non_encoded_labels():
dataset = datasets.load_iris()
X = dataset.data
labels = dataset.target
assert_equal(
silhouette_score(X, labels + 10), silhouette_score(X, labels))
def test_non_numpy_labels():
dataset = datasets.load_iris()
X = dataset.data
y = dataset.target
assert_equal(
silhouette_score(list(X), list(y)), silhouette_score(X, y))
def test_calinski_harabaz_score():
rng = np.random.RandomState(seed=0)
# Assert message when there is only one label
assert_raise_message(ValueError, "Number of labels is",
calinski_harabaz_score,
rng.rand(10, 2), np.zeros(10))
# Assert message when all point are in different clusters
assert_raise_message(ValueError, "Number of labels is",
calinski_harabaz_score,
rng.rand(10, 2), np.arange(10))
# Assert the value is 1. when all samples are equals
assert_equal(1., calinski_harabaz_score(np.ones((10, 2)),
[0] * 5 + [1] * 5))
# Assert the value is 0. when all the mean cluster are equal
assert_equal(0., calinski_harabaz_score([[-1, -1], [1, 1]] * 10,
[0] * 10 + [1] * 10))
# General case (with non numpy arrays)
X = ([[0, 0], [1, 1]] * 5 + [[3, 3], [4, 4]] * 5 +
[[0, 4], [1, 3]] * 5 + [[3, 1], [4, 0]] * 5)
labels = [0] * 10 + [1] * 10 + [2] * 10 + [3] * 10
assert_almost_equal(calinski_harabaz_score(X, labels),
45 * (40 - 4) / (5 * (4 - 1)))
| bsd-3-clause |
lucidfrontier45/scikit-learn | sklearn/cluster/tests/test_affinity_propagation.py | 6 | 1959 | """
Testing for Clustering methods
"""
import numpy as np
from sklearn.utils.testing import (assert_equal, assert_array_equal,
assert_raises)
from sklearn.cluster.affinity_propagation_ import AffinityPropagation
from sklearn.cluster.affinity_propagation_ import affinity_propagation
from sklearn.datasets.samples_generator import make_blobs
from sklearn.metrics import euclidean_distances
n_clusters = 3
centers = np.array([[1, 1], [-1, -1], [1, -1]]) + 10
X, _ = make_blobs(n_samples=60, n_features=2, centers=centers,
cluster_std=0.4, shuffle=True, random_state=0)
def test_affinity_propagation():
"""Affinity Propagation algorithm
"""
# Compute similarities
S = -euclidean_distances(X, squared=True)
preference = np.median(S) * 10
# Compute Affinity Propagation
cluster_centers_indices, labels = affinity_propagation(
S, preference=preference)
n_clusters_ = len(cluster_centers_indices)
assert_equal(n_clusters, n_clusters_)
af = AffinityPropagation(preference=preference, affinity="precomputed")
labels_precomputed = af.fit(S).labels_
af = AffinityPropagation(preference=preference, verbose=True)
labels = af.fit(X).labels_
assert_array_equal(labels, labels_precomputed)
cluster_centers_indices = af.cluster_centers_indices_
n_clusters_ = len(cluster_centers_indices)
assert_equal(np.unique(labels).size, n_clusters_)
assert_equal(n_clusters, n_clusters_)
# Test also with no copy
_, labels_no_copy = affinity_propagation(S, preference=preference,
copy=False)
assert_array_equal(labels, labels_no_copy)
# Test input validation
assert_raises(ValueError, affinity_propagation, S[:, :-1])
assert_raises(ValueError, affinity_propagation, S, damping=0)
af = AffinityPropagation(affinity="unknown")
assert_raises(ValueError, af.fit, X)
| bsd-3-clause |
kaiodt/nanook_path_tracking | planta_motores/plot_ajuste.py | 1 | 1214 | #! /usr/bin/env python
# coding=utf-8
import matplotlib.pyplot as plt
path = "/home/nanook/nanook_ros/src/nanook_path_tracking/planta_motores/ensaios/ensaio_ajuste.txt"
f = open(path, 'r')
samples = []
t = []
left_speed = []
right_speed = []
##### Read the data file #####
for line in f:
line = line.split()
samples.append(int(line[0]))
t.append(float(line[1]))
left_speed.append(float(line[2]))
right_speed.append(float(line[3]))
f.close()
##### Averages #####
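# Average the speeds over a steady-state window, discarding the startup
# transient (first 50 samples) and the tail of the run.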
n = len(samples)
s0 = 50
sf = n - 10
left_avg = left_speed[s0:sf+1]
right_avg = right_speed[s0:sf+1]
left_speed_avg = sum(left_avg) / len(left_avg)
right_speed_avg = sum(right_avg) / len(right_avg)
print('Left average = %f' % left_speed_avg)
print('Right average = %f' % right_speed_avg)
##### Plot #####
try:
plt.close('all')
plt.figure()
plt.subplot(1, 2, 1)
plt.plot(samples, left_speed, 'b-')
plt.title('Left Motor')
plt.xlabel('Sample')
plt.ylabel('Speed')
plt.grid('on')
plt.subplot(1, 2, 2)
plt.plot(samples, right_speed, 'b-')
plt.title('Right Motor')
plt.xlabel('Sample')
plt.ylabel('Speed')
plt.grid('on')
plt.show()
except KeyboardInterrupt:
plt.close('all')
raise SystemExit
| gpl-3.0 |
PhenixI/machine-learning | 1_supervised_classification/6-Perceptron and Neural Networks/ANN_python/ANN_python.py | 1 | 4867 | #Classifying handwritten digits
#1.obtaining the MNIST dataset
import os
import struct
import numpy as np
#load_mnist returns two arrays:
#1.the first is an n x m dimensional NumPy array (images), where n is the
#number of samples and m is the number of features
#2.the second array (labels) contains the corresponding target variable,
#the class labels (integers 0-9) of the handwritten digits
def load_mnist(path,kind = 'train'):
"""Load Mnist data from path"""
labels_path = os.path.join(path,'%s-labels-idx1-ubyte' % kind)
images_path = os.path.join(path,'%s-images-idx3-ubyte' % kind)
with open(labels_path,'rb') as lbpath:
#read 8 header bytes: two 4-byte big-endian unsigned integers (magic number, item count)
magic,n = struct.unpack('>II',lbpath.read(8))
labels = np.fromfile(lbpath,dtype = np.uint8)
with open(images_path,'rb') as imgpath:
magic,num,rows,cols = struct.unpack(">IIII",imgpath.read(16))
images = np.fromfile(imgpath,dtype = np.uint8).reshape(len(labels),784)
return images,labels
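
#Sketch for illustration: the IDX headers are big-endian, which is why
#'>II' and '>IIII' are used above. peek_idx1_header is a hypothetical
#helper (not from the original script) that only inspects a label file's
#(magic, count) header.
def peek_idx1_header(path):
    with open(path, 'rb') as fh:
        return struct.unpack('>II', fh.read(8))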
#load the 60,000 training instances as well as the 10,000 test samples
X_train,y_train = load_mnist('F:\developSamples\ml\mnist',kind = 'train')
print ('Rows: %d, columns: %d' % (X_train.shape[0], X_train.shape[1]))
X_test,y_test = load_mnist('F:\developSamples\ml\mnist',kind='t10k')
print('Rows: %d, columns: %d' % (X_test.shape[0], X_test.shape[1]))
#visualize examples of the digits 0-9 after reshaping the 784-pixel vectors
import matplotlib.pyplot as plt
fig, ax = plt.subplots(nrows=2, ncols=5, sharex=True,sharey=True,)
ax = ax.flatten()
for i in range(10):
img = X_train[y_train == i][0].reshape(28,28)
ax[i].imshow(img,cmap = 'Greys',interpolation='nearest')
ax[0].set_xticks([])
ax[0].set_yticks([])
plt.tight_layout()
plt.show()
#plot multiple examples of the same digit
fig,ax = plt.subplots(nrows = 5,ncols = 5,sharex = True,sharey = True,)
ax = ax.flatten()
for i in range(25):
img = X_train[y_train == 7][i].reshape(28,28)
ax[i].imshow(img,cmap = 'Greys',interpolation = 'nearest')
ax[0].set_xticks([])
ax[0].set_yticks([])
plt.tight_layout()
plt.show()
import os
os.chdir('E:/machine-learning/1_supervised_classification/6-Perceptron and Neural Networks/ANN_python')
from neuralnet import NeuralNetMLP
#a 784-50-10 MLP
nn = NeuralNetMLP(n_output = 10,
n_features = X_train.shape[1],
n_hidden = 50,
l2 = 0.1,
l1 = 0.0,
epochs = 1000,
eta = 0.001,
alpha = 0.001,
decrease_const = 0.00001,
shuffle = True,
minibatches = 50,
random_state = 1)
nn.fit(X_train,y_train,print_progress = True)
#plot every 50th step to account for the 50 mini-batches (50 mini-batches x1000 epochs)
plt.plot(range(len(nn.cost_)),nn.cost_)
plt.ylim([0,2000])
plt.ylabel('Cost')
plt.xlabel('Epochs *50')
plt.tight_layout()
plt.show()
#plot a smoother version of the cost function against the number of epochs by averaging over the mini-batch
#intervals
batches = np.array_split(range(len(nn.cost_)),1000)
cost_ary = np.array(nn.cost_)
cost_avgs = [np.mean(cost_ary[i]) for i in batches]
plt.plot(range(len(cost_avgs)),cost_avgs,color = 'red')
plt.ylim([0,2000])
plt.ylabel('Cost')
plt.xlabel('Epochs')
plt.tight_layout()
plt.show()
#evaluate the performance of the model by calculating the prediction accuracy
y_train_pred = nn.predict(X_train)
acc = np.sum(y_train == y_train_pred, axis=0) / float(X_train.shape[0])  # float() guards against Python 2 integer division
print ('Training accuracy %.2f%%' % (acc*100))
#generalize to data that it has not seen before
y_test_pred = nn.predict(X_test)
acc = np.sum(y_test == y_test_pred, axis=0) / float(X_test.shape[0])  # float() guards against Python 2 integer division
print ('Test accuracy %.2f%%' % (acc*100))
#plot the images that MLP struggles with
miscl_img = X_test[y_test != y_test_pred][:25]
correct_lab = y_test[y_test != y_test_pred][:25]
miscl_lab = y_test_pred[y_test != y_test_pred][:25]
fig,ax = plt.subplots(nrows = 5,
ncols = 5,
sharex = True,
sharey = True,)
ax = ax.flatten()
for i in range(25):
img = miscl_img[i].reshape(28,28)
ax[i].imshow(img,cmap='Greys',interpolation = 'nearest')
ax[i].set_title(' %d) t:%d p: %d' % (i+1,correct_lab[i],miscl_lab[i]))
ax[0].set_xticks([])
ax[0].set_yticks([])
plt.tight_layout()
plt.show()
#gradient checking example
nn = NeuralNetMLP(n_output = 10,
n_features = X_train.shape[1],
n_hidden = 10,
l2 = 0.0,
l1 = 0.0,
epochs = 10,
eta = 0.001,
alpha = 0.0,
decrease_const = 0.0,
minibatches = 1,
random_state = 1)
nn.fit(X_train[:5],y_train[:5],print_progress = False)
| gpl-2.0 |
codeaudit/gpss-research | source/sandpit.py | 4 | 36385 | '''
Created on Nov 2012
@authors: James Robert Lloyd ([email protected])
David Duvenaud ([email protected])
Roger Grosse ([email protected])
'''
import flexiblekernel as fk
import grammar
import gpml
import utils.latex
import numpy as np
import pylab
import scipy.io
import sys
import os
from job_controller import *
import flexiblekernel as fk
from flexiblekernel import ScoredKernel
import grammar
import gpml
import utils.latex
import utils.fear
from config import *
from utils import gaussians, psd_matrices
import numpy as np
nax = np.newaxis
import pylab
import scipy.io
import sys
import os
import tempfile
import subprocess
import time
import cblparallel
from cblparallel.util import mkstemp_safe
import re
import shutil
import random
def kernel_test():
k = fk.MaskKernel(4, 3, fk.SqExpKernel(0, 0))
print k.gpml_kernel_expression()
print k.pretty_print()
print '[%s]' % k.param_vector()
print 'kernel_test complete'
def base_kernel_test():
print [k.pretty_print() for k in fk.base_kernels(1)]
print 'base_kernel_test complete'
def expand_test():
k1 = fk.SqExpKernel(1, 1)
k2 = fk.SqExpPeriodicKernel(2, 2, 2)
e = fk.SumKernel([k1, k2])
g = grammar.OneDGrammar()
print ''
for f in grammar.expand(e, g):
#print f
print f.pretty_print()
print grammar.canonical(f).pretty_print()
print
print ' ***** duplicates removed *****'
print
kernels = grammar.expand(e, g)
for f in grammar.remove_duplicates(kernels):
print f.pretty_print()
print
print '%d originally, %d without duplicates' % (len(kernels), len(grammar.remove_duplicates(kernels)))
print 'expand_test complete'
def expand_test2():
k1 = fk.MaskKernel(2, 0, fk.SqExpKernel(1, 1))
k2 = fk.MaskKernel(2, 1, fk.SqExpPeriodicKernel(2, 2, 2))
e = fk.SumKernel([k1, k2])
g = grammar.MultiDGrammar(2)
print ''
for f in grammar.expand(e, g):
print f.pretty_print()
print grammar.canonical(f).pretty_print()
print
print ' ***** duplicates removed *****'
print
kernels = grammar.expand(e, g)
for f in grammar.remove_duplicates(kernels):
print f.pretty_print()
print
print '%d originally, %d without duplicates' % (len(kernels), len(grammar.remove_duplicates(kernels)))
print 'expand_test2 complete'
def load_mauna():
'''2011 Mauna dataset.'''
data_file = '../data/mauna.mat'
data = scipy.io.loadmat(data_file)
return data['X'], data['y']
def load_mauna_original():
"""
Original Mauna dataset made to match the experiments from Carl's book.
For details, see data/preprocess_mauna_2004.m
"""
data_file = '../data/mauna2003.mat'
data = scipy.io.loadmat(data_file)
return data['X'], data['y']
def call_gpml_test():
np.random.seed(0)
k = fk.SumKernel([fk.SqExpKernel(0, 0), fk.SqExpKernel(0, 0)])
print k.gpml_kernel_expression()
print k.pretty_print()
print '[%s]' % k.param_vector()
X, y = load_mauna()
N_orig = X.shape[0]
X = X[:N_orig//3, :]
y = y[:N_orig//3, :]
results = []
pylab.figure()
for i in range(15):
init_params = np.random.normal(size=k.param_vector().size)
#kernel_hypers, nll, nlls = gpml.optimize_params(k.gpml_kernel_expression(), k.param_vector(), X, y, return_all=True)
kernel_hypers, nll, nlls = gpml.optimize_params(k.gpml_kernel_expression(), init_params, X, y, return_all=True)
print "kernel_hypers =", kernel_hypers
print "nll =", nll
k_opt = k.family().from_param_vector(kernel_hypers)
print k_opt.gpml_kernel_expression()
print k_opt.pretty_print()
print '[%s]' % k_opt.param_vector()
pylab.semilogx(range(1, nlls.size+1), nlls)
results.append((kernel_hypers, nll))
pylab.draw()
print
print
results = sorted(results, key=lambda p: p[1])
for kernel_hypers, nll in results:
print nll, kernel_hypers
print "done"
def sample_mauna_best():
# This kernel was chosen from a run of Mauna datapoints.
kernel = ( fk.SqExpKernel(-0.7, -1.3) + fk.SqExpKernel(4.8, 2.3) ) * \
( fk.SqExpKernel(3.0, 0.5) + fk.SqExpPeriodicKernel(0.4, -0.0, -0.9) )
X = np.linspace(0,50,500)
# Todo: set random seed.
sample = gpml.sample_from_gp_prior(kernel, X)
pylab.figure()
pylab.plot(X, sample)
pylab.title('( SqExp(ell=-0.7, sf=-1.3) + SqExp(ell=4.8, sf=2.3) ) \n x ( SqExp(ell=3.0, sf=0.5) + Periodic(ell=0.4, p=-0.0, sf=-0.9) )')
def sample_Carls_kernel():
kernel = fk.Carls_Mauna_kernel()
X = np.linspace(0,50,500)
# Todo: set random seed.
sample = gpml.sample_from_gp_prior(kernel, X)
pylab.figure()
pylab.plot(X, sample)
pylab.title("Carl's kernel")
def compare_kernels_experiment():
kernel1 = fk.Carls_Mauna_kernel()
kernel2 = ( fk.SqExpKernel(-0.7, -1.3) + fk.SqExpKernel(4.8, 2.3) ) * \
( fk.SqExpKernel(3.0, 0.5) + fk.SqExpPeriodicKernel(0.4, -0.0, -0.9) )
#kernel2 = ( SqExp(ell=-0.8, sf=-1.4) + Periodic(ell=0.5, p=-0.3, sf=-1.1) + RQ(ell=1.9, sf=1.6, a=0.2) + ( SqExp(ell=4.5, sf=1.0) x Periodic(ell=0.6, p=-0.0, sf=0.1) ) )
X, y = load_mauna_original()
N_orig = X.shape[0] # subsample data.
X = X[:N_orig//5, :]
y = y[:N_orig//5, :]
print "Carl's kernel"
print kernel1.pretty_print()
kernel_hypers1, nll1 = gpml.optimize_params(kernel1.gpml_kernel_expression(), kernel1.param_vector(), \
X, y, noise=np.log(0.19), iters=100 )
k1_opt = kernel1.family().from_param_vector(kernel_hypers1)
print k1_opt.pretty_print()
print "Carl's NLL =", nll1
print "Our kernel"
print kernel2.pretty_print()
kernel_hypers2, nll2 = gpml.optimize_params(kernel2.gpml_kernel_expression(), kernel2.param_vector(), \
X, y, noise=np.log(0.19), iters=100)
k2_opt = kernel2.family().from_param_vector(kernel_hypers2)
print k2_opt.pretty_print()
print "Our NLL =", nll2
def simple_mauna_experiment():
'''A first version of an experiment learning kernels'''
seed_kernels = [fk.SqExpKernel(0, 0)]
X, y = load_mauna_original()
N_orig = X.shape[0] # subsample data.
X = X[:N_orig//3, :]
y = y[:N_orig//3, :]
max_depth = 4
k = 4 # Expand k best
nll_key = 1
laplace_key = 2
results = []
for dummy in range(max_depth):
new_results = structure_search.try_expanded_kernels(X, y, D=2, seed_kernels=seed_kernels, verbose=False)
results = results + new_results
print
results = sorted(results, key=lambda p: p[nll_key], reverse=True)
for kernel, nll, laplace in results:
print nll, laplace, kernel.pretty_print()
seed_kernels = [r[0] for r in sorted(new_results, key=lambda p: p[nll_key])[0:k]]
def plot_Carls_kernel():
kernel = fk.Carls_Mauna_kernel()
X = np.linspace(0,10,1000)
sigma = gpml.plot_kernel(kernel, X)
pylab.figure()
pylab.plot(X, sigma)
pylab.title("Carl's kernel")
def plot_our_kernel():
kernel = ( fk.SqExpKernel(-0.7, -1.3) + fk.SqExpKernel(4.8, 2.3) ) * \
( fk.SqExpKernel(3.0, 0.5) + fk.SqExpPeriodicKernel(0.4, -0.0, -0.9) )
X = np.linspace(0,10,1000)
sigma = gpml.plot_kernel(kernel, X)
pylab.figure()
pylab.plot(X, sigma)
pylab.title('Our kernel');
def load_simple_gef_load():
'''Zone 1 and temperature station 2'''
data_file = '../data/gef_load_simple.mat'
data = scipy.io.loadmat(data_file)
return data['X'], data['y']
def load_full_gef_load():
'''20 Zones in y, time and 11 temp stations in X'''
data_file = '../data/gef_load_full_Xy.mat'
data = scipy.io.loadmat(data_file)
return data['X'], data['y']
def simple_gef_load_experiment(verbose=True):
'''A first version of an experiment learning kernels'''
seed_kernels = [fk.MaskKernel(2, 0, fk.SqExpKernel(0, 0)),
fk.MaskKernel(2, 1, fk.SqExpKernel(0, 0))]
X, y = load_simple_gef_load()
# subsample data.
X = X[0:99, :]
y = y[0:99, :]
max_depth = 5
k = 2 # Expand k best
nll_key = 1
BIC_key = 2
active_key = BIC_key
results = []
for dummy in range(max_depth):
new_results = structure_search.try_expanded_kernels(X, y, D=2, seed_kernels=seed_kernels, verbose=verbose)
results = results + new_results
print
results = sorted(results, key=lambda p: p[active_key], reverse=True)
for kernel, nll, BIC in results:
print nll, BIC, kernel.pretty_print()
seed_kernels = [r[0] for r in sorted(new_results, key=lambda p: p[active_key])[0:k]]
def full_gef_load_experiment(zone=1, max_depth=5, verbose=True):
'''Round 2'''
# seed_kernels = [fk.MaskKernel(2, 0, fk.SqExpKernel(0, 0)),
# fk.MaskKernel(2, 1, fk.SqExpKernel(0, 0))]
seed_kernels = [fk.MaskKernel(12, i, fk.SqExpKernel(0., 0.)) for i in range(12)] + \
[fk.MaskKernel(12, i, fk.SqExpPeriodicKernel(0., 0., 0.)) for i in range(12)] + \
[fk.MaskKernel(12, i, fk.RQKernel(0., 0., 0.)) for i in range(12)]
X, y = load_full_gef_load()
# subsample data.
X = X[0:299, :]
y = y[0:299, zone-1]
# max_depth = 5
k = 2 # Expand k best
nll_key = 1
BIC_key = 2
active_key = BIC_key
results = []
for i in range(max_depth):
if i:
expand = True
else:
expand = False
new_results = structure_search.try_expanded_kernels(X, y, D=12, seed_kernels=seed_kernels, expand=expand, verbose=verbose)
results = results + new_results
print
results = sorted(results, key=lambda p: p[active_key], reverse=True)
for kernel, nll, BIC in results:
print nll, BIC, kernel.pretty_print()
seed_kernels = [r[0] for r in sorted(new_results, key=lambda p: p[active_key])[0:k]]
#os.system(command_str)
#### Attempt at sending individual jobs to the cluster
import pysftp, tempfile, config, subprocess, config, time
nax = np.newaxis
def mkstemp_safe(directory, suffix):
(os_file_handle, file_name) = tempfile.mkstemp(dir=directory, suffix=suffix)
os.close(os_file_handle)
return file_name
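
def _mkstemp_safe_demo(local_dir='../temp/'):
    """Usage sketch only (assumes local_dir exists); never called.

    The handle is closed inside mkstemp_safe, so other processes (e.g.
    MATLAB on the cluster) can later open the file by name without
    descriptor conflicts."""
    tmp_mat = mkstemp_safe(local_dir, '.mat')
    scipy.io.savemat(tmp_mat, {'X': np.zeros((2, 2)), 'y': np.zeros(2)})
    os.remove(tmp_mat)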
def fear_connect():
return pysftp.Connection('fear', username=config.USERNAME, password=config.PASSWORD)
def fear_command(cmd, fear=None):
if not fear is None:
srv = fear
else:
srv = fear_connect()
output = srv.execute(cmd)
if fear is None:
srv.close()
return output
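# Pattern sketch (illustration only): the fear_* helpers accept an optional
# open connection so repeated calls can share a single session, e.g.
#   srv = fear_connect()
#   fear_command('ls temp', fear=srv)
#   fear_file_exists('temp/foo.mat', fear=srv)  # 'temp/foo.mat' is a placeholder
#   srv.close()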
def copy_to_fear(local_path, remote_path, fear=None):
if not fear is None:
srv = fear
else:
srv = fear_connect()
srv.put(local_path, remote_path)
if fear is None:
srv.close()
def copy_from_fear(remote_path, local_path, fear=None):
if not fear is None:
srv = fear
else:
srv = fear_connect()
srv.get(remote_path, local_path)
if fear is None:
srv.close()
def fear_rm(remote_path, fear=None):
if not fear is None:
srv = fear
else:
srv = fear_connect()
output = srv.execute('rm %s' % remote_path)
if fear is None:
srv.close()
return output
def fear_file_exists(remote_path, fear=None):
if not fear is None:
srv = fear
else:
srv = fear_connect()
response = srv.execute('if [ -e %s ] \nthen \necho ''exists'' \nfi' % remote_path)
if fear is None:
srv.close()
return response == ['exists\n']
def fear_qdel_all(fear=None):
if not fear is None:
srv = fear
else:
srv = fear_connect()
output = srv.execute('. /usr/local/grid/divf2/common/settings.sh; qdel -u %s' % config.USERNAME)
if fear is None:
srv.close()
return output
def qsub_matlab_code(code, verbose=True, local_dir ='../temp/', remote_dir ='./temp/', fear=None):
# Write to a temp script
script_file = mkstemp_safe(local_dir, '.m')
shell_file = mkstemp_safe(local_dir, '.sh')
f = open(script_file, 'w')
f.write(code)
f.close()
#### Local file reference without extension - MATLAB fails silently otherwise
f = open(shell_file, 'w')
f.write('/usr/local/apps/matlab/matlabR2011b/bin/matlab -nosplash -nojvm -nodisplay -singleCompThread -r ' + script_file.split('/')[-1].split('.')[0] + '\n')
f.close()
# Copy this to fear
copy_to_fear(script_file, remote_dir + script_file.split('/')[-1], fear)
copy_to_fear(shell_file, remote_dir + shell_file.split('/')[-1], fear)
# Create fear call
#### WARNING - hardcoded path 'temp'
fear_string = ' '.join(['. /usr/local/grid/divf2/common/settings.sh;',
'cd temp;'
'chmod +x %s;' % shell_file.split('/')[-1],
'qsub -l lr=0',
shell_file.split('/')[-1] + ';',
'cd ..'])
if verbose:
print 'Submitting : %s' % fear_string
# Send this command to fear
fear_command(fear_string, fear)
# Tell the caller where the script file was written
return script_file, shell_file
def re_qsub(shell_file, verbose=True, fear=None):
# Create fear call
#### WARNING - hardcoded path 'temp'
fear_string = ' '.join(['. /usr/local/grid/divf2/common/settings.sh;',
'cd temp;'
'chmod +x %s;' % shell_file.split('/')[-1],
'qsub -l lr=0',
shell_file.split('/')[-1] + ';',
'cd ..'])
if verbose:
print 'Re-submitting : %s' % fear_string
# Send this command to fear
fear_command(fear_string, fear)
# Matlab code to optimise hyper-parameters on one file, given one kernel.
OPTIMIZE_KERNEL_CODE = r"""
%% Load the data, it should contain X and y.
a = 'trying to load data files'
load '%(datafile)s'
a = 'loaded data files'
%% Load GPML
addpath(genpath('%(gpml_path)s'));
a = 'loaded GPML'
%% Set up model.
meanfunc = {@meanConst}
hyp.mean = mean(y)
covfunc = %(kernel_family)s
hyp.cov = %(kernel_params)s
likfunc = @likGauss
hyp.lik = %(noise)s
[hyp_opt, nlls] = minimize(hyp, @gp, -%(iters)s, @infExact, meanfunc, covfunc, likfunc, X, y);
best_nll = nlls(end)
laplace_nle = best_nll %% HACK HACK HACK
save( '%(writefile)s', 'hyp_opt', 'best_nll', 'nlls', 'laplace_nle' );
a = 'Goodbye, World!'
exit();
"""
def fear_run_experiments(kernels, X, y, return_all=False, verbose=True, noise=None, iters=300, local_dir ='../temp/', remote_dir ='./temp/', \
sleep_time=10, n_sleep_timeout=6, re_submit_wait=60):
'''
Sends jobs to fear, waits for them, returns the results
'''
# Not sure what this is for
if X.ndim == 1:
X = X[:, nax]
if y.ndim == 1:
y = y[:, nax]
if noise is None:
noise = np.log(np.var(y)/10) #### Just a heuristic.
data = {'X': X, 'y': y}
# Setup the connection to fear
fear = fear_connect()
# Submit all the jobs and remember where we put them
data_files = []
write_files = []
script_files = []
shell_files = []
for kernel in kernels:
# Create data file and results file
data_files.append(mkstemp_safe(local_dir, '.mat'))
write_files.append(mkstemp_safe(local_dir, '.mat'))
# Save data
scipy.io.savemat(data_files[-1], data)
# Copy files to fear
copy_to_fear(data_files[-1], remote_dir + data_files[-1].split('/')[-1], fear)
# copy_to_fear(write_files[-1], remote_dir + write_files[-1].split('/')[-1])
# Create MATLAB code
code = OPTIMIZE_KERNEL_CODE % {'datafile': data_files[-1].split('/')[-1],
'writefile': write_files[-1].split('/')[-1],
'gpml_path': config.FEAR_GPML_PATH,
'kernel_family': kernel.gpml_kernel_expression(),
'kernel_params': '[ %s ]' % ' '.join(str(p) for p in kernel.param_vector()),
'noise': str(noise),
'iters': str(iters)}
# Submit this to fear and save the file names
script_file, shell_file = qsub_matlab_code(code=code, verbose=verbose, local_dir=local_dir, remote_dir=remote_dir, fear=fear)
script_files.append(script_file)
shell_files.append(shell_file)
# Let the scripts run
# if verbose:
# print 'Giving the jobs some time to run'
# time.sleep(re_submit_wait)
# Wait for and read in results
fear_finished = False
job_finished = [False] * len(write_files)
results = [None] * len(write_files)
sleep_count = 0
while not fear_finished:
for (i, write_file) in enumerate(write_files):
if not job_finished[i]:
if fear_file_exists(remote_dir + write_file.split('/')[-1], fear):
# Another job has finished
job_finished[i] = True
sleep_count = 0
# Copy files
os.remove(write_file) # Not sure if necessary
copy_from_fear(remote_dir + write_file.split('/')[-1], write_file, fear)
# Read results ##### THIS WILL CHANGE IF RUNNING DIFFERENT TYPE OF EXPERIMENT
gpml_result = scipy.io.loadmat(write_file)
optimized_hypers = gpml_result['hyp_opt']
nll = gpml_result['best_nll'][0, 0]
# nlls = gpml_result['nlls'].ravel()
laplace_nle = gpml_result['laplace_nle'][0, 0]
kernel_hypers = optimized_hypers['cov'][0, 0].ravel()
k_opt = kernels[i].family().from_param_vector(kernel_hypers)
BIC = 2 * nll + len(kernel_hypers) * np.log(y.shape[0])
results[i] = (k_opt, nll, laplace_nle, BIC)
# Tidy up
fear_rm(remote_dir + data_files[i].split('/')[-1], fear)
fear_rm(remote_dir + write_files[i].split('/')[-1], fear)
fear_rm(remote_dir + script_files[i].split('/')[-1], fear)
fear_rm(remote_dir + shell_files[i].split('/')[-1], fear)
fear_rm(remote_dir + shell_files[i].split('/')[-1] + '*', fear)
os.remove(data_files[i])
os.remove(write_files[i])
os.remove(script_files[i])
os.remove(shell_files[i])
# Tell the world
if verbose:
print '%d / %d jobs complete' % (sum(job_finished), len(job_finished))
if sum(job_finished) == len(job_finished):
fear_finished = True
if not fear_finished:
if verbose:
print 'Sleeping'
sleep_count += 1
if sleep_count < n_sleep_timeout:
time.sleep(sleep_time)
else:
# Jobs taking too long - assume failure - resubmit
fear_qdel_all(fear)
for (i, shell_file) in enumerate(shell_files):
if not job_finished[i]:
re_qsub(shell_file, verbose=verbose, fear=fear)
if verbose:
print 'Giving the jobs some time to run'
time.sleep(re_submit_wait)
sleep_count = 0
fear.close()
return results
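# Usage sketch (illustration only; assumes a reachable cluster and valid
# config). Each result tuple is (k_opt, nll, laplace_nle, BIC), matching
# the packing in the polling loop above:
#   results = fear_run_experiments([fk.SqExpKernel(0., 0.)], X, y)
#   for k_opt, nll, laplace_nle, BIC in results:
#       print BIC, k_opt.pretty_print()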
def fear_load_mat(data_file, y_dim=1):
'''Load a Matlab file'''
data = scipy.io.loadmat(data_file)
return data['X'], data['y'][:,y_dim-1], np.shape(data['X'])[1]
def fear_expand_kernels(D, seed_kernels, verbose=False):
'''
Just expands
'''
g = grammar.MultiDGrammar(D)
print 'Seed kernels :'
for k in seed_kernels:
print k.pretty_print()
kernels = []
for k in seed_kernels:
kernels = kernels + grammar.expand(k, g)
kernels = grammar.remove_duplicates(kernels)
print 'Expanded kernels :'
for k in kernels:
print k.pretty_print()
return (kernels)
def fear_experiment(data_file, results_filename, y_dim=1, subset=None, max_depth=2, k=2, verbose=True, sleep_time=60, n_sleep_timeout=20, re_submit_wait=60, \
description=''):
'''Recursively search for the best kernel'''
X, y, D = fear_load_mat(data_file, y_dim)
# Subset if necessary
if not subset is None:
X = X[subset, :]
y = y[subset]
##### This should be abstracted
seed_kernels = [fk.MaskKernel(D, i, fk.SqExpKernel(0., 0.)) for i in range(D)] + \
[fk.MaskKernel(D, i, fk.SqExpPeriodicKernel(0., 0., 0.)) for i in range(D)] + \
[fk.MaskKernel(D, i, fk.RQKernel(0., 0., 0.)) for i in range(D)]
nll_key = 1
laplace_key = 2
BIC_key = 3
active_key = BIC_key
results = []
results_sequence = []
for r in range(max_depth):
if r == 0:
new_results = fear_run_experiments(seed_kernels, X, y, verbose=verbose, \
sleep_time=sleep_time, n_sleep_timeout=n_sleep_timeout, re_submit_wait=re_submit_wait)
else:
new_results = fear_run_experiments(fear_expand_kernels(D, seed_kernels, verbose=verbose), X, y, verbose=verbose, \
sleep_time=sleep_time, n_sleep_timeout=n_sleep_timeout, re_submit_wait=re_submit_wait)
results = results + new_results
print
results = sorted(results, key=lambda p: p[active_key], reverse=True)
for kernel, nll, laplace, BIC in results:
print nll, laplace, BIC, kernel.pretty_print()
seed_kernels = [r[0] for r in sorted(new_results, key=lambda p: p[active_key])[0:k]]
results_sequence.append(results)
# Write results to a file
results = sorted(results, key=lambda p: p[active_key], reverse=True)
with open(results_filename, 'w') as outfile:
outfile.write('Experiment results for\n datafile = %s\n y_dim = %d\n subset = %s\n max_depth = %f\n k = %f\n Description = %s\n\n' % (data_file, y_dim, subset, max_depth, k, description))
for (i, results) in enumerate(results_sequence):
outfile.write('\n%%%%%%%%%% Level %d %%%%%%%%%%\n\n' % i)
for kernel, nll, laplace, BIC in results:
outfile.write( 'nll=%f, laplace=%f, BIC=%f, kernel=%s\n' % (nll, laplace, BIC, kernel.__repr__()))
from mpl_toolkits.axes_grid1 import host_subplot
import mpl_toolkits.axisartist as AA
import matplotlib.pyplot as plt
def plot_gef_load_Z01_raw():
X, y, D = fear_load_mat('../data/gef_load_full_Xy.mat', 1)
host = host_subplot(111, axes_class=AA.Axes)
plt.subplots_adjust(right=0.85)
par1 = host.twinx()
# host.set_xlim(0, 2)
# host.set_ylim(0, 2)
host.set_xlabel("Time")
host.set_ylabel("Load (Z01)")
par1.set_ylabel("Temperature (T09)")
p1, = host.plot(X[0:499,0], y[0:499])
p2, = par1.plot(X[0:499,0], X[0:499,9])
# par1.set_ylim(0, 4)
host.legend()
host.axis["left"].label.set_color(p1.get_color())
par1.axis["right"].label.set_color(p2.get_color())
plt.draw()
plt.show()
def plot_gef_load_Z01_split_mean():
X, y, D = fear_load_mat('../data/gef_load_full_Xy.mat', 1)
kernel = fk.MaskKernel(D, 0, fk.RQKernel(0.268353, -0.104149, -2.105742)) * fk.MaskKernel(D, 9, fk.SqExpKernel(1.160242, 0.004344)) * \
(fk.MaskKernel(D, 0, fk.SqExpPeriodicKernel(-0.823413, 0.000198, -0.917064)) + fk.MaskKernel(D, 0, fk.RQKernel(-0.459219, -0.077250, -2.212718)))
kernel_1 = fk.MaskKernel(D, 0, fk.RQKernel(0.268353, -0.104149, -2.105742)) * fk.MaskKernel(D, 9, fk.SqExpKernel(1.160242, 0.004344)) * \
fk.MaskKernel(D, 0, fk.SqExpPeriodicKernel(-0.823413, 0.000198, -0.917064))
posterior_mean_1 = gpml.posterior_mean(kernel, kernel_1, X[0:499,:], y[0:499])
kernel_2 = fk.MaskKernel(D, 0, fk.RQKernel(0.268353, -0.104149, -2.105742)) * fk.MaskKernel(D, 9, fk.SqExpKernel(1.160242, 0.004344)) * \
fk.MaskKernel(D, 0, fk.RQKernel(-0.459219, -0.077250, -2.212718))
posterior_mean_2 = gpml.posterior_mean(kernel, kernel_2, X[0:499,:], y[0:499])
host = host_subplot(111, axes_class=AA.Axes)
plt.subplots_adjust(right=0.85)
par1 = host.twinx()
# host.set_xlim(0, 2)
# host.set_ylim(0, 2)
host.set_xlabel("Time")
host.set_ylabel("Periodic component")
plt.title('Posterior mean functions')
par1.set_ylabel("Smooth component")
p1, = host.plot(X[0:499,0], posterior_mean_1)
p2, = par1.plot(X[0:499,0], posterior_mean_2)
# par1.set_ylim(0, 4)
host.legend()
host.axis["left"].label.set_color(p1.get_color())
par1.axis["right"].label.set_color(p2.get_color())
plt.draw()
plt.show()
def plot_gef_load_Z01_split_mean_temp():
X, y, D = fear_load_mat('../data/gef_load_full_Xy.mat', 1)
kernel = fk.MaskKernel(D, 0, fk.RQKernel(0.268353, -0.104149, -2.105742)) * fk.MaskKernel(D, 9, fk.SqExpKernel(1.160242, 0.004344)) * \
(fk.MaskKernel(D, 0, fk.SqExpPeriodicKernel(-0.823413, 0.000198, -0.917064)) + fk.MaskKernel(D, 0, fk.RQKernel(-0.459219, -0.077250, -2.212718)))
kernel_1 = fk.MaskKernel(D, 0, fk.RQKernel(0.268353, -0.104149, -2.105742)) * fk.MaskKernel(D, 9, fk.SqExpKernel(1.160242, 0.004344)) * \
fk.MaskKernel(D, 0, fk.SqExpPeriodicKernel(-0.823413, 0.000198, -0.917064))
posterior_mean_1 = gpml.posterior_mean(kernel, kernel_1, X[0:499,:], y[0:499], iters=10)
kernel_2 = fk.MaskKernel(D, 0, fk.RQKernel(0.268353, -0.104149, -2.105742)) * fk.MaskKernel(D, 9, fk.SqExpKernel(1.160242, 0.004344)) * \
fk.MaskKernel(D, 0, fk.RQKernel(-0.459219, -0.077250, -2.212718))
posterior_mean_2 = gpml.posterior_mean(kernel, kernel_2, X[0:499,:], y[0:499], iters=10)
plt.figure()
host = host_subplot(111, axes_class=AA.Axes)
plt.subplots_adjust(right=0.85)
par1 = host.twinx()
# host.set_xlim(0, 2)
# host.set_ylim(0, 2)
host.set_xlabel("Temperature (T09)")
# par1.set_ylabel("Periodic component")
plt.title('Posterior mean function')
host.set_ylabel("Load posterior mean")
p2, = host.plot(X[0:499,9], y[0:499], 'o', alpha=0.5)
p1, = host.plot(X[0:499,9], posterior_mean_2, 'o')
# par1.set_ylim(0, 4)
host.legend()
host.axis["left"].label.set_color(p1.get_color())
# par1.axis["right"].label.set_color(p2.get_color())
plt.draw()
plt.show()
def plot_gef_load_Z01_smooth_2d_mean():
X, y, D = fear_load_mat('../data/gef_load_full_Xy.mat', 1)
kernel = fk.MaskKernel(D, 0, fk.RQKernel(0.268353, -0.104149, -2.105742)) * fk.MaskKernel(D, 9, fk.SqExpKernel(1.160242, 0.004344)) * \
(fk.MaskKernel(D, 0, fk.SqExpPeriodicKernel(-0.823413, 0.000198, -0.917064)) + fk.MaskKernel(D, 0, fk.RQKernel(-0.459219, -0.077250, -2.212718)))
kernel_1 = fk.MaskKernel(D, 0, fk.RQKernel(0.268353, -0.104149, -2.105742)) * fk.MaskKernel(D, 9, fk.SqExpKernel(1.160242, 0.004344)) * \
fk.MaskKernel(D, 0, fk.SqExpPeriodicKernel(-0.823413, 0.000198, -0.917064))
kernel_2 = fk.MaskKernel(D, 0, fk.RQKernel(0.268353, -0.104149, -2.105742)) * fk.MaskKernel(D, 9, fk.SqExpKernel(1.160242, 0.004344)) * \
fk.MaskKernel(D, 0, fk.RQKernel(-0.459219, -0.077250, -2.212718))
min_T = -3.0
max_T = 1.0
N_T = 10
temps = np.repeat(np.linspace(min_T, max_T, N_T), 499)
input = np.tile(X[0:499,:], (N_T, 1))
input[:,9] = temps
posterior_mean = gpml.posterior_mean(kernel, kernel_2, X[0:499,:], y[0:499], input, iters=300)
X_plt = X[0:499,0]
Y_plt = np.linspace(min_T, max_T, N_T)
Z_plt = np.reshape(posterior_mean, (N_T, 499), 'A')
data = {'X': X_plt, 'Y': Y_plt, 'Z': Z_plt, 'post_mean': posterior_mean}
scipy.io.savemat('temp_data.mat', data)
def plot_gef_load_Z01():
# This kernel was chosen from a run of gef_load datapoints.
# kernel = eval(ProductKernel([ covMask(ndim=12, active_dimension=0, base_kernel=RQKernel(lengthscale=0.268353, output_variance=-0.104149, alpha=-2.105742)), covMask(ndim=12, active_dimension=9, base_kernel=SqExpKernel(lengthscale=1.160242, output_variance=0.004344)), SumKernel([ covMask(ndim=12, active_dimension=0, base_kernel=SqExpPeriodicKernel(lengthscale=-0.823413, period=0.000198, output_variance=-0.917064)), covMask(ndim=12, active_dimension=0, base_kernel=RQKernel(lengthscale=-0.459219, output_variance=-0.077250, alpha=-2.212718)) ]) ]))
X, y, D = fear_load_mat('../data/gef_load_full_Xy.mat', 1)
kernel = fk.MaskKernel(D, 0, fk.RQKernel(0.268353, -0.104149, -2.105742)) * fk.MaskKernel(D, 9, fk.SqExpKernel(1.160242, 0.004344)) * \
(fk.MaskKernel(D, 0, fk.SqExpPeriodicKernel(-0.823413, 0.000198, -0.917064)) + fk.MaskKernel(D, 0, fk.RQKernel(-0.459219, -0.077250, -2.212718)))
# Todo: set random seed.
sample = gpml.sample_from_gp_prior(kernel, X[0:499,:])
pylab.figure()
pylab.plot(X[0:499,0], y[0:499])
pylab.title('GEFCom2012 Z01 and T09 - first 500 data points')
pylab.xlabel('Time')
pylab.ylabel('Load')
# pylab.figure()
# pylab.plot(X[0:499,0], sample)
# pylab.title('GEF load Z01 - a sample from the learnt kernel')
# pylab.xlabel('Time')
# pylab.ylabel('Load')
#
# kernel_1 = fk.MaskKernel(D, 0, fk.RQKernel(0.268353, -0.104149, -2.105742)) * fk.MaskKernel(D, 9, fk.SqExpKernel(1.160242, 0.004344)) * \
# fk.MaskKernel(D, 0, fk.SqExpPeriodicKernel(-0.823413, 0.000198, -0.917064))
#
# posterior_mean_1 = gpml.posterior_mean(kernel, kernel_1, X[0:499,:], y[0:499])
#
# pylab.figure()
# pylab.plot(X[0:499,0], posterior_mean_1)
# pylab.title('GEF load Z01 - periodic posterior mean component')
# pylab.xlabel('Time')
# pylab.ylabel('Load')
#
# kernel_2 = fk.MaskKernel(D, 0, fk.RQKernel(0.268353, -0.104149, -2.105742)) * fk.MaskKernel(D, 9, fk.SqExpKernel(1.160242, 0.004344)) * \
# fk.MaskKernel(D, 0, fk.RQKernel(-0.459219, -0.077250, -2.212718))
#
# posterior_mean_2 = gpml.posterior_mean(kernel, kernel_2, X[0:499,:], y[0:499])
#
# pylab.figure()
# pylab.plot(X[0:499,0], posterior_mean_2)
# pylab.title('GEF load Z01 - smooth posterior mean component')
# pylab.xlabel('Time')
# pylab.ylabel('Load')
def main():
# Run everything
# fear_experiment('../data/abalone_500.mat', '../results/abalone_500_01.txt', max_depth=4, k=3)
# fear_experiment('../data/gef_load_full_Xy.mat', '../results/gef_load_500_Z01_02.txt', max_depth=6, k=5, subset=range(500), y_dim=1, description = 'BIC, 0 init')
# fear_experiment('../data/gef_load_full_Xy.mat', '../results/gef_load_500_Z09_02.txt', max_depth=6, k=5, subset=range(500), y_dim=9, description = 'BIC, 0 init')
fear_experiment('../data/bach_synth_r_200.mat', '../results/bach_synth_r_200_test.txt', max_depth=2, k=1, description = 'Dave test')
# fear_experiment('../data/housing.mat', '../results/housing_02.txt', max_depth=6, k=5, description = 'BIC, 0 init')
# fear_experiment('../data/mauna2003.mat', '../results/mauna2003_02.txt', max_depth=6, k=5, description = 'BIC, 0 init')
# fear_experiment('../data/mauna2011.mat', '../results/mauna2011_02.txt', max_depth=6, k=5, description = 'BIC, 0 init')
# fear_experiment('../data/prostate.mat', '../results/prostate_02.txt', max_depth=6, k=5, description = 'BIC, 0 init')
# fear_experiment('../data/pumadyn256.mat', '../results/pumadyn256_02.txt', max_depth=6, k=5, description = 'BIC, 0 init')
# fear_experiment('../data/r_concrete_100.mat', '../results/r_concrete_100_02.txt', max_depth=6, k=5, description = 'BIC, 0 init')
# fear_experiment('../data/r_concrete_500.mat', '../results/r_concrete_500_02.txt', max_depth=6, k=5, description = 'BIC, 0 init')
# fear_experiment('../data/r_solar_500.mat', '../results/r_solar_500_02.txt', max_depth=6, k=5, description = 'BIC, 0 init')
# fear_experiment('../data/unicycle_pitch_angle_400.mat', '../results/unicycle_pitch_angle_400_02.txt', max_depth=6, k=5, description = 'BIC, 0 init')
# fear_experiment('../data/unicycle_pitch_ang_vel_400.mat', '../results/unicycle_pitch_ang_vel_400_02.txt', max_depth=6, k=5, description = 'BIC, 0 init')
def debug_laplace():
# Load data set
X, y, D, Xtest, ytest = gpml.load_mat('../data/kfold_data/r_concrete_500_fold_10_of_10.mat', y_dim=1)
# Load the suspicious kernel
sk = fk.repr_string_to_kernel('ScoredKernel(k_opt=ProductKernel([ MaskKernel(ndim=8, active_dimension=0, base_kernel=CubicKernel(offset=1.757755, output_variance=7.084045)), MaskKernel(ndim=8, active_dimension=7, base_kernel=SqExpPeriodicKernel(lengthscale=-2.701080, period=-0.380918, output_variance=-0.071214)) ]), nll=6348.096611, laplace_nle=-184450132.068237, bic_nle=12720.630212, noise=[-1.77276072])')
# Create some code to evaluate it
if X.ndim == 1: X = X[:, nax]
if y.ndim == 1: y = y[:, nax]
ndata = y.shape[0]
# Create data file
data_file = cblparallel.create_temp_file('.mat')
scipy.io.savemat(data_file, {'X': X, 'y': y}) # Save regression data
# Move to fear
cblparallel.copy_to_remote(data_file)
scripts = [gpml.OPTIMIZE_KERNEL_CODE % {'datafile': data_file.split('/')[-1],
'writefile': '%(output_file)s', # N.B. cblparallel manages output files
'gpml_path': cblparallel.gpml_path(local_computation=False),
'kernel_family': sk.k_opt.gpml_kernel_expression(),
'kernel_params': '[ %s ]' % ' '.join(str(p) for p in sk.k_opt.param_vector()),
'noise': str(sk.noise),
'iters': str(300)}]
#### Need to be careful with % signs
#### For the moment, cblparallel expects no single % signs - FIXME
scripts[0] = re.sub('% ', '%% ', scripts[0])
# Test
scripts[0] = re.sub('delta = 1e-6', 'delta = 1e-6', scripts[0])
#scripts[0] = re.sub('hyp.lik = [-1.77276072]', 'hyp.lik = [-0.77276072]', scripts[0])
output_file = cblparallel.run_batch_on_fear(scripts, language='matlab', max_jobs=600)[0]
# Read in results
output = gpml.read_outputs(output_file)
result = ScoredKernel.from_matlab_output(output, sk.k_opt.family(), ndata)
print result
print output.hessian
os.remove(output_file)
# Remove temporary data file (perhaps on the cluster server)
cblparallel.remove_temp_file(data_file, local_computation=False)
def debug_descriptions():
ck = fk.Carls_Mauna_kernel()
print ck.english()
| mit |
gregcaporaso/scikit-bio | skbio/stats/power.py | 3 | 47995 | r"""
Empirical Power Estimation (:mod:`skbio.stats.power`)
=====================================================
.. currentmodule:: skbio.stats.power
The purpose of this module is to provide empirical, post-hoc power estimation
of normally and non-normally distributed data. It also provides support to
subsample data to facilitate this analysis.
The underlying principle is based on subsampling and Monte Carlo simulation.
Assume that there is some set of populations, :math:`K_{1}, K_{2}, ... K_{n}`
which have some property, :math:`\mu` such that :math:`\mu_{1} \neq \mu_{2}
\neq ... \neq \mu_{n}`. For each of the populations, a sample, :math:`S` can be
drawn, with a parameter, :math:`x` where :math:`x \approx \mu` and for the
samples, we can use a test, :math:`f`, to show that :math:`x_{1} \neq x_{2}
\neq ... \neq x_{n}`.
Since we know that :math:`\mu_{1} \neq \mu_{2} \neq ... \neq \mu_{n}`,
we know we should reject the null hypothesis. If we fail to reject the null
hypothesis, we have committed a Type II error and our result is a false
negative. We can estimate the frequency of Type II errors at various sampling
depths by repeatedly subsampling the populations and observing how often we
see a false negative. If we repeat this several times for each subsampling
depth, and vary the depths we use, we can start to approximate a relationship
between the number of samples we use and the rate of false negatives, also
called the statistical power of the test.
To generate complete power curves from data which appears underpowered, the
`statsmodels.stats.power` package can be used to solve for an effect size. The
effect size can be used to extrapolate a power curve for the data.
Most functions in this module accept a statistical test function which takes a
list of samples and returns a p value. The test is then evaluated over a series
of subsamples.
Sampling may be handled in two ways. For any set of samples, we may simply
choose to draw :math:`n` observations at random for each sample. Alternatively,
if metadata is available, samples can be matched based on a set of control
categories so that paired samples are drawn at random from the set of available
matches.
Functions
---------
.. autosummary::
:toctree:
subsample_power
subsample_paired_power
confidence_bound
paired_subsamples
Examples
--------
Suppose we wanted to test that there's a relationship between two random
variables, `ind` and `dep`. Let's use random subsampling to estimate the
statistical power of our test with an alpha of 0.1, 0.01, and 0.001.
To control for the pseudo-random number generation, we will use a seed.
When using these functions with your own data, you don't need to include the
step.
>>> import numpy as np
>>> np.random.seed(20)
>>> ind = np.random.randint(0, 20, 15)
>>> ind
array([ 3, 15, 9, 11, 7, 2, 0, 8, 19, 16, 6, 6, 16, 9, 5])
>>> dep = (3 * ind + 5 + np.random.randn(15) * 5).round(3)
>>> dep
array([ 15.617, 47.533, 28.04 , 33.788, 19.602, 12.229, 4.779,
36.838, 67.256, 55.032, 22.157, 7.051, 58.601, 38.664,
18.783])
Let's define a test that will draw a list of sample pairs and determine
if they're correlated. We'll use `scipy.stats.pearsonr` which takes two arrays
and returns a correlation coefficient and a p-value representing the
probability the two distributions are correlated.
>>> from scipy.stats import pearsonr
>>> f = lambda x: pearsonr(x[0], x[1])[1]
Now, let's use random sampling to estimate the power of our test on
the first distribution.
>>> samples = [ind, dep]
>>> print("%.3e" % f(samples))
3.646e-08
In `subsample_power`, we can maintain a paired relationship between samples
by setting `draw_mode` to "matched". We can also set our critical value, so
that we estimate power for a critical value of :math:`\alpha = 0.05`, an
estimate for the critical value of 0.01, and a critical value of 0.001.
>>> from skbio.stats.power import subsample_power
>>> pwr_100, counts_100 = subsample_power(test=f,
... samples=samples,
... max_counts=10,
... min_counts=3,
... counts_interval=1,
... draw_mode="matched",
... alpha_pwr=0.1,
... num_iter=25)
>>> pwr_010, counts_010 = subsample_power(test=f,
... samples=samples,
... max_counts=10,
... min_counts=3,
... counts_interval=1,
... draw_mode="matched",
... alpha_pwr=0.01,
... num_iter=25)
>>> pwr_001, counts_001 = subsample_power(test=f,
... samples=samples,
... max_counts=10,
... min_counts=3,
... counts_interval=1,
... draw_mode="matched",
... alpha_pwr=0.001,
... num_iter=25)
>>> counts_100
array([3, 4, 5, 6, 7, 8, 9])
>>> pwr_100.mean(0)
array([ 0.484, 0.844, 0.932, 0.984, 1. , 1. , 1. ])
>>> pwr_010.mean(0)
array([ 0.044, 0.224, 0.572, 0.836, 0.928, 0.996, 1. ])
>>> pwr_001.mean(0)
array([ 0. , 0.016, 0.108, 0.332, 0.572, 0.848, 0.956])
Based on this power estimate, as we increase our confidence that we have not
committed a type I error and identified a false positive, the number of samples
we need to be confident that we have not committed a type II error increases.
"""
# ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
import collections
import copy
import numpy as np
import scipy.stats
from skbio.util._decorator import experimental
@experimental(as_of="0.4.0")
def subsample_power(test, samples, draw_mode='ind', alpha_pwr=0.05, ratio=None,
max_counts=50, counts_interval=10, min_counts=None,
num_iter=500, num_runs=10):
r"""Subsamples data to iteratively calculate power
Parameters
----------
test : function
The statistical test which accepts a list of arrays of values
(sample ids or numeric values) and returns a p value or one-dimensional
array of p values.
samples : array_like
`samples` can be a list of lists or a list of arrays where each
sublist or row in the array corresponds to a sampled group.
draw_mode : {"ind", "matched"}, optional
"matched" samples should be used when observations in
samples have corresponding observations in other groups. For instance,
this may be useful when working with regression data where
:math:`x_{1}, x_{2}, ..., x_{n}` maps to
:math:`y_{1}, y_{2}, ..., y_{n}`. Sample vectors must be the same
length in "matched" mode.
If there is no reciprocal relationship between samples, then
"ind" mode should be used.
alpha_pwr : float, optional
The critical value used to calculate the power.
ratio : 1-D array, optional
The fraction of the sample counts which should be
assigned to each group. If this is a 1-D array, it must be the same
length as `samples`. If no value is supplied (`ratio` is None),
then an equal number of observations will be drawn for each sample. In
`matched` mode, this will be set to one.
max_counts : positive int, optional
The maximum number of samples per group to draw for effect size
calculation.
counts_interval : positive int, optional
The difference between each subsampling count.
min_counts : positive int, optional
How many samples should be drawn for the smallest
subsample. If this is None, the `counts_interval` will be used.
num_iter : positive int, optional
The number of p-values to generate for each point
on the curve.
num_runs : positive int, optional
The number of times to calculate each curve.
Returns
-------
power : array
The power calculated for each subsample at each count. The array has
`num_runs` rows, a length with the same number of elements as
`sample_counts` and a depth equal to the number of p values returned by
`test`. If `test` returns a float, the returned array will be
two-dimensional instead of three.
sample_counts : array
The number of samples drawn at each power calculation.
Raises
------
ValueError
If the `mode` is "matched", an error will occur if the arrays in
`samples` are not the same length.
ValueError
There is a ValueError if there are fewer samples than the minimum
count.
ValueError
If the `counts_interval` is greater than the difference between the
sample start and the max value, the function raises a ValueError.
ValueError
There are not an equal number of groups in `samples` and in `ratios`.
TypeError
`test` does not return a float or a 1-dimensional numpy array.
Examples
--------
Let's say we wanted to look at the relationship between the presence of a
specific bacteria, *Gardnerella vaginalis* in the vaginal community, and
the probability of a pre or post menopausal woman experiencing a urinary
tract infection (UTI). Healthy women were enrolled in the study either
before or after menopause, and followed for eight weeks. Participants
submitted fecal samples at the beginning of the study, and were then
followed for clinical symptoms of a UTI. A confirmed UTI was an endpoint
in the study.
Using available literature and 16S sequencing, a set of candidate taxa were
identified as correlated with UTIs, including *G. vaginalis*. In the 100
women (50 premenopausal and 50 postmenopausal samples) who had UTIs, the
presence or absence of *G. vaginalis* was confirmed with quantitative PCR.
We can model the probability that detectable *G. vaginalis* was found in
these samples using a binomial model. (*Note that this is a simulation.*)
>>> import numpy as np
>>> np.random.seed(25)
>>> pre_rate = np.random.binomial(1, 0.85, size=(50,))
>>> pre_rate.sum()
45
>>> pos_rate = np.random.binomial(1, 0.40, size=(50,))
>>> pos_rate.sum()
21
Let's set up a test function, so we can test the probability of
finding a difference in frequency between the two groups. We'll use
`scipy.stats.chisquare` to look for the difference in frequency between
groups.
>>> from scipy.stats import chisquare
>>> test = lambda x: chisquare(np.array([x[i].sum() for i in
... range(len(x))]))[1]
Let's make sure that our two distributions are different.
>>> print(round(test([pre_rate, pos_rate]), 3))
0.003
Since there are an even number of samples, and we don't have enough
information to try controlling the data, we'll use
`skbio.stats.power.subsample_power` to compare the two groups. If we had
metadata about other risk factors, like a reproductive history, BMI,
tobacco use, we might want to use
`skbio.stats.power.subsample_paired_power`.
We'll also use "ind" `draw_mode`, since there is no linkage between the
two groups of samples.
>>> from skbio.stats.power import subsample_power
>>> pwr_est, counts = subsample_power(test=test,
... samples=[pre_rate, pos_rate],
... num_iter=100,
... num_runs=5,
... counts_interval=5)
>>> counts
array([ 5, 10, 15, 20, 25, 30, 35, 40, 45])
>>> np.nanmean(pwr_est, axis=0) # doctest: +NORMALIZE_WHITESPACE
array([ 0.056, 0.074, 0.226, 0.46 , 0.61 , 0.806, 0.952, 1. ,
1. ])
>>> counts[np.nanmean(pwr_est, axis=0) > 0.8].min()
30
So, we can estimate that we will see a significant difference in the
presence of *G. vaginalis* in the stool of pre and post women with UTIs if
we have at least 30 samples per group.
If we wanted to test the relationship of a second candidate taxa which is
more rare in the population, but may have a similar effect, based on
available literature, we might also start by trying to identify 30
samples per group where the second candidate taxa is present.
Suppose, now, that we want to test that a secondary metabolite seen only in
the presence of *G vaginalis* to see if it is also correlated with UTIs. We
can model the abundance of the metabolite as a normal distribution.
>>> met_pos = (np.random.randn(pre_rate.sum() + pos_rate.sum()) * 2000 +
... 2500)
>>> met_pos[met_pos < 0] = 0
>>> met_neg = (np.random.randn(100 - (pre_rate.sum() +
... pos_rate.sum())) * 2000 + 500)
>>> met_neg[met_neg < 0] = 0
Let's compare the populations with a kruskal-wallis test. Physically, there
cannot be a negative concentration of a chemical, so we've set the lower
bound at 0. This means that we can no longer assume our distribution is
normal.
>>> from scipy.stats import kruskal
>>> def metabolite_test(x):
... return kruskal(x[0], x[1])[1]
>>> print(round(metabolite_test([met_pos, met_neg]), 3))
0.005
When we go to perform the statistical test on all the data, you might
notice that there are twice as many samples from women with *G. vaginalis*
than those without. It might make sense to account for this difference when
we're testing power. So, we're going to set the `ratio` parameter, which
lets us draw twice as many samples from women with *G. vaginalis*.
>>> pwr_est2, counts2 = subsample_power(test=metabolite_test,
... samples=[met_pos, met_neg],
... counts_interval=5,
... num_iter=100,
... num_runs=5,
... ratio=[2, 1])
>>> counts2
array([ 5., 10., 15., 20., 25., 30.])
>>> np.nanmean(pwr_est2, axis=0)
array([ 0.14 , 0.272, 0.426, 0.646, 0.824, 0.996])
>>> counts2[np.nanmean(pwr_est2, axis=0) > 0.8].min()
25.0
When we consider the number of samples per group needed in the power
analysis, we need to look at the ratio. The analysis says that we need 25
samples in the smallest group, in this case, the group of women without
*G. vaginalis* and 50 samples from women with *G. vaginalis* to see a
significant difference in the abundance of our secondary metabolite at 80%
power.
"""
# Checks the inputs
ratio, num_p, sample_counts = \
_check_subsample_power_inputs(test=test,
samples=samples,
draw_mode=draw_mode,
ratio=ratio,
min_counts=min_counts,
max_counts=max_counts,
counts_interval=counts_interval)
# Preallocates the power array
power = np.zeros((num_runs, len(sample_counts), num_p))
# Calculates the power instances
for id2, c in enumerate(sample_counts):
count = np.round(c * ratio, 0).astype(int)
for id1 in range(num_runs):
ps = _compare_distributions(test=test,
samples=samples,
num_p=num_p,
counts=count,
num_iter=num_iter,
mode=draw_mode)
power[id1, id2, :] = _calculate_power(ps, alpha_pwr)
power = power.squeeze()
return power, sample_counts
@experimental(as_of="0.4.0")
def subsample_paired_power(test, meta, cat, control_cats, order=None,
strict_match=True, alpha_pwr=0.05,
max_counts=50, counts_interval=10, min_counts=None,
num_iter=500, num_runs=10):
r"""Estimates power iteratively using samples with matching metadata
Parameters
----------
test : function
The statistical test which accepts a list of arrays sample ids and
returns a p value.
meta : pandas.DataFrame
The metadata associated with the samples.
cat : str
The metadata category being varied between samples.
control_cats : list
The metadata categories to be used as controls. For example, if
you wanted to vary age (`cat` = "AGE"), you might want to control
for gender and health status (i.e. `control_cats` = ["SEX",
"HEALTHY"]).
order : list, optional
The order of groups in the category. This can be used
to limit the groups selected. For example, if there's a category with
groups 'A', 'B' and 'C', and you only want to look at A vs B, `order`
would be set to ['A', 'B'].
strict_match : bool, optional
This determines how data is grouped using
`control_cats`. If a sample within `meta` has an undefined value (NaN)
for any of the columns in `control_cats`, the sample will not be
considered as having a match and will be ignored when `strict_match`
is True. If `strict_match` is False, missing values (NaN) in the
`control_cats` can be considered matches.
alpha_pwr : float, optional
The critical value used to calculate the power.
max_counts : positive int, optional
The maximum number of observations per sample to draw
for effect size calculation.
counts_interval : positive int, optional
The difference between each subsampling count.
min_counts : positive int, optional
How many samples should be drawn for the smallest
subsample. If this is None, the `counts_interval` will be used.
num_iter : positive int, optional
The number of p-values to generate for each point on the curve.
num_runs : positive int, optional
The number of times to calculate each curve.
Returns
-------
power : array
The power calculated for each subsample at each count. The array is
`num_runs` rows, a length with the same number of elements as
`sample_counts` and a depth equal to the number of p values returned by
`test`. If `test` returns a float, the returned array will be
two-dimensional instead of three.
sample_counts : array
The number of samples drawn at each power calculation.
Raises
------
ValueError
There is a ValueError if there are fewer samples than the minimum
count.
ValueError
If the `counts_interval` is greater than the difference between the
sample start and the max value, the function raises a ValueError.
TypeError
`test` does not return a float or a 1-dimensional numpy array.
Examples
--------
Assume you are interested in the role of a specific cytokine of protein
translocation in myeloid-lineage cells. You are able to culture two
macrophage lineages (bone marrow derived phagocytes and
peritoneally-derived macrophages). Due to unfortunate circumstances, your
growth media must be acquired from multiple sources (lab, company A,
company B). Also unfortunate, you must use labor-intensive low throughput
assays. You have some preliminary measurements, and you'd like to
predict how many (more) cells you need to analyze for 80% power.
You have information about 60 cells, which we'll simulate below. Note
that we are setting a random seed value for consistency.
>>> import numpy as np
>>> import pandas as pd
>>> np.random.seed(25)
>>> data = pd.DataFrame.from_dict({
... 'CELL_LINE': np.random.binomial(1, 0.5, size=(60,)),
... 'SOURCE': np.random.binomial(2, 0.33, size=(60,)),
... 'TREATMENT': np.hstack((np.zeros((30)), np.ones((30)))),
... 'INCUBATOR': np.random.binomial(1, 0.2, size=(60,))})
>>> data['OUTCOME'] = (0.25 + data.TREATMENT * 0.25) + \
... np.random.randn(60) * (0.1 + data.SOURCE/10 + data.CELL_LINE/5)
>>> data.loc[data.OUTCOME < 0, 'OUTCOME'] = 0
>>> data.loc[data.OUTCOME > 1, 'OUTCOME'] = 1
We will approach this by assuming that the distribution of our outcome is
not normally distributed, and apply a kruskal-wallis test to compare
between the cytokine treated and untreated cells.
>>> from scipy.stats import kruskal
>>> f = lambda x: kruskal(*[data.loc[i, 'OUTCOME'] for i in x])[1]
Let's check that cytokine treatment has a significant effect across all
the cells.
>>> treatment_stat = [g for g in data.groupby('TREATMENT').groups.values()]
>>> f(treatment_stat)
0.0019386336266250209
Now, let's pick the control categories. It seems reasonable to assume there
may be an effect of cell line on the treatment outcome, which may be
attributed to differences in receptor expression. It may also be possible
that there are differences due cytokine source. Incubators were maintained
under the same conditions throughout the experiment, within one degree of
temperature difference at any given time, and the same level of CO2.
So, at least initially, let's ignore differences due to the incubator.
It's recommended that as a first pass analysis, control variables be
selected based on an idea of what may be biologically relevant to the
system, although further iteration might encourage the consideration of
variables with effect sizes similar to, or larger than, the variable of
interest.
>>> control_cats = ['SOURCE', 'CELL_LINE']
>>> from skbio.stats.power import subsample_paired_power
>>> pwr, cnt = subsample_paired_power(test=f,
... meta=data,
... cat='TREATMENT',
... control_cats=control_cats,
... counts_interval=5,
... num_iter=25,
... num_runs=5)
>>> cnt
array([ 5., 10., 15., 20.])
>>> pwr.mean(0)
array([ 0.24 , 0.528, 0.68 , 0.88 ])
>>> pwr.std(0).round(3)
array([ 0.088, 0.127, 0.168, 0.08 ])
Estimating off the power curve, it looks like 20 cells per group may
provide adequate power for this experiment, although the large variance
in power might suggest extending the curves or increasing the number of
samples per group.
"""
# Handles the order argument
if order is None:
order = sorted(meta.groupby(cat).groups.keys())
order = np.array(order)
# Checks for the number of sampling pairs available
meta_pairs, index = _identify_sample_groups(meta, cat, control_cats, order,
strict_match)
min_obs = min([_get_min_size(meta, cat, control_cats, order, strict_match),
np.floor(len(index)*0.9)])
sub_ids = _draw_paired_samples(meta_pairs, index, min_obs)
ratio, num_p, sample_counts = \
_check_subsample_power_inputs(test=test,
samples=sub_ids,
draw_mode='matched',
min_counts=min_counts,
max_counts=max_counts,
counts_interval=counts_interval)
# Preallocates the power array
power = np.zeros((num_runs, len(sample_counts), num_p))
# Calculates power instances
for id2, c in enumerate(sample_counts):
for id1 in range(num_runs):
ps = np.zeros((num_p, num_iter))
for id3 in range(num_iter):
subs = _draw_paired_samples(meta_pairs, index, c)
ps[:, id3] = test(subs)
power[id1, id2, :] = _calculate_power(ps, alpha_pwr)
power = power.squeeze()
return power, sample_counts
@experimental(as_of="0.4.0")
def confidence_bound(vec, alpha=0.05, df=None, axis=None):
r"""Calculates a confidence bound assuming a normal distribution
Parameters
----------
vec : array_like
The array of values to use in the bound calculation.
alpha : float, optional
The critical value, used for the confidence bound calculation.
df : float, optional
The degrees of freedom associated with the
distribution. If None is given, df is assumed to be the number of
elements in specified axis.
axis : positive int, optional
The axis over which to take the deviation. When axis
is None, a single value will be calculated for the whole matrix.
Returns
-------
bound : float
The confidence bound around the mean. The confidence interval is
[mean - bound, mean + bound].
"""
# Determines the number of non-nan counts
vec = np.asarray(vec)
vec_shape = vec.shape
if axis is None and len(vec_shape) == 1:
num_counts = vec_shape[0] - np.isnan(vec).sum()
elif axis is None:
num_counts = vec_shape[0] * vec_shape[1] - np.isnan(vec).sum()
else:
num_counts = vec_shape[axis] - np.isnan(vec).sum() / \
(vec_shape[0] * vec_shape[1])
# Gets the df if not supplied
if df is None:
df = num_counts - 1
# Calculates the bound
# In the conversion from scipy.stats.nanstd -> np.nanstd `ddof=1` had to be
# added to match the scipy default of `bias=False`.
bound = np.nanstd(vec, axis=axis, ddof=1) / np.sqrt(num_counts - 1) * \
scipy.stats.t.ppf(1 - alpha / 2, df)
return bound
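
# Sketch for illustration (not part of the public API): `bound` is half the
# width of a t-based interval, so a (1 - alpha) confidence interval around
# the mean can be assembled as follows.
def _confidence_interval_sketch(vec, alpha=0.05):
    bound = confidence_bound(vec, alpha=alpha)
    center = np.nanmean(vec)
    return center - bound, center + bound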
@experimental(as_of="0.4.0")
def paired_subsamples(meta, cat, control_cats, order=None, strict_match=True):
r"""Draws a list of samples varied by `cat` and matched for `control_cats`
This function is designed to provide controlled samples, based on a
metadata category. For example, one could control for age, sex, education
level, and diet type while measuring exercise frequency.
Parameters
----------
meta : pandas.DataFrame
The metadata associated with the samples.
cat : str, list
The metadata category (or a list of categories) for comparison.
control_cats : list
The metadata categories to be used as controls. For example, if you
wanted to vary age (`cat` = "AGE"), you might want to control for
gender and health status (i.e. `control_cats` = ["SEX", "HEALTHY"])
order : list, optional
The order of groups in the category. This can be used
to limit the groups selected. For example, if there's a category with
groups 'A', 'B' and 'C', and you only want to look at A vs B, `order`
would be set to ['A', 'B'].
strict_match: bool, optional
This determines how data is grouped using `control_cats`. If a sample
within `meta` has an undefined value (`NaN`) for any of the columns in
`control_cats`, the sample will not be considered as having a match and
will be ignored when `strict_match` is True. If `strict_match` is
False, missing values (NaN) in the `control_cats` can be considered
matches.
Returns
-------
ids : array
a set of ids which satisfy the criteria. These are not grouped by
`cat`. An empty array indicates there are no sample ids which satisfy
the requirements.
Examples
--------
If we have a mapping file for a set of random individuals looking at
housing, sex, age and antibiotic use.
>>> import pandas as pd
>>> import numpy as np
>>> meta = {'SW': {'HOUSING': '2', 'SEX': 'M', 'AGE': np.nan, 'ABX': 'Y'},
... 'TS': {'HOUSING': '2', 'SEX': 'M', 'AGE': '40s', 'ABX': 'Y'},
... 'CB': {'HOUSING': '3', 'SEX': 'M', 'AGE': '40s', 'ABX': 'Y'},
... 'BB': {'HOUSING': '1', 'SEX': 'M', 'AGE': '40s', 'ABX': 'Y'}}
>>> meta = pd.DataFrame.from_dict(meta, orient="index")
>>> meta #doctest: +SKIP
ABX HOUSING AGE SEX
BB Y 1 40s M
CB Y 3 40s M
SW Y 2 NaN M
TS Y 2 40s M
We may want to vary an individual's housing situation, while holding
constant their age, sex and antibiotic use so we can estimate the effect
size for housing, and later compare it to the effects of other variables.
>>> from skbio.stats.power import paired_subsamples
>>> ids = paired_subsamples(meta, 'HOUSING', ['SEX', 'AGE', 'ABX'])
>>> np.hstack(ids) #doctest: +ELLIPSIS
array(['BB', 'TS', 'CB']...)
So, for this set of data, we can match TS, CB, and BB based on their age,
sex, and antibiotic use. SW cannot be matched in either group because
`strict_match` was true, and there is missing AGE data for this sample.
"""
# Handles the order argument
if order is None:
order = sorted(meta.groupby(cat).groups.keys())
order = np.array(order)
# Checks the groups in the category
min_obs = _get_min_size(meta, cat, control_cats, order, strict_match)
# Identifies all possible subsamples
meta_pairs, index = _identify_sample_groups(meta=meta,
cat=cat,
control_cats=control_cats,
order=order,
strict_match=strict_match)
# Draws paired ids
ids = _draw_paired_samples(meta_pairs=meta_pairs,
index=index,
num_samps=min_obs)
return ids
def _get_min_size(meta, cat, control_cats, order, strict_match):
"""Determines the smallest group represented"""
if strict_match:
all_cats = copy.deepcopy(control_cats)
all_cats.append(cat)
meta = meta[all_cats].dropna()
return meta.groupby(cat).count().loc[order, control_cats[0]].min()
def _check_nans(x, switch=False):
r"""Returns False if x is a nan and True is x is a string or number
"""
if isinstance(x, str):
return True
elif isinstance(x, (float, int)):
return not np.isnan(x)
elif switch and isinstance(x, (list, tuple)) and np.nan in x:
return False
elif switch and isinstance(x, (list, tuple)):
return True
else:
raise TypeError('input must be a string, float or a nan')
def _calculate_power(p_values, alpha=0.05):
r"""Calculates statistical power empirically
Parameters
----------
p_values : 1-D array
A 1-D numpy array with the test results.
alpha : float
The critical value for the power calculation.
Returns
-------
power : float
The empirical power, or the fraction of observed p values below the
critical value.
"""
p_values = np.atleast_2d(p_values)
w = (p_values < alpha).sum(axis=1)/p_values.shape[1]
return w
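# Hedged worked example (values are illustrative assumptions): with
# alpha = 0.05, power is the fraction of p-values below the critical value.
def _calculate_power_example():
    p_values = np.array([0.01, 0.20, 0.03, 0.50])  # 2 of 4 fall below 0.05
    return _calculate_power(p_values, alpha=0.05)  # -> array([ 0.5])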
def _compare_distributions(test, samples, num_p, counts=5, mode="ind",
num_iter=100):
r"""Compares two distribution arrays iteratively
Parameters
----------
test : function
The statistical test which accepts an array_like of sample ids
(list of lists) and returns a p-value. This can be a one-dimensional
array, or a float.
samples : list of arrays
A list where each 1-d array represents a sample. If `mode` is
"matched", there must be an equal number of observations in each
sample.
num_p : positive int
The number of p-values returned by the test.
counts : positive int or 1-D array, optional
The number of samples to draw from each distribution.
If this is a 1-D array, the length must correspond to the number of
samples. The function will not draw more observations than are in a
sample. In "matched" `mode`, the same number of observations will be
drawn from each group.
mode : {"ind", "matched", "paired"}, optional
"matched" samples should be used when observations in
samples have corresponding observations in other groups. For instance,
this may be useful when working with regression data where
:math:`x_{1}, x_{2}, ..., x_{n}` maps to :math:`y_{1}, y_{2}, ... ,
y_{n}`.
num_iter : positive int, optional
Default 100. The number of p-values to generate for each point on the
curve.
Returns
-------
p_values : array
The p-values for the subsampled tests. If `test` returned a single
p value, p_values is a one-dimensional array. If `test` returned an
array, `p_values` has dimensions `num_iter` x `num_p`
Raises
------
ValueError
If mode is not "ind" or "matched".
ValueError
If the arrays in samples are not the same length in "matched" mode.
ValueError
If counts is a 1-D array and counts and samples are different lengths.
"""
# Preallocates the p-value matrix
p_values = np.zeros((num_p, num_iter))
# Determines the number of samples per group
num_groups = len(samples)
samp_lens = [len(sample) for sample in samples]
if isinstance(counts, int):
counts = np.array([counts] * num_groups)
for idx in range(num_iter):
if mode == "matched":
pos = np.random.choice(np.arange(0, samp_lens[0]), counts[0],
replace=False)
subs = [sample[pos] for sample in samples]
else:
subs = [np.random.choice(np.array(pop), counts[i], replace=False)
for i, pop in enumerate(samples)]
p_values[:, idx] = test(subs)
if num_p == 1:
p_values = p_values.squeeze()
return p_values
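# Hedged usage sketch (the test function and sample data are assumptions for
# illustration): draws 5 observations per group and collects 100 p-values
# from scipy's independent t-test.
def _compare_distributions_example():
    test = lambda subs: scipy.stats.ttest_ind(subs[0], subs[1])[1]
    samples = [np.random.randn(20), np.random.randn(20) + 1.0]
    return _compare_distributions(test, samples, num_p=1, counts=5,
                                  mode="ind", num_iter=100)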
def _check_subsample_power_inputs(test, samples, draw_mode='ind', ratio=None,
max_counts=50, counts_interval=10,
min_counts=None):
r"""Makes sure that everything is sane before power calculations
Parameters
----------
test : function
The statistical test which accepts a list of arrays of values
(sample ids or numeric values) and returns a p value or one-dimensional
array of p values.
samples : array_like
`samples` can be a list of lists or a list of arrays where each
sublist or row in the array corresponds to a sampled group.
draw_mode : {"ind", "matched"}, optional
"matched" samples should be used when observations in
samples have corresponding observations in other groups. For instance,
this may be useful when working with regression data where
:math:`x_{1}, x_{2}, ..., x_{n}` maps to
:math:`y_{1}, y_{2}, ..., y_{n}`. Sample vectors must be the same
length in "matched" mode.
If there is no reciprocal relationship between samples, then
"ind" mode should be used.
ratio : 1-D array, optional
The fraction of the sample counts which should be
assigned to each group. If this is a 1-D array, it must be the same
length as `samples`. If no value is supplied (`ratio` is None),
then an equal number of observations will be drawn for each sample. In
`matched` mode, this will be set to one.
max_counts : positive int, optional
The maximum number of samples per group to draw for effect size
calculation.
counts_interval : positive int, optional
The difference between each subsampling count.
min_counts : positive int, optional
How many samples should be drawn for the smallest
subsample. If this is None, the `counts_interval` will be used.
Returns
-------
ratio : 1-D array
The fraction of the sample counts which should be assigned to each
group.
num_p : positive integer
The number of p values returned by `test`.
sample_counts : array
The number of samples drawn at each power calculation.
Raises
------
ValueError
If the `mode` is "matched", an error will occur if the arrays in
`samples` are not the same length.
ValueError
There is a ValueError if there are fewer samples than the minimum
count.
ValueError
If the `counts_interval` is greater than the difference between the
sample start and the max value, the function raises a ValueError.
ValueError
There are not an equal number of groups in `samples` and in `ratios`.
TypeError
`test` does not return a float or a 1-dimensional numpy array.
"""
if draw_mode not in {'ind', 'matched'}:
raise ValueError('mode must be "matched" or "ind".')
# Determines the minimum number of ids in a category
id_counts = np.array([len(id_) for id_ in samples])
num_ids = id_counts.min()
# Determines the number of groups
num_groups = len(samples)
# Checks that "matched" mode is handled appropriately
if draw_mode == "matched":
for id_ in samples:
if not len(id_) == num_ids:
raise ValueError('Each vector in samples must be the same '
'length in "matched" draw_mode.')
# Checks the number of counts is appropriate
if min_counts is None:
min_counts = counts_interval
if (max_counts - min_counts) < counts_interval:
raise ValueError("No subsamples of the specified size can be drawn.")
# Checks the ratio argument is sane
if ratio is None or draw_mode == 'matched':
ratio = np.ones((num_groups))
else:
ratio = np.asarray(ratio)
if not ratio.shape == (num_groups,):
raise ValueError('There must be a ratio for each group.')
ratio_counts = np.array([id_counts[i] / ratio[i]
for i in range(num_groups)])
largest = ratio_counts.min()
# Determines the number of p values returned by the test
p_return = test(samples)
if isinstance(p_return, float):
num_p = 1
elif isinstance(p_return, np.ndarray) and len(p_return.shape) == 1:
num_p = p_return.shape[0]
else:
raise TypeError('test must return a float or one-dimensional array.')
# Calculates the sample counts
sample_counts = np.arange(min_counts,
min(max_counts, largest),
counts_interval)
return ratio, num_p, sample_counts
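# Illustrative sketch (assumed values) of how `sample_counts` is built above:
# with min_counts=5, counts_interval=10, max_counts=50 and at least 45
# observations per group, the upper bound of np.arange is exclusive, so the
# subsampling depths are 5, 15, 25 and 35.
def _sample_counts_example():
    return np.arange(5, min(50, 45), 10)  # -> array([ 5, 15, 25, 35])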
def _identify_sample_groups(meta, cat, control_cats, order, strict_match):
"""Aggregates samples matches for `control_cats` that vary by `cat`
Parameters
----------
meta : pandas.DataFrame
The metadata associated with the samples.
cat : str, list
The metadata category (or a list of categories) for comparison.
control_cats : list
The metadata categories to be used as controls. For example, if you
wanted to vary age (`cat` = "AGE"), you might want to control for
gender and health status (i.e. `control_cats` = ["SEX", "HEALTHY"])
order : list
The order of groups in the category. This can be used
to limit the groups selected. For example, if there's a category with
groups 'A', 'B' and 'C', and you only want to look at A vs B, `order`
would be set to ['A', 'B'].
strict_match: bool, optional
This determines how data is grouped using `control_cats`. If a sample
within `meta` has an undefined value (`NaN`) for any of the columns in
`control_cats`, the sample will not be considered as having a match and
will be ignored when `strict_match` is True. If `strict_match` is
False, missing values (NaN) in the `control_cats` can be considered
matches.
Returns
-------
meta_pairs : dict
Describes the categories matched for metadata. The
`control_cat`-grouped samples are numbered, corresponding to the
second list in `index`. The group is keyed to the list of sample arrays
with the same length of `order`.
index : list
A list of numpy arrays describing the positions of samples to be drawn.
The first array is an index array. The second gives an integer
corresponding to the `control_cat`-group, and the third lists the
position of the reference group sample in the list of samples.
"""
# Sets up variables to be filled
meta_pairs = {}
index = []
i1 = 0
# Groups the data by the control groups
ctrl_groups = meta.groupby(control_cats).groups
# Identifies the samples that satisfy the control pairs. Keys are iterated
# in sorted order so that results don't change with different dictionary
# ordering.
for g in sorted(ctrl_groups, key=lambda k: str(k)):
ids = ctrl_groups[g]
# If strict_match is True, skip over groups that contain NaNs
if not _check_nans(g, switch=True) and strict_match:
continue
# Draws the samples that are matched for control cats
m_ids = meta.loc[ids].groupby(cat).groups
# Checks whether samples from the cat groups are represented in those
# samples
id_vecs = [sorted(m_ids[o]) for o in order if o in
m_ids]
# If all groups are represented, the index and results are retained
if len(id_vecs) == len(order):
min_vec = np.array([len(v) for v in id_vecs])
loc_vec = np.arange(0, min_vec.min())
meta_pairs[i1] = id_vecs
index.append(np.zeros(loc_vec.shape) + i1)
i1 = i1 + 1
# If the groups are not represented, an empty array gets passed
else:
index.append(np.array([]))
# Converts index to a 1d array
index = np.hstack(index)
# If index is empty, sets up meta_pairs with a 'no' key.
if not meta_pairs:
meta_pairs['no'] = order
return meta_pairs, index
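# Illustrative note (toy values, mirroring the paired_subsamples doctest
# above): for one control-matched group spanning three housing values, the
# returned structures would look like
#     meta_pairs == {0: [['BB'], ['TS'], ['CB']]}
#     index == array([ 0.])
# i.e. group id 0 is drawable once, and each draw takes one sample per
# element of `order`.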
def _draw_paired_samples(meta_pairs, index, num_samps):
"""Draws a random set of ids from a matched list
Parameters
----------
meta_pairs : dict
Describes the categories matched for metadata. The
`control_cat`-grouped samples are numbered, corresponding to the
second list in `index`. The group is keyed to the list of sample arrays
with the same length of `order`.
index : list
A list of numpy arrays describing the positions of samples to be drawn.
The first array is an index array. The second gives an integer
corresponding to the `control_cat`-group, and the third lists the
position of the reference group sample in the list of samples.
Returns
-------
ids : list
A set of randomly selected ids groups from each group.
"""
# Handles an empty paired vector
if 'no' in meta_pairs:
return [np.array([]) for o in meta_pairs['no']]
# Identifies the absolute positions of the control group being drawn
set_pos = np.random.choice(index, int(num_samps),
replace=False).astype(int)
subs = []
# Draws the other groups. Get a collection.Counter object for simplicity
counter = collections.Counter(set_pos)
# counter.items() order isn't guaranteed in Python 3.6, so the random
# choice wouldn't be reproducible across Python versions even with a fixed
# seed; we therefore collect the positions into a set first.
set_list = set(set_pos)
# Since set iteration order is also arbitrary, sort set_list before
# accessing values so the draw order is deterministic.
set_list = sorted(set_list)
for set_ in set_list:
num_ = counter[set_]
r2 = [np.random.choice(col, num_, replace=False) for col in
meta_pairs[set_]]
subs.append(r2)
ids = [np.hstack(ids) for ids in zip(*subs)]
return ids
def _calculate_power_curve(test, samples, sample_counts, ratio=None,
mode='ind', num_iter=1000, alpha=0.05):
r"""Generates an empirical power curve for the samples.
Parameters
----------
test : function
The statistical test which accepts a list of arrays of values and
returns a p value.
samples : array_like
`samples` can be a list of lists or an array where each sublist or row
in the array corresponds to a sampled group.
sample_counts : 1-D array
A vector of the number of samples which should be sampled in each
curve.
mode : {"ind", "matched"}, optional
"matched" samples should be used when observations in
samples have corresponding observations in other groups. For instance,
this may be useful when working with regression data where
:math:`x_{1}, x_{2}, ..., x_{n}` maps to :math:`y_{1}, y_{2}, ... ,
y_{n}`.
ratio : 1-D array, optional
The fraction of the sample counts which should be
assigned to each group. If this is a 1-D array, it must be the same
length as `samples`. If no value is supplied (`ratio` is None),
then an equal number of observations will be drawn for each sample.
num_iter : int
The default is 1000. The number of p-values to generate for each point
on the curve.
Returns
-------
p_values : array
The p-values associated with the input sample counts.
Raises
------
ValueError
If ratio is an array and ratio is not the same length as samples
"""
# Casts array-likes to arrays
sample_counts = np.asarray(sample_counts)
# Determines the number of groups
num_groups = len(samples)
num_samps = len(sample_counts)
if isinstance(alpha, float):
vec = True
pwr = np.zeros((num_samps))
alpha = np.array([alpha])
else:
vec = False
num_crit = alpha.shape[0]
pwr = np.zeros((num_crit, num_samps))
# Checks the ratio argument
if ratio is None:
ratio = np.ones((num_groups))
ratio = np.asarray(ratio)
if not ratio.shape == (num_groups,):
raise ValueError('There must be a ratio for each group.')
# Loops through the sample sizes
for id2, s in enumerate(sample_counts):
count = np.round(s * ratio, 0).astype(int)
for id1, a in enumerate(alpha):
ps = _compare_distributions(test=test,
samples=samples,
counts=count,
num_p=1,
num_iter=num_iter,
mode=mode)
if vec:
pwr[id2] = _calculate_power(ps, a)
else:
pwr[id1, id2] = _calculate_power(ps, a)
return pwr
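# Hedged end-to-end sketch (test and data are assumptions): builds an
# empirical power curve over subsample sizes 5, 10 and 15 using scipy's
# independent t-test on two shifted normal samples.
def _power_curve_example():
    test = lambda subs: scipy.stats.ttest_ind(subs[0], subs[1])[1]
    samples = [np.random.randn(30), np.random.randn(30) + 1.0]
    return _calculate_power_curve(test, samples, sample_counts=[5, 10, 15],
                                  num_iter=100, alpha=0.05)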
| bsd-3-clause |
weigq/pytorch-pose | pose/utils/transforms.py | 1 | 4639 | from __future__ import absolute_import
import os
import numpy as np
import scipy.misc
import matplotlib.pyplot as plt
import torch
from .misc import *
from .imutils import *
def color_normalize(x, mean, std):
if x.size(0) == 1:
x = x.repeat(3, 1, 1)  # tile the single channel to three; repeating H and W as well was a bug
return (x - mean.view(3, 1, 1).expand_as(x)) #/ std.view(3, 1, 1).expand_as(x)
def flip_back(flip_output, dataset='mpii'):
"""
flip output map
"""
if dataset == 'mpii':
matchedParts = (
[0,5], [1,4], [2,3],
[10,15], [11,14], [12,13]
)
else:
print('Not supported dataset: ' + dataset)
# flip output horizontally
flip_output = fliplr(flip_output.numpy())
# Change left-right parts
for pair in matchedParts:
tmp = np.copy(flip_output[:, pair[0], :, :])
flip_output[:, pair[0], :, :] = flip_output[:, pair[1], :, :]
flip_output[:, pair[1], :, :] = tmp
return torch.from_numpy(flip_output).float()
def shufflelr(x, width, dataset='mpii'):
"""
flip coords
"""
if dataset == 'mpii':
matchedParts = (
[0,5], [1,4], [2,3],
[10,15], [11,14], [12,13]
)
else:
print('Not supported dataset: ' + dataset)
# Flip horizontal
x[:, 0] = width - x[:, 0]
# Change left-right parts
for pair in matchedParts:
tmp = x[pair[0], :].clone()
x[pair[0], :] = x[pair[1], :]
x[pair[1], :] = tmp
return x
def fliplr(x):
if x.ndim == 3:
x = np.transpose(np.fliplr(np.transpose(x, (0, 2, 1))), (0, 2, 1))
elif x.ndim == 4:
for i in range(x.shape[0]):
x[i] = np.transpose(np.fliplr(np.transpose(x[i], (0, 2, 1))), (0, 2, 1))
return x.astype(float)
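# Minimal sketch (illustrative values): `fliplr` mirrors the last (width)
# axis of a C*H*W array by transposing so np.fliplr acts on that dimension.
def _fliplr_example():
    x = np.arange(6, dtype=float).reshape(1, 2, 3)  # one channel, 2x3 image
    return fliplr(x)  # each row reversed: [[[2., 1., 0.], [5., 4., 3.]]]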
"""
General image processing functions
"""
def get_transform(center, scale, res, rot=0):
# Generate transformation matrix
h = 200 * scale
t = np.zeros((3, 3))
t[0, 0] = float(res[1]) / h
t[1, 1] = float(res[0]) / h
t[0, 2] = res[1] * (-float(center[0]) / h + .5)
t[1, 2] = res[0] * (-float(center[1]) / h + .5)
t[2, 2] = 1
if not rot == 0:
rot = -rot # To match direction of rotation from cropping
rot_mat = np.zeros((3,3))
rot_rad = rot * np.pi / 180
sn,cs = np.sin(rot_rad), np.cos(rot_rad)
rot_mat[0,:2] = [cs, -sn]
rot_mat[1,:2] = [sn, cs]
rot_mat[2,2] = 1
# Need to rotate around center
t_mat = np.eye(3)
t_mat[0,2] = -res[1]/2
t_mat[1,2] = -res[0]/2
t_inv = t_mat.copy()
t_inv[:2,2] *= -1
t = np.dot(t_inv,np.dot(rot_mat,np.dot(t_mat,t)))
return t
def transform(pt, center, scale, res, invert=0, rot=0):
# Transform pixel location to different reference
t = get_transform(center, scale, res, rot=rot)
if invert:
t = np.linalg.inv(t)
new_pt = np.array([pt[0], pt[1], 1.]).T
new_pt = np.dot(t, new_pt)
return new_pt[:2].astype(int)
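# Worked sketch (assumed values): the forward transform maps a pixel from
# the original image into a `res`-sized crop; invert=1 maps it back
# (integer truncation may shift the round trip by one pixel).
def _transform_example():
    center, scale, res = [320, 240], 1.0, [256, 256]
    pt = transform([320, 240], center, scale, res)      # center -> [128, 128]
    back = transform(pt, center, scale, res, invert=1)  # ~ [320, 240]
    return pt, back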
def transform_preds(coords, center, scale, res):
# size = coords.size()
# coords = coords.view(-1, coords.size(-1))
# print(coords.size())
for p in range(coords.size(0)):
coords[p, 0:2] = to_torch(transform(coords[p, 0:2], center, scale, res, 1, 0))
return coords
def crop(imgIn, center, scale, res, rot=0):
'''
imgIn:
type: ndarray
shape: C*H*W
'''
# transpose imgIn to H*W*C
img = imgIn.cpu().numpy()
img = np.transpose(img, (1,2,0))
# Upper left point
ul = np.array(transform([0, 0], center, scale, res, invert=1))
# Bottom right point
br = np.array(transform(res, center, scale, res, invert=1))
# Padding so that when rotated proper amount of context is included
pad = int(np.linalg.norm(br - ul) / 2 - float(br[1] - ul[1]) / 2)
if not rot == 0:
ul -= pad
br += pad
new_shape = [br[1] - ul[1], br[0] - ul[0]]
if len(img.shape) > 2:
new_shape += [img.shape[2]]
new_img = np.zeros(new_shape)
# Range to fill new array
new_x = max(0, -ul[0]), min(br[0], len(img[0])) - ul[0]
new_y = max(0, -ul[1]), min(br[1], len(img)) - ul[1]
# Range to sample from original image
old_x = max(0, ul[0]), min(len(img[0]), br[0])
old_y = max(0, ul[1]), min(len(img), br[1])
new_img[new_y[0]:new_y[1], new_x[0]:new_x[1]] = img[old_y[0]:old_y[1], old_x[0]:old_x[1]]
if not rot == 0:
# Remove padding
new_img = scipy.misc.imrotate(new_img, rot)
new_img = new_img[pad:-pad, pad:-pad]
new_img = im_to_torch(scipy.misc.imresize(new_img, res))
return new_img
| gpl-3.0 |
michaelyin/code-for-blog | 2008/wx_mpl_dynamic_graph.py | 13 | 11139 | """
This demo demonstrates how to draw a dynamic mpl (matplotlib)
plot in a wxPython application.
It allows "live" plotting as well as manual zooming to specific
regions.
Both X and Y axes allow "auto" or "manual" settings. For Y, auto
mode sets the scaling of the graph to see all the data points.
For X, auto mode makes the graph "follow" the data. Set X min
to manual 0 to always see the whole data from the beginning.
Note: press Enter in the 'manual' text box to make a new value
affect the plot.
Eli Bendersky ([email protected])
License: this code is in the public domain
Last modified: 31.07.2008
"""
import os
import pprint
import random
import sys
import wx
# The recommended way to use wx with mpl is with the WXAgg
# backend.
#
import matplotlib
matplotlib.use('WXAgg')
from matplotlib.figure import Figure
from matplotlib.backends.backend_wxagg import \
FigureCanvasWxAgg as FigCanvas, \
NavigationToolbar2WxAgg as NavigationToolbar
import numpy as np
import pylab
class DataGen(object):
""" A silly class that generates pseudo-random data for
display in the plot.
"""
def __init__(self, init=50):
self.data = self.init = init
def next(self):
self._recalc_data()
return self.data
def _recalc_data(self):
delta = random.uniform(-0.5, 0.5)
r = random.random()
if r > 0.9:
self.data += delta * 15
elif r > 0.8:
# attraction to the initial value
delta += (0.5 if self.init > self.data else -0.5)
self.data += delta
else:
self.data += delta
class BoundControlBox(wx.Panel):
""" A static box with a couple of radio buttons and a text
box. Allows to switch between an automatic mode and a
manual mode with an associated value.
"""
def __init__(self, parent, ID, label, initval):
wx.Panel.__init__(self, parent, ID)
self.value = initval
box = wx.StaticBox(self, -1, label)
sizer = wx.StaticBoxSizer(box, wx.VERTICAL)
self.radio_auto = wx.RadioButton(self, -1,
label="Auto", style=wx.RB_GROUP)
self.radio_manual = wx.RadioButton(self, -1,
label="Manual")
self.manual_text = wx.TextCtrl(self, -1,
size=(35,-1),
value=str(initval),
style=wx.TE_PROCESS_ENTER)
self.Bind(wx.EVT_UPDATE_UI, self.on_update_manual_text, self.manual_text)
self.Bind(wx.EVT_TEXT_ENTER, self.on_text_enter, self.manual_text)
manual_box = wx.BoxSizer(wx.HORIZONTAL)
manual_box.Add(self.radio_manual, flag=wx.ALIGN_CENTER_VERTICAL)
manual_box.Add(self.manual_text, flag=wx.ALIGN_CENTER_VERTICAL)
sizer.Add(self.radio_auto, 0, wx.ALL, 10)
sizer.Add(manual_box, 0, wx.ALL, 10)
self.SetSizer(sizer)
sizer.Fit(self)
def on_update_manual_text(self, event):
self.manual_text.Enable(self.radio_manual.GetValue())
def on_text_enter(self, event):
self.value = self.manual_text.GetValue()
def is_auto(self):
return self.radio_auto.GetValue()
def manual_value(self):
return self.value
class GraphFrame(wx.Frame):
""" The main frame of the application
"""
title = 'Demo: dynamic matplotlib graph'
def __init__(self):
wx.Frame.__init__(self, None, -1, self.title)
self.datagen = DataGen()
self.data = [self.datagen.next()]
self.paused = False
self.create_menu()
self.create_status_bar()
self.create_main_panel()
self.redraw_timer = wx.Timer(self)
self.Bind(wx.EVT_TIMER, self.on_redraw_timer, self.redraw_timer)
self.redraw_timer.Start(100)
def create_menu(self):
self.menubar = wx.MenuBar()
menu_file = wx.Menu()
m_expt = menu_file.Append(-1, "&Save plot\tCtrl-S", "Save plot to file")
self.Bind(wx.EVT_MENU, self.on_save_plot, m_expt)
menu_file.AppendSeparator()
m_exit = menu_file.Append(-1, "E&xit\tCtrl-X", "Exit")
self.Bind(wx.EVT_MENU, self.on_exit, m_exit)
self.menubar.Append(menu_file, "&File")
self.SetMenuBar(self.menubar)
def create_main_panel(self):
self.panel = wx.Panel(self)
self.init_plot()
self.canvas = FigCanvas(self.panel, -1, self.fig)
self.xmin_control = BoundControlBox(self.panel, -1, "X min", 0)
self.xmax_control = BoundControlBox(self.panel, -1, "X max", 50)
self.ymin_control = BoundControlBox(self.panel, -1, "Y min", 0)
self.ymax_control = BoundControlBox(self.panel, -1, "Y max", 100)
self.pause_button = wx.Button(self.panel, -1, "Pause")
self.Bind(wx.EVT_BUTTON, self.on_pause_button, self.pause_button)
self.Bind(wx.EVT_UPDATE_UI, self.on_update_pause_button, self.pause_button)
self.cb_grid = wx.CheckBox(self.panel, -1,
"Show Grid",
style=wx.ALIGN_RIGHT)
self.Bind(wx.EVT_CHECKBOX, self.on_cb_grid, self.cb_grid)
self.cb_grid.SetValue(True)
self.cb_xlab = wx.CheckBox(self.panel, -1,
"Show X labels",
style=wx.ALIGN_RIGHT)
self.Bind(wx.EVT_CHECKBOX, self.on_cb_xlab, self.cb_xlab)
self.cb_xlab.SetValue(True)
self.hbox1 = wx.BoxSizer(wx.HORIZONTAL)
self.hbox1.Add(self.pause_button, border=5, flag=wx.ALL | wx.ALIGN_CENTER_VERTICAL)
self.hbox1.AddSpacer(20)
self.hbox1.Add(self.cb_grid, border=5, flag=wx.ALL | wx.ALIGN_CENTER_VERTICAL)
self.hbox1.AddSpacer(10)
self.hbox1.Add(self.cb_xlab, border=5, flag=wx.ALL | wx.ALIGN_CENTER_VERTICAL)
self.hbox2 = wx.BoxSizer(wx.HORIZONTAL)
self.hbox2.Add(self.xmin_control, border=5, flag=wx.ALL)
self.hbox2.Add(self.xmax_control, border=5, flag=wx.ALL)
self.hbox2.AddSpacer(24)
self.hbox2.Add(self.ymin_control, border=5, flag=wx.ALL)
self.hbox2.Add(self.ymax_control, border=5, flag=wx.ALL)
self.vbox = wx.BoxSizer(wx.VERTICAL)
self.vbox.Add(self.canvas, 1, flag=wx.LEFT | wx.TOP | wx.GROW)
self.vbox.Add(self.hbox1, 0, flag=wx.ALIGN_LEFT | wx.TOP)
self.vbox.Add(self.hbox2, 0, flag=wx.ALIGN_LEFT | wx.TOP)
self.panel.SetSizer(self.vbox)
self.vbox.Fit(self)
def create_status_bar(self):
self.statusbar = self.CreateStatusBar()
def init_plot(self):
self.dpi = 100
self.fig = Figure((3.0, 3.0), dpi=self.dpi)
self.axes = self.fig.add_subplot(111)
self.axes.set_axis_bgcolor('black')
self.axes.set_title('Very important random data', size=12)
pylab.setp(self.axes.get_xticklabels(), fontsize=8)
pylab.setp(self.axes.get_yticklabels(), fontsize=8)
# plot the data as a line series, and save the reference
# to the plotted line series
#
self.plot_data = self.axes.plot(
self.data,
linewidth=1,
color=(1, 1, 0),
)[0]
def draw_plot(self):
""" Redraws the plot
"""
# when xmin is on auto, it "follows" xmax to produce a
# sliding window effect. therefore, xmin is assigned after
# xmax.
#
if self.xmax_control.is_auto():
xmax = len(self.data) if len(self.data) > 50 else 50
else:
xmax = int(self.xmax_control.manual_value())
if self.xmin_control.is_auto():
xmin = xmax - 50
else:
xmin = int(self.xmin_control.manual_value())
# for ymin and ymax, find the minimal and maximal values
# in the data set and add a minimal margin.
#
# note that it's easy to change this scheme to the
# minimal/maximal value in the current display, and not
# the whole data set.
#
if self.ymin_control.is_auto():
ymin = round(min(self.data), 0) - 1
else:
ymin = int(self.ymin_control.manual_value())
if self.ymax_control.is_auto():
ymax = round(max(self.data), 0) + 1
else:
ymax = int(self.ymax_control.manual_value())
self.axes.set_xbound(lower=xmin, upper=xmax)
self.axes.set_ybound(lower=ymin, upper=ymax)
# anecdote: axes.grid assumes b=True if any other flag is
# given even if b is set to False.
# so just passing the flag into the first statement won't
# work.
#
if self.cb_grid.IsChecked():
self.axes.grid(True, color='gray')
else:
self.axes.grid(False)
# Using setp here is convenient, because get_xticklabels
# returns a list over which one needs to explicitly
# iterate, and setp already handles this.
#
pylab.setp(self.axes.get_xticklabels(),
visible=self.cb_xlab.IsChecked())
self.plot_data.set_xdata(np.arange(len(self.data)))
self.plot_data.set_ydata(np.array(self.data))
self.canvas.draw()
def on_pause_button(self, event):
self.paused = not self.paused
def on_update_pause_button(self, event):
label = "Resume" if self.paused else "Pause"
self.pause_button.SetLabel(label)
def on_cb_grid(self, event):
self.draw_plot()
def on_cb_xlab(self, event):
self.draw_plot()
def on_save_plot(self, event):
file_choices = "PNG (*.png)|*.png"
dlg = wx.FileDialog(
self,
message="Save plot as...",
defaultDir=os.getcwd(),
defaultFile="plot.png",
wildcard=file_choices,
style=wx.SAVE)
if dlg.ShowModal() == wx.ID_OK:
path = dlg.GetPath()
self.canvas.print_figure(path, dpi=self.dpi)
self.flash_status_message("Saved to %s" % path)
def on_redraw_timer(self, event):
# if paused do not add data, but still redraw the plot
# (to respond to scale modifications, grid change, etc.)
#
if not self.paused:
self.data.append(self.datagen.next())
self.draw_plot()
def on_exit(self, event):
self.Destroy()
def flash_status_message(self, msg, flash_len_ms=1500):
self.statusbar.SetStatusText(msg)
self.timeroff = wx.Timer(self)
self.Bind(
wx.EVT_TIMER,
self.on_flash_status_off,
self.timeroff)
self.timeroff.Start(flash_len_ms, oneShot=True)
def on_flash_status_off(self, event):
self.statusbar.SetStatusText('')
if __name__ == '__main__':
app = wx.PySimpleApp()
app.frame = GraphFrame()
app.frame.Show()
app.MainLoop()
| unlicense |
eusoubrasileiro/fatiando_seismic | fatiando/gravmag/__init__.py | 2 | 2173 | """
Gravity and magnetics forward modeling, inversion, transformations and
utilities.
Forward modeling
----------------
The forward modeling modules provide ways to calculate the gravitational and
magnetic field of various types of geometric objects:
* :mod:`~fatiando.gravmag.prism`: 3D right rectangular prisms
* :mod:`~fatiando.gravmag.polyprism`: 3D prisms with polygonal horizontal
cross-sections
* :mod:`~fatiando.gravmag.sphere`: Spheres in Cartesian coordinates
* :mod:`~fatiando.gravmag.tesseroid`: Tesseroids (spherical prisms) for
modeling in spherical coordinates
* :mod:`~fatiando.gravmag.talwani`: 2D bodies with polygonal vertical
cross-sections
* :mod:`~fatiando.gravmag.half_sph_shell`: Gravity fields of half a spherical
shell. Useful for benchmarking and testing.
Inversion
---------
The inversion modules use the forward modeling models and the
:mod:`fatiando.inversion` package to solve potential field inverse problems:
* :mod:`~fatiando.gravmag.basin2d`: 2D inversion of the shape of sedimentary
basins and other outcropping bodies
* :mod:`~fatiando.gravmag.harvester`: 3D inversion of compact bodies by
planting anomalous densities
* :mod:`~fatiando.gravmag.euler`: 3D Euler deconvolution methods to estimate
source location
* :mod:`~fatiando.gravmag.magdir`: Inversion methods to estimate the total
magnetization vector of multiple sources.
Processing
----------
The processing modules offer tools to prepare potential field data before or
after modeling.
* :mod:`~fatiando.gravmag.normal_gravity`: Compute normal gravity and
reductions.
* :mod:`~fatiando.gravmag.eqlayer`: Equivalent layer processing
* :mod:`~fatiando.gravmag.transform`: Potential field transformations,
like upward continuation, derivatives, etc
* :mod:`~fatiando.gravmag.imaging`: Imaging methods for potential fields for
estimating physical property distributions
* :mod:`~fatiando.gravmag.tensor`: Utilities for operating on the gradient
tensor
Interactivity
-------------
Module :mod:`~fatiando.gravmag.interactive` implements matplotlib GUIs and
IPython HTML widgets for interacting with the modeling and processing
functions.
----
"""
| bsd-3-clause |
alf3r/GidroGraf-Sirius | src/Sonar_data.py | 1 | 3604 | import cv2
import matplotlib.pyplot as plt
import numpy as np
import Capture
class Sonar():
def set_port(self, port):
self.port = port
def set_starboard(self, starboard):
self.starboard = starboard
class Sonar_data():
def __init__(self, id, source_type, datarate, v, c):
self.id = id # Data source identifier in the database
self.source_type = source_type # Data source type
self.datarate = datarate # Sampling rate, Hz
self.v = v # Sonar travel speed
self.c = c # Speed of sound in water
def set_data(self, data):
self.data = data
self.calculate_scale()
self.data = cv2.flip(self.data, 0)
def apply_left(self):
self.data = cv2.flip(self.data, 1)
def points2range(self, n_points):
time0 = n_points / self.datarate
range = self.c * time0 / 2
return range
def range2points(self, range):
return round(2 * self.datarate * range / self.c)
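# Illustrative round trip (assumed values): with c = 1500 m/s and a
# 10 kHz sample rate, 1000 samples correspond to a 75 m slant range:
#   range  = c * (n / datarate) / 2  = 1500 * 0.1 / 2          = 75
#   points = 2 * datarate * range / c = 2 * 10000 * 75 / 1500  = 1000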
def calculate_scale(self):
n_points = self.data.shape[1]
m_lines = self.data.shape[0]
time0 = n_points / self.datarate
Ltotal = self.points2range(n_points)
Lpx = Ltotal / n_points
Hpx = self.v * time0 / 2
Htotal = Hpx * m_lines
self.scale = Hpx / Lpx
def get_image(self, screen_width, screen_height):
if screen_width == -1:
screen_width = round(screen_height / self.scale)
elif screen_height == -1:
screen_height = round(screen_width * self.scale)
image_dimention = (screen_width, screen_height)
self.data = cv2.resize(self.data, image_dimention, interpolation=cv2.INTER_AREA)
def binarize(self):
# Bottom line extraction: retval2, thres = cv2.threshold(data, 50, 70, cv2.THRESH_BINARY); thres = cv2.blur(thres, (50, 50))
# Object extraction:
a = cv2.adaptiveThreshold(self.data, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 125, 1)
# a = cv2.adaptiveThreshold(a, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 55, 1)
# retval2, a = cv2.threshold(self.data, 90, 255, cv2.THRESH_BINARY)
# a1 = np.median(a, 0)
# plt.hist(a1, 256, range=[0, 255], fc='k', ec='k')
# plt.show()
self.data = a
def blur(self):
px = 5
self.data = cv2.blur(self.data, (px, px))
# self.data = cv2.medianBlur(self.data, px)
def find_contours(self):
im2, contours, hierarchy = cv2.findContours(self.data, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
self.data = cv2.cvtColor(self.data, cv2.COLOR_GRAY2RGB)
cv2.drawContours(self.data, contours, -1, (255, 0, 0), 20)
def convert_range(self):
for i in range(1,30):
alpha = 1*i
a = cv2.convertScaleAbs(self.data, alpha=alpha, beta=0)
beta = 127 - np.median(a, [0, 1])
a = cv2.convertScaleAbs(self.data, alpha=alpha, beta=beta)
condition = np.mod(a, 255) == 0
K = np.sum(condition) / a.size
if K > 0.1:
break
self.data = a
def extend_data(self, zeros_arr):
zeros_arr += zeros_arr + 30
self.data = np.hstack((self.data, zeros_arr))
| gpl-3.0 |
saketkc/statsmodels | statsmodels/sandbox/examples/example_garch.py | 31 | 2294 | import numpy as np
import matplotlib.pyplot as plt
#import scikits.timeseries as ts
#import scikits.timeseries.lib.plotlib as tpl
import statsmodels.api as sm
#from statsmodels.sandbox import tsa
from statsmodels.sandbox.tsa.garch import * # local import
#dta2 = ts.tsfromtxt(r'gspc_table.csv',
# datecols=0, skiprows=0, delimiter=',',names=True, freq='D')
#print dta2
aa=np.genfromtxt(r'gspc_table.csv', skip_header=0, delimiter=',', names=True)
cl = aa['Close']
ret = np.diff(np.log(cl))[-2000:]*1000. # log-returns of the close price, last 2000 obs, scaled by 1000
ggmod = Garch(ret - ret.mean())#hgjr4[:nobs])#-hgjr4.mean()) #errgjr4)
ggmod.nar = 1
ggmod.nma = 1
ggmod._start_params = np.array([-0.1, 0.1, 0.1, 0.1])
ggres = ggmod.fit(start_params=np.array([-0.1, 0.1, 0.1, 0.0]),
maxiter=1000,method='bfgs')
print('ggres.params', ggres.params)
garchplot(ggmod.errorsest, ggmod.h, title='Garch estimated')
use_rpy = False
if use_rpy:
from rpy import r
r.library('fGarch')
f = r.formula('~garch(1, 1)')
fit = r.garchFit(f, data = ret - ret.mean(), include_mean=False)
f = r.formula('~arma(1,1) + ~garch(1, 1)')
fit = r.garchFit(f, data = ret)
ggmod0 = Garch0(ret - ret.mean())#hgjr4[:nobs])#-hgjr4.mean()) #errgjr4)
ggmod0.nar = 1
ggmod.nma = 1
start_params = np.array([-0.1, 0.1, ret.var()])
ggmod0._start_params = start_params #np.array([-0.6, 0.1, 0.2, 0.0])
ggres0 = ggmod0.fit(start_params=start_params, maxiter=2000)
print('ggres0.params', ggres0.params)
g11res = optimize.fmin(lambda params: -loglike_GARCH11(params, ret - ret.mean())[0], [0.01, 0.1, 0.1])
print(g11res)
llf = loglike_GARCH11(g11res, ret - ret.mean())
print(llf[0])
ggmod0 = Garch0(ret - ret.mean())#hgjr4[:nobs])#-hgjr4.mean()) #errgjr4)
ggmod0.nar = 2
ggmod.nma = 2
start_params = np.array([-0.1,-0.1, 0.1, 0.1, ret.var()])
ggmod0._start_params = start_params #np.array([-0.6, 0.1, 0.2, 0.0])
ggres0 = ggmod0.fit(start_params=start_params, maxiter=2000)#, method='ncg')
print('ggres0.params', ggres0.params)
ggmod = Garch(ret - ret.mean())#hgjr4[:nobs])#-hgjr4.mean()) #errgjr4)
ggmod.nar = 2
ggmod.nma = 2
start_params = np.array([-0.1,-0.1, 0.1, 0.1, 0.1, 0.1, 0.1])
ggmod._start_params = start_params
ggres = ggmod.fit(start_params=start_params, maxiter=1000)#,method='bfgs')
print('ggres.params', ggres.params)
| bsd-3-clause |
peastman/msmbuilder | msmbuilder/decomposition/pca.py | 9 | 1922 | # Author: Matthew Harrigan <[email protected]>
# Contributors:
# Copyright (c) 2016, Stanford University and the Authors
# All rights reserved.
from __future__ import print_function, division, absolute_import
from sklearn import decomposition
from .base import MultiSequenceDecompositionMixin
__all__ = ['PCA', 'SparsePCA']
class PCA(MultiSequenceDecompositionMixin, decomposition.PCA):
__doc__ = decomposition.PCA.__doc__
def summarize(self):
return '\n'.join([
"Principal Component Analysis (PCA)",
"----------",
"Number of components: {n_components}",
"explained variance raio: {explained_variance_ratio_}",
"Noise variance: {noise_variance_}",
]).format(**self.__dict__)
class SparsePCA(MultiSequenceDecompositionMixin, decomposition.SparsePCA):
__doc__ = decomposition.SparsePCA.__doc__
def summarize(self):
return '\n'.join([
"Sparse PCA",
"----------",
"Number of components: {n_components}",
]).format(**self.__dict__)
class MiniBatchSparsePCA(MultiSequenceDecompositionMixin,
decomposition.MiniBatchSparsePCA):
__doc__ = decomposition.MiniBatchSparsePCA.__doc__
def summarize(self):
return '\n'.join([
"MiniBatch Sparse PCA",
"--------------------",
"Number of components: {n_components}",
"Batch size: {batch_size}"
]).format(**self.__dict__)
class KernelPCA(MultiSequenceDecompositionMixin, decomposition.KernelPCA):
__doc__ = decomposition.KernelPCA.__doc__
def summarize(self):
return '\n'.join([
"Kernel PCA",
"--------------------",
"Number of components: {n_components}",
"Kernel: {kernel}",
]).format(**self.__dict__)
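# Hedged usage sketch (dataset shapes are assumptions): the
# MultiSequenceDecompositionMixin lets these estimators consume a list of
# 2-D arrays, one per trajectory, instead of a single matrix.
def _pca_example():
    import numpy as np
    trajs = [np.random.randn(100, 10) for _ in range(3)]
    pca = PCA(n_components=2)
    return pca.fit_transform(trajs)  # expected: one (100, 2) array per traj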
| lgpl-2.1 |