repo_name | path | copies | size | content | license
---|---|---|---|---|---
Ginfung/sway | Benchmarks/XOMO_Base/xomo.py | 1 | 41536 | import sys
sys.dont_write_bytecode=True
def demo(f=None,demos=[]):
if f: demos.append(f); return f
for d in demos:
print '\n--|',d.func_name,'|','-'*40,'\n',d.__doc__,'\n'
d()
def test(f=None,tests=[]):
if f: tests.append(f); return f
ok=no=0
for t in tests:
print "# ",t.func_name + ': ',t.__doc__
for n,(want,got) in enumerate(t()):
if want == got:
ok += 1; print "PASSED:",t.func_name,n+1
else:
no += 1; print "FAILED:",t.func_name,n+1
if tests:
print '\n# totals: %s%% PASSED' % round(100.0*ok/(ok+no))
@test
def tested1():
"Test functions return lists of (want,got) pairs"
return [(1,1),(0,1),(2,2),(1,0)]
@test
def tested2():
"Test function can return one pair"
return [(1,1)]
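# Added reading note (commentary, not part of the original file): the @test
# decorator above only registers a function; calling test() with no argument
# then runs every registered function and prints one PASSED/FAILED line per
# (want, got) pair. With the two examples above, tested1 reports 2 passed and
# 2 failed, tested2 reports 1 passed, and the overall pass percentage is printed.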
import random
class Deep(dict) :
def __getitem__(self,x) :
if x in self: return self.get(x)
new = self[x] = Deep()
return new
def push(self,k,v) :
all = self[k] if k in self else []
all.append(v)
self[k]=all
return all
def at(self,lst,default=None) :
here=self
for key in lst:
if not key in here:
return default
here = here[key]
return here
def inc(self,k,n=1):
new = (self[k] if k in self else 0) + n
self[k] = new
return new
@demo
def _deeped() :
"Auto-generation of nested dictionaries"
d=Deep()
d[1][2][3] = 22
d[1][2][4] = 44
print d
class Sample(object):
def one(o) : pass
def ready(o) : pass
def some(o,max=100):
o.ready()
while True:
max -= 1
if max < 0 : break
yield o.one()
class Some(Sample):
def __init__(o,txt):
o.txt=txt
def bias(o,lst,scale=2):
o.all = [(n**scale,x) for n,x in enumerate(lst)]
return o
def ready(o):
o.all = sorted(o.all,key=lambda x: x[0])
o.total = 0
for n,x in o.all: o.total += n
def one(o):
chosen = random.uniform(0,o.total)
count = 0
for n,x in o.all:
count += n
if count > chosen: return x
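# Added reading note (commentary, not part of the original class): one() is a
# roulette-wheel selection -- draw u ~ Uniform(0, total weight), walk the sorted
# (weight, item) pairs accumulating weights, and return the first item whose
# cumulative weight exceeds u, so heavier items are returned more often.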
@demo
def somed1():
"Biased list"
somed0(Some("xx").bias([x for x in xrange(0,25)]),0.2)
def somed0(r,shrink=2,n=1000):
def show(k,v) :
return str(k).rjust(10)+ ' : '+ \
'*'*int(v)+ ' [%3.2f]%%'%int(shrink*v)
all = Deep()
random.seed(1)
for x in r.some(max=n): all.inc(round(x,1))
print ""
#order = sorted([int(x) for x in all.keys()])
order = sorted(all.keys())
for k in order:
v = all[k]
print "DEBUG: " + str(v) + " " + str(k)
# for k,v in sorted(all.items(),key=all.get):
print show(k,100.00/shrink*v/n)
class Runiform(Sample):
def one(o):
return o.final(random.uniform(o.lo,o.hi))
def __init__(o,txt,lo,hi,final=float):
o.txt= txt; o.lo= lo; o.hi= hi; o.final= final
@demo
def somed1():
"Uniform 1 to 5"
somed0(Runiform("xx",1,5,int),0.5)
class Rtriangle(Sample):
def one(o):
return o.final(random.triangular(o.lo,o.hi,o.mode))
def __init__(o,txt,lo,hi,mode,final=float):
o.txt=txt; o.lo=lo; o.hi=hi; o.mode=mode; o.final=final
@demo
def somed2():
"Triangle min,max,mode = 0,20,4"
somed0(Rtriangle("xx",0,20,4,int),0.25)
class Rgamma(Sample):
def one(o):
return o.final(random.gammavariate(o.a,o.b))
def __init__(o,txt,a,b,final=float):
o.txt= txt; o.a= a; o.b= b; o.final=final
@demo
def somed3():
"Gamma a,b = 6,1"
somed0(Rgamma("xx",6,1,int),0.33)
class Rexpo(Sample):
def one(o):
return o.final(random.expovariate(o.lambd))
def __init__(o,txt,lambd,final=float):
o.txt= txt; o.lambd= lambd; o.final= final
@demo
def somed4():
"Lambda, decay constant=0.7"
somed0(Rexpo("xx",0.7,int),1)
class Rgauss(Sample):
def one(o):
return o.final(random.gauss(o.mu,o.sigma))
def __init__(o,txt,mu,sigma,final=float):
o.txt= txt; o.mu= mu; o.sigma= sigma; o.final= final
@demo
def somed5():
"Gaussian, mean=20, sigma=2"
somed0(Rgauss("xx",20,2,int),0.5)
class Rsteps(Sample):
def __init__(o,txt,bins=7,final=float):
o.txt= txt; o.bins= bins; o.final= final
o.all=[]; o.stale=False
def bias(o,lst):
o.stale=True
o.all=lst
return o # return self so the demo below can chain Rsteps(...).bias(lst)
def put(o,x):
o.stale=True
o.all.append(x)
def ready(o):
if o.stale:
o.all = sorted(o.all)
split = max(1, int(len(o.all)/o.bins))
o.all = [o.all[int(bin*split)] for bin in range(o.bins)]
o.stale=False
def __sub__(o1,o2):
o1.ready(); o2.ready()
diff = sum1 = sum2 = 0.0
for n1 in o1.all: sum1 += n1
for n2 in o2.all: sum2 += n2
for n1,n2 in zip(o1.all,o2.all) :
print n1,n2,sum1,sum2
diff += (n1/sum1 - n2/sum2)
return 100*diff
def one(o):
o.ready()
n = random.randint(1,o.bins-1)
return o.final(random.uniform( o.all[n-1], o.all[n]))
@demo
def somed6():
"Divide data into 7 steps"
lst = [x for x in xrange(0,33)]
somed0(Rsteps("xx",7,int).bias(lst))
import random
def any(l):
return l[random.randint(0,len(l)-1)]
def chunks(l, n):
"Divide 'l' into sub-lists of length 'n'."
return [l[i:i+n] for i in range(0, len(l), n)]
def often(seq,max=100,
item = lambda x: x,
weight = lambda x: x.priority) :
total = 0
for x in seq: total += weight(x)
while True:
max -= 1
if max < 0 : break
chosen = random.uniform(0, total)
count = 0
for x in seq:
count += weight(x)
if count > chosen:
yield item(x)
break
@test
def oftened():
"select, with bias, from a space"
def left(x) : return x[0]
def right(x): return x[1]
counts = Deep()
random.seed(1)
for x in often([("a",10),("b",20),("c",40),("d",80)],
max=1000,item=left,weight=right):
counts.inc(x)
return [(counts,{'a': 67, 'c': 265, 'b': 113, 'd': 555})]
import random
class X2Y(object):
def x(o): pass
def y(o,x) : pass
def xy(o):
x = o.x()
y = o.y(x)
return x,y
class Range(X2Y):
def __init__(o,name,min,max,final=float,wild=False):
o.txt = name
o.wild = wild
o.update(min,max,final)
def update(o,min,max,final=float,m=None):
o.min = min
o.max = max
o.sample = Runiform(o.txt,min,max,final)
if m:
m.all[o.txt] = o
def x(o):
return o.sample.one()
class Model(X2Y):
def __init__(o):
o.all = {}
for i in o.about():
o.all[i.txt] = i
def x(o):
out = {}
for what,thing in o.all.items(): out[what] = thing.x()
return out
def about(o): pass
#################
##--SCED-RISK--##
#################
def totalRisk(project, risktable):
_d = 3.73
return (sced_risk(project, risktable) +
prod_risk(project, risktable) +
pers_risk(project, risktable) +
proc_risk(project, risktable) +
plat_risk(project, risktable) +
reus_risk(project, risktable)) / _d
def getRisk(a, b, project, risks):
if (a in project) and (b in project):
_aval = int(project[a])
_bval = int(project[b])
try:
_rt = risks[a, b]
_rt = _rt.split(",") # split table
_rt = _rt[_bval-1] # grab line
_rt = _rt.split() # split line
return float(_rt[_aval-1]) # find index
except KeyError:
return 0
else: return 0
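# Added reading note (commentary, not part of the original functions): each risk
# table defined in readRisks() below is a 6x6 grid stored as a comma-separated
# string of rows, with rows and columns indexed by the 1..6 ratings of the two
# attributes; getRisk() splits the string and returns the cell for the project's
# (a, b) ratings, while totalRisk() sums the six category scores and divides by
# 3.73, which appears to act as a normalising constant.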
def sced_risk(proj, risks):
_a = 'sced'
return(sum(map(lambda x : getRisk(_a,x,proj,risks),
str.split("rely time pvol tool acap "
"aexp pcap plex ltex pmat"))))
def prod_risk(proj, risks):
return(sum(map(lambda (x,y) : getRisk(x,y,
proj,risks),
[("rely","acap"), ("rely","pcap"),
("cplx","acap"), ("cplx","pcap"),
("cplx","tool"), ("rely","pmat"),
("sced","cplx"), ("sced","rely"),
("sced","time"), ("ruse","aexp"),
("ruse","ltex")])))
def pers_risk(proj, risks):
return(sum(map(lambda (x,y) : getRisk(x,y,
proj,risks),
[("pmat","acap"), ("stor","acap"),
("time","acap"), ("tool","acap"),
("tool","pcap"), ("ruse","aexp"),
("ruse","ltex"), ("pmat","pcap"),
("stor","pcap"), ("time","pcap"),
("ltex","pcap"), ("pvol","plex"),
("sced","acap"), ("sced","aexp"),
("sced","pcap"), ("sced","plex"),
("sced","ltex"), ("rely","acap"),
("rely","pcap"), ("cplx","acap"),
("cplx","pcap"), ("team","aexp")
])))
def proc_risk(proj, risks):
return(sum(map(lambda (x,y) : getRisk(x,y,
proj,risks),
[("tool","pmat"), ("time","tool"),
("team","aexp"), ("team","sced"),
("team","site"), ("sced","tool"),
("sced","pmat"), ("cplx","tool"),
("pmat","acap"), ("tool","acap"),
("tool","pcap"), ("pmat","pcap")
])))
def plat_risk(proj, risks):
return(sum(map(lambda (x,y) : getRisk(x,y,
proj,risks),
[("sced","time"), ("sced","pvol"),
("stor","acap"), ("time","acap"),
("stor","pcap"), ("pvol","plex"),
("time","tool")])))
def reus_risk(project, risktable):
return(getRisk('ruse','aexp',project,risktable) +
getRisk('ruse','ltex',project,risktable))
#############
def readRisks(risktable):
risktable['sced','rely'] = ("0 0 0 1 2 0,"
"0 0 0 0 1 0,"
"0 0 0 0 0 0,"
"0 0 0 0 0 0,"
"0 0 0 0 0 0,"
"0 0 0 0 0 0")
risktable['sced','cplx'] = ("0 0 0 1 2 4,"
"0 0 0 0 1 2,"
"0 0 0 0 0 1,"
"0 0 0 0 0 0,"
"0 0 0 0 0 0,"
"0 0 0 0 0 0")
risktable['sced','time'] = ("0 0 0 1 2 4,"
"0 0 0 0 1 2,"
"0 0 0 0 0 1,"
"0 0 0 0 0 0,"
"0 0 0 0 0 0,"
"0 0 0 0 0 0")
risktable['sced','pvol'] = ("0 0 0 1 2 0,"
"0 0 0 0 1 0,"
"0 0 0 0 0 0,"
"0 0 0 0 0 0,"
"0 0 0 0 0 0,"
"0 0 0 0 0 0")
risktable['sced','tool'] = ("2 1 0 0 0 0,"
"1 0 0 0 0 0,"
"0 0 0 0 0 0,"
"0 0 0 0 0 0,"
"0 0 0 0 0 0,"
"0 0 0 0 0 0")
risktable['sced','pexp'] = ("4 2 1 0 0 0,"
"2 1 0 0 0 0,"
"1 0 0 0 0 0,"
"0 0 0 0 0 0,"
"0 0 0 0 0 0,"
"0 0 0 0 0 0")
risktable['sced','pcap'] = ("4 2 1 0 0 0,"
"2 1 0 0 0 0,"
"1 0 0 0 0 0,"
"0 0 0 0 0 0,"
"0 0 0 0 0 0,"
"0 0 0 0 0 0")
risktable['sced','aexp'] = ("4 2 1 0 0 0,"
"2 1 0 0 0 0,"
"1 0 0 0 0 0,"
"0 0 0 0 0 0,"
"0 0 0 0 0 0,"
"0 0 0 0 0 0")
risktable['sced','acap'] = ("4 2 1 0 0 0,"
"2 1 0 0 0 0,"
"1 0 0 0 0 0,"
"0 0 0 0 0 0,"
"0 0 0 0 0 0,"
"0 0 0 0 0 0")
risktable['sced','ltex'] = ("2 1 0 0 0 0,"
"1 0 0 0 0 0,"
"0 0 0 0 0 0,"
"0 0 0 0 0 0,"
"0 0 0 0 0 0,"
"0 0 0 0 0 0")
risktable['sced','pmat'] = ("2 1 0 0 0 0,"
"1 0 0 0 0 0,"
"0 0 0 0 0 0,"
"0 0 0 0 0 0,"
"0 0 0 0 0 0,"
"0 0 0 0 0 0")
risktable['rely','acap'] = ("0 0 0 0 0 0,"
"0 0 0 0 0 0,"
"1 0 0 0 0 0,"
"2 1 0 0 0 0,"
"4 2 1 0 0 0,"
"0 0 0 0 0 0")
risktable['rely','pcap'] = ("0 0 0 0 0 0,"
"0 0 0 0 0 0,"
"1 0 0 0 0 0,"
"2 1 0 0 0 0,"
"4 2 1 0 0 0,"
"0 0 0 0 0 0")
risktable['cplx','acap'] = ("0 0 0 0 0 0,"
"0 0 0 0 0 0,"
"0 0 0 0 0 0,"
"1 0 0 0 0 0,"
"2 1 0 0 0 0,"
"4 2 1 0 0 0")
risktable['cplx','pcap'] = ("0 0 0 0 0 0,"
"0 0 0 0 0 0,"
"0 0 0 0 0 0,"
"1 0 0 0 0 0,"
"2 1 0 0 0 0,"
"4 2 1 0 0 0")
risktable['cplx','tool'] = ("0 0 0 0 0 0,"
"0 0 0 0 0 0,"
"0 0 0 0 0 0,"
"1 0 0 0 0 0,"
"2 1 0 0 0 0,"
"4 2 1 0 0 0")
risktable['rely','pmat'] = ("0 0 0 0 0 0,"
"0 0 0 0 0 0,"
"1 0 0 0 0 0,"
"2 1 0 0 0 0,"
"4 2 1 0 0 0,"
"0 0 0 0 0 0")
risktable['pmat','acap'] = ("2 1 0 0 0 0,"
"1 0 0 0 0 0,"
"0 0 0 0 0 0,"
"0 0 0 0 0 0,"
"0 0 0 0 0 0,"
"0 0 0 0 0 0")
risktable['stor','acap'] = ("0 0 0 0 0 0,"
"0 0 0 0 0 0,"
"0 0 0 0 0 0,"
"1 0 0 0 0 0,"
"2 1 0 0 0 0,"
"4 2 1 0 0 0")
risktable['time','acap'] = ("0 0 0 0 0 0,"
"0 0 0 0 0 0,"
"0 0 0 0 0 0,"
"1 0 0 0 0 0,"
"2 1 0 0 0 0,"
"4 2 1 0 0 0")
risktable['tool','acap'] = ("2 1 0 0 0 0,"
"1 0 0 0 0 0,"
"0 0 0 0 0 0,"
"0 0 0 0 0 0,"
"0 0 0 0 0 0,"
"0 0 0 0 0 0")
risktable['tool','pcap'] = ("2 1 0 0 0 0,"
"1 0 0 0 0 0,"
"0 0 0 0 0 0,"
"0 0 0 0 0 0,"
"0 0 0 0 0 0,"
"0 0 0 0 0 0")
risktable['ruse','aexp'] = ("0 0 0 0 0 0,"
"0 0 0 0 0 0,"
"0 0 0 0 0 0,"
"1 0 0 0 0 0,"
"2 1 0 0 0 0,"
"4 2 1 0 0 0")
risktable['ruse','ltex'] = ("0 0 0 0 0 0,"
"0 0 0 0 0 0,"
"0 0 0 0 0 0,"
"1 0 0 0 0 0,"
"2 1 0 0 0 0,"
"4 2 1 0 0 0")
risktable['pmat','pcap'] = ("0 0 0 0 0 0,"
"0 0 0 0 0 0,"
"0 0 0 0 0 0,"
"1 0 0 0 0 0,"
"2 1 0 0 0 0,"
"4 2 1 0 0 0")
risktable['stor','pcap'] = ("0 0 0 0 0 0,"
"0 0 0 0 0 0,"
"0 0 0 0 0 0,"
"1 0 0 0 0 0,"
"2 1 0 0 0 0,"
"4 2 1 0 0 0")
risktable['time','pcap'] = ("0 0 0 0 0 0,"
"0 0 0 0 0 0,"
"0 0 0 0 0 0,"
"1 0 0 0 0 0,"
"2 1 0 0 0 0,"
"4 2 1 0 0 0")
risktable['ltex','pcap'] = ("4 2 1 0 0 0,"
"2 1 0 0 0 0,"
"1 0 0 0 0 0,"
"0 0 0 0 0 0,"
"0 0 0 0 0 0,"
"0 0 0 0 0 0")
risktable['pvol','pexp'] = ("0 0 0 0 0 0,"
"0 0 0 0 0 0,"
"0 0 0 0 0 0,"
"1 0 0 0 0 0,"
"2 1 0 0 0 0,"
"0 0 0 0 0 0")
risktable['tool','pmat'] = ("2 1 0 0 0 0,"
"1 0 0 0 0 0,"
"0 0 0 0 0 0,"
"0 0 0 0 0 0,"
"0 0 0 0 0 0,"
"0 0 0 0 0 0")
risktable['time','tool'] = ("0 0 0 0 0 0,"
"0 0 0 0 0 0,"
"0 0 0 0 0 0,"
"0 0 0 0 0 0,"
"1 0 0 0 0 0,"
"2 1 0 0 0 0")
risktable['team','aexp'] = ("2 1 0 0 0 0,"
"1 0 0 0 0 0,"
"0 0 0 0 0 0,"
"0 0 0 0 0 0,"
"0 0 0 0 0 0,"
"0 0 0 0 0 0")
risktable['team','sced'] = ("2 1 0 0 0 0,"
"1 0 0 0 0 0,"
"0 0 0 0 0 0,"
"0 0 0 0 0 0,"
"0 0 0 0 0 0,"
"0 0 0 0 0 0")
risktable['team','site'] = ("2 1 0 0 0 0,"
"1 0 0 0 0 0,"
"0 0 0 0 0 0,"
"0 0 0 0 0 0,"
"0 0 0 0 0 0,"
"0 0 0 0 0 0")
#############
import math
# CoQualMo Calibration Mods:
DefectIntroReqsNeg = ['pmat','prec','resl','team',
'acap','aexp','docu','ltex',
'pcon','plex','rely','sced',
'site','tool']
DefectIntroDesignNeg = ['pmat','prec','resl','team',
'acap','aexp','docu','ltex',
'pcon','plex','rely','sced',
'site','tool','pcap']
DefectIntroCodingNeg = ['pmat','prec','resl','team',
'acap','aexp','docu','ltex',
'pcon','plex','rely','sced',
'site','tool','pcap']
DefectIntroReqsPos = ['cplx','Data','pvol',
'ruse','stor','time']
DefectIntroDesignPos = ['cplx','Data','pvol',
'ruse','stor','time']
DefectIntroCodingPos = ['cplx','Data','pvol',
'ruse','stor','time']
DefectIntroReqsIgnore = ["flex","pcap"]
DefectIntroDesignIgnore = ["flex"]
DefectIntroCodingIgnore = ["flex"]
DefectRemovers = ["aa","etat","pr"]
class Cocomo(Model):
def __init__(o, *args, **kwargs):
o.bounds = {}
_s = "flex pmat prec resl team"
o.scaleFactors = _s.split()
_s = ("acap aexp cplx Data docu ltex "
"pcap pcon plex pvol rely ruse "
"sced site stor time tool")
o.effortMultipliers = _s.split()
_s = "aa etat pr"
o.defectRemovers = _s.split()
# Open file of constraints (?):
for _a in args:
if isinstance(_a, basestring):
try:
_acc = {}
with open(_a,'r') as infile:
for line in infile:
line = line.split()
if line[0] == '@project':
o.proj = line[1]
elif line[0] == '@discrete':
if line[1][0] == '?':
_attr = line[1][1:]
_lo = float(line[2])
_hi = float(line[len(line)-1])
else:
_attr = line[1]
_lo = _hi = int(line[2])
_acc[_attr] = _lo,_hi
# Overwrite file constraints w/kwargs:
kwargs = dict(_acc.items() + kwargs.items())
break
except:
print "Input file [", _a, "] not readable"
# Read constraints from kwargs:
for _key,_val in kwargs.items():
# print _key, _val
if ((_key in o.scaleFactors) or
(_key in o.effortMultipliers) or
(_key in o.defectRemovers) or
(_key in ["kloc","b"])):
if isinstance(_val, tuple):
_lo,_hi = _val
else:
_lo = _hi = _val
o.bounds[str(_key)] = _lo,_hi
# Parent init:
super(o.__class__,o).__init__()
def say(o,x,a,b,kloc,sum,prod,exp,
effort, # o1\o2,o3,o4
months="nc",defects="nc",risks="nc"):
for i,j in x.items():
if i=="kloc": print i,j
else: print i,j,str(o.all[i].y(j))
print (":a",a,":b",b,":kloc",kloc,":exp",exp,
":sum",sum,":prod",prod, "effort",effort,
"months",months,"defects",defects,
"risks",risks)
def sumSfs(o,x,out=0,reset=False):
for i in o.scaleFactors:
out += o.all[i].y(x[i],reset)
return out
def prodEms(o,x,out=1,reset=False):
for i in o.effortMultipliers:
out *= o.all[i].y(x[i],reset) #changed_nave
return out
def xy(o,verbose=False):
x = o.x()
a = x["b"] # a little tricky... "a" is the x of "b"
b = o.all["b"].y(a,reset=True)
kloc = o.all["kloc"].x()
sum = o.sumSfs(x,reset=True)
prod = o.prodEms(x,reset=True)
exp = b + 0.01 * sum
effort = a*(kloc**exp)*prod
if verbose: o.say(x,a,b,kloc,sum,prod,exp,effort)
return x,effort
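# Added reading note (commentary, not part of the original class): xy() follows
# a COCOMO-II-style effort equation, effort = a * KLOC**E * prod(EM_i), with
# exponent E = b + 0.01 * sum(SF_j) built from the five scale factors and the
# product taken over the seventeen effort multipliers; here a is the sampled
# value of the "b" range and b is its mapped y-value, as noted in xy() itself.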
def xys(o,verbose=False,olist=False):
x = o.x()
a = x["b"]
b = o.all["b"].y(a,reset=True)
kloc = x["kloc"]
sum = o.sumSfs(x,reset=True)
prod = o.prodEms(x,reset=True)
exp = b + 0.01 * sum
effort = o.effort_calc(x, a, b, exp, sum, prod)
months = o.month_calc(x, effort, sum, prod)
defects = o.defect_calc(x)
risks = o.risk_calc(x)
if verbose: o.say(x,a,b,kloc,sum,prod,exp,
effort,months,defects,risks)
if olist:
return [effort,months,defects,risks]
else:
return x,effort,months,defects,risks
def trials(o,n=500,out="out.csv",verbose=True,write=False):
import csv
keys = []
_efforts = []
_months = []
_defects = []
_risks = []
_first = 0
rows = []
with open(out,'w') as csv_file:
if write: csv_wri = csv.writer(csv_file)
for _i in range(0,n):
x = o.x()
if _i == 0:
for _k,_ in x.iteritems():
if _first == 0:
keys.append('$'+str(_k))
_first = 1
else:
keys.append('$'+str(_k)) #changed_nave
keys.extend(["-effort","-months",
"-defects","-risks"])
if write: csv_wri.writerows([keys])
a = x["b"]
b = o.all["b"].y(a,reset=True)
kloc = x["kloc"]
sum = o.sumSfs(x,reset=True)
prod = o.prodEms(x,reset=True)
exp = b + 0.01 * sum
effort = o.effort_calc(x,a,b,exp,sum,prod)
months = o.month_calc(x,effort,sum,prod)
defects = o.defect_calc(x)
risks = o.risk_calc(x)
_efforts.append(effort)
_months.append(months)
_defects.append(defects)
_risks.append(risks)
vals = []
for _,_v in x.iteritems():
vals.append(_v)
vals.extend([effort,months,defects,risks])
if write: csv_wri.writerows([vals])
rows.append(vals)
if verbose:
_effSum = math.fsum(_efforts)
_mosSum = math.fsum(_months)
_defSum = math.fsum(_defects)
_rskSum = math.fsum(_risks)
_effMean = _effSum/n
_mosMean = _mosSum/n
_defMean = _defSum/n
_rskMean = _rskSum/n
_effSD = pow( math.fsum(map(lambda x: pow(x-_effMean,2),_efforts))/n, 0.5)
_mosSD = pow( math.fsum(map(lambda x: pow(x-_mosMean,2),_months))/n, 0.5)
_defSD = pow( math.fsum(map(lambda x: pow(x-_defMean,2),_defects))/n, 0.5)
_rskSD = pow( math.fsum(map(lambda x: pow(x-_rskMean,2),_risks))/n, 0.5)
_efforts.sort()
_months.sort()
_defects.sort()
_risks.sort()
print "Means:"
print "\tEff:",_effMean,"\n\tMos:",_mosMean,"\n\tDef:",_defMean,"\n\tRsk:",_rskMean
print ""
print "Standard Deviations:"
print "\tEff:",_effSD,"\n\tMos:",_mosSD,"\n\tDef:",_defSD,"\n\tRsk:",_rskSD
print ""
print "Quartile Bounds (25/50/75):"
print "\tEff:", _efforts[int(.25*n)],"\t",\
_efforts[int(.5*n)],"\t",\
_efforts[int(.75*n)], \
"\n\tMos:", _months[int(.25*n)],"\t",\
_months[int(.5*n)],"\t",\
_months[int(.75*n)], \
"\n\tDef:", _defects[int(.25*n)],"\t",\
_defects[int(.5*n)] ,"\t",\
_defects[int(.75*n)], \
"\n\tRsk:", _risks[int(.25*n)],"\t",\
_risks[int(.5*n)],"\t",\
_risks[int(.75*n)]
return keys,rows
def about(o):
def dr(what, lo=1,hi=6) : return Dr(what,lo,hi)
def sf(what, lo=1,hi=5) : return Sf(what,lo,hi)
def emn(what,lo=1,hi=5) : return Emn(what,lo,hi)
def emp(what,lo=1,hi=5) : return Emp(what,lo,hi)
_rtn = []
# kloc:
if "kloc" in o.bounds:
_lo,_hi = o.bounds["kloc"]
else: _lo,_hi = 2,1000
_rtn.append( Range("kloc",_lo,_hi) )
# b (becomes 'a')
if "b" in o.bounds:
_lo,_hi = o.bounds["b"]
_rtn.append( B("b",_lo,_hi) )
else:
_lo,_hi = 3,10
_rtn.append( B("b",3,10,wild=True) )
# Defect Removers:
for _dr in ["aa",
"etat",
"pr" ]:
if _dr in o.bounds:
_lo,_hi = o.bounds[_dr]
_rtn.append( dr(_dr,_lo,_hi) )
else:
_rtn.append( dr(_dr) )
# Scale Factors:
for _sf in ["prec", "flex",
"resl", "team",
"pmat" ]:
if _sf in o.bounds:
_lo,_hi = o.bounds[_sf]
_rtn.append( sf(_sf,_lo,_hi) )
else:
_rtn.append( sf(_sf) )
# Effort Multipliers, Positive Slope
for _emp, _rng in [ ( "rely", (1,5) ),
( "Data", (2,5) ),
( "cplx", (1,6) ),
( "ruse", (2,6) ),
( "docu", (1,5) ),
( "time", (3,6) ),
( "stor", (3,6) ),
( "pvol", (2,5) )]:
if _emp in o.bounds:
_lo,_hi = o.bounds[_emp]
else:
_lo,_hi = _rng
_rtn.append( emp(_emp,_lo,_hi) )
# Effort Multipliers, Negative Slope
for _emn in ["acap", "pcap",
"pcon", "aexp",
"plex", "ltex",
"tool", "site",
"sced" ]:
if _emn in o.bounds:
_lo,_hi = o.bounds[_emn]
else:
if _emn == "site":
_hi = 6 # Special case
else:
_hi = 5 # (Default)
_lo = 1
_rtn.append( emn(_emn,_lo,_hi) )
return _rtn
def effort_calc(o, x,
a=-1, b=-1, exp=-1,
sum=-1, prod=-1):
if a == -1: a = x["b"]
if b == -1: b = o.all["b"].y(a)
if sum == -1: sum = o.sumSfs(x)
if exp == -1: exp = b + 0.01 * sum
if prod == -1: prod = o.prodEms(x)
return a*x["kloc"]**exp*prod
def month_calc(o, x, effort,
sum=-1, prod=-1):
if sum == -1: sum = o.sumSfs(x)
if prod == -1: prod = o.prodEms(x)
_c = 3.67
_d = 0.28
_sced = int(x["sced"])
_scedPercent = 0
if (_sced == 1):
_scedPercent = 75
elif (_sced == 2):
_scedPercent = 85
elif (_sced == 3):
_scedPercent = 100
elif (_sced == 4):
_scedPercent = 130
elif (_sced == 5):
_scedPercent = 160
_pmNs = (effort /
float(o.all["sced"].y(x["sced"])))
_elessb = 0.01 * sum
_f = _d + (0.2 * _elessb)
return _c * pow(_pmNs,_f) * (_scedPercent/100.0)
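# Added reading note (commentary, not part of the original method): month_calc()
# mirrors a COCOMO-II-style schedule equation, months = c * PM_NS**F * (sced%/100)
# with c = 3.67 and F = d + 0.2 * (0.01 * sum(SF_j)), d = 0.28, where PM_NS is
# the effort with the schedule-compression multiplier divided back out and sced%
# is the 75..160 percentage selected from the sced rating above.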
def defect_calc(o, x):
return (o.defects("requirements", x) +
o.defects("design", x) +
o.defects("code", x))
def defects(o, dtype, x):
_ksloc = float(x["kloc"])
_introduced = 0
import time
time.sleep(0.01)
if (dtype == "requirements"):
_introduced = (10 * _ksloc *
o.defectsIntroduced(dtype,x))
elif (dtype == "design"):
_introduced = (20 * _ksloc *
o.defectsIntroduced(dtype,x))
elif (dtype == "code"):
_introduced = (30 * _ksloc *
o.defectsIntroduced(dtype,x))
_percentRemoved = o.defectsRemovedRatio(dtype,x)
return _introduced * _percentRemoved
def defectsRemovedRatio(o, dtype,x):
_product = 1
for _key in o.defectRemovers:
if _key in x:
if (dtype == "requirements"):
_product *= (1 - float(
o.all[
_key
].calibs.defectRemovalReqs.y(x[_key])
))
elif (dtype == "design"):
_product *= (1 - float(
o.all[
_key
].calibs.defectRemovalDesign.y(x[_key])
))
elif (dtype == "code"):
_product *= (1 - float(
o.all[
_key
].calibs.defectRemovalCoding.y(x[_key])
))
return _product
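# Added reading note (commentary, not part of the original method): despite its
# name, defectsRemovedRatio() returns the fraction of defects *remaining* after
# removal, i.e. prod(1 - removal_fraction) over the aa/etat/pr removers, which
# is why defects() multiplies the introduced count by this value directly.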
def totalDefectsIntroduced(o,x):
_ksloc = x["kloc"]
return (10 * _ksloc *
o.defectsIntroduced(
"requirements", x) +
20 * _ksloc *
o.defectsIntroduced(
"design", x) +
30 * _ksloc *
o.defectsIntroduced(
"code", x))
def defectsIntroduced(o, dtype, x):
_product = 1
for _key in o.scaleFactors:
if _key in x:
if (dtype == "requirements"):
_product *= float(
o.all[
_key
].calibs.defectIntroReqs.y(x[_key])
)
elif (dtype == "design"):
_product *= float(
o.all[
_key
].calibs.defectIntroDesign.y(x[_key])
)
elif (dtype == "code"):
_product *= float(
o.all[
_key
].calibs.defectIntroCoding.y(x[_key])
)
else:
print ("Err: " + _key +
" not defined in source input")
for _key in o.effortMultipliers:
if _key in x:
if (dtype == "requirements"):
_product *= float(
o.all[
_key
].calibs.defectIntroReqs.y(x[_key])
)
elif (dtype == "design"):
_product *= float(
o.all[
_key
].calibs.defectIntroDesign.y(x[_key])
)
elif (dtype == "code"):
_product *= float(
o.all[
_key
].calibs.defectIntroCoding.y(x[_key])
)
else:
print ("Err: " + _key +
" not defined in source input")
return _product
def risk_calc(o, x):
rt = {}
readRisks(rt)
return totalRisk(x, rt)
class Calibrations():
"""CoQualMo calibration settings for
a given CoCoMo attribute"""
def __init__(o,txt):
# Requirements:
if txt in DefectIntroReqsPos:
o.defectIntroReqs = Calib('Intro',
'Reqs',
1 )
elif txt in DefectIntroReqsNeg:
o.defectIntroReqs = Calib('Intro',
'Reqs',
-1 )
elif txt in DefectIntroReqsIgnore:
o.defectIntroReqs = Calib('Intro',
'Reqs',
0 )
else: o.defectIntroReqs = None
# Design:
if txt in DefectIntroDesignPos:
o.defectIntroDesign = Calib('Intro',
'Design',
1 )
elif txt in DefectIntroDesignNeg:
o.defectIntroDesign = Calib('Intro',
'Design',
-1 )
elif txt in DefectIntroDesignIgnore:
o.defectIntroDesign = Calib('Intro',
'Design',
0 )
else: o.defectIntroDesign = None
# Coding:
if txt in DefectIntroCodingPos:
o.defectIntroCoding = Calib('Intro',
'Coding',
1 )
elif txt in DefectIntroCodingNeg:
o.defectIntroCoding = Calib('Intro',
'Coding',
-1 )
elif txt in DefectIntroCodingIgnore:
o.defectIntroCoding = Calib('Intro',
'Coding',
0 )
else: o.defectIntroCoding = None
# Removal:
if txt in DefectRemovers:
o.defectRemovalReqs = Calib('Removal',
'Reqs',
0)
o.defectRemovalDesign = Calib('Removal',
'Design',
0)
o.defectRemovalCoding = Calib('Removal',
'Coding',
0)
class Calib():
"""CoQualMo calibration Data generator"""
def __init__(o, phase, category, sign):
o.phase = phase # Intro/Removal
o.category = category # Reqs/Dsgn/Code
o.sign = sign # Slope Pos/Neg
o.mv = 0
o.mv = o.m(reset=True)
def y(o, x, reset=False):
if o.phase == 'Intro':
return o.m(reset)*(x-3)+1
elif o.phase == 'Removal':
return o.m(reset)*(x-1)
def m(o, reset=False):
if reset:
if o.phase == 'Intro':
if o.category == 'Reqs':
if o.sign > 0:
o.mv = random.uniform(0.0166,.38)
elif o.sign < 0:
o.mv = random.uniform(-0.215,-0.035)
elif o.category == 'Design':
if o.sign > 0:
o.mv = random.uniform(0.0066,0.145)
elif o.sign < 0:
o.mv = random.uniform(-0.325,-0.05)
elif o.category == 'Coding':
if o.sign > 0:
o.mv = random.uniform(0.0066,0.145)
elif o.sign < 0:
o.mv = random.uniform(-0.29,-0.05)
elif o.phase == 'Removal':
if o.category == 'Reqs':
o.mv = random.uniform(0.0,0.14)
elif o.category == 'Design':
o.mv = random.uniform(0.0,0.156)
elif o.category == 'Coding':
o.mv = random.uniform(0.1,0.176)
return o.mv
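# Added reading note (commentary, not part of the original class): Calib maps a
# driver rating x to a multiplier using a random slope m drawn from the
# phase/category-specific ranges above -- defect introduction uses
# y = m*(x - 3) + 1 (the nominal rating 3 gives 1), and defect removal uses
# y = m*(x - 1) (the lowest rating removes nothing).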
class Sf(Range):
"""Scale Factor"""
def __init__(o, *args, **kwargs):
super(o.__class__,o).__init__(*args,**kwargs)
o.mv = random.uniform(-0.972,-0.648)
o.calibs = Calibrations(o.txt)
def y(o,x,reset=False): return o.m(reset)*(x - 6)
def m(o,reset=False):
if reset:
o.mv = random.uniform(-0.972,-0.648)
return o.mv
class Dr(Range):
"""Defect Remover"""
def __init__(o, *args, **kwargs):
super(o.__class__,o).__init__(*args,**kwargs)
o.calibs = Calibrations(o.txt)
def y(o,x,reset=False):
pass
class Em(Range):
"""Effort Multiplier"""
def y(o,x,reset=False):
return o.m(reset)*(x-3)+1
class Emp(Em):
"""Effort Multiplier, Positive slope"""
def __init__(o, *args, **kwargs):
super(o.__class__,o).__init__(*args,**kwargs)
o.mv = random.uniform(0.055,0.15)
o.calibs = Calibrations(o.txt)
def m(o,reset=False):
if reset:
o.mv = random.uniform(0.055,0.15)
return o.mv
class Emn(Em):
"""Effort Multiplier, Negative slope"""
def __init__(o, *args, **kwargs):
super(o.__class__,o).__init__(*args,**kwargs)
o.mv = random.uniform(-0.166,-0.075)
o.calibs = Calibrations(o.txt)
def m(o,reset=False):
if reset:
o.mv = random.uniform(-0.166,-0.075)
return o.mv
class B(Range):
def __init__(o, *args, **kwargs):
super(o.__class__,o).__init__(*args,**kwargs)
o.rval = random.random()
def y(o,x,reset=False):
if reset:
o.rval = random.random()
return -0.036 * x + 1.1 - 0.1*o.rval - 0.05
# import os
# import sys
# def coced0(output=os.environ["HOME"]+"/tmp",
# Data = "./Data",
# model=None):
# if not model:
# if len(sys.argv) > 1:
# model = sys.argv[1]
# else:
# model="flight"
# _c = Cocomo(Data + '/' + model)
# _c.xys(verbose=False)
# out = output + "/" + model + ".csv"
# _c.trials(out=out,verbose=False)
# sys.stderr.write("# see" + out + "\n")
#coced0()
def coced1(max=1000):
import matplotlib.pyplot as plt
random.seed(1)
c = Cocomo()
n = 0
out= sorted([c.xy() for x in range(max)],
key=lambda x: x[1])
xs=[]
ys=[]
for x,y in out:
n += 1
xs.append(n)
ys.append(y)
p1, = plt.plot(xs,ys,'ro')
p2, = plt.plot(xs,[x*2 for x in ys],'bo')
plt.legend([p2,p1],["small","bigger"],loc=4)
plt.xlim(0,1050)
plt.yscale('log')
plt.ylabel('effort')
plt.xlabel('all efforts, sorted')
plt.show()
#plt.savefig('coced1.png')
#coced1()
def coced1b(max=1000):
import matplotlib.pyplot as plt
random.seed(1)
c = Cocomo()
n = 0
out = sorted([c.xy() for x in range(max)],
key = lambda x: x[1])
xs = []
y1s = []
y2s = []
y3s = []
for x,y1,y2,y3 in out:
n += 1
xs.append(n)
y1s.append(y1)
y2s.append(y2)
y3s.append(y3)
def coced2(max=1000,rounds=10):
#random.seed(1)
c = Cocomo()
coced2a(rounds,c,max)
def coced2a(r,c,max,updates={}):
def h100(x,r=250) : return int(x/r) * r
if r > 0:
for k in updates:
c.all[k].sample = updates[k]
out = [c.xy() for x in range(max)]
efforts = Rsteps("effort[%s]" % r,final=h100)
for _,effort in out:
efforts.all.append(effort)
somed0(efforts,n=max)
better = elite(out)
#for k,v in better.items():
# print "\n",k
#somed0(v,n=max)
coced2a(r-1,c,max,better)
def coced3(max=1000,rounds=20):
random.seed(1)
c = Cocomo()
import matplotlib.pyplot as plt
#plt.yscale('log')
plt.ylabel('effort')
plt.xlabel('all efforts, sorted')
styles=["r-","m-","c-","y-","k-","b-","g-"]
plots=[]
legends=[]
coced3a(0,len(styles)-1,c,max,plt,styles,plots=plots,legends=legends)
plt.legend(plots,legends,loc=2)
plt.xlim(0,1050)
plt.show()
def coced3a(round,rounds,c,max,plt,styles,updates={},plots=[],legends=[]):
def h100(x,r=250) : return int(x/r) * r
if round <= rounds:
for k in updates:
c.all[k].sample = updates[k]
out = [c.xy() for x in range(max)]
better = elite(out)
plot = plt.plot([x for x in range(1000)],
sorted([effort for _,effort in out]),
styles[round],linewidth=round+1)
plots.append(plot)
legends.append("round%s" % round)
coced3a(round+1,rounds,c,max,plt,styles,updates=better,
plots=plots,legends=legends)
def coced4(samples=1000,rounds=15):
#random.seed(1)
c = Cocomo()
import matplotlib.pyplot as plt
#plt.yscale('log')
xs = []
medians=[]
spreads=[]
mosts=[]
coced4a(0,rounds,c,samples,{},xs,medians,spreads,mosts)
plt.ylabel('effort')
plt.xlabel('round')
plt.legend([plt.plot(xs,medians),plt.plot(xs,spreads)],
["median","spread"],
loc=1)
plt.xlim(-0.5,len(medians)+0.5)
plt.ylim(0,1.05*max(medians + spreads + mosts))
plt.show()
def coced4a(round,rounds,c,samples,updates={},xs=[],medians=[],spreads=[],mosts=[]):
if round <= rounds:
print round
for k in updates:
if not c.all[k].wild:
c.all[k].sample = updates[k]
somed0(c.all[k].sample,n=100)
out = [c.xy() for x in range(samples)]
better = elite(out)
ys = sorted([x for _,x in out])
p25,p50,p75= [int(len(ys)*n) for n in [0.25,0.5,0.75]]
medians.append(ys[p50])
spreads.append(ys[p75] - ys[p25])
xs.append(round)
coced4a(round+1,rounds,c,samples,updates=better,
xs=xs,medians=medians,spreads=spreads,mosts=mosts)
def elite(xy,bins=7,top=0.2,final=float,key=lambda x:x[1]):
def r(x) : return "%3.2f" % x
def keep(lst):
keeper = {}
for how,_ in lst:
if not keeper:
for k in how:
keeper[k] = Rsteps(k,bins,final)
for k,v in how.items():
keeper[k].put(v)
return keeper
n = int(top*len(xy))
xy = sorted(xy,key=key)
bests = keep(xy[:n])
rests = keep(xy[n:])
for k,v in bests.items():
print k, bests[k] - rests[k]
return bests
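# Added reading note (commentary, not part of the original function): elite()
# sorts the sampled (settings, effort) pairs by effort, keeps the best `top`
# fraction, and summarises each attribute of that elite subset as an Rsteps
# distribution; the coced* loops above then install these distributions as the
# samplers for the next round, so later rounds draw from better-scoring regions.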
#coced4()
| mit |
moutai/scikit-learn | sklearn/preprocessing/tests/test_imputation.py | 18 | 14289 |
import numpy as np
from scipy import sparse
from sklearn.base import clone
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_true
from sklearn.preprocessing.imputation import Imputer
from sklearn.pipeline import Pipeline
from sklearn.model_selection import GridSearchCV
from sklearn import tree
from sklearn.random_projection import sparse_random_matrix
def _check_statistics(X, X_true,
strategy, statistics, missing_values):
"""Utility function for testing imputation for a given strategy.
Test:
- along the two axes
- with dense and sparse arrays
Check that:
- the statistics (mean, median, mode) are correct
- the missing values are imputed correctly"""
err_msg = "Parameters: strategy = %s, missing_values = %s, " \
"axis = {0}, sparse = {1}" % (strategy, missing_values)
# Normal matrix, axis = 0
imputer = Imputer(missing_values, strategy=strategy, axis=0)
X_trans = imputer.fit(X).transform(X.copy())
assert_array_equal(imputer.statistics_, statistics,
err_msg.format(0, False))
assert_array_equal(X_trans, X_true, err_msg.format(0, False))
# Normal matrix, axis = 1
imputer = Imputer(missing_values, strategy=strategy, axis=1)
imputer.fit(X.transpose())
if np.isnan(statistics).any():
assert_raises(ValueError, imputer.transform, X.copy().transpose())
else:
X_trans = imputer.transform(X.copy().transpose())
assert_array_equal(X_trans, X_true.transpose(),
err_msg.format(1, False))
# Sparse matrix, axis = 0
imputer = Imputer(missing_values, strategy=strategy, axis=0)
imputer.fit(sparse.csc_matrix(X))
X_trans = imputer.transform(sparse.csc_matrix(X.copy()))
if sparse.issparse(X_trans):
X_trans = X_trans.toarray()
assert_array_equal(imputer.statistics_, statistics,
err_msg.format(0, True))
assert_array_equal(X_trans, X_true, err_msg.format(0, True))
# Sparse matrix, axis = 1
imputer = Imputer(missing_values, strategy=strategy, axis=1)
imputer.fit(sparse.csc_matrix(X.transpose()))
if np.isnan(statistics).any():
assert_raises(ValueError, imputer.transform,
sparse.csc_matrix(X.copy().transpose()))
else:
X_trans = imputer.transform(sparse.csc_matrix(X.copy().transpose()))
if sparse.issparse(X_trans):
X_trans = X_trans.toarray()
assert_array_equal(X_trans, X_true.transpose(),
err_msg.format(1, True))
def test_imputation_shape():
# Verify the shapes of the imputed matrix for different strategies.
X = np.random.randn(10, 2)
X[::2] = np.nan
for strategy in ['mean', 'median', 'most_frequent']:
imputer = Imputer(strategy=strategy)
X_imputed = imputer.fit_transform(X)
assert_equal(X_imputed.shape, (10, 2))
X_imputed = imputer.fit_transform(sparse.csr_matrix(X))
assert_equal(X_imputed.shape, (10, 2))
def test_imputation_mean_median_only_zero():
# Test imputation using the mean and median strategies, when
# missing_values == 0.
X = np.array([
[np.nan, 0, 0, 0, 5],
[np.nan, 1, 0, np.nan, 3],
[np.nan, 2, 0, 0, 0],
[np.nan, 6, 0, 5, 13],
])
X_imputed_mean = np.array([
[3, 5],
[1, 3],
[2, 7],
[6, 13],
])
statistics_mean = [np.nan, 3, np.nan, np.nan, 7]
# Behaviour of median with NaN is undefined, e.g. different results in
# np.median and np.ma.median
X_for_median = X[:, [0, 1, 2, 4]]
X_imputed_median = np.array([
[2, 5],
[1, 3],
[2, 5],
[6, 13],
])
statistics_median = [np.nan, 2, np.nan, 5]
_check_statistics(X, X_imputed_mean, "mean", statistics_mean, 0)
_check_statistics(X_for_median, X_imputed_median, "median",
statistics_median, 0)
def safe_median(arr, *args, **kwargs):
# np.median([]) raises a TypeError for numpy >= 1.10.1
length = arr.size if hasattr(arr, 'size') else len(arr)
return np.nan if length == 0 else np.median(arr, *args, **kwargs)
def safe_mean(arr, *args, **kwargs):
# np.mean([]) raises a RuntimeWarning for numpy >= 1.10.1
length = arr.size if hasattr(arr, 'size') else len(arr)
return np.nan if length == 0 else np.mean(arr, *args, **kwargs)
def test_imputation_mean_median():
# Test imputation using the mean and median strategies, when
# missing_values != 0.
rng = np.random.RandomState(0)
dim = 10
dec = 10
shape = (dim * dim, dim + dec)
zeros = np.zeros(shape[0])
values = np.arange(1, shape[0]+1)
values[4::2] = - values[4::2]
tests = [("mean", "NaN", lambda z, v, p: safe_mean(np.hstack((z, v)))),
("mean", 0, lambda z, v, p: np.mean(v)),
("median", "NaN", lambda z, v, p: safe_median(np.hstack((z, v)))),
("median", 0, lambda z, v, p: np.median(v))]
for strategy, test_missing_values, true_value_fun in tests:
X = np.empty(shape)
X_true = np.empty(shape)
true_statistics = np.empty(shape[1])
# Create a matrix X with columns
# - with only zeros,
# - with only missing values
# - with zeros, missing values and values
# And a matrix X_true containing all true values
for j in range(shape[1]):
nb_zeros = (j - dec + 1 > 0) * (j - dec + 1) * (j - dec + 1)
nb_missing_values = max(shape[0] + dec * dec
- (j + dec) * (j + dec), 0)
nb_values = shape[0] - nb_zeros - nb_missing_values
z = zeros[:nb_zeros]
p = np.repeat(test_missing_values, nb_missing_values)
v = values[rng.permutation(len(values))[:nb_values]]
true_statistics[j] = true_value_fun(z, v, p)
# Create the columns
X[:, j] = np.hstack((v, z, p))
if 0 == test_missing_values:
X_true[:, j] = np.hstack((v,
np.repeat(
true_statistics[j],
nb_missing_values + nb_zeros)))
else:
X_true[:, j] = np.hstack((v,
z,
np.repeat(true_statistics[j],
nb_missing_values)))
# Shuffle them the same way
np.random.RandomState(j).shuffle(X[:, j])
np.random.RandomState(j).shuffle(X_true[:, j])
# Mean doesn't support columns containing NaNs, median does
if strategy == "median":
cols_to_keep = ~np.isnan(X_true).any(axis=0)
else:
cols_to_keep = ~np.isnan(X_true).all(axis=0)
X_true = X_true[:, cols_to_keep]
_check_statistics(X, X_true, strategy,
true_statistics, test_missing_values)
def test_imputation_median_special_cases():
# Test median imputation with sparse boundary cases
X = np.array([
[0, np.nan, np.nan], # odd: implicit zero
[5, np.nan, np.nan], # odd: explicit nonzero
[0, 0, np.nan], # even: average two zeros
[-5, 0, np.nan], # even: avg zero and neg
[0, 5, np.nan], # even: avg zero and pos
[4, 5, np.nan], # even: avg nonzeros
[-4, -5, np.nan], # even: avg negatives
[-1, 2, np.nan], # even: crossing neg and pos
]).transpose()
X_imputed_median = np.array([
[0, 0, 0],
[5, 5, 5],
[0, 0, 0],
[-5, 0, -2.5],
[0, 5, 2.5],
[4, 5, 4.5],
[-4, -5, -4.5],
[-1, 2, .5],
]).transpose()
statistics_median = [0, 5, 0, -2.5, 2.5, 4.5, -4.5, .5]
_check_statistics(X, X_imputed_median, "median",
statistics_median, 'NaN')
def test_imputation_most_frequent():
# Test imputation using the most-frequent strategy.
X = np.array([
[-1, -1, 0, 5],
[-1, 2, -1, 3],
[-1, 1, 3, -1],
[-1, 2, 3, 7],
])
X_true = np.array([
[2, 0, 5],
[2, 3, 3],
[1, 3, 3],
[2, 3, 7],
])
# scipy.stats.mode, used in Imputer, doesn't return the first most
# frequent value as promised in the doc but the lowest of the most frequent
# values. If this test fails after an update of scipy, Imputer will need to
# be updated to be consistent with the new (correct) behaviour
_check_statistics(X, X_true, "most_frequent", [np.nan, 2, 3, 3], -1)
def test_imputation_pipeline_grid_search():
# Test imputation within a pipeline + gridsearch.
pipeline = Pipeline([('imputer', Imputer(missing_values=0)),
('tree', tree.DecisionTreeRegressor(random_state=0))])
parameters = {
'imputer__strategy': ["mean", "median", "most_frequent"],
'imputer__axis': [0, 1]
}
l = 100
X = sparse_random_matrix(l, l, density=0.10)
Y = sparse_random_matrix(l, 1, density=0.10).toarray()
gs = GridSearchCV(pipeline, parameters)
gs.fit(X, Y)
def test_imputation_pickle():
# Test for pickling imputers.
import pickle
l = 100
X = sparse_random_matrix(l, l, density=0.10)
for strategy in ["mean", "median", "most_frequent"]:
imputer = Imputer(missing_values=0, strategy=strategy)
imputer.fit(X)
imputer_pickled = pickle.loads(pickle.dumps(imputer))
assert_array_equal(imputer.transform(X.copy()),
imputer_pickled.transform(X.copy()),
"Fail to transform the data after pickling "
"(strategy = %s)" % (strategy))
def test_imputation_copy():
# Test imputation with copy
X_orig = sparse_random_matrix(5, 5, density=0.75, random_state=0)
# copy=True, dense => copy
X = X_orig.copy().toarray()
imputer = Imputer(missing_values=0, strategy="mean", copy=True)
Xt = imputer.fit(X).transform(X)
Xt[0, 0] = -1
assert_false(np.all(X == Xt))
# copy=True, sparse csr => copy
X = X_orig.copy()
imputer = Imputer(missing_values=X.data[0], strategy="mean", copy=True)
Xt = imputer.fit(X).transform(X)
Xt.data[0] = -1
assert_false(np.all(X.data == Xt.data))
# copy=False, dense => no copy
X = X_orig.copy().toarray()
imputer = Imputer(missing_values=0, strategy="mean", copy=False)
Xt = imputer.fit(X).transform(X)
Xt[0, 0] = -1
assert_true(np.all(X == Xt))
# copy=False, sparse csr, axis=1 => no copy
X = X_orig.copy()
imputer = Imputer(missing_values=X.data[0], strategy="mean",
copy=False, axis=1)
Xt = imputer.fit(X).transform(X)
Xt.data[0] = -1
assert_true(np.all(X.data == Xt.data))
# copy=False, sparse csc, axis=0 => no copy
X = X_orig.copy().tocsc()
imputer = Imputer(missing_values=X.data[0], strategy="mean",
copy=False, axis=0)
Xt = imputer.fit(X).transform(X)
Xt.data[0] = -1
assert_true(np.all(X.data == Xt.data))
# copy=False, sparse csr, axis=0 => copy
X = X_orig.copy()
imputer = Imputer(missing_values=X.data[0], strategy="mean",
copy=False, axis=0)
Xt = imputer.fit(X).transform(X)
Xt.data[0] = -1
assert_false(np.all(X.data == Xt.data))
# copy=False, sparse csc, axis=1 => copy
X = X_orig.copy().tocsc()
imputer = Imputer(missing_values=X.data[0], strategy="mean",
copy=False, axis=1)
Xt = imputer.fit(X).transform(X)
Xt.data[0] = -1
assert_false(np.all(X.data == Xt.data))
# copy=False, sparse csr, axis=1, missing_values=0 => copy
X = X_orig.copy()
imputer = Imputer(missing_values=0, strategy="mean",
copy=False, axis=1)
Xt = imputer.fit(X).transform(X)
assert_false(sparse.issparse(Xt))
# Note: If X is sparse and if missing_values=0, then a (dense) copy of X is
# made, even if copy=False.
def check_indicator(X, expected_imputed_features, axis):
n_samples, n_features = X.shape
imputer = Imputer(missing_values=-1, strategy='mean', axis=axis)
imputer_with_in = clone(imputer).set_params(add_indicator_features=True)
Xt = imputer.fit_transform(X)
Xt_with_in = imputer_with_in.fit_transform(X)
imputed_features_mask = X[:, expected_imputed_features] == -1
n_features_new = Xt.shape[1]
n_imputed_features = len(imputer_with_in.imputed_features_)
assert_array_equal(imputer.imputed_features_, expected_imputed_features)
assert_array_equal(imputer_with_in.imputed_features_,
expected_imputed_features)
assert_equal(Xt_with_in.shape,
(n_samples, n_features_new + n_imputed_features))
assert_array_equal(Xt_with_in, np.hstack((Xt, imputed_features_mask)))
imputer_with_in = clone(imputer).set_params(add_indicator_features=True)
assert_array_equal(Xt_with_in,
imputer_with_in.fit_transform(sparse.csc_matrix(X)).A)
assert_array_equal(Xt_with_in,
imputer_with_in.fit_transform(sparse.csr_matrix(X)).A)
def test_indicator_features():
# one feature with all missing values
X = np.array([
[-1, -1, 2, 3],
[4, -1, 6, -1],
[8, -1, 10, 11],
[12, -1, -1, 15],
[16, -1, 18, 19]
])
check_indicator(X, np.array([0, 2, 3]), axis=0)
check_indicator(X, np.array([0, 1, 2, 3]), axis=1)
# one feature with all missing values and one with no missing value
# when axis=0 the feature gets discarded
X = np.array([
[-1, -1, 1, 3],
[4, -1, 0, -1],
[8, -1, 1, 0],
[0, -1, 0, 15],
[16, -1, 1, 19]
])
check_indicator(X, np.array([0, 3]), axis=0)
check_indicator(X, np.array([0, 1, 3]), axis=1)
| bsd-3-clause |
mcdeaton13/dynamic | Python/dynamic/UNUSED_CODE/income_nopoly.py | 2 | 5952 | # We don't use this code any longer. We're just saving it in case we decide to use this
# method to calibrate ability instead of the polynomials (which is unlikely). The polynomials
# are only for specific bin_weights, while this is general to any bin_weights. But the data is not
# as good for the top percentile. Once income_polynomials is generalized, we can delete this.
'''
------------------------------------------------------------------------
Last updated 2/16/2015
Functions for creating the matrix of ability levels, e.
This py-file calls the following other file(s):
data/e_vec_data/cwhs_earn_rate_age_profile.csv
This py-file creates the following other file(s):
(make sure that an OUTPUT folder exists)
OUTPUT/Demographics/ability_log
------------------------------------------------------------------------
'''
'''
------------------------------------------------------------------------
Packages
------------------------------------------------------------------------
'''
import numpy as np
import pandas as pd
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import numpy.polynomial.polynomial as poly
import scipy.optimize as opt
'''
------------------------------------------------------------------------
Read Data for Ability Types
------------------------------------------------------------------------
The data comes from the IRS. We can use either wage or earnings data;
in this version we use earnings data. The data is for individuals
in centile groups for each age from 20 to 70.
------------------------------------------------------------------------
'''
earn_rate = pd.read_table(
"data/e_vec_data/cwhs_earn_rate_age_profile.csv", sep=',', header=0)
del earn_rate['obs_earn']
piv = earn_rate.pivot(index='age', columns='q_earn', values='mean_earn_rate')
emat_basic = np.array(piv)
'''
------------------------------------------------------------------------
Generate ability type matrix
------------------------------------------------------------------------
Given desired starting and stopping ages, as well as the values for S
and J, the ability matrix is created.
------------------------------------------------------------------------
'''
def fit_exp_right(params, pt1):
a, b = params
x1, y1, slope = pt1
error1 = -a*b**(-x1)*np.log(b) - slope
error2 = a*b**(-x1) - y1
return [error1, error2]
def exp_funct(points, a, b):
y = a*b**(-points)
return y
def exp_fit(e_input, S, J):
params_guess = [20, 1]
e_output = np.zeros((S, J))
e_output[:50, :] = e_input
for j in xrange(J):
meanslope = np.mean([e_input[-1, j]-e_input[-2, j], e_input[
-2, j]-e_input[-3, j], e_input[-3, j]-e_input[-4, j]])
slope = np.min([meanslope, -.01])
a, b = opt.fsolve(fit_exp_right, params_guess, args=(
[70, e_input[-1, j], slope]))
e_output[50:, j] = exp_funct(np.linspace(70, 100, 30), a, b)
return e_output
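# Added reading note (commentary, not part of the original file): exp_fit()
# extends the 50-year ability profile by fitting a decaying exponential
# y = a * b**(-x) whose value and slope at age 70 match the last fitted point,
# then evaluating it over ages 70..100 for the extrapolated rows (the hard-coded
# linspace of 30 points assumes S = 80).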
def graph_income(S, J, e, starting_age, ending_age, bin_weights):
'''
Graphs the log of the ability matrix.
'''
e_tograph = np.log(e)
domain = np.linspace(starting_age, ending_age, S)
Jgrid = np.zeros(J)
for j in xrange(J):
Jgrid[j:] += bin_weights[j]
X, Y = np.meshgrid(domain, Jgrid)
cmap2 = matplotlib.cm.get_cmap('summer')
if J == 1:
plt.figure()
plt.plot(domain, e_tograph)
plt.savefig('OUTPUT/Demographics/ability_log')
else:
fig10 = plt.figure()
ax10 = fig10.gca(projection='3d')
ax10.plot_surface(X, Y, e_tograph.T, rstride=1, cstride=2, cmap=cmap2)
ax10.set_xlabel(r'age-$s$')
ax10.set_ylabel(r'ability type -$j$')
ax10.set_zlabel(r'log ability $log(e_j(s))$')
plt.savefig('OUTPUT/Demographics/ability_log')
if J == 1:
plt.figure()
plt.plot(domain, e)
plt.savefig('OUTPUT/Demographics/ability')
else:
fig10 = plt.figure()
ax10 = fig10.gca(projection='3d')
ax10.plot_surface(X, Y, e.T, rstride=1, cstride=2, cmap=cmap2)
ax10.set_xlabel(r'age-$s$')
ax10.set_ylabel(r'ability type -$j$')
ax10.set_zlabel(r'ability $e_j(s)$')
plt.savefig('OUTPUT/Demographics/ability')
def get_e(S, J, starting_age, ending_age, bin_weights, omega_SS):
'''
Parameters: S - Number of age cohorts
J - Number of ability levels by age
starting_age - age of first age cohort
ending_age - age of last age cohort
bin_weights - what fraction of each age is in each
ability type
Returns: e - S x J matrix of ability levels for each
age cohort, normalized so
the weighted sum is one
'''
emat_trunc = emat_basic[:50, :]
cum_bins = 100 * np.array(bin_weights)
for j in xrange(J-1):
cum_bins[j+1] += cum_bins[j]
emat_collapsed = np.zeros((50, J))
for s in xrange(50):
for j in xrange(J):
if j == 0:
emat_collapsed[s, j] = emat_trunc[s, :cum_bins[j]].mean()
else:
emat_collapsed[s, j] = emat_trunc[
s, cum_bins[j-1]:cum_bins[j]].mean()
e_fitted = np.zeros((50, J))
for j in xrange(J):
func = poly.polyfit(
np.arange(50)+starting_age, emat_collapsed[:50, j], deg=2)
e_fitted[:, j] = poly.polyval(np.arange(50)+starting_age, func)
emat_extended = exp_fit(e_fitted, S, J)
for j in xrange(1, J):
emat_extended[:, j] = np.max(np.array(
[emat_extended[:, j], emat_extended[:, j-1]]), axis=0)
graph_income(S, J, emat_extended, starting_age, ending_age, bin_weights)
emat_normed = emat_extended/(omega_SS * bin_weights * emat_extended).sum()
return emat_normed
| mit |
ryfeus/lambda-packs | Sklearn_scipy_numpy/source/sklearn/datasets/twenty_newsgroups.py | 35 | 13626 | """Caching loader for the 20 newsgroups text classification dataset
The description of the dataset is available on the official website at:
http://people.csail.mit.edu/jrennie/20Newsgroups/
Quoting the introduction:
The 20 Newsgroups data set is a collection of approximately 20,000
newsgroup documents, partitioned (nearly) evenly across 20 different
newsgroups. To the best of my knowledge, it was originally collected
by Ken Lang, probably for his Newsweeder: Learning to filter netnews
paper, though he does not explicitly mention this collection. The 20
newsgroups collection has become a popular data set for experiments
in text applications of machine learning techniques, such as text
classification and text clustering.
This dataset loader will download the recommended "by date" variant of the
dataset and which features a point in time split between the train and
test sets. The compressed dataset size is around 14 Mb compressed. Once
uncompressed the train set is 52 MB and the test set is 34 MB.
The data is downloaded, extracted and cached in the '~/scikit_learn_data'
folder.
The `fetch_20newsgroups` function will not vectorize the data into numpy
arrays but the dataset lists the filenames of the posts and their categories
as target labels.
The `fetch_20newsgroups_vectorized` function will in addition do a simple
tf-idf vectorization step.
"""
# Copyright (c) 2011 Olivier Grisel <[email protected]>
# License: BSD 3 clause
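# Added usage sketch (illustrative commentary, not part of the original module):
# the public loaders defined below are typically used as
#     from sklearn.datasets import fetch_20newsgroups
#     train = fetch_20newsgroups(subset='train',
#                                remove=('headers', 'footers', 'quotes'))
#     print(train.target_names[:3], len(train.data))
# which downloads and caches the archive on first use and returns a Bunch with
# .data, .target and .target_names attributes.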
import os
import logging
import tarfile
import pickle
import shutil
import re
import codecs
import numpy as np
import scipy.sparse as sp
from .base import get_data_home
from .base import Bunch
from .base import load_files
from .base import _pkl_filepath
from ..utils import check_random_state
from ..feature_extraction.text import CountVectorizer
from ..preprocessing import normalize
from ..externals import joblib, six
if six.PY3:
from urllib.request import urlopen
else:
from urllib2 import urlopen
logger = logging.getLogger(__name__)
URL = ("http://people.csail.mit.edu/jrennie/"
"20Newsgroups/20news-bydate.tar.gz")
ARCHIVE_NAME = "20news-bydate.tar.gz"
CACHE_NAME = "20news-bydate.pkz"
TRAIN_FOLDER = "20news-bydate-train"
TEST_FOLDER = "20news-bydate-test"
def download_20newsgroups(target_dir, cache_path):
"""Download the 20 newsgroups data and stored it as a zipped pickle."""
archive_path = os.path.join(target_dir, ARCHIVE_NAME)
train_path = os.path.join(target_dir, TRAIN_FOLDER)
test_path = os.path.join(target_dir, TEST_FOLDER)
if not os.path.exists(target_dir):
os.makedirs(target_dir)
if os.path.exists(archive_path):
# Download is not complete as the .tar.gz file is removed after
# download.
logger.warning("Download was incomplete, downloading again.")
os.remove(archive_path)
logger.warning("Downloading dataset from %s (14 MB)", URL)
opener = urlopen(URL)
with open(archive_path, 'wb') as f:
f.write(opener.read())
logger.info("Decompressing %s", archive_path)
tarfile.open(archive_path, "r:gz").extractall(path=target_dir)
os.remove(archive_path)
# Store a zipped pickle
cache = dict(train=load_files(train_path, encoding='latin1'),
test=load_files(test_path, encoding='latin1'))
compressed_content = codecs.encode(pickle.dumps(cache), 'zlib_codec')
with open(cache_path, 'wb') as f:
f.write(compressed_content)
shutil.rmtree(target_dir)
return cache
def strip_newsgroup_header(text):
"""
Given text in "news" format, strip the headers, by removing everything
before the first blank line.
"""
_before, _blankline, after = text.partition('\n\n')
return after
_QUOTE_RE = re.compile(r'(writes in|writes:|wrote:|says:|said:'
r'|^In article|^Quoted from|^\||^>)')
def strip_newsgroup_quoting(text):
"""
Given text in "news" format, strip lines beginning with the quote
characters > or |, plus lines that often introduce a quoted section
(for example, because they contain the string 'writes:'.)
"""
good_lines = [line for line in text.split('\n')
if not _QUOTE_RE.search(line)]
return '\n'.join(good_lines)
def strip_newsgroup_footer(text):
"""
Given text in "news" format, attempt to remove a signature block.
As a rough heuristic, we assume that signatures are set apart by either
a blank line or a line made of hyphens, and that it is the last such line
in the file (disregarding blank lines at the end).
"""
lines = text.strip().split('\n')
for line_num in range(len(lines) - 1, -1, -1):
line = lines[line_num]
if line.strip().strip('-') == '':
break
if line_num > 0:
return '\n'.join(lines[:line_num])
else:
return text
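# Added reading note (commentary, not part of the original module): the three
# strip_* helpers above implement the remove=('headers', 'footers', 'quotes')
# option of fetch_20newsgroups below -- strip_newsgroup_header() drops everything
# before the first blank line, strip_newsgroup_quoting() drops ">"/"|"-prefixed
# and "... writes:"-style lines, and strip_newsgroup_footer() drops a trailing
# signature block separated by a blank or dashed line.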
def fetch_20newsgroups(data_home=None, subset='train', categories=None,
shuffle=True, random_state=42,
remove=(),
download_if_missing=True):
"""Load the filenames and data from the 20 newsgroups dataset.
Read more in the :ref:`User Guide <20newsgroups>`.
Parameters
----------
subset: 'train' or 'test', 'all', optional
Select the dataset to load: 'train' for the training set, 'test'
for the test set, 'all' for both, with shuffled ordering.
data_home: optional, default: None
Specify a download and cache folder for the datasets. If None,
all scikit-learn data is stored in '~/scikit_learn_data' subfolders.
categories: None or collection of string or unicode
If None (default), load all the categories.
If not None, list of category names to load (other categories
ignored).
shuffle: bool, optional
Whether or not to shuffle the data: might be important for models that
make the assumption that the samples are independent and identically
distributed (i.i.d.), such as stochastic gradient descent.
random_state: numpy random number generator or seed integer
Used to shuffle the dataset.
download_if_missing: optional, True by default
If False, raise an IOError if the data is not locally available
instead of trying to download the data from the source site.
remove: tuple
May contain any subset of ('headers', 'footers', 'quotes'). Each of
these are kinds of text that will be detected and removed from the
newsgroup posts, preventing classifiers from overfitting on
metadata.
'headers' removes newsgroup headers, 'footers' removes blocks at the
ends of posts that look like signatures, and 'quotes' removes lines
that appear to be quoting another post.
'headers' follows an exact standard; the other filters are not always
correct.
"""
data_home = get_data_home(data_home=data_home)
cache_path = _pkl_filepath(data_home, CACHE_NAME)
twenty_home = os.path.join(data_home, "20news_home")
cache = None
if os.path.exists(cache_path):
try:
with open(cache_path, 'rb') as f:
compressed_content = f.read()
uncompressed_content = codecs.decode(
compressed_content, 'zlib_codec')
cache = pickle.loads(uncompressed_content)
except Exception as e:
print(80 * '_')
print('Cache loading failed')
print(80 * '_')
print(e)
if cache is None:
if download_if_missing:
cache = download_20newsgroups(target_dir=twenty_home,
cache_path=cache_path)
else:
raise IOError('20Newsgroups dataset not found')
if subset in ('train', 'test'):
data = cache[subset]
elif subset == 'all':
data_lst = list()
target = list()
filenames = list()
for subset in ('train', 'test'):
data = cache[subset]
data_lst.extend(data.data)
target.extend(data.target)
filenames.extend(data.filenames)
data.data = data_lst
data.target = np.array(target)
data.filenames = np.array(filenames)
else:
raise ValueError(
"subset can only be 'train', 'test' or 'all', got '%s'" % subset)
data.description = 'the 20 newsgroups by date dataset'
if 'headers' in remove:
data.data = [strip_newsgroup_header(text) for text in data.data]
if 'footers' in remove:
data.data = [strip_newsgroup_footer(text) for text in data.data]
if 'quotes' in remove:
data.data = [strip_newsgroup_quoting(text) for text in data.data]
if categories is not None:
labels = [(data.target_names.index(cat), cat) for cat in categories]
# Sort the categories to have the ordering of the labels
labels.sort()
labels, categories = zip(*labels)
mask = np.in1d(data.target, labels)
data.filenames = data.filenames[mask]
data.target = data.target[mask]
# searchsorted to have continuous labels
data.target = np.searchsorted(labels, data.target)
data.target_names = list(categories)
# Use an object array to shuffle: avoids memory copy
data_lst = np.array(data.data, dtype=object)
data_lst = data_lst[mask]
data.data = data_lst.tolist()
if shuffle:
random_state = check_random_state(random_state)
indices = np.arange(data.target.shape[0])
random_state.shuffle(indices)
data.filenames = data.filenames[indices]
data.target = data.target[indices]
# Use an object array to shuffle: avoids memory copy
data_lst = np.array(data.data, dtype=object)
data_lst = data_lst[indices]
data.data = data_lst.tolist()
return data
def fetch_20newsgroups_vectorized(subset="train", remove=(), data_home=None):
"""Load the 20 newsgroups dataset and transform it into tf-idf vectors.
This is a convenience function; the tf-idf transformation is done using the
default settings for `sklearn.feature_extraction.text.Vectorizer`. For more
advanced usage (stopword filtering, n-gram extraction, etc.), combine
fetch_20newsgroups with a custom `Vectorizer` or `CountVectorizer`.
Read more in the :ref:`User Guide <20newsgroups>`.
Parameters
----------
subset: 'train' or 'test', 'all', optional
Select the dataset to load: 'train' for the training set, 'test'
for the test set, 'all' for both, with shuffled ordering.
data_home: optional, default: None
Specify a download and cache folder for the datasets. If None,
all scikit-learn data is stored in '~/scikit_learn_data' subfolders.
remove: tuple
May contain any subset of ('headers', 'footers', 'quotes'). Each of
these are kinds of text that will be detected and removed from the
newsgroup posts, preventing classifiers from overfitting on
metadata.
'headers' removes newsgroup headers, 'footers' removes blocks at the
ends of posts that look like signatures, and 'quotes' removes lines
that appear to be quoting another post.
Returns
-------
bunch : Bunch object
bunch.data: sparse matrix, shape [n_samples, n_features]
bunch.target: array, shape [n_samples]
bunch.target_names: list, length [n_classes]
"""
data_home = get_data_home(data_home=data_home)
filebase = '20newsgroup_vectorized'
if remove:
filebase += 'remove-' + ('-'.join(remove))
target_file = _pkl_filepath(data_home, filebase + ".pkl")
# we shuffle but use a fixed seed for the memoization
data_train = fetch_20newsgroups(data_home=data_home,
subset='train',
categories=None,
shuffle=True,
random_state=12,
remove=remove)
data_test = fetch_20newsgroups(data_home=data_home,
subset='test',
categories=None,
shuffle=True,
random_state=12,
remove=remove)
if os.path.exists(target_file):
X_train, X_test = joblib.load(target_file)
else:
vectorizer = CountVectorizer(dtype=np.int16)
X_train = vectorizer.fit_transform(data_train.data).tocsr()
X_test = vectorizer.transform(data_test.data).tocsr()
joblib.dump((X_train, X_test), target_file, compress=9)
# the data is stored as int16 for compactness
# but normalize needs floats
X_train = X_train.astype(np.float64)
X_test = X_test.astype(np.float64)
normalize(X_train, copy=False)
normalize(X_test, copy=False)
target_names = data_train.target_names
if subset == "train":
data = X_train
target = data_train.target
elif subset == "test":
data = X_test
target = data_test.target
elif subset == "all":
data = sp.vstack((X_train, X_test)).tocsr()
target = np.concatenate((data_train.target, data_test.target))
else:
raise ValueError("%r is not a valid subset: should be one of "
"['train', 'test', 'all']" % subset)
return Bunch(data=data, target=target, target_names=target_names)
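# Illustrative usage sketch (not part of the original module; shapes depend
# on the downloaded data):
#
#     bunch = fetch_20newsgroups_vectorized(subset='train',
#                                           remove=('headers', 'footers'))
#     print(bunch.data.shape)         # sparse matrix, [n_samples, n_features]
#     print(len(bunch.target_names))  # the 20 newsgroup category names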
| mit |
LevinJ/Supply-demand-forecasting | exploredata/weather.py | 1 | 4102 | from utility.datafilepath import g_singletonDataFilePath
from timeslot import singletonTimeslot
import pandas as pd
import os
from exploredata import ExploreData
from time import time
from utility.dumpload import DumpLoad
from scipy.stats import mode
class ExploreWeather(ExploreData ):
def __init__(self):
return
def run(self):
self.__unittest()
data_dir = g_singletonDataFilePath.getTest2Dir()
# self.save_all_csv( data_dir+ 'weather_data/')
# self.combine_all_csv(data_dir + 'weather_data/temp/', 'weather_', 'weather.csv')
self.get_weather_dict(data_dir)
return
def __unittest(self):
# self.combine_all_csv(g_singletonDataFilePath.getTrainDir() + 'weather_data/temp/', 'weather_', 'weather.csv')
# self.save_one_csv(g_singletonDataFilePath.getTrainDir() + 'weather_data/weather_data_2016-01-02')
# weatherdf = self.load_weatherdf(g_singletonDataFilePath.getTrainDir())
weather_dict = self.get_weather_dict(g_singletonDataFilePath.getTrainDir())
assert 2== self.find_prev_weather_mode('2016-01-01-1', weather_dict = weather_dict)[0]
assert 2== self.find_prev_weather_mode('2016-01-21-144', weather_dict = weather_dict)[0]
#
assert 2== self.find_prev_weather_mode('2016-01-21-115', weather_dict = weather_dict)[0]
assert 2== self.find_prev_weather_mode('2016-01-21-114', weather_dict = weather_dict)[0]
print 'passed unit test'
return
def get_weather_dict(self,data_dir):
t0 = time()
filename = '../data_raw/' + data_dir.split('/')[-2] + '_weather.csv.dict.pickle'
dumpload = DumpLoad( filename)
if dumpload.isExisiting():
return dumpload.load()
resDict = {}
df = self.load_weatherdf(data_dir)
for index, row in df.iterrows():
resDict[row['time_slotid']] = (index, row['weather'], row['temparature'], row['pm25'])
for name, group in df.groupby('time_date'):
resDict[name] = (-1, mode(group['weather'])[0][0], mode(group['temparature'])[0][0], mode(group['pm25'])[0][0])
dumpload.dump(resDict)
print "dump weather dict:", round(time()-t0, 3), "s"
return resDict
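    # Sketch of the returned mapping (illustrative values; keys follow the
    # time_slotid / time_date columns used above):
    #
    #     weather_dict = self.get_weather_dict(data_dir)
    #     weather_dict['2016-01-01-1']  # (row_index, weather, temparature, pm25)
    #     weather_dict['2016-01-01']    # (-1, daily mode of the same columns)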
def process_all_df(self, df):
self.add_timeid_col(df)
self.add_timedate_col(df)
self.sort_by_time(df)
return
def get_intial_colnames(self):
return ['Time','weather','temparature', 'pm25']
def load_weatherdf(self, dataDir):
filename = dataDir + 'weather_data/temp/weather.csv'
return pd.read_csv(filename, index_col= 0)
def is_first_record(self, weather_dict, time_slotid):
try:
res = weather_dict[time_slotid]
if (res[0] == 0):
return True
except:
pass
return False
def find_prev_weather(self, time_slotid, weather_dict=None,):
if self.is_first_record(weather_dict, time_slotid):
return pd.Series([0], index = ['preweather'])
current_slot = time_slotid
while(True):
res = singletonTimeslot.getPrevSlots(current_slot, 1)
current_slot = res[0]
try:
res = weather_dict[current_slot]
return pd.Series([res[1]], index = ['preweather'])
except:
pass
return
def find_prev_weather_mode(self, time_slotid, weather_dict=None,):
try:
prev_slot = singletonTimeslot.getPrevSlots(time_slotid, 1)[0]
res = weather_dict[prev_slot]
except:
current_date = singletonTimeslot.getDate(time_slotid)
res = weather_dict[current_date]
return pd.Series([res[1]], index = ['preweather'])
def process_one_df(self, df):
        # Remove duplicate time_slotid rows, retain the earlier ones
df.drop_duplicates(subset='time_slotid', keep='first', inplace=True)
return
if __name__ == "__main__":
obj= ExploreWeather()
obj.run() | mit |
pyReef-model/pyReefCore | pyReefCore/forcing/enviForce.py | 1 | 20710 | ##~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~##
## ##
## This file forms part of the pyReefCore synthetic coral reef core model app. ##
## ##
## For full license and copyright information, please refer to the LICENSE.md file ##
## located at the project root, or contact the authors. ##
## ##
##~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~#~##
"""
This module defines several functions used to force pyReefCore simulation with external
processes related to sediment input, flow velocity and sea level.
"""
import warnings
import os
import numpy
import pandas
import skfuzzy as fuzz
from scipy import interpolate
from scipy.optimize import curve_fit
from scipy.optimize import OptimizeWarning
class enviForce:
"""
This class defines external forcing parameters.
"""
def __init__(self, input):
"""
Constructor.
Parameters
----------
class: input
Input parameter class.
"""
self.sea0 = input.seaval
self.seafile = input.seafile
self.sealevel = None
self.seatime = None
self.seaFunc = None
self.tempfile = input.tempfile
self.templevel = None
self.temptime = None
self.tempFunc = None
self.pHfile = input.pHfile
self.pHlevel = None
self.pHtime = None
self.pHFunc = None
self.nufile = input.nufile
self.nulevel = None
self.nutime = None
self.nuFunc = None
self.tec0 = input.tecval
self.tecfile = input.tecfile
self.tecrate = None
self.tectime = None
self.tecFunc = None
self.sed0 = input.sedval
self.sedfile = input.sedfile
self.sedlevel = None
self.sedtime = None
self.sedFunc = None
self.sedopt = None
self.sedlin = None
self.sedfct = False
self.plotsedx = None
self.plotsedy = None
self.flow0 = input.flowval
self.flowfile = input.flowfile
self.flowlevel = None
self.flowtime = None
self.flowFunc = None
self.flowopt = None
self.flowlin = None
self.flowfct = False
self.plotflowx = None
self.plotflowy = None
if self.seafile is not None:
self._build_Sea_function()
if self.tecfile is not None:
self._build_Tec_function()
if self.sedfile is not None:
self._build_Sed_function()
if self.flowfile is not None:
self._build_Flow_function()
if self.tempfile is not None:
self._build_Temp_function()
if self.pHfile is not None:
self._build_pH_function()
if self.nufile is not None:
self._build_nu_function()
if input.flowfunc is not None:
self.flowfct = True
if input.flowdecay is not None:
yf = input.flowdecay[0,:]
xf = input.flowdecay[1,:]
self.xflow = xf
self.yflow = yf
warnings.filterwarnings('ignore', category=OptimizeWarning)
popt, pcov = curve_fit(self._expdecay_func, xf, yf)
self.flowopt = popt
self.plotflowx = numpy.linspace(0., xf.max(), 100)
self.plotflowy = self._expdecay_func(self.plotflowx, *popt)
self.plotflowy[self.plotflowy<0]=0.
else:
self.flowlin = [input.flowlina,input.flowlinb]
self.plotflowx = numpy.linspace(0, input.flowdepth, 100)
self.plotflowy = self.flowlin[0]*self.plotflowx+self.flowlin[1] #(self.plotflowx-self.flowlin[1])/self.flowlin[0]
self.plotflowy[self.plotflowy<0]=0.
if input.sedfunc is not None:
self.sedfct = True
if input.seddecay is not None:
y = input.seddecay[0,:]
x = input.seddecay[1,:]
warnings.filterwarnings('ignore', category=OptimizeWarning)
popt, pcov = curve_fit(self._expdecay_func, x, y)
self.sedopt = popt
self.plotsedx = numpy.linspace(0, x.max(), 100)
self.plotsedy = self._expdecay_func(self.plotsedx, *popt)
self.plotsedy[self.plotsedy<0]=0.
else:
self.sedlin = [input.sedlina,input.sedlinb]
self.plotsedx = numpy.linspace(0, input.seddepth, 100)
self.plotsedy = input.sedlina*self.plotsedx+input.sedlinb #(self.plotsedx-self.sedlin[1])/self.sedlin[0]
self.plotsedy[self.plotsedy<0]=0.
# Shape functions
self.edepth = None
self.xd = None
self.dtrap = []
if input.seaOn and input.enviDepth is None:
input.seaOn = False
if input.seaOn:
self.edepth = input.enviDepth
            # Trapezoidal environment depth production curve
self.xd = numpy.linspace(0, self.edepth.max(), num=1001, endpoint=True)
for s in range(input.speciesNb):
self.dtrap.append(fuzz.trapmf(self.xd, self.edepth[s,:]))
self.speciesNb = input.speciesNb
self.eflow = None
self.xf = None
self.ftrap = []
if input.flowOn and input.enviFlow is None:
input.flowOn = False
if input.flowOn:
self.eflow = input.enviFlow
            # Trapezoidal environment flow production curve
self.xf = numpy.linspace(0, self.eflow.max(), num=1001, endpoint=True)
for s in range(input.speciesNb):
self.ftrap.append(fuzz.trapmf(self.xf, self.eflow[s,:]))
self.esed = None
self.xs = None
self.strap = []
if input.sedOn and input.enviSed is None:
input.sedOn = False
if input.sedOn:
self.esed = input.enviSed
            # Trapezoidal environment sediment production curve
self.xs = numpy.linspace(0, self.esed.max(), num=1001, endpoint=True)
for s in range(input.speciesNb):
self.strap.append(fuzz.trapmf(self.xs, self.esed[s,:]))
return
def _expdecay_func(self, x, a, b, c):
return a*numpy.exp(-b*x) + c
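    # Minimal sketch of how the decay profile is fitted in __init__ above
    # (hypothetical sample points):
    #
    #     x = numpy.array([0., 5., 10., 20.])
    #     y = numpy.array([1., 0.6, 0.35, 0.1])
    #     popt, pcov = curve_fit(self._expdecay_func, x, y)  # popt -> (a, b, c)
    #     profile = self._expdecay_func(numpy.linspace(0., 20., 100), *popt)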
def _extract_enviParam(self, x, xmf, xx):
"""
Find the degree of membership ``u(xx)`` for a given value of ``x = xx``.
"""
# Nearest discrete x-values
x1 = x[x <= xx][-1]
x2 = x[x >= xx][0]
idx1 = numpy.nonzero(x == x1)[0][0]
idx2 = numpy.nonzero(x == x2)[0][0]
xmf1 = xmf[idx1]
xmf2 = xmf[idx2]
if x1 == x2:
xxmf = xmf[idx1]
else:
slope = (xmf2 - xmf1) / float(x2 - x1)
xxmf = slope * (xx - x1) + xmf1
return xxmf
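    # Worked example (illustrative numbers): with x = [0., 1., 2.] and
    # xmf = [0., 1., 0.], a query at xx = 0.25 falls between x1 = 0 and
    # x2 = 1, so the returned membership is
    # 0. + (1. - 0.) / (1. - 0.) * (0.25 - 0.) = 0.25.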
def _build_Sea_function(self):
"""
Using Pandas library to read the sea level file and define sea level interpolation
        function based on Scipy 1D linear interpolation.
"""
# Read sea level file
seadata = pandas.read_csv(self.seafile, sep=r'\s+', engine='c',
header=None, na_filter=False,
dtype=numpy.float, low_memory=False)
self.seatime = seadata.values[:,0]
tmp = seadata.values[:,1]
self.seaFunc = interpolate.interp1d(self.seatime, tmp, kind='linear')
return
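    # The forcing files read by the _build_*_function helpers are plain text
    # with two whitespace-separated columns, time and value, e.g.
    # (hypothetical values):
    #
    #     0.        0.0
    #     5000.   -10.0
    #     10000.  -20.0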
def _build_Temp_function(self):
"""
Using Pandas library to read the temperature file and define temperature interpolation
        function based on Scipy 1D linear interpolation.
"""
# Read temperature file
tempdata = pandas.read_csv(self.tempfile, sep=r'\s+', engine='c',
header=None, na_filter=False,
dtype=numpy.float, low_memory=False)
self.temptime = tempdata.values[:,0]
tmp = tempdata.values[:,1]
if tmp.max()>1.:
            raise ValueError(
                'The temperature function must take values between 0 and 1.')
if tmp.min()<0.:
            raise ValueError(
                'The temperature function must take values between 0 and 1.')
self.tempFunc = interpolate.interp1d(self.temptime, tmp, kind='linear')
return
def _build_pH_function(self):
"""
Using Pandas library to read the pH file and define pH interpolation
        function based on Scipy 1D linear interpolation.
"""
# Read pH file
pHdata = pandas.read_csv(self.pHfile, sep=r'\s+', engine='c',
header=None, na_filter=False,
dtype=numpy.float, low_memory=False)
self.pHtime = pHdata.values[:,0]
tmp = pHdata.values[:,1]
if tmp.max()>1.:
            raise ValueError(
                'The pH function must take values between 0 and 1.')
if tmp.min()<0.:
            raise ValueError(
                'The pH function must take values between 0 and 1.')
self.pHFunc = interpolate.interp1d(self.pHtime, tmp, kind='linear')
return
def _build_nu_function(self):
"""
Using Pandas library to read the nutrients file and define nutrients interpolation
        function based on Scipy 1D linear interpolation.
"""
# Read nutrients file
nudata = pandas.read_csv(self.nufile, sep=r'\s+', engine='c',
header=None, na_filter=False,
dtype=numpy.float, low_memory=False)
self.nutime = nudata.values[:,0]
tmp = nudata.values[:,1]
if tmp.max()>1.:
            raise ValueError(
                'The nutrient function must take values between 0 and 1.')
if tmp.min()<0.:
            raise ValueError(
                'The nutrient function must take values between 0 and 1.')
self.nuFunc = interpolate.interp1d(self.nutime, tmp, kind='linear')
return
def _build_Tec_function(self):
"""
Using Pandas library to read the tectonic file and define tectonic interpolation
        function based on Scipy 1D linear interpolation.
"""
# Read tectonic file
tecdata = pandas.read_csv(self.tecfile, sep=r'\s+', engine='c',
header=None, na_filter=False,
dtype=numpy.float, low_memory=False)
self.tectime = tecdata.values[:,0]
tmp = tecdata.values[:,1]
self.tecFunc = interpolate.interp1d(self.tectime, tmp, kind='linear')
return
def _build_Sed_function(self):
"""
Using Pandas library to read the sediment input file and define interpolation
        function based on Scipy 1D linear interpolation.
"""
        # Read sediment input file
seddata = pandas.read_csv(self.sedfile, sep=r'\s+', engine='c',
header=None, na_filter=False,
dtype=numpy.float, low_memory=False)
self.sedtime = seddata.values[:,0]
tmp = seddata.values[:,1]
self.sedFunc = interpolate.interp1d(self.sedtime, tmp, kind='linear')
return
def _build_Flow_function(self):
"""
Using Pandas library to read the flow velocity file and define interpolation
function based on Scipy 1D cubic function.
"""
        # Read flow velocity file
flowdata = pandas.read_csv(self.flowfile, sep=r'\s+', engine='c',
header=None, na_filter=False,
dtype=numpy.float, low_memory=False)
self.flowtime = flowdata.values[:,0]
tmp = flowdata.values[:,1]
self.flowFunc = interpolate.interp1d(self.flowtime, tmp, kind='cubic')
return
def getSea(self, time, top):
"""
Computes for a given time the sea level according to input file parameters.
Parameters
----------
float : time
Requested time for which to compute sea level elevation.
float : top
Elevation of the core.
"""
oldsea = self.sealevel
if self.seafile is None:
self.sealevel = self.sea0
else:
if time < self.seatime.min():
time = self.seatime.min()
if time > self.seatime.max():
time = self.seatime.max()
self.sealevel = self.seaFunc(time)
        if oldsea is None:
depth = top
else:
depth = top+(self.sealevel-oldsea)
factors = numpy.ones(self.speciesNb,dtype=float)
for s in range(self.speciesNb):
if depth<self.xd[0] and self.edepth[s,1] == self.edepth[s,0]:
factors[s] = 1.
elif depth<self.xd[0] and self.edepth[s,1] != self.edepth[s,0]:
factors[s] = 0.
elif depth>self.xd[-1] and self.edepth[s,2] == self.edepth[s,3]:
factors[s] = 1.
elif depth>self.xd[-1] and self.edepth[s,2] != self.edepth[s,3]:
factors[s] = 0.
else:
factors[s] = self._extract_enviParam( self.xd, self.dtrap[s], depth )
return depth,factors
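    # Usage sketch (hypothetical caller code): the returned factors hold one
    # trapezoidal membership value in [0, 1] per species, which the caller can
    # use to modulate growth, e.g.
    #
    #     depth, factors = enviforce.getSea(time, top)
    #     growth = max_growth * factors  # element-wise, one value per species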
def getTemp(self, time):
"""
Computes for a given time the temperature according to input file parameters.
Parameters
----------
float : time
Requested time for which to compute temperature.
"""
factors = numpy.ones(self.speciesNb,dtype=float)
if self.tempfile is None:
self.templevel = 1.
else:
if time < self.temptime.min():
time = self.temptime.min()
if time > self.temptime.max():
time = self.temptime.max()
self.templevel = self.tempFunc(time)
for s in range(self.speciesNb):
factors[s] = self.templevel
return factors
def getpH(self, time):
"""
Computes for a given time the pH according to input file parameters.
Parameters
----------
float : time
Requested time for which to compute pH.
"""
factors = numpy.ones(self.speciesNb,dtype=float)
if self.pHfile is None:
self.pHlevel = 1.
else:
if time < self.pHtime.min():
time = self.pHtime.min()
if time > self.pHtime.max():
time = self.pHtime.max()
self.pHlevel = self.pHFunc(time)
for s in range(self.speciesNb):
factors[s] = self.pHlevel
return factors
def getNu(self, time):
"""
Computes for a given time the nutrients according to input file parameters.
Parameters
----------
float : time
Requested time for which to compute nutrients.
"""
factors = numpy.ones(self.speciesNb,dtype=float)
if self.nufile is None:
self.nulevel = 1.
else:
if time < self.nutime.min():
time = self.nutime.min()
if time > self.nutime.max():
time = self.nutime.max()
self.nulevel = self.nuFunc(time)
for s in range(self.speciesNb):
factors[s] = self.nulevel
return factors
def getTec(self, time, otime, top):
"""
Computes for a given time the tectonic rate according to input file parameters.
Parameters
----------
float : time
Requested time for which to compute tectonic rate.
float : otime
Previous time used to compute tectonic rate.
float : top
Elevation of the core.
"""
if self.tecfile is None:
self.tecrate = self.tec0
else:
if time < self.tectime.min():
time = self.tectime.min()
if time > self.tectime.max():
time = self.tectime.max()
self.tecrate = self.tecFunc(time)
if otime == time:
depth = top
else:
depth = top-(self.tecrate*(time-otime))
factors = numpy.ones(self.speciesNb,dtype=float)
for s in range(self.speciesNb):
if depth<self.xd[0] and self.edepth[s,1] == self.edepth[s,0]:
factors[s] = 1.
elif depth<self.xd[0] and self.edepth[s,1] != self.edepth[s,0]:
factors[s] = 0.
elif depth>self.xd[-1] and self.edepth[s,2] == self.edepth[s,3]:
factors[s] = 1.
elif depth>self.xd[-1] and self.edepth[s,2] != self.edepth[s,3]:
factors[s] = 0.
else:
factors[s] = self._extract_enviParam( self.xd, self.dtrap[s], depth )
return depth,factors
def getSed(self, time, elev):
"""
Computes for a given time the sediment input according to input file parameters.
Parameters
----------
float : time
Requested time for which to compute sediment input.
float : elev
Elevation of the bed.
"""
if self.sedfct:
if self.plotsedx.max()<elev:
self.sedlevel = 0.
elif self.plotsedx.min()>elev:
self.sedlevel = 0.
elif self.sedlin is None:
self.sedlevel = self._expdecay_func(elev,*self.sedopt)
else:
self.sedlevel = self.sedlin[0]*elev+self.sedlin[1]
if self.sedlevel<0:
self.sedlevel = 0.
        elif self.sedfile is None:
self.sedlevel = self.sed0
else:
if time < self.sedtime.min():
time = self.sedtime.min()
if time > self.sedtime.max():
time = self.sedtime.max()
self.sedlevel = self.sedFunc(time)
factors = numpy.ones(self.speciesNb,dtype=float)
for s in range(self.speciesNb):
if self.sedlevel<self.xs[0] and self.esed[s,1] == self.esed[s,0]:
factors[s] = 1.
elif self.sedlevel<self.xs[0] and self.esed[s,1] != self.esed[s,0]:
factors[s] = 0.
elif self.sedlevel>self.xs[-1] and self.esed[s,2] == self.esed[s,3]:
factors[s] = 1.
elif self.sedlevel>self.xs[-1] and self.esed[s,2] != self.esed[s,3]:
factors[s] = 0.
else:
factors[s] = self._extract_enviParam( self.xs, self.strap[s], self.sedlevel )
return self.sedlevel,factors
def getFlow(self, time, elev):
"""
Computes for a given time the flow velocity according to input file parameters.
Parameters
----------
float : time
Requested time for which to compute flow velocity value.
float : elev
Elevation of the bed.
"""
if self.flowfct:
if self.plotflowx.max()<elev:
self.flowlevel = 0.
elif self.plotflowx.min()>elev:
self.flowlevel = 0.
elif self.flowlin is None:
self.flowlevel = self._expdecay_func(elev,*self.flowopt)
else:
self.flowlevel = self.flowlin[0]*elev+self.flowlin[1]
if self.flowlevel<0.:
self.flowlevel = 0.
        elif self.flowfile is None:
self.flowlevel = self.flow0
else:
if time < self.flowtime.min():
time = self.flowtime.min()
if time > self.flowtime.max():
time = self.flowtime.max()
self.flowlevel = self.flowFunc(time)
factors = numpy.ones(self.speciesNb,dtype=float)
for s in range(self.speciesNb):
if self.flowlevel<self.xf[0] and self.eflow[s,1] == self.eflow[s,0]:
factors[s] = 1.
elif self.flowlevel<self.xf[0] and self.eflow[s,1] != self.eflow[s,0]:
factors[s] = 0.
elif self.flowlevel>self.xf[-1] and self.eflow[s,2] == self.eflow[s,3]:
factors[s] = 1.
elif self.flowlevel>self.xf[-1] and self.eflow[s,2] != self.eflow[s,3]:
factors[s] = 0.
else:
factors[s] = self._extract_enviParam( self.xf, self.ftrap[s], self.flowlevel )
return factors
| gpl-3.0 |
james4424/nest-simulator | extras/ConnPlotter/ConnPlotter.py | 19 | 83508 | # -*- coding: utf-8 -*-
#
# ConnPlotter.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
# ConnPlotter --- A Tool to Generate Connectivity Pattern Matrices
"""
ConnPlotter is a tool to create connectivity pattern tables.
For background on ConnPlotter, please see
Eilen Nordlie and Hans Ekkehard Plesser.
Connection Pattern Tables: A new way to visualize connectivity
in neuronal network models.
Frontiers in Neuroinformatics 3:39 (2010)
doi: 10.3389/neuro.11.039.2009
Example:
# code creating population and connection lists
from ConnPlotter import ConnectionPattern, SynType
# Case A: All connections have the same "synapse_model".
#
    # Connections with weight > 0 are classified as excitatory,
    # weight < 0 are classified as inhibitory.
# Each sender must make either excitatory or inhibitory connection,
# not both. When computing totals, excit/inhib connections are
# weighted with +-1.
pattern = ConnectionPattern(layerList, connList)
# Case B: All connections have the same "synapse_model", but violate Dale's law
#
    # Connections with weight > 0 are classified as excitatory,
    # weight < 0 are classified as inhibitory.
# A single sender may have excitatory and inhibitory connections.
# When computing totals, excit/inhib connections are
# weighted with +-1.
pattern = ConnectionPattern(layerList, connList,
synTypes=(((SynType('exc', 1.0, 'b'),
SynType('inh', -1.0, 'r')),)))
# Case C: Synapse models are "AMPA", "NMDA", "GABA_A", "GABA_B".
#
# Connections are plotted by synapse model, with AMPA and NMDA
# on the top row, GABA_A and GABA_B in the bottom row when
# combining by layer. Senders must either have AMPA and NMDA or
# GABA_A and GABA_B synapses, but not both. When computing totals,
# AMPA and NMDA connections are weighted with +1, GABA_A and GABA_B
# with -1.
pattern = ConnectionPattern(layerList, connList)
# Case D: Explicit synapse types.
#
# If your network model uses other synapse types, or you want to use
# other weighting factors when computing totals, or you want different
# colormaps, you must specify synapse type information explicitly for
# ALL synase models in your network. For each synapse model, you create
# a
#
# SynType(name, tweight, cmap)
#
# object, where "name" is the synapse model name, "tweight" the weight
# to be given to the type when computing totals (usually >0 for excit,
# <0 for inhib synapses), and "cmap" the "colormap": if may be a
# matplotlib.colors.Colormap instance or any valid matplotlib color
# specification; in the latter case, as colormap will be generated
# ranging from white to the given color.
# Synapse types are passed as a tuple of tuples. Synapses in a tuple form
# a group. ConnPlotter assumes that a sender may make synapses with all
# types in a single group, but never synapses with types from different
# groups (If you group by transmitter, this simply reflects Dale's law).
# When connections are aggregated by layer, each group is printed on one
# row.
pattern = ConnectionPattern(layerList, connList, synTypes = \
((SynType('Asyn', 1.0, 'orange'),
SynType('Bsyn', 2.5, 'r'),
SynType('Csyn', 0.5, (1.0, 0.5, 0.0))), # end first group
(SynType('Dsyn', -1.5, matplotlib.pylab.cm.jet),
SynType('Esyn', -3.2, '0.95'))))
# See documentation of class ConnectionPattern for more options.
# plotting the pattern
# show connection kernels for all sender-target pairs and all synapse models
pattern.plot()
# combine synapses of all types for each sender-target pair
    # always uses red-blue (inhib-excit) color scale
pattern.plot(aggrSyns=True)
    # for each sender-target layer pair, show sums for each synapse type
    pattern.plot(aggrGroups=True)
    # As aggrGroups above, but additionally combine synapse types;
    # always uses red-blue (inhib-excit) color scale
    pattern.plot(aggrSyns=True, aggrGroups=True)
    # Show only synapses of the selected type(s)
pattern.plot(mode=('AMPA',))
pattern.plot(mode=('AMPA', 'GABA_A'))
# use same color scales for all patches
pattern.plot(globalColors=True)
# manually specify limits for global color scale
pattern.plot(globalColors=True, colorLimits=[0, 2.5])
# save to file(s)
# NB: do not write to PDF directly, this seems to cause artifacts
pattern.plot(file='net.png')
pattern.plot(file=('net.eps','net.png'))
# You can adjust some properties of the figure by changing the
# default values in plotParams.
# Experimentally, you can dump the connection pattern into a LaTeX table
pattern.toLaTeX('pattern.tex', standalone=True)
# Figure layout can be modified by changing the global variable plotParams.
# Please see the documentation for class PlotParams for details.
# Changes 30 June 2010:
# - Singular layers (extent 0x0) are ignored as target layers.
# The reason for this is so that single-generator "layers" can be
# displayed as input.
# Problems:
# - singularity is not made clear visually
# - This messes up the diagonal shading
# - makes no sense to aggregate any longer
"""
# ----------------------------------------------------------------------------
from . import colormaps as cm
import matplotlib.pyplot as plt
import matplotlib as mpl
import numpy as np
import warnings
__all__ = ['ConnectionPattern', 'SynType', 'plotParams', 'PlotParams']
# ----------------------------------------------------------------------------
# To do:
# - proper testsuite
# - layers of different sizes not handled properly
# (find biggest layer extent in each direction, then center;
# may run into problems with population label placement)
# - clean up main
# - color bars
# - "bad color" should be configurable
# - fix hack for colormaps import
# - use generators where possible (eg kernels?)
# ----------------------------------------------------------------------------
class SynType(object):
"""
Provide information about how synapse types should be rendered.
A singly nested list of SynType objects can be passed to the
ConnectionPattern constructor to specify layout and rendering info.
"""
def __init__(self, name, relweight, cmap):
"""
Arguments:
name Name of synapse type (string, must be unique)
relweight Relative weight of synapse type when aggregating
across synapse types. Should be negative for inhibitory
connections.
cmap Either a matplotlib.colors.Colormap instance or a
color specification. In the latter case, the colormap
will be built from white to the color given. Thus,
the color should be fully saturated. Colormaps should
have "set_bad(color='white')".
"""
self.name, self.relweight = name, relweight
if isinstance(cmap, mpl.colors.Colormap):
self.cmap = cmap
else:
self.cmap = cm.make_colormap(cmap)
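# Construction sketch (mirrors Case D in the module docstring):
#
#     exc = SynType('AMPA', 1.0, 'red')   # colormap built from white to red
#     inh = SynType('GABA_A', -1.0, matplotlib.pylab.cm.jet)  # explicit colormap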
# ----------------------------------------------------------------------------
class PlotParams(object):
"""
Collects parameters governing plotting.
Implemented using properties to ensure they are read-only.
"""
class Margins(object):
"""Width of outer margins, in mm."""
def __init__(self):
"""Set default values."""
self._left = 15.0
self._right = 10.0
self._top = 10.0
self._bottom = 10.0
self._colbar = 10.0
@property
def left(self):
return self._left
@left.setter
def left(self, l):
self._left = float(l)
@property
def right(self):
return self._right
@right.setter
def right(self, r):
self._right = float(r)
@property
def top(self):
return self._top
@top.setter
def top(self, t):
self._top = float(t)
@property
def bottom(self):
return self._bottom
@bottom.setter
def bottom(self, b):
self._bottom = float(b)
@property
def colbar(self):
return self._colbar
@colbar.setter
def colbar(self, b):
self._colbar = float(b)
def __init__(self):
"""Set default values"""
self._n_kern = 100
self._patch_size = 20.0 # 20 mm
self._layer_bg = {'super': '0.9', 'diag': '0.8', 'sub': '0.9'}
self._layer_font = mpl.font_manager.FontProperties(size='large')
self._layer_orient = {'sender': 'horizontal', 'target': 'horizontal'}
self._pop_font = mpl.font_manager.FontProperties(size='small')
self._pop_orient = {'sender': 'horizontal', 'target': 'horizontal'}
self._lgd_tick_font = mpl.font_manager.FontProperties(size='x-small')
self._lgd_title_font = mpl.font_manager.FontProperties(size='xx-small')
self._lgd_ticks = None
self._lgd_tick_fmt = None
self._lgd_location = None
self._cbwidth = None
self._cbspace = None
self._cbheight = None
self._cboffset = None
self._z_layer = 25
self._z_pop = 50
self._z_conn = 100
self.margins = self.Margins()
def reset(self):
"""
Reset to default values.
"""
self.__init__()
@property
def n_kern(self):
"""Sample long kernel dimension at N_kernel points."""
return self._n_kern
@n_kern.setter
def n_kern(self, n):
if n <= 0:
raise ValueError('n_kern > 0 required')
self._n_kern = n
@property
def patch_size(self):
"""Length of the longest edge of the largest patch, in mm."""
return self._patch_size
@patch_size.setter
def patch_size(self, sz):
if sz <= 0:
raise ValueError('patch_size > 0 required')
self._patch_size = sz
@property
def layer_bg(self):
"""
Dictionary of colors for layer background.
Entries "super", "diag", "sub". Each entry
can be set to any valid color specification.
If just a color is given, create dict by
brightening/dimming.
"""
return self._layer_bg
@layer_bg.setter
def layer_bg(self, bg):
if isinstance(bg, dict):
if set(bg.keys()) != set(('super', 'diag', 'sub')):
raise ValueError(
'Background dict must have keys "super", "diag", "sub"')
for bgc in bg.values():
if not mpl.colors.is_color_like(bgc):
raise ValueError('Entries in background dict must be ' +
'valid color specifications.')
self._layer_bg = bg
elif not mpl.colors.is_color_like(bg):
raise ValueError(
'layer_bg must be dict or valid color specification.')
else: # is color like
rgb = mpl.colors.colorConverter.to_rgb(bg)
self._layer_bg = {'super': [1.1 * c for c in rgb],
'diag': rgb,
'sub': [0.9 * c for c in rgb]}
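    # Example: a single color specification is expanded into the three-entry
    # dict by brightening/dimming, while a dict is used as given, e.g.
    #
    #     plotParams.layer_bg = '0.9'  # grey; 'super'/'diag'/'sub' derived
    #     plotParams.layer_bg = {'super': '0.95', 'diag': '0.85', 'sub': '0.95'}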
@property
def layer_font(self):
"""
Font to use for layer labels.
Can be set to a matplotlib.font_manager.FontProperties instance.
"""
return self._layer_font
@layer_font.setter
def layer_font(self, font):
if not isinstance(font, mpl.font_manager.FontProperties):
raise ValueError('layer_font must be a ' +
'matplotlib.font_manager.FontProperties instance')
self._layer_font = font
@property
def layer_orientation(self):
"""
Orientation of layer labels.
Dictionary with orientation of sender and target labels. Orientation
        is either 'horizontal', 'vertical', or a value in degrees. When set
to a single string or number, this value is used for both sender and
target labels.
"""
return self._layer_orient
@layer_orientation.setter
def layer_orientation(self, orient):
if isinstance(orient, (str, float, int)):
tmp = {'sender': orient, 'target': orient}
elif isinstance(orient, dict):
tmp = self._layer_orient
tmp.update(orient)
else:
raise ValueError(
                'Orientation must be set to a dict, string or number.')
if len(tmp) > 2:
raise ValueError('Orientation dictionary can only contain keys ' +
'"sender" and "target".')
self._layer_orient = tmp
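    # Example: a single value applies to both labels, a dict sets them
    # separately, e.g.
    #
    #     plotParams.layer_orientation = 'vertical'
    #     plotParams.layer_orientation = {'sender': 90, 'target': 'horizontal'}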
@property
def pop_font(self):
"""
Font to use for population labels.
Can be set to a matplotlib.font_manager.FontProperties instance.
"""
return self._pop_font
@pop_font.setter
def pop_font(self, font):
if not isinstance(font, mpl.font_manager.FontProperties):
raise ValueError('pop_font must be a ' +
'matplotlib.font_manager.FontProperties instance')
self._pop_font = font
@property
def pop_orientation(self):
"""
Orientation of population labels.
Dictionary with orientation of sender and target labels. Orientation
        is either 'horizontal', 'vertical', or a value in degrees. When set
to a single string or number, this value is used for both sender and
target labels.
"""
return self._pop_orient
@pop_orientation.setter
def pop_orientation(self, orient):
if isinstance(orient, (str, float, int)):
tmp = {'sender': orient, 'target': orient}
elif isinstance(orient, dict):
tmp = self._pop_orient
tmp.update(orient)
else:
raise ValueError(
                'Orientation must be set to a dict, string or number.')
if len(tmp) > 2:
raise ValueError('Orientation dictionary can only contain keys ' +
'"sender" and "target".')
self._pop_orient = tmp
@property
def legend_tick_font(self):
"""
FontProperties for legend (colorbar) ticks.
"""
return self._lgd_tick_font
@legend_tick_font.setter
def legend_tick_font(self, font):
if not isinstance(font, mpl.font_manager.FontProperties):
raise ValueError('legend_tick_font must be a ' +
'matplotlib.font_manager.FontProperties instance')
self._lgd_tick_font = font
@property
def legend_title_font(self):
"""
FontProperties for legend (colorbar) titles.
"""
return self._lgd_title_font
@legend_title_font.setter
def legend_title_font(self, font):
if not isinstance(font, mpl.font_manager.FontProperties):
raise ValueError('legend_title_font must be a ' +
'matplotlib.font_manager.FontProperties instance')
self._lgd_title_font = font
@property
def legend_ticks(self):
"""
Ordered list of values at which legend (colorbar) ticks shall be set.
"""
return self._lgd_ticks
@legend_ticks.setter
def legend_ticks(self, ticks):
self._lgd_ticks = ticks
@property
def legend_tick_format(self):
"""
C-style format string for legend (colorbar) tick marks.
"""
return self._lgd_tick_fmt
@legend_tick_format.setter
def legend_tick_format(self, tickfmt):
self._lgd_tick_fmt = tickfmt
@property
def legend_location(self):
"""
If set to 'top', place legend label above colorbar,
if None, to the left.
"""
return self._lgd_location
@legend_location.setter
def legend_location(self, loc):
self._lgd_location = loc
@property
def cbwidth(self):
"""
Width of single colorbar, relative to figure width.
"""
return self._cbwidth
@cbwidth.setter
def cbwidth(self, cbw):
self._cbwidth = cbw
@property
def cbheight(self):
"""
Height of colorbar, relative to margins.colbar
"""
return self._cbheight
@cbheight.setter
def cbheight(self, cbh):
self._cbheight = cbh
@property
def cbspace(self):
"""
Spacing between colorbars, relative to figure width.
"""
return self._cbspace
@cbspace.setter
def cbspace(self, cbs):
self._cbspace = cbs
@property
def cboffset(self):
"""
Left offset of colorbar, relative to figure width.
"""
return self._cboffset
@cboffset.setter
def cboffset(self, cbo):
self._cboffset = cbo
@property
def z_layer(self):
"""Z-value for layer label axes."""
return self._z_layer
@property
def z_pop(self):
"""Z-value for population label axes."""
return self._z_pop
@property
def z_conn(self):
"""Z-value for connection kernel axes."""
return self._z_conn
# ----------------------------------------------------------------------------
# plotting settings, default values
plotParams = PlotParams()
# ----------------------------------------------------------------------------
class ConnectionPattern(object):
"""
Connection pattern representation for plotting.
When a ConnectionPattern is instantiated, all connection kernels
are pre-computed. They can later be plotted in various forms by
calling the plot() method.
The constructor requires layer and connection lists:
ConnectionPattern(layerList, connList, synTypes, **kwargs)
The layerList is used to:
- determine the size of patches
- determine the block structure
All other information is taken from the connList. Information
about synapses is inferred from the connList.
The following keyword arguments can also be given:
poporder : Population order. A dictionary mapping population names
to numbers; populations will be sorted in diagram in order
of increasing numbers. Otherwise, they are sorted
alphabetically.
intensity: 'wp' - use weight * probability (default)
'p' - use probability alone
'tcd' - use total charge deposited * probability
requires mList and Vmem; per v 0.7 only supported
for ht_neuron.
mList : model list; required for 'tcd'
Vmem : membrane potential; required for 'tcd'
"""
# ------------------------------------------------------------------------
class _LayerProps(object):
"""
Information about layer.
"""
def __init__(self, name, extent):
"""
name : name of layer
extent: spatial extent of the layer
"""
self.name = name
self.ext = extent
self.singular = extent[0] == 0.0 and extent[1] == 0.0
# ------------------------------------------------------------------------
class _SynProps(object):
"""
Information on how to plot patches for a synapse type.
"""
def __init__(self, row, col, tweight, cmap, idx):
"""
row, col: Position of synapse in grid of synapse patches, from 0,0
tweight : weight used when adding kernels for different synapses
cmap : colormap for synapse type (matplotlib.colors.Colormap)
idx : linear index, used to order colorbars in figure
"""
self.r, self.c = row, col
self.tw = tweight
self.cmap = cmap
self.index = idx
# --------------------------------------------------------------------
class _PlotKern(object):
"""
Representing object ready for plotting.
"""
def __init__(self, sl, sn, tl, tn, syn, kern):
"""
sl : sender layer
sn : sender neuron/population
tl : target layer
tn : target neuron/population
syn : synapse model
kern: kernel values (numpy masked array)
All arguments but kern are strings.
"""
self.sl = sl
self.sn = sn
self.tl = tl
self.tn = tn
self.syn = syn
self.kern = kern
# ------------------------------------------------------------------------
class _Connection(object):
def __init__(self, conninfo, layers, synapses, intensity, tcd, Vmem):
"""
Arguments:
conninfo: list of connection info entries:
(sender,target,conn_dict)
layers : list of _LayerProps objects
synapses: list of _SynProps objects
intensity: 'wp', 'p', 'tcd'
tcd : tcd object
Vmem : reference membrane potential for tcd calculations
"""
self._intensity = intensity
# get source and target layer
self.slayer, self.tlayer = conninfo[:2]
lnames = [l.name for l in layers]
if self.slayer not in lnames:
raise Exception('Unknown source layer "%s".' % self.slayer)
if self.tlayer not in lnames:
raise Exception('Unknown target layer "%s".' % self.tlayer)
# if target layer is singular (extent==(0,0)),
# we do not create a full object
self.singular = False
for l in layers:
if l.name == self.tlayer and l.singular:
self.singular = True
return
# see if we connect to/from specific neuron types
cdict = conninfo[2]
if 'sources' in cdict:
if tuple(cdict['sources'].keys()) == ('model',):
self.snrn = cdict['sources']['model']
else:
raise ValueError(
'Can only handle sources in form {"model": ...}')
else:
self.snrn = None
if 'targets' in cdict:
if tuple(cdict['targets'].keys()) == ('model',):
self.tnrn = cdict['targets']['model']
else:
raise ValueError(
'Can only handle targets in form {"model": ...}')
else:
self.tnrn = None
# now get (mean) weight, we need this if we classify
# connections by sign of weight only
try:
self._mean_wght = _weighteval(cdict['weights'])
except:
raise ValueError('No or corrupt weight information.')
# synapse model
if sorted(synapses.keys()) == ['exc', 'inh']:
# implicit synapse type, we ignore value of
# 'synapse_model', it is for use by NEST only
if self._mean_wght >= 0:
self.synmodel = 'exc'
else:
self.synmodel = 'inh'
else:
try:
self.synmodel = cdict['synapse_model']
if self.synmodel not in synapses:
raise Exception('Unknown synapse model "%s".'
% self.synmodel)
except:
raise Exception('Explicit synapse model info required.')
# store information about connection
try:
self._mask = cdict['mask']
self._kern = cdict['kernel']
self._wght = cdict['weights']
# next line presumes only one layer name will match
self._textent = [tl.ext for tl in layers
if tl.name == self.tlayer][0]
if intensity == 'tcd':
self._tcd = tcd(self.synmodel, self.tnrn, Vmem)
else:
self._tcd = None
except:
raise Exception('Corrupt connection dictionary')
# prepare for lazy evaluation
self._kernel = None
# --------------------------------------------------------------------
@property
def keyval(self):
"""
Return key and _Connection as tuple.
Useful to create dictionary via list comprehension.
"""
if self.singular:
return (None, self)
else:
return ((self.slayer, self.snrn, self.tlayer,
self.tnrn, self.synmodel),
self)
# --------------------------------------------------------------------
@property
def kernval(self):
"""Kernel value, as masked array."""
if self._kernel is None:
self._kernel = _evalkernel(self._mask, self._kern,
self._mean_wght,
self._textent, self._intensity,
self._tcd)
return self._kernel
# --------------------------------------------------------------------
@property
def mask(self):
"""Dictionary describing the mask."""
return self._mask
# --------------------------------------------------------------------
@property
def kernel(self):
"""Dictionary describing the kernel."""
return self._kern
# --------------------------------------------------------------------
@property
def weight(self):
"""Dictionary describing weight distribution."""
return self._wght
# --------------------------------------------------------------------
def matches(self, sl=None, sn=None, tl=None, tn=None, syn=None):
"""
Return True if all non-None arguments match.
Arguments:
sl : sender layer
sn : sender neuron type
tl : target layer
tn : target neuron type
syn: synapse type
"""
return ((sl is None or sl == self.slayer) and
(sn is None or sn == self.snrn) and
(tl is None or tl == self.tlayer) and
(tn is None or tn == self.tnrn) and
(syn is None or syn == self.synmodel))
# ------------------------------------------------------------------------
class _Patch(object):
"""
Represents a patch, i.e., an axes that will actually contain an
imshow graphic of a connection kernel.
The patch object contains the physical coordinates of the patch,
as well as a reference to the actual Axes object once it is created.
Also contains strings to be used as sender/target labels.
Everything is based on a coordinate system looking from the top left
corner down.
"""
# --------------------------------------------------------------------
def __init__(self, left, top, row, col, width, height,
slabel=None, tlabel=None, parent=None):
"""
Arguments:
left, top : Location of top-left corner
row, col : row, column location in parent block
width, height : Width and height of patch
slabel, tlabel: Values for sender/target label
parent : _Block to which _Patch/_Block belongs
"""
self.l, self.t, self.r, self.c = left, top, row, col
self.w, self.h = width, height
self.slbl, self.tlbl = slabel, tlabel
self.ax = None
self._parent = parent
# --------------------------------------------------------------------
def _update_size(self, new_lr):
"""Update patch size by inspecting all children."""
if new_lr[0] < self.l:
raise ValueError(
"new_lr[0] = %f < l = %f" % (new_lr[0], self.l))
if new_lr[1] < self.t:
raise ValueError(
"new_lr[1] = %f < t = %f" % (new_lr[1], self.t))
self.w, self.h = new_lr[0] - self.l, new_lr[1] - self.t
if self._parent:
self._parent._update_size(new_lr)
# --------------------------------------------------------------------
@property
def tl(self):
"""Top left corner of the patch."""
return (self.l, self.t)
# --------------------------------------------------------------------
@property
def lr(self):
"""Lower right corner of the patch."""
return (self.l + self.w, self.t + self.h)
# --------------------------------------------------------------------
@property
def l_patches(self):
"""Left edge of leftmost _Patch in _Block."""
if isinstance(self, ConnectionPattern._Block):
return min([e.l_patches for e in _flattened(self.elements)])
else:
return self.l
# --------------------------------------------------------------------
@property
def t_patches(self):
"""Top edge of topmost _Patch in _Block."""
if isinstance(self, ConnectionPattern._Block):
return min([e.t_patches for e in _flattened(self.elements)])
else:
return self.t
# --------------------------------------------------------------------
@property
def r_patches(self):
"""Right edge of rightmost _Patch in _Block."""
if isinstance(self, ConnectionPattern._Block):
return max([e.r_patches for e in _flattened(self.elements)])
else:
return self.l + self.w
# --------------------------------------------------------------------
@property
def b_patches(self):
"""Bottom edge of lowest _Patch in _Block."""
if isinstance(self, ConnectionPattern._Block):
return max([e.b_patches for e in _flattened(self.elements)])
else:
return self.t + self.h
# --------------------------------------------------------------------
@property
def location(self):
if self.r < self.c:
return 'super'
elif self.r == self.c:
return 'diag'
else:
return 'sub'
# ------------------------------------------------------------------------
class _Block(_Patch):
"""
Represents a block of patches.
A block is initialized with its top left corner and is then built
row-wise downward and column-wise to the right. Rows are added by
block.newRow(2.0, 1.5)
where 2.0 is the space between rows, 1.5 the space between the
first row. Elements are added to a row by
el = block.newElement(1.0, 0.6, 's', 't')
el = block.newElement(1.0, 0.6, 's', 't', size=[2.0, 3.0])
The first example adds a new _Block to the row. 1.0 is space between
blocks, 0.6 space before the first block in a row. 's' and 't' are
stored as slbl and tlbl (optional). If size is given, a _Patch with
the given size is created. _Patch is atomic. newElement() returns the
_Block or _Patch created.
"""
# --------------------------------------------------------------------
def __init__(self, left, top, row, col, slabel=None, tlabel=None,
parent=None):
ConnectionPattern._Patch.__init__(self, left, top, row, col, 0, 0,
slabel, tlabel, parent)
self.elements = []
self._row_top = None # top of current row
self._row = 0
self._col = 0
# --------------------------------------------------------------------
def newRow(self, dy=0.0, dynew=0.0):
"""
Open new row of elements.
Arguments:
dy : vertical skip before new row
dynew: vertical skip if new row is first row
"""
if self.elements:
# top of row is bottom of block so far + dy
self._row_top = self.lr[1] + dy
else:
# place relative to top edge of parent
self._row_top = self.tl[1] + dynew
self._row += 1
self._col = 0
self.elements.append([])
# --------------------------------------------------------------------
def newElement(self, dx=0.0, dxnew=0.0, slabel=None, tlabel=None,
size=None):
"""
Append new element to last row.
Creates _Block instance if size is not given, otherwise _Patch.
Arguments:
dx : horizontal skip before new element
dxnew : horizontal skip if new element is first
slabel: sender label (on y-axis)
tlabel: target label (on x-axis)
size : size of _Patch to create
Returns:
Created _Block or _Patch.
"""
assert (self.elements)
if self.elements[-1]:
# left edge is right edge of block so far + dx
col_left = self.lr[0] + dx
else:
# place relative to left edge of parent
col_left = self.tl[0] + dxnew
self._col += 1
if size is not None:
elem = ConnectionPattern._Patch(col_left, self._row_top,
self._row, self._col,
size[0], size[1], slabel,
tlabel, self)
else:
elem = ConnectionPattern._Block(col_left, self._row_top,
self._row, self._col,
slabel, tlabel, self)
self.elements[-1].append(elem)
self._update_size(elem.lr)
return elem
# --------------------------------------------------------------------
def addMargin(self, rmarg=0.0, bmarg=0.0):
"""Extend block by margin to right and bottom."""
if rmarg < 0.0:
raise ValueError('rmarg must not be negative!')
if bmarg < 0.0:
raise ValueError('bmarg must not be negative!')
lr = self.lr
self._update_size((lr[0] + rmarg, lr[1] + bmarg))
# ------------------------------------------------------------------------
def _prepareAxes(self, mode, showLegend):
"""
Prepare information for all axes, but do not create the actual axes
yet.
mode: one of 'detailed', 'by layer', 'totals'
"""
# parameters for figure, all quantities are in mm
patchmax = plotParams.patch_size # length of largest patch dimension
# actual parameters scaled from default patchmax = 20mm
lmargin = plotParams.margins.left
tmargin = plotParams.margins.top
rmargin = plotParams.margins.right
bmargin = plotParams.margins.bottom
cbmargin = plotParams.margins.colbar
blksep = 3. / 20. * patchmax # distance between blocks
popsep = 2. / 20. * patchmax # distance between populations
synsep = 0.5 / 20. * patchmax # distance between synapse types
# find maximal extents of individual patches, horizontal and vertical
maxext = max(_flattened([l.ext for l in self._layers]))
patchscale = patchmax / float(maxext) # determines patch size
# obtain number of synaptic patches per population pair
# maximum column across all synapse types, same for rows
nsyncols = max([s.c for s in self._synAttr.values()]) + 1
nsynrows = max([s.r for s in self._synAttr.values()]) + 1
        # dictionary mapping into patch axes, so they can be found later
self._patchTable = {}
# set to store all created patches to avoid multiple
# creation of patches at same location
axset = set()
# create entire setup, top-down
self._axes = self._Block(lmargin, tmargin, 1, 1)
for sl in self._layers:
# get sorted list of populations for sender layer
spops = sorted([p[1] for p in self._pops if p[0] == sl.name],
key=lambda pn: self._poporder[pn])
self._axes.newRow(blksep, 0.0)
for tl in self._layers:
# ignore singular target layers
if tl.singular:
continue
# get sorted list of populations for target layer
tpops = sorted([p[1] for p in self._pops if p[0] == tl.name],
key=lambda pn: self._poporder[pn])
# compute size for patches
patchsize = patchscale * np.array(tl.ext)
block = self._axes.newElement(blksep, 0.0, sl.name, tl.name)
if mode == 'totals':
# single patch
block.newRow(popsep, popsep / 2.)
p = block.newElement(popsep, popsep / 2., size=patchsize)
self._patchTable[(sl.name, None, tl.name, None, None)] = p
elif mode == 'layer':
# We loop over all rows and columns in the synapse patch
# grid. For each (r,c), we find the pertaining synapse name
# by reverse lookup in the _synAttr dictionary. This is
# inefficient, but should not be too costly overall. But we
# must create the patches in the order they are placed.
# NB: We must create also those block.newElement() that are
# not registered later, since block would otherwise not
# skip over the unused location.
for r in range(nsynrows):
block.newRow(synsep, popsep / 2.)
for c in range(nsyncols):
p = block.newElement(synsep, popsep / 2.,
size=patchsize)
smod = [k for k, s in self._synAttr.items()
if s.r == r and s.c == c]
if smod:
assert (len(smod) == 1)
self._patchTable[(sl.name, None, tl.name,
None, smod[0])] = p
elif mode == 'population':
# one patch per population pair
for sp in spops:
block.newRow(popsep, popsep / 2.)
for tp in tpops:
pblk = block.newElement(popsep, popsep / 2.,
sp, tp)
pblk.newRow(synsep, synsep / 2.)
self._patchTable[(sl.name, sp,
tl.name, tp, None)] = \
pblk.newElement(synsep, blksep / 2.,
size=patchsize)
else:
# detailed presentation of all pops
for sp in spops:
block.newRow(popsep, popsep / 2.)
for tp in tpops:
pblk = block.newElement(popsep, popsep / 2.,
sp, tp)
pblk.newRow(synsep, synsep / 2.)
# Find all connections with matching properties
# all information we need here is synapse model.
# We store this in a dictionary mapping synapse
# patch column to synapse model, for use below.
syns = dict(
[(self._synAttr[c.synmodel].c, c.synmodel)
for c in _flattened(self._cTable.values())
if c.matches(sl.name, sp, tl.name, tp)])
# create all synapse patches
for n in range(nsyncols):
# Do not duplicate existing axes.
if (sl.name, sp, tl.name, tp, n) in axset:
continue
# Create patch. We must create also such
# patches that do not have synapses, since
# spacing would go wrong otherwise.
p = pblk.newElement(synsep, 0.0,
size=patchsize)
# if patch represents existing synapse,
# register
if n in syns:
self._patchTable[(sl.name, sp, tl.name,
tp, syns[n])] = p
block.addMargin(popsep / 2., popsep / 2.)
self._axes.addMargin(rmargin, bmargin)
if showLegend:
self._axes.addMargin(0, cbmargin) # add color bar at bottom
        # keep right margin out of the calculation
        figwidth = self._axes.lr[0] - self._axes.tl[0] - rmargin
if mode == 'totals' or mode == 'population':
# single patch at right edge, 20% of figure
if plotParams.cbwidth:
lwidth = plotParams.cbwidth * figwidth
else:
lwidth = 0.2 * figwidth
if lwidth > 100.0: # colorbar shouldn't be wider than 10cm
lwidth = 100.0
lheight = (plotParams.cbheight * cbmargin
if plotParams.cbheight else 0.3 * cbmargin)
if plotParams.legend_location is None:
cblift = 0.9 * cbmargin
else:
cblift = 0.7 * cbmargin
self._cbPatches = self._Patch(self._axes.tl[0],
self._axes.lr[1] - cblift,
None, None,
lwidth,
lheight)
else:
# one patch per synapse type, 20% of figure or less
# we need to get the synapse names in ascending order
# of synapse indices
snames = [s[0] for s in
sorted([(k, v) for k, v in self._synAttr.items()],
key=lambda kv: kv[1].index)
]
snum = len(snames)
if plotParams.cbwidth:
lwidth = plotParams.cbwidth * figwidth
if plotParams.cbspace:
lstep = plotParams.cbspace * figwidth
else:
lstep = 0.5 * lwidth
else:
if snum < 5:
lwidth = 0.15 * figwidth
lstep = 0.1 * figwidth
else:
lwidth = figwidth / (snum + 1.0)
lstep = (figwidth - snum * lwidth) / (snum - 1.0)
if lwidth > 100.0: # colorbar shouldn't be wider than 10cm
lwidth = 100.0
lstep = 30.0
lheight = (plotParams.cbheight * cbmargin
if plotParams.cbheight else 0.3 * cbmargin)
if plotParams.cboffset is not None:
offset = plotParams.cboffset
else:
offset = lstep
if plotParams.legend_location is None:
cblift = 0.9 * cbmargin
else:
cblift = 0.7 * cbmargin
self._cbPatches = {}
for j in range(snum):
self._cbPatches[snames[j]] = \
self._Patch(
self._axes.tl[0] + offset + j * (lstep + lwidth),
self._axes.lr[1] - cblift,
None, None,
lwidth,
lheight)
# ------------------------------------------------------------------------
def _scaledBox(self, p):
"""Scaled axes rectangle for patch, reverses y-direction."""
xsc, ysc = self._axes.lr
return self._figscale * np.array(
[p.l / xsc, 1 - (p.t + p.h) / ysc, p.w / xsc, p.h / ysc])
# ------------------------------------------------------------------------
def _scaledBoxNR(self, p):
"""Scaled axes rectangle for patch, does not reverse y-direction."""
xsc, ysc = self._axes.lr
return self._figscale * np.array(
[p.l / xsc, p.t / ysc, p.w / xsc, p.h / ysc])
# ------------------------------------------------------------------------
def _configSynapses(self, cList, synTypes):
"""Configure synapse information based on connections and user info."""
# compile information on synapse types and weights
synnames = set(c[2]['synapse_model'] for c in cList)
synweights = set(_weighteval(c[2]['weights']) for c in cList)
# set up synTypes for all pre-defined cases
if synTypes:
# check if there is info for all synapse types
stnames = _flattened([[s.name for s in r] for r in synTypes])
if len(stnames) != len(set(stnames)):
raise ValueError(
'Names of synapse types in synTypes must be unique!')
if len(synnames) > 1 and not synnames.issubset(set(stnames)):
                raise ValueError('synTypes must provide information about ' +
'all synapse types.')
elif len(synnames) == 1:
# only one synapse type used
if min(synweights) >= 0:
# all weights positive
synTypes = ((SynType('exc', 1.0, 'red'),),)
elif max(synweights) <= 0:
# all weights negative
synTypes = ((SynType('inh', -1.0, 'blue'),),)
else:
# positive and negative weights, assume Dale holds
synTypes = ((SynType('exc', 1.0, 'red'),),
(SynType('inh', -1.0, 'blue'),))
elif synnames == set(['AMPA', 'GABA_A']):
# only AMPA and GABA_A
synTypes = ((SynType('AMPA', 1.0, 'red'),),
(SynType('GABA_A', -1.0, 'blue'),))
elif synnames.issubset(set(['AMPA', 'NMDA', 'GABA_A', 'GABA_B'])):
synTypes = ((SynType('AMPA', 1.0, 'red'),
SynType('NMDA', 1.0, 'orange'),),
(SynType('GABA_A', -1.0, 'blue'),
SynType('GABA_B', -1.0, 'purple'),))
else:
raise ValueError('Connection list contains unknown synapse ' +
'models; synTypes required.')
# now build _synAttr by assigning blocks to rows
self._synAttr = {}
row = 0
ctr = 0
for sgroup in synTypes:
col = 0
for stype in sgroup:
self._synAttr[stype.name] = self._SynProps(row, col,
stype.relweight,
stype.cmap, ctr)
col += 1
ctr += 1
row += 1
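        # With the default AMPA/NMDA/GABA_A/GABA_B synTypes above, the
        # resulting grid is: AMPA -> (row 0, col 0), NMDA -> (row 0, col 1),
        # GABA_A -> (row 1, col 0), GABA_B -> (row 1, col 1), i.e. one row
        # per synTypes group, with columns in the order given.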
# ------------------------------------------------------------------------
def __init__(self, lList, cList, synTypes=None, intensity='wp',
mList=None, Vmem=None, poporder=None):
"""
lList : layer list
cList : connection list
synTypes : nested list of synapse types
intensity: 'wp' - weight * probability
'p' - probability
'tcd' - |total charge deposited| * probability
requires mList; currently only for ht_model
proper results only if Vmem within reversal
potentials
mList : model list; only needed with 'tcd'
Vmem : reference membrane potential for 'tcd'
poporder : dictionary mapping population names to numbers; populations
will be sorted in diagram in order of increasing numbers.
"""
# extract layers to dict mapping name to extent
self._layers = [self._LayerProps(l[0], l[1]['extent']) for l in lList]
# ensure layer names are unique
lnames = [l.name for l in self._layers]
if len(lnames) != len(set(lnames)):
raise ValueError('Layer names must be unique.')
# set up synapse attributes
self._configSynapses(cList, synTypes)
# if tcd mode, build tcd representation
if intensity != 'tcd':
tcd = None
else:
assert (mList)
from . import tcd_nest
tcd = tcd_nest.TCD(mList)
# Build internal representation of connections.
# This representation contains one entry for each sender pop,
# target pop, synapse type tuple. Creating the connection object
# implies computation of the kernel.
        # Several connections may agree in all properties; these need to be
        # added together here. Therefore, we build the table iteratively and
        # store everything in a dictionary, so we can find earlier instances.
self._cTable = {}
for conn in cList:
key, val = self._Connection(conn, self._layers, self._synAttr,
intensity, tcd, Vmem).keyval
if key:
if key in self._cTable:
self._cTable[key].append(val)
else:
self._cTable[key] = [val]
# number of layers
self._nlyr = len(self._layers)
# compile list of populations, list(set()) makes list unique
self._pops = list(
set(_flattened([[(c.slayer, c.snrn), (c.tlayer, c.tnrn)]
for c in _flattened(self._cTable.values())])))
self._npop = len(self._pops)
# store population ordering; if not given, use alphabetical ordering
# also add any missing populations alphabetically at end
# layers are ignored
# create alphabetically sorted list of unique population names
popnames = sorted(list(set([p[1] for p in self._pops])),
key=lambda x: x if x is not None else "")
if poporder:
self._poporder = poporder
next = max(self._poporder.values()) + 1 # next free sorting index
else:
self._poporder = {}
next = 0
for pname in popnames:
if pname not in self._poporder:
self._poporder[pname] = next
next += 1
# compile list of synapse types
self._synTypes = list(
set([c.synmodel for c in _flattened(self._cTable.values())]))
# ------------------------------------------------------------------------
def plot(self, aggrGroups=False, aggrSyns=False, globalColors=False,
colorLimits=None, showLegend=True,
selectSyns=None, file=None, fixedWidth=None):
"""
Plot connection pattern.
By default, connections between any pair of populations
are plotted on the screen, with separate color scales for
all patches.
Arguments:
aggrGroups If True, aggregate projections with the same synapse type
and the same source and target groups (default: False)
aggrSyns If True, aggregate projections with the same synapse model
(default: False)
globalColors If True, use global color scale, otherwise local
(default: False)
colorLimits If given, must be two element vector for lower and
upper limits of color scale. Implies globalColors
(default: None)
showLegend If True, show legend below CPT (default: True).
selectSyns If tuple of synapse models, show only connections of the
                     given types. Cannot be combined with aggregation.
file If given, save plot to given file name; file may also be a
tuple of file names, the figure will then be saved to all
files. This may be useful if you want to save the same figure
in several formats.
fixedWidth Figure will be scaled to this width in mm by changing
patch size.
Returns:
kern_min, kern_max Minimal and maximal values of kernels,
with kern_min <= 0, kern_max >= 0.
Output:
figure created
"""
        # translate new to old parameter names (per v 0.5)
normalize = globalColors
if colorLimits:
normalize = True
if selectSyns:
            if aggrGroups or aggrSyns:
raise ValueError(
'selectSyns cannot be combined with aggregation.')
selected = selectSyns
mode = 'select'
elif aggrGroups and aggrSyns:
mode = 'totals'
elif aggrGroups and not aggrSyns:
mode = 'layer'
elif aggrSyns and not aggrGroups:
mode = 'population'
else:
mode = None
if mode == 'layer':
# reduce to dimensions sender layer, target layer, synapse type
# add all kernels agreeing on these three attributes
plotKerns = []
for slayer in self._layers:
for tlayer in self._layers:
for synmodel in self._synTypes:
kerns = [c.kernval for c in
_flattened(self._cTable.values())
if c.matches(sl=slayer.name, tl=tlayer.name,
syn=synmodel)]
if len(kerns) > 0:
plotKerns.append(
self._PlotKern(slayer.name, None, tlayer.name,
None, synmodel,
_addKernels(kerns)))
elif mode == 'population':
            # reduce to dimensions sender population, target population
            # add all kernels, weighting according to synapse type
plotKerns = []
for spop in self._pops:
for tpop in self._pops:
kerns = [self._synAttr[c.synmodel].tw * c.kernval for c in
_flattened(self._cTable.values())
if c.matches(sl=spop[0], sn=spop[1], tl=tpop[0],
tn=tpop[1])]
if len(kerns) > 0:
plotKerns.append(
self._PlotKern(spop[0], spop[1], tpop[0], tpop[1],
None,
_addKernels(kerns)))
elif mode == 'totals':
# reduce to dimensions sender layer, target layer
            # add all kernels, weighting according to synapse type
plotKerns = []
for slayer in self._layers:
for tlayer in self._layers:
kerns = [self._synAttr[c.synmodel].tw * c.kernval for c in
_flattened(self._cTable.values())
if c.matches(sl=slayer.name, tl=tlayer.name)]
if len(kerns) > 0:
plotKerns.append(
self._PlotKern(slayer.name, None, tlayer.name,
None, None, _addKernels(kerns)))
elif mode == 'select':
# copy only those kernels that have the requested synapse type,
# no dimension reduction
# We need to sum all kernels in the list for a set of attributes
plotKerns = [
self._PlotKern(clist[0].slayer, clist[0].snrn, clist[0].tlayer,
clist[0].tnrn,
clist[0].synmodel,
_addKernels([c.kernval for c in clist]))
for clist in self._cTable.values() if
clist[0].synmodel in selected]
else:
# copy all
# We need to sum all kernels in the list for a set of attributes
plotKerns = [
self._PlotKern(clist[0].slayer, clist[0].snrn, clist[0].tlayer,
clist[0].tnrn,
clist[0].synmodel,
_addKernels([c.kernval for c in clist]))
for clist in self._cTable.values()]
self._prepareAxes(mode, showLegend)
if fixedWidth:
margs = plotParams.margins.left + plotParams.margins.right
if fixedWidth <= margs:
                raise ValueError('Requested width must be larger than the ' +
                                 'total width of the margins (%g mm)' % margs)
currWidth = self._axes.lr[0]
currPatchMax = plotParams.patch_size # store
# compute required patch size
plotParams.patch_size = ((fixedWidth - margs) /
(currWidth - margs) * currPatchMax)
# build new axes
del self._axes
self._prepareAxes(mode, showLegend)
# restore patch size
plotParams.patch_size = currPatchMax
# create figure with desired size
fsize = np.array(self._axes.lr) / 25.4 # convert mm to inches
f = plt.figure(figsize=fsize, facecolor='w')
# size will be rounded according to DPI setting, adjust fsize
dpi = f.get_dpi()
fsize = np.floor(fsize * dpi) / dpi
# check that we got the correct size
actsize = np.array([f.get_figwidth(), f.get_figheight()], dtype=float)
if all(actsize == fsize):
self._figscale = 1.0 # no scaling
else:
warnings.warn("""
WARNING: Figure shrunk on screen!
The figure is shrunk to fit onto the screen.
Please specify a different backend using the -d
option to obtain full-size figures. Your current
backend is: %s
""" % mpl.get_backend())
plt.close(f)
# determine scale: most shrunk dimension
self._figscale = np.min(actsize / fsize)
# create shrunk on-screen figure
f = plt.figure(figsize=self._figscale * fsize, facecolor='w')
# just ensure all is well now
actsize = np.array([f.get_figwidth(), f.get_figheight()],
dtype=float)
# add decoration
for block in _flattened(self._axes.elements):
ax = f.add_axes(self._scaledBox(block),
axisbg=plotParams.layer_bg[block.location],
xticks=[], yticks=[],
zorder=plotParams.z_layer)
if hasattr(ax, 'frame'):
ax.frame.set_visible(False)
else:
for sp in ax.spines.values():
# turn off axis lines, make room for frame edge
sp.set_color('none')
if block.l <= self._axes.l_patches and block.slbl:
ax.set_ylabel(block.slbl,
rotation=plotParams.layer_orientation['sender'],
fontproperties=plotParams.layer_font)
if block.t <= self._axes.t_patches and block.tlbl:
ax.set_xlabel(block.tlbl,
rotation=plotParams.layer_orientation['target'],
fontproperties=plotParams.layer_font)
ax.xaxis.set_label_position('top')
# inner blocks for population labels
if mode not in ('totals', 'layer'):
for pb in _flattened(block.elements):
if not isinstance(pb, self._Block):
continue # should not happen
ax = f.add_axes(self._scaledBox(pb),
axisbg='none', xticks=[], yticks=[],
zorder=plotParams.z_pop)
if hasattr(ax, 'frame'):
ax.frame.set_visible(False)
else:
for sp in ax.spines.values():
# turn off axis lines, make room for frame edge
sp.set_color('none')
if pb.l + pb.w >= self._axes.r_patches and pb.slbl:
ax.set_ylabel(pb.slbl,
rotation=plotParams.pop_orientation[
'sender'],
fontproperties=plotParams.pop_font)
ax.yaxis.set_label_position('right')
if pb.t + pb.h >= self._axes.b_patches and pb.tlbl:
ax.set_xlabel(pb.tlbl,
rotation=plotParams.pop_orientation[
'target'],
fontproperties=plotParams.pop_font)
# determine minimum and maximum values across all kernels,
# but set min <= 0, max >= 0
kern_max = max(0.0, max([np.max(kern.kern) for kern in plotKerns]))
kern_min = min(0.0, min([np.min(kern.kern) for kern in plotKerns]))
# determine color limits for plots
if colorLimits:
c_min, c_max = colorLimits # explicit values
else:
# default values for color limits
# always 0 as lower limit so anything > 0 is non-white,
# except when totals or populations
c_min = None if mode in ('totals', 'population') else 0.0
c_max = None # use patch maximum as upper limit
if normalize:
# use overall maximum, at least 0
c_max = kern_max
if aggrSyns:
# use overall minimum, if negative, otherwise 0
c_min = kern_min
# for c_max, use the larger of the two absolute values
c_max = kern_max
# if c_min is non-zero, use same color scale for neg values
if c_min < 0:
c_min = -c_max
# Initialize dict storing sample patches for each synapse type for use
# in creating color bars. We will store the last patch of any given
# synapse type for reference. When aggrSyns, we have only one patch
# type and store that.
if not aggrSyns:
samplePatches = dict(
[(sname, None) for sname in self._synAttr.keys()])
else:
# only single type of patches
samplePatches = None
for kern in plotKerns:
p = self._patchTable[(kern.sl, kern.sn, kern.tl,
kern.tn, kern.syn)]
p.ax = f.add_axes(self._scaledBox(p), aspect='equal',
xticks=[], yticks=[], zorder=plotParams.z_conn)
p.ax.patch.set_edgecolor('none')
if hasattr(p.ax, 'frame'):
p.ax.frame.set_visible(False)
else:
for sp in p.ax.spines.values():
# turn off axis lines, make room for frame edge
sp.set_color('none')
if not aggrSyns:
                # we have synapse information -> not totals, all values positive
assert (kern.syn)
assert (np.min(kern.kern) >= 0.0)
# we may overwrite here, but this does not matter, we only need
# some reference patch
samplePatches[kern.syn] = p.ax.imshow(kern.kern,
vmin=c_min, vmax=c_max,
cmap=self._synAttr[
kern.syn].cmap) # ,
# interpolation='nearest')
else:
# we have totals, special color table and normalization
# we may overwrite here, but this does not matter, we only need
# some reference patch
samplePatches = p.ax.imshow(kern.kern,
vmin=c_min, vmax=c_max,
cmap=cm.bluered,
norm=cm.ZeroCenterNorm())
# interpolation='nearest')
# Create colorbars at bottom of figure
if showLegend:
# FIXME: rewrite the function to avoid comparisons with None!
f_min = float("-inf") if c_min is None else c_min
f_max = float("-inf") if c_max is None else c_max
# Do we have kernel values exceeding the color limits?
if f_min <= kern_min and kern_max <= f_max:
extmode = 'neither'
elif f_min > kern_min and kern_max <= f_max:
extmode = 'min'
elif f_min <= kern_min and kern_max > f_max:
extmode = 'max'
else:
extmode = 'both'
if aggrSyns:
cbax = f.add_axes(self._scaledBox(self._cbPatches))
# by default, use 4 ticks to avoid clogging
# according to docu, we need a separate Locator object
# for each axis.
if plotParams.legend_ticks:
tcks = plotParams.legend_ticks
else:
tcks = mpl.ticker.MaxNLocator(nbins=4)
if normalize:
# colorbar with freely settable ticks
cb = f.colorbar(samplePatches, cax=cbax,
orientation='horizontal',
ticks=tcks,
format=plotParams.legend_tick_format,
extend=extmode)
else:
# colorbar with tick labels 'Exc', 'Inh'
                    # we add the color bar here explicitly, so we get no
# problems if the sample patch includes only pos or
# only neg values
cb = mpl.colorbar.ColorbarBase(cbax, cmap=cm.bluered,
orientation='horizontal')
cbax.set_xticks([0, 1])
cbax.set_xticklabels(['Inh', 'Exc'])
cb.outline.set_linewidth(0.5) # narrower line around colorbar
# fix font for ticks
plt.setp(cbax.get_xticklabels(),
fontproperties=plotParams.legend_tick_font)
# no title in this case
else:
# loop over synapse types
for syn in self._synAttr.keys():
cbax = f.add_axes(self._scaledBox(self._cbPatches[syn]))
if plotParams.legend_location is None:
cbax.set_ylabel(
syn,
fontproperties=plotParams.legend_title_font,
rotation='horizontal')
else:
cbax.set_title(
syn,
fontproperties=plotParams.legend_title_font,
rotation='horizontal')
if normalize:
# by default, use 4 ticks to avoid clogging
# according to docu, we need a separate Locator object
# for each axis.
if plotParams.legend_ticks:
tcks = plotParams.legend_ticks
else:
tcks = mpl.ticker.MaxNLocator(nbins=4)
# proper colorbar
cb = f.colorbar(samplePatches[syn], cax=cbax,
orientation='horizontal',
ticks=tcks,
format=plotParams.legend_tick_format,
extend=extmode)
cb.outline.set_linewidth(
0.5) # narrower line around colorbar
# fix font for ticks
plt.setp(cbax.get_xticklabels(),
fontproperties=plotParams.legend_tick_font)
else:
# just a solid color bar with no ticks
cbax.set_xticks([])
cbax.set_yticks([])
# full-intensity color from color map
cbax.set_axis_bgcolor(self._synAttr[syn].cmap(1.0))
# narrower border
if hasattr(cbax, 'frame'):
cbax.frame.set_linewidth(0.5)
else:
for sp in cbax.spines.values():
sp.set_linewidth(0.5)
# save to file(s), use full size
f.set_size_inches(fsize)
if isinstance(file, (list, tuple)):
for fn in file:
f.savefig(fn)
elif isinstance(file, str):
f.savefig(file)
f.set_size_inches(actsize) # reset size for further interactive work
return kern_min, kern_max
# ------------------------------------------------------------------------
def toLaTeX(self, file, standalone=False, enumerate=False, legend=True):
"""
Write connection table to file.
Arguments:
file output file name
standalone create complete LaTeX file (default: False)
enumerate enumerate connections (default: False)
legend add explanation of functions used (default: True)
"""
lfile = open(file, 'w')
if not lfile:
raise Exception('Could not open file "%s"' % file)
if standalone:
lfile.write(
r"""
\documentclass[a4paper,american]{article}
\usepackage[pdftex,margin=1in,centering,
noheadfoot,a4paper]{geometry}
\usepackage[T1]{fontenc}
\usepackage[utf8]{inputenc}
\usepackage{color}
\usepackage{calc}
\usepackage{tabularx} % autom. adjusts column width in tables
\usepackage{multirow} % allows entries spanning several rows
\usepackage{colortbl} % allows coloring tables
\usepackage[fleqn]{amsmath}
\setlength{\mathindent}{0em}
\usepackage{mathpazo}
\usepackage[scaled=.95]{helvet}
\renewcommand\familydefault{\sfdefault}
\renewcommand\arraystretch{1.2}
\pagestyle{empty}
% \hdr{ncols}{label}{title}
%
% Typeset header bar across table with ncols columns
% with label at left margin and centered title
%
\newcommand{\hdr}[3]{%
\multicolumn{#1}{|l|}{%
\color{white}\cellcolor[gray]{0.0}%
\textbf{\makebox[0pt]{#2}\hspace{0.5\linewidth}%
\makebox[0pt][c]{#3}}%
}%
}
\begin{document}
""")
lfile.write(
r"""
\noindent\begin{tabularx}{\linewidth}{%s|l|l|l|c|c|X|}\hline
\hdr{%d}{}{Connectivity}\\\hline
%s \textbf{Src} & \textbf{Tgt} & \textbf{Syn} &
\textbf{Wght} & \textbf{Mask} & \textbf{Kernel} \\\hline
""" % (('|r', 7, '&') if enumerate else ('', 6, '')))
# ensure sorting according to keys, gives some alphabetic sorting
haveU, haveG = False, False
cctr = 0 # connection counter
for ckey in sorted(self._cTable.keys()):
for conn in self._cTable[ckey]:
cctr += 1
if enumerate:
lfile.write('%d &' % cctr)
# take care to escape _ in names such as GABA_A
# also remove any pending '/None'
lfile.write((r'%s/%s & %s/%s & %s' %
(conn.slayer, conn.snrn, conn.tlayer, conn.tnrn,
conn.synmodel)).replace('_', r'\_').replace(
'/None', ''))
lfile.write(' & \n')
if isinstance(conn.weight, (int, float)):
lfile.write(r'%g' % conn.weight)
elif 'uniform' in conn.weight:
cw = conn.weight['uniform']
lfile.write(
r'$\mathcal{U}[%g, %g)$' % (cw['min'], cw['max']))
haveU = True
else:
raise ValueError(
                        'Unknown weight type "%s"' % conn.weight)
lfile.write(' & \n')
if 'circular' in conn.mask:
lfile.write(r'$\leq %g$' % conn.mask['circular']['radius'])
elif 'rectangular' in conn.mask:
cmr = conn.mask['rectangular']
lfile.write(
r"""$[(%+g, %+g), (%+g, %+g)]$"""
% (cmr['lower_left'][0], cmr['lower_left'][1],
cmr['upper_right'][0], cmr['upper_right'][1]))
else:
raise ValueError(
                        'Unknown mask type "%s"' % conn.mask)
lfile.write(' & \n')
if isinstance(conn.kernel, (int, float)):
lfile.write(r'$%g$' % conn.kernel)
elif 'gaussian' in conn.kernel:
ckg = conn.kernel['gaussian']
lfile.write(r'$\mathcal{G}(p_0 = %g, \sigma = %g)$' %
(ckg['p_center'], ckg['sigma']))
haveG = True
else:
raise ValueError(
                        'Unknown kernel type "%s"' % conn.kernel)
lfile.write('\n')
lfile.write(r'\\\hline' '\n')
if legend and (haveU or haveG):
# add bottom line with legend
lfile.write(r'\hline' '\n')
lfile.write(r'\multicolumn{%d}{|l|}{\footnotesize ' %
(7 if enumerate else 6))
if haveG:
lfile.write(r'$\mathcal{G}(p_0, \sigma)$: ' +
r'$p(\mathbf{x})=p_0 e^{-\mathbf{x}^2/2\sigma^2}$')
if haveG and haveU:
lfile.write(r', ')
if haveU:
lfile.write(
r'$\mathcal{U}[a, b)$: uniform distribution on $[a, b)$')
lfile.write(r'}\\\hline' '\n')
lfile.write(r'\end{tabularx}' '\n\n')
if standalone:
lfile.write(r'\end{document}''\n')
lfile.close()
# ----------------------------------------------------------------------------
def _evalkernel(mask, kernel, weight, extent, intensity, tcd):
"""
    Evaluate kernel within extent.
Kernel values are multiplied with abs(weight). If weight is a
distribution, the mean value is used.
Result is a masked array, in which the values outside the mask are
masked.
"""
# determine resolution, number of data points
dx = max(extent) / plotParams.n_kern
    # np.linspace expects an integer number of samples
    nx = int(np.ceil(extent[0] / dx))
    ny = int(np.ceil(extent[1] / dx))
x = np.linspace(-0.5 * extent[0], 0.5 * extent[0], nx)
y = np.linspace(-0.5 * extent[1], 0.5 * extent[1], ny)
X, Y = np.meshgrid(x, y)
if intensity == 'wp':
return np.ma.masked_array(abs(weight) * _kerneval(X, Y, kernel),
np.logical_not(_maskeval(X, Y, mask)))
elif intensity == 'p':
return np.ma.masked_array(_kerneval(X, Y, kernel),
np.logical_not(_maskeval(X, Y, mask)))
elif intensity == 'tcd':
return np.ma.masked_array(
abs(tcd) * abs(weight) * _kerneval(X, Y, kernel),
np.logical_not(_maskeval(X, Y, mask)))
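# ----------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): how _evalkernel can
# be driven with a hand-made circular mask and Gaussian kernel. The parameter
# values below are made up for demonstration only; real calls pass the
# topology-style dicts taken from the connection list.
def _demo_evalkernel():
    mask = {'circular': {'radius': 0.25}}
    kernel = {'gaussian': {'p_center': 1.0, 'sigma': 0.1}}
    # intensity 'wp' multiplies the kernel by |weight|; tcd is unused then
    kern = _evalkernel(mask, kernel, weight=2.0, extent=[1.0, 1.0],
                       intensity='wp', tcd=None)
    # kern is a masked array; entries outside the circular mask are masked
    return kern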
# ----------------------------------------------------------------------------
def _weighteval(weight):
"""Returns weight, or mean of distribution, signed."""
w = None
if isinstance(weight, (float, int)):
w = weight
elif isinstance(weight, dict):
assert (len(weight) == 1)
if 'uniform' in weight:
w = 0.5 * (weight['uniform']['min'] + weight['uniform']['max'])
elif 'gaussian' in weight:
w = weight['gaussian']['mean']
else:
raise Exception(
'Unknown weight type "%s"' % tuple(weight.keys())[0])
    # note: a weight of 0 is valid, so test explicitly for None
    if w is None:
        raise Exception('Cannot handle weight.')
return float(w)
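# ----------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): _weighteval reduces
# either a plain number or a topology-style distribution dict to a single
# signed float. The values here are invented for demonstration.
def _demo_weighteval():
    assert _weighteval(-2.5) == -2.5
    # uniform distribution on [1, 3) -> mean 2.0
    assert _weighteval({'uniform': {'min': 1.0, 'max': 3.0}}) == 2.0
    # gaussian distribution -> its mean
    assert _weighteval({'gaussian': {'mean': -0.5, 'sigma': 1.0}}) == -0.5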
# ----------------------------------------------------------------------------
def _maskeval(x, y, mask):
"""
Evaluate mask given as topology style dict at
(x,y). Assume x,y are 2d numpy matrices.
"""
assert (len(mask) == 1)
if 'circular' in mask:
r = mask['circular']['radius']
m = x ** 2 + y ** 2 <= r ** 2
elif 'doughnut' in mask:
        ri = mask['doughnut']['inner_radius']
        ro = mask['doughnut']['outer_radius']
        # compare squared distances against squared radii
        d = x ** 2 + y ** 2
        m = np.logical_and(ri ** 2 <= d, d <= ro ** 2)
elif 'rectangular' in mask:
ll = mask['rectangular']['lower_left']
ur = mask['rectangular']['upper_right']
m = np.logical_and(np.logical_and(ll[0] <= x, x <= ur[0]),
np.logical_and(ll[1] <= y, y <= ur[1]))
else:
raise Exception('Unknown mask type "%s"' % tuple(mask.keys())[0])
return m
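# ----------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): evaluating a mask on
# a small meshgrid, as _evalkernel does internally. Shapes and radii are
# arbitrary example values.
def _demo_maskeval():
    x = np.linspace(-0.5, 0.5, 11)
    X, Y = np.meshgrid(x, x)
    inside = _maskeval(X, Y, {'circular': {'radius': 0.3}})
    # inside is a boolean array, True where x**2 + y**2 <= 0.3**2
    return inside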
# ----------------------------------------------------------------------------
def _kerneval(x, y, fun):
"""
Evaluate function given as topology style dict at
(x,y). Assume x,y are 2d numpy matrices
"""
if isinstance(fun, (float, int)):
return float(fun) * np.ones(np.shape(x))
elif isinstance(fun, dict):
assert (len(fun) == 1)
if 'gaussian' in fun:
g = fun['gaussian']
p0 = g['p_center']
sig = g['sigma']
return p0 * np.exp(-0.5 * (x ** 2 + y ** 2) / sig ** 2)
else:
            raise Exception('Unknown kernel "%s"' % tuple(fun.keys())[0])
# something very wrong
raise Exception('Cannot handle kernel.')
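# ----------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): _kerneval accepts a
# constant or a topology-style 'gaussian' dict and returns an array of the
# same shape as the coordinate grids. Example values are made up.
def _demo_kerneval():
    x = np.linspace(-0.5, 0.5, 11)
    X, Y = np.meshgrid(x, x)
    flat = _kerneval(X, Y, 0.5)  # constant probability everywhere
    bump = _kerneval(X, Y, {'gaussian': {'p_center': 1.0, 'sigma': 0.2}})
    return flat, bump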
# ----------------------------------------------------------------------------
def _addKernels(kList):
"""
Add a list of kernels.
Arguments:
kList: List of masked arrays of equal size.
Returns:
Masked array of same size as input. All values are added,
setting masked values to 0. The mask for the sum is the
logical AND of all individual masks, so that only such
values are masked that are masked in all kernels.
_addKernels always returns a new array object, even if
kList has only a single element.
"""
assert (len(kList) > 0)
if len(kList) < 2:
return kList[0].copy()
d = np.ma.filled(kList[0], fill_value=0).copy()
m = kList[0].mask.copy()
for k in kList[1:]:
d += np.ma.filled(k, fill_value=0)
m = np.logical_and(m, k.mask)
return np.ma.masked_array(d, m)
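# ----------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): summing two masked
# kernels. Values outside a kernel's mask count as 0 in the sum, and a point
# stays masked only if it is masked in both inputs.
def _demo_addKernels():
    a = np.ma.masked_array([1.0, 2.0, 3.0], mask=[False, False, True])
    b = np.ma.masked_array([4.0, 5.0, 6.0], mask=[True, False, True])
    total = _addKernels([a, b])
    # total data -> [1., 7., 0.], total.mask -> [False, False, True]
    return total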
# ----------------------------------------------------------------------------
def _flattened(lst):
"""Returned list flattend at first level."""
return sum(lst, [])
# ----------------------------------------------------------------------------
"""
if __name__ == "__main__":
import sys
sys.path += ['./examples']
# import simple
# reload(simple)
cp = ConnectionPattern(simple.layerList, simple.connectList)
import simple2
reload(simple2)
cp2 = ConnectionPattern(simple2.layerList, simple2.connectList)
st3 = ((SynType('GABA_B', -5.0, 'orange'),
SynType('GABA_A', -1.0, 'm')),
(SynType('NMDA', 5.0, 'b'),
SynType('FOO', 1.0, 'aqua'),
SynType('AMPA', 3.0, 'g')))
cp3s = ConnectionPattern(simple2.layerList, simple2.connectList,
synTypes=st3)
import simple3
reload(simple3)
cp3 = ConnectionPattern(simple3.layerList, simple3.connectList)
# cp._prepareAxes('by layer')
# cp2._prepareAxes('by layer')
# cp3._prepareAxes('detailed')
cp2.plot()
cp2.plot(mode='layer')
cp2.plot(mode='population')
cp2.plot(mode='totals')
cp2.plot(mode=('AMPA',))
cp2.plot(mode=('AMPA','GABA_B'))
# cp3.plot()
# cp3.plot(mode='population')
# cp3.plot(mode='layer')
# cp3.plot(mode='totals')
# cp.plot(normalize=True)
# cp.plot(totals=True, normalize=True)
# cp2.plot()
# cp2.plot(file=('cp3.eps'))
# cp2.plot(byLayer=True)
# cp2.plot(totals=True)
"""
| gpl-2.0 |
wgmueller1/BDA_py_demos | demos_ch5/demo5_1.py | 19 | 5055 | """Bayesian Data Analysis, 3rd ed
Chapter 5, demo 1
Hierarchical model for Rats experiment (BDA3, p. 102).
"""
from __future__ import division
import numpy as np
from scipy.stats import beta
from scipy.special import gammaln
import matplotlib.pyplot as plt
# Edit default plot settings (colours from colorbrewer2.org)
plt.rc('font', size=14)
plt.rc('lines', color='#377eb8', linewidth=2)
plt.rc('axes', color_cycle=(plt.rcParams['lines.color'],)) # Disable color cycle
# rat data (BDA3, p. 102)
y = np.array([
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1,
1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 5, 2,
5, 3, 2, 7, 7, 3, 3, 2, 9, 10, 4, 4, 4, 4, 4, 4, 4,
10, 4, 4, 4, 5, 11, 12, 5, 5, 6, 5, 6, 6, 6, 6, 16, 15,
15, 9, 4
])
n = np.array([
20, 20, 20, 20, 20, 20, 20, 19, 19, 19, 19, 18, 18, 17, 20, 20, 20,
20, 19, 19, 18, 18, 25, 24, 23, 20, 20, 20, 20, 20, 20, 10, 49, 19,
46, 27, 17, 49, 47, 20, 20, 13, 48, 50, 20, 20, 20, 20, 20, 20, 20,
48, 19, 19, 19, 22, 46, 49, 20, 20, 23, 19, 22, 20, 20, 20, 52, 46,
47, 24, 14
])
M = len(y)
# plot the separate and pooled models
plt.figure(figsize=(8,10))
x = np.linspace(0, 1, 250)
# separate
plt.subplot(2, 1, 1)
lines = plt.plot(x, beta.pdf(x[:,None], y[:-1] + 1, n[:-1] - y[:-1] + 1),
linewidth=1)
# highlight the last line
line1, = plt.plot(x, beta.pdf(x, y[-1] + 1, n[-1] - y[-1] + 1), 'r')
plt.legend((lines[0], line1),
(r'Posterior of $\theta_j$', r'Posterior of $\theta_{71}$'))
plt.yticks(())
plt.title('separate model')
# pooled
plt.subplot(2, 1, 2)
plt.plot(x, beta.pdf(x, y.sum() + 1, n.sum() - y.sum() + 1),
linewidth=2, label=(r'Posterior of common $\theta$'))
plt.legend()
plt.yticks(())
plt.xlabel(r'$\theta$', fontsize=20)
plt.title('pooled model')
# compute the marginal posterior of alpha and beta in the hierarchical model on a grid
A = np.linspace(0.5, 6, 100)
B = np.linspace(3, 33, 100)
# calculated in logarithms for numerical accuracy
lp = (
- 5/2 * np.log(A + B[:,None])
+ np.sum(
gammaln(A + B[:,None])
- gammaln(A)
- gammaln(B[:,None])
+ gammaln(A + y[:,None,None])
+ gammaln(B[:,None] + (n - y)[:,None,None])
- gammaln(A + B[:,None] + n[:,None,None]),
axis=0
)
)
# subtract the maximum value to avoid over/underflow in exponentiation
lp -= lp.max()
p = np.exp(lp)
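# Illustrative aside (not part of the original demo): the subtract-max trick
# above is the standard safe way to exponentiate log-densities. With the toy
# values below, np.exp alone would underflow every entry to zero.
_lp_toy = np.array([-1000., -1001., -1002.])
_p_toy = np.exp(_lp_toy - _lp_toy.max())  # finite, proportional to exp(_lp_toy)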
# plot the marginal posterior
fig = plt.figure()
plt.imshow(p, origin='lower', aspect='auto', extent=(A[0], A[-1], B[0], B[-1]))
plt.xlabel(r'$\alpha$', fontsize=20)
plt.ylabel(r'$\beta$', fontsize=20)
plt.title('The marginal posterior of alpha and beta in hierarchical model')
# sample from the posterior grid of alpha and beta
nsamp = 1000
samp_indices = np.unravel_index(
np.random.choice(p.size, size=nsamp, p=p.ravel()/p.sum()),
p.shape
)
samp_A = A[samp_indices[1]]
samp_B = B[samp_indices[0]]
# add random jitter, see BDA3 p. 76
samp_A += (np.random.rand(nsamp) - 0.5) * (A[1]-A[0])
samp_B += (np.random.rand(nsamp) - 0.5) * (B[1]-B[0])
# Plot samples from the distribution of distributions Beta(alpha,beta),
# that is, plot Beta(alpha,beta) using the posterior samples of alpha and beta
fig = plt.figure(figsize=(8,10))
plt.subplot(2, 1, 1)
plt.plot(x, beta.pdf(x[:,None], samp_A[:20], samp_B[:20]), linewidth=1)
plt.yticks(())
plt.title(r'Posterior samples from the distribution of distributions '
r'Beta($\alpha$,$\beta$)')
# The average of the above distributions is the predictive distribution for a
# new theta, and also the prior distribution for theta_j.
# Plot this.
plt.subplot(2, 1, 2)
plt.plot(x, np.mean(beta.pdf(x, samp_A[:,None], samp_B[:,None]), axis=0))
plt.yticks(())
plt.xlabel(r'$\theta$', fontsize=20)
plt.title(r'Predictive distribution for a new $\theta$ '
r'and prior for $\theta_j$')
# And finally compare the separate model and hierarchical model
plt.figure(figsize=(8,10))
x = np.linspace(0, 1, 250)
# first plot the separate model (same as above)
plt.subplot(2, 1, 1)
# note that for clarity only every 7th distribution is plotted
plt.plot(x, beta.pdf(x[:,None], y[7:-1:7] + 1, n[7:-1:7] - y[7:-1:7] + 1),
linewidth=1)
# highlight the last line
plt.plot(x, beta.pdf(x, y[-1] + 1, n[-1] - y[-1] + 1), 'r')
plt.yticks(())
plt.title('separate model')
# And the hierarchical model. Note that these marginal posteriors for theta_j
# are narrower than in the separate model case, because information is
# borrowed from the other theta_j's.
plt.subplot(2, 1, 2)
# note that for clarity only every 7th distribution is plotted
lines = plt.plot(
x,
np.mean(
beta.pdf(
x[:,None],
y[7::7] + samp_A[:,None,None],
n[7::7] - y[7::7] + samp_B[:,None,None]
),
axis=0
),
linewidth=1,
)
# highlight the last line
lines[-1].set_linewidth(2)
lines[-1].set_color('r')
plt.yticks(())
plt.xlabel(r'$\theta$', fontsize=20)
plt.title('hierarchical model')
plt.show()
| gpl-3.0 |
RPGOne/Skynet | scikit-learn-c604ac39ad0e5b066d964df3e8f31ba7ebda1e0e/sklearn/tests/test_dummy.py | 3 | 11091 | import warnings
import numpy as np
from sklearn.base import clone
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.dummy import DummyClassifier, DummyRegressor
def _check_predict_proba(clf, X, y):
proba = clf.predict_proba(X)
with warnings.catch_warnings():
warnings.simplefilter("ignore")
# We know that we can have division by zero
log_proba = clf.predict_log_proba(X)
y = np.atleast_1d(y)
if y.ndim == 1:
y = np.reshape(y, (-1, 1))
n_outputs = y.shape[1]
n_samples = len(X)
if n_outputs == 1:
proba = [proba]
log_proba = [log_proba]
for k in range(n_outputs):
assert_equal(proba[k].shape[0], n_samples)
assert_equal(proba[k].shape[1], len(np.unique(y[:, k])))
assert_array_equal(proba[k].sum(axis=1), np.ones(len(X)))
with warnings.catch_warnings():
warnings.simplefilter("ignore")
# We know that we can have division by zero
assert_array_equal(np.log(proba[k]), log_proba[k])
def _check_behavior_2d(clf):
# 1d case
X = np.array([[0], [0], [0], [0]]) # ignored
y = np.array([1, 2, 1, 1])
est = clone(clf)
est.fit(X, y)
y_pred = est.predict(X)
assert_equal(y.shape, y_pred.shape)
# 2d case
y = np.array([[1, 0],
[2, 0],
[1, 0],
[1, 3]])
est = clone(clf)
est.fit(X, y)
y_pred = est.predict(X)
assert_equal(y.shape, y_pred.shape)
def _check_behavior_2d_for_constant(clf):
# 2d case only
X = np.array([[0], [0], [0], [0]]) # ignored
y = np.array([[1, 0, 5, 4, 3],
[2, 0, 1, 2, 5],
[1, 0, 4, 5, 2],
[1, 3, 3, 2, 0]])
est = clone(clf)
est.fit(X, y)
y_pred = est.predict(X)
assert_equal(y.shape, y_pred.shape)
def _check_equality_regressor(statistic, y_learn, y_pred_learn,
y_test, y_pred_test):
assert_array_equal(np.tile(statistic, (y_learn.shape[0], 1)),
y_pred_learn)
assert_array_equal(np.tile(statistic, (y_test.shape[0], 1)),
y_pred_test)
def test_most_frequent_strategy():
X = [[0], [0], [0], [0]] # ignored
y = [1, 2, 1, 1]
clf = DummyClassifier(strategy="most_frequent", random_state=0)
clf.fit(X, y)
assert_array_equal(clf.predict(X), np.ones(len(X)))
_check_predict_proba(clf, X, y)
def test_most_frequent_strategy_multioutput():
X = [[0], [0], [0], [0]] # ignored
y = np.array([[1, 0],
[2, 0],
[1, 0],
[1, 3]])
n_samples = len(X)
clf = DummyClassifier(strategy="most_frequent", random_state=0)
clf.fit(X, y)
assert_array_equal(clf.predict(X),
np.hstack([np.ones((n_samples, 1)),
np.zeros((n_samples, 1))]))
_check_predict_proba(clf, X, y)
_check_behavior_2d(clf)
def test_stratified_strategy():
X = [[0]] * 5 # ignored
y = [1, 2, 1, 1, 2]
clf = DummyClassifier(strategy="stratified", random_state=0)
clf.fit(X, y)
X = [[0]] * 500
y_pred = clf.predict(X)
p = np.bincount(y_pred) / float(len(X))
assert_almost_equal(p[1], 3. / 5, decimal=1)
assert_almost_equal(p[2], 2. / 5, decimal=1)
_check_predict_proba(clf, X, y)
def test_stratified_strategy_multioutput():
X = [[0]] * 5 # ignored
y = np.array([[2, 1],
[2, 2],
[1, 1],
[1, 2],
[1, 1]])
clf = DummyClassifier(strategy="stratified", random_state=0)
clf.fit(X, y)
X = [[0]] * 500
y_pred = clf.predict(X)
for k in range(y.shape[1]):
p = np.bincount(y_pred[:, k]) / float(len(X))
assert_almost_equal(p[1], 3. / 5, decimal=1)
assert_almost_equal(p[2], 2. / 5, decimal=1)
_check_predict_proba(clf, X, y)
_check_behavior_2d(clf)
def test_uniform_strategy():
X = [[0]] * 4 # ignored
y = [1, 2, 1, 1]
clf = DummyClassifier(strategy="uniform", random_state=0)
clf.fit(X, y)
X = [[0]] * 500
y_pred = clf.predict(X)
p = np.bincount(y_pred) / float(len(X))
assert_almost_equal(p[1], 0.5, decimal=1)
assert_almost_equal(p[2], 0.5, decimal=1)
_check_predict_proba(clf, X, y)
def test_uniform_strategy_multioutput():
X = [[0]] * 4 # ignored
y = np.array([[2, 1],
[2, 2],
[1, 2],
[1, 1]])
clf = DummyClassifier(strategy="uniform", random_state=0)
clf.fit(X, y)
X = [[0]] * 500
y_pred = clf.predict(X)
for k in range(y.shape[1]):
p = np.bincount(y_pred[:, k]) / float(len(X))
assert_almost_equal(p[1], 0.5, decimal=1)
assert_almost_equal(p[2], 0.5, decimal=1)
_check_predict_proba(clf, X, y)
_check_behavior_2d(clf)
def test_string_labels():
X = [[0]] * 5
y = ["paris", "paris", "tokyo", "amsterdam", "berlin"]
clf = DummyClassifier(strategy="most_frequent")
clf.fit(X, y)
assert_array_equal(clf.predict(X), ["paris"] * 5)
def test_classifier_exceptions():
clf = DummyClassifier(strategy="unknown")
assert_raises(ValueError, clf.fit, [], [])
assert_raises(ValueError, clf.predict, [])
assert_raises(ValueError, clf.predict_proba, [])
def test_mean_strategy_regressor():
random_state = np.random.RandomState(seed=1)
X = [[0]] * 4 # ignored
y = random_state.randn(4)
reg = DummyRegressor()
reg.fit(X, y)
assert_array_equal(reg.predict(X), [np.mean(y)] * len(X))
def test_mean_strategy_multioutput_regressor():
random_state = np.random.RandomState(seed=1)
X_learn = random_state.randn(10, 10)
y_learn = random_state.randn(10, 5)
mean = np.mean(y_learn, axis=0).reshape((1, -1))
X_test = random_state.randn(20, 10)
y_test = random_state.randn(20, 5)
# Correctness oracle
est = DummyRegressor()
est.fit(X_learn, y_learn)
y_pred_learn = est.predict(X_learn)
y_pred_test = est.predict(X_test)
_check_equality_regressor(mean, y_learn, y_pred_learn, y_test, y_pred_test)
_check_behavior_2d(est)
def test_regressor_exceptions():
reg = DummyRegressor()
assert_raises(ValueError, reg.predict, [])
def test_median_strategy_regressor():
random_state = np.random.RandomState(seed=1)
X = [[0]] * 5 # ignored
y = random_state.randn(5)
reg = DummyRegressor(strategy="median")
reg.fit(X, y)
assert_array_equal(reg.predict(X), [np.median(y)] * len(X))
def test_median_strategy_multioutput_regressor():
random_state = np.random.RandomState(seed=1)
X_learn = random_state.randn(10, 10)
y_learn = random_state.randn(10, 5)
median = np.median(y_learn, axis=0).reshape((1, -1))
X_test = random_state.randn(20, 10)
y_test = random_state.randn(20, 5)
# Correctness oracle
est = DummyRegressor(strategy="median")
est.fit(X_learn, y_learn)
y_pred_learn = est.predict(X_learn)
y_pred_test = est.predict(X_test)
_check_equality_regressor(
median, y_learn, y_pred_learn, y_test, y_pred_test)
_check_behavior_2d(est)
def test_constant_strategy_regressor():
random_state = np.random.RandomState(seed=1)
X = [[0]] * 5 # ignored
y = random_state.randn(5)
reg = DummyRegressor(strategy="constant", constant=[43])
reg.fit(X, y)
assert_array_equal(reg.predict(X), [43] * len(X))
reg = DummyRegressor(strategy="constant", constant=43)
reg.fit(X, y)
assert_array_equal(reg.predict(X), [43] * len(X))
def test_constant_strategy_multioutput_regressor():
random_state = np.random.RandomState(seed=1)
X_learn = random_state.randn(10, 10)
y_learn = random_state.randn(10, 5)
# test with 2d array
constants = random_state.randn(5)
X_test = random_state.randn(20, 10)
y_test = random_state.randn(20, 5)
# Correctness oracle
est = DummyRegressor(strategy="constant", constant=constants)
est.fit(X_learn, y_learn)
y_pred_learn = est.predict(X_learn)
y_pred_test = est.predict(X_test)
_check_equality_regressor(
constants, y_learn, y_pred_learn, y_test, y_pred_test)
_check_behavior_2d_for_constant(est)
def test_y_mean_attribute_regressor():
X = [[0]] * 5
y = [1, 2, 4, 6, 8]
# when strategy = 'mean'
est = DummyRegressor(strategy='mean')
est.fit(X, y)
assert_equal(est.constant_, np.mean(y))
def test_unknown_strategy_regressor():
X = [[0]] * 5
y = [1, 2, 4, 6, 8]
est = DummyRegressor(strategy='gona')
assert_raises(ValueError, est.fit, X, y)
def test_constants_not_specified_regressor():
X = [[0]] * 5
y = [1, 2, 4, 6, 8]
est = DummyRegressor(strategy='constant')
assert_raises(TypeError, est.fit, X, y)
def test_constant_size_multioutput_regressor():
random_state = np.random.RandomState(seed=1)
X = random_state.randn(10, 10)
y = random_state.randn(10, 5)
est = DummyRegressor(strategy='constant', constant=[1, 2, 3, 4])
assert_raises(ValueError, est.fit, X, y)
def test_constant_strategy():
X = [[0], [0], [0], [0]] # ignored
y = [2, 1, 2, 2]
clf = DummyClassifier(strategy="constant", random_state=0, constant=1)
clf.fit(X, y)
assert_array_equal(clf.predict(X), np.ones(len(X)))
_check_predict_proba(clf, X, y)
X = [[0], [0], [0], [0]] # ignored
y = ['two', 'one', 'two', 'two']
clf = DummyClassifier(strategy="constant", random_state=0, constant='one')
clf.fit(X, y)
assert_array_equal(clf.predict(X), np.array(['one'] * 4))
_check_predict_proba(clf, X, y)
def test_constant_strategy_multioutput():
X = [[0], [0], [0], [0]] # ignored
y = np.array([[2, 3],
[1, 3],
[2, 3],
[2, 0]])
n_samples = len(X)
clf = DummyClassifier(strategy="constant", random_state=0,
constant=[1, 0])
clf.fit(X, y)
assert_array_equal(clf.predict(X),
np.hstack([np.ones((n_samples, 1)),
np.zeros((n_samples, 1))]))
_check_predict_proba(clf, X, y)
def test_constant_strategy_exceptions():
X = [[0], [0], [0], [0]] # ignored
y = [2, 1, 2, 2]
clf = DummyClassifier(strategy="constant", random_state=0)
assert_raises(ValueError, clf.fit, X, y)
clf = DummyClassifier(strategy="constant", random_state=0,
constant=[2, 0])
assert_raises(ValueError, clf.fit, X, y)
def test_classification_sample_weight():
X = [[0], [0], [1]]
y = [0, 1, 0]
sample_weight = [0.1, 1., 0.1]
clf = DummyClassifier().fit(X, y, sample_weight)
assert_array_almost_equal(clf.class_prior_, [0.2 / 1.2, 1. / 1.2])
| bsd-3-clause |
js850/pele | pele/gui/ui/dgraph_dlg.py | 1 | 25464 | import sys
from PyQt4 import QtGui
from PyQt4.QtGui import QApplication, QWidget, QColorDialog, QInputDialog
from PyQt4.QtCore import pyqtSlot
import networkx as nx
import matplotlib.colors as col
import dgraph_browser
from pele.utils.disconnectivity_graph import DisconnectivityGraph, database2graph
from pele.storage import Database, TransitionState
from pele.utils.events import Signal
from pele.rates import RatesLinalg, RateCalculation, compute_committors
def check_thermodynamic_info(transition_states):
"""return False if any transition state or minimum does not have pgorder or fvib"""
def myiter(tslist):
for ts in tslist:
yield ts
yield ts.minimum1
yield ts.minimum2
for mts in myiter(transition_states):
if not mts.invalid:
if mts.fvib is None or mts.pgorder is None:
return False
return True
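# Illustrative sketch (not part of the original module): minimal stand-in
# objects showing what check_thermodynamic_info expects. In real use the
# transition states come from a pele Database.
def _demo_check_thermodynamic_info():
    class _Fake(object):
        invalid = False
        fvib = 1.0
        pgorder = 2
    ts = _Fake()
    ts.minimum1 = _Fake()
    ts.minimum2 = _Fake()
    return check_thermodynamic_info([ts])  # -> True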
def minimum_energy_path(graph, m1, m2):
"""find the minimum energy path between m1 and m2 and color the dgraph appropriately"""
# add weight attribute to the graph
# note: this is not actually the minimum energy path.
# This minimizes the sum of energies along the path
# TODO: use minimum spanning tree to find the minimum energy path
emin = min(( m.energy for m in graph.nodes_iter() ))
for u, v, data in graph.edges_iter(data=True):
data["weight"] = data["ts"].energy - emin
path = nx.shortest_path(graph, m1, m2, weight="weight")
return path
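# Illustrative sketch (not part of the original module): building the graph
# from a database and extracting the path between its two lowest minima.
# "some.db" is a hypothetical file name.
def _demo_minimum_energy_path():
    db = Database("some.db", createdb=False)
    graph = database2graph(db)
    minima = db.minima()  # ordered by energy in pele
    return minimum_energy_path(graph, minima[0], minima[1])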
class TreeLeastCommonAncestor(object):
"""Find the least common ancestor to a set of trees"""
def __init__(self, trees):
self.start_trees = trees
self.run()
def run(self):
# find all common ancestors
common_ancestors = set()
for tree in self.start_trees:
parents = set(tree.get_ancestors())
parents.add(tree)
if len(common_ancestors) == 0:
common_ancestors.update(parents)
else:
# remove all elements that are not common
common_ancestors.intersection_update(parents)
assert len(common_ancestors) > 0
if len(common_ancestors) == 0:
raise Exception("the trees don't have any common ancestors")
# sort the common ancestors by the number of ancestors each has
common_ancestors = list(common_ancestors)
if len(common_ancestors) > 1:
common_ancestors.sort(key=lambda tree: len(list(tree.get_ancestors())))
# the least common ancestor is the one with the most ancestors
self.least_common_ancestor = common_ancestors[-1]
return self.least_common_ancestor
def get_all_paths_to_common_ancestor(self):
"""return all the ancestors of all the input trees up to the least common ancestor"""
trees = set(self.start_trees)
for tree in self.start_trees:
for parent in tree.get_ancestors():
trees.add(parent)
if parent == self.least_common_ancestor:
break
return trees
# for tree in common_ancestors:
# for parent in tree.get_ancestors():
# if parent in common_ancestors
#
# return iter(common_ancestors).next()
class LabelMinimumAction(QtGui.QAction):
"""This action will create a dialog box to label a minimum"""
def __init__(self, minimum, parent=None):
QtGui.QAction.__init__(self, "add label", parent)
self.parent = parent
self.minimum = minimum
self.triggered.connect(self.__call__)
def __call__(self, val):
dialog = QInputDialog(parent=self.parent)
# dialog.setLabelText("")
dialog.setLabelText("set label for minimum: " + str(self.minimum.energy))
dialog.setInputMode(0)
dialog.exec_()
if dialog.result():
label = dialog.textValue()
self.parent._minima_labels[self.minimum] = label
class ColorPathAction(QtGui.QAction):
"""this action will color the minimum energy path to minimum1"""
def __init__(self, minimum1, minimum2, parent=None):
QtGui.QAction.__init__(self, "show path to %d" % (minimum2._id), parent)
self.parent = parent
self.minimum1 = minimum1
self.minimum2 = minimum2
self.triggered.connect(self.__call__)
def __call__(self, val):
self.parent._color_minimum_energy_path(self.minimum1, self.minimum2)
class ColorMFPTAction(QtGui.QAction):
"""this action will color the minima by mean first passage times to minimum1"""
def __init__(self, minimum1, parent=None):
QtGui.QAction.__init__(self, "color by mfpt", parent)
self.parent = parent
self.minimum1 = minimum1
self.triggered.connect(self.__call__)
def __call__(self, val):
dialog = QInputDialog(parent=self.parent)
# dialog.setLabelText("")
dialog.setLabelText("Temperature for MFPT calculation")
dialog.setInputMode(2)
dialog.setDoubleValue(1.)
dialog.exec_()
if dialog.result():
T = dialog.doubleValue()
self.parent._color_by_mfpt(self.minimum1, T=T)
class ColorCommittorAction(QtGui.QAction):
"""this action will color the graph by committor probabilities"""
def __init__(self, minimum1, minimum2, parent=None):
QtGui.QAction.__init__(self, "color by committor %d" % (minimum2._id), parent)
self.parent = parent
self.minimum1 = minimum1
self.minimum2 = minimum2
self.triggered.connect(self.__call__)
def __call__(self, val):
dialog = QInputDialog(parent=self.parent)
# dialog.setLabelText("")
dialog.setLabelText("Temperature for committor calculation")
dialog.setInputMode(2)
dialog.setDoubleValue(1.)
dialog.exec_()
if dialog.result():
T = dialog.doubleValue()
self.parent._color_by_committor(self.minimum1, self.minimum2, T=T)
class LayoutByCommittorAction(QtGui.QAction):
"""this action will color the graph by committor probabilities"""
def __init__(self, minimum1, minimum2, parent=None):
QtGui.QAction.__init__(self, "layout by committor %d" % (minimum2._id), parent)
self.parent = parent
self.minimum1 = minimum1
self.minimum2 = minimum2
self.triggered.connect(self.__call__)
def __call__(self, val):
dialog = QInputDialog(parent=self.parent)
# dialog.setLabelText("")
dialog.setLabelText("Temperature for committor calculation")
dialog.setInputMode(2)
dialog.setDoubleValue(1.)
dialog.exec_()
if dialog.result():
T = dialog.doubleValue()
self.parent._layout_by_committor(self.minimum1, self.minimum2, T=T)
class DGraphWidget(QWidget):
"""
dialog for showing and modifying the disconnectivity graph
Parameters
----------
database : Database object
graph : networkx Graph, optional
you can bypass the database and pass a graph directly. if you pass the graph,
pass None as the database
params : dict
initialize the values for the disconnectivity graph
"""
def __init__(self, database, graph=None, params={}, parent=None):
super(DGraphWidget, self).__init__(parent=parent)
self.database = database
self.graph = graph
self.ui = dgraph_browser.Ui_Form()
self.ui.setupUi(self)
self.canvas = self.ui.widget.canvas
# self.ui.wgt_mpl_toolbar = NavigationToolbar()
# self.toolbar = self.
self.input_params = params.copy()
self.params = {}
self.set_defaults()
self.minimum_selected = Signal()
# self.minimum_selected(minim)
self._selected_minimum = None
# self.rebuild_disconnectivity_graph()
self.colour_tree = []
self.tree_selected = None
self._tree_cid = None
self._minima_cid = None
self._minima_labels = dict()
# # populate the dropdown list with the color names
# self._colors = sorted(col.cnames.keys())
# self.ui.comboBox_colour.addItems(self._colors)
# [self.ui.comboBox_colour.addItem(s) for s in self._colors]
# self.ui.comboBox_colour.activated[str].connect(self._color_tree)
def _set_checked(self, keyword, default):
"""utility to set the default values for check boxes
objects must have the name chkbx_keyword
"""
if keyword in self.input_params:
v = self.input_params[keyword]
else:
v = default
line = "self.ui.chkbx_%s.setChecked(bool(%d))" % (keyword, v)
exec(line)
def _set_lineEdit(self, keyword, default=None):
"""utility to set the default values for lineEdit objects
objects must have the name lineEdit_keyword
"""
if keyword in self.input_params:
v = self.input_params[keyword]
else:
v = default
if v is not None:
line = "self.ui.lineEdit_%s.setText(str(%s))" % (keyword, str(v))
exec(line)
def set_defaults(self):
self._set_checked("center_gmin", True)
self._set_checked("show_minima", True)
self._set_checked("order_by_energy", False)
self._set_checked("order_by_basin_size", True)
self._set_checked("include_gmin", True)
self._set_checked("show_trees", False)
# self.ui.chkbx_show_minima.setChecked(True)
# self.ui.chkbx_order_by_energy.setChecked(False)
# self.ui.chkbx_order_by_basin_size.setChecked(True)
# self.ui.chkbx_include_gmin.setChecked(True)
self._set_lineEdit("Emax")
self._set_lineEdit("subgraph_size")
self._set_lineEdit("nlevels")
# self.line_width = 0.5
self._set_lineEdit("linewidth", default=0.5)
def _get_input_parameters(self):
self.params = self.input_params.copy()
if "show_minima" in self.params:
self.params.pop("show_minima")
params = self.params
Emax = self.ui.lineEdit_Emax.text()
if len(Emax) > 0:
self.params["Emax"] = float(Emax)
subgraph_size = self.ui.lineEdit_subgraph_size.text()
if len(subgraph_size) > 0:
self.params["subgraph_size"] = int(subgraph_size)
nlevels = self.ui.lineEdit_nlevels.text()
if len(nlevels) > 0:
self.params["nlevels"] = int(nlevels)
offset = self.ui.lineEdit_offset.text()
if len(offset) > 0:
params["node_offset"] = float(offset)
line_width = self.ui.lineEdit_linewidth.text()
if len(line_width) > 0:
self.line_width = float(line_width)
self.title = self.ui.lineEdit_title.text()
params["center_gmin"] = self.ui.chkbx_center_gmin.isChecked()
self.show_minima = self.ui.chkbx_show_minima.isChecked()
params["order_by_energy"] = self.ui.chkbx_order_by_energy.isChecked()
params["order_by_basin_size"] = self.ui.chkbx_order_by_basin_size.isChecked()
params["include_gmin"] = self.ui.chkbx_include_gmin.isChecked()
self.show_trees = self.ui.chkbx_show_trees.isChecked()
# @pyqtSlot(str)
# def _color_tree(self, colour):
# if self.tree_selected is not None:
# c = col.hex2color(col.cnames[str(colour)])
# print "coloring tree", colour, self.tree_selected
#
# for tree in self.tree_selected.get_all_trees():
# tree.data["colour"] = c
#
# self.redraw_disconnectivity_graph()
## self.tree_selected = None
@pyqtSlot()
def on_btnRedraw_clicked(self):
self.redraw_disconnectivity_graph()
@pyqtSlot()
def on_btnRebuild_clicked(self):
self.rebuild_disconnectivity_graph()
def redraw_disconnectivity_graph(self):
self.params = self._get_input_parameters()
self._draw_disconnectivity_graph(self.show_minima, self.show_trees)
def rebuild_disconnectivity_graph(self):
self._get_input_parameters()
self._minima_labels = dict()
self._build_disconnectivity_graph(**self.params)
self._draw_disconnectivity_graph(self.show_minima, self.show_trees)
def _build_disconnectivity_graph(self, **params):
if self.database is not None:
db = self.database
apply_Emax = "Emax" in params and "T" not in params
if apply_Emax:
self.graph = database2graph(db, Emax=params['Emax'])
else:
self.graph = database2graph(db)
dg = DisconnectivityGraph(self.graph, **params)
dg.calculate()
self.dg = dg
def _get_tree_layout(self, tree):
treelist = []
xlist = []
energies = []
for tree in tree.get_all_trees():
xlist.append(tree.data["x"])
treelist.append(tree)
if tree.is_leaf():
energies.append(tree.data["minimum"].energy)
else:
energies.append(tree.data["ethresh"])
return treelist, xlist, energies
def _on_pick_tree(self, event):
"""a matplotlib callback function for when a tree is clicked on"""
if event.artist != self._treepoints:
# print "you clicked on something other than a node"
return True
ind = event.ind[0]
self.tree_selected = self._tree_list[ind]
print "tree clicked on", self.tree_selected
# launch a color selector dialog and color
# all subtrees by the selected color
color_dialog = QColorDialog(parent=self)
color_dialog.exec_()
if color_dialog.result():
color = color_dialog.selectedColor()
rgba = color.getRgbF() # red green blue alpha
print "color", rgba
rgb = rgba[:3]
for tree in self.tree_selected.get_all_trees():
tree.data["colour"] = rgb
self.redraw_disconnectivity_graph()
def _color_minimum_energy_path(self, m1, m2):
"""find the minimum energy path between m1 and m2 and color the dgraph appropriately"""
# add weight attribute to the graph
# note: this is not actually the minimum energy path.
# This minimizes the sum of energies along the path
# TODO: use minimum spanning tree to find the minimum energy path
path = minimum_energy_path(self.graph, m1, m2)
# emin = min(( m.energy for m in self.graph.nodes_iter() ))
# for u, v, data in self.graph.edges_iter(data=True):
# data["weight"] = data["ts"].energy - emin
# path = nx.shortest_path(self.graph, m1, m2, weight="weight")
print "there are", len(path), "minima in the path from", m1._id, "to", m2._id
# color all trees up to the least common ancestor in the dgraph
trees = [self.dg.minimum_to_leave[m] for m in path]
ancestry = TreeLeastCommonAncestor(trees)
all_trees = ancestry.get_all_paths_to_common_ancestor()
# remove the least common ancestor so the coloring doesn't go to higher energies
all_trees.remove(ancestry.least_common_ancestor)
# color the trees
for tree in all_trees:
tree.data["colour"] = (1., 0., 0.)
self.redraw_disconnectivity_graph()
def _color_by_mfpt(self, min1, T=1.):
print "coloring by the mean first passage time to get to minimum", min1._id
# get a list of transition states in the same cluster as min1
edges = nx.bfs_edges(self.graph, min1)
transition_states = [ self.graph.get_edge_data(u, v)["ts"] for u, v in edges ]
if not check_thermodynamic_info(transition_states):
raise Exception("The thermodynamic information is not yet computed")
# get an arbitrary second minimum2
for ts in transition_states:
if ts.minimum2 != min1:
min2 = ts.minimum2
break
A = [min1]
B = [min2]
rcalc = RatesLinalg(transition_states, A, B, T=T)
rcalc.compute_rates()
mfptimes = rcalc.get_mfptimes()
tmax = max(mfptimes.itervalues())
def get_mfpt(m):
try:
return mfptimes[m]
except KeyError:
return tmax
self.dg.color_by_value(get_mfpt)
self.redraw_disconnectivity_graph()
def _color_by_committor(self, min1, min2, T=1.):
print "coloring by the probability that a trajectory gets to minimum", min1._id, "before", min2._id
# get a list of transition states in the same cluster as min1
edges = nx.bfs_edges(self.graph, min1)
transition_states = [ self.graph.get_edge_data(u, v)["ts"] for u, v in edges ]
if not check_thermodynamic_info(transition_states):
raise Exception("The thermodynamic information is not yet computed")
A = [min2]
B = [min1]
committors = compute_committors(transition_states, A, B, T=T)
def get_committor(m):
try:
return committors[m]
except KeyError:
return 1.
self.dg.color_by_value(get_committor)
self.redraw_disconnectivity_graph()
def _layout_by_committor(self, min1, min2, T=1.):
print "coloring by the probability that a trajectory gets to minimum", min1._id, "before", min2._id
# get a list of transition states in the same cluster as min1
edges = nx.bfs_edges(self.graph, min1)
transition_states = [ self.graph.get_edge_data(u, v)["ts"] for u, v in edges ]
if not check_thermodynamic_info(transition_states):
raise Exception("The thermodynamic information is not yet computed")
A = [min2]
B = [min1]
committors = compute_committors(transition_states, A, B, T=T)
print "maximum committor", max(committors.values())
print "minimum committor", min(committors.values())
print "number of committors near 1", len([v for v in committors.values() if v > 1.-1e-4])
print "number of committors equal to 1", len([v for v in committors.values() if v == 1.])
def get_committor(m):
try:
return committors[m]
except KeyError:
return 1.
self.dg.get_value = get_committor
self.dg._layout_x_axis(self.dg.tree_graph)
self.dg.color_by_value(get_committor)
self.redraw_disconnectivity_graph()
def _on_left_click_minimum(self, minimum):
print "you clicked on minimum with id", minimum._id, "and energy", minimum.energy
self.minimum_selected(minimum)
self._selected_minimum = minimum
self.ui.label_selected_minimum.setText("%g (%d)" % (minimum.energy, minimum._id))
def _on_right_click_minimum(self, minimum):
"""create a menu with the list of available actions"""
menu = QtGui.QMenu("list menu", parent=self)
action1 = LabelMinimumAction(minimum, parent=self)
menu.addAction(action1)
if self._selected_minimum is not None:
action2 = ColorPathAction(minimum, self._selected_minimum, parent=self)
menu.addAction(action2)
menu.addAction(ColorCommittorAction(minimum, self._selected_minimum, parent=self))
menu.addAction(LayoutByCommittorAction(minimum, self._selected_minimum, parent=self))
action3 = ColorMFPTAction(minimum, parent=self)
menu.addAction(action3)
menu.exec_(QtGui.QCursor.pos())
def _on_pick_minimum(self, event):
"""matplotlib event called when a minimum is clicked on"""
if event.artist != self._minima_points:
# print "you clicked on something other than a node"
return True
ind = event.ind[0]
min1 = self._minima_list[ind]
if event.mouseevent.button == 3:
self._on_right_click_minimum(min1)
else:
self._on_left_click_minimum(min1)
def _draw_disconnectivity_graph(self, show_minima=True, show_trees=False):
ax = self.canvas.axes
ax.clear()
ax.hold(True)
dg = self.dg
# plot the lines and set up the rest of the plot using the built in function
# this might change some of the minima x positions, so this has to go before
# anything dependent on those positions
dg.plot(axes=ax, show_minima=False, linewidth=self.line_width,
title=self.title)
if len(self._minima_labels) > 0:
dg.label_minima(self._minima_labels, axes=ax)
self.ui.widget.canvas.fig.tight_layout()
# if show_trees
if self._tree_cid is not None:
self.canvas.mpl_disconnect(self._tree_cid)
self._tree_cid = None
if show_trees:
# draw the nodes
tree_list, x_pos, energies = self._get_tree_layout(dg.tree_graph)
treepoints = ax.scatter(x_pos, energies, picker=5, color='red', alpha=0.5)
self._treepoints = treepoints
self._tree_list = tree_list
# def on_pick_tree(event):
# if event.artist != treepoints:
# # print "you clicked on something other than a node"
# return True
# ind = event.ind[0]
# self.tree_selected = tree_list[ind]
# print "tree clicked on", self.tree_selected
#
# color_dialog = QColorDialog(parent=self)
# color_dialog.exec_()
# color = color_dialog.selectedColor()
# rgba = color.getRgbF() # red green blue alpha
# print "color", rgba
# rgb = rgba[:3]
# for tree in self.tree_selected.get_all_trees():
# tree.data["colour"] = rgb
self._tree_cid = self.canvas.mpl_connect('pick_event', self._on_pick_tree)
#draw minima as points and make them interactive
if self._minima_cid is not None:
self.canvas.mpl_disconnect(self._minima_cid)
self._minima_cid = None
if show_minima:
xpos, minima = dg.get_minima_layout()
energies = [m.energy for m in minima]
self._minima_points = ax.scatter(xpos, energies, picker=5)
self._minima_list = minima
# def on_pick_min(event):
# if event.artist != points:
# # print "you clicked on something other than a node"
# return True
# ind = event.ind[0]
# min1 = minima[ind]
# print "you clicked on minimum with id", min1._id, "and energy", min1.energy
# self.minimum_selected(min1)
self._minima_cid = self.canvas.mpl_connect('pick_event', self._on_pick_minimum)
self.canvas.draw()
class DGraphDialog(QtGui.QMainWindow):
def __init__(self, database, graph=None, params={}, parent=None, app=None):
super(DGraphDialog, self).__init__(parent=parent)
self.setWindowTitle("Disconnectivity graph")
self.dgraph_widget = DGraphWidget(database, graph, params, parent=self)
self.setCentralWidget(self.dgraph_widget)
def rebuild_disconnectivity_graph(self):
self.dgraph_widget.rebuild_disconnectivity_graph()
def reduced_db2graph(db, Emax):
    '''
    Make a networkx graph from a database, including only transition states
    with energy <= Emax.
    '''
from pele.storage.database import Minimum
g = nx.Graph()
# js850> It's not strictly necessary to add the minima explicitly here,
# but for some reason it is much faster if you do (factor of 2). Even
# if this means there are many more minima in the graph. I'm not sure
# why this is. This step is already often the bottleneck of the d-graph
# calculation.
minima = db.session.query(Minimum).filter(Minimum.energy <= Emax)
g.add_nodes_from(minima)
    # If we order by energy and add the transition states with the largest
    # energy first, we keep the smallest-energy transition state in the case
    # of duplicates (later add_edge calls overwrite earlier edge data).
ts = db.session.query(TransitionState).filter(TransitionState.energy <= Emax)\
.order_by(-TransitionState.energy)
for t in ts:
g.add_edge(t.minimum1, t.minimum2, ts=t)
return g
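# A minimal usage sketch for reduced_db2graph (illustrative only; the energy
# cutoff below is an assumption, not a value taken from this module):
#
#     db = Database("lj31.db", createdb=False)
#     g = reduced_db2graph(db, Emax=-130.0)
#     print(g.number_of_nodes(), g.number_of_edges())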
if __name__ == "__main__":
db = Database("lj31.db", createdb=False)
if len(db.minima()) < 2:
raise Exception("database has no minima")
if True:
from pele.systems import LJCluster
from pele.thermodynamics import get_thermodynamic_information
system = LJCluster(31)
get_thermodynamic_information(system, db, nproc=10)
app = QApplication(sys.argv)
md = DGraphDialog(db)
md.show()
md.rebuild_disconnectivity_graph()
sys.exit(app.exec_())
| gpl-3.0 |
jniediek/mne-python | mne/tests/test_report.py | 3 | 10689 | # Authors: Mainak Jas <[email protected]>
# Teon Brooks <[email protected]>
#
# License: BSD (3-clause)
import glob
import os
import os.path as op
import shutil
import sys
import warnings
from nose.tools import assert_true, assert_equal, assert_raises
from nose.plugins.skip import SkipTest
from mne import Epochs, read_events, pick_types, read_evokeds
from mne.io import read_raw_fif
from mne.datasets import testing
from mne.report import Report
from mne.utils import (_TempDir, requires_mayavi, requires_nibabel,
requires_PIL, run_tests_if_main, slow_test)
from mne.viz import plot_trans
import matplotlib
matplotlib.use('Agg') # for testing don't use X server
data_dir = testing.data_path(download=False)
subjects_dir = op.join(data_dir, 'subjects')
report_dir = op.join(data_dir, 'MEG', 'sample')
raw_fname = op.join(report_dir, 'sample_audvis_trunc_raw.fif')
event_fname = op.join(report_dir, 'sample_audvis_trunc_raw-eve.fif')
cov_fname = op.join(report_dir, 'sample_audvis_trunc-cov.fif')
fwd_fname = op.join(report_dir, 'sample_audvis_trunc-meg-eeg-oct-6-fwd.fif')
trans_fname = op.join(report_dir, 'sample_audvis_trunc-trans.fif')
inv_fname = op.join(report_dir,
'sample_audvis_trunc-meg-eeg-oct-6-meg-inv.fif')
mri_fname = op.join(subjects_dir, 'sample', 'mri', 'T1.mgz')
base_dir = op.realpath(op.join(op.dirname(__file__), '..', 'io', 'tests',
'data'))
evoked_fname = op.join(base_dir, 'test-ave.fif')
# Set our plotters to test mode
warnings.simplefilter('always') # enable b/c these tests throw warnings
@slow_test
@testing.requires_testing_data
@requires_PIL
def test_render_report():
"""Test rendering -*.fif files for mne report."""
tempdir = _TempDir()
raw_fname_new = op.join(tempdir, 'temp_raw.fif')
event_fname_new = op.join(tempdir, 'temp_raw-eve.fif')
cov_fname_new = op.join(tempdir, 'temp_raw-cov.fif')
fwd_fname_new = op.join(tempdir, 'temp_raw-fwd.fif')
inv_fname_new = op.join(tempdir, 'temp_raw-inv.fif')
for a, b in [[raw_fname, raw_fname_new],
[event_fname, event_fname_new],
[cov_fname, cov_fname_new],
[fwd_fname, fwd_fname_new],
[inv_fname, inv_fname_new]]:
shutil.copyfile(a, b)
# create and add -epo.fif and -ave.fif files
epochs_fname = op.join(tempdir, 'temp-epo.fif')
evoked_fname = op.join(tempdir, 'temp-ave.fif')
raw = read_raw_fif(raw_fname_new, add_eeg_ref=False)
picks = pick_types(raw.info, meg='mag', eeg=False) # faster with one type
epochs = Epochs(raw, read_events(event_fname), 1, -0.2, 0.2, picks=picks,
add_eeg_ref=False)
epochs.save(epochs_fname)
epochs.average().save(evoked_fname)
report = Report(info_fname=raw_fname_new, subjects_dir=subjects_dir)
if sys.version.startswith('3.5'): # XXX Some strange MPL/3.5 error...
raise SkipTest('Python 3.5 and mpl have unresolved issues')
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
report.parse_folder(data_path=tempdir, on_error='raise')
assert_true(len(w) >= 1)
assert_true(repr(report))
# Check correct paths and filenames
fnames = glob.glob(op.join(tempdir, '*.fif'))
for fname in fnames:
assert_true(op.basename(fname) in
[op.basename(x) for x in report.fnames])
assert_true(''.join(report.html).find(op.basename(fname)) != -1)
assert_equal(len(report.fnames), len(fnames))
assert_equal(len(report.html), len(report.fnames))
assert_equal(len(report.fnames), len(report))
# Check saving functionality
report.data_path = tempdir
report.save(fname=op.join(tempdir, 'report.html'), open_browser=False)
assert_true(op.isfile(op.join(tempdir, 'report.html')))
assert_equal(len(report.html), len(fnames))
assert_equal(len(report.html), len(report.fnames))
# Check saving same report to new filename
report.save(fname=op.join(tempdir, 'report2.html'), open_browser=False)
assert_true(op.isfile(op.join(tempdir, 'report2.html')))
# Check overwriting file
report.save(fname=op.join(tempdir, 'report.html'), open_browser=False,
overwrite=True)
assert_true(op.isfile(op.join(tempdir, 'report.html')))
# Check pattern matching with multiple patterns
pattern = ['*raw.fif', '*eve.fif']
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
report.parse_folder(data_path=tempdir, pattern=pattern)
assert_true(len(w) >= 1)
assert_true(repr(report))
    fnames = glob.glob(op.join(tempdir, '*raw.fif')) + \
        glob.glob(op.join(tempdir, '*eve.fif'))
for fname in fnames:
assert_true(op.basename(fname) in
[op.basename(x) for x in report.fnames])
assert_true(''.join(report.html).find(op.basename(fname)) != -1)
@testing.requires_testing_data
@requires_mayavi
@requires_PIL
def test_render_add_sections():
"""Test adding figures/images to section."""
from PIL import Image
tempdir = _TempDir()
import matplotlib.pyplot as plt
report = Report(subjects_dir=subjects_dir)
# Check add_figs_to_section functionality
fig = plt.plot([1, 2], [1, 2])[0].figure
report.add_figs_to_section(figs=fig, # test non-list input
captions=['evoked response'], scale=1.2,
image_format='svg')
assert_raises(ValueError, report.add_figs_to_section, figs=[fig, fig],
captions='H')
assert_raises(ValueError, report.add_figs_to_section, figs=fig,
captions=['foo'], scale=0, image_format='svg')
assert_raises(ValueError, report.add_figs_to_section, figs=fig,
captions=['foo'], scale=1e-10, image_format='svg')
# need to recreate because calls above change size
fig = plt.plot([1, 2], [1, 2])[0].figure
# Check add_images_to_section with png and then gif
img_fname = op.join(tempdir, 'testimage.png')
fig.savefig(img_fname)
report.add_images_to_section(fnames=[img_fname],
captions=['evoked response'])
im = Image.open(img_fname)
    img_fname = op.join(tempdir, 'testimage.gif')
im.save(img_fname) # matplotlib does not support gif
report.add_images_to_section(fnames=[img_fname],
captions=['evoked response'])
assert_raises(ValueError, report.add_images_to_section,
fnames=[img_fname, img_fname], captions='H')
assert_raises(ValueError, report.add_images_to_section,
fnames=['foobar.xxx'], captions='H')
evoked = read_evokeds(evoked_fname, condition='Left Auditory',
baseline=(-0.2, 0.0))
fig = plot_trans(evoked.info, trans_fname, subject='sample',
subjects_dir=subjects_dir)
report.add_figs_to_section(figs=fig, # test non-list input
captions='random image', scale=1.2)
assert_true(repr(report))
@slow_test
@testing.requires_testing_data
@requires_mayavi
@requires_nibabel()
def test_render_mri():
"""Test rendering MRI for mne report."""
tempdir = _TempDir()
trans_fname_new = op.join(tempdir, 'temp-trans.fif')
for a, b in [[trans_fname, trans_fname_new]]:
shutil.copyfile(a, b)
report = Report(info_fname=raw_fname,
subject='sample', subjects_dir=subjects_dir)
with warnings.catch_warnings(record=True):
warnings.simplefilter('always')
report.parse_folder(data_path=tempdir, mri_decim=30, pattern='*',
n_jobs=2)
report.save(op.join(tempdir, 'report.html'), open_browser=False)
assert_true(repr(report))
@testing.requires_testing_data
@requires_nibabel()
def test_render_mri_without_bem():
"""Test rendering MRI without BEM for mne report."""
tempdir = _TempDir()
os.mkdir(op.join(tempdir, 'sample'))
os.mkdir(op.join(tempdir, 'sample', 'mri'))
shutil.copyfile(mri_fname, op.join(tempdir, 'sample', 'mri', 'T1.mgz'))
report = Report(info_fname=raw_fname,
subject='sample', subjects_dir=tempdir)
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
report.parse_folder(tempdir)
assert_true(len(w) >= 1)
report.save(op.join(tempdir, 'report.html'), open_browser=False)
@testing.requires_testing_data
@requires_nibabel()
def test_add_htmls_to_section():
"""Test adding html str to mne report."""
report = Report(info_fname=raw_fname,
subject='sample', subjects_dir=subjects_dir)
html = '<b>MNE-Python is AWESOME</b>'
caption, section = 'html', 'html_section'
report.add_htmls_to_section(html, caption, section)
idx = report._sectionlabels.index('report_' + section)
html_compare = report.html[idx]
assert_true(html in html_compare)
assert_true(repr(report))
def test_add_slider_to_section():
"""Test adding a slider with a series of images to mne report."""
tempdir = _TempDir()
from matplotlib import pyplot as plt
report = Report(info_fname=raw_fname,
subject='sample', subjects_dir=subjects_dir)
section = 'slider_section'
figs = list()
figs.append(plt.figure())
plt.plot([1, 2, 3])
plt.close('all')
figs.append(plt.figure())
plt.plot([3, 2, 1])
plt.close('all')
report.add_slider_to_section(figs, section=section)
report.save(op.join(tempdir, 'report.html'), open_browser=False)
assert_raises(NotImplementedError, report.add_slider_to_section,
[figs, figs])
assert_raises(ValueError, report.add_slider_to_section, figs, ['wug'])
assert_raises(TypeError, report.add_slider_to_section, figs, 'wug')
def test_validate_input():
"""Test Report input validation."""
report = Report()
items = ['a', 'b', 'c']
captions = ['Letter A', 'Letter B', 'Letter C']
section = 'ABCs'
comments = ['First letter of the alphabet.',
'Second letter of the alphabet',
'Third letter of the alphabet']
assert_raises(ValueError, report._validate_input, items, captions[:-1],
section, comments=None)
assert_raises(ValueError, report._validate_input, items, captions, section,
comments=comments[:-1])
values = report._validate_input(items, captions, section, comments=None)
items_new, captions_new, comments_new = values
assert_equal(len(comments_new), len(items))
run_tests_if_main()
| bsd-3-clause |
haeusser/tensorflow | tensorflow/examples/learn/multiple_gpu.py | 49 | 3078 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example of using Estimator with multiple GPUs to distribute one model.
This example only runs if you have multiple GPUs to assign to.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from sklearn import cross_validation
from sklearn import datasets
from sklearn import metrics
import tensorflow as tf
layers = tf.contrib.layers
learn = tf.contrib.learn
def my_model(features, target):
"""DNN with three hidden layers, and dropout of 0.1 probability.
Note: If you want to run this example with multiple GPUs, Cuda Toolkit 7.0 and
CUDNN 6.5 V2 from NVIDIA need to be installed beforehand.
Args:
features: `Tensor` of input features.
target: `Tensor` of targets.
Returns:
Tuple of predictions, loss and training op.
"""
  # Convert the target to a one-hot tensor of shape (length of features, 3)
  # with an on-value of 1 for each one-hot vector of length 3.
target = tf.one_hot(target, 3, 1, 0)
  # Create three fully connected layers respectively of size 10, 20, and 10,
  # with each layer using dropout with a keep probability of 0.5.
normalizer_fn = layers.dropout
normalizer_params = {'keep_prob': 0.5}
with tf.device('/gpu:1'):
features = layers.stack(
features,
layers.fully_connected, [10, 20, 10],
normalizer_fn=normalizer_fn,
normalizer_params=normalizer_params)
with tf.device('/gpu:2'):
# Compute logits (1 per class) and compute loss.
logits = layers.fully_connected(features, 3, activation_fn=None)
loss = tf.losses.softmax_cross_entropy(target, logits)
# Create a tensor for training op.
train_op = tf.contrib.layers.optimize_loss(
loss,
tf.contrib.framework.get_global_step(),
optimizer='Adagrad',
learning_rate=0.1)
return ({
'class': tf.argmax(logits, 1),
'prob': tf.nn.softmax(logits)
}, loss, train_op)
def main(unused_argv):
iris = datasets.load_iris()
x_train, x_test, y_train, y_test = cross_validation.train_test_split(
iris.data, iris.target, test_size=0.2, random_state=42)
classifier = learn.Estimator(model_fn=my_model)
classifier.fit(x_train, y_train, steps=1000)
y_predicted = [
p['class'] for p in classifier.predict(
x_test, as_iterable=True)
]
score = metrics.accuracy_score(y_test, y_predicted)
print('Accuracy: {0:f}'.format(score))
if __name__ == '__main__':
tf.app.run()
| apache-2.0 |
abhisg/scikit-learn | benchmarks/bench_plot_parallel_pairwise.py | 297 | 1247 | # Author: Mathieu Blondel <[email protected]>
# License: BSD 3 clause
import time
import pylab as pl
from sklearn.utils import check_random_state
from sklearn.metrics.pairwise import pairwise_distances
from sklearn.metrics.pairwise import pairwise_kernels
def plot(func):
random_state = check_random_state(0)
one_core = []
multi_core = []
sample_sizes = range(1000, 6000, 1000)
for n_samples in sample_sizes:
X = random_state.rand(n_samples, 300)
start = time.time()
func(X, n_jobs=1)
one_core.append(time.time() - start)
start = time.time()
func(X, n_jobs=-1)
multi_core.append(time.time() - start)
pl.figure('scikit-learn parallel %s benchmark results' % func.__name__)
pl.plot(sample_sizes, one_core, label="one core")
pl.plot(sample_sizes, multi_core, label="multi core")
pl.xlabel('n_samples')
pl.ylabel('Time (s)')
pl.title('Parallel %s' % func.__name__)
pl.legend()
def euclidean_distances(X, n_jobs):
return pairwise_distances(X, metric="euclidean", n_jobs=n_jobs)
def rbf_kernels(X, n_jobs):
return pairwise_kernels(X, metric="rbf", n_jobs=n_jobs, gamma=0.1)
plot(euclidean_distances)
plot(rbf_kernels)
pl.show()
| bsd-3-clause |
jakobj/nest-simulator | pynest/examples/brunel_delta_nest.py | 6 | 12075 | # -*- coding: utf-8 -*-
#
# brunel_delta_nest.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
"""Random balanced network (delta synapses)
----------------------------------------------
This script simulates an excitatory and an inhibitory population on
the basis of the network used in [1]_
When connecting the network, customary synapse models are used, which
allow for querying the number of created synapses. Using spike
recorders, the average firing rates of the neurons in the populations
are established. The building as well as the simulation time of the
network are recorded.
References
~~~~~~~~~~~~~~
.. [1] Brunel N (2000). Dynamics of sparsely connected networks of excitatory and
inhibitory spiking neurons. Journal of Computational Neuroscience 8,
183-208.
"""
###############################################################################
# Import all necessary modules for simulation, analysis and plotting.
import time
import nest
import nest.raster_plot
import matplotlib.pyplot as plt
nest.ResetKernel()
###############################################################################
# Assigning the current time to a variable in order to determine the build
# time of the network.
startbuild = time.time()
###############################################################################
# Assigning the simulation parameters to variables.
dt = 0.1 # the resolution in ms
simtime = 1000.0 # Simulation time in ms
delay = 1.5 # synaptic delay in ms
###############################################################################
# Definition of the parameters crucial for asynchronous irregular firing of
# the neurons.
g = 5.0 # ratio inhibitory weight/excitatory weight
eta = 2.0 # external rate relative to threshold rate
epsilon = 0.1 # connection probability
###############################################################################
# Definition of the number of neurons in the network and the number of neurons
# recorded from
order = 2500
NE = 4 * order # number of excitatory neurons
NI = 1 * order # number of inhibitory neurons
N_neurons = NE + NI # number of neurons in total
N_rec = 50 # record from 50 neurons
###############################################################################
# Definition of connectivity parameters
CE = int(epsilon * NE) # number of excitatory synapses per neuron
CI = int(epsilon * NI) # number of inhibitory synapses per neuron
C_tot = int(CI + CE) # total number of synapses per neuron
###############################################################################
# Initialization of the parameters of the integrate and fire neuron and the
# synapses. The parameters of the neuron are stored in a dictionary.
tauMem = 20.0 # time constant of membrane potential in ms
theta = 20.0 # membrane threshold potential in mV
neuron_params = {"C_m": 1.0,
"tau_m": tauMem,
"t_ref": 2.0,
"E_L": 0.0,
"V_reset": 0.0,
"V_m": 0.0,
"V_th": theta}
J = 0.1 # postsynaptic amplitude in mV
J_ex = J # amplitude of excitatory postsynaptic potential
J_in = -g * J_ex # amplitude of inhibitory postsynaptic potential
###############################################################################
# Definition of threshold rate, which is the external rate needed to fix the
# membrane potential around its threshold, the external firing rate and the
# rate of the poisson generator which is multiplied by the in-degree CE and
# converted to Hz by multiplication by 1000.
nu_th = theta / (J * CE * tauMem)
nu_ex = eta * nu_th
p_rate = 1000.0 * nu_ex * CE
###############################################################################
# Configuration of the simulation kernel by the previously defined time
# resolution used in the simulation. Setting ``print_time`` to `True` prints the
# already processed simulation time as well as its percentage of the total
# simulation time.
nest.SetKernelStatus({"resolution": dt, "print_time": True,
"overwrite_files": True})
print("Building network")
###############################################################################
# Configuration of the model ``iaf_psc_delta`` and ``poisson_generator`` using
# ``SetDefaults``. This function expects the model to be the inserted as a
# string and the parameter to be specified in a dictionary. All instances of
# theses models created after this point will have the properties specified
# in the dictionary by default.
nest.SetDefaults("iaf_psc_delta", neuron_params)
nest.SetDefaults("poisson_generator", {"rate": p_rate})
###############################################################################
# Creation of the nodes using ``Create``. We store the returned handles in
# variables for later reference. Here the excitatory and inhibitory, as well
# as the poisson generator and two spike recorders. The spike recorders will
# later be used to record excitatory and inhibitory spikes.
nodes_ex = nest.Create("iaf_psc_delta", NE)
nodes_in = nest.Create("iaf_psc_delta", NI)
noise = nest.Create("poisson_generator")
espikes = nest.Create("spike_recorder")
ispikes = nest.Create("spike_recorder")
###############################################################################
# Configuration of the spike recorders recording excitatory and inhibitory
# spikes by sending parameter dictionaries to ``set``. Setting the property
# `record_to` to *"ascii"* ensures that the spikes will be recorded to a file,
# whose name starts with the string assigned to the property `label`.
espikes.set(label="brunel-py-ex", record_to="ascii")
ispikes.set(label="brunel-py-in", record_to="ascii")
print("Connecting devices")
###############################################################################
# Definition of a synapse using ``CopyModel``, which expects the model name of
# a pre-defined synapse, the name of the customary synapse and an optional
# parameter dictionary. The parameters defined in the dictionary will be the
# default parameter for the customary synapse. Here we define one synapse for
# the excitatory and one for the inhibitory connections giving the
# previously defined weights and equal delays.
nest.CopyModel("static_synapse", "excitatory",
{"weight": J_ex, "delay": delay})
nest.CopyModel("static_synapse", "inhibitory",
{"weight": J_in, "delay": delay})
###############################################################################
# Connecting the previously defined poisson generator to the excitatory and
# inhibitory neurons using the excitatory synapse. Since the poisson
# generator is connected to all neurons in the population, the default rule
# (``all_to_all``) of ``Connect`` is used. The synaptic properties are inserted
# via ``syn_spec`` which expects a dictionary when defining multiple variables
# or a string when simply using a pre-defined synapse.
nest.Connect(noise, nodes_ex, syn_spec="excitatory")
nest.Connect(noise, nodes_in, syn_spec="excitatory")
###############################################################################
# Connecting the first ``N_rec`` nodes of the excitatory and inhibitory
# population to the associated spike recorders using excitatory synapses.
# Here the same shortcut for the specification of the synapse as defined
# above is used.
nest.Connect(nodes_ex[:N_rec], espikes, syn_spec="excitatory")
nest.Connect(nodes_in[:N_rec], ispikes, syn_spec="excitatory")
print("Connecting network")
print("Excitatory connections")
###############################################################################
# Connecting the excitatory population to all neurons using the pre-defined
# excitatory synapse. Beforehand, the connection parameter are defined in a
# dictionary. Here we use the connection rule ``fixed_indegree``,
# which requires the definition of the indegree. Since the synapse
# specification is reduced to assigning the pre-defined excitatory synapse it
# suffices to insert a string.
conn_params_ex = {'rule': 'fixed_indegree', 'indegree': CE}
nest.Connect(nodes_ex, nodes_ex + nodes_in, conn_params_ex, "excitatory")
print("Inhibitory connections")
###############################################################################
# Connecting the inhibitory population to all neurons using the pre-defined
# inhibitory synapse. The connection parameters as well as the synapse
# parameters are defined analogously to the connection from the excitatory
# population defined above.
conn_params_in = {'rule': 'fixed_indegree', 'indegree': CI}
nest.Connect(nodes_in, nodes_ex + nodes_in, conn_params_in, "inhibitory")
###############################################################################
# Storage of the time point after the buildup of the network in a variable.
endbuild = time.time()
###############################################################################
# Simulation of the network.
print("Simulating")
nest.Simulate(simtime)
###############################################################################
# Storage of the time point after the simulation of the network in a variable.
endsimulate = time.time()
###############################################################################
# Reading out the total number of spikes received from the spike recorder
# connected to the excitatory population and the inhibitory population.
events_ex = espikes.n_events
events_in = ispikes.n_events
###############################################################################
# Calculation of the average firing rate of the excitatory and the inhibitory
# neurons by dividing the total number of recorded spikes by the number of
# neurons recorded from and the simulation time. The multiplication by 1000.0
# converts the unit 1/ms to 1/s=Hz.
rate_ex = events_ex / simtime * 1000.0 / N_rec
rate_in = events_in / simtime * 1000.0 / N_rec
###############################################################################
# Reading out the number of connections established using the excitatory and
# inhibitory synapse model. The numbers are summed up resulting in the total
# number of synapses.
num_synapses = (nest.GetDefaults("excitatory")["num_connections"] +
nest.GetDefaults("inhibitory")["num_connections"])
###############################################################################
# Establishing the time it took to build and simulate the network by taking
# the difference of the pre-defined time variables.
build_time = endbuild - startbuild
sim_time = endsimulate - endbuild
###############################################################################
# Printing the network properties, firing rates and building times.
print("Brunel network simulation (Python)")
print("Number of neurons : {0}".format(N_neurons))
print("Number of synapses: {0}".format(num_synapses))
print(" Exitatory : {0}".format(int(CE * N_neurons) + N_neurons))
print(" Inhibitory : {0}".format(int(CI * N_neurons)))
print("Excitatory rate : %.2f Hz" % rate_ex)
print("Inhibitory rate : %.2f Hz" % rate_in)
print("Building time : %.2f s" % build_time)
print("Simulation time : %.2f s" % sim_time)
###############################################################################
# Plot a raster of the excitatory neurons and a histogram.
nest.raster_plot.from_device(espikes, hist=True)
plt.show()
| gpl-2.0 |
marl/jams | docs/conf.py | 1 | 9902 | # -*- coding: utf-8 -*-
#
# jams documentation build configuration file, created by
# sphinx-quickstart on Mon Dec 8 10:34:40 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# -- General configuration ------------------------------------------------
import os
import sys
sys.path.insert(0, os.path.abspath('../'))
# If your documentation needs a minimal Sphinx version, state it here.
needs_sphinx = '1.2'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.doctest',
'sphinx.ext.viewcode',
'sphinx.ext.intersphinx',
'numpydoc'
]
import glob
autosummary_generate = glob.glob('*.rst')
numpydoc_show_class_members = False
intersphinx_mapping = {'numpy': ('https://docs.scipy.org/doc/numpy/', None),
'np': ('https://docs.scipy.org/doc/numpy/', None),
'pandas': ('http://pandas.pydata.org/pandas-docs/stable/', None),
'pd': ('http://pandas.pydata.org/pandas-docs/stable/', None),
'mir_eval': ('https://craffel.github.io/mir_eval/', None),
'json': ('https://docs.python.org/2/', None),
'jsonschema': ('https://python-jsonschema.readthedocs.io/en/latest/', None)}
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
exclude_trees = ['_templates', '_build']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'jams'
copyright = u'2015, JAMS development team'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
import imp
jams_version = imp.load_source('jams.version', '../jams/version.py')
version = jams_version.short_version
# The full version, including alpha/beta/rc tags.
release = jams_version.version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
default_role = 'autolink'
# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = False
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# Mock
from mock import Mock as MagicMock
class Mock(MagicMock):
@classmethod
def __getattr__(cls, name):
return Mock()
MOCK_MODULES = (['jsonschema', 'mir_eval', 'pandas', 'numpy',
'mir_eval.sonify', 'mir_eval.util', 'mir_eval.display',
'decorator',
'matplotlib', 'matplotlib.pyplot', 'matplotlib.offsetbox',
'sortedcontainers'])
sys.modules.update((mod_name, Mock()) for mod_name in MOCK_MODULES)
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#html_theme = 'default'
import sphinx_rtd_theme
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if on_rtd:
html_theme = 'default'
# MOCK_MODULES = ['numpy', 'pandas']
# sys.modules.update((mod_name, Mock()) for mod_name in MOCK_MODULES)
else:
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
html_domain_indices = True
# If false, no index is generated.
html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'jamsdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'jams.tex', u'jams Documentation',
u'JAMS development team', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'jams', u'jams Documentation',
[u'JAMS development team'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'jams', u'jams Documentation', u'JAMS development team', 'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| isc |
mpi2/PhenotypeData | external_tools/src/main/python/images/qc_apply_all_sites_model.py | 1 | 9124 | # coding: utf-8
""" Application of all sites QC mode
We apply the model built from data from all sites. This has to be
run by submission to the LSF queue on the cluster.
This script was generated from an ipython notebook of the same name
then modified into its current form
"""
import sys
import os
import re
import argparse
import numpy as np
import torch
import torchvision
from torchvision import datasets, models, transforms
import torch.nn as nn
import pandas as pd
# Import helper functions
import qc_helper as helper
# Parameters for this run
parser = argparse.ArgumentParser(
description = "Apply quality control model to X-ray images"
)
parser.add_argument(
'--site-name', dest='site_name', required=True,
help='Abbreviated name of site as in the directory in images/clean'
)
parser.add_argument(
'--parameter-stable-id', dest='parameter_stable_id', required=True,
help='Parameter stable ID as specified in IMPRESS'
)
parser.add_argument(
'-d', '--base-dir', dest='dir_base', default="/nfs/komp2/web/images/clean/impc/",
help='Base directory for location of images'
)
parser.add_argument(
'-p', '--print-every', dest='print_every', default=-1, type=int,
help='Number of iterations before printing prediction stats note that this also saves the predictions up to this point which is useful incase the program crashes. Use -1 to prevent printing anything.'
)
parser.add_argument(
'-o', '--output-dir', dest='output_dir', default="/nfs/nobackup/spot/machine_learning/impc_mouse_xrays/quality_control_all_sites/images_to_classify/",
help='Directory to read and write files associated with prediction'
)
parser.add_argument(
'-m', '--model-path', dest='model_path', required=True,
help="Path to json file describing model to use for predictions. Must be in same dir as model and have same name as model - only with .json ext whilst model has .pt extension"
)
parser.add_argument(
'--output-filename', dest='output_filename',
help="Name to use for output file base. If not specified the site name a nd parameter stable ID are concatenated"
)
args = parser.parse_args()
print_every = args.print_every
site_name = args.site_name;
parameter_stable_id = args.parameter_stable_id
dir_base = args.dir_base
# File containing description of the model and all inputs needed
model_info, label_map, files_to_process = helper.parse_model_desc(args.model_path)
classes = list(label_map.keys())
n_classes = len(classes)
# Set model version
try:
model_version = model_info['model_version']
except KeyError as e:
print(f'Key {str(e)} not present - could not get model_version. Exiting')
sys.exit(-1)
if model_version != 1 and model_version != 2:
print(f"Model version must be 1 or 2 (value read = {model_version}) - Exiting")
sys.exit(-1)
to_process = os.path.join(args.output_dir,site_name+"_"+parameter_stable_id+".txt")
# Create the output file names using argument if supplied or from site
# name and parameter stable ID if --output-filename parameter not supplied
if args.output_filename is None:
output_filename = site_name+"_"+parameter_stable_id
else:
output_filename = args.output_filename
processed_output_path = os.path.join(args.output_dir,output_filename+"_processed.csv")
mis_classified_output_path = os.path.join(args.output_dir,output_filename+"_misclassified.csv")
unable_to_read_output_path = os.path.join(args.output_dir,output_filename+"_unable_to_read.csv")
# Dict to map parameter_stable_ids to expected_class
#parameter_to_class_map = {
# 'IMPC_XRY_051_001' : 1,
# 'IMPC_XRY_049_001' : 2,
# 'IMPC_XRY_034_001' : 3,
# 'IMPC_XRY_048_001' : 4,
# 'IMPC_XRY_050_001' : 5,
# 'IMPC_XRY_052_001' : 6,
#}
# Because of inclusion of LA need to make mapping more general
parameter_to_class_map = {
'_XRY_051_001' : 1,
'_XRY_049_001' : 2,
'_XRY_034_001' : 3,
'_XRY_048_001' : 4,
'_XRY_050_001' : 5,
'_XRY_052_001' : 6,
}
regex = re.compile('(_XRY_0[0-9]{2}_001)')
parameter_id_stem = regex.findall(parameter_stable_id)[0]
expected_class = parameter_to_class_map[parameter_id_stem]
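# For example (illustrative): a parameter_stable_id of 'IMPC_XRY_049_001'
# yields the stem '_XRY_049_001' via the regex above and hence
# expected_class = 2; any other prefix with the same stem maps to the same
# class.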
# In[3]:
# check if CUDA is available
use_cuda = torch.cuda.is_available()
if not use_cuda:
print('CUDA is not available. Training on CPU ...')
else:
print('CUDA is available! Training on GPU ...')
# Read in metadata
imdetails = pd.read_csv(to_process)
n_images = len(imdetails)
print(f"Number of images available: {n_images}")
# Create transforms and dataset
im_size = 224
data_transform = transforms.Compose([ transforms.Lambda(lambda im: helper.crop_to_square(im)),
transforms.Resize(im_size),
transforms.ToTensor(),
transforms.Normalize((0.48527132, 0.46777139, 0.39808026), (0.26461128, 0.25852081, 0.26486896))])
dataset = helper.ImageDataset(imdetails, path_column="imagename",
root_dir=None, transform=data_transform)
# define dataloader parameters
batch_size = 10
num_workers=0
### Transforming the Data
#
# When we perform transfer learning, we have to shape our input data into the shape that the pre-trained model expects. VGG16 expects `224`-dim square images as input and so, we resize each flower image to fit this mold.
# ## Define the Model
#
# To define a model for training we'll follow these steps:
# 1. Load in a pre-trained VGG16 model
# 2. "Freeze" all the parameters, so the net acts as a fixed feature extractor
# 3. Remove the last layer
# 4. Replace the last layer with a linear classifier of our own
#
# **Freezing simply means that the parameters in the pre-trained model will *not* change during training.**
# In[9]:
# Load the pretrained model from pytorch
model_transfer = models.vgg16(pretrained=True)
# Freeze training for all "features" layers
for param in model_transfer.features.parameters():
param.require_grad = False
# Replace last layer for our use case
num_features = model_transfer.classifier[6].in_features
features = list(model_transfer.classifier.children())[:-1]
features.extend([nn.Linear(num_features, n_classes)])
if model_version == 2:
features.extend([nn.Softmax(dim=1)])
model_transfer.classifier = nn.Sequential(*features)
# Load our learnt weights
model_path = os.path.join(model_info['model_dir'],model_info['model_fname'])
if use_cuda:
model_transfer = model_transfer.cuda()
model_transfer.load_state_dict(torch.load(model_path))
else:
model_transfer.load_state_dict(torch.load(model_path, map_location='cpu'))
print("Configured model from: " + model_path)
# Apply the model to qc images
n_images = len(dataset)
predictions = np.ones([n_images,],np.byte) * -1
class_scores = np.zeros([n_images,], np.float)
mis_classifieds = []
unable_to_read = []
for i in range(n_images):
try:
image, imname = dataset[i]
if use_cuda:
image = image.cuda()
output = model_transfer(image.unsqueeze(0))
output =np.squeeze(output.data.cpu().numpy())
index = np.argwhere(output == output.max())[0]
#predictions[i] = index+1
predictions[i] = classes[index[0]]
class_scores[i] = output[index]
if predictions[i] != expected_class:
mis_classifieds.append((i,imdetails['imagename'][i],predictions[i], class_scores[i]))
if print_every > 0 and i%print_every == 0:
print(f"Iteration {i}")
print("Number of misclassifieds: {0}".format(len(mis_classifieds)))
# Also save predictions in case job crashes
processed_output_path_temp = "{0}_{1:05d}".format(processed_output_path,i)
imdetails['classlabel'] = predictions
imdetails['classscore'] = class_scores
imdetails.to_csv(processed_output_path_temp, index=False)
except Exception as e:
print("An error occured")
print(e)
unable_to_read.append(i)
# Save the new dataframe
imdetails['classlabel'] = predictions
imdetails['classscore'] = class_scores
imdetails = imdetails.astype({'classscore': 'float64'})
imdetails.to_csv(processed_output_path, index=False, float_format='%.2f')
print("Saved processed images to " + processed_output_path)
# Save misclassifieds
if len(mis_classifieds) > 0:
mis_classifieds_df = pd.DataFrame(columns=('index','imagepath','expected','predicted','classscore'))
for i, (index, im_path, predicted, class_score) in enumerate(mis_classifieds):
mis_classifieds_df.loc[i] = [index, im_path, expected_class, predicted, class_score]
mis_classifieds_df.to_csv(mis_classified_output_path, index=False)
print("Saved misclassified images to " + mis_classified_output_path)
if len(unable_to_read) > 0:
unable_to_read_df = pd.DataFrame(columns=('index','imagepath',))
for i, ind in enumerate(unable_to_read):
unable_to_read_df.loc[i] = [ind, imdetails['imagename'][ind]]
unable_to_read_df.to_csv(unable_to_read_output_path, index=False)
print("Saved unable_to_read to " + unable_to_read_output_path)
| apache-2.0 |
kthyng/tracpy | tests/test_grid.py | 1 | 5743 | #!/usr/bin/env python
"""
Test projection and grid routines. Generally test all available pyproj
projection presets but not basemap since they are slower.
"""
import tracpy
import tracpy.calcs
import os
import numpy as np
import matplotlib.tri as mtri
# List projection setup for use in tests
# pyproj-based setups
projpyproj = ['galveston', 'nwgom-pyproj']
projbasemap = ['nwgom'] # don't usually test with this since slow
def test_proj_init():
"""Test initialization of preset pyproj projections."""
# loop through projection presets
for projsetup in projpyproj:
# Get projection object
proj = tracpy.tools.make_proj(setup=projsetup)
assert proj
def test_grid_init():
"""Test initialization of grid."""
# loop through projection presets
for projsetup in projpyproj:
# Get projection object
proj = tracpy.tools.make_proj(setup=projsetup)
grid_filename = os.path.join('input', 'grid.nc')
# Read in grid
grid = tracpy.inout.readgrid(grid_filename, proj, usespherical=True)
assert grid
def test_proj_variant():
"""Test creating a projection with different than built-in variables."""
pass
def test_proj_iteration():
"""Test projection conversion back and forth between spaces.
Set up a projection, then convert between spaces and check that the
result is close to the starting values.
"""
# loop through projection presets
for projsetup in projpyproj:
# Get projection object. Can use either 'galveston' or 'nwgom-pyproj'
# built in projection setups to test quickly ('nwgom' is for use with
# usebasemap=True and thus is slow for testing).
proj = tracpy.tools.make_proj(setup=projsetup)
grid_filename = os.path.join('input', 'grid.nc')
# Read in grid
grid = tracpy.inout.readgrid(grid_filename, proj, usespherical=True)
# convert back and forth
lon_rho2, lat_rho2 = grid.proj(grid.x_rho, grid.y_rho, inverse=True)
print(grid.lat_rho[0, :])
print(lat_rho2[0, :])
print(grid.lon_rho[0, :])
print(lon_rho2[0, :])
assert np.allclose(grid.lat_rho, lat_rho2)
assert np.allclose(grid.lon_rho, lon_rho2)
def test_grid_triangulation_spherical():
"""Test that the grid triangulations are valid: spherical test cases."""
# loop through projection presets
for projsetup in projpyproj:
# Get projection object
proj = tracpy.tools.make_proj(setup=projsetup)
grid_filename = os.path.join('input', 'grid.nc')
# Read in grid
grid = tracpy.inout.readgrid(grid_filename, proj, usespherical=True)
assert mtri.LinearTriInterpolator(grid.trir, grid.x_rho.flatten())
def test_grid_triangulation_projected():
"""Test that the grid triangulations are valid: projected test cases."""
# loop through projection presets
for projsetup in projpyproj:
# Get projection object
proj = tracpy.tools.make_proj(setup=projsetup)
grid_filename = os.path.join('input', 'gridxy.nc')
# Read in grid
grid = tracpy.inout.readgrid(grid_filename, proj, usespherical=False)
assert mtri.LinearTriInterpolator(grid.trir, grid.x_rho.flatten())
def test_interpolation():
"""Test interpolation with grid space and projected grid the same.
Create a test case with the 'projected' grid in grid space coordinates.
When interpolating between them, there should be a shift because the
rho points in projected space are not in the same setup as grid coords.
"""
# Get projection object
proj = tracpy.tools.make_proj(setup='nwgom-pyproj')
grid_filename = os.path.join('input', 'gridij.nc')
# Read in grid
grid = tracpy.inout.readgrid(grid_filename, proj, usespherical=False)
# Do some interpolating
# projected grid to grid space, delaunay
X, Y, _ = tracpy.tools.interpolate2d(grid.x_rho[2, 3], grid.y_rho[2, 3],
grid, 'd_xy2ij')
# There is a shift between the rho grid and the grid space grid because
# of the staggered layout. Grid space counts from the u/v grid and
# therefore is a little different from the rho grid.
assert np.allclose(X, grid.x_rho[2, 3] + 0.5)
assert np.allclose(Y, grid.y_rho[2, 3] + 0.5)
# grid space to projected coordinates, delaunay
x, y, _ = tracpy.tools.interpolate2d(grid.X[2, 3], grid.Y[2, 3], grid,
'd_ij2xy')
assert np.allclose(x, grid.X[2, 3] - 0.5)
assert np.allclose(y, grid.Y[2, 3] - 0.5)
# grid space to projected coordinates, map_coords
x, y, _ = tracpy.tools.interpolate2d(grid.X[2, 3], grid.Y[2, 3], grid,
'm_ij2xy')
assert np.allclose(x, grid.X[2, 3] - 0.5)
assert np.allclose(y, grid.Y[2, 3] - 0.5)
# def test_interpolation():
# """Test that interpolation methods work.
# Convert back and forth between spaces using multiple tools.interpolation2d
# methods to make sure the values stay close.
# """
# # Get projection object
# proj = tracpy.tools.make_proj(setup='nwgom-pyproj')
# grid_filename = os.path.join('input', 'grid.nc')
# # Read in grid
# grid = tracpy.inout.readgrid(grid_filename, proj, usespherical=True)
# # Do some interpolating
# X_rho, Y_rho, _ = tracpy.tools.interpolate2d(grid.lon_rho[2, 3],
# grid.lat_rho[2,3], grid, 'd_ll2ij')
# print X_rho
# print grid.X[2,3]
# assert np.allclose(X_rho, grid.X[2,3]-0.5)
def test_verts():
"""Test properties of vertices."""
pass
| mit |
jni/spectral-graphs | scripts/spectral-graph-display.py | 2 | 1141 | import numpy as np
from scipy import io, sparse, linalg
# run this from elegant scipy chapter
chem = np.load('chem-network.npy')
gap = np.load('gap-network.npy')
neuron_types = np.load('neuron-types.npy')
neuron_ids = np.load('neurons.npy')
A = chem + gap
n = A.shape[0]
c = (A + A.T) / 2
d = sparse.diags([np.sum(c, axis=0)], [0])
d = d.toarray()
L = np.array(d - c)
b = np.sum(c * np.sign(A - A.T), axis=1)
z = np.linalg.pinv(L) @ b
# IPython log file
dinv2 = np.copy(d)
diag = (np.arange(n), np.arange(n))
dinv2[diag] = d[diag] ** (-.5)
q = dinv2 @ L @ dinv2
eigvals, vec = linalg.eig(q)
x = dinv2 @ vec[:, 1]
x.shape
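# Note: scipy.linalg.eig does not return eigenvalues in ascending order, so
# vec[:, 1] is not guaranteed to be the second-smallest ("Fiedler") mode.  A
# more defensive sketch (not part of the original log) would be:
#     order = np.argsort(eigvals.real)
#     x = dinv2 @ vec[:, order[1]].real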
from matplotlib import pyplot as plt
from matplotlib import colors
ii, jj = np.nonzero(c)
plt.scatter(x, z, c=neuron_types, cmap=colors.ListedColormap(((1, 0, 0), (0, 1, 0), (0, 0, 1))), zorder=1)
for src, dst in zip(ii, jj):
plt.plot(x[[src, dst]], z[[src, dst]], c=(0.85, 0.85, 0.85), lw=0.2, alpha=0.5, zorder=0)
for x0, z0, neuron_id in zip(x, z, neuron_ids):
plt.text(x0, z0, ' ' + neuron_id,
horizontalalignment='left', verticalalignment='center',
fontsize=4, zorder=2)
| mit |
mdaddysman/Insulin-Tracking | MisPartAnlysis.py | 1 | 9449 | import numpy as np
import matplotlib.pyplot as plt
import CalcMSD as MSD
def split(data,cutoff,wanted = [1,1,1,1],showloss = True):
#wanted: 0: List of Trajectories as lists of particle positions; 1:trajectory length info, minimum, maximum, average;
# 2: List of Frame numbers for each particle in each trajectory; 3:min/max x and y position throughout all frames
    for i in range(4-len(wanted)):
wanted.append(0)
Traj = []
ntraj = int(max(data[:,3]))
mintrajlen = 100
maxtrajlen = 0
ave = 0
count = 0
Len = []
Frame = []
minx, miny = 0 , 0
maxx, maxy = 0, 0
for i in range(ntraj):
ind = np.where(data[:,3] == i+1)
traj = data[ind,0:2][0]
Len.append(len(traj))
if(len(traj)<=cutoff): count = count+1
if(len(traj)>cutoff):
if(wanted[0] == 1 or wanted[1] == 1):
Traj.append(traj)
if(wanted[2] == 1):
Frame.append(data[ind,2][0])
if(wanted[1] == 1):
mintrajlen = min(mintrajlen,len(traj))
maxtrajlen = max(maxtrajlen,len(traj))
ave = ave + len(traj)
if(wanted[3] == 1):
mintraj = np.min(traj,axis=0)
if(minx > mintraj[0]): minx = mintraj[0]
if(miny > mintraj[1]): miny = mintraj[1]
maxtraj = np.max(traj,axis=0)
if(maxx < maxtraj[0]): maxx = maxtraj[0]
if(maxy < maxtraj[1]): maxy = maxtraj[1]
if(showloss): print( count/ntraj,'lost')
output = []
if(wanted[0] == 1): output.append(Traj)
if(wanted[1] == 1):
L = len(Traj)
        ave = ave/L
output = output + [mintrajlen,maxtrajlen,ave]
if(wanted[2] == 1): output.append(Frame)
if(wanted[3] == 1): output = output + [[minx,maxx],[miny,maxy]]
return output
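# Usage sketch for split (illustrative): with the default wanted=[1,1,1,1]
#     Traj, minlen, maxlen, avelen, Frame, xlim, ylim = split(data, 5)
# keeps only trajectories longer than 5 frames; wanted=[1,0,0,0] gives a
# one-element output containing just the list of trajectories.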
def Dense(data): #returns number of particles per frame
frames = data[:,2]
Npart = []
    for i in range(int(max(frames))+1):
Npart.append(len(np.where(frames == i)[0]))
return Npart
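# Usage sketch (illustrative): Npart = Dense(data) gives the number of
# localizations detected in frame i as Npart[i]; Crowd() below averages these
# counts over the frames visited by each trajectory.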
def Crowd(Frame,Npart,Traj): #returns average number of particles present during each trajectory
    Npart = np.array(Npart)
    crowd = []
    for i in range(len(Traj)):
        frames = np.asarray(Frame[i], dtype=int)  # frame numbers visited by trajectory i
        crowd.append(np.mean(Npart[frames]))
    return crowd
def vor_density(Loc): #density given by voronoi tesselations
from scipy.spatial import Voronoi, voronoi_plot_2d
vor = Voronoi(Loc)
edges = np.array(vor.regions)[vor.point_region]
vert = vor.vertices
Dens = []
ind_reg = -1
for edg in edges:
if(-1 in edg):
Dens.append(-1)
elif(len(edg) > 1):
ind_reg = ind_reg+1
pt = []
if(-1 in edg):
edg.remove(-1)
l = len(edg)
            for cnt in range(l):
v = edg[cnt]
pt.append(vert[v])
pt.sort(key=lambda pos: pos[0])
pt = np.array(pt)
y = np.array(pt[:,1])
x = np.array(pt[:,0])
m = (y[-1]-y[0])/(x[-1]-x[0])
b = y[0]-m*x[0]
upind = np.where(y+0.0001>=x*m+b)[0]
downind = np.where(y-0.0001<= x*m+b)[0]
x2 = x[upind]
y2 = y[upind]
y3 = x2*m+b
area = np.sum((x2[1:]-x2[:-1])*((y2[:-1]-y3[:-1])-(y2[:-1]-y2[1:])/2-(y3[1:]-y3[:-1])/2))
x2 = x[downind]
y2 = y[downind]
y3 = x2*m+b
area = area+np.sum((x2[:-1]-x2[1:])*((y2[:-1]-y3[:-1])-(y2[:-1]-y2[1:])/2-(y3[1:]-y3[:-1])/2))
Dens.append(1/area)
else: Dens.append(0)
return Dens
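# Usage sketch (illustrative): for the localizations of a single frame,
#     ind = np.where(data[:, 2] == frame_number)[0]
#     densities = vor_density(data[ind, :2])
# an entry of -1 marks a particle whose Voronoi region is open (touches the
# edge of the point cloud) and 0 marks a degenerate region.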
def find_poly(Loc): #identify polygon that encloses datapoints, Loc=array([[x_1,y_1],[x_2,y_2]...])
def slope_int(p1,p2):
if(p1[0] == p2[0]):
p = p1[0]+0.0000001
m = (p1[1]-p2[1])/(p-p2[0])
b = p1[1]-m*p
print "warning: p1=p2"
else:
m = (p1[1]-p2[1])/(p1[0]-p2[0])
b = p1[1]-m*p1[0]
return [m,b]
ind1 = np.argmax(Loc[:,0])
ind2 = np.argmax(Loc[:,1])
ind3 = np.argmin(Loc[:,0])
ind4 = np.argmin(Loc[:,1])
topx = Loc[ind2,0]
botx = Loc[ind4,0]
Upedge = [slope_int(Loc[ind1],Loc[ind2]),slope_int(Loc[ind3],Loc[ind2])]
Uppoint = [Loc[ind1],Loc[ind2],Loc[ind3]]
Downedge = [slope_int(list(Loc[ind1]),Loc[ind4]),slope_int(Loc[ind3],Loc[ind4])]
Downpoint = [Loc[ind1],Loc[ind4],Loc[ind3]]
if(ind2 == ind3):
Upedge.pop(1)
Uppoint.pop(1)
if(ind1 == ind2):
Upedge.pop(0)
Uppoint.pop(0)
if(ind3 == ind4):
Downedge.pop(1)
Downpoint.pop(1)
if(ind1 == ind4):
Downedge.pop(0)
Downpoint.pop(0)
X = Loc[:,0]
Y = Loc[:,1]
lenedg = len(Upedge)
i = 0
while(i<lenedg):
edg = Upedge[i]
ind = np.argmax(Y - X*edg[0]+edg[1])
x = X[ind]
y = Y[ind]
pt0,pt1 = Uppoint[i],Uppoint[i+1]
x0, x1 = x,x
if(pt1[0] == x):
if(x<topx):
pt1[0] = pt1[0]-0.00001
else:
x1 = x1+0.00001
if(pt0[0] == x):
if(x<topx): x0=x0-0.00001
else: pt0[0] = pt0[0]+0.00001
if(y-0.001 > x*edg[0]+edg[1]):
Upedge[i] = slope_int([x1,y],pt1)
Uppoint.insert(i+1,[x,y])
Upedge.insert(i,slope_int(pt0,[x0,y]))
lenedg = lenedg+1
i = i-1
i = i+1
lenedg = len(Downedge)
i = 0
while(i<lenedg):
edg = Downedge[i]
ind = np.argmax(X*edg[0]+edg[1]-Y)
x = X[ind]
y = Y[ind]
pt0 = list(Downpoint[i])
pt1 = list(Downpoint[i+1])
x_1,x_0 = x, x
ind = 0
if(pt0[0] == x):
if(pt0[1] == y): ind = 1
if(x<botx):
x_0 = x-0.00001
else:
pt0[0]=x+0.00001
if(pt1[0] == x):
if(pt1[1] == y): ind = 1
if(x<botx):
pt1[0] = x-0.0001
else:
x_1 = x+0.0001
if(x*edg[0]+edg[1] > y-0.0001):
if(ind == 0):
Downpoint.insert(i+1,[x,y])
Downedge[i] = slope_int([x_1,y],pt1)
Downedge.insert(i,slope_int(pt0,[x_0,y]))
lenedg = lenedg+1
i = i-1
i = i+1
return [[Upedge,Downedge],[Uppoint,Downpoint]]
def dist_from_edge(Loc,Upedge,Downedge): #identify shortest distance from point array to polygon
X = Loc[:,0]
Y = Loc[:,1]
Dist = []
for edg in Upedge:
[m,b] = edg
if(m == 0):
Dist.append((Y-b)**2)
else:
Dist.append(np.linalg.norm([(Y-m*X+b)/2/m, (m*X-Y+3*b)/2],axis=0))
for edg in Downedge:
[m,b] = edg
if(m == 0):
Dist.append((Y-b)**2)
else:
Dist.append(np.linalg.norm([(Y-m*X+b)/2/m, (m*X-Y+3*b)/2],axis=0))
return np.min(Dist,axis=0)
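# Usage sketch (illustrative): for the localizations loc of one frame,
#     (up_edges, down_edges), _ = find_poly(loc)
#     d = dist_from_edge(loc, up_edges, down_edges)
# gives each point's smallest distance measure to the enclosing polygon.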
def find_all_poly(data,t=0): #data = array([[x_1,y_1,t_1,...]...])
#identify polygon encompassing datapoints in each frame and the t frames before/after it. Takes awhile
maxframes = int(max(data[:,2]))
Output = []
    for i in range(maxframes+1):
        if(i%20 == 0): print(i)
        ind = []
        for j in range(-t,t+1):
            ind = ind+list(np.where(data[:,2] == i+j)[0])
        if(ind != []):
            Output.append(find_poly(data[ind,:2]))
        else:
            Output.append(None)  # placeholder keeps Output indexed by frame number
    return Output
def find_all_dist(data,t=0):#data = array([[x_1,y_1,t_1,...]...])
    #Identify shortest distance of each datapoint from the Polygon for the frame it's in
    maxframes = int(max(data[:,2]))
    Output = np.zeros(data.shape[0])  # one distance per localization
    Poly = find_all_poly(data,t)
    for i in range(maxframes+1):
        ind = np.where(data[:,2] == i)[0]
        if(len(ind) > 0 and Poly[i] is not None):
            [Upedg,Dnedg] = Poly[i][0]
            Output[ind] = dist_from_edge(data[ind,:2],Upedg,Dnedg)
return Output
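# Usage sketch (illustrative): edge_dist = find_all_dist(data, t=3) returns,
# for every row of data, its distance measure to the polygon built from all
# localizations within +/-3 frames of that row's frame.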
def showPoly(Uppoint,Downpoint,Loc,specialedge=[],specialpt = []):
Uppoint = np.array(Uppoint)
plt.plot(Uppoint[:,0],Uppoint[:,1],'k')
Downpoint = np.array(Downpoint)
plt.plot(Downpoint[:,0],Downpoint[:,1],'k')
for edg in specialedge:
edg = np.array(edg)
plt.plot(edg[:,0],edg[:,1],'r')
plt.plot(Loc[:,0],Loc[:,1],'bo')
for pt in specialpt:
plt.plot(pt[0],pt[1],'ro')
plt.show()
def showPoly2(Uppoint,Downpoint,Loc,maxx,minx,specialedge=[],specialpt = []):
x=np.arange(21)*(maxx-minx)/20+minx
for edg in Uppoint:
edg = np.array(edg)
plt.plot(edg[:,0],edg[:,1],'k')
for edg in Downpoint:
edg = np.array(edg)
plt.plot(edg[:,0],edg[:,1],'k')
for edg in specialedge:
plt.fill_between(x,edg[0]*x+edg[1])
print edg[0]
plt.plot(Loc[:,0],Loc[:,1],'bo')
for pt in specialpt:
plt.plot(pt[0],pt[1],'ro')
plt.ylim(0,500)
plt.show()
if(__name__ == '__main__'): #For testing functions
# Loc = np.transpose([30*np.random.random(30),20*np.random.random(30)])
f = 'Bot_large.txt' #scrum data
data_l = np.genfromtxt(f,delimiter = ',')
f = 'Bot_small.txt' #particle data
data_s = np.genfromtxt(f,delimiter = ',')
    #generating necessary info from data
maxframe = int(np.max(data_s[:,2]))
# [Traj_s,mintrajlen,maxtrajlen,ave,Frame_s,[minx,maxx],[miny,maxy]] = split(data_s,5)
# [Traj_l,Frame_l] = split(data_l,5,[1,0,1,0])
data = np.concatenate([data_s,data_l])
## Celledg = find_all_poly(data,3)
loc = data[:50,:2]
print vor_density(loc)
| gpl-3.0 |
dennisss/sympy | sympy/interactive/printing.py | 22 | 15069 | """Tools for setting up printing in interactive sessions. """
from __future__ import print_function, division
from io import BytesIO
from sympy import latex as default_latex
from sympy import preview
from sympy.core.compatibility import integer_types
from sympy.utilities.misc import debug
def _init_python_printing(stringify_func):
"""Setup printing in Python interactive session. """
import sys
from sympy.core.compatibility import builtins
def _displayhook(arg):
"""Python's pretty-printer display hook.
This function was adapted from:
http://www.python.org/dev/peps/pep-0217/
"""
if arg is not None:
builtins._ = None
print(stringify_func(arg))
builtins._ = arg
sys.displayhook = _displayhook
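# Rough illustration (informal, output depends on printer settings): once
# ``_init_python_printing(sstrrepr)`` has run in a plain Python session,
# evaluating an expression at the prompt prints ``sstrrepr(expr)`` and rebinds
# the ``_`` convenience variable to the result, mirroring PEP 217.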
def _init_ipython_printing(ip, stringify_func, use_latex, euler, forecolor,
backcolor, fontsize, latex_mode, print_builtin,
latex_printer):
"""Setup printing in IPython interactive session. """
try:
from IPython.lib.latextools import latex_to_png
except ImportError:
pass
preamble = "\\documentclass[%s]{article}\n" \
"\\pagestyle{empty}\n" \
"\\usepackage{amsmath,amsfonts}%s\\begin{document}"
if euler:
addpackages = '\\usepackage{euler}'
else:
addpackages = ''
preamble = preamble % (fontsize, addpackages)
imagesize = 'tight'
offset = "0cm,0cm"
resolution = 150
dvi = r"-T %s -D %d -bg %s -fg %s -O %s" % (
imagesize, resolution, backcolor, forecolor, offset)
dvioptions = dvi.split()
debug("init_printing: DVIOPTIONS:", dvioptions)
debug("init_printing: PREAMBLE:", preamble)
latex = latex_printer or default_latex
def _print_plain(arg, p, cycle):
"""caller for pretty, for use in IPython 0.11"""
if _can_print_latex(arg):
p.text(stringify_func(arg))
else:
p.text(IPython.lib.pretty.pretty(arg))
def _preview_wrapper(o):
exprbuffer = BytesIO()
try:
preview(o, output='png', viewer='BytesIO',
outputbuffer=exprbuffer, preamble=preamble,
dvioptions=dvioptions)
except Exception as e:
# IPython swallows exceptions
debug("png printing:", "_preview_wrapper exception raised:",
repr(e))
raise
return exprbuffer.getvalue()
def _matplotlib_wrapper(o):
# mathtext does not understand certain latex flags, so we try to
# replace them with suitable subs
o = o.replace(r'\operatorname', '')
o = o.replace(r'\overline', r'\bar')
return latex_to_png(o)
def _can_print_latex(o):
"""Return True if type o can be printed with LaTeX.
If o is a container type, this is True if and only if every element of
o can be printed with LaTeX.
"""
from sympy import Basic
from sympy.matrices import MatrixBase
from sympy.physics.vector import Vector, Dyadic
if isinstance(o, (list, tuple, set, frozenset)):
return all(_can_print_latex(i) for i in o)
elif isinstance(o, dict):
return all(_can_print_latex(i) and _can_print_latex(o[i]) for i in o)
elif isinstance(o, bool):
return False
# TODO : Investigate if "elif hasattr(o, '_latex')" is more useful
# to use here, than these explicit imports.
elif isinstance(o, (Basic, MatrixBase, Vector, Dyadic)):
return True
elif isinstance(o, (float, integer_types)) and print_builtin:
return True
return False
def _print_latex_png(o):
"""
A function that returns a png rendered by an external latex
distribution, falling back to matplotlib rendering
"""
if _can_print_latex(o):
s = latex(o, mode=latex_mode)
try:
return _preview_wrapper(s)
except RuntimeError:
if latex_mode != 'inline':
s = latex(o, mode='inline')
return _matplotlib_wrapper(s)
def _print_latex_matplotlib(o):
"""
A function that returns a png rendered by mathtext
"""
if _can_print_latex(o):
s = latex(o, mode='inline')
try:
return _matplotlib_wrapper(s)
except Exception:
# Matplotlib.mathtext cannot render some things (like
# matrices)
return None
def _print_latex_text(o):
"""
A function to generate the latex representation of sympy expressions.
"""
if _can_print_latex(o):
s = latex(o, mode='plain')
s = s.replace(r'\dag', r'\dagger')
s = s.strip('$')
return '$$%s$$' % s
def _result_display(self, arg):
"""IPython's pretty-printer display hook, for use in IPython 0.10
This function was adapted from:
ipython/IPython/hooks.py:155
"""
if self.rc.pprint:
out = stringify_func(arg)
if '\n' in out:
                print()
print(out)
else:
print(repr(arg))
import IPython
if IPython.__version__ >= '0.11':
from sympy.core.basic import Basic
from sympy.matrices.matrices import MatrixBase
from sympy.physics.vector import Vector, Dyadic
printable_types = [Basic, MatrixBase, float, tuple, list, set,
frozenset, dict, Vector, Dyadic] + list(integer_types)
plaintext_formatter = ip.display_formatter.formatters['text/plain']
for cls in printable_types:
plaintext_formatter.for_type(cls, _print_plain)
png_formatter = ip.display_formatter.formatters['image/png']
if use_latex in (True, 'png'):
debug("init_printing: using png formatter")
for cls in printable_types:
png_formatter.for_type(cls, _print_latex_png)
elif use_latex == 'matplotlib':
debug("init_printing: using matplotlib formatter")
for cls in printable_types:
png_formatter.for_type(cls, _print_latex_matplotlib)
else:
debug("init_printing: not using any png formatter")
for cls in printable_types:
# Better way to set this, but currently does not work in IPython
#png_formatter.for_type(cls, None)
if cls in png_formatter.type_printers:
png_formatter.type_printers.pop(cls)
latex_formatter = ip.display_formatter.formatters['text/latex']
if use_latex in (True, 'mathjax'):
debug("init_printing: using mathjax formatter")
for cls in printable_types:
latex_formatter.for_type(cls, _print_latex_text)
else:
debug("init_printing: not using text/latex formatter")
for cls in printable_types:
# Better way to set this, but currently does not work in IPython
#latex_formatter.for_type(cls, None)
if cls in latex_formatter.type_printers:
latex_formatter.type_printers.pop(cls)
else:
ip.set_hook('result_display', _result_display)
def init_printing(pretty_print=True, order=None, use_unicode=None,
use_latex=None, wrap_line=None, num_columns=None,
no_global=False, ip=None, euler=False, forecolor='Black',
backcolor='Transparent', fontsize='10pt',
latex_mode='equation*', print_builtin=True,
str_printer=None, pretty_printer=None,
latex_printer=None):
"""
Initializes pretty-printer depending on the environment.
Parameters
==========
pretty_print: boolean
If True, use pretty_print to stringify or the provided pretty
printer; if False, use sstrrepr to stringify or the provided string
printer.
order: string or None
There are a few different settings for this parameter:
        lex (default), which is lexicographic order;
        grlex, which is graded lexicographic order;
        grevlex, which is reversed graded lexicographic order;
old, which is used for compatibility reasons and for long expressions;
None, which sets it to lex.
use_unicode: boolean or None
If True, use unicode characters;
if False, do not use unicode characters.
use_latex: string, boolean, or None
If True, use default latex rendering in GUI interfaces (png and
mathjax);
if False, do not use latex rendering;
if 'png', enable latex rendering with an external latex compiler,
falling back to matplotlib if external compilation fails;
if 'matplotlib', enable latex rendering with matplotlib;
if 'mathjax', enable latex text generation, for example MathJax
rendering in IPython notebook or text rendering in LaTeX documents
wrap_line: boolean
If True, lines will wrap at the end; if False, they will not wrap
but continue as one line. This is only relevant if `pretty_print` is
True.
num_columns: int or None
If int, number of columns before wrapping is set to num_columns; if
None, number of columns before wrapping is set to terminal width.
This is only relevant if `pretty_print` is True.
no_global: boolean
If True, the settings become system wide;
if False, use just for this console/session.
ip: An interactive console
This can either be an instance of IPython,
or a class that derives from code.InteractiveConsole.
euler: boolean, optional, default=False
Loads the euler package in the LaTeX preamble for handwritten style
fonts (http://www.ctan.org/pkg/euler).
forecolor: string, optional, default='Black'
DVI setting for foreground color.
backcolor: string, optional, default='Transparent'
DVI setting for background color.
fontsize: string, optional, default='10pt'
A font size to pass to the LaTeX documentclass function in the
preamble.
latex_mode: string, optional, default='equation*'
The mode used in the LaTeX printer. Can be one of:
{'inline'|'plain'|'equation'|'equation*'}.
print_builtin: boolean, optional, default=True
If true then floats and integers will be printed. If false the
printer will only print SymPy types.
str_printer: function, optional, default=None
A custom string printer function. This should mimic
sympy.printing.sstrrepr().
pretty_printer: function, optional, default=None
A custom pretty printer. This should mimic sympy.printing.pretty().
latex_printer: function, optional, default=None
        A custom LaTeX printer. This should mimic sympy.printing.latex().
Examples
========
>>> from sympy.interactive import init_printing
>>> from sympy import Symbol, sqrt
>>> from sympy.abc import x, y
>>> sqrt(5)
sqrt(5)
>>> init_printing(pretty_print=True) # doctest: +SKIP
>>> sqrt(5) # doctest: +SKIP
___
\/ 5
>>> theta = Symbol('theta') # doctest: +SKIP
>>> init_printing(use_unicode=True) # doctest: +SKIP
>>> theta # doctest: +SKIP
\u03b8
>>> init_printing(use_unicode=False) # doctest: +SKIP
>>> theta # doctest: +SKIP
theta
>>> init_printing(order='lex') # doctest: +SKIP
>>> str(y + x + y**2 + x**2) # doctest: +SKIP
x**2 + x + y**2 + y
>>> init_printing(order='grlex') # doctest: +SKIP
>>> str(y + x + y**2 + x**2) # doctest: +SKIP
x**2 + x + y**2 + y
>>> init_printing(order='grevlex') # doctest: +SKIP
>>> str(y * x**2 + x * y**2) # doctest: +SKIP
x**2*y + x*y**2
>>> init_printing(order='old') # doctest: +SKIP
>>> str(x**2 + y**2 + x + y) # doctest: +SKIP
x**2 + x + y**2 + y
>>> init_printing(num_columns=10) # doctest: +SKIP
>>> x**2 + x + y**2 + y # doctest: +SKIP
x + y +
x**2 + y**2
"""
import sys
from sympy.printing.printer import Printer
if pretty_print:
if pretty_printer is not None:
stringify_func = pretty_printer
else:
from sympy.printing import pretty as stringify_func
else:
if str_printer is not None:
stringify_func = str_printer
else:
from sympy.printing import sstrrepr as stringify_func
# Even if ip is not passed, double check that not in IPython shell
if ip is None:
try:
ip = get_ipython()
except NameError:
pass
if ip and ip.__module__.startswith('IPython') and pretty_print:
try:
import IPython
# IPython 1.0 deprecates the frontend module, so we import directly
# from the terminal module to prevent a deprecation message from being
# shown.
if IPython.__version__ >= '1.0':
from IPython.terminal.interactiveshell import TerminalInteractiveShell
else:
from IPython.frontend.terminal.interactiveshell import TerminalInteractiveShell
from code import InteractiveConsole
except ImportError:
pass
else:
# This will be True if we are in the qtconsole or notebook
if not isinstance(ip, (InteractiveConsole, TerminalInteractiveShell)) \
and 'ipython-console' not in ''.join(sys.argv):
if use_unicode is None:
debug("init_printing: Setting use_unicode to True")
use_unicode = True
if use_latex is None:
debug("init_printing: Setting use_latex to True")
use_latex = True
if not no_global:
Printer.set_global_settings(order=order, use_unicode=use_unicode,
wrap_line=wrap_line, num_columns=num_columns)
else:
_stringify_func = stringify_func
if pretty_print:
stringify_func = lambda expr: \
_stringify_func(expr, order=order,
use_unicode=use_unicode,
wrap_line=wrap_line,
num_columns=num_columns)
else:
stringify_func = lambda expr: _stringify_func(expr, order=order)
if ip is not None and ip.__module__.startswith('IPython'):
_init_ipython_printing(ip, stringify_func, use_latex, euler,
forecolor, backcolor, fontsize, latex_mode,
print_builtin, latex_printer)
else:
_init_python_printing(stringify_func)
| bsd-3-clause |
aajtodd/zipline | zipline/modelling/engine.py | 7 | 15401 | """
Compute Engine for FFC API
"""
from abc import (
ABCMeta,
abstractmethod,
)
from operator import and_
from six import (
iteritems,
itervalues,
with_metaclass,
)
from six.moves import (
reduce,
zip_longest,
)
from numpy import (
add,
empty_like,
)
from pandas import (
DataFrame,
date_range,
MultiIndex,
)
from zipline.lib.adjusted_array import ensure_ndarray
from zipline.errors import NoFurtherDataError
from zipline.modelling.classifier import Classifier
from zipline.modelling.factor import Factor
from zipline.modelling.filter import Filter
from zipline.modelling.graph import TermGraph
class FFCEngine(with_metaclass(ABCMeta)):
@abstractmethod
def factor_matrix(self, terms, start_date, end_date):
"""
Compute values for `terms` between `start_date` and `end_date`.
Returns a DataFrame with a MultiIndex of (date, asset) pairs on the
index. On each date, we return a row for each asset that passed all
        instances of `Filter` in `terms`, and the columns of the returned frame
will be the keys in `terms` whose values are instances of `Factor`.
Parameters
----------
terms : dict
Map from str -> zipline.modelling.term.Term.
start_date : datetime
The first date of the matrix.
end_date : datetime
The last date of the matrix.
Returns
-------
matrix : pd.DataFrame
A matrix of factors
"""
raise NotImplementedError("factor_matrix")
class NoOpFFCEngine(FFCEngine):
"""
FFCEngine that doesn't do anything.
"""
def factor_matrix(self, terms, start_date, end_date):
return DataFrame(
index=MultiIndex.from_product(
[date_range(start=start_date, end=end_date, freq='D'), ()],
),
columns=sorted(terms.keys())
)
class SimpleFFCEngine(object):
"""
FFC Engine class that computes each term independently.
Parameters
----------
loader : FFCLoader
A loader to use to retrieve raw data for atomic terms.
calendar : DatetimeIndex
Array of dates to consider as trading days when computing a range
between a fixed start and end.
asset_finder : zipline.assets.AssetFinder
An AssetFinder instance. We depend on the AssetFinder to determine
which assets are in the top-level universe at any point in time.
"""
__slots__ = [
'_loader',
'_calendar',
'_finder',
'__weakref__',
]
def __init__(self, loader, calendar, asset_finder):
self._loader = loader
self._calendar = calendar
self._finder = asset_finder
def factor_matrix(self, terms, start_date, end_date):
"""
Compute a factor matrix.
Parameters
----------
terms : dict[str -> zipline.modelling.term.Term]
Dict mapping term names to instances. The supplied names are used
as column names in our output frame.
start_date : pd.Timestamp
Start date of the computed matrix.
end_date : pd.Timestamp
End date of the computed matrix.
The algorithm implemented here can be broken down into the following
stages:
0. Build a dependency graph of all terms in `terms`. Topologically
sort the graph to determine an order in which we can compute the terms.
1. Ask our AssetFinder for a "lifetimes matrix", which should contain,
for each date between start_date and end_date, a boolean value for each
known asset indicating whether the asset existed on that date.
2. Compute each term in the dependency order determined in (0), caching
        the results in a dictionary so that they can be fed into future
terms.
3. For each date, determine the number of assets passing **all**
filters. The sum, N, of all these values is the total number of rows in
our output frame, so we pre-allocate an output array of length N for
each factor in `terms`.
4. Fill in the arrays allocated in (3) by copying computed values from
our output cache into the corresponding rows.
5. Stick the values computed in (4) into a DataFrame and return it.
Step 0 is performed by `zipline.modelling.graph.TermGraph`.
Step 1 is performed in `self.build_lifetimes_matrix`.
Step 2 is performed in `self.compute_chunk`.
Steps 3, 4, and 5 are performed in self._format_factor_matrix.
See Also
--------
FFCEngine.factor_matrix
"""
if end_date <= start_date:
raise ValueError(
"start_date must be before end_date \n"
"start_date=%s, end_date=%s" % (start_date, end_date)
)
graph = TermGraph(terms)
max_extra_rows = graph.max_extra_rows
lifetimes = self.build_lifetimes_matrix(
start_date,
end_date,
max_extra_rows,
)
raw_outputs = self.compute_chunk(graph, lifetimes, {})
lifetimes_between_dates = lifetimes[max_extra_rows:]
dates = lifetimes_between_dates.index.values
assets = lifetimes_between_dates.columns.values
# We only need filters and factors to compute the final output matrix.
filters, factors = {}, {}
for name, term in iteritems(terms):
if isinstance(term, Filter):
filters[name] = raw_outputs[name]
elif isinstance(term, Factor):
factors[name] = raw_outputs[name]
elif isinstance(term, Classifier):
continue
else:
raise ValueError("Unknown term type: %s" % term)
# Treat base_mask as an implicit filter.
# TODO: Is there a clean way to make this actually just be a filter?
filters['base'] = lifetimes_between_dates.values
return self._format_factor_matrix(dates, assets, filters, factors)
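    # Usage sketch (hypothetical term names, for illustration only):
    #
    #   engine = SimpleFFCEngine(loader, trading_calendar, asset_finder)
    #   matrix = engine.factor_matrix(
    #       {'short_mavg': some_factor, 'tradeable': some_filter},
    #       start_date, end_date)
    #
    # The result is a DataFrame indexed by (date, asset); a row appears only
    # for assets that passed every Filter on that date, and the columns are
    # the names that map to Factor instances.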
def build_lifetimes_matrix(self, start_date, end_date, extra_rows):
"""
Compute a lifetimes matrix from our AssetFinder, then drop columns that
didn't exist at all during the query dates.
Parameters
----------
start_date : pd.Timestamp
Base start date for the matrix.
end_date : pd.Timestamp
End date for the matrix.
extra_rows : int
Number of rows prior to `start_date` to include.
Extra rows are needed by terms like moving averages that require a
trailing window of data to compute.
Returns
-------
lifetimes : pd.DataFrame
Frame of dtype `bool` containing dates from `extra_rows` days
before `start_date`, continuing through to `end_date`. The
returned frame contains as columns all assets in our AssetFinder
that existed for at least one day between `start_date` and
`end_date`.
"""
calendar = self._calendar
finder = self._finder
start_idx, end_idx = self._calendar.slice_locs(start_date, end_date)
if start_idx < extra_rows:
raise NoFurtherDataError(
msg="Insufficient data to compute FFC Matrix: "
"start date was %s, "
"earliest known date was %s, "
"and %d extra rows were requested." % (
start_date, calendar[0], extra_rows,
),
)
# Build lifetimes matrix reaching back to `extra_rows` days before
# `start_date.`
lifetimes = finder.lifetimes(
calendar[start_idx - extra_rows:end_idx]
)
assert lifetimes.index[extra_rows] == start_date
assert lifetimes.index[-1] == end_date
        if not lifetimes.columns.is_unique:
columns = lifetimes.columns
duplicated = columns[columns.duplicated()].unique()
            raise AssertionError("Duplicated sids: %s" % duplicated)
# Filter out columns that didn't exist between the requested start and
# end dates.
existed = lifetimes.iloc[extra_rows:].any()
return lifetimes.loc[:, existed]
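    # Worked example (illustrative numbers): a term computed over a trailing
    # 10-day window needs history before `start_date`, so the graph reports a
    # nonzero `extra_rows` and the frame returned above begins that many
    # trading days before the requested start.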
def _inputs_for_term(self, term, workspace, extra_rows):
"""
Compute inputs for the given term.
This is mostly complicated by the fact that for each input we store
as many rows as will be necessary to serve any term requiring that
input. Thus if Factor A needs 5 extra rows of price, and Factor B
needs 3 extra rows of price, we need to remove 2 leading rows from our
stored prices before passing them to Factor B.
"""
term_extra_rows = term.extra_input_rows
if term.windowed:
return [
workspace[input_].traverse(
term.window_length,
offset=extra_rows[input_] - term_extra_rows
)
for input_ in term.inputs
]
else:
return [
ensure_ndarray(
workspace[input_][
extra_rows[input_] - term_extra_rows:
],
)
for input_ in term.inputs
]
def compute_chunk(self, graph, base_mask, initial_workspace):
"""
Compute the FFC terms in the graph for the requested start and end
dates.
Parameters
----------
graph : zipline.modelling.graph.TermGraph
Returns
-------
results : dict
Dictionary mapping requested results to outputs.
"""
loader = self._loader
extra_rows = graph.extra_rows
max_extra_rows = graph.max_extra_rows
workspace = {}
if initial_workspace is not None:
workspace.update(initial_workspace)
for term in graph.ordered():
# Subclasses are allowed to pre-populate computed values for terms,
# and in the future we may pre-compute atomic terms coming from the
# same dataset. In both cases, it's possible that we already have
# an entry for this term.
if term in workspace:
continue
base_mask_for_term = base_mask.iloc[
max_extra_rows - extra_rows[term]:
]
if term.atomic:
# FUTURE OPTIMIZATION: Scan the resolution order for terms in
# the same dataset and load them here as well.
to_load = [term]
loaded = loader.load_adjusted_array(
to_load,
base_mask_for_term,
)
for loaded_term, adj_array in zip_longest(to_load, loaded):
workspace[loaded_term] = adj_array
else:
if term.windowed:
compute = term.compute_from_windows
else:
compute = term.compute_from_arrays
workspace[term] = compute(
self._inputs_for_term(term, workspace, extra_rows),
base_mask_for_term,
)
assert(workspace[term].shape == base_mask_for_term.shape)
out = {}
for name, term in iteritems(graph.outputs):
# Truncate off extra rows from outputs.
out[name] = workspace[term][extra_rows[term]:]
return out
def _format_factor_matrix(self, dates, assets, filters, factors):
"""
Convert raw computed filters/factors into a DataFrame for public APIs.
Parameters
----------
dates : np.array[datetime64]
Row index for arrays in `filters` and `factors.`
assets : np.array[int64]
Column index for arrays in `filters` and `factors.`
filters : dict
Dict mapping filter names -> computed filters.
factors : dict
Dict mapping factor names -> computed factors.
Returns
-------
factor_matrix : pd.DataFrame
The indices of `factor_matrix` are as follows:
index : two-tiered MultiIndex of (date, asset).
For each date, we return a row for each asset that passed all
filters on that date.
columns : keys from `factor_data`
Each date/asset/factor triple contains the computed value of the given
factor on the given date for the given asset.
"""
# FUTURE OPTIMIZATION: Cythonize all of this.
# Boolean mask of values that passed all filters.
unioned = reduce(and_, itervalues(filters))
# Parallel arrays of (x,y) coords for (date, asset) pairs that passed
# all filters. Each entry here will correspond to a row in our output
# frame.
nonzero_xs, nonzero_ys = unioned.nonzero()
# Raw arrays storing (date, asset) pairs.
# These will form the index of our output frame.
raw_dates_index = empty_like(nonzero_xs, dtype='datetime64[ns]')
raw_assets_index = empty_like(nonzero_xs, dtype=int)
# Mapping from column_name -> array.
# This will be the `data` arg to our output frame.
columns = {
name: empty_like(nonzero_xs, dtype=factor.dtype)
for name, factor in iteritems(factors)
}
# We're going to iterate over `iteritems(columns)` a whole bunch of
        # times down below. It's faster to construct the tuple of pairs once
        # and iterate over that.
columns_iter = tuple(iteritems(columns))
# This is tricky.
# unioned.sum(axis=1) gives us an array of the same size as `dates`
# containing, for each date, the number of assets that passed our
# filters on that date.
# Running this through add.accumulate gives us an array containing, for
# each date, the running total of the number of assets that passed our
# filters on or before that date.
# This means that (bounds[i - 1], bounds[i]) gives us the indices of
# the first and last rows in our output frame for each date in `dates`.
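        # Worked example (made-up numbers): if 2 assets pass on day 0 and 3
        # pass on day 1, then unioned.sum(axis=1) is [2, 3], bounds is [2, 5],
        # and day 0 fills output rows 0:2 while day 1 fills rows 2:5.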
bounds = add.accumulate(unioned.sum(axis=1))
day_start = 0
for day_idx, day_end in enumerate(bounds):
day_bounds = slice(day_start, day_end)
column_indices = nonzero_ys[day_bounds]
raw_dates_index[day_bounds] = dates[day_idx]
raw_assets_index[day_bounds] = assets[column_indices]
for name, colarray in columns_iter:
colarray[day_bounds] = factors[name][day_idx, column_indices]
# Upper bound of current row becomes lower bound for next row.
day_start = day_end
return DataFrame(
data=columns,
index=MultiIndex.from_arrays(
[
raw_dates_index,
# FUTURE OPTIMIZATION:
# Avoid duplicate lookups by grouping and only looking up
# each unique sid once.
self._finder.retrieve_all(raw_assets_index),
],
)
).tz_localize('UTC', level=0)
| apache-2.0 |
procoder317/scikit-learn | examples/model_selection/plot_confusion_matrix.py | 244 | 2496 | """
================
Confusion matrix
================
Example of confusion matrix usage to evaluate the quality
of the output of a classifier on the iris data set. The
diagonal elements represent the number of points for which
the predicted label is equal to the true label, while
off-diagonal elements are those that are mislabeled by the
classifier. The higher the diagonal values of the confusion
matrix the better, indicating many correct predictions.
The figures show the confusion matrix with and without
normalization by class support size (number of elements
in each class). This kind of normalization can be
interesting in case of class imbalance to have a more
visual interpretation of which class is being misclassified.
Here the results are not as good as they could be as our
choice for the regularization parameter C was not the best.
In real life applications this parameter is usually chosen
using :ref:`grid_search`.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm, datasets
from sklearn.cross_validation import train_test_split
from sklearn.metrics import confusion_matrix
# import some data to play with
iris = datasets.load_iris()
X = iris.data
y = iris.target
# Split the data into a training set and a test set
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
# Run classifier, using a model that is too regularized (C too low) to see
# the impact on the results
classifier = svm.SVC(kernel='linear', C=0.01)
y_pred = classifier.fit(X_train, y_train).predict(X_test)
def plot_confusion_matrix(cm, title='Confusion matrix', cmap=plt.cm.Blues):
plt.imshow(cm, interpolation='nearest', cmap=cmap)
plt.title(title)
plt.colorbar()
tick_marks = np.arange(len(iris.target_names))
plt.xticks(tick_marks, iris.target_names, rotation=45)
plt.yticks(tick_marks, iris.target_names)
plt.tight_layout()
plt.ylabel('True label')
plt.xlabel('Predicted label')
# Compute confusion matrix
cm = confusion_matrix(y_test, y_pred)
np.set_printoptions(precision=2)
print('Confusion matrix, without normalization')
print(cm)
plt.figure()
plot_confusion_matrix(cm)
# Normalize the confusion matrix by row (i.e by the number of samples
# in each class)
cm_normalized = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
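# Worked example with made-up counts: a row [10, 2, 0] sums to 12, so the
# normalized row is roughly [0.83, 0.17, 0.00]; each entry becomes the fraction
# of that true class assigned to each predicted label.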
print('Normalized confusion matrix')
print(cm_normalized)
plt.figure()
plot_confusion_matrix(cm_normalized, title='Normalized confusion matrix')
plt.show()
| bsd-3-clause |
luo66/scikit-learn | examples/applications/plot_outlier_detection_housing.py | 243 | 5577 | """
====================================
Outlier detection on a real data set
====================================
This example illustrates the need for robust covariance estimation
on a real data set. It is useful both for outlier detection and for
a better understanding of the data structure.
We selected two sets of two variables from the Boston housing data set
as an illustration of what kind of analysis can be done with several
outlier detection tools. For the purpose of visualization, we are working
with two-dimensional examples, but one should be aware that things are
not so trivial in high-dimension, as it will be pointed out.
In both examples below, the main result is that the empirical covariance
estimate, as a non-robust one, is highly influenced by the heterogeneous
structure of the observations. Although the robust covariance estimate is
able to focus on the main mode of the data distribution, it sticks to the
assumption that the data should be Gaussian distributed, yielding some biased
estimation of the data structure, but yet accurate to some extent.
The One-Class SVM algorithm is shown alongside these estimators for comparison.
First example
-------------
The first example illustrates how robust covariance estimation can help
concentrating on a relevant cluster when another one exists. Here, many
observations are confounded into one and break down the empirical covariance
estimation.
Of course, some screening tools would have pointed out the presence of two
clusters (Support Vector Machines, Gaussian Mixture Models, univariate
outlier detection, ...). But had it been a high-dimensional example, none
of these could be applied that easily.
Second example
--------------
The second example shows the ability of the Minimum Covariance Determinant
robust estimator of covariance to concentrate on the main mode of the data
distribution: the location seems to be well estimated, although the covariance
is hard to estimate due to the banana-shaped distribution. Anyway, we can
get rid of some outlying observations.
The One-Class SVM is able to capture the real data structure, but the
difficulty is to adjust its kernel bandwidth parameter so as to obtain
a good compromise between the shape of the data scatter matrix and the
risk of over-fitting the data.
"""
print(__doc__)
# Author: Virgile Fritsch <[email protected]>
# License: BSD 3 clause
import numpy as np
from sklearn.covariance import EllipticEnvelope
from sklearn.svm import OneClassSVM
import matplotlib.pyplot as plt
import matplotlib.font_manager
from sklearn.datasets import load_boston
# Get data
X1 = load_boston()['data'][:, [8, 10]] # two clusters
X2 = load_boston()['data'][:, [5, 12]] # "banana"-shaped
# Define "classifiers" to be used
classifiers = {
"Empirical Covariance": EllipticEnvelope(support_fraction=1.,
contamination=0.261),
"Robust Covariance (Minimum Covariance Determinant)":
EllipticEnvelope(contamination=0.261),
"OCSVM": OneClassSVM(nu=0.261, gamma=0.05)}
colors = ['m', 'g', 'b']
legend1 = {}
legend2 = {}
# Learn a frontier for outlier detection with several classifiers
xx1, yy1 = np.meshgrid(np.linspace(-8, 28, 500), np.linspace(3, 40, 500))
xx2, yy2 = np.meshgrid(np.linspace(3, 10, 500), np.linspace(-5, 45, 500))
for i, (clf_name, clf) in enumerate(classifiers.items()):
plt.figure(1)
clf.fit(X1)
Z1 = clf.decision_function(np.c_[xx1.ravel(), yy1.ravel()])
Z1 = Z1.reshape(xx1.shape)
legend1[clf_name] = plt.contour(
xx1, yy1, Z1, levels=[0], linewidths=2, colors=colors[i])
plt.figure(2)
clf.fit(X2)
Z2 = clf.decision_function(np.c_[xx2.ravel(), yy2.ravel()])
Z2 = Z2.reshape(xx2.shape)
legend2[clf_name] = plt.contour(
xx2, yy2, Z2, levels=[0], linewidths=2, colors=colors[i])
legend1_values_list = list( legend1.values() )
legend1_keys_list = list( legend1.keys() )
# Plot the results (= shape of the data points cloud)
plt.figure(1) # two clusters
plt.title("Outlier detection on a real data set (boston housing)")
plt.scatter(X1[:, 0], X1[:, 1], color='black')
bbox_args = dict(boxstyle="round", fc="0.8")
arrow_args = dict(arrowstyle="->")
plt.annotate("several confounded points", xy=(24, 19),
xycoords="data", textcoords="data",
xytext=(13, 10), bbox=bbox_args, arrowprops=arrow_args)
plt.xlim((xx1.min(), xx1.max()))
plt.ylim((yy1.min(), yy1.max()))
plt.legend((legend1_values_list[0].collections[0],
legend1_values_list[1].collections[0],
legend1_values_list[2].collections[0]),
(legend1_keys_list[0], legend1_keys_list[1], legend1_keys_list[2]),
loc="upper center",
prop=matplotlib.font_manager.FontProperties(size=12))
plt.ylabel("accessibility to radial highways")
plt.xlabel("pupil-teacher ratio by town")
legend2_values_list = list( legend2.values() )
legend2_keys_list = list( legend2.keys() )
plt.figure(2) # "banana" shape
plt.title("Outlier detection on a real data set (boston housing)")
plt.scatter(X2[:, 0], X2[:, 1], color='black')
plt.xlim((xx2.min(), xx2.max()))
plt.ylim((yy2.min(), yy2.max()))
plt.legend((legend2_values_list[0].collections[0],
legend2_values_list[1].collections[0],
legend2_values_list[2].collections[0]),
           (legend2_keys_list[0], legend2_keys_list[1], legend2_keys_list[2]),
loc="upper center",
prop=matplotlib.font_manager.FontProperties(size=12))
plt.ylabel("% lower status of the population")
plt.xlabel("average number of rooms per dwelling")
plt.show()
| bsd-3-clause |
larsmans/scikit-learn | sklearn/utils/random.py | 15 | 10411 | # Author: Hamzeh Alsalhi <[email protected]>
#
# License: BSD 3 clause
from __future__ import division
import numpy as np
import scipy.sparse as sp
import operator
import array
from sklearn.utils import check_random_state
from ._random import sample_without_replacement
__all__ = ['sample_without_replacement', 'choice']
# This is a backport of np.random.choice from numpy 1.7
# The function can be removed when we bump the requirements to >=1.7
def choice(a, size=None, replace=True, p=None, random_state=None):
"""
choice(a, size=None, replace=True, p=None)
Generates a random sample from a given 1-D array
.. versionadded:: 1.7.0
Parameters
-----------
a : 1-D array-like or int
If an ndarray, a random sample is generated from its elements.
If an int, the random sample is generated as if a was np.arange(n)
size : int or tuple of ints, optional
Output shape. Default is None, in which case a single value is
returned.
replace : boolean, optional
Whether the sample is with or without replacement.
p : 1-D array-like, optional
The probabilities associated with each entry in a.
        If not given the sample assumes a uniform distribution over all
entries in a.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
--------
samples : 1-D ndarray, shape (size,)
The generated random samples
Raises
-------
ValueError
If a is an int and less than zero, if a or p are not 1-dimensional,
if a is an array-like of size 0, if p is not a vector of
probabilities, if a and p have different lengths, or if
replace=False and the sample size is greater than the population
size
See Also
---------
randint, shuffle, permutation
Examples
---------
Generate a uniform random sample from np.arange(5) of size 3:
>>> np.random.choice(5, 3) # doctest: +SKIP
array([0, 3, 4])
>>> #This is equivalent to np.random.randint(0,5,3)
Generate a non-uniform random sample from np.arange(5) of size 3:
>>> np.random.choice(5, 3, p=[0.1, 0, 0.3, 0.6, 0]) # doctest: +SKIP
array([3, 3, 0])
Generate a uniform random sample from np.arange(5) of size 3 without
replacement:
>>> np.random.choice(5, 3, replace=False) # doctest: +SKIP
array([3,1,0])
>>> #This is equivalent to np.random.shuffle(np.arange(5))[:3]
Generate a non-uniform random sample from np.arange(5) of size
3 without replacement:
>>> np.random.choice(5, 3, replace=False, p=[0.1, 0, 0.3, 0.6, 0])
... # doctest: +SKIP
array([2, 3, 0])
Any of the above can be repeated with an arbitrary array-like
instead of just integers. For instance:
>>> aa_milne_arr = ['pooh', 'rabbit', 'piglet', 'Christopher']
>>> np.random.choice(aa_milne_arr, 5, p=[0.5, 0.1, 0.1, 0.3])
... # doctest: +SKIP
array(['pooh', 'pooh', 'pooh', 'Christopher', 'piglet'],
dtype='|S11')
"""
random_state = check_random_state(random_state)
# Format and Verify input
a = np.array(a, copy=False)
if a.ndim == 0:
try:
# __index__ must return an integer by python rules.
pop_size = operator.index(a.item())
except TypeError:
raise ValueError("a must be 1-dimensional or an integer")
if pop_size <= 0:
raise ValueError("a must be greater than 0")
elif a.ndim != 1:
raise ValueError("a must be 1-dimensional")
else:
pop_size = a.shape[0]
        if pop_size == 0:
raise ValueError("a must be non-empty")
    if p is not None:
p = np.array(p, dtype=np.double, ndmin=1, copy=False)
if p.ndim != 1:
raise ValueError("p must be 1-dimensional")
if p.size != pop_size:
raise ValueError("a and p must have same size")
if np.any(p < 0):
raise ValueError("probabilities are not non-negative")
if not np.allclose(p.sum(), 1):
raise ValueError("probabilities do not sum to 1")
shape = size
if shape is not None:
size = np.prod(shape, dtype=np.intp)
else:
size = 1
# Actual sampling
if replace:
        if p is not None:
cdf = p.cumsum()
cdf /= cdf[-1]
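            # e.g. p = [0.2, 0.3, 0.5] gives cdf = [0.2, 0.5, 1.0]; a uniform
            # draw of 0.35 then maps to index 1 via searchsorted below.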
uniform_samples = random_state.random_sample(shape)
idx = cdf.searchsorted(uniform_samples, side='right')
# searchsorted returns a scalar
idx = np.array(idx, copy=False)
else:
idx = random_state.randint(0, pop_size, size=shape)
else:
if size > pop_size:
raise ValueError("Cannot take a larger sample than "
"population when 'replace=False'")
        if p is not None:
if np.sum(p > 0) < size:
raise ValueError("Fewer non-zero entries in p than size")
n_uniq = 0
p = p.copy()
found = np.zeros(shape, dtype=np.int)
flat_found = found.ravel()
while n_uniq < size:
x = random_state.rand(size - n_uniq)
if n_uniq > 0:
p[flat_found[0:n_uniq]] = 0
cdf = np.cumsum(p)
cdf /= cdf[-1]
new = cdf.searchsorted(x, side='right')
_, unique_indices = np.unique(new, return_index=True)
unique_indices.sort()
new = new.take(unique_indices)
flat_found[n_uniq:n_uniq + new.size] = new
n_uniq += new.size
idx = found
else:
idx = random_state.permutation(pop_size)[:size]
if shape is not None:
idx.shape = shape
if shape is None and isinstance(idx, np.ndarray):
# In most cases a scalar will have been made an array
idx = idx.item(0)
# Use samples as indices for a if a is array-like
if a.ndim == 0:
return idx
if shape is not None and idx.ndim == 0:
# If size == () then the user requested a 0-d array as opposed to
# a scalar object when size is None. However a[idx] is always a
# scalar and not an array. So this makes sure the result is an
# array, taking into account that np.array(item) may not work
# for object arrays.
res = np.empty((), dtype=a.dtype)
res[()] = a[idx]
return res
return a[idx]
def random_choice_csc(n_samples, classes, class_probability=None,
random_state=None):
"""Generate a sparse random matrix given column class distributions
Parameters
----------
n_samples : int,
Number of samples to draw in each column.
classes : list of size n_outputs of arrays of size (n_classes,)
List of classes for each column.
class_probability : list of size n_outputs of arrays of size (n_classes,)
Optional (default=None). Class distribution of each column. If None the
uniform distribution is assumed.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Return
------
random_matrix : sparse csc matrix of size (n_samples, n_outputs)
"""
data = array.array('i')
indices = array.array('i')
indptr = array.array('i', [0])
for j in range(len(classes)):
classes[j] = np.asarray(classes[j])
if classes[j].dtype.kind != 'i':
raise ValueError("class dtype %s is not supported" %
classes[j].dtype)
classes[j] = classes[j].astype(int)
# use uniform distribution if no class_probability is given
if class_probability is None:
class_prob_j = np.empty(shape=classes[j].shape[0])
class_prob_j.fill(1 / classes[j].shape[0])
else:
class_prob_j = np.asarray(class_probability[j])
if np.sum(class_prob_j) != 1.0:
raise ValueError("Probability array at index {0} does not sum to "
"one".format(j))
if class_prob_j.shape[0] != classes[j].shape[0]:
raise ValueError("classes[{0}] (length {1}) and "
"class_probability[{0}] (length {2}) have "
"different length.".format(j,
classes[j].shape[0],
class_prob_j.shape[0]))
# If 0 is not present in the classes insert it with a probability 0.0
if 0 not in classes[j]:
classes[j] = np.insert(classes[j], 0, 0)
class_prob_j = np.insert(class_prob_j, 0, 0.0)
# If there are nonzero classes choose randomly using class_probability
if classes[j].shape[0] > 1:
p_nonzero = 1 - class_prob_j[classes[j] == 0]
nnz = int(n_samples * p_nonzero)
ind_sample = sample_without_replacement(n_population=n_samples,
n_samples=nnz,
random_state=random_state)
indices.extend(ind_sample)
# Normalize probabilites for the nonzero elements
classes_j_nonzero = classes[j] != 0
class_probability_nz = class_prob_j[classes_j_nonzero]
class_probability_nz_norm = (class_probability_nz /
np.sum(class_probability_nz))
            classes_ind = np.searchsorted(class_probability_nz_norm.cumsum(),
                                          check_random_state(random_state).rand(nnz))
data.extend(classes[j][classes_j_nonzero][classes_ind])
indptr.append(len(indices))
return sp.csc_matrix((data, indices, indptr),
(n_samples, len(classes)),
dtype=int)
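# Rough usage sketch (not part of the public API, doctest skipped):
#
# >>> classes = [np.array([0, 1]), np.array([0, 2])]
# >>> probs = [np.array([0.5, 0.5]), np.array([0.9, 0.1])]
# >>> m = random_choice_csc(10, classes, probs, random_state=0) # doctest: +SKIP
# >>> m.shape # doctest: +SKIP
# (10, 2)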
| bsd-3-clause |
marscher/mdtraj | MDTraj/core/topology.py | 1 | 30937 | ##############################################################################
# MDTraj: A Python Library for Loading, Saving, and Manipulating
# Molecular Dynamics Trajectories.
# Copyright 2012-2013 Stanford University and the Authors
#
# Authors: Peter Eastman, Robert McGibbon
# Contributors: Kyle A. Beauchamp
#
# MDTraj is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 2.1
# of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with MDTraj. If not, see <http://www.gnu.org/licenses/>.
#
# Portions of this code originate from the OpenMM molecular simulation
# toolkit, copyright (c) 2012 Stanford University and Peter Eastman. Those
# portions are distributed under the following terms:
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS, CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
# USE OR OTHER DEALINGS IN THE SOFTWARE.
##############################################################################
##############################################################################
# Imports
##############################################################################
from __future__ import print_function, division
import os
import re
import numpy as np
import itertools
from mdtraj.core import element as elem
import xml.etree.ElementTree as etree
from mdtraj.utils import ilen, import_
##############################################################################
# Utilities
##############################################################################
def _topology_from_subset(topology, atom_indices):
"""Create a new topology that only contains the supplied indices
Note
----
This really should be a copy constructor (class method) on Topology,
but I want it to work on either the mdtraj topology OR the OpenMM
topology. An inplace version for the topology object we have here
is also available.
Parameters
----------
topology : topology
The base topology
atom_indices : list([int])
The indices of the atoms to keep
"""
newTopology = Topology()
old_atom_to_new_atom = {}
for chain in topology._chains:
newChain = newTopology.add_chain()
for residue in chain._residues:
resSeq = getattr(residue, 'resSeq', None) or residue.index
newResidue = newTopology.add_residue(residue.name, newChain, resSeq)
for atom in residue._atoms:
if atom.index in atom_indices:
newAtom = newTopology.add_atom(atom.name, atom.element, newResidue)
old_atom_to_new_atom[atom] = newAtom
bondsiter = topology.bonds
if not hasattr(bondsiter, '__iter__'):
bondsiter = bondsiter()
for atom1, atom2 in bondsiter:
try:
newTopology.add_bond(old_atom_to_new_atom[atom1],
old_atom_to_new_atom[atom2])
except KeyError:
pass
# we only put bonds into the new topology if both of their partners
# were indexed and thus HAVE a new atom
# Delete empty residues
for chain in newTopology._chains:
chain._residues = [r for r in chain._residues if len(r._atoms) > 0]
# Delete empty chains
newTopology._chains = [c for c in newTopology._chains if len(c._residues) > 0]
# Re-set the numAtoms and numResidues
newTopology._numAtoms = ilen(newTopology.atoms)
newTopology._numResidues = ilen(newTopology.residues)
return newTopology
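# Usage sketch (illustrative; `traj` stands for any loaded trajectory): keep
# only the first ten atoms of an existing topology. Bonds survive only when
# both partner atoms are kept.
#
#   sub = _topology_from_subset(traj.topology, range(10))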
##############################################################################
# Classes
##############################################################################
class Topology(object):
"""Topology stores the topological information about a system.
The structure of a Topology object is similar to that of a PDB file.
It consists of a set of Chains (often but not always corresponding to
polymer chains). Each Chain contains a set of Residues, and each Residue
contains a set of Atoms. In addition, the Topology stores a list of which
atom pairs are bonded to each other.
Atom and residue names should follow the PDB 3.0 nomenclature for all
molecules for which one exists.
Attributes
----------
chains : generator
Iterator over all Chains in the Topology.
    residues : generator
Iterator over all Residues in the Chain.
atoms : generator
Iterator over all Atoms in the Chain.
Examples
--------
>>> topology = md.load('example.pdb').topology
>>> print(topology)
<mdtraj.Topology with 1 chains, 3 residues, 22 atoms, 21 bonds at 0x105a98e90>
>>> table, bonds = topology.to_dataframe()
>>> print(table.head())
serial name element resSeq resName chainID
0 0 H1 H 0 CYS 0
1 1 CH3 C 0 CYS 0
2 2 H2 H 0 CYS 0
3 3 H3 H 0 CYS 0
4 4 C C 0 CYS 0
>>> # rename residue "CYS" to "CYSS"
    >>> table[table['resName'] == 'CYS']['resName'] = 'CYSS'
>>> print(table.head())
serial name element resSeq resName chainID
0 0 H1 H 0 CYSS 0
1 1 CH3 C 0 CYSS 0
2 2 H2 H 0 CYSS 0
3 3 H3 H 0 CYSS 0
4 4 C C 0 CYSS 0
>>> t2 = md.Topology.from_dataframe(table, bonds)
"""
_standardBonds = {}
def __init__(self):
"""Create a new Topology object"""
self._chains = []
self._numResidues = 0
self._numAtoms = 0
self._bonds = []
self._atoms = []
self._residues = []
def __ne__(self, other):
return not self.__eq__(other)
def __str__(self):
return "<%s>" % (self._string_summary_basic())
def __repr__(self):
return "<%s at 0x%02x>" % (self._string_summary_basic(), id(self))
def _string_summary_basic(self):
return "mdtraj.Topology with %d chains, %d residues, %d atoms, %d bonds" % (self.n_chains, self.n_residues, self.n_atoms, len(self._bonds))
def copy(self):
"""Return a copy of the topology
Returns
-------
out : Topology
A copy of this topology
"""
out = Topology()
for chain in self.chains:
c = out.add_chain()
for residue in chain.residues:
r = out.add_residue(residue.name, c, residue.resSeq)
for atom in residue.atoms:
out.add_atom(atom.name, atom.element, r)
for a1, a2 in self.bonds:
out.add_bond(a1, a2)
return out
def __copy__(self, *args):
return self.copy()
def __deepcopy__(self, *args):
return self.copy()
def join(self, other):
"""Join two topologies together
Parameters
----------
other : Topology
Another topology object
Returns
-------
out : Topology
A joint topology, with all of the atoms/residues/chains/bonds
in each of the individual topologies
"""
if not isinstance(other, Topology):
raise ValueError('other must be an instance of Topology to join')
out = self.copy()
atom_mapping = {}
for chain in other.chains:
c = out.add_chain()
for residue in chain.residues:
r = out.add_residue(residue.name, c, residue.resSeq)
for atom in residue.atoms:
a = out.add_atom(atom.name, atom.element, r)
atom_mapping[atom] = a
for a1, a2 in other.bonds:
out.add_bond(atom_mapping[a1], atom_mapping[a2])
return out
def to_openmm(self):
"""Convert this topology into OpenMM topology
Returns
-------
topology : simtk.openmm.app.Topology
This topology, as an OpenMM topology
"""
app = import_('simtk.openmm.app')
out = app.Topology()
atom_mapping = {}
for chain in self.chains:
c = out.addChain()
for residue in chain.residues:
r = out.addResidue(residue.name, c)
for atom in residue.atoms:
a = out.addAtom(atom.name, app.Element.getBySymbol(atom.element.symbol), r)
atom_mapping[atom] = a
for a1, a2 in self.bonds:
out.addBond(atom_mapping[a1], atom_mapping[a2])
return out
@classmethod
def from_openmm(cls, value):
"""Create a mdtraj topology from an OpenMM topology
Parameters
----------
value : simtk.openmm.app.Topology
An OpenMM topology that you wish to convert to a
mdtraj topology.
"""
app = import_('simtk.openmm.app')
if not isinstance(value, app.Topology):
raise TypeError('value must be an OpenMM Topology. '
'You supplied a %s' % type(value))
out = cls()
atom_mapping = {}
for chain in value.chains():
c = out.add_chain()
for residue in chain.residues():
r = out.add_residue(residue.name, c)
for atom in residue.atoms():
a = out.add_atom(atom.name, elem.get_by_symbol(atom.element.symbol), r)
atom_mapping[atom] = a
for a1, a2 in value.bonds():
out.add_bond(atom_mapping[a1], atom_mapping[a2])
return out
def to_dataframe(self):
"""Convert this topology into a pandas dataframe
Returns
-------
atoms : pandas.DataFrame
The atoms in the topology, represented as a data frame.
bonds : np.ndarray
The bonds in this topology, represented as an n_bonds x 2 array
of the indices of the atoms involved in each bond.
"""
pd = import_('pandas')
data = []
for atom in self.atoms:
if atom.element is None:
element_symbol = ""
else:
element_symbol = atom.element.symbol
data.append((atom.index, atom.name, element_symbol,
atom.residue.resSeq, atom.residue.name,
atom.residue.chain.index))
atoms = pd.DataFrame(data, columns=["serial", "name", "element",
"resSeq", "resName", "chainID"])
atoms = atoms.set_index("serial")
bonds = np.array([(a.index, b.index) for (a, b) in self.bonds])
return atoms, bonds
@classmethod
def from_dataframe(cls, atoms, bonds=None):
"""Create a mdtraj topology from a pandas data frame
Parameters
----------
atoms : pandas.DataFrame
The atoms in the topology, represented as a data frame. This data
frame should have columns "serial" (atom index), "name" (atom name),
"element" (atom's element), "resSeq" (index of the residue)
"resName" (name of the residue), "chainID" (index of the chain),
following the same conventions as wwPDB 3.0 format.
bonds : np.ndarray, shape=(n_bonds, 2), dtype=int, optional
The bonds in the topology, represented as an n_bonds x 2 array
            of the indices of the atoms involved in each bond. Specifying
bonds here is optional. To create standard protein bonds, you can
use `create_standard_bonds` to "fill in" the bonds on your newly
created Topology object
See Also
--------
create_standard_bonds
"""
pd = import_('pandas')
for col in ["name", "element", "resSeq" , "resName", "chainID"]:
if col not in atoms.columns:
raise ValueError('dataframe must have column %s' % col)
out = cls()
if not isinstance(atoms, pd.DataFrame):
raise TypeError('atoms must be an instance of pandas.DataFrame. '
'You supplied a %s' % type(atoms))
if not isinstance(bonds, np.ndarray):
raise TypeError('bonds must be an instance of numpy.ndarray. '
'You supplied a %s' % type(bonds))
if not np.all(np.arange(len(atoms)) == atoms.index):
raise ValueError('atoms must be uniquely numbered starting from zero.')
out._atoms = [None for i in range(len(atoms))]
for ci in np.unique(atoms['chainID']):
chain_atoms = atoms[atoms['chainID'] == ci]
c = out.add_chain()
for ri in np.unique(chain_atoms['resSeq']):
residue_atoms = chain_atoms[chain_atoms['resSeq'] == ri]
rnames = residue_atoms['resName']
residue_name = np.array(rnames)[0]
if not np.all(rnames == residue_name):
raise ValueError('All of the atoms with residue index %d do not share the same residue name' % ri)
r = out.add_residue(residue_name, c, ri)
for ai, atom in residue_atoms.iterrows():
if atom['element'] == "":
element = None
else:
element = elem.get_by_symbol(atom['element'])
a = Atom(atom['name'], element, ai, r)
out._atoms[ai] = a
r._atoms.append(a)
if bonds is not None:
for ai1, ai2 in bonds:
out.add_bond(out.atom(ai1), out.atom(ai2))
out._numAtoms = out.n_atoms
return out
def __eq__(self, other):
"""Are two topologies equal?
Parameters
----------
other : object
The object to compare to
Returns
-------
equality : bool
Are the two topologies identical?
"""
if not isinstance(other, Topology):
return False
if self is other:
return True
if len(self._chains) != len(other._chains):
return False
for c1, c2 in zip(self.chains, other.chains):
if c1.index != c2.index:
return False
if len(c1._residues) != len(c2._residues):
return False
for r1, r2 in zip(c1.residues, c2.residues):
                if (r1.index != r2.index) or (r1.name != r2.name): # or (r1.resSeq != r2.resSeq):
return False
if len(r1._atoms) != len(r2._atoms):
return False
for a1, a2 in zip(r1.atoms, r2.atoms):
if (a1.index != a2.index) or (a1.name != a2.name):
return False
if a1.element is not None and a2.element is not None:
if a1.element != a2.element:
return False
#for attr in ['atomic_number', 'name', 'symbol']:
# if getattr(a1.element, attr) != getattr(a2.element, attr):
# return False
if len(self._bonds) != len(other._bonds):
return False
# the bond ordering is somewhat ambiguous, so try and fix it for comparison
self_sorted_bonds = sorted([(a1.index, b1.index) for (a1, b1) in self.bonds])
other_sorted_bonds = sorted([(a2.index, b2.index) for (a2, b2) in other.bonds])
for i in range(len(self._bonds)):
(a1, b1) = self_sorted_bonds[i]
(a2, b2) = other_sorted_bonds[i]
if (a1 != a2) or (b1 != b2):
return False
return True
def add_chain(self):
"""Create a new Chain and add it to the Topology.
Returns
-------
chain : mdtraj.topology.Chain
the newly created Chain
"""
chain = Chain(len(self._chains), self)
self._chains.append(chain)
return chain
def add_residue(self, name, chain, resSeq=None):
"""Create a new Residue and add it to the Topology.
Parameters
----------
name : str
The name of the residue to add
chain : mdtraj.topology.Chain
The Chain to add it to
resSeq : int, optional
Residue sequence number, such as from a PDB record. These sequence
numbers are arbitrary, and do not necessarily start at 0 (or 1).
If not supplied, the resSeq attribute will be set to the
residue's sequential (0 based) index.
Returns
-------
residue : mdtraj.topology.Residue
The newly created Residue
"""
if resSeq is None:
resSeq = self._numResidues
residue = Residue(name, self._numResidues, chain, resSeq)
self._residues.append(residue)
self._numResidues += 1
chain._residues.append(residue)
return residue
def add_atom(self, name, element, residue):
"""Create a new Atom and add it to the Topology.
Parameters
----------
name : str
The name of the atom to add
element : mdtraj.element.Element
The element of the atom to add
residue : mdtraj.topology.Residue
The Residue to add it to
Returns
-------
atom : mdtraj.topology.Atom
the newly created Atom
"""
atom = Atom(name, element, self._numAtoms, residue)
self._atoms.append(atom)
self._numAtoms += 1
residue._atoms.append(atom)
return atom
def add_bond(self, atom1, atom2):
"""Create a new bond and add it to the Topology.
Parameters
----------
atom1 : mdtraj.topology.Atom
The first Atom connected by the bond
atom2 : mdtraj.topology.Atom
The second Atom connected by the bond
"""
if atom1.index < atom2.index:
self._bonds.append((atom1, atom2))
else:
self._bonds.append((atom2, atom1))
def chain(self, index):
"""Get a specific chain by index. These indices
start from zero.
Returns
-------
chain : Chain
The `index`-th chain in the topology.
"""
return self._chains[index]
@property
def chains(self):
"""Iterator over all Chains in the Topology.
Returns
-------
chainiter : listiterator
Iterator over all Chains in the Topology.
"""
return iter(self._chains)
@property
def n_chains(self):
"""Get the number of chains in the Topology"""
return len(self._chains)
def residue(self, index):
"""Get a specific residue by index. These indices
start from zero.
Returns
-------
residue : Residue
The `index`-th residue in the topology.
"""
return self._residues[index]
@property
def residues(self):
"""Iterator over all Residues in the Topology.
Returns
-------
residueiter : generator
Iterator over all Residues in the Topology.
"""
for chain in self._chains:
for residue in chain._residues:
yield residue
@property
def n_residues(self):
"""Get the number of residues in the Topology"""
return len(self._residues)
def atom(self, index):
"""Get a specific atom by index. These indices
start from zero.
Returns
-------
atom : Atom
The `index`-th atom in the topology.
"""
return self._atoms[index]
@property
def atoms(self):
"""Iterator over all Atoms in the Topology.
Returns
-------
atomiter : generator
Iterator over all Atoms in the Topology.
"""
for chain in self._chains:
for residue in chain._residues:
for atom in residue._atoms:
yield atom
@property
def n_atoms(self):
"""Get the number of atoms in the Topology"""
return len(self._atoms)
@property
def bonds(self):
"""Iterator over all bonds (each represented as a tuple of two Atoms) in the Topology.
Returns
-------
atomiter : generator
Iterator over all tuples of Atoms in the Topology that are involved in a bond.
"""
return iter(self._bonds)
def create_standard_bonds(self):
"""Create bonds based on the atom and residue names for all standard residue types.
"""
if len(Topology._standardBonds) == 0:
# Load the standard bond definitions.
tree = etree.parse(os.path.join(os.path.dirname(__file__), '..', 'formats', 'pdb', 'data', 'residues.xml'))
for residue in tree.getroot().findall('Residue'):
bonds = []
Topology._standardBonds[residue.attrib['name']] = bonds
for bond in residue.findall('Bond'):
bonds.append((bond.attrib['from'], bond.attrib['to']))
for chain in self._chains:
# First build a map of atom names to atoms.
atomMaps = []
for residue in chain._residues:
atomMap = {}
atomMaps.append(atomMap)
for atom in residue._atoms:
atomMap[atom.name] = atom
# Loop over residues and construct bonds.
for i in range(len(chain._residues)):
name = chain._residues[i].name
if name in Topology._standardBonds:
for bond in Topology._standardBonds[name]:
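# Atom names in the bond definitions may carry a '-' or '+' prefix, meaning
# the atom belongs to the previous or the next residue in the chain,
# respectively.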
if bond[0].startswith('-') and i > 0:
fromResidue = i-1
fromAtom = bond[0][1:]
elif bond[0].startswith('+') and i < len(chain._residues) - 1:
fromResidue = i+1
fromAtom = bond[0][1:]
else:
fromResidue = i
fromAtom = bond[0]
if bond[1].startswith('-') and i > 0:
toResidue = i-1
toAtom = bond[1][1:]
elif bond[1].startswith('+') and i < len(chain._residues) - 1:
toResidue = i+1
toAtom = bond[1][1:]
else:
toResidue = i
toAtom = bond[1]
if fromAtom in atomMaps[fromResidue] and toAtom in atomMaps[toResidue]:
self.add_bond(atomMaps[fromResidue][fromAtom], atomMaps[toResidue][toAtom])
def create_disulfide_bonds(self, positions):
"""Identify disulfide bonds based on proximity and add them to the Topology.
Parameters
----------
positions : list
The list of atomic positions based on which to identify bonded atoms
"""
def isCyx(res):
names = [atom.name for atom in res._atoms]
return 'SG' in names and 'HG' not in names
cyx = [res for res in self.residues if res.name == 'CYS' and isCyx(res)]
atomNames = [[atom.name for atom in res._atoms] for res in cyx]
for i in range(len(cyx)):
sg1 = cyx[i]._atoms[atomNames[i].index('SG')]
pos1 = positions[sg1.index]
for j in range(i):
sg2 = cyx[j]._atoms[atomNames[j].index('SG')]
pos2 = positions[sg2.index]
delta = [x-y for (x,y) in zip(pos1, pos2)]
distance = np.sqrt(delta[0]*delta[0] + delta[1]*delta[1] + delta[2]*delta[2])
if distance < 0.3: # this is supposed to be nm. I think we're good
self.add_bond(sg1, sg2)
def subset(self, atom_indices):
"""Create a new Topology from a subset of the atoms in an existing topology.
Notes
-----
The existing topology will not be altered.
Parameters
----------
atom_indices : array_like
A list of the indices of the atoms that you'd like to retain.
"""
return _topology_from_subset(self, atom_indices)
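# A minimal usage sketch of the Topology-building API above (illustrative only,
# not part of the original module). It assumes the Topology() constructor takes
# no arguments and uses element=None for brevity; real code would pass
# mdtraj.element.Element instances to add_atom.
def _example_build_topology():
    top = Topology()
    chain = top.add_chain()
    residue = top.add_residue('ALA', chain)
    ca = top.add_atom('CA', None, residue)
    cb = top.add_atom('CB', None, residue)
    top.add_bond(ca, cb)
    return top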
class Chain(object):
"""A Chain object represents a chain within a Topology.
Attributes
----------
index : int
The index of the Chain within its Topology
topology : mdtraj.Topology
The Topology this Chain belongs to
residues : generator
Iterator over all Residues in the Chain.
atoms : generator
Iterator over all Atoms in the Chain.
"""
def __init__(self, index, topology):
"""Construct a new Chain. You should call add_chain() on the Topology instead of calling this directly."""
## The index of the Chain within its Topology
self.index = index
## The Topology this Chain belongs to
self.topology = topology
self._residues = []
@property
def residues(self):
"""Iterator over all Residues in the Chain.
Returns
-------
residueiter : listiterator
Iterator over all Residues in the Topology.
"""
return iter(self._residues)
def residue(self, index):
"""Get a specific residue in this Chain
Returns
-------
residue : Residue
"""
return self._residues[index]
@property
def n_residues(self):
"Get the number of residues in this Chain"
return len(self._residues)
@property
def atoms(self):
"""Iterator over all Atoms in the Chain.
Returns
-------
atomiter : generator
Iterator over all Atoms in the Chain.
"""
for residue in self._residues:
for atom in residue._atoms:
yield atom
def atom(self, index):
"""Get a specific atom in this Chain
Returns
-------
atom : Atom
"""
# this could be made faster by caching the list
# of atoms internally if necessary
return next(itertools.islice(self.atoms, index, index+1))
@property
def n_atoms(self):
"""Get the number of atoms in this Chain"""
return sum(r.n_atoms for r in self._residues)
class Residue(object):
"""A Residue object represents a residue within a Topology.
Attributes
----------
name : str
The name of the Residue
index : int
The index of the Residue within its Topology
chain : mdtraj.topology.Chain
The Chain this Residue belongs to
resSeq : int
The residue sequence number
"""
def __init__(self, name, index, chain, resSeq):
"""Construct a new Residue. You should call add_residue()
on the Topology instead of calling this directly."""
self.name = name
self.index = index
self.chain = chain
self.resSeq = resSeq
self._atoms = []
@property
def atoms(self):
"""Iterator over all Atoms in the Residue.
Returns
-------
atomiter : listiterator
Iterator over all Atoms in the Residue.
"""
return iter(self._atoms)
def atom(self, index):
"""Get a specific atom in this Residue.
Returns
-------
atom : Atom
"""
return self._atoms[index]
@property
def n_atoms(self):
"""Get the number of atoms in this Residue"""
return len(self._atoms)
def __str__(self):
return '%s%s' % (self.name, self.resSeq)
class Atom(object):
"""An Atom object represents a residue within a Topology.
Attributes
----------
name : str
The name of the Atom
element : mdtraj.element.Element
The element of the Atoms
index : int
The index of the Atom within its Topology
residue : mdtraj.topology.Residue
The Residue this Atom belongs to
"""
def __init__(self, name, element, index, residue):
"""Construct a new Atom. You should call add_atom() on the Topology instead of calling this directly."""
## The name of the Atom
self.name = name
## That Atom's element
self.element = element
## The index of the Atom within its Topology
self.index = index
## The Residue this Atom belongs to
self.residue = residue
def __eq__(self, other):
""" Check whether two Atom objects are equal. """
if self.name != other.name:
return False
if self.index != other.index:
return False
if self.element.name != other.element.name:
return False
if self.residue.name != other.residue.name:
return False
if self.residue.index != other.residue.index:
return False
if self.residue.chain.index != other.residue.chain.index:
return False
return True
def __hash__(self):
""" A quick comparison. """
return self.index
def __str__(self):
return '%s-%s' % (self.residue, self.name)
| lgpl-2.1 |
dparks1134/DBB | dbb/plots/AbstractPlot.py | 1 | 6095 | ###############################################################################
#
# AbstractPlot.py - Abstract base class for plotting.
#
###############################################################################
# #
# This program is free software: you can redistribute it and/or modify #
# it under the terms of the GNU General Public License as published by #
# the Free Software Foundation, either version 3 of the License, or #
# (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with this program. If not, see <http://www.gnu.org/licenses/>. #
# #
###############################################################################
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
from matplotlib.figure import Figure
import matplotlib.transforms as mtransforms
from matplotlib.patches import Rectangle
import matplotlib as mpl
import numpy as np
class AbstractPlot(FigureCanvas):
'''
Abstract base class for plotting.
'''
def __init__(self, options):
self.options = options
# Global plot settings
mpl.rcParams['font.size'] = self.options.font_size
mpl.rcParams['axes.titlesize'] = self.options.font_size
mpl.rcParams['axes.labelsize'] = self.options.font_size
mpl.rcParams['xtick.labelsize'] = self.options.font_size
mpl.rcParams['ytick.labelsize'] = self.options.font_size
mpl.rcParams['legend.fontsize'] = self.options.font_size
mpl.rcParams['svg.fonttype'] = 'none'
self.fig = Figure(facecolor='white', dpi=options.dpi)
FigureCanvas.__init__(self, self.fig)
self.cid = None
self.type = '<none>'
self.name = '<none>'
self.axesColour = (0.5, 0.5, 0.5)
def savePlot(self, filename, dpi=300):
imgFormat = filename[filename.rfind('.')+1:len(filename)]
if imgFormat in ['png', 'pdf', 'ps', 'eps','svg']:
self.fig.savefig(filename,format=imgFormat,dpi=dpi,facecolor='white',edgecolor='white')
else:
pass
def labelExtents(self, xLabels, xFontSize, xRotation, yLabels, yFontSize, yRotation):
self.fig.clear()
tempAxes = self.fig.add_axes([0,0,1.0,1.0])
tempAxes.set_xticks(np.arange(len(xLabels)))
tempAxes.set_yticks(np.arange(len(yLabels)))
xText = tempAxes.set_xticklabels(xLabels, size=xFontSize, rotation=xRotation)
yText = tempAxes.set_yticklabels(yLabels, size=yFontSize, rotation=yRotation)
bboxes = []
for label in xText:
bbox = label.get_window_extent(self.get_renderer())
bboxi = bbox.inverse_transformed(self.fig.transFigure)
bboxes.append(bboxi)
xLabelBounds = mtransforms.Bbox.union(bboxes)
bboxes = []
for label in yText:
bbox = label.get_window_extent(self.get_renderer())
bboxi = bbox.inverse_transformed(self.fig.transFigure)
bboxes.append(bboxi)
yLabelBounds = mtransforms.Bbox.union(bboxes)
self.fig.clear()
return xLabelBounds, yLabelBounds
def xLabelExtents(self, labels, fontSize, rotation=0):
self.fig.clear()
tempAxes = self.fig.add_axes([0,0,1.0,1.0])
tempAxes.set_xticks(np.arange(len(labels)))
xLabels = tempAxes.set_xticklabels(labels, size=fontSize, rotation=rotation)
bboxes = []
for label in xLabels:
bbox = label.get_window_extent(self.get_renderer())
bboxi = bbox.inverse_transformed(self.fig.transFigure)
bboxes.append(bboxi)
xLabelBounds = mtransforms.Bbox.union(bboxes)
self.fig.clear()
return xLabelBounds
def yLabelExtents(self, labels, fontSize, rotation=0):
self.fig.clear()
tempAxes = self.fig.add_axes([0,0,1.0,1.0])
tempAxes.set_yticks(np.arange(len(labels)))
yLabels = tempAxes.set_yticklabels(labels, size=fontSize, rotation=rotation)
bboxes = []
for label in yLabels:
bbox = label.get_window_extent(self.get_renderer())
bboxi = bbox.inverse_transformed(self.fig.transFigure)
bboxes.append(bboxi)
yLabelBounds = mtransforms.Bbox.union(bboxes)
self.fig.clear()
return yLabelBounds
def formatLabels(self, labels):
formattedLabels = []
for label in labels:
value = float(label.get_text())
if value < 0.01:
valueStr = '%.2e' % value
if 'e-00' in valueStr:
valueStr = valueStr.replace('e-00', 'e-')
elif 'e-0' in valueStr:
valueStr = valueStr.replace('e-0', 'e-')
else:
valueStr = '%.3f' % value
formattedLabels.append(valueStr)
return formattedLabels
def removeExtraZeros(self, label):
if '.' in label:
while label[-1] == '0':
label = label[0:-1]
if label[-1] == '.': # remove potential trailing decimal point
label = label[0:-1]
return label
def boundingBox(self, data, ax, label, bBoundingBoxes, bLabels):
''' Draw bounding box around data.'''
data = np.array(data)
width = max(data[:,0]) - min(data[:,0])
height = max(data[:,1]) - min(data[:,1])
r = Rectangle((min(data[:,0]), min(data[:,1])), width, height)
if bBoundingBoxes:
ax.add_artist(r)
r.set_clip_box(ax.bbox)
r.set_alpha(0.1)
r.set_facecolor((0.5, 0.5, 0.5))
if bLabels:
ax.annotate(label, xy = (min(data[:,0]), max(data[:,1])), xytext = (0, 0),
textcoords = 'offset points', ha = 'right', va = 'bottom',
bbox = dict(boxstyle = 'round,pad=0.5', fc = (0.5, 0.5, 0.5), alpha = 0.1), zorder=10)
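# Illustrative sketch (not part of the original module): a minimal concrete
# subclass of AbstractPlot. _ExampleOptions is a stand-in for the real options
# object, which only needs to expose font_size and dpi for the base class.
class _ExampleOptions(object):
    font_size = 8
    dpi = 96
class _ExamplePlot(AbstractPlot):
    def plot(self, x, y):
        axes = self.fig.add_subplot(111)
        axes.plot(x, y, color=self.axesColour)
        return axes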
| gpl-3.0 |
nanditav/15712-TensorFlow | tensorflow/contrib/metrics/python/kernel_tests/histogram_ops_test.py | 12 | 9744 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for histogram_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from tensorflow.contrib.metrics.python.ops import histogram_ops
class Strict1dCumsumTest(tf.test.TestCase):
"""Test this private function."""
def test_empty_tensor_returns_empty(self):
with self.test_session():
tensor = tf.constant([])
result = histogram_ops._strict_1d_cumsum(tensor, 0)
expected = tf.constant([])
np.testing.assert_array_equal(expected.eval(), result.eval())
def test_length_1_tensor_works(self):
with self.test_session():
tensor = tf.constant([3], dtype=tf.float32)
result = histogram_ops._strict_1d_cumsum(tensor, 1)
expected = tf.constant([3], dtype=tf.float32)
np.testing.assert_array_equal(expected.eval(), result.eval())
def test_length_3_tensor_works(self):
with self.test_session():
tensor = tf.constant([1, 2, 3], dtype=tf.float32)
result = histogram_ops._strict_1d_cumsum(tensor, 3)
expected = tf.constant([1, 3, 6], dtype=tf.float32)
np.testing.assert_array_equal(expected.eval(), result.eval())
class AUCUsingHistogramTest(tf.test.TestCase):
def setUp(self):
self.rng = np.random.RandomState(0)
def test_empty_labels_and_scores_gives_nan_auc(self):
with self.test_session():
labels = tf.constant([], shape=[0], dtype=tf.bool)
scores = tf.constant([], shape=[0], dtype=tf.float32)
score_range = [0, 1.]
auc, update_op = tf.contrib.metrics.auc_using_histogram(labels, scores,
score_range)
tf.local_variables_initializer().run()
update_op.run()
self.assertTrue(np.isnan(auc.eval()))
def test_perfect_scores_gives_auc_1(self):
self._check_auc(nbins=100,
desired_auc=1.0,
score_range=[0, 1.],
num_records=50,
frac_true=0.5,
atol=0.05,
num_updates=1)
def test_terrible_scores_gives_auc_0(self):
self._check_auc(nbins=100,
desired_auc=0.0,
score_range=[0, 1.],
num_records=50,
frac_true=0.5,
atol=0.05,
num_updates=1)
def test_many_common_conditions(self):
for nbins in [50]:
for desired_auc in [0.3, 0.5, 0.8]:
for score_range in [[-1, 1], [-10, 0]]:
for frac_true in [0.3, 0.8]:
# Tests pass with atol = 0.03. Moved up to 0.05 to avoid flakes.
self._check_auc(nbins=nbins,
desired_auc=desired_auc,
score_range=score_range,
num_records=100,
frac_true=frac_true,
atol=0.05,
num_updates=50)
def test_large_class_imbalance_still_ok(self):
# With probability frac_true ** num_records, each batch contains only True
# records. In this case, ~ 95%.
# Tests pass with atol = 0.02. Increased to 0.05 to avoid flakes.
self._check_auc(nbins=100,
desired_auc=0.8,
score_range=[-1, 1.],
num_records=10,
frac_true=0.995,
atol=0.05,
num_updates=1000)
def test_super_accuracy_with_many_bins_and_records(self):
# Test passes with atol = 0.0005. Increased atol to avoid flakes.
self._check_auc(nbins=1000,
desired_auc=0.75,
score_range=[0, 1.],
num_records=1000,
frac_true=0.5,
atol=0.005,
num_updates=100)
def _check_auc(self,
nbins=100,
desired_auc=0.75,
score_range=None,
num_records=50,
frac_true=0.5,
atol=0.05,
num_updates=10):
"""Check auc accuracy against synthetic data.
Args:
nbins: nbins arg from contrib.metrics.auc_using_histogram.
desired_auc: Number in [0, 1]. The desired auc for synthetic data.
score_range: 2-tuple, (low, high), giving the range of the resultant
scores. Defaults to [0, 1.].
num_records: Positive integer. The number of records to return.
frac_true: Number in (0, 1). Expected fraction of resultant labels that
will be True. This is just in expectation...more or less may actually
be True.
atol: Absolute tolerance for final AUC estimate.
num_updates: Update internal histograms this many times, each with a new
batch of synthetic data, before computing final AUC.
Raises:
AssertionError: If resultant AUC is not within atol of theoretical AUC
from synthetic data.
"""
score_range = score_range or [0, 1.]  # default to [0, 1.] when not supplied
with self.test_session():
labels = tf.placeholder(tf.bool, shape=[num_records])
scores = tf.placeholder(tf.float32, shape=[num_records])
auc, update_op = tf.contrib.metrics.auc_using_histogram(labels,
scores,
score_range,
nbins=nbins)
tf.local_variables_initializer().run()
# Updates, then extract auc.
for _ in range(num_updates):
labels_a, scores_a = synthetic_data(desired_auc, score_range,
num_records, self.rng, frac_true)
update_op.run(feed_dict={labels: labels_a, scores: scores_a})
labels_a, scores_a = synthetic_data(desired_auc, score_range, num_records,
self.rng, frac_true)
# Fetch current auc, and verify that fetching again doesn't change it.
auc_eval = auc.eval()
self.assertAlmostEqual(auc_eval, auc.eval(), places=5)
msg = ('nbins: %s, desired_auc: %s, score_range: %s, '
'num_records: %s, frac_true: %s, num_updates: %s') % (nbins,
desired_auc,
score_range,
num_records,
frac_true,
num_updates)
np.testing.assert_allclose(desired_auc, auc_eval, atol=atol, err_msg=msg)
def synthetic_data(desired_auc, score_range, num_records, rng, frac_true):
"""Create synthetic boolean_labels and scores with adjustable auc.
Args:
desired_auc: Number in [0, 1], the theoretical AUC of resultant data.
score_range: 2-tuple, (low, high), giving the range of the resultant scores
num_records: Positive integer. The number of records to return.
rng: Initialized np.random.RandomState random number generator
frac_true: Number in (0, 1). Expected fraction of resultant labels that
will be True. This is just in expectation...more or less may actually be
True.
Returns:
boolean_labels: np.array, dtype=bool.
scores: np.array, dtype=np.float32
"""
# We prove here why the method (below) for computing AUC works. Of course we
# also checked this against sklearn.metrics.roc_auc_curve.
#
# First do this for score_range = [0, 1], then rescale.
# WLOG assume AUC >= 0.5, otherwise we will solve for AUC >= 0.5 then swap
# the labels.
# So for AUC in [0.5, 1] we create False and True labels
# and corresponding scores drawn from:
# F ~ U[0, 1], T ~ U[x, 1]
# We have,
# AUC
# = P[T > F]
# = P[T > F | F < x] P[F < x] + P[T > F | F > x] P[F > x]
# = (1 * x) + (0.5 * (1 - x)).
# Inverting, we have:
# x = 2 * AUC - 1, when AUC >= 0.5.
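# Worked example of the formula above (added for illustration): requesting
# desired_auc = 0.75 gives x = 2 * 0.75 - 1 = 0.5, so True scores are drawn
# from U[0.5, 1] while False scores come from U[0, 1]. Then
# P[T > F] = 1 * 0.5 + 0.5 * (1 - 0.5) = 0.75, matching the requested AUC.
# Rescaling both score sets to score_range afterwards leaves the AUC unchanged.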
assert 0 <= desired_auc <= 1
assert 0 < frac_true < 1
if desired_auc < 0.5:
flip_labels = True
desired_auc = 1 - desired_auc
frac_true = 1 - frac_true
else:
flip_labels = False
x = 2 * desired_auc - 1
labels = rng.binomial(1, frac_true, size=num_records).astype(bool)
num_true = labels.sum()
num_false = num_records - labels.sum()
# Draw F ~ U[0, 1], and T ~ U[x, 1]
false_scores = rng.rand(num_false)
true_scores = x + rng.rand(num_true) * (1 - x)
# Reshape [0, 1] to score_range.
def reshape(scores):
return score_range[0] + scores * (score_range[1] - score_range[0])
false_scores = reshape(false_scores)
true_scores = reshape(true_scores)
# Place into one array corresponding with the labels.
scores = np.nan * np.ones(num_records, dtype=np.float32)
scores[labels] = true_scores
scores[~labels] = false_scores
if flip_labels:
labels = ~labels
return labels, scores
if __name__ == '__main__':
tf.test.main()
| apache-2.0 |
ZenDevelopmentSystems/scikit-learn | sklearn/metrics/cluster/tests/test_bicluster.py | 394 | 1770 | """Testing for bicluster metrics module"""
import numpy as np
from sklearn.utils.testing import assert_equal, assert_almost_equal
from sklearn.metrics.cluster.bicluster import _jaccard
from sklearn.metrics import consensus_score
def test_jaccard():
a1 = np.array([True, True, False, False])
a2 = np.array([True, True, True, True])
a3 = np.array([False, True, True, False])
a4 = np.array([False, False, True, True])
assert_equal(_jaccard(a1, a1, a1, a1), 1)
assert_equal(_jaccard(a1, a1, a2, a2), 0.25)
assert_equal(_jaccard(a1, a1, a3, a3), 1.0 / 7)
assert_equal(_jaccard(a1, a1, a4, a4), 0)
def test_consensus_score():
a = [[True, True, False, False],
[False, False, True, True]]
b = a[::-1]
assert_equal(consensus_score((a, a), (a, a)), 1)
assert_equal(consensus_score((a, a), (b, b)), 1)
assert_equal(consensus_score((a, b), (a, b)), 1)
assert_equal(consensus_score((a, b), (b, a)), 1)
assert_equal(consensus_score((a, a), (b, a)), 0)
assert_equal(consensus_score((a, a), (a, b)), 0)
assert_equal(consensus_score((b, b), (a, b)), 0)
assert_equal(consensus_score((b, b), (b, a)), 0)
def test_consensus_score_issue2445():
''' Different number of biclusters in A and B'''
a_rows = np.array([[True, True, False, False],
[False, False, True, True],
[False, False, False, True]])
a_cols = np.array([[True, True, False, False],
[False, False, True, True],
[False, False, False, True]])
idx = [0, 2]
s = consensus_score((a_rows, a_cols), (a_rows[idx], a_cols[idx]))
# B contains 2 of the 3 biclusters in A, so score should be 2/3
assert_almost_equal(s, 2.0/3.0)
| bsd-3-clause |
wwj718/ANALYSE | docs/en_us/platform_api/source/conf.py | 6 | 6731 | # -*- coding: utf-8 -*-
# pylint: disable=C0103
# pylint: disable=W0622
# pylint: disable=W0212
# pylint: disable=W0613
import sys, os
from path import path
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
sys.path.append('../../../../')
from docs.shared.conf import *
# Add any paths that contain templates here, relative to this directory.
#templates_path.append('source/_templates')
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
#html_static_path.append('source/_static')
if not on_rtd: # only import and set the theme if we're building docs locally
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
root = path('../../../..').abspath()
sys.path.insert(0, root)
sys.path.append(root / "lms/djangoapps/mobile_api")
sys.path.append(root / "lms/djangoapps/mobile_api/course_info")
sys.path.append(root / "lms/djangoapps/mobile_api/users")
sys.path.append(root / "lms/djangoapps/mobile_api/video_outlines")
sys.path.insert(0, os.path.abspath(os.path.normpath(os.path.dirname(__file__)
+ '/../../../')))
sys.path.append('.')
# django configuration - careful here
if on_rtd:
os.environ['DJANGO_SETTINGS_MODULE'] = 'lms'
else:
os.environ['DJANGO_SETTINGS_MODULE'] = 'lms.envs.test'
# -- General configuration -----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
'sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.intersphinx',
'sphinx.ext.todo', 'sphinx.ext.coverage', 'sphinx.ext.pngmath',
'sphinx.ext.mathjax', 'sphinx.ext.viewcode', 'sphinxcontrib.napoleon']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['build']
# Output file base name for HTML help builder.
htmlhelp_basename = 'edXDocs'
project = u'edX Platform API Version 0.5 Alpha'
copyright = u'2014, edX'
# --- Mock modules ------------------------------------------------------------
# Mock all the modules that the readthedocs build can't import
class Mock(object):
def __init__(self, *args, **kwargs):
pass
def __call__(self, *args, **kwargs):
return Mock()
@classmethod
def __getattr__(cls, name):
if name in ('__file__', '__path__'):
return '/dev/null'
elif name[0] == name[0].upper():
mockType = type(name, (), {})
mockType.__module__ = __name__
return mockType
else:
return Mock()
# The list of modules and submodules that we know give RTD trouble.
# Make sure you've tried including the relevant package in
# docs/share/requirements.txt before adding to this list.
MOCK_MODULES = [
'bson',
'bson.errors',
'bson.objectid',
'dateutil',
'dateutil.parser',
'fs',
'fs.errors',
'fs.osfs',
'lazy',
'mako',
'mako.template',
'matplotlib',
'matplotlib.pyplot',
'mock',
'numpy',
'oauthlib',
'oauthlib.oauth1',
'oauthlib.oauth1.rfc5849',
'PIL',
'pymongo',
'pyparsing',
'pysrt',
'requests',
'scipy.interpolate',
'scipy.constants',
'scipy.optimize',
'yaml',
'webob',
'webob.multidict',
]
if on_rtd:
for mod_name in MOCK_MODULES:
sys.modules[mod_name] = Mock()
# -----------------------------------------------------------------------------
# from http://djangosnippets.org/snippets/2533/
# autogenerate models definitions
import inspect
import types
from HTMLParser import HTMLParser
def force_unicode(s, encoding='utf-8', strings_only=False, errors='strict'):
"""
Similar to smart_unicode, except that lazy instances are resolved to
strings, rather than kept as lazy objects.
If strings_only is True, don't convert (some) non-string-like objects.
"""
if strings_only and isinstance(s, (types.NoneType, int)):
return s
if not isinstance(s, basestring,):
if hasattr(s, '__unicode__'):
s = unicode(s)
else:
s = unicode(str(s), encoding, errors)
elif not isinstance(s, unicode):
s = unicode(s, encoding, errors)
return s
class MLStripper(HTMLParser):
def __init__(self):
self.reset()
self.fed = []
def handle_data(self, d):
self.fed.append(d)
def get_data(self):
return ''.join(self.fed)
def strip_tags(html):
s = MLStripper()
s.feed(html)
return s.get_data()
def process_docstring(app, what, name, obj, options, lines):
"""Autodoc django models"""
# This causes import errors if left outside the function
from django.db import models
# If you want extract docs from django forms:
# from django import forms
# from django.forms.models import BaseInlineFormSet
# Only look at objects that inherit from Django's base MODEL class
if inspect.isclass(obj) and issubclass(obj, models.Model):
# Grab the field list from the meta class
fields = obj._meta._fields()
for field in fields:
# Decode and strip any html out of the field's help text
help_text = strip_tags(force_unicode(field.help_text))
# Decode and capitalize the verbose name, for use if there isn't
# any help text
verbose_name = force_unicode(field.verbose_name).capitalize()
if help_text:
# Add the model field to the end of the docstring as a param
# using the help text as the description
lines.append(u':param %s: %s' % (field.attname, help_text))
else:
# Add the model field to the end of the docstring as a param
# using the verbose name as the description
lines.append(u':param %s: %s' % (field.attname, verbose_name))
# Add the field's type to the docstring
lines.append(u':type %s: %s' % (field.attname, type(field).__name__))
return lines
def setup(app):
"""Setup docsting processors"""
#Register the docstring processor with sphinx
app.connect('autodoc-process-docstring', process_docstring)
| agpl-3.0 |
hrjn/scikit-learn | sklearn/tests/test_common.py | 39 | 6031 | """
General tests for all estimators in sklearn.
"""
# Authors: Andreas Mueller <[email protected]>
# Gael Varoquaux [email protected]
# License: BSD 3 clause
from __future__ import print_function
import os
import warnings
import sys
import re
import pkgutil
from sklearn.externals.six import PY3
from sklearn.utils.testing import assert_false, clean_warning_registry
from sklearn.utils.testing import all_estimators
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_in
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import _named_check
import sklearn
from sklearn.cluster.bicluster import BiclusterMixin
from sklearn.linear_model.base import LinearClassifierMixin
from sklearn.utils.estimator_checks import (
_yield_all_checks,
check_parameters_default_constructible,
check_class_weight_balanced_linear_classifier)
def test_all_estimator_no_base_class():
# test that all_estimators doesn't find abstract classes.
for name, Estimator in all_estimators():
msg = ("Base estimators such as {0} should not be included"
" in all_estimators").format(name)
assert_false(name.lower().startswith('base'), msg=msg)
def test_all_estimators():
# Test that estimators are default-constructible, cloneable
# and have working repr.
estimators = all_estimators(include_meta_estimators=True)
# Meta sanity-check to make sure that the estimator introspection runs
# properly
assert_greater(len(estimators), 0)
for name, Estimator in estimators:
# some can just not be sensibly default constructed
yield (_named_check(check_parameters_default_constructible, name),
name, Estimator)
def test_non_meta_estimators():
# input validation etc for non-meta estimators
estimators = all_estimators()
for name, Estimator in estimators:
if issubclass(Estimator, BiclusterMixin):
continue
if name.startswith("_"):
continue
for check in _yield_all_checks(name, Estimator):
yield _named_check(check, name), name, Estimator
def test_configure():
# Smoke test the 'configure' step of setup, this tests all the
# 'configure' functions in the setup.pys in the scikit
cwd = os.getcwd()
setup_path = os.path.abspath(os.path.join(sklearn.__path__[0], '..'))
setup_filename = os.path.join(setup_path, 'setup.py')
if not os.path.exists(setup_filename):
return
try:
os.chdir(setup_path)
old_argv = sys.argv
sys.argv = ['setup.py', 'config']
clean_warning_registry()
with warnings.catch_warnings():
# The configuration spits out warnings when not finding
# Blas/Atlas development headers
warnings.simplefilter('ignore', UserWarning)
if PY3:
with open('setup.py') as f:
exec(f.read(), dict(__name__='__main__'))
else:
execfile('setup.py', dict(__name__='__main__'))
finally:
sys.argv = old_argv
os.chdir(cwd)
def test_class_weight_balanced_linear_classifiers():
classifiers = all_estimators(type_filter='classifier')
clean_warning_registry()
with warnings.catch_warnings(record=True):
linear_classifiers = [
(name, clazz)
for name, clazz in classifiers
if ('class_weight' in clazz().get_params().keys() and
issubclass(clazz, LinearClassifierMixin))]
for name, Classifier in linear_classifiers:
yield _named_check(check_class_weight_balanced_linear_classifier,
name), name, Classifier
@ignore_warnings
def test_import_all_consistency():
# Smoke test to check that any name in a __all__ list is actually defined
# in the namespace of the module or package.
pkgs = pkgutil.walk_packages(path=sklearn.__path__, prefix='sklearn.',
onerror=lambda _: None)
submods = [modname for _, modname, _ in pkgs]
for modname in submods + ['sklearn']:
if ".tests." in modname:
continue
package = __import__(modname, fromlist="dummy")
for name in getattr(package, '__all__', ()):
if getattr(package, name, None) is None:
raise AttributeError(
"Module '{0}' has no attribute '{1}'".format(
modname, name))
def test_root_import_all_completeness():
EXCEPTIONS = ('utils', 'tests', 'base', 'setup')
for _, modname, _ in pkgutil.walk_packages(path=sklearn.__path__,
onerror=lambda _: None):
if '.' in modname or modname.startswith('_') or modname in EXCEPTIONS:
continue
assert_in(modname, sklearn.__all__)
def test_all_tests_are_importable():
# Ensure that for each contentful subpackage, there is a test directory
# within it that is also a subpackage (i.e. a directory with __init__.py)
HAS_TESTS_EXCEPTIONS = re.compile(r'''(?x)
\.externals(\.|$)|
\.tests(\.|$)|
\._
''')
lookup = dict((name, ispkg)
for _, name, ispkg
in pkgutil.walk_packages(sklearn.__path__,
prefix='sklearn.'))
missing_tests = [name for name, ispkg in lookup.items()
if ispkg
and not HAS_TESTS_EXCEPTIONS.search(name)
and name + '.tests' not in lookup]
assert_equal(missing_tests, [],
'{0} do not have `tests` subpackages. Perhaps they require '
'__init__.py or an add_subpackage directive in the parent '
'setup.py'.format(missing_tests))
| bsd-3-clause |
ryandougherty/mwa-capstone | MWA_Tools/build/matplotlib/lib/matplotlib/backends/backend_emf.py | 1 | 26381 | """
Enhanced Metafile backend. See http://pyemf.sourceforge.net for the EMF
driver library.
"""
from __future__ import division
try:
import pyemf
except ImportError:
raise ImportError('You must first install pyemf from http://pyemf.sf.net')
import os,sys,math,re
from matplotlib import verbose, __version__, rcParams
from matplotlib._pylab_helpers import Gcf
from matplotlib.backend_bases import RendererBase, GraphicsContextBase,\
FigureManagerBase, FigureCanvasBase
from matplotlib.figure import Figure
from matplotlib.transforms import Bbox
from matplotlib.font_manager import findfont, FontProperties
from matplotlib.ft2font import FT2Font, KERNING_UNFITTED, KERNING_DEFAULT, KERNING_UNSCALED
from matplotlib.path import Path
from matplotlib.transforms import Affine2D
from matplotlib.mlab import quad2cubic
# Font handling stuff snarfed from backend_ps, but only using TTF fonts
_fontd = {}
# Debug print stuff
debugHandle = False
debugPrint = False
debugText = False
# Hashable font properties class. In EMF, angle of rotation is a part
# of the font properties, so a handle to a new font must be obtained
# if the rotation changes.
class EMFFontProperties(FontProperties):
def __init__(self,other,angle):
FontProperties.__init__(self,other.get_family(),
other.get_style(),
other.get_variant(),
other.get_weight(),
other.get_stretch(),
other.get_size())
self._angle=angle
def __hash__(self):
return hash( (FontProperties.__hash__(self), self._angle))
def __str__(self):
return str( (FontProperties.__str__(self), self._angle))
def set_angle(self,angle):
self._angle=angle
def get_angle(self):
return self._angle
# Hashable pen (line style) properties.
class EMFPen:
def __init__(self,emf,gc):
self.emf=emf
self.gc=gc
r,g,b=gc.get_rgb()[:3]
self.r=int(r*255)
self.g=int(g*255)
self.b=int(b*255)
self.width=int(gc.get_linewidth())
self.style=0
self.set_linestyle()
if debugHandle: print "EMFPen: style=%d width=%d rgb=(%d,%d,%d)" % (self.style,self.width,self.r,self.g,self.b)
def __hash__(self):
return hash((self.style,self.width,self.r,self.g,self.b))
def set_linestyle(self):
# Hack. Negative width lines will not get drawn.
if self.width<0:
self.style=pyemf.PS_NULL
else:
styles={'solid':pyemf.PS_SOLID, 'dashed':pyemf.PS_DASH,
'dashdot':pyemf.PS_DASHDOT, 'dotted':pyemf.PS_DOT}
#style=styles.get(self.gc.get_linestyle('solid'))
style=self.gc.get_linestyle('solid')
if debugHandle: print "EMFPen: style=%s" % style
if style in styles:
self.style=styles[style]
else:
self.style=pyemf.PS_SOLID
def get_handle(self):
handle=self.emf.CreatePen(self.style,self.width,(self.r,self.g,self.b))
return handle
# Hashable brush (fill style) properties.
class EMFBrush:
def __init__(self,emf,rgb):
self.emf=emf
r,g,b=rgb[:3]
self.r=int(r*255)
self.g=int(g*255)
self.b=int(b*255)
if debugHandle: print "EMFBrush: rgb=(%d,%d,%d)" % (self.r,self.g,self.b)
def __hash__(self):
return hash((self.r,self.g,self.b))
def get_handle(self):
handle=self.emf.CreateSolidBrush((self.r,self.g,self.b))
return handle
class RendererEMF(RendererBase):
"""
The renderer handles drawing/rendering operations through a
pyemf.EMF instance.
"""
fontweights = {
100 : pyemf.FW_NORMAL,
200 : pyemf.FW_NORMAL,
300 : pyemf.FW_NORMAL,
400 : pyemf.FW_NORMAL,
500 : pyemf.FW_NORMAL,
600 : pyemf.FW_BOLD,
700 : pyemf.FW_BOLD,
800 : pyemf.FW_BOLD,
900 : pyemf.FW_BOLD,
'ultralight' : pyemf.FW_ULTRALIGHT,
'light' : pyemf.FW_LIGHT,
'normal' : pyemf.FW_NORMAL,
'medium' : pyemf.FW_MEDIUM,
'semibold' : pyemf.FW_SEMIBOLD,
'bold' : pyemf.FW_BOLD,
'heavy' : pyemf.FW_HEAVY,
'ultrabold' : pyemf.FW_ULTRABOLD,
'black' : pyemf.FW_BLACK,
}
def __init__(self, outfile, width, height, dpi):
"Initialize the renderer with a gd image instance"
self.outfile = outfile
# a map from get_color args to colors
self._cached = {}
# dict of hashed properties to already created font handles
self._fontHandle = {}
self.lastHandle = {'font':-1, 'pen':-1, 'brush':-1}
self.emf=pyemf.EMF(width,height,dpi,'in')
self.width=int(width*dpi)
self.height=int(height*dpi)
self.dpi = dpi
self.pointstodpi = dpi/72.0
self.hackPointsForMathExponent = 2.0
# set background transparent for text
self.emf.SetBkMode(pyemf.TRANSPARENT)
# set baseline for text to be bottom left corner
self.emf.SetTextAlign( pyemf.TA_BOTTOM|pyemf.TA_LEFT)
self._lastClipRect = None
if debugPrint: print "RendererEMF: (%f,%f) %s dpi=%f" % (self.width,self.height,outfile,dpi)
def save(self):
self.emf.save(self.outfile)
def draw_arc(self, gcEdge, rgbFace, x, y, width, height, angle1, angle2, rotation):
"""
Draw an arc using GraphicsContext instance gcEdge, centered at x,y,
with width and height and angles from 0.0 to 360.0
0 degrees is at 3-o'clock
positive angles are anti-clockwise
If the color rgbFace is not None, fill the arc with it.
"""
if debugPrint: print "draw_arc: (%f,%f) angles=(%f,%f) w,h=(%f,%f)" % (x,y,angle1,angle2,width,height)
pen=self.select_pen(gcEdge)
brush=self.select_brush(rgbFace)
# This algorithm doesn't work very well on small circles
# because of rounding error. This shows up most obviously on
# legends where the circles are small anyway, and it is
# compounded by the fact that it puts several circles right
# next to each other so the differences are obvious.
hw=width/2
hh=height/2
x1=int(x-width/2)
y1=int(y-height/2)
if brush:
self.emf.Pie(int(x-hw),int(self.height-(y-hh)),int(x+hw),int(self.height-(y+hh)),int(x+math.cos(angle1*math.pi/180.0)*hw),int(self.height-(y+math.sin(angle1*math.pi/180.0)*hh)),int(x+math.cos(angle2*math.pi/180.0)*hw),int(self.height-(y+math.sin(angle2*math.pi/180.0)*hh)))
else:
self.emf.Arc(int(x-hw),int(self.height-(y-hh)),int(x+hw),int(self.height-(y+hh)),int(x+math.cos(angle1*math.pi/180.0)*hw),int(self.height-(y+math.sin(angle1*math.pi/180.0)*hh)),int(x+math.cos(angle2*math.pi/180.0)*hw),int(self.height-(y+math.sin(angle2*math.pi/180.0)*hh)))
def handle_clip_rectangle(self, gc):
new_bounds = gc.get_clip_rectangle()
if new_bounds is not None:
new_bounds = new_bounds.bounds
if self._lastClipRect != new_bounds:
self._lastClipRect = new_bounds
if new_bounds is None:
# use the maximum rectangle to disable clipping
x, y, width, height = (0, 0, self.width, self.height)
else:
x, y, width, height = new_bounds
self.emf.BeginPath()
self.emf.MoveTo(int(x), int(self.height - y))
self.emf.LineTo(int(x) + int(width), int(self.height - y))
self.emf.LineTo(int(x) + int(width), int(self.height - y) - int(height))
self.emf.LineTo(int(x), int(self.height - y) - int(height))
self.emf.CloseFigure()
self.emf.EndPath()
self.emf.SelectClipPath()
def convert_path(self, tpath):
self.emf.BeginPath()
last_points = None
for points, code in tpath.iter_segments():
if code == Path.MOVETO:
self.emf.MoveTo(*points)
elif code == Path.LINETO:
self.emf.LineTo(*points)
elif code == Path.CURVE3:
points = quad2cubic(*(list(last_points[-2:]) + list(points)))
self.emf.PolyBezierTo(zip(points[2::2], points[3::2]))
elif code == Path.CURVE4:
self.emf.PolyBezierTo(zip(points[::2], points[1::2]))
elif code == Path.CLOSEPOLY:
self.emf.CloseFigure()
last_points = points
self.emf.EndPath()
def draw_path(self, gc, path, transform, rgbFace=None):
"""
Draws a :class:`~matplotlib.path.Path` instance using the
given affine transform.
"""
self.handle_clip_rectangle(gc)
gc._rgb = gc._rgb[:3]
self.select_pen(gc)
self.select_brush(rgbFace)
transform = transform + Affine2D().scale(1.0, -1.0).translate(0.0, self.height)
tpath = transform.transform_path(path)
self.convert_path(tpath)
if rgbFace is None:
self.emf.StrokePath()
else:
self.emf.StrokeAndFillPath()
def draw_image(self, x, y, im, bbox, clippath=None, clippath_trans=None):
"""
Draw the Image instance into the current axes; x is the
distance in pixels from the left hand side of the canvas. y is
the distance from the origin. That is, if origin is upper, y
is the distance from top. If origin is lower, y is the
distance from bottom
bbox is a matplotlib.transforms.BBox instance for clipping, or
None
"""
# pyemf2 currently doesn't support bitmaps.
pass
def draw_line(self, gc, x1, y1, x2, y2):
"""
Draw a single line from x1,y1 to x2,y2
"""
if debugPrint: print "draw_line: (%f,%f) - (%f,%f)" % (x1,y1,x2,y2)
if self.select_pen(gc):
self.emf.Polyline([(long(x1),long(self.height-y1)),(long(x2),long(self.height-y2))])
else:
if debugPrint: print "draw_line: optimizing away (%f,%f) - (%f,%f)" % (x1,y1,x2,y2)
def draw_lines(self, gc, x, y):
"""
x and y are equal length arrays, draw lines connecting each
point in x, y
"""
if debugPrint: print "draw_lines: %d points" % len(str(x))
# optimize away anything that won't actually be drawn. Edge
# style must not be PS_NULL for it to appear on screen.
if self.select_pen(gc):
points = [(long(x[i]), long(self.height-y[i])) for i in range(len(x))]
self.emf.Polyline(points)
def draw_point(self, gc, x, y):
"""
Draw a single point at x,y
Where 'point' is a device-unit point (or pixel), not a matplotlib point
"""
if debugPrint: print "draw_point: (%f,%f)" % (x,y)
# don't cache this pen
pen=EMFPen(self.emf,gc)
self.emf.SetPixel(long(x),long(self.height-y),(pen.r,pen.g,pen.b))
def draw_polygon(self, gcEdge, rgbFace, points):
"""
Draw a polygon using the GraphicsContext instance gc.
points is a sequence of vertices, each element
giving the (x, y) coordinates of a vertex
If the color rgbFace is not None, fill the polygon with it
"""
if debugPrint: print "draw_polygon: %d points" % len(points)
# optimize away anything that won't actually draw. Either a
# face color or edge style must be defined
pen=self.select_pen(gcEdge)
brush=self.select_brush(rgbFace)
if pen or brush:
points = [(long(x), long(self.height-y)) for x,y in points]
self.emf.Polygon(points)
else:
points = [(long(x), long(self.height-y)) for x,y in points]
if debugPrint: print "draw_polygon: optimizing away polygon: %d points = %s" % (len(points),str(points))
def draw_rectangle(self, gcEdge, rgbFace, x, y, width, height):
"""
Draw a non-filled rectangle using the GraphicsContext instance gcEdge,
with lower left at x,y with width and height.
If rgbFace is not None, fill the rectangle with it.
"""
if debugPrint: print "draw_rectangle: (%f,%f) w=%f,h=%f" % (x,y,width,height)
# optimize away anything that won't actually draw. Either a
# face color or edge style must be defined
pen=self.select_pen(gcEdge)
brush=self.select_brush(rgbFace)
if pen or brush:
self.emf.Rectangle(int(x),int(self.height-y),int(x)+int(width),int(self.height-y)-int(height))
else:
if debugPrint: print "draw_rectangle: optimizing away (%f,%f) w=%f,h=%f" % (x,y,width,height)
def draw_text(self, gc, x, y, s, prop, angle, ismath=False):
"""
Draw the text.Text instance s at x,y (display coords) with font
properties instance prop at angle in degrees, using GraphicsContext gc
**backend implementers note**
When you are trying to determine if you have gotten your bounding box
right (which is what enables the text layout/alignment to work
properly), it helps to change the line in text.py
if 0: bbox_artist(self, renderer)
to if 1, and then the actual bounding box will be blotted along with
your text.
"""
if ismath: s = self.strip_math(s)
self.handle_clip_rectangle(gc)
self.emf.SetTextColor(gc.get_rgb()[:3])
self.select_font(prop,angle)
if isinstance(s, unicode):
# unicode characters do not seem to work with pyemf
try:
s = s.replace(u'\u2212', '-').encode('iso-8859-1')
except UnicodeEncodeError:
pass
self.emf.TextOut(x,y,s)
def draw_plain_text(self, gc, x, y, s, prop, angle):
"""
Draw a text string verbatim; no conversion is done.
"""
if debugText: print "draw_plain_text: (%f,%f) %d degrees: '%s'" % (x,y,angle,s)
if debugText: print " properties:\n"+str(prop)
self.select_font(prop,angle)
# haxor follows! The subtleties of text placement in EMF
# still elude me a bit. It always seems to be too high on the
# page, about 10 pixels too high on a 300dpi resolution image.
# So, I'm adding this hack for the moment:
hackoffsetper300dpi=10
xhack=math.sin(angle*math.pi/180.0)*hackoffsetper300dpi*self.dpi/300.0
yhack=math.cos(angle*math.pi/180.0)*hackoffsetper300dpi*self.dpi/300.0
self.emf.TextOut(long(x+xhack),long(y+yhack),s)
def draw_math_text(self, gc, x, y, s, prop, angle):
"""
Draw a subset of TeX, currently handles exponents only. Since
pyemf doesn't have any raster functionality yet, the
texmanager.get_rgba won't help.
"""
if debugText: print "draw_math_text: (%f,%f) %d degrees: '%s'" % (x,y,angle,s)
s = s[1:-1] # strip the $ from front and back
match=re.match("10\^\{(.+)\}",s)
if match:
exp=match.group(1)
if debugText: print " exponent=%s" % exp
font = self._get_font_ttf(prop)
font.set_text("10", 0.0)
w, h = font.get_width_height()
w /= 64.0 # convert from subpixels
h /= 64.0
self.draw_plain_text(gc,x,y,"10",prop,angle)
propexp=prop.copy()
propexp.set_size(prop.get_size_in_points()*.8)
self.draw_plain_text(gc,x+w+self.points_to_pixels(self.hackPointsForMathExponent),y-(h/2),exp,propexp,angle)
else:
# if it isn't an exponent, then render the raw TeX string.
self.draw_plain_text(gc,x,y,s,prop,angle)
def get_math_text_width_height(self, s, prop):
"""
get the width and height in display coords of the string s
with FontPropertry prop, ripped right out of backend_ps. This
method must be kept in sync with draw_math_text.
"""
if debugText: print "get_math_text_width_height:"
s = s[1:-1] # strip the $ from front and back
match=re.match("10\^\{(.+)\}",s)
if match:
exp=match.group(1)
if debugText: print " exponent=%s" % exp
font = self._get_font_ttf(prop)
font.set_text("10", 0.0)
w1, h1 = font.get_width_height()
propexp=prop.copy()
propexp.set_size(prop.get_size_in_points()*.8)
fontexp=self._get_font_ttf(propexp)
fontexp.set_text(exp, 0.0)
w2, h2 = fontexp.get_width_height()
w=w1+w2
h=h1+(h2/2)
w /= 64.0 # convert from subpixels
h /= 64.0
w+=self.points_to_pixels(self.hackPointsForMathExponent)
if debugText: print " math string=%s w,h=(%f,%f)" % (s, w, h)
else:
w,h=self.get_text_width_height(s,prop,False)
return w, h
def get_text_width_height_descent(self, s, prop, ismath):
"""
get the width and height in display coords of the string s
with FontProperties prop
"""
if ismath: s = self.strip_math(s)
font = self._get_font_ttf(prop)
font.set_text(s, 0.0)
w, h = font.get_width_height()
w /= 64.0 # convert from subpixels
h /= 64.0
d = font.get_descent()
d /= 64.0
return w, h, d
def flipy(self):
"""return true if y small numbers are top for renderer
Is used for drawing text (text.py) and images (image.py) only
"""
return True
def get_canvas_width_height(self):
"""
return the canvas width and height in display coords
"""
return self.width,self.height
def set_handle(self,type,handle):
"""
Update the EMF file with the current handle, but only if it
isn't the same as the last one. Don't want to flood the file
with duplicate info.
"""
if self.lastHandle[type] != handle:
self.emf.SelectObject(handle)
self.lastHandle[type]=handle
def get_font_handle(self, prop, angle):
"""
Look up the handle for the font based on the dict of
properties *and* the rotation angle, since in EMF the font
rotation is a part of the font definition.
"""
prop=EMFFontProperties(prop,angle)
size=int(prop.get_size_in_points()*self.pointstodpi)
face=prop.get_name()
key = hash(prop)
handle = self._fontHandle.get(key)
if handle is None:
handle=self.emf.CreateFont(-size, 0, int(angle)*10, int(angle)*10,
self.fontweights.get(prop.get_weight(), pyemf.FW_NORMAL),
int(prop.get_style() == 'italic'),
0, 0,
pyemf.ANSI_CHARSET, pyemf.OUT_DEFAULT_PRECIS,
pyemf.CLIP_DEFAULT_PRECIS, pyemf.DEFAULT_QUALITY,
pyemf.DEFAULT_PITCH | pyemf.FF_DONTCARE, face);
if debugHandle: print "get_font_handle: creating handle=%d for face=%s size=%d" % (handle,face,size)
self._fontHandle[key]=handle
if debugHandle: print " found font handle %d for face=%s size=%d" % (handle,face,size)
self.set_handle("font",handle)
return handle
def select_font(self,prop,angle):
handle=self.get_font_handle(prop,angle)
self.set_handle("font",handle)
def select_pen(self, gc):
"""
Select a pen that includes the color, line width and line
style. Return the pen if it will draw a line, or None if the
pen won't produce any output (i.e. the style is PS_NULL)
"""
pen=EMFPen(self.emf,gc)
key=hash(pen)
handle=self._fontHandle.get(key)
if handle is None:
handle=pen.get_handle()
self._fontHandle[key]=handle
if debugHandle: print " found pen handle %d" % handle
self.set_handle("pen",handle)
if pen.style != pyemf.PS_NULL:
return pen
else:
return None
def select_brush(self, rgb):
"""
Select a fill color, and return the brush if the color is
valid or None if this won't produce a fill operation.
"""
if rgb is not None:
brush=EMFBrush(self.emf,rgb)
key=hash(brush)
handle=self._fontHandle.get(key)
if handle is None:
handle=brush.get_handle()
self._fontHandle[key]=handle
if debugHandle: print " found brush handle %d" % handle
self.set_handle("brush",handle)
return brush
else:
return None
def _get_font_ttf(self, prop):
"""
get the true type font properties, used because EMFs on
windows will use true type fonts.
"""
key = hash(prop)
font = _fontd.get(key)
if font is None:
fname = findfont(prop)
if debugText: print "_get_font_ttf: name=%s" % fname
font = FT2Font(str(fname))
_fontd[key] = font
font.clear()
size = prop.get_size_in_points()
font.set_size(size, self.dpi)
return font
def get_text_width_height(self, s, prop, ismath):
"""
get the width and height in display coords of the string s
with FontProperties prop, ripped right out of backend_ps
"""
if debugText: print "get_text_width_height: ismath=%s properties: %s" % (str(ismath),str(prop))
if ismath:
if debugText: print " MATH TEXT! = %s" % str(ismath)
w,h = self.get_math_text_width_height(s, prop)
return w,h
font = self._get_font_ttf(prop)
font.set_text(s, 0.0)
w, h = font.get_width_height()
w /= 64.0 # convert from subpixels
h /= 64.0
if debugText: print " text string=%s w,h=(%f,%f)" % (s, w, h)
return w, h
def new_gc(self):
return GraphicsContextEMF()
def points_to_pixels(self, points):
# if backend doesn't have dpi, eg, postscript or svg
#return points
# elif backend assumes a value for pixels_per_inch
#return points/72.0 * self.dpi.get() * pixels_per_inch/72.0
# else
return points/72.0 * self.dpi
class GraphicsContextEMF(GraphicsContextBase):
"""
The graphics context provides the color, line styles, etc... See the gtk
and postscript backends for examples of mapping the graphics context
attributes (cap styles, join styles, line widths, colors) to a particular
backend. In GTK this is done by wrapping a gtk.gdk.GC object and
forwarding the appropriate calls to it using a dictionary mapping styles
to gdk constants. In Postscript, all the work is done by the renderer,
mapping line styles to postscript calls.
If it's more appropriate to do the mapping at the renderer level (as in
the postscript backend), you don't need to override any of the GC methods.
If it's more appropriate to wrap an instance (as in the GTK backend) and
do the mapping here, you'll need to override several of the setter
methods.
The base GraphicsContext stores colors as a RGB tuple on the unit
interval, eg, (0.5, 0.0, 1.0). You may need to map this to colors
appropriate for your backend.
"""
pass
########################################################################
#
# The following functions and classes are for pylab and implement
# window/figure managers, etc...
#
########################################################################
def draw_if_interactive():
"""
For image backends - is not required
For GUI backends - this should be overriden if drawing should be done in
interactive python mode
"""
pass
def show():
"""
For image backends - is not required
For GUI backends - show() is usually the last line of a pylab script and
tells the backend that it is time to draw. In interactive mode, this may
be a do nothing func. See the GTK backend for an example of how to handle
interactive versus batch mode
"""
for manager in Gcf.get_all_fig_managers():
# do something to display the GUI
pass
def new_figure_manager(num, *args, **kwargs):
"""
Create a new figure manager instance
"""
# if a main-level app must be created, this is the usual place to
# do it -- see backend_wx, backend_wxagg and backend_tkagg for
# examples. Not all GUIs require explicit instantiation of a
# main-level app (e.g. backend_gtk, backend_gtkagg) for pylab
FigureClass = kwargs.pop('FigureClass', Figure)
thisFig = FigureClass(*args, **kwargs)
canvas = FigureCanvasEMF(thisFig)
manager = FigureManagerEMF(canvas, num)
return manager
class FigureCanvasEMF(FigureCanvasBase):
"""
The canvas the figure renders into. Calls the draw and print fig
methods, creates the renderers, etc...
Public attribute
figure - A Figure instance
"""
def draw(self):
"""
Draw the figure using the renderer
"""
pass
filetypes = {'emf': 'Enhanced Metafile'}
def print_emf(self, filename, dpi=300, **kwargs):
width, height = self.figure.get_size_inches()
renderer = RendererEMF(filename,width,height,dpi)
self.figure.draw(renderer)
renderer.save()
def get_default_filetype(self):
return 'emf'
class FigureManagerEMF(FigureManagerBase):
"""
Wrap everything up into a window for the pylab interface
For non interactive backends, the base class does all the work
"""
pass
########################################################################
#
# Now just provide the standard names that backend.__init__ is expecting
#
########################################################################
FigureManager = FigureManagerEMF
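# Minimal usage sketch (illustrative, not part of the original backend): render
# an existing matplotlib Figure to an EMF file through this backend's canvas.
def _example_save_emf(figure, filename='example.emf'):
    canvas = FigureCanvasEMF(figure)
    canvas.print_emf(filename, dpi=300)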
| gpl-2.0 |
totalgood/twip | docs/notebooks/08 Features -- TFIDF with Gensim.py | 1 | 2679 |
# coding: utf-8
# In[3]:
from __future__ import division, print_function, absolute_import
from past.builtins import basestring
import os
import pandas as pd
from twip.constant import DATA_PATH
import string
# In[4]:
import matplotlib
from IPython.display import display, HTML
get_ipython().magic(u'matplotlib inline')
np = pd.np
display(HTML("<style>.container { width:100% !important; }</style>"))
pd.set_option('display.max_rows', 6)
pd.set_option('display.max_columns', 500)
pd.set_option('display.width', 500)
# In[5]:
import gzip
from gensim.models import TfidfModel
from gensim.corpora import Dictionary
# Load previously cleaned data
# In[6]:
dates = pd.read_csv(os.path.join(DATA_PATH, 'datetimes.csv.gz'), engine='python')
nums = pd.read_csv(os.path.join(DATA_PATH, 'numbers.csv.gz'), engine='python')
with gzip.open(os.path.join(DATA_PATH, 'text.csv.gz'), 'rb') as f:
df = pd.DataFrame.from_csv(f, encoding='utf8')
df.tokens
# In[7]:
d = Dictionary.from_documents(df.tokens)
# In[11]:
df.tokens.iloc[0]
# When we said "QUOTE_NONNUMERIC" we didn't mean **ALL** nonnumeric fields ;)
# In[16]:
df['tokens'] = df.txt.str.split()
df.tokens
# In[18]:
df.tokens.values[0:3]
# In[22]:
d = Dictionary.from_documents(df.tokens)
d
# In[20]:
tfidf = TfidfModel(d)
# *Hint-Hint:* `gensim` is sprinting this week at PyCon!
# In[24]:
get_ipython().magic(u'pinfo TfidfModel')
# In[26]:
TfidfModel(df.txt)
# In[27]:
TfidfModel(df.tokens)
# In[28]:
TfidfModel((d.doc2bow(tokens) for tokens in df.tokens))
# But there's a simpler way.
# We already have a vocabulary
# with term and document frequencies in a matrix...
# In[33]:
pd.Series(d.dfs)
# In[34]:
pd.Series(d.iteritems())
# OK, now I get it
#
# - `document` is a list of strings (ordered sequence of tokens)
# - `bow` or [bag of words] is a list of `Counter`-like mappings between word IDs and their count in each document
# - `TfidfModel` is a transformation from a BOW into a BORF, a "bag of relative frequencies"
#
# TFIDF = BORF = term frequencies normalized by document occurrence counts
#
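# A tiny added sketch (not one of the original cells): the same pipeline on a
# toy corpus -- token lists -> Dictionary -> doc2bow -> TfidfModel weights.
toy_docs = [['python', 'is', 'fun'], ['python', 'tweets', 'tweets', 'fun']]
toy_d = Dictionary(toy_docs)
toy_tfidf = TfidfModel(dictionary=toy_d)
[toy_tfidf[toy_d.doc2bow(doc)] for doc in toy_docs]  # (token_id, weight) pairs per doc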
# In[37]:
pd.Series(d.doc2bow(toks) for toks in df.tokens[:3])
# Did it assign 0 to the first word it found?
# Sort-of...
# In[39]:
d.token2id['python']
# In[40]:
d.token2id['Python']
# In[41]:
d.token2id['you']
# In[8]:
d.id2token[0] # guesses anyone?
# In[35]:
tfidf = TfidfModel(dictionary=d)
tfidf
# In[ ]:
tfidf
# In[42]:
tfidf.num_docs
# In[43]:
tfidf.num_nnz
# In[44]:
tfidf.save(os.path.join(DATA_PATH, 'tfidf'))
# In[45]:
tfidf2 = TfidfModel.load(os.path.join(DATA_PATH, 'tfidf'))
# In[46]:
tfidf2.num_nnz
| mit |
sumspr/scikit-learn | sklearn/cross_decomposition/pls_.py | 187 | 28507 | """
The :mod:`sklearn.pls` module implements Partial Least Squares (PLS).
"""
# Author: Edouard Duchesnay <[email protected]>
# License: BSD 3 clause
from ..base import BaseEstimator, RegressorMixin, TransformerMixin
from ..utils import check_array, check_consistent_length
from ..externals import six
import warnings
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy import linalg
from ..utils import arpack
from ..utils.validation import check_is_fitted
__all__ = ['PLSCanonical', 'PLSRegression', 'PLSSVD']
def _nipals_twoblocks_inner_loop(X, Y, mode="A", max_iter=500, tol=1e-06,
norm_y_weights=False):
"""Inner loop of the iterative NIPALS algorithm.
Provides an alternative to the svd(X'Y); returns the first left and right
singular vectors of X'Y. See PLS for the meaning of the parameters. It is
similar to the Power method for determining the eigenvectors and
    eigenvalues of X'Y.
"""
y_score = Y[:, [0]]
x_weights_old = 0
ite = 1
X_pinv = Y_pinv = None
eps = np.finfo(X.dtype).eps
# Inner loop of the Wold algo.
while True:
# 1.1 Update u: the X weights
if mode == "B":
if X_pinv is None:
X_pinv = linalg.pinv(X) # compute once pinv(X)
x_weights = np.dot(X_pinv, y_score)
else: # mode A
# Mode A regress each X column on y_score
x_weights = np.dot(X.T, y_score) / np.dot(y_score.T, y_score)
# 1.2 Normalize u
x_weights /= np.sqrt(np.dot(x_weights.T, x_weights)) + eps
# 1.3 Update x_score: the X latent scores
x_score = np.dot(X, x_weights)
# 2.1 Update y_weights
if mode == "B":
if Y_pinv is None:
Y_pinv = linalg.pinv(Y) # compute once pinv(Y)
y_weights = np.dot(Y_pinv, x_score)
else:
# Mode A regress each Y column on x_score
y_weights = np.dot(Y.T, x_score) / np.dot(x_score.T, x_score)
# 2.2 Normalize y_weights
if norm_y_weights:
y_weights /= np.sqrt(np.dot(y_weights.T, y_weights)) + eps
# 2.3 Update y_score: the Y latent scores
y_score = np.dot(Y, y_weights) / (np.dot(y_weights.T, y_weights) + eps)
# y_score = np.dot(Y, y_weights) / np.dot(y_score.T, y_score) ## BUG
x_weights_diff = x_weights - x_weights_old
if np.dot(x_weights_diff.T, x_weights_diff) < tol or Y.shape[1] == 1:
break
if ite == max_iter:
warnings.warn('Maximum number of iterations reached')
break
x_weights_old = x_weights
ite += 1
return x_weights, y_weights, ite
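# Added illustration (helper not in scikit-learn): exercise the NIPALS inner
# loop on random two-block data; the returned x/y weight vectors come back
# (approximately) unit length and the iteration count stays below max_iter.
def _demo_nipals_inner_loop(n=50, p=4, q=3, seed=0):
    rng = np.random.RandomState(seed)
    X, Y = rng.randn(n, p), rng.randn(n, q)
    x_w, y_w, n_iter = _nipals_twoblocks_inner_loop(
        X, Y, mode="A", max_iter=500, tol=1e-06, norm_y_weights=True)
    return np.linalg.norm(x_w), np.linalg.norm(y_w), n_iter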
def _svd_cross_product(X, Y):
C = np.dot(X.T, Y)
U, s, Vh = linalg.svd(C, full_matrices=False)
u = U[:, [0]]
v = Vh.T[:, [0]]
return u, v
def _center_scale_xy(X, Y, scale=True):
""" Center X, Y and scale if the scale parameter==True
Returns
-------
X, Y, x_mean, y_mean, x_std, y_std
"""
# center
x_mean = X.mean(axis=0)
X -= x_mean
y_mean = Y.mean(axis=0)
Y -= y_mean
# scale
if scale:
x_std = X.std(axis=0, ddof=1)
x_std[x_std == 0.0] = 1.0
X /= x_std
y_std = Y.std(axis=0, ddof=1)
y_std[y_std == 0.0] = 1.0
Y /= y_std
else:
x_std = np.ones(X.shape[1])
y_std = np.ones(Y.shape[1])
return X, Y, x_mean, y_mean, x_std, y_std
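# Added illustration (helper not in scikit-learn): after _center_scale_xy the
# columns of both blocks have zero mean and, with scale=True, unit standard
# deviation, which is what the NIPALS loop above expects to work on.
def _demo_center_scale_xy(seed=0):
    rng = np.random.RandomState(seed)
    X, Y = rng.randn(20, 3), rng.randn(20, 2)
    Xc, Yc, x_mean, y_mean, x_std, y_std = _center_scale_xy(X, Y, scale=True)
    return Xc.mean(axis=0), Xc.std(axis=0, ddof=1), Yc.mean(axis=0)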
class _PLS(six.with_metaclass(ABCMeta), BaseEstimator, TransformerMixin,
RegressorMixin):
"""Partial Least Squares (PLS)
This class implements the generic PLS algorithm, constructors' parameters
    allow one to obtain a specific implementation such as:
- PLS2 regression, i.e., PLS 2 blocks, mode A, with asymmetric deflation
and unnormalized y weights such as defined by [Tenenhaus 1998] p. 132.
With univariate response it implements PLS1.
- PLS canonical, i.e., PLS 2 blocks, mode A, with symmetric deflation and
normalized y weights such as defined by [Tenenhaus 1998] (p. 132) and
[Wegelin et al. 2000]. This parametrization implements the original Wold
algorithm.
We use the terminology defined by [Wegelin et al. 2000].
This implementation uses the PLS Wold 2 blocks algorithm based on two
nested loops:
    (i) The outer loop iterates over components.
    (ii) The inner loop estimates the weight vectors. This can be done
    with two algorithms: (a) the inner loop of the original NIPALS algorithm,
    or (b) an SVD on the residual cross-covariance matrices.
    Parameters
    ----------
    n_components : int, number of components to keep. (default 2).
scale : boolean, scale data? (default True)
deflation_mode : str, "canonical" or "regression". See notes.
mode : "A" classical PLS and "B" CCA. See notes.
norm_y_weights: boolean, normalize Y weights to one? (default False)
algorithm : string, "nipals" or "svd"
The algorithm used to estimate the weights. It will be called
n_components times, i.e. once for each iteration of the outer loop.
max_iter : an integer, the maximum number of iterations (default 500)
of the NIPALS inner loop (used only if algorithm="nipals")
tol : non-negative real, default 1e-06
The tolerance used in the iterative algorithm.
copy : boolean, default True
        Whether the deflation should be done on a copy. Leave the default
        value set to True unless you don't care about side effects.
Attributes
----------
x_weights_ : array, [p, n_components]
X block weights vectors.
y_weights_ : array, [q, n_components]
Y block weights vectors.
x_loadings_ : array, [p, n_components]
X block loadings vectors.
y_loadings_ : array, [q, n_components]
Y block loadings vectors.
x_scores_ : array, [n_samples, n_components]
X scores.
y_scores_ : array, [n_samples, n_components]
Y scores.
x_rotations_ : array, [p, n_components]
X block to latents rotations.
y_rotations_ : array, [q, n_components]
Y block to latents rotations.
coef_: array, [p, q]
The coefficients of the linear model: ``Y = X coef_ + Err``
n_iter_ : array-like
Number of iterations of the NIPALS inner loop for each
component. Not useful if the algorithm given is "svd".
References
----------
Jacob A. Wegelin. A survey of Partial Least Squares (PLS) methods, with
emphasis on the two-block case. Technical Report 371, Department of
Statistics, University of Washington, Seattle, 2000.
In French but still a reference:
Tenenhaus, M. (1998). La regression PLS: theorie et pratique. Paris:
Editions Technic.
See also
--------
PLSCanonical
PLSRegression
CCA
PLS_SVD
"""
@abstractmethod
def __init__(self, n_components=2, scale=True, deflation_mode="regression",
mode="A", algorithm="nipals", norm_y_weights=False,
max_iter=500, tol=1e-06, copy=True):
self.n_components = n_components
self.deflation_mode = deflation_mode
self.mode = mode
self.norm_y_weights = norm_y_weights
self.scale = scale
self.algorithm = algorithm
self.max_iter = max_iter
self.tol = tol
self.copy = copy
def fit(self, X, Y):
"""Fit model to data.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
            Training vectors, where n_samples is the number of samples and
n_features is the number of predictors.
Y : array-like of response, shape = [n_samples, n_targets]
            Target vectors, where n_samples is the number of samples and
n_targets is the number of response variables.
"""
        # copy since this will contain the residual (deflated) matrices
check_consistent_length(X, Y)
X = check_array(X, dtype=np.float64, copy=self.copy)
Y = check_array(Y, dtype=np.float64, copy=self.copy, ensure_2d=False)
if Y.ndim == 1:
Y = Y.reshape(-1, 1)
n = X.shape[0]
p = X.shape[1]
q = Y.shape[1]
if self.n_components < 1 or self.n_components > p:
raise ValueError('Invalid number of components: %d' %
self.n_components)
if self.algorithm not in ("svd", "nipals"):
raise ValueError("Got algorithm %s when only 'svd' "
"and 'nipals' are known" % self.algorithm)
if self.algorithm == "svd" and self.mode == "B":
raise ValueError('Incompatible configuration: mode B is not '
'implemented with svd algorithm')
if self.deflation_mode not in ["canonical", "regression"]:
raise ValueError('The deflation mode is unknown')
# Scale (in place)
X, Y, self.x_mean_, self.y_mean_, self.x_std_, self.y_std_\
= _center_scale_xy(X, Y, self.scale)
# Residuals (deflated) matrices
Xk = X
Yk = Y
# Results matrices
self.x_scores_ = np.zeros((n, self.n_components))
self.y_scores_ = np.zeros((n, self.n_components))
self.x_weights_ = np.zeros((p, self.n_components))
self.y_weights_ = np.zeros((q, self.n_components))
self.x_loadings_ = np.zeros((p, self.n_components))
self.y_loadings_ = np.zeros((q, self.n_components))
self.n_iter_ = []
# NIPALS algo: outer loop, over components
for k in range(self.n_components):
if np.all(np.dot(Yk.T, Yk) < np.finfo(np.double).eps):
# Yk constant
warnings.warn('Y residual constant at iteration %s' % k)
break
# 1) weights estimation (inner loop)
# -----------------------------------
if self.algorithm == "nipals":
x_weights, y_weights, n_iter_ = \
_nipals_twoblocks_inner_loop(
X=Xk, Y=Yk, mode=self.mode, max_iter=self.max_iter,
tol=self.tol, norm_y_weights=self.norm_y_weights)
self.n_iter_.append(n_iter_)
elif self.algorithm == "svd":
x_weights, y_weights = _svd_cross_product(X=Xk, Y=Yk)
# compute scores
x_scores = np.dot(Xk, x_weights)
if self.norm_y_weights:
y_ss = 1
else:
y_ss = np.dot(y_weights.T, y_weights)
y_scores = np.dot(Yk, y_weights) / y_ss
# test for null variance
if np.dot(x_scores.T, x_scores) < np.finfo(np.double).eps:
warnings.warn('X scores are null at iteration %s' % k)
break
# 2) Deflation (in place)
# ----------------------
            # A possible memory footprint reduction may be done here: in order
            # to avoid the allocation of a data chunk for the rank-one
            # approximation matrix which is then subtracted from Xk, we
            # suggest performing a column-wise deflation.
#
# - regress Xk's on x_score
x_loadings = np.dot(Xk.T, x_scores) / np.dot(x_scores.T, x_scores)
# - subtract rank-one approximations to obtain remainder matrix
Xk -= np.dot(x_scores, x_loadings.T)
if self.deflation_mode == "canonical":
# - regress Yk's on y_score, then subtract rank-one approx.
y_loadings = (np.dot(Yk.T, y_scores)
/ np.dot(y_scores.T, y_scores))
Yk -= np.dot(y_scores, y_loadings.T)
if self.deflation_mode == "regression":
# - regress Yk's on x_score, then subtract rank-one approx.
y_loadings = (np.dot(Yk.T, x_scores)
/ np.dot(x_scores.T, x_scores))
Yk -= np.dot(x_scores, y_loadings.T)
# 3) Store weights, scores and loadings # Notation:
self.x_scores_[:, k] = x_scores.ravel() # T
self.y_scores_[:, k] = y_scores.ravel() # U
self.x_weights_[:, k] = x_weights.ravel() # W
self.y_weights_[:, k] = y_weights.ravel() # C
self.x_loadings_[:, k] = x_loadings.ravel() # P
self.y_loadings_[:, k] = y_loadings.ravel() # Q
# Such that: X = TP' + Err and Y = UQ' + Err
# 4) rotations from input space to transformed space (scores)
# T = X W(P'W)^-1 = XW* (W* : p x k matrix)
        # U = Y C(Q'C)^-1 = YC* (C* : q x k matrix)
self.x_rotations_ = np.dot(
self.x_weights_,
linalg.pinv(np.dot(self.x_loadings_.T, self.x_weights_)))
if Y.shape[1] > 1:
self.y_rotations_ = np.dot(
self.y_weights_,
linalg.pinv(np.dot(self.y_loadings_.T, self.y_weights_)))
else:
self.y_rotations_ = np.ones(1)
if True or self.deflation_mode == "regression":
# FIXME what's with the if?
# Estimate regression coefficient
# Regress Y on T
# Y = TQ' + Err,
# Then express in function of X
# Y = X W(P'W)^-1Q' + Err = XB + Err
# => B = W*Q' (p x q)
self.coef_ = np.dot(self.x_rotations_, self.y_loadings_.T)
self.coef_ = (1. / self.x_std_.reshape((p, 1)) * self.coef_ *
self.y_std_)
return self
def transform(self, X, Y=None, copy=True):
"""Apply the dimension reduction learned on the train data.
Parameters
----------
X : array-like of predictors, shape = [n_samples, p]
            Training vectors, where n_samples is the number of samples and
p is the number of predictors.
Y : array-like of response, shape = [n_samples, q], optional
            Training vectors, where n_samples is the number of samples and
q is the number of response variables.
copy : boolean, default True
Whether to copy X and Y, or perform in-place normalization.
Returns
-------
x_scores if Y is not given, (x_scores, y_scores) otherwise.
"""
check_is_fitted(self, 'x_mean_')
X = check_array(X, copy=copy)
# Normalize
X -= self.x_mean_
X /= self.x_std_
# Apply rotation
x_scores = np.dot(X, self.x_rotations_)
if Y is not None:
Y = check_array(Y, ensure_2d=False, copy=copy)
if Y.ndim == 1:
Y = Y.reshape(-1, 1)
Y -= self.y_mean_
Y /= self.y_std_
y_scores = np.dot(Y, self.y_rotations_)
return x_scores, y_scores
return x_scores
def predict(self, X, copy=True):
"""Apply the dimension reduction learned on the train data.
Parameters
----------
X : array-like of predictors, shape = [n_samples, p]
            Training vectors, where n_samples is the number of samples and
p is the number of predictors.
copy : boolean, default True
Whether to copy X and Y, or perform in-place normalization.
Notes
-----
This call requires the estimation of a p x q matrix, which may
be an issue in high dimensional space.
"""
check_is_fitted(self, 'x_mean_')
X = check_array(X, copy=copy)
# Normalize
X -= self.x_mean_
X /= self.x_std_
Ypred = np.dot(X, self.coef_)
return Ypred + self.y_mean_
def fit_transform(self, X, y=None, **fit_params):
"""Learn and apply the dimension reduction on the train data.
Parameters
----------
X : array-like of predictors, shape = [n_samples, p]
            Training vectors, where n_samples is the number of samples and
p is the number of predictors.
Y : array-like of response, shape = [n_samples, q], optional
            Training vectors, where n_samples is the number of samples and
q is the number of response variables.
copy : boolean, default True
Whether to copy X and Y, or perform in-place normalization.
Returns
-------
x_scores if Y is not given, (x_scores, y_scores) otherwise.
"""
return self.fit(X, y, **fit_params).transform(X, y)
class PLSRegression(_PLS):
"""PLS regression
PLSRegression implements the PLS 2 blocks regression known as PLS2 or PLS1
in case of one dimensional response.
This class inherits from _PLS with mode="A", deflation_mode="regression",
norm_y_weights=False and algorithm="nipals".
Read more in the :ref:`User Guide <cross_decomposition>`.
Parameters
----------
n_components : int, (default 2)
Number of components to keep.
scale : boolean, (default True)
whether to scale the data
max_iter : an integer, (default 500)
the maximum number of iterations of the NIPALS inner loop (used
only if algorithm="nipals")
tol : non-negative real
Tolerance used in the iterative algorithm default 1e-06.
copy : boolean, default True
        Whether the deflation should be done on a copy. Leave the default
        value set to True unless you don't care about side effects.
Attributes
----------
x_weights_ : array, [p, n_components]
X block weights vectors.
y_weights_ : array, [q, n_components]
Y block weights vectors.
x_loadings_ : array, [p, n_components]
X block loadings vectors.
y_loadings_ : array, [q, n_components]
Y block loadings vectors.
x_scores_ : array, [n_samples, n_components]
X scores.
y_scores_ : array, [n_samples, n_components]
Y scores.
x_rotations_ : array, [p, n_components]
X block to latents rotations.
y_rotations_ : array, [q, n_components]
Y block to latents rotations.
coef_: array, [p, q]
The coefficients of the linear model: ``Y = X coef_ + Err``
n_iter_ : array-like
Number of iterations of the NIPALS inner loop for each
component.
Notes
-----
    For each component k, find weights u, v that optimize:
    ``max corr(Xk u, Yk v) * var(Xk u) var(Yk v)``, such that ``|u| = 1``
Note that it maximizes both the correlations between the scores and the
intra-block variances.
The residual matrix of X (Xk+1) block is obtained by the deflation on
the current X score: x_score.
The residual matrix of Y (Yk+1) block is obtained by deflation on the
current X score. This performs the PLS regression known as PLS2. This
mode is prediction oriented.
    This implementation provides the same results as 3 PLS packages
    provided in the R language (R-project):
- "mixOmics" with function pls(X, Y, mode = "regression")
- "plspm " with function plsreg2(X, Y)
- "pls" with function oscorespls.fit(X, Y)
Examples
--------
>>> from sklearn.cross_decomposition import PLSRegression
>>> X = [[0., 0., 1.], [1.,0.,0.], [2.,2.,2.], [2.,5.,4.]]
>>> Y = [[0.1, -0.2], [0.9, 1.1], [6.2, 5.9], [11.9, 12.3]]
>>> pls2 = PLSRegression(n_components=2)
>>> pls2.fit(X, Y)
... # doctest: +NORMALIZE_WHITESPACE
PLSRegression(copy=True, max_iter=500, n_components=2, scale=True,
tol=1e-06)
>>> Y_pred = pls2.predict(X)
References
----------
Jacob A. Wegelin. A survey of Partial Least Squares (PLS) methods, with
emphasis on the two-block case. Technical Report 371, Department of
Statistics, University of Washington, Seattle, 2000.
    In French but still a reference:
Tenenhaus, M. (1998). La regression PLS: theorie et pratique. Paris:
Editions Technic.
"""
def __init__(self, n_components=2, scale=True,
max_iter=500, tol=1e-06, copy=True):
_PLS.__init__(self, n_components=n_components, scale=scale,
deflation_mode="regression", mode="A",
norm_y_weights=False, max_iter=max_iter, tol=tol,
copy=copy)
class PLSCanonical(_PLS):
""" PLSCanonical implements the 2 blocks canonical PLS of the original Wold
    algorithm [Tenenhaus 1998] p.204, referred to as PLS-C2A in [Wegelin 2000].
This class inherits from PLS with mode="A" and deflation_mode="canonical",
norm_y_weights=True and algorithm="nipals", but svd should provide similar
results up to numerical errors.
Read more in the :ref:`User Guide <cross_decomposition>`.
Parameters
----------
scale : boolean, scale data? (default True)
algorithm : string, "nipals" or "svd"
The algorithm used to estimate the weights. It will be called
n_components times, i.e. once for each iteration of the outer loop.
max_iter : an integer, (default 500)
the maximum number of iterations of the NIPALS inner loop (used
only if algorithm="nipals")
tol : non-negative real, default 1e-06
the tolerance used in the iterative algorithm
copy : boolean, default True
        Whether the deflation should be done on a copy. Leave the default
        value set to True unless you don't care about side effects.
n_components : int, number of components to keep. (default 2).
Attributes
----------
x_weights_ : array, shape = [p, n_components]
X block weights vectors.
y_weights_ : array, shape = [q, n_components]
Y block weights vectors.
x_loadings_ : array, shape = [p, n_components]
X block loadings vectors.
y_loadings_ : array, shape = [q, n_components]
Y block loadings vectors.
x_scores_ : array, shape = [n_samples, n_components]
X scores.
y_scores_ : array, shape = [n_samples, n_components]
Y scores.
x_rotations_ : array, shape = [p, n_components]
X block to latents rotations.
y_rotations_ : array, shape = [q, n_components]
Y block to latents rotations.
n_iter_ : array-like
Number of iterations of the NIPALS inner loop for each
component. Not useful if the algorithm provided is "svd".
Notes
-----
For each component k, find weights u, v that optimize::
        max corr(Xk u, Yk v) * var(Xk u) var(Yk v), such that ``|u| = |v| = 1``
Note that it maximizes both the correlations between the scores and the
intra-block variances.
The residual matrix of X (Xk+1) block is obtained by the deflation on the
current X score: x_score.
The residual matrix of Y (Yk+1) block is obtained by deflation on the
current Y score. This performs a canonical symmetric version of the PLS
regression. But slightly different than the CCA. This is mostly used
for modeling.
    This implementation provides the same results as the "plspm" package
    provided in the R language (R-project), using the function plsca(X, Y).
Results are equal or collinear with the function
``pls(..., mode = "canonical")`` of the "mixOmics" package. The difference
    lies in the fact that the mixOmics implementation does not exactly
    implement the Wold algorithm since it does not normalize y_weights to one.
Examples
--------
>>> from sklearn.cross_decomposition import PLSCanonical
>>> X = [[0., 0., 1.], [1.,0.,0.], [2.,2.,2.], [2.,5.,4.]]
>>> Y = [[0.1, -0.2], [0.9, 1.1], [6.2, 5.9], [11.9, 12.3]]
>>> plsca = PLSCanonical(n_components=2)
>>> plsca.fit(X, Y)
... # doctest: +NORMALIZE_WHITESPACE
PLSCanonical(algorithm='nipals', copy=True, max_iter=500, n_components=2,
scale=True, tol=1e-06)
>>> X_c, Y_c = plsca.transform(X, Y)
References
----------
Jacob A. Wegelin. A survey of Partial Least Squares (PLS) methods, with
emphasis on the two-block case. Technical Report 371, Department of
Statistics, University of Washington, Seattle, 2000.
Tenenhaus, M. (1998). La regression PLS: theorie et pratique. Paris:
Editions Technic.
See also
--------
CCA
PLSSVD
"""
def __init__(self, n_components=2, scale=True, algorithm="nipals",
max_iter=500, tol=1e-06, copy=True):
_PLS.__init__(self, n_components=n_components, scale=scale,
deflation_mode="canonical", mode="A",
norm_y_weights=True, algorithm=algorithm,
max_iter=max_iter, tol=tol, copy=copy)
class PLSSVD(BaseEstimator, TransformerMixin):
"""Partial Least Square SVD
    Simply perform an SVD on the cross-covariance matrix X'Y.
    There is no iterative deflation here.
Read more in the :ref:`User Guide <cross_decomposition>`.
Parameters
----------
n_components : int, default 2
Number of components to keep.
scale : boolean, default True
Whether to scale X and Y.
copy : boolean, default True
Whether to copy X and Y, or perform in-place computations.
Attributes
----------
x_weights_ : array, [p, n_components]
X block weights vectors.
y_weights_ : array, [q, n_components]
Y block weights vectors.
x_scores_ : array, [n_samples, n_components]
X scores.
y_scores_ : array, [n_samples, n_components]
Y scores.
See also
--------
PLSCanonical
CCA
"""
def __init__(self, n_components=2, scale=True, copy=True):
self.n_components = n_components
self.scale = scale
self.copy = copy
def fit(self, X, Y):
        # copy since this will contain the centered data
check_consistent_length(X, Y)
X = check_array(X, dtype=np.float64, copy=self.copy)
Y = check_array(Y, dtype=np.float64, copy=self.copy, ensure_2d=False)
if Y.ndim == 1:
Y = Y.reshape(-1, 1)
if self.n_components > max(Y.shape[1], X.shape[1]):
raise ValueError("Invalid number of components n_components=%d"
" with X of shape %s and Y of shape %s."
% (self.n_components, str(X.shape), str(Y.shape)))
# Scale (in place)
X, Y, self.x_mean_, self.y_mean_, self.x_std_, self.y_std_ =\
_center_scale_xy(X, Y, self.scale)
# svd(X'Y)
C = np.dot(X.T, Y)
# The arpack svds solver only works if the number of extracted
# components is smaller than rank(X) - 1. Hence, if we want to extract
# all the components (C.shape[1]), we have to use another one. Else,
# let's use arpacks to compute only the interesting components.
if self.n_components >= np.min(C.shape):
U, s, V = linalg.svd(C, full_matrices=False)
else:
U, s, V = arpack.svds(C, k=self.n_components)
V = V.T
self.x_scores_ = np.dot(X, U)
self.y_scores_ = np.dot(Y, V)
self.x_weights_ = U
self.y_weights_ = V
return self
def transform(self, X, Y=None):
"""Apply the dimension reduction learned on the train data."""
check_is_fitted(self, 'x_mean_')
X = check_array(X, dtype=np.float64)
Xr = (X - self.x_mean_) / self.x_std_
x_scores = np.dot(Xr, self.x_weights_)
if Y is not None:
if Y.ndim == 1:
Y = Y.reshape(-1, 1)
Yr = (Y - self.y_mean_) / self.y_std_
y_scores = np.dot(Yr, self.y_weights_)
return x_scores, y_scores
return x_scores
def fit_transform(self, X, y=None, **fit_params):
"""Learn and apply the dimension reduction on the train data.
Parameters
----------
X : array-like of predictors, shape = [n_samples, p]
            Training vectors, where n_samples is the number of samples and
p is the number of predictors.
Y : array-like of response, shape = [n_samples, q], optional
            Training vectors, where n_samples is the number of samples and
q is the number of response variables.
Returns
-------
x_scores if Y is not given, (x_scores, y_scores) otherwise.
"""
return self.fit(X, y, **fit_params).transform(X, y)
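# Added usage sketch (not part of scikit-learn): PLSSVD has no doctest above,
# so this helper fits it on the small two-block example used by the other
# docstrings and projects both blocks onto the shared latent space.
def _demo_plssvd():
    X = np.array([[0., 0., 1.], [1., 0., 0.], [2., 2., 2.], [2., 5., 4.]])
    Y = np.array([[0.1, -0.2], [0.9, 1.1], [6.2, 5.9], [11.9, 12.3]])
    plssvd = PLSSVD(n_components=2).fit(X, Y)
    return plssvd.transform(X, Y)  # (x_scores, y_scores), each of shape (4, 2)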
| bsd-3-clause |
idlead/scikit-learn | sklearn/datasets/samples_generator.py | 20 | 56502 | """
Generate samples of synthetic data sets.
"""
# Authors: B. Thirion, G. Varoquaux, A. Gramfort, V. Michel, O. Grisel,
# G. Louppe, J. Nothman
# License: BSD 3 clause
import numbers
import array
import numpy as np
from scipy import linalg
import scipy.sparse as sp
from ..preprocessing import MultiLabelBinarizer
from ..utils import check_array, check_random_state
from ..utils import shuffle as util_shuffle
from ..utils.fixes import astype
from ..utils.random import sample_without_replacement
from ..externals import six
map = six.moves.map
zip = six.moves.zip
def _generate_hypercube(samples, dimensions, rng):
"""Returns distinct binary samples of length dimensions
"""
if dimensions > 30:
return np.hstack([_generate_hypercube(samples, dimensions - 30, rng),
_generate_hypercube(samples, 30, rng)])
out = astype(sample_without_replacement(2 ** dimensions, samples,
random_state=rng),
dtype='>u4', copy=False)
out = np.unpackbits(out.view('>u1')).reshape((-1, 32))[:, -dimensions:]
return out
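# Added illustration (helper not in scikit-learn): _generate_hypercube draws
# distinct binary corners of a hypercube; here, 4 unique 0/1 rows in 3-D.
def _demo_generate_hypercube(seed=0):
    rng = np.random.RandomState(seed)
    return _generate_hypercube(4, 3, rng)  # shape (4, 3), all rows distinct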
def make_classification(n_samples=100, n_features=20, n_informative=2,
n_redundant=2, n_repeated=0, n_classes=2,
n_clusters_per_class=2, weights=None, flip_y=0.01,
class_sep=1.0, hypercube=True, shift=0.0, scale=1.0,
shuffle=True, random_state=None):
"""Generate a random n-class classification problem.
This initially creates clusters of points normally distributed (std=1)
about vertices of a `2 * class_sep`-sided hypercube, and assigns an equal
number of clusters to each class. It introduces interdependence between
these features and adds various types of further noise to the data.
Prior to shuffling, `X` stacks a number of these primary "informative"
features, "redundant" linear combinations of these, "repeated" duplicates
    of sampled features, and arbitrary noise for any remaining features.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=20)
The total number of features. These comprise `n_informative`
informative features, `n_redundant` redundant features, `n_repeated`
duplicated features and `n_features-n_informative-n_redundant-
n_repeated` useless features drawn at random.
n_informative : int, optional (default=2)
The number of informative features. Each class is composed of a number
of gaussian clusters each located around the vertices of a hypercube
in a subspace of dimension `n_informative`. For each cluster,
informative features are drawn independently from N(0, 1) and then
randomly linearly combined within each cluster in order to add
covariance. The clusters are then placed on the vertices of the
hypercube.
n_redundant : int, optional (default=2)
The number of redundant features. These features are generated as
random linear combinations of the informative features.
n_repeated : int, optional (default=0)
The number of duplicated features, drawn randomly from the informative
and the redundant features.
n_classes : int, optional (default=2)
The number of classes (or labels) of the classification problem.
n_clusters_per_class : int, optional (default=2)
The number of clusters per class.
weights : list of floats or None (default=None)
The proportions of samples assigned to each class. If None, then
classes are balanced. Note that if `len(weights) == n_classes - 1`,
then the last class weight is automatically inferred.
More than `n_samples` samples may be returned if the sum of `weights`
exceeds 1.
flip_y : float, optional (default=0.01)
The fraction of samples whose class are randomly exchanged.
class_sep : float, optional (default=1.0)
The factor multiplying the hypercube dimension.
hypercube : boolean, optional (default=True)
If True, the clusters are put on the vertices of a hypercube. If
False, the clusters are put on the vertices of a random polytope.
shift : float, array of shape [n_features] or None, optional (default=0.0)
Shift features by the specified value. If None, then features
are shifted by a random value drawn in [-class_sep, class_sep].
scale : float, array of shape [n_features] or None, optional (default=1.0)
Multiply features by the specified value. If None, then features
are scaled by a random value drawn in [1, 100]. Note that scaling
happens after shifting.
shuffle : boolean, optional (default=True)
Shuffle the samples and the features.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The generated samples.
y : array of shape [n_samples]
The integer labels for class membership of each sample.
Notes
-----
The algorithm is adapted from Guyon [1] and was designed to generate
the "Madelon" dataset.
References
----------
.. [1] I. Guyon, "Design of experiments for the NIPS 2003 variable
selection benchmark", 2003.
See also
--------
make_blobs: simplified variant
make_multilabel_classification: unrelated generator for multilabel tasks
"""
generator = check_random_state(random_state)
# Count features, clusters and samples
if n_informative + n_redundant + n_repeated > n_features:
raise ValueError("Number of informative, redundant and repeated "
"features must sum to less than the number of total"
" features")
if 2 ** n_informative < n_classes * n_clusters_per_class:
raise ValueError("n_classes * n_clusters_per_class must"
" be smaller or equal 2 ** n_informative")
if weights and len(weights) not in [n_classes, n_classes - 1]:
raise ValueError("Weights specified but incompatible with number "
"of classes.")
n_useless = n_features - n_informative - n_redundant - n_repeated
n_clusters = n_classes * n_clusters_per_class
if weights and len(weights) == (n_classes - 1):
weights.append(1.0 - sum(weights))
if weights is None:
weights = [1.0 / n_classes] * n_classes
weights[-1] = 1.0 - sum(weights[:-1])
# Distribute samples among clusters by weight
n_samples_per_cluster = []
for k in range(n_clusters):
n_samples_per_cluster.append(int(n_samples * weights[k % n_classes]
/ n_clusters_per_class))
for i in range(n_samples - sum(n_samples_per_cluster)):
n_samples_per_cluster[i % n_clusters] += 1
    # Initialize X and y
X = np.zeros((n_samples, n_features))
y = np.zeros(n_samples, dtype=np.int)
# Build the polytope whose vertices become cluster centroids
centroids = _generate_hypercube(n_clusters, n_informative,
generator).astype(float)
centroids *= 2 * class_sep
centroids -= class_sep
if not hypercube:
centroids *= generator.rand(n_clusters, 1)
centroids *= generator.rand(1, n_informative)
# Initially draw informative features from the standard normal
X[:, :n_informative] = generator.randn(n_samples, n_informative)
# Create each cluster; a variant of make_blobs
stop = 0
for k, centroid in enumerate(centroids):
start, stop = stop, stop + n_samples_per_cluster[k]
y[start:stop] = k % n_classes # assign labels
X_k = X[start:stop, :n_informative] # slice a view of the cluster
A = 2 * generator.rand(n_informative, n_informative) - 1
X_k[...] = np.dot(X_k, A) # introduce random covariance
X_k += centroid # shift the cluster to a vertex
# Create redundant features
if n_redundant > 0:
B = 2 * generator.rand(n_informative, n_redundant) - 1
X[:, n_informative:n_informative + n_redundant] = \
np.dot(X[:, :n_informative], B)
# Repeat some features
if n_repeated > 0:
n = n_informative + n_redundant
indices = ((n - 1) * generator.rand(n_repeated) + 0.5).astype(np.intp)
X[:, n:n + n_repeated] = X[:, indices]
# Fill useless features
if n_useless > 0:
X[:, -n_useless:] = generator.randn(n_samples, n_useless)
# Randomly replace labels
if flip_y >= 0.0:
flip_mask = generator.rand(n_samples) < flip_y
y[flip_mask] = generator.randint(n_classes, size=flip_mask.sum())
# Randomly shift and scale
if shift is None:
shift = (2 * generator.rand(n_features) - 1) * class_sep
X += shift
if scale is None:
scale = 1 + 100 * generator.rand(n_features)
X *= scale
if shuffle:
# Randomly permute samples
X, y = util_shuffle(X, y, random_state=generator)
# Randomly permute features
indices = np.arange(n_features)
generator.shuffle(indices)
X[:, :] = X[:, indices]
return X, y
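# Added usage sketch (not a scikit-learn doctest): a small 3-class problem
# with 3 informative and 1 redundant feature; parameter values are arbitrary.
def _demo_make_classification():
    X, y = make_classification(n_samples=30, n_features=5, n_informative=3,
                               n_redundant=1, n_classes=3, random_state=0)
    return X.shape, np.unique(y)  # ((30, 5), array([0, 1, 2]))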
def make_multilabel_classification(n_samples=100, n_features=20, n_classes=5,
n_labels=2, length=50, allow_unlabeled=True,
sparse=False, return_indicator='dense',
return_distributions=False,
random_state=None):
"""Generate a random multilabel classification problem.
For each sample, the generative process is:
- pick the number of labels: n ~ Poisson(n_labels)
- n times, choose a class c: c ~ Multinomial(theta)
- pick the document length: k ~ Poisson(length)
- k times, choose a word: w ~ Multinomial(theta_c)
In the above process, rejection sampling is used to make sure that
n is never zero or more than `n_classes`, and that the document length
is never zero. Likewise, we reject classes which have already been chosen.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=20)
The total number of features.
n_classes : int, optional (default=5)
The number of classes of the classification problem.
n_labels : int, optional (default=2)
The average number of labels per instance. More precisely, the number
of labels per sample is drawn from a Poisson distribution with
``n_labels`` as its expected value, but samples are bounded (using
rejection sampling) by ``n_classes``, and must be nonzero if
``allow_unlabeled`` is False.
length : int, optional (default=50)
The sum of the features (number of words if documents) is drawn from
a Poisson distribution with this expected value.
allow_unlabeled : bool, optional (default=True)
If ``True``, some instances might not belong to any class.
sparse : bool, optional (default=False)
If ``True``, return a sparse feature matrix
.. versionadded:: 0.17
parameter to allow *sparse* output.
return_indicator : 'dense' (default) | 'sparse' | False
If ``dense`` return ``Y`` in the dense binary indicator format. If
``'sparse'`` return ``Y`` in the sparse binary indicator format.
``False`` returns a list of lists of labels.
return_distributions : bool, optional (default=False)
If ``True``, return the prior class probability and conditional
probabilities of features given classes, from which the data was
drawn.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The generated samples.
Y : array or sparse CSR matrix of shape [n_samples, n_classes]
The label sets.
p_c : array, shape [n_classes]
The probability of each class being drawn. Only returned if
``return_distributions=True``.
p_w_c : array, shape [n_features, n_classes]
The probability of each feature being drawn given each class.
Only returned if ``return_distributions=True``.
"""
generator = check_random_state(random_state)
p_c = generator.rand(n_classes)
p_c /= p_c.sum()
cumulative_p_c = np.cumsum(p_c)
p_w_c = generator.rand(n_features, n_classes)
p_w_c /= np.sum(p_w_c, axis=0)
def sample_example():
_, n_classes = p_w_c.shape
# pick a nonzero number of labels per document by rejection sampling
y_size = n_classes + 1
while (not allow_unlabeled and y_size == 0) or y_size > n_classes:
y_size = generator.poisson(n_labels)
# pick n classes
y = set()
while len(y) != y_size:
# pick a class with probability P(c)
c = np.searchsorted(cumulative_p_c,
generator.rand(y_size - len(y)))
y.update(c)
y = list(y)
# pick a non-zero document length by rejection sampling
n_words = 0
while n_words == 0:
n_words = generator.poisson(length)
# generate a document of length n_words
if len(y) == 0:
# if sample does not belong to any class, generate noise word
words = generator.randint(n_features, size=n_words)
return words, y
# sample words with replacement from selected classes
cumulative_p_w_sample = p_w_c.take(y, axis=1).sum(axis=1).cumsum()
cumulative_p_w_sample /= cumulative_p_w_sample[-1]
words = np.searchsorted(cumulative_p_w_sample, generator.rand(n_words))
return words, y
X_indices = array.array('i')
X_indptr = array.array('i', [0])
Y = []
for i in range(n_samples):
words, y = sample_example()
X_indices.extend(words)
X_indptr.append(len(X_indices))
Y.append(y)
X_data = np.ones(len(X_indices), dtype=np.float64)
X = sp.csr_matrix((X_data, X_indices, X_indptr),
shape=(n_samples, n_features))
X.sum_duplicates()
if not sparse:
X = X.toarray()
# return_indicator can be True due to backward compatibility
if return_indicator in (True, 'sparse', 'dense'):
lb = MultiLabelBinarizer(sparse_output=(return_indicator == 'sparse'))
Y = lb.fit([range(n_classes)]).transform(Y)
elif return_indicator is not False:
raise ValueError("return_indicator must be either 'sparse', 'dense' "
'or False.')
if return_distributions:
return X, Y, p_c, p_w_c
return X, Y
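# Added usage sketch (not a scikit-learn doctest): a dense label-indicator
# matrix plus the generating distributions; parameter values are arbitrary.
def _demo_make_multilabel_classification():
    X, Y, p_c, p_w_c = make_multilabel_classification(
        n_samples=10, n_features=8, n_classes=4, n_labels=2,
        return_indicator='dense', return_distributions=True, random_state=0)
    return X.shape, Y.shape, p_c.sum()  # ((10, 8), (10, 4), ~1.0)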
def make_hastie_10_2(n_samples=12000, random_state=None):
"""Generates data for binary classification used in
Hastie et al. 2009, Example 10.2.
The ten features are standard independent Gaussian and
the target ``y`` is defined by::
y[i] = 1 if np.sum(X[i] ** 2) > 9.34 else -1
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=12000)
The number of samples.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, 10]
The input samples.
y : array of shape [n_samples]
The output values.
References
----------
.. [1] T. Hastie, R. Tibshirani and J. Friedman, "Elements of Statistical
Learning Ed. 2", Springer, 2009.
See also
--------
make_gaussian_quantiles: a generalization of this dataset approach
"""
rs = check_random_state(random_state)
shape = (n_samples, 10)
X = rs.normal(size=shape).reshape(shape)
y = ((X ** 2.0).sum(axis=1) > 9.34).astype(np.float64)
y[y == 0.0] = -1.0
return X, y
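# Added usage sketch (not a scikit-learn doctest): ten Gaussian features and a
# +/-1 target decided by the squared-norm threshold 9.34 described above.
def _demo_make_hastie_10_2():
    X, y = make_hastie_10_2(n_samples=100, random_state=0)
    return X.shape, sorted(np.unique(y))  # ((100, 10), [-1.0, 1.0])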
def make_regression(n_samples=100, n_features=100, n_informative=10,
n_targets=1, bias=0.0, effective_rank=None,
tail_strength=0.5, noise=0.0, shuffle=True, coef=False,
random_state=None):
"""Generate a random regression problem.
The input set can either be well conditioned (by default) or have a low
rank-fat tail singular profile. See :func:`make_low_rank_matrix` for
more details.
The output is generated by applying a (potentially biased) random linear
regression model with `n_informative` nonzero regressors to the previously
generated input and some gaussian centered noise with some adjustable
scale.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=100)
The number of features.
n_informative : int, optional (default=10)
The number of informative features, i.e., the number of features used
to build the linear model used to generate the output.
n_targets : int, optional (default=1)
The number of regression targets, i.e., the dimension of the y output
vector associated with a sample. By default, the output is a scalar.
bias : float, optional (default=0.0)
The bias term in the underlying linear model.
effective_rank : int or None, optional (default=None)
if not None:
The approximate number of singular vectors required to explain most
of the input data by linear combinations. Using this kind of
singular spectrum in the input allows the generator to reproduce
the correlations often observed in practice.
if None:
The input set is well conditioned, centered and gaussian with
unit variance.
tail_strength : float between 0.0 and 1.0, optional (default=0.5)
The relative importance of the fat noisy tail of the singular values
profile if `effective_rank` is not None.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise applied to the output.
shuffle : boolean, optional (default=True)
Shuffle the samples and the features.
coef : boolean, optional (default=False)
If True, the coefficients of the underlying linear model are returned.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The input samples.
y : array of shape [n_samples] or [n_samples, n_targets]
The output values.
coef : array of shape [n_features] or [n_features, n_targets], optional
The coefficient of the underlying linear model. It is returned only if
coef is True.
"""
n_informative = min(n_features, n_informative)
generator = check_random_state(random_state)
if effective_rank is None:
# Randomly generate a well conditioned input set
X = generator.randn(n_samples, n_features)
else:
# Randomly generate a low rank, fat tail input set
X = make_low_rank_matrix(n_samples=n_samples,
n_features=n_features,
effective_rank=effective_rank,
tail_strength=tail_strength,
random_state=generator)
# Generate a ground truth model with only n_informative features being non
# zeros (the other features are not correlated to y and should be ignored
# by a sparsifying regularizers such as L1 or elastic net)
ground_truth = np.zeros((n_features, n_targets))
ground_truth[:n_informative, :] = 100 * generator.rand(n_informative,
n_targets)
y = np.dot(X, ground_truth) + bias
# Add noise
if noise > 0.0:
y += generator.normal(scale=noise, size=y.shape)
# Randomly permute samples and features
if shuffle:
X, y = util_shuffle(X, y, random_state=generator)
indices = np.arange(n_features)
generator.shuffle(indices)
X[:, :] = X[:, indices]
ground_truth = ground_truth[indices]
y = np.squeeze(y)
if coef:
return X, y, np.squeeze(ground_truth)
else:
return X, y
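# Added usage sketch (not a scikit-learn doctest): a sparse linear ground
# truth with 3 informative features out of 10, returned via coef=True.
def _demo_make_regression():
    X, y, coef = make_regression(n_samples=50, n_features=10, n_informative=3,
                                 noise=0.1, coef=True, random_state=0)
    return X.shape, y.shape, int(np.sum(coef != 0))  # ((50, 10), (50,), 3)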
def make_circles(n_samples=100, shuffle=True, noise=None, random_state=None,
factor=.8):
"""Make a large circle containing a smaller circle in 2d.
A simple toy dataset to visualize clustering and classification
algorithms.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The total number of points generated.
shuffle: bool, optional (default=True)
Whether to shuffle the samples.
noise : double or None (default=None)
Standard deviation of Gaussian noise added to the data.
factor : double < 1 (default=.8)
Scale factor between inner and outer circle.
Returns
-------
X : array of shape [n_samples, 2]
The generated samples.
y : array of shape [n_samples]
The integer labels (0 or 1) for class membership of each sample.
"""
if factor > 1 or factor < 0:
raise ValueError("'factor' has to be between 0 and 1.")
generator = check_random_state(random_state)
# so as not to have the first point = last point, we add one and then
# remove it.
linspace = np.linspace(0, 2 * np.pi, n_samples // 2 + 1)[:-1]
outer_circ_x = np.cos(linspace)
outer_circ_y = np.sin(linspace)
inner_circ_x = outer_circ_x * factor
inner_circ_y = outer_circ_y * factor
X = np.vstack((np.append(outer_circ_x, inner_circ_x),
np.append(outer_circ_y, inner_circ_y))).T
y = np.hstack([np.zeros(n_samples // 2, dtype=np.intp),
np.ones(n_samples // 2, dtype=np.intp)])
if shuffle:
X, y = util_shuffle(X, y, random_state=generator)
if noise is not None:
X += generator.normal(scale=noise, size=X.shape)
return X, y
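# Added usage sketch (not a scikit-learn doctest): two concentric circles with
# a little Gaussian jitter, split evenly between the two classes.
def _demo_make_circles():
    X, y = make_circles(n_samples=40, noise=0.05, factor=0.5, random_state=0)
    return X.shape, np.bincount(y)  # ((40, 2), array([20, 20]))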
def make_moons(n_samples=100, shuffle=True, noise=None, random_state=None):
"""Make two interleaving half circles
A simple toy dataset to visualize clustering and classification
algorithms.
Parameters
----------
n_samples : int, optional (default=100)
The total number of points generated.
shuffle : bool, optional (default=True)
Whether to shuffle the samples.
noise : double or None (default=None)
Standard deviation of Gaussian noise added to the data.
Read more in the :ref:`User Guide <sample_generators>`.
Returns
-------
X : array of shape [n_samples, 2]
The generated samples.
y : array of shape [n_samples]
The integer labels (0 or 1) for class membership of each sample.
"""
n_samples_out = n_samples // 2
n_samples_in = n_samples - n_samples_out
generator = check_random_state(random_state)
outer_circ_x = np.cos(np.linspace(0, np.pi, n_samples_out))
outer_circ_y = np.sin(np.linspace(0, np.pi, n_samples_out))
inner_circ_x = 1 - np.cos(np.linspace(0, np.pi, n_samples_in))
inner_circ_y = 1 - np.sin(np.linspace(0, np.pi, n_samples_in)) - .5
X = np.vstack((np.append(outer_circ_x, inner_circ_x),
np.append(outer_circ_y, inner_circ_y))).T
    y = np.hstack([np.zeros(n_samples_out, dtype=np.intp),
                   np.ones(n_samples_in, dtype=np.intp)])
if shuffle:
X, y = util_shuffle(X, y, random_state=generator)
if noise is not None:
X += generator.normal(scale=noise, size=X.shape)
return X, y
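# Added usage sketch (not a scikit-learn doctest): the classic interleaving
# half-moon pair; with an even n_samples the two classes are balanced.
def _demo_make_moons():
    X, y = make_moons(n_samples=40, noise=0.1, random_state=0)
    return X.shape, np.bincount(y)  # ((40, 2), array([20, 20]))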
def make_blobs(n_samples=100, n_features=2, centers=3, cluster_std=1.0,
center_box=(-10.0, 10.0), shuffle=True, random_state=None):
"""Generate isotropic Gaussian blobs for clustering.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The total number of points equally divided among clusters.
n_features : int, optional (default=2)
The number of features for each sample.
centers : int or array of shape [n_centers, n_features], optional
(default=3)
The number of centers to generate, or the fixed center locations.
cluster_std: float or sequence of floats, optional (default=1.0)
The standard deviation of the clusters.
center_box: pair of floats (min, max), optional (default=(-10.0, 10.0))
The bounding box for each cluster center when centers are
generated at random.
shuffle : boolean, optional (default=True)
Shuffle the samples.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The generated samples.
y : array of shape [n_samples]
The integer labels for cluster membership of each sample.
Examples
--------
>>> from sklearn.datasets.samples_generator import make_blobs
>>> X, y = make_blobs(n_samples=10, centers=3, n_features=2,
... random_state=0)
>>> print(X.shape)
(10, 2)
>>> y
array([0, 0, 1, 0, 2, 2, 2, 1, 1, 0])
See also
--------
make_classification: a more intricate variant
"""
generator = check_random_state(random_state)
if isinstance(centers, numbers.Integral):
centers = generator.uniform(center_box[0], center_box[1],
size=(centers, n_features))
else:
centers = check_array(centers)
n_features = centers.shape[1]
if isinstance(cluster_std, numbers.Real):
cluster_std = np.ones(len(centers)) * cluster_std
X = []
y = []
n_centers = centers.shape[0]
n_samples_per_center = [int(n_samples // n_centers)] * n_centers
for i in range(n_samples % n_centers):
n_samples_per_center[i] += 1
for i, (n, std) in enumerate(zip(n_samples_per_center, cluster_std)):
X.append(centers[i] + generator.normal(scale=std,
size=(n, n_features)))
y += [i] * n
X = np.concatenate(X)
y = np.array(y)
if shuffle:
indices = np.arange(n_samples)
generator.shuffle(indices)
X = X[indices]
y = y[indices]
return X, y
def make_friedman1(n_samples=100, n_features=10, noise=0.0, random_state=None):
"""Generate the "Friedman \#1" regression problem
This dataset is described in Friedman [1] and Breiman [2].
Inputs `X` are independent features uniformly distributed on the interval
[0, 1]. The output `y` is created according to the formula::
y(X) = 10 * sin(pi * X[:, 0] * X[:, 1]) + 20 * (X[:, 2] - 0.5) ** 2 \
+ 10 * X[:, 3] + 5 * X[:, 4] + noise * N(0, 1).
Out of the `n_features` features, only 5 are actually used to compute
`y`. The remaining features are independent of `y`.
The number of features has to be >= 5.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=10)
The number of features. Should be at least 5.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise applied to the output.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The input samples.
y : array of shape [n_samples]
The output values.
References
----------
.. [1] J. Friedman, "Multivariate adaptive regression splines", The Annals
of Statistics 19 (1), pages 1-67, 1991.
.. [2] L. Breiman, "Bagging predictors", Machine Learning 24,
pages 123-140, 1996.
"""
if n_features < 5:
raise ValueError("n_features must be at least five.")
generator = check_random_state(random_state)
X = generator.rand(n_samples, n_features)
y = 10 * np.sin(np.pi * X[:, 0] * X[:, 1]) + 20 * (X[:, 2] - 0.5) ** 2 \
+ 10 * X[:, 3] + 5 * X[:, 4] + noise * generator.randn(n_samples)
return X, y
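# Added usage sketch (not a scikit-learn doctest): only the first five columns
# of X drive the Friedman #1 response; any remaining columns are distractors.
def _demo_make_friedman1():
    X, y = make_friedman1(n_samples=25, n_features=8, noise=0.0,
                          random_state=0)
    return X.shape, y.shape  # ((25, 8), (25,))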
def make_friedman2(n_samples=100, noise=0.0, random_state=None):
"""Generate the "Friedman \#2" regression problem
This dataset is described in Friedman [1] and Breiman [2].
Inputs `X` are 4 independent features uniformly distributed on the
intervals::
0 <= X[:, 0] <= 100,
40 * pi <= X[:, 1] <= 560 * pi,
0 <= X[:, 2] <= 1,
1 <= X[:, 3] <= 11.
The output `y` is created according to the formula::
y(X) = (X[:, 0] ** 2 + (X[:, 1] * X[:, 2] \
- 1 / (X[:, 1] * X[:, 3])) ** 2) ** 0.5 + noise * N(0, 1).
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise applied to the output.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, 4]
The input samples.
y : array of shape [n_samples]
The output values.
References
----------
.. [1] J. Friedman, "Multivariate adaptive regression splines", The Annals
of Statistics 19 (1), pages 1-67, 1991.
.. [2] L. Breiman, "Bagging predictors", Machine Learning 24,
pages 123-140, 1996.
"""
generator = check_random_state(random_state)
X = generator.rand(n_samples, 4)
X[:, 0] *= 100
X[:, 1] *= 520 * np.pi
X[:, 1] += 40 * np.pi
X[:, 3] *= 10
X[:, 3] += 1
y = (X[:, 0] ** 2
+ (X[:, 1] * X[:, 2] - 1 / (X[:, 1] * X[:, 3])) ** 2) ** 0.5 \
+ noise * generator.randn(n_samples)
return X, y
def make_friedman3(n_samples=100, noise=0.0, random_state=None):
"""Generate the "Friedman \#3" regression problem
This dataset is described in Friedman [1] and Breiman [2].
Inputs `X` are 4 independent features uniformly distributed on the
intervals::
0 <= X[:, 0] <= 100,
40 * pi <= X[:, 1] <= 560 * pi,
0 <= X[:, 2] <= 1,
1 <= X[:, 3] <= 11.
The output `y` is created according to the formula::
y(X) = arctan((X[:, 1] * X[:, 2] - 1 / (X[:, 1] * X[:, 3])) \
/ X[:, 0]) + noise * N(0, 1).
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise applied to the output.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, 4]
The input samples.
y : array of shape [n_samples]
The output values.
References
----------
.. [1] J. Friedman, "Multivariate adaptive regression splines", The Annals
of Statistics 19 (1), pages 1-67, 1991.
.. [2] L. Breiman, "Bagging predictors", Machine Learning 24,
pages 123-140, 1996.
"""
generator = check_random_state(random_state)
X = generator.rand(n_samples, 4)
X[:, 0] *= 100
X[:, 1] *= 520 * np.pi
X[:, 1] += 40 * np.pi
X[:, 3] *= 10
X[:, 3] += 1
y = np.arctan((X[:, 1] * X[:, 2] - 1 / (X[:, 1] * X[:, 3])) / X[:, 0]) \
+ noise * generator.randn(n_samples)
return X, y
def make_low_rank_matrix(n_samples=100, n_features=100, effective_rank=10,
tail_strength=0.5, random_state=None):
"""Generate a mostly low rank matrix with bell-shaped singular values
Most of the variance can be explained by a bell-shaped curve of width
effective_rank: the low rank part of the singular values profile is::
(1 - tail_strength) * exp(-1.0 * (i / effective_rank) ** 2)
The remaining singular values' tail is fat, decreasing as::
tail_strength * exp(-0.1 * i / effective_rank).
The low rank part of the profile can be considered the structured
signal part of the data while the tail can be considered the noisy
part of the data that cannot be summarized by a low number of linear
components (singular vectors).
    This kind of singular profile is often seen in practice, for instance:
- gray level pictures of faces
- TF-IDF vectors of text documents crawled from the web
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=100)
The number of features.
effective_rank : int, optional (default=10)
The approximate number of singular vectors required to explain most of
the data by linear combinations.
tail_strength : float between 0.0 and 1.0, optional (default=0.5)
The relative importance of the fat noisy tail of the singular values
profile.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The matrix.
"""
generator = check_random_state(random_state)
n = min(n_samples, n_features)
# Random (ortho normal) vectors
u, _ = linalg.qr(generator.randn(n_samples, n), mode='economic')
v, _ = linalg.qr(generator.randn(n_features, n), mode='economic')
# Index of the singular values
singular_ind = np.arange(n, dtype=np.float64)
# Build the singular profile by assembling signal and noise components
low_rank = ((1 - tail_strength) *
np.exp(-1.0 * (singular_ind / effective_rank) ** 2))
tail = tail_strength * np.exp(-0.1 * singular_ind / effective_rank)
s = np.identity(n) * (low_rank + tail)
return np.dot(np.dot(u, s), v.T)
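# Added usage sketch (not a scikit-learn doctest): with a small effective_rank
# most of the singular-value mass of the generated matrix sits in the first
# few components, following the bell-shaped profile described above.
def _demo_make_low_rank_matrix():
    M = make_low_rank_matrix(n_samples=60, n_features=40, effective_rank=5,
                             random_state=0)
    s = linalg.svd(M, compute_uv=False)
    return s[:5].sum() / s.sum()  # fraction of the spectrum in the top 5 values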
def make_sparse_coded_signal(n_samples, n_components, n_features,
n_nonzero_coefs, random_state=None):
"""Generate a signal as a sparse combination of dictionary elements.
    Returns a matrix Y = DX, such that D is (n_features, n_components),
X is (n_components, n_samples) and each column of X has exactly
n_nonzero_coefs non-zero elements.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int
number of samples to generate
n_components: int,
number of components in the dictionary
n_features : int
number of features of the dataset to generate
n_nonzero_coefs : int
number of active (non-zero) coefficients in each sample
random_state: int or RandomState instance, optional (default=None)
seed used by the pseudo random number generator
Returns
-------
data: array of shape [n_features, n_samples]
The encoded signal (Y).
dictionary: array of shape [n_features, n_components]
The dictionary with normalized components (D).
code: array of shape [n_components, n_samples]
The sparse code such that each column of this matrix has exactly
n_nonzero_coefs non-zero items (X).
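Examples
--------
A small illustrative signal; only the shapes are shown:
>>> from sklearn.datasets import make_sparse_coded_signal
>>> Y, D, X = make_sparse_coded_signal(n_samples=5, n_components=8,
...                                    n_features=10, n_nonzero_coefs=3,
...                                    random_state=0)
>>> Y.shape, D.shape, X.shape
((10, 5), (10, 8), (8, 5))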
"""
generator = check_random_state(random_state)
# generate dictionary
D = generator.randn(n_features, n_components)
D /= np.sqrt(np.sum((D ** 2), axis=0))
# generate code
X = np.zeros((n_components, n_samples))
for i in range(n_samples):
idx = np.arange(n_components)
generator.shuffle(idx)
idx = idx[:n_nonzero_coefs]
X[idx, i] = generator.randn(n_nonzero_coefs)
# encode signal
Y = np.dot(D, X)
return map(np.squeeze, (Y, D, X))
def make_sparse_uncorrelated(n_samples=100, n_features=10, random_state=None):
"""Generate a random regression problem with sparse uncorrelated design
This dataset is described in Celeux et al. [1] as::
X ~ N(0, 1)
y(X) = X[:, 0] + 2 * X[:, 1] - 2 * X[:, 2] - 1.5 * X[:, 3]
Only the first 4 features are informative. The remaining features are
useless.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=10)
The number of features.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The input samples.
y : array of shape [n_samples]
The output values.
References
----------
.. [1] G. Celeux, M. El Anbari, J.-M. Marin, C. P. Robert,
"Regularization in regression: comparing Bayesian and frequentist
methods in a poorly informative situation", 2009.
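Examples
--------
A minimal call illustrating the returned shapes:
>>> from sklearn.datasets import make_sparse_uncorrelated
>>> X, y = make_sparse_uncorrelated(n_samples=20, n_features=10,
...                                 random_state=0)
>>> X.shape, y.shape
((20, 10), (20,))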
"""
generator = check_random_state(random_state)
X = generator.normal(loc=0, scale=1, size=(n_samples, n_features))
y = generator.normal(loc=(X[:, 0] +
2 * X[:, 1] -
2 * X[:, 2] -
1.5 * X[:, 3]), scale=np.ones(n_samples))
return X, y
def make_spd_matrix(n_dim, random_state=None):
"""Generate a random symmetric, positive-definite matrix.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_dim : int
The matrix dimension.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_dim, n_dim]
The random symmetric, positive-definite matrix.
See also
--------
make_sparse_spd_matrix
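Examples
--------
A minimal call; the exact values depend on the random state:
>>> from sklearn.datasets import make_spd_matrix
>>> X = make_spd_matrix(n_dim=3, random_state=0)
>>> X.shape
(3, 3)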
"""
generator = check_random_state(random_state)
A = generator.rand(n_dim, n_dim)
U, s, V = linalg.svd(np.dot(A.T, A))
X = np.dot(np.dot(U, 1.0 + np.diag(generator.rand(n_dim))), V)
return X
def make_sparse_spd_matrix(dim=1, alpha=0.95, norm_diag=False,
smallest_coef=.1, largest_coef=.9,
random_state=None):
"""Generate a sparse symmetric definite positive matrix.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
dim: integer, optional (default=1)
The size of the random matrix to generate.
alpha: float between 0 and 1, optional (default=0.95)
The probability that a coefficient is zero (see notes); larger values
give a sparser matrix.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
largest_coef : float between 0 and 1, optional (default=0.9)
The value of the largest coefficient.
smallest_coef : float between 0 and 1, optional (default=0.1)
The value of the smallest coefficient.
norm_diag : boolean, optional (default=False)
Whether to normalize the output matrix to make the leading diagonal
elements all 1
Returns
-------
prec : array of shape (dim, dim)
The generated matrix.
Notes
-----
The sparsity is actually imposed on the cholesky factor of the matrix.
Thus alpha does not translate directly into the filling fraction of
the matrix itself.
See also
--------
make_spd_matrix
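Examples
--------
A minimal call, shown for shape only:
>>> from sklearn.datasets import make_sparse_spd_matrix
>>> prec = make_sparse_spd_matrix(dim=4, alpha=0.9, random_state=0)
>>> prec.shape
(4, 4)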
"""
random_state = check_random_state(random_state)
chol = -np.eye(dim)
aux = random_state.rand(dim, dim)
aux[aux < alpha] = 0
aux[aux > alpha] = (smallest_coef
+ (largest_coef - smallest_coef)
* random_state.rand(np.sum(aux > alpha)))
aux = np.tril(aux, k=-1)
# Permute the lines: we don't want to have asymmetries in the final
# SPD matrix
permutation = random_state.permutation(dim)
aux = aux[permutation].T[permutation]
chol += aux
prec = np.dot(chol.T, chol)
if norm_diag:
# Form the diagonal vector into a row matrix
d = np.diag(prec).reshape(1, prec.shape[0])
d = 1. / np.sqrt(d)
prec *= d
prec *= d.T
return prec
def make_swiss_roll(n_samples=100, noise=0.0, random_state=None):
"""Generate a swiss roll dataset.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of sample points on the Swiss Roll.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, 3]
The points.
t : array of shape [n_samples]
The univariate position of the sample according to the main dimension
of the points in the manifold.
Notes
-----
The algorithm is from Marsland [1].
References
----------
.. [1] S. Marsland, "Machine Learning: An Algorithmic Perspective",
Chapter 10, 2009.
http://www-ist.massey.ac.nz/smarsland/Code/10/lle.py
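Examples
--------
A minimal call illustrating the returned shapes:
>>> from sklearn.datasets import make_swiss_roll
>>> X, t = make_swiss_roll(n_samples=100, random_state=0)
>>> X.shape, t.shape
((100, 3), (100,))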
"""
generator = check_random_state(random_state)
t = 1.5 * np.pi * (1 + 2 * generator.rand(1, n_samples))
x = t * np.cos(t)
y = 21 * generator.rand(1, n_samples)
z = t * np.sin(t)
X = np.concatenate((x, y, z))
X += noise * generator.randn(3, n_samples)
X = X.T
t = np.squeeze(t)
return X, t
def make_s_curve(n_samples=100, noise=0.0, random_state=None):
"""Generate an S curve dataset.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of sample points on the S curve.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, 3]
The points.
t : array of shape [n_samples]
The univariate position of the sample according to the main dimension
of the points in the manifold.
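Examples
--------
A minimal call illustrating the returned shapes:
>>> from sklearn.datasets import make_s_curve
>>> X, t = make_s_curve(n_samples=100, random_state=0)
>>> X.shape, t.shape
((100, 3), (100,))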
"""
generator = check_random_state(random_state)
t = 3 * np.pi * (generator.rand(1, n_samples) - 0.5)
x = np.sin(t)
y = 2.0 * generator.rand(1, n_samples)
z = np.sign(t) * (np.cos(t) - 1)
X = np.concatenate((x, y, z))
X += noise * generator.randn(3, n_samples)
X = X.T
t = np.squeeze(t)
return X, t
def make_gaussian_quantiles(mean=None, cov=1., n_samples=100,
n_features=2, n_classes=3,
shuffle=True, random_state=None):
"""Generate isotropic Gaussian and label samples by quantile
This classification dataset is constructed by taking a multi-dimensional
standard normal distribution and defining classes separated by nested
concentric multi-dimensional spheres such that roughly equal numbers of
samples are in each class (quantiles of the :math:`\chi^2` distribution).
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
mean : array of shape [n_features], optional (default=None)
The mean of the multi-dimensional normal distribution.
If None then use the origin (0, 0, ...).
cov : float, optional (default=1.)
The covariance matrix will be this value times the unit matrix. This
dataset only produces symmetric normal distributions.
n_samples : int, optional (default=100)
The total number of points equally divided among classes.
n_features : int, optional (default=2)
The number of features for each sample.
n_classes : int, optional (default=3)
The number of classes
shuffle : boolean, optional (default=True)
Shuffle the samples.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The generated samples.
y : array of shape [n_samples]
The integer labels for quantile membership of each sample.
Notes
-----
The dataset is from Zhu et al [1].
References
----------
.. [1] J. Zhu, H. Zou, S. Rosset, T. Hastie, "Multi-class AdaBoost", 2009.
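Examples
--------
A minimal call with three classes of thirty samples each:
>>> from sklearn.datasets import make_gaussian_quantiles
>>> X, y = make_gaussian_quantiles(n_samples=90, n_features=2,
...                                n_classes=3, random_state=0)
>>> X.shape, y.shape
((90, 2), (90,))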
"""
if n_samples < n_classes:
raise ValueError("n_samples must be at least n_classes")
generator = check_random_state(random_state)
if mean is None:
mean = np.zeros(n_features)
else:
mean = np.array(mean)
# Build multivariate normal distribution
X = generator.multivariate_normal(mean, cov * np.identity(n_features),
(n_samples,))
# Sort by distance from origin
idx = np.argsort(np.sum((X - mean[np.newaxis, :]) ** 2, axis=1))
X = X[idx, :]
# Label by quantile
step = n_samples // n_classes
y = np.hstack([np.repeat(np.arange(n_classes), step),
np.repeat(n_classes - 1, n_samples - step * n_classes)])
if shuffle:
X, y = util_shuffle(X, y, random_state=generator)
return X, y
def _shuffle(data, random_state=None):
generator = check_random_state(random_state)
n_rows, n_cols = data.shape
row_idx = generator.permutation(n_rows)
col_idx = generator.permutation(n_cols)
result = data[row_idx][:, col_idx]
return result, row_idx, col_idx
def make_biclusters(shape, n_clusters, noise=0.0, minval=10,
maxval=100, shuffle=True, random_state=None):
"""Generate an array with constant block diagonal structure for
biclustering.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
shape : iterable (n_rows, n_cols)
The shape of the result.
n_clusters : integer
The number of biclusters.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise.
minval : int, optional (default=10)
Minimum value of a bicluster.
maxval : int, optional (default=100)
Maximum value of a bicluster.
shuffle : boolean, optional (default=True)
Shuffle the samples.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape `shape`
The generated array.
rows : array of shape (n_clusters, X.shape[0],)
The indicators for cluster membership of each row.
cols : array of shape (n_clusters, X.shape[1],)
The indicators for cluster membership of each column.
References
----------
.. [1] Dhillon, I. S. (2001, August). Co-clustering documents and
words using bipartite spectral graph partitioning. In Proceedings
of the seventh ACM SIGKDD international conference on Knowledge
discovery and data mining (pp. 269-274). ACM.
See also
--------
make_checkerboard
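Examples
--------
A minimal call; the row and column indicator arrays have one row per
bicluster:
>>> from sklearn.datasets import make_biclusters
>>> X, rows, cols = make_biclusters(shape=(10, 8), n_clusters=3,
...                                 random_state=0)
>>> X.shape, rows.shape, cols.shape
((10, 8), (3, 10), (3, 8))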
"""
generator = check_random_state(random_state)
n_rows, n_cols = shape
consts = generator.uniform(minval, maxval, n_clusters)
# row and column clusters of approximately equal sizes
row_sizes = generator.multinomial(n_rows,
np.repeat(1.0 / n_clusters,
n_clusters))
col_sizes = generator.multinomial(n_cols,
np.repeat(1.0 / n_clusters,
n_clusters))
row_labels = np.hstack(list(np.repeat(val, rep) for val, rep in
zip(range(n_clusters), row_sizes)))
col_labels = np.hstack(list(np.repeat(val, rep) for val, rep in
zip(range(n_clusters), col_sizes)))
result = np.zeros(shape, dtype=np.float64)
for i in range(n_clusters):
selector = np.outer(row_labels == i, col_labels == i)
result[selector] += consts[i]
if noise > 0:
result += generator.normal(scale=noise, size=result.shape)
if shuffle:
result, row_idx, col_idx = _shuffle(result, random_state)
row_labels = row_labels[row_idx]
col_labels = col_labels[col_idx]
rows = np.vstack(row_labels == c for c in range(n_clusters))
cols = np.vstack(col_labels == c for c in range(n_clusters))
return result, rows, cols
def make_checkerboard(shape, n_clusters, noise=0.0, minval=10,
maxval=100, shuffle=True, random_state=None):
"""Generate an array with block checkerboard structure for
biclustering.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
shape : iterable (n_rows, n_cols)
The shape of the result.
n_clusters : integer or iterable (n_row_clusters, n_column_clusters)
The number of row and column clusters.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise.
minval : int, optional (default=10)
Minimum value of a bicluster.
maxval : int, optional (default=100)
Maximum value of a bicluster.
shuffle : boolean, optional (default=True)
Shuffle the samples.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape `shape`
The generated array.
rows : array of shape (n_clusters, X.shape[0],)
The indicators for cluster membership of each row.
cols : array of shape (n_clusters, X.shape[1],)
The indicators for cluster membership of each column.
References
----------
.. [1] Kluger, Y., Basri, R., Chang, J. T., & Gerstein, M. (2003).
Spectral biclustering of microarray data: coclustering genes
and conditions. Genome research, 13(4), 703-716.
See also
--------
make_biclusters
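Examples
--------
A minimal call with two row clusters and two column clusters, giving
four biclusters in total:
>>> from sklearn.datasets import make_checkerboard
>>> X, rows, cols = make_checkerboard(shape=(8, 8), n_clusters=(2, 2),
...                                   random_state=0)
>>> X.shape, rows.shape, cols.shape
((8, 8), (4, 8), (4, 8))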
"""
generator = check_random_state(random_state)
if hasattr(n_clusters, "__len__"):
n_row_clusters, n_col_clusters = n_clusters
else:
n_row_clusters = n_col_clusters = n_clusters
# row and column clusters of approximately equal sizes
n_rows, n_cols = shape
row_sizes = generator.multinomial(n_rows,
np.repeat(1.0 / n_row_clusters,
n_row_clusters))
col_sizes = generator.multinomial(n_cols,
np.repeat(1.0 / n_col_clusters,
n_col_clusters))
row_labels = np.hstack(list(np.repeat(val, rep) for val, rep in
zip(range(n_row_clusters), row_sizes)))
col_labels = np.hstack(list(np.repeat(val, rep) for val, rep in
zip(range(n_col_clusters), col_sizes)))
result = np.zeros(shape, dtype=np.float64)
for i in range(n_row_clusters):
for j in range(n_col_clusters):
selector = np.outer(row_labels == i, col_labels == j)
result[selector] += generator.uniform(minval, maxval)
if noise > 0:
result += generator.normal(scale=noise, size=result.shape)
if shuffle:
result, row_idx, col_idx = _shuffle(result, random_state)
row_labels = row_labels[row_idx]
col_labels = col_labels[col_idx]
rows = np.vstack(row_labels == label
for label in range(n_row_clusters)
for _ in range(n_col_clusters))
cols = np.vstack(col_labels == label
for _ in range(n_row_clusters)
for label in range(n_col_clusters))
return result, rows, cols
| bsd-3-clause |
hlin117/scikit-learn | examples/cluster/plot_face_ward_segmentation.py | 71 | 2460 | """
=========================================================================
A demo of structured Ward hierarchical clustering on a raccoon face image
=========================================================================
Compute the segmentation of a 2D image with Ward hierarchical
clustering. The clustering is spatially constrained in order
for each segmented region to be in one piece.
"""
# Author : Vincent Michel, 2010
# Alexandre Gramfort, 2011
# License: BSD 3 clause
print(__doc__)
import time as time
import numpy as np
import scipy as sp
import matplotlib.pyplot as plt
from sklearn.feature_extraction.image import grid_to_graph
from sklearn.cluster import AgglomerativeClustering
from sklearn.utils.testing import SkipTest
from sklearn.utils.fixes import sp_version
if sp_version < (0, 12):
raise SkipTest("Skipping because SciPy version earlier than 0.12.0 and "
"thus does not include the scipy.misc.face() image.")
###############################################################################
# Generate data
try:
face = sp.face(gray=True)
except AttributeError:
# Newer versions of scipy have face in misc
from scipy import misc
face = misc.face(gray=True)
# Resize it to 10% of the original size to speed up the processing
face = sp.misc.imresize(face, 0.10) / 255.
X = np.reshape(face, (-1, 1))
###############################################################################
# Define the structure A of the data. Pixels connected to their neighbors.
connectivity = grid_to_graph(*face.shape)
###############################################################################
# Compute clustering
print("Compute structured hierarchical clustering...")
st = time.time()
n_clusters = 15 # number of regions
ward = AgglomerativeClustering(n_clusters=n_clusters, linkage='ward',
connectivity=connectivity)
ward.fit(X)
label = np.reshape(ward.labels_, face.shape)
print("Elapsed time: ", time.time() - st)
print("Number of pixels: ", label.size)
print("Number of clusters: ", np.unique(label).size)
###############################################################################
# Plot the results on an image
plt.figure(figsize=(5, 5))
plt.imshow(face, cmap=plt.cm.gray)
for l in range(n_clusters):
plt.contour(label == l, contours=1,
colors=[plt.cm.spectral(l / float(n_clusters)), ])
plt.xticks(())
plt.yticks(())
plt.show()
| bsd-3-clause |
0x0all/scikit-learn | examples/cluster/plot_dbscan.py | 346 | 2479 | # -*- coding: utf-8 -*-
"""
===================================
Demo of DBSCAN clustering algorithm
===================================
Finds core samples of high density and expands clusters from them.
"""
print(__doc__)
import numpy as np
from sklearn.cluster import DBSCAN
from sklearn import metrics
from sklearn.datasets.samples_generator import make_blobs
from sklearn.preprocessing import StandardScaler
##############################################################################
# Generate sample data
centers = [[1, 1], [-1, -1], [1, -1]]
X, labels_true = make_blobs(n_samples=750, centers=centers, cluster_std=0.4,
random_state=0)
X = StandardScaler().fit_transform(X)
##############################################################################
# Compute DBSCAN
db = DBSCAN(eps=0.3, min_samples=10).fit(X)
core_samples_mask = np.zeros_like(db.labels_, dtype=bool)
core_samples_mask[db.core_sample_indices_] = True
labels = db.labels_
# Number of clusters in labels, ignoring noise if present.
n_clusters_ = len(set(labels)) - (1 if -1 in labels else 0)
print('Estimated number of clusters: %d' % n_clusters_)
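# Points labelled -1 by DBSCAN are noise; counting them complements the
# cluster count above.
n_noise_ = list(labels).count(-1)
print('Estimated number of noise points: %d' % n_noise_)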
print("Homogeneity: %0.3f" % metrics.homogeneity_score(labels_true, labels))
print("Completeness: %0.3f" % metrics.completeness_score(labels_true, labels))
print("V-measure: %0.3f" % metrics.v_measure_score(labels_true, labels))
print("Adjusted Rand Index: %0.3f"
% metrics.adjusted_rand_score(labels_true, labels))
print("Adjusted Mutual Information: %0.3f"
% metrics.adjusted_mutual_info_score(labels_true, labels))
print("Silhouette Coefficient: %0.3f"
% metrics.silhouette_score(X, labels))
##############################################################################
# Plot result
import matplotlib.pyplot as plt
# Black removed and is used for noise instead.
unique_labels = set(labels)
colors = plt.cm.Spectral(np.linspace(0, 1, len(unique_labels)))
for k, col in zip(unique_labels, colors):
if k == -1:
# Black used for noise.
col = 'k'
class_member_mask = (labels == k)
xy = X[class_member_mask & core_samples_mask]
plt.plot(xy[:, 0], xy[:, 1], 'o', markerfacecolor=col,
markeredgecolor='k', markersize=14)
xy = X[class_member_mask & ~core_samples_mask]
plt.plot(xy[:, 0], xy[:, 1], 'o', markerfacecolor=col,
markeredgecolor='k', markersize=6)
plt.title('Estimated number of clusters: %d' % n_clusters_)
plt.show()
| bsd-3-clause |
sburns/PyCap | redcap/project.py | 2 | 36294 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""User facing class for interacting with a REDCap Project"""
import json
import warnings
import semantic_version
from .request import RCRequest, RedcapError, RequestException
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
__author__ = "Scott Burns <scott.s.burnsgmail.com>"
__license__ = "MIT"
__copyright__ = "2014, Vanderbilt University"
# pylint: disable=too-many-lines
# pylint: disable=too-many-instance-attributes
# pylint: disable=too-many-arguments
# pylint: disable=too-many-public-methods
# pylint: disable=redefined-builtin
class Project(object):
"""Main class for interacting with REDCap projects"""
def __init__(self, url, token, name="", verify_ssl=True, lazy=False):
"""
Parameters
----------
url : str
API URL to your REDCap server
token : str
API token to your project
name : str, optional
name for project
verify_ssl : boolean, str
Verify SSL, default True. Can pass path to CA_BUNDLE.
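Examples
--------
Typical construction, assuming the package is importable as ``redcap``;
the URL and token below are placeholders for a real project:
>>> from redcap import Project
>>> project = Project("https://redcap.example.org/api/", "MY_API_TOKEN")  # doctest: +SKIP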
"""
self.token = token
self.name = name
self.url = url
self.verify = verify_ssl
self.metadata = None
self.redcap_version = None
self.field_names = None
# We'll use the first field as the default id for each row
self.def_field = None
self.field_labels = None
self.forms = None
self.events = None
self.arm_nums = None
self.arm_names = None
self.configured = False
if not lazy:
self.configure()
def configure(self):
"""Fill in project attributes"""
try:
self.metadata = self.__md()
except RequestException as request_fail:
raise RedcapError(
"Exporting metadata failed. Check your URL and token."
) from request_fail
try:
self.redcap_version = self.__rcv()
except Exception as general_fail:
raise RedcapError(
"Determination of REDCap version failed"
) from general_fail
self.field_names = self.filter_metadata("field_name")
# we'll use the first field as the default id for each row
self.def_field = self.field_names[0]
self.field_labels = self.filter_metadata("field_label")
self.forms = tuple(set(c["form_name"] for c in self.metadata))
# determine whether longitudinal
ev_data = self._call_api(self.__basepl("event"), "exp_event")[0]
arm_data = self._call_api(self.__basepl("arm"), "exp_arm")[0]
if isinstance(ev_data, dict) and ("error" in ev_data.keys()):
events = tuple([])
else:
events = ev_data
if isinstance(arm_data, dict) and ("error" in arm_data.keys()):
arm_nums = tuple([])
arm_names = tuple([])
else:
arm_nums = tuple([a["arm_num"] for a in arm_data])
arm_names = tuple([a["name"] for a in arm_data])
self.events = events
self.arm_nums = arm_nums
self.arm_names = arm_names
self.configured = True
def __md(self):
"""Return the project's metadata structure"""
p_l = self.__basepl("metadata")
p_l["content"] = "metadata"
return self._call_api(p_l, "metadata")[0]
def __basepl(self, content, rec_type="flat", format="json"):
"""Return a dictionary which can be used as is or added to for
payloads"""
payload_dict = {"token": self.token, "content": content, "format": format}
        if content not in ["metadata", "file"]:
payload_dict["type"] = rec_type
return payload_dict
def __rcv(self):
payload = self.__basepl("version")
rcv = self._call_api(payload, "version")[0].decode("utf-8")
if "error" in rcv:
warnings.warn("Version information not available for this REDCap instance")
return ""
if semantic_version.validate(rcv):
return semantic_version.Version(rcv)
return rcv
def is_longitudinal(self):
"""
Returns
-------
boolean :
longitudinal status of this project
"""
return (
len(self.events) > 0 and len(self.arm_nums) > 0 and len(self.arm_names) > 0
)
def filter_metadata(self, key):
"""
Return a list of values for the metadata key from each field
of the project's metadata.
Parameters
----------
key: str
A known key in the metadata structure
Returns
-------
filtered :
attribute list from each field
"""
filtered = [field[key] for field in self.metadata if key in field]
if len(filtered) == 0:
raise KeyError("Key not found in metadata")
return filtered
def _kwargs(self):
"""Private method to build a dict for sending to RCRequest
Other default kwargs to the http library should go here"""
return {"verify": self.verify}
def _call_api(self, payload, typpe, **kwargs):
request_kwargs = self._kwargs()
request_kwargs.update(kwargs)
rcr = RCRequest(self.url, payload, typpe)
return rcr.execute(**request_kwargs)
def export_fem(self, arms=None, format="json", df_kwargs=None):
"""
Export the project's form to event mapping
Parameters
----------
arms : list
Limit exported form event mappings to these arm numbers
format : (``'json'``), ``'csv'``, ``'xml'``
Return the form event mappings in native objects,
csv or xml; ``'df'`` will return a ``pandas.DataFrame``
df_kwargs : dict
Passed to pandas.read_csv to control construction of
returned DataFrame
Returns
-------
fem : list, str, ``pandas.DataFrame``
form-event mapping for the project
"""
ret_format = format
if format == "df":
ret_format = "csv"
payload = self.__basepl("formEventMapping", format=ret_format)
if arms:
for i, value in enumerate(arms):
payload["arms[{}]".format(i)] = value
response, _ = self._call_api(payload, "exp_fem")
if format in ("json", "csv", "xml"):
return response
if format != "df":
raise ValueError(("Unsupported format: '{}'").format(format))
if not df_kwargs:
df_kwargs = {}
return self.read_csv(StringIO(response), **df_kwargs)
def export_field_names(self, field=None, format="json", df_kwargs=None):
"""
Export the project's export field names
Parameters
----------
field : str
Limit exported field name to this field (only single field supported).
When not provided, all fields returned.
format : (``'json'``), ``'csv'``, ``'xml'``, ``'df'``
Return the metadata in native objects, csv or xml.
``'df'`` will return a ``pandas.DataFrame``.
df_kwargs : dict
Passed to ``pandas.read_csv`` to control construction of
returned DataFrame.
by default ``{'index_col': 'original_field_name'}``
Returns
-------
metadata : list, str, ``pandas.DataFrame``
metadata structure for the project.
"""
ret_format = format
if format == "df":
ret_format = "csv"
payload = self.__basepl("exportFieldNames", format=ret_format)
if field:
payload["field"] = field
response, _ = self._call_api(payload, "exp_field_names")
if format in ("json", "csv", "xml"):
return response
if format != "df":
raise ValueError(("Unsupported format: '{}'").format(format))
if not df_kwargs:
df_kwargs = {"index_col": "original_field_name"}
return self.read_csv(StringIO(response), **df_kwargs)
def export_metadata(self, fields=None, forms=None, format="json", df_kwargs=None):
"""
Export the project's metadata
Parameters
----------
fields : list
Limit exported metadata to these fields
forms : list
Limit exported metadata to these forms
format : (``'json'``), ``'csv'``, ``'xml'``, ``'df'``
Return the metadata in native objects, csv or xml.
``'df'`` will return a ``pandas.DataFrame``.
df_kwargs : dict
Passed to ``pandas.read_csv`` to control construction of
returned DataFrame.
by default ``{'index_col': 'field_name'}``
Returns
-------
metadata : list, str, ``pandas.DataFrame``
metadata structure for the project.
"""
ret_format = format
if format == "df":
ret_format = "csv"
payload = self.__basepl("metadata", format=ret_format)
to_add = [fields, forms]
str_add = ["fields", "forms"]
for key, data in zip(str_add, to_add):
if data:
for i, value in enumerate(data):
payload["{}[{}]".format(key, i)] = value
response, _ = self._call_api(payload, "metadata")
if format in ("json", "csv", "xml"):
return response
if format != "df":
raise ValueError(("Unsupported format: '{}'").format(format))
if not df_kwargs:
df_kwargs = {"index_col": "field_name"}
return self.read_csv(StringIO(response), **df_kwargs)
def delete_records(self, records):
"""
Delete records from the Project.
Parameters
----------
records : list
List of record IDs that you want to delete from the project
Returns
-------
response : int
Number of records deleted
"""
payload = dict()
payload["action"] = "delete"
payload["content"] = "record"
payload["token"] = self.token
# Turn list of records into dict, and append to payload
records_dict = {
"records[{}]".format(idx): record for idx, record in enumerate(records)
}
payload.update(records_dict)
payload["format"] = format
response, _ = self._call_api(payload, "del_record")
return response
# pylint: disable=too-many-branches
# pylint: disable=too-many-locals
def export_records(
self,
records=None,
fields=None,
forms=None,
events=None,
raw_or_label="raw",
event_name="label",
format="json",
export_survey_fields=False,
export_data_access_groups=False,
df_kwargs=None,
export_checkbox_labels=False,
filter_logic=None,
date_begin=None,
date_end=None,
):
"""
Export data from the REDCap project.
Parameters
----------
records : list
array of record names specifying specific records to export.
by default, all records are exported
fields : list
array of field names specifying specific fields to pull
by default, all fields are exported
forms : list
array of form names to export. If in the web UI, the form
name has a space in it, replace the space with an underscore
by default, all forms are exported
events : list
an array of unique event names from which to export records
:note: this only applies to longitudinal projects
raw_or_label : (``'raw'``), ``'label'``, ``'both'``
export the raw coded values or labels for the options of
multiple choice fields, or both
event_name : (``'label'``), ``'unique'``
export the unique event name or the event label
format : (``'json'``), ``'csv'``, ``'xml'``, ``'df'``
Format of returned data. ``'json'`` returns json-decoded
objects while ``'csv'`` and ``'xml'`` return other formats.
``'df'`` will attempt to return a ``pandas.DataFrame``.
export_survey_fields : (``False``), True
specifies whether or not to export the survey identifier
field (e.g., "redcap_survey_identifier") or survey timestamp
fields (e.g., form_name+"_timestamp") when surveys are
utilized in the project.
export_data_access_groups : (``False``), ``True``
specifies whether or not to export the
``"redcap_data_access_group"`` field when data access groups
are utilized in the project.
:note: This flag is only viable if the user whose token is
being used to make the API request is *not* in a data
access group. If the user is in a group, then this flag
will revert to its default value.
df_kwargs : dict
Passed to ``pandas.read_csv`` to control construction of
returned DataFrame.
by default, ``{'index_col': self.def_field}``
export_checkbox_labels : (``False``), ``True``
specify whether to export checkbox values as their label on
export.
filter_logic : string
specify the filterLogic to be sent to the API.
date_begin : datetime
for the dateRangeStart filtering of the API
date_end : datetime
for the dateRangeEnd filtering sent to the API
Returns
-------
data : list, str, ``pandas.DataFrame``
exported data
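Examples
--------
Illustrative calls on an already-constructed ``project`` (see
``Project.__init__``); these need a live REDCap server, so they are not
executed here:
>>> data = project.export_records(fields=["record_id"])  # doctest: +SKIP
>>> df = project.export_records(format="df", raw_or_label="label")  # doctest: +SKIP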
"""
ret_format = format
if format == "df":
ret_format = "csv"
payload = self.__basepl("record", format=ret_format)
fields = self.backfill_fields(fields, forms)
keys_to_add = (
records,
fields,
forms,
events,
raw_or_label,
event_name,
export_survey_fields,
export_data_access_groups,
export_checkbox_labels,
)
str_keys = (
"records",
"fields",
"forms",
"events",
"rawOrLabel",
"eventName",
"exportSurveyFields",
"exportDataAccessGroups",
"exportCheckboxLabel",
)
for key, data in zip(str_keys, keys_to_add):
if data:
if key in ("fields", "records", "forms", "events"):
for i, value in enumerate(data):
payload["{}[{}]".format(key, i)] = value
else:
payload[key] = data
if date_begin:
payload["dateRangeBegin"] = date_begin.strftime("%Y-%m-%d %H:%M:%S")
if date_end:
payload["dateRangeEnd"] = date_end.strftime("%Y-%m-%d %H:%M:%S")
if filter_logic:
payload["filterLogic"] = filter_logic
response, _ = self._call_api(payload, "exp_record")
if format in ("json", "csv", "xml"):
return response
if format != "df":
raise ValueError(("Unsupported format: '{}'").format(format))
if not df_kwargs:
if self.is_longitudinal():
df_kwargs = {"index_col": [self.def_field, "redcap_event_name"]}
else:
df_kwargs = {"index_col": self.def_field}
buf = StringIO(response)
dataframe = self.read_csv(buf, **df_kwargs)
buf.close()
return dataframe
# pylint: enable=too-many-branches
# pylint: enable=too-many-locals
# pylint: disable=import-outside-toplevel
@staticmethod
def read_csv(buf, **df_kwargs):
"""Wrapper around pandas read_csv that handles EmptyDataError"""
from pandas import DataFrame, read_csv
from pandas.errors import EmptyDataError
try:
dataframe = read_csv(buf, **df_kwargs)
except EmptyDataError:
dataframe = DataFrame()
return dataframe
# pylint: enable=import-outside-toplevel
def metadata_type(self, field_name):
"""If the given field_name is validated by REDCap, return it's type"""
return self.__meta_metadata(
field_name, "text_validation_type_or_show_slider_number"
)
def __meta_metadata(self, field, key):
"""Return the value for key for the field in the metadata"""
metadata_field = ""
try:
metadata_field = str(
[f[key] for f in self.metadata if f["field_name"] == field][0]
)
except IndexError:
print("%s not in metadata field:%s" % (key, field))
return metadata_field
else:
return metadata_field
def backfill_fields(self, fields, forms):
"""
Properly backfill fields to explicitly request specific
keys. The issue is that >6.X servers *only* return requested fields
so to improve backwards compatibility for PyCap clients, add specific fields
when required.
Parameters
----------
fields: list
requested fields
forms: list
requested forms
Returns
-------
new_fields : list
the requested fields, with the project's def_field backfilled when needed
"""
if forms and not fields:
new_fields = [self.def_field]
elif fields and self.def_field not in fields:
new_fields = list(fields)
if self.def_field not in fields:
new_fields.append(self.def_field)
elif not fields:
new_fields = self.field_names
else:
new_fields = list(fields)
return new_fields
def names_labels(self, do_print=False):
"""Simple helper function to get all field names and labels """
if do_print:
for name, label in zip(self.field_names, self.field_labels):
print("%s --> %s" % (str(name), str(label)))
return self.field_names, self.field_labels
def import_records(
self,
to_import,
overwrite="normal",
format="json",
return_format="json",
return_content="count",
date_format="YMD",
force_auto_number=False,
):
"""
Import data into the RedCap Project
Parameters
----------
to_import : array of dicts, csv/xml string, ``pandas.DataFrame``
:note:
If you pass a csv or xml string, you should use the
``format`` parameter appropriately.
:note:
Keys of the dictionaries should be subset of project's,
fields, but this isn't a requirement. If you provide keys
that aren't defined fields, the returned response will
contain an ``'error'`` key.
overwrite : ('normal'), 'overwrite'
``'overwrite'`` will erase values previously stored in the
database if not specified in the to_import dictionaries.
format : ('json'), 'xml', 'csv'
Format of incoming data. By default, to_import will be json-encoded
return_format : ('json'), 'csv', 'xml'
Response format. By default, response will be json-decoded.
return_content : ('count'), 'ids', 'nothing'
By default, the response contains a 'count' key with the number of
records just imported. By specifying 'ids', a list of ids
imported will be returned. 'nothing' will only return
the HTTP status code and no message.
date_format : ('YMD'), 'DMY', 'MDY'
Describes the formatting of dates. By default, date strings
are formatted as 'YYYY-MM-DD' corresponding to 'YMD'. If date
strings are formatted as 'MM/DD/YYYY' set this parameter as
'MDY' and if formatted as 'DD/MM/YYYY' set as 'DMY'. No
other formattings are allowed.
force_auto_number : ('False') Enables automatic assignment of record IDs
of imported records by REDCap. If this is set to true, and auto-numbering
for records is enabled for the project, auto-numbering of imported records
will be enabled.
Returns
-------
response : dict, str
response from REDCap API, json-decoded if ``return_format`` == ``'json'``
"""
payload = self._initialize_import_payload(to_import, format, "record")
payload["overwriteBehavior"] = overwrite
payload["returnFormat"] = return_format
payload["returnContent"] = return_content
payload["dateFormat"] = date_format
payload["forceAutoNumber"] = force_auto_number
response = self._call_api(payload, "imp_record")[0]
if "error" in response:
raise RedcapError(str(response))
return response
def import_metadata(
self, to_import, format="json", return_format="json", date_format="YMD"
):
"""
Import metadata (DataDict) into the RedCap Project
Parameters
----------
to_import : array of dicts, csv/xml string, ``pandas.DataFrame``
:note:
If you pass a csv or xml string, you should use the
``format`` parameter appropriately.
format : ('json'), 'xml', 'csv'
Format of incoming data. By default, to_import will be json-encoded
return_format : ('json'), 'csv', 'xml'
Response format. By default, response will be json-decoded.
date_format : ('YMD'), 'DMY', 'MDY'
Describes the formatting of dates. By default, date strings
are formatted as 'YYYY-MM-DD' corresponding to 'YMD'. If date
strings are formatted as 'MM/DD/YYYY' set this parameter as
'MDY' and if formatted as 'DD/MM/YYYY' set as 'DMY'. No
other formattings are allowed.
Returns
-------
response : dict, str
response from REDCap API, json-decoded if ``return_format`` == ``'json'``
If successful, the number of imported fields
"""
payload = self._initialize_import_payload(to_import, format, "metadata")
payload["returnFormat"] = return_format
payload["dateFormat"] = date_format
response = self._call_api(payload, "imp_metadata")[0]
if "error" in str(response):
raise RedcapError(str(response))
return response
def _initialize_import_payload(self, to_import, format, data_type):
"""
Standardize the data to be imported and add it to the payload
Parameters
----------
to_import : array of dicts, csv/xml string, ``pandas.DataFrame``
:note:
If you pass a csv or xml string, you should use the
``format`` parameter appropriately.
format : ('json'), 'xml', 'csv'
Format of incoming data. By default, to_import will be json-encoded
data_type: 'record', 'metadata'
The kind of data that are imported
Returns
-------
payload : (dict, str)
The initialized payload dictionary and updated format
"""
payload = self.__basepl(data_type)
# pylint: disable=comparison-with-callable
if hasattr(to_import, "to_csv"):
# We'll assume it's a df
buf = StringIO()
if data_type == "record":
if self.is_longitudinal():
csv_kwargs = {"index_label": [self.def_field, "redcap_event_name"]}
else:
csv_kwargs = {"index_label": self.def_field}
elif data_type == "metadata":
csv_kwargs = {"index": False}
to_import.to_csv(buf, **csv_kwargs)
payload["data"] = buf.getvalue()
buf.close()
format = "csv"
elif format == "json":
payload["data"] = json.dumps(to_import, separators=(",", ":"))
else:
# don't do anything to csv/xml
payload["data"] = to_import
# pylint: enable=comparison-with-callable
payload["format"] = format
return payload
def export_file(self, record, field, event=None, return_format="json"):
"""
Export the contents of a file stored for a particular record
Notes
-----
Unlike other export methods, this works on a single record.
Parameters
----------
record : str
record ID
field : str
field name containing the file to be exported.
event: str
for longitudinal projects, specify the unique event here
return_format: ('json'), 'csv', 'xml'
format of error message
Returns
-------
content : bytes
content of the file
content_map : dict
content-type dictionary
"""
self._check_file_field(field)
# load up payload
payload = self.__basepl(content="file", format=return_format)
# there's no format field in this call
del payload["format"]
payload["returnFormat"] = return_format
payload["action"] = "export"
payload["field"] = field
payload["record"] = record
if event:
payload["event"] = event
content, headers = self._call_api(payload, "exp_file")
# REDCap adds some useful things in content-type
if "content-type" in headers:
splat = [
key_values.strip() for key_values in headers["content-type"].split(";")
]
key_values = [
(key_values.split("=")[0], key_values.split("=")[1].replace('"', ""))
for key_values in splat
if "=" in key_values
]
content_map = dict(key_values)
else:
content_map = {}
return content, content_map
def import_file(
self,
record,
field,
fname,
fobj,
event=None,
repeat_instance=None,
return_format="json",
):
"""
Import the contents of a file represented by fobj to a
particular records field
Parameters
----------
record : str
record ID
field : str
field name where the file will go
fname : str
file name visible in REDCap UI
fobj : file object
file object as returned by `open`
event : str
for longitudinal projects, specify the unique event here
repeat_instance : int
(only for projects with repeating instruments/events)
The repeat instance number of the repeating event (if longitudinal)
or the repeating instrument (if classic or longitudinal).
return_format : ('json'), 'csv', 'xml'
format of error message
Returns
-------
response :
response from server as specified by ``return_format``
"""
self._check_file_field(field)
# load up payload
payload = self.__basepl(content="file", format=return_format)
# no format in this call
del payload["format"]
payload["returnFormat"] = return_format
payload["action"] = "import"
payload["field"] = field
payload["record"] = record
if event:
payload["event"] = event
if repeat_instance:
payload["repeat_instance"] = repeat_instance
file_kwargs = {"files": {"file": (fname, fobj)}}
return self._call_api(payload, "imp_file", **file_kwargs)[0]
def delete_file(self, record, field, return_format="json", event=None):
"""
Delete a file from REDCap
Notes
-----
There is no undo button to this.
Parameters
----------
record : str
record ID
field : str
field name
return_format : (``'json'``), ``'csv'``, ``'xml'``
return format for error message
event : str
If longitudinal project, event to delete file from
Returns
-------
response : dict, str
response from REDCap after deleting file
"""
self._check_file_field(field)
# Load up payload
payload = self.__basepl(content="file", format=return_format)
del payload["format"]
payload["returnFormat"] = return_format
payload["action"] = "delete"
payload["record"] = record
payload["field"] = field
if event:
payload["event"] = event
return self._call_api(payload, "del_file")[0]
def _check_file_field(self, field):
"""Check that field exists and is a file field"""
is_field = field in self.field_names
is_file = self.__meta_metadata(field, "field_type") == "file"
if not (is_field and is_file):
msg = "'%s' is not a field or not a 'file' field" % field
raise ValueError(msg)
return True
def export_users(self, format="json"):
"""
Export the users of the Project
Notes
-----
Each user will have the following keys:
* ``'firstname'`` : User's first name
* ``'lastname'`` : User's last name
* ``'email'`` : Email address
* ``'username'`` : User's username
* ``'expiration'`` : Project access expiration date
* ``'data_access_group'`` : data access group ID
* ``'data_export'`` : (0=no access, 2=De-Identified, 1=Full Data Set)
* ``'forms'`` : a list of dicts with a single key as the form name and
value is an integer describing that user's form rights,
where: 0=no access, 1=view records/responses and edit
records (survey responses are read-only), 2=read only, and
3=edit survey responses,
Parameters
----------
format : (``'json'``), ``'csv'``, ``'xml'``
response return format
Returns
-------
users: list, str
list of users dicts when ``'format'='json'``,
otherwise a string
"""
payload = self.__basepl(content="user", format=format)
return self._call_api(payload, "exp_user")[0]
def export_survey_participant_list(self, instrument, event=None, format="json"):
"""
Export the Survey Participant List
Notes
-----
The passed instrument must be set up as a survey instrument.
Parameters
----------
instrument: str
Name of instrument as seen in second column of Data Dictionary.
event: str
Unique event name, only used in longitudinal projects
format: (json, xml, csv), json by default
Format of returned data
"""
payload = self.__basepl(content="participantList", format=format)
payload["instrument"] = instrument
if event:
payload["event"] = event
return self._call_api(payload, "exp_survey_participant_list")
def generate_next_record_name(self):
"""Return the next record name for auto-numbering records"""
payload = self.__basepl(content="generateNextRecordName")
return self._call_api(payload, "exp_next_id")[0]
def export_project_info(self, format="json"):
"""
Export Project Information
Parameters
----------
format: (json, xml, csv), json by default
Format of returned data
"""
payload = self.__basepl(content="project", format=format)
return self._call_api(payload, "exp_proj")[0]
# pylint: disable=too-many-locals
def export_reports(
self,
format="json",
report_id=None,
raw_or_label="raw",
raw_or_label_headers="raw",
export_checkbox_labels="false",
decimal_character=None,
df_kwargs=None,
):
"""
Export a report of the Project
Notes
-----
Parameters
----------
report_id : the report ID number provided next to the report name
on the report list page
format : (``'json'``), ``'csv'``, ``'xml'``, ``'df'``
Format of returned data. ``'json'`` returns json-decoded
objects while ``'csv'`` and ``'xml'`` return other formats.
``'df'`` will attempt to return a ``pandas.DataFrame``.
raw_or_label : raw [default], label - export the raw coded values or
labels for the options of multiple choice fields
raw_or_label_headers : raw [default], label - (for 'csv' format 'flat'
type only) for the CSV headers, export the variable/field names
(raw) or the field labels (label)
export_checkbox_labels : true, false [default] - specifies the format of
checkbox field values specifically when exporting the data as labels
(i.e., when rawOrLabel=label). When exporting labels, by default
(without providing the exportCheckboxLabel flag or if
exportCheckboxLabel=false), all checkboxes will either have a value
'Checked' if they are checked or 'Unchecked' if not checked.
But if exportCheckboxLabel is set to true, it will instead export
the checkbox value as the checkbox option's label (e.g., 'Choice 1')
if checked or it will be blank/empty (no value) if not checked.
If rawOrLabel=false, then the exportCheckboxLabel flag is ignored.
decimal_character : If specified, force all numbers into same decimal
format. You may choose to force all data values containing a
decimal to have the same decimal character, which will be applied
to all calc fields and number-validated text fields. Options
include comma ',' or dot/full stop '.', but if left blank/null,
then it will export numbers using the fields' native decimal format.
Simply provide the value of either ',' or '.' for this parameter.
Returns
-------
Per Redcap API:
Data from the project in the format and type specified
Ordered by the record (primary key of project) and then by event id
"""
ret_format = format
if format == "df":
ret_format = "csv"
payload = self.__basepl(content="report", format=ret_format)
keys_to_add = (
report_id,
raw_or_label,
raw_or_label_headers,
export_checkbox_labels,
decimal_character,
)
str_keys = (
"report_id",
"rawOrLabel",
"rawOrLabelHeaders",
"exportCheckboxLabel",
"decimalCharacter",
)
for key, data in zip(str_keys, keys_to_add):
if data:
payload[key] = data
response, _ = self._call_api(payload, "exp_report")
if format in ("json", "csv", "xml"):
return response
if format != "df":
raise ValueError(("Unsupported format: '{}'").format(format))
if not df_kwargs:
if self.is_longitudinal():
df_kwargs = {"index_col": [self.def_field, "redcap_event_name"]}
else:
df_kwargs = {"index_col": self.def_field}
buf = StringIO(response)
dataframe = self.read_csv(buf, **df_kwargs)
buf.close()
return dataframe
# pylint: enable=too-many-locals
# pylint: enable=too-many-instance-attributes
# pylint: enable=too-many-arguments
# pylint: enable=too-many-public-methods
# pylint: enable=redefined-builtin
| mit |
vigilv/scikit-learn | examples/linear_model/plot_logistic_path.py | 349 | 1195 | #!/usr/bin/env python
"""
=================================
Path with L1- Logistic Regression
=================================
Computes path on IRIS dataset.
"""
print(__doc__)
# Author: Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
from datetime import datetime
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model
from sklearn import datasets
from sklearn.svm import l1_min_c
iris = datasets.load_iris()
X = iris.data
y = iris.target
X = X[y != 2]
y = y[y != 2]
X -= np.mean(X, 0)
###############################################################################
# Demo path functions
cs = l1_min_c(X, y, loss='log') * np.logspace(0, 3)
print("Computing regularization path ...")
start = datetime.now()
clf = linear_model.LogisticRegression(C=1.0, penalty='l1', tol=1e-6)
coefs_ = []
for c in cs:
clf.set_params(C=c)
clf.fit(X, y)
coefs_.append(clf.coef_.ravel().copy())
print("This took ", datetime.now() - start)
coefs_ = np.array(coefs_)
plt.plot(np.log10(cs), coefs_)
ymin, ymax = plt.ylim()
plt.xlabel('log(C)')
plt.ylabel('Coefficients')
plt.title('Logistic Regression Path')
plt.axis('tight')
plt.show()
| bsd-3-clause |
Clyde-fare/scikit-learn | benchmarks/bench_plot_neighbors.py | 287 | 6433 | """
Plot the scaling of the nearest neighbors algorithms with k, D, and N
"""
from time import time
import numpy as np
import pylab as pl
from matplotlib import ticker
from sklearn import neighbors, datasets
def get_data(N, D, dataset='dense'):
if dataset == 'dense':
np.random.seed(0)
return np.random.random((N, D))
elif dataset == 'digits':
X = datasets.load_digits().data
i = np.argsort(X[0])[::-1]
X = X[:, i]
return X[:N, :D]
else:
raise ValueError("invalid dataset: %s" % dataset)
def barplot_neighbors(Nrange=2 ** np.arange(1, 11),
Drange=2 ** np.arange(7),
krange=2 ** np.arange(10),
N=1000,
D=64,
k=5,
leaf_size=30,
dataset='digits'):
algorithms = ('kd_tree', 'brute', 'ball_tree')
fiducial_values = {'N': N,
'D': D,
'k': k}
#------------------------------------------------------------
# varying N
N_results_build = dict([(alg, np.zeros(len(Nrange)))
for alg in algorithms])
N_results_query = dict([(alg, np.zeros(len(Nrange)))
for alg in algorithms])
for i, NN in enumerate(Nrange):
print("N = %i (%i out of %i)" % (NN, i + 1, len(Nrange)))
X = get_data(NN, D, dataset)
for algorithm in algorithms:
nbrs = neighbors.NearestNeighbors(n_neighbors=min(NN, k),
algorithm=algorithm,
leaf_size=leaf_size)
t0 = time()
nbrs.fit(X)
t1 = time()
nbrs.kneighbors(X)
t2 = time()
N_results_build[algorithm][i] = (t1 - t0)
N_results_query[algorithm][i] = (t2 - t1)
#------------------------------------------------------------
# varying D
D_results_build = dict([(alg, np.zeros(len(Drange)))
for alg in algorithms])
D_results_query = dict([(alg, np.zeros(len(Drange)))
for alg in algorithms])
for i, DD in enumerate(Drange):
print("D = %i (%i out of %i)" % (DD, i + 1, len(Drange)))
X = get_data(N, DD, dataset)
for algorithm in algorithms:
nbrs = neighbors.NearestNeighbors(n_neighbors=k,
algorithm=algorithm,
leaf_size=leaf_size)
t0 = time()
nbrs.fit(X)
t1 = time()
nbrs.kneighbors(X)
t2 = time()
D_results_build[algorithm][i] = (t1 - t0)
D_results_query[algorithm][i] = (t2 - t1)
#------------------------------------------------------------
# varying k
k_results_build = dict([(alg, np.zeros(len(krange)))
for alg in algorithms])
k_results_query = dict([(alg, np.zeros(len(krange)))
for alg in algorithms])
    X = get_data(N, D, dataset)
for i, kk in enumerate(krange):
print("k = %i (%i out of %i)" % (kk, i + 1, len(krange)))
for algorithm in algorithms:
nbrs = neighbors.NearestNeighbors(n_neighbors=kk,
algorithm=algorithm,
leaf_size=leaf_size)
t0 = time()
nbrs.fit(X)
t1 = time()
nbrs.kneighbors(X)
t2 = time()
k_results_build[algorithm][i] = (t1 - t0)
k_results_query[algorithm][i] = (t2 - t1)
pl.figure(figsize=(8, 11))
for (sbplt, vals, quantity,
build_time, query_time) in [(311, Nrange, 'N',
N_results_build,
N_results_query),
(312, Drange, 'D',
D_results_build,
D_results_query),
(313, krange, 'k',
k_results_build,
k_results_query)]:
ax = pl.subplot(sbplt, yscale='log')
pl.grid(True)
tick_vals = []
tick_labels = []
bottom = 10 ** np.min([min(np.floor(np.log10(build_time[alg])))
for alg in algorithms])
for i, alg in enumerate(algorithms):
xvals = 0.1 + i * (1 + len(vals)) + np.arange(len(vals))
width = 0.8
c_bar = pl.bar(xvals, build_time[alg] - bottom,
width, bottom, color='r')
q_bar = pl.bar(xvals, query_time[alg],
width, build_time[alg], color='b')
tick_vals += list(xvals + 0.5 * width)
tick_labels += ['%i' % val for val in vals]
pl.text((i + 0.02) / len(algorithms), 0.98, alg,
transform=ax.transAxes,
ha='left',
va='top',
bbox=dict(facecolor='w', edgecolor='w', alpha=0.5))
pl.ylabel('Time (s)')
ax.xaxis.set_major_locator(ticker.FixedLocator(tick_vals))
ax.xaxis.set_major_formatter(ticker.FixedFormatter(tick_labels))
for label in ax.get_xticklabels():
label.set_rotation(-90)
label.set_fontsize(10)
title_string = 'Varying %s' % quantity
descr_string = ''
for s in 'NDk':
if s == quantity:
pass
else:
descr_string += '%s = %i, ' % (s, fiducial_values[s])
descr_string = descr_string[:-2]
pl.text(1.01, 0.5, title_string,
transform=ax.transAxes, rotation=-90,
ha='left', va='center', fontsize=20)
pl.text(0.99, 0.5, descr_string,
transform=ax.transAxes, rotation=-90,
ha='right', va='center')
pl.gcf().suptitle("%s data set" % dataset.capitalize(), fontsize=16)
pl.figlegend((c_bar, q_bar), ('construction', 'N-point query'),
'upper right')
if __name__ == '__main__':
barplot_neighbors(dataset='digits')
barplot_neighbors(dataset='dense')
pl.show()
| bsd-3-clause |
janhahne/nest-simulator | pynest/examples/glif_cond_neuron.py | 5 | 9609 | # -*- coding: utf-8 -*-
#
# glif_cond_neuron.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
"""
Conductance-based generalized leaky integrate and fire (GLIF) neuron example
----------------------------------------------------------------------------
Simple example of how to use the ``glif_cond`` neuron model for
five different levels of GLIF neurons.
Four stimulation paradigms are illustrated for the GLIF model
with externally applied current and spikes impinging on the neurons.
Voltage traces, injecting current traces, threshold traces, synaptic
conductance traces and spikes are shown.
KEYWORDS: glif_cond
"""
##############################################################################
# First, we import all necessary modules to simulate, analyze and plot this
# example.
import nest
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
##############################################################################
# We initialize the nest and set the simulation resolution.
nest.ResetKernel()
resolution = 0.05
nest.SetKernelStatus({"resolution": resolution})
###############################################################################
# We create the five levels of GLIF model to be tested, i.e.,
# ``lif``, ``lif_r``, ``lif_asc``, ``lif_r_asc``, ``lif_r_asc_a``.
# For each level of GLIF model, we create a ``glif_cond`` node. The node is
# created by setting relative model mechanism parameters. Other neuron
# parameters are set as default. The five ``glif_cond`` node handles are
# combined as a list. Note that the default number of synaptic ports
# is two for spike inputs. One port is excitation receptor with time
# constant being 0.2 ms and reversal potential being 0.0 mV. The other port is
# inhibition receptor with time constant being 2.0 ms and -85.0 mV.
# Note that users can set as many synaptic ports as needed for ``glif_cond``
# by setting array parameters ``tau_syn`` and ``E_rev`` of the model.
n_lif = nest.Create("glif_cond",
params={"spike_dependent_threshold": False,
"after_spike_currents": False,
"adapting_threshold": False})
n_lif_r = nest.Create("glif_cond",
params={"spike_dependent_threshold": True,
"after_spike_currents": False,
"adapting_threshold": False})
n_lif_asc = nest.Create("glif_cond",
params={"spike_dependent_threshold": False,
"after_spike_currents": True,
"adapting_threshold": False})
n_lif_r_asc = nest.Create("glif_cond",
params={"spike_dependent_threshold": True,
"after_spike_currents": True,
"adapting_threshold": False})
n_lif_r_asc_a = nest.Create("glif_cond",
params={"spike_dependent_threshold": True,
"after_spike_currents": True,
"adapting_threshold": True})
neurons = n_lif + n_lif_r + n_lif_asc + n_lif_r_asc + n_lif_r_asc_a
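###############################################################################
# As noted above, additional receptor ports can be configured through the
# array parameters ``tau_syn`` and ``E_rev``. The sketch below is purely
# illustrative and is not used further in this example; the specific port
# values shown here are assumptions, not part of the original script.
n_three_ports = nest.Create("glif_cond",
                            params={"tau_syn": [0.2, 2.0, 5.0],
                                    "E_rev": [0.0, -85.0, 0.0]})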
###############################################################################
# For the stimulation input to the glif_cond neurons, we create one excitation
# spike generator and one inhibition spike generator, each of which generates
# three spikes; we also create one step current generator, a Poisson
# generator, and a parrot neuron (to be paired with the Poisson generator).
# The three different injections are spread to three different time periods,
# i.e., 0 ms ~ 200 ms, 200 ms ~ 500 ms, 600 ms ~ 900 ms.
# Configuration of the current generator includes the definition of the start
# and stop times and the amplitude of the injected current. Configuration of
# the Poisson generator includes the definition of the start and stop times and
# the rate of the injected spike train.
espikes = nest.Create("spike_generator",
params={"spike_times": [10., 100., 150.],
"spike_weights": [20.]*3})
ispikes = nest.Create("spike_generator",
params={"spike_times": [15., 99., 150.],
"spike_weights": [-20.]*3})
cg = nest.Create("step_current_generator",
params={"amplitude_values": [400., ],
"amplitude_times": [200., ],
"start": 200., "stop": 500.})
pg = nest.Create("poisson_generator",
params={"rate": 15000., "start": 600., "stop": 900.})
pn = nest.Create("parrot_neuron")
###############################################################################
# The generators are then connected to the neurons. Specification of
# the ``receptor_type`` uniquely defines the target receptor.
# We connect current generator to receptor 0, the excitation spike generator
# and the Poisson generator (via parrot neuron) to receptor 1, and the
# inhibition spike generator to receptor 2 of the GLIF neurons.
# Note that the Poisson generator is connected to the parrot neuron, which
# relays the spikes to the glif_cond neurons.
nest.Connect(cg, neurons, syn_spec={"delay": resolution})
nest.Connect(espikes, neurons,
syn_spec={"delay": resolution, "receptor_type": 1})
nest.Connect(ispikes, neurons,
syn_spec={"delay": resolution, "receptor_type": 2})
nest.Connect(pg, pn, syn_spec={"delay": resolution})
nest.Connect(pn, neurons, syn_spec={"delay": resolution, "receptor_type": 1})
###############################################################################
# A ``multimeter`` is created and connected to the neurons. The parameters
# specified for the multimeter include the list of quantities that should be
# recorded and the time interval at which quantities are measured.
mm = nest.Create("multimeter",
params={"interval": resolution,
"record_from": ["V_m", "I", "g_1", "g_2",
"threshold",
"threshold_spike",
"threshold_voltage",
"ASCurrents_sum"]})
nest.Connect(mm, neurons)
###############################################################################
# A ``spike_detector`` is created and connected to the neurons to record the
# spikes generated by the glif_cond neurons.
sd = nest.Create("spike_detector")
nest.Connect(neurons, sd)
###############################################################################
# Run the simulation for 1000 ms and retrieve recorded data from
# the multimeter and spike detector.
nest.Simulate(1000.)
data = mm.events
senders = data["senders"]
spike_data = sd.events
spike_senders = spike_data["senders"]
spikes = spike_data["times"]
###############################################################################
# We plot the time traces of the membrane potential (in blue) and
# the overall threshold (in green), and the spikes (as red dots) in one panel;
# the spike component of threshold (in yellow) and the voltage component of
# threshold (in black) in another panel; the injected currents (in strong
# blue) and the sum of after-spike currents (in cyan) in the third panel; and
# the synaptic conductances of the two receptors (in blue and orange) in
# response to the spike inputs to the neurons in the fourth panel. We plot
# all four panels for each level of GLIF model in a separate figure.
glif_models = ["lif", "lif_r", "lif_asc", "lif_r_asc", "lif_r_asc_a"]
for i in range(len(glif_models)):
glif_model = glif_models[i]
node_id = neurons[i].global_id
plt.figure(glif_model)
gs = gridspec.GridSpec(4, 1, height_ratios=[2, 1, 1, 1])
t = data["times"][senders == 1]
ax1 = plt.subplot(gs[0])
plt.plot(t, data["V_m"][senders == node_id], "b")
plt.plot(t, data["threshold"][senders == node_id], "g--")
plt.plot(spikes[spike_senders == node_id],
[max(data["threshold"][senders == node_id]) * 0.95] *
len(spikes[spike_senders == node_id]), "r.")
plt.legend(["V_m", "threshold", "spike"])
plt.ylabel("V (mV)")
plt.title("Simulation of glif_cond neuron of " + glif_model)
ax2 = plt.subplot(gs[1])
plt.plot(t, data["threshold_spike"][senders == node_id], "y")
plt.plot(t, data["threshold_voltage"][senders == node_id], "k--")
plt.legend(["threshold_spike", "threshold_voltage"])
plt.ylabel("V (mV)")
ax3 = plt.subplot(gs[2])
plt.plot(t, data["I"][senders == node_id], "--")
plt.plot(t, data["ASCurrents_sum"][senders == node_id], "c-.")
plt.legend(["I_e", "ASCurrents_sum", "I_syn"])
plt.ylabel("I (pA)")
plt.xlabel("t (ms)")
ax4 = plt.subplot(gs[3])
plt.plot(t, data["g_1"][senders == node_id], "-")
plt.plot(t, data["g_2"][senders == node_id], "--")
plt.legend(["G_1", "G_2"])
plt.ylabel("G (nS)")
plt.xlabel("t (ms)")
plt.show()
| gpl-2.0 |
ishanic/scikit-learn | sklearn/datasets/tests/test_20news.py | 280 | 3045 | """Test the 20news downloader, if the data is available."""
import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import SkipTest
from sklearn import datasets
def test_20news():
try:
data = datasets.fetch_20newsgroups(
subset='all', download_if_missing=False, shuffle=False)
except IOError:
raise SkipTest("Download 20 newsgroups to run this test")
# Extract a reduced dataset
data2cats = datasets.fetch_20newsgroups(
subset='all', categories=data.target_names[-1:-3:-1], shuffle=False)
# Check that the ordering of the target_names is the same
# as the ordering in the full dataset
assert_equal(data2cats.target_names,
data.target_names[-2:])
# Assert that we have only 0 and 1 as labels
assert_equal(np.unique(data2cats.target).tolist(), [0, 1])
# Check that the number of filenames is consistent with data/target
assert_equal(len(data2cats.filenames), len(data2cats.target))
assert_equal(len(data2cats.filenames), len(data2cats.data))
# Check that the first entry of the reduced dataset corresponds to
# the first entry of the corresponding category in the full dataset
entry1 = data2cats.data[0]
category = data2cats.target_names[data2cats.target[0]]
label = data.target_names.index(category)
entry2 = data.data[np.where(data.target == label)[0][0]]
assert_equal(entry1, entry2)
def test_20news_length_consistency():
"""Checks the length consistencies within the bunch
This is a non-regression test for a bug present in 0.16.1.
"""
try:
data = datasets.fetch_20newsgroups(
subset='all', download_if_missing=False, shuffle=False)
except IOError:
raise SkipTest("Download 20 newsgroups to run this test")
# Extract the full dataset
data = datasets.fetch_20newsgroups(subset='all')
assert_equal(len(data['data']), len(data.data))
assert_equal(len(data['target']), len(data.target))
assert_equal(len(data['filenames']), len(data.filenames))
def test_20news_vectorized():
# This test is slow.
raise SkipTest("Test too slow.")
bunch = datasets.fetch_20newsgroups_vectorized(subset="train")
assert_true(sp.isspmatrix_csr(bunch.data))
assert_equal(bunch.data.shape, (11314, 107428))
assert_equal(bunch.target.shape[0], 11314)
assert_equal(bunch.data.dtype, np.float64)
bunch = datasets.fetch_20newsgroups_vectorized(subset="test")
assert_true(sp.isspmatrix_csr(bunch.data))
assert_equal(bunch.data.shape, (7532, 107428))
assert_equal(bunch.target.shape[0], 7532)
assert_equal(bunch.data.dtype, np.float64)
bunch = datasets.fetch_20newsgroups_vectorized(subset="all")
assert_true(sp.isspmatrix_csr(bunch.data))
assert_equal(bunch.data.shape, (11314 + 7532, 107428))
assert_equal(bunch.target.shape[0], 11314 + 7532)
assert_equal(bunch.data.dtype, np.float64)
| bsd-3-clause |
gangtao/dataplay | rviz/rpybuilder.py | 1 | 2303 | ## This module provides R functions through rpy2
import time
import pandas
from rpy2 import robjects
from rpy2.robjects import Formula, Environment
from rpy2.robjects.vectors import IntVector, FloatVector
from rpy2.robjects.lib import grid
from rpy2.robjects.packages import importr, data
import rpy2.robjects.lib.ggplot2 as ggplot2
# The R 'print' function
rprint = robjects.globalenv.get("print")
stats = importr('stats')
grdevices = importr('grDevices')
base = importr('base')
datasets = importr('datasets')
mtcars = data(datasets).fetch('mtcars')['mtcars']
class Rpy2Builder(object):
def __init__(self, spec):
## generate png file information
self.sfilebase = "./static/viz/"
self.cfilebase = "/viz/"
self.fileid = str(time.time())
self.filename = "viz" + self.fileid + ".png"
self.sfilename = self.sfilebase + self.filename
self.cfilename = self.cfilebase + self.filename
## data set information
self.spec = spec
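        ## Example spec (an illustrative assumption, based on the keys read
        ## in build() below):
        ##   {'type': 'sample', 'name': 'mtcars',
        ##    'viz[xaxis]': 'wt', 'viz[yaxis]': 'mpg', 'viz[color]': 'cyl',
        ##    'viz[shape]': '', 'viz[layer1]': '', 'viz[layer2]': ''}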
def build(self):
##print grdevices.palette()
if self.spec['type'] == 'csv' :
df = robjects.DataFrame.from_csvfile('./data/' + self.spec['name'] + '.csv')
else :
print type(self.spec['name'])
samplename = self.spec['name'].encode('ascii','ignore')
df = data(datasets).fetch(samplename)[samplename]
#print df
grdevices.png(file=self.sfilename, width=700, height=400)
pp = ggplot2.ggplot(df)
ppargs = {}
if len(self.spec['viz[xaxis]']) != 0 :
ppargs['x'] = self.spec['viz[xaxis]']
if len(self.spec['viz[yaxis]']) != 0 :
ppargs['y'] = self.spec['viz[yaxis]']
if len(self.spec['viz[color]']) != 0 :
ppargs['colour'] = self.spec['viz[color]']
if len(self.spec['viz[shape]']) != 0 :
ppargs['shape'] = self.spec['viz[shape]']
player1 = self.spec['viz[layer1]'] if len(self.spec['viz[layer1]']) != 0 else None
player2 = self.spec['viz[layer2]'] if len(self.spec['viz[layer2]']) != 0 else None
pp = pp + ggplot2.aes_string(**ppargs)
##pp = pp + ggplot2.geom_bar(stat="identity", fill="white", colour="darkgreen")
##pp = pp + ggplot2.scale_fill_brewer(palette="blues")
##pp = pp + ggplot2.geom_point()
pp = pp + ggplot2.geom_point(size=5)
pp.plot()
grdevices.dev_off()
return self.cfilename | mit |
datapythonista/pandas | pandas/tests/util/test_assert_index_equal.py | 1 | 7320 | import numpy as np
import pytest
from pandas import (
Categorical,
CategoricalIndex,
Index,
MultiIndex,
NaT,
RangeIndex,
)
import pandas._testing as tm
def test_index_equal_levels_mismatch():
msg = """Index are different
Index levels are different
\\[left\\]: 1, Int64Index\\(\\[1, 2, 3\\], dtype='int64'\\)
\\[right\\]: 2, MultiIndex\\(\\[\\('A', 1\\),
\\('A', 2\\),
\\('B', 3\\),
\\('B', 4\\)\\],
\\)"""
idx1 = Index([1, 2, 3])
idx2 = MultiIndex.from_tuples([("A", 1), ("A", 2), ("B", 3), ("B", 4)])
with pytest.raises(AssertionError, match=msg):
tm.assert_index_equal(idx1, idx2, exact=False)
def test_index_equal_values_mismatch(check_exact):
msg = """MultiIndex level \\[1\\] are different
MultiIndex level \\[1\\] values are different \\(25\\.0 %\\)
\\[left\\]: Int64Index\\(\\[2, 2, 3, 4\\], dtype='int64'\\)
\\[right\\]: Int64Index\\(\\[1, 2, 3, 4\\], dtype='int64'\\)"""
idx1 = MultiIndex.from_tuples([("A", 2), ("A", 2), ("B", 3), ("B", 4)])
idx2 = MultiIndex.from_tuples([("A", 1), ("A", 2), ("B", 3), ("B", 4)])
with pytest.raises(AssertionError, match=msg):
tm.assert_index_equal(idx1, idx2, check_exact=check_exact)
def test_index_equal_length_mismatch(check_exact):
msg = """Index are different
Index length are different
\\[left\\]: 3, Int64Index\\(\\[1, 2, 3\\], dtype='int64'\\)
\\[right\\]: 4, Int64Index\\(\\[1, 2, 3, 4\\], dtype='int64'\\)"""
idx1 = Index([1, 2, 3])
idx2 = Index([1, 2, 3, 4])
with pytest.raises(AssertionError, match=msg):
tm.assert_index_equal(idx1, idx2, check_exact=check_exact)
def test_index_equal_class_mismatch(check_exact):
msg = """Index are different
Index classes are different
\\[left\\]: Int64Index\\(\\[1, 2, 3\\], dtype='int64'\\)
\\[right\\]: Float64Index\\(\\[1\\.0, 2\\.0, 3\\.0\\], dtype='float64'\\)"""
idx1 = Index([1, 2, 3])
idx2 = Index([1, 2, 3.0])
with pytest.raises(AssertionError, match=msg):
tm.assert_index_equal(idx1, idx2, exact=True, check_exact=check_exact)
def test_index_equal_values_close(check_exact):
idx1 = Index([1, 2, 3.0])
idx2 = Index([1, 2, 3.0000000001])
if check_exact:
msg = """Index are different
Index values are different \\(33\\.33333 %\\)
\\[left\\]: Float64Index\\(\\[1.0, 2.0, 3.0], dtype='float64'\\)
\\[right\\]: Float64Index\\(\\[1.0, 2.0, 3.0000000001\\], dtype='float64'\\)"""
with pytest.raises(AssertionError, match=msg):
tm.assert_index_equal(idx1, idx2, check_exact=check_exact)
else:
tm.assert_index_equal(idx1, idx2, check_exact=check_exact)
def test_index_equal_values_less_close(check_exact, rtol):
idx1 = Index([1, 2, 3.0])
idx2 = Index([1, 2, 3.0001])
kwargs = {"check_exact": check_exact, "rtol": rtol}
if check_exact or rtol < 0.5e-3:
msg = """Index are different
Index values are different \\(33\\.33333 %\\)
\\[left\\]: Float64Index\\(\\[1.0, 2.0, 3.0], dtype='float64'\\)
\\[right\\]: Float64Index\\(\\[1.0, 2.0, 3.0001\\], dtype='float64'\\)"""
with pytest.raises(AssertionError, match=msg):
tm.assert_index_equal(idx1, idx2, **kwargs)
else:
tm.assert_index_equal(idx1, idx2, **kwargs)
def test_index_equal_values_too_far(check_exact, rtol):
idx1 = Index([1, 2, 3])
idx2 = Index([1, 2, 4])
kwargs = {"check_exact": check_exact, "rtol": rtol}
msg = """Index are different
Index values are different \\(33\\.33333 %\\)
\\[left\\]: Int64Index\\(\\[1, 2, 3\\], dtype='int64'\\)
\\[right\\]: Int64Index\\(\\[1, 2, 4\\], dtype='int64'\\)"""
with pytest.raises(AssertionError, match=msg):
tm.assert_index_equal(idx1, idx2, **kwargs)
@pytest.mark.parametrize("check_order", [True, False])
def test_index_equal_value_order_mismatch(check_exact, rtol, check_order):
idx1 = Index([1, 2, 3])
idx2 = Index([3, 2, 1])
msg = """Index are different
Index values are different \\(66\\.66667 %\\)
\\[left\\]: Int64Index\\(\\[1, 2, 3\\], dtype='int64'\\)
\\[right\\]: Int64Index\\(\\[3, 2, 1\\], dtype='int64'\\)"""
if check_order:
with pytest.raises(AssertionError, match=msg):
tm.assert_index_equal(
idx1, idx2, check_exact=check_exact, rtol=rtol, check_order=True
)
else:
tm.assert_index_equal(
idx1, idx2, check_exact=check_exact, rtol=rtol, check_order=False
)
def test_index_equal_level_values_mismatch(check_exact, rtol):
idx1 = MultiIndex.from_tuples([("A", 2), ("A", 2), ("B", 3), ("B", 4)])
idx2 = MultiIndex.from_tuples([("A", 1), ("A", 2), ("B", 3), ("B", 4)])
kwargs = {"check_exact": check_exact, "rtol": rtol}
msg = """MultiIndex level \\[1\\] are different
MultiIndex level \\[1\\] values are different \\(25\\.0 %\\)
\\[left\\]: Int64Index\\(\\[2, 2, 3, 4\\], dtype='int64'\\)
\\[right\\]: Int64Index\\(\\[1, 2, 3, 4\\], dtype='int64'\\)"""
with pytest.raises(AssertionError, match=msg):
tm.assert_index_equal(idx1, idx2, **kwargs)
@pytest.mark.parametrize(
"name1,name2",
[(None, "x"), ("x", "x"), (np.nan, np.nan), (NaT, NaT), (np.nan, NaT)],
)
def test_index_equal_names(name1, name2):
idx1 = Index([1, 2, 3], name=name1)
idx2 = Index([1, 2, 3], name=name2)
if name1 == name2 or name1 is name2:
tm.assert_index_equal(idx1, idx2)
else:
name1 = "'x'" if name1 == "x" else name1
name2 = "'x'" if name2 == "x" else name2
msg = f"""Index are different
Attribute "names" are different
\\[left\\]: \\[{name1}\\]
\\[right\\]: \\[{name2}\\]"""
with pytest.raises(AssertionError, match=msg):
tm.assert_index_equal(idx1, idx2)
def test_index_equal_category_mismatch(check_categorical):
msg = """Index are different
Attribute "dtype" are different
\\[left\\]: CategoricalDtype\\(categories=\\['a', 'b'\\], ordered=False\\)
\\[right\\]: CategoricalDtype\\(categories=\\['a', 'b', 'c'\\], \
ordered=False\\)"""
idx1 = Index(Categorical(["a", "b"]))
idx2 = Index(Categorical(["a", "b"], categories=["a", "b", "c"]))
if check_categorical:
with pytest.raises(AssertionError, match=msg):
tm.assert_index_equal(idx1, idx2, check_categorical=check_categorical)
else:
tm.assert_index_equal(idx1, idx2, check_categorical=check_categorical)
@pytest.mark.parametrize("exact", [False, True])
def test_index_equal_range_categories(check_categorical, exact):
# GH41263
msg = """\
Index are different
Index classes are different
\\[left\\]: RangeIndex\\(start=0, stop=10, step=1\\)
\\[right\\]: Int64Index\\(\\[0, 1, 2, 3, 4, 5, 6, 7, 8, 9\\], dtype='int64'\\)"""
rcat = CategoricalIndex(RangeIndex(10))
icat = CategoricalIndex(list(range(10)))
if check_categorical and exact:
with pytest.raises(AssertionError, match=msg):
tm.assert_index_equal(rcat, icat, check_categorical=True, exact=True)
else:
tm.assert_index_equal(
rcat, icat, check_categorical=check_categorical, exact=exact
)
def test_assert_index_equal_mixed_dtype():
# GH#39168
idx = Index(["foo", "bar", 42])
tm.assert_index_equal(idx, idx, check_order=False)
| bsd-3-clause |
taotaocoule/stock | strategy/junxian.py | 1 | 1466 | # Moving average (junxian) strategy
import numpy as np
import pandas as pd
import tushare as ts
datas=ts.get_k_data('600000',start='2015-06-30')
time=[15,30,60,90,120]
j=["MA_"+str(i) for i in time]
def format_data(data):
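    # What the steps below do (derived from the code itself):
    #   1. rolling means over 15/30/60/90/120-day windows (MA_*)
    #   2. 30-day change of each moving average, in per mille (MA_*_Change)
    #   3. volume relative to the recent rolling-maximum volume (vol_multi)
    #   4. spread between the highest and lowest MA, in per mille (five_change)
    #   5. keep only bars whose high/low straddle MA_15, then label each bar
    #      by the per-mille price change 22 bars later (see get_type)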
for i in time:
data["MA_"+str(i)]=data["close"].rolling(i).mean()
for x in j:
data[x+"_Change"]=1000*(data[x]-data[x].shift(30))/data[x]
data["vol_max_22"]=data.volume.rolling(10).max().shift()
data['vol_multi']=data.volume/data.vol_max_22
data['five_change']=1000*(data[j].max(axis=1)-data[j].min(axis=1))/data[j].min(axis=1)
data['is_vol_right']=data.vol_multi.rolling(22).max().shift()
data['is_five_change']=data.five_change.rolling(22).min().shift()
data=data[(data.high>data.MA_15) & (data.low < data.MA_15)]
data['price']=data.close.shift(-22)
data['price_change']=1000*(data.price-data.close)/data.close
data['type']=data.apply(get_type,axis=1)
return data
def test_strategy(data):
m=data[data.is_vol_right>5]
def get_type(data):
if data.price_change<0:
return 0
elif data.price_change<100:
return 1
elif data.price_change<200:
return 2
elif np.isnan(data.price_change):
return 5
else:
return 3
# NOTE: `code` is never defined in this file; as an assumption, use the full
# list of stock codes from tushare's basics table.
code = ts.get_stock_basics().index.tolist()
for c in code:
try:
a=ts.get_k_data(c,start='2015-06-30')
except:
print('{} is wrong'.format(c))
else:
print('{} is running'.format(c))
if(len(a)>0):
format_data(a).to_csv('data.csv',mode='a',index=False)
| mit |
PeterRochford/SkillMetrics | skill_metrics/plot_pattern_diagram_colorbar.py | 1 | 7473 | import matplotlib.pyplot as plt
from matplotlib import rcParams
from matplotlib import ticker
import math
def plot_pattern_diagram_colorbar(X,Y,Z,option):
'''
Plots color markers on a pattern diagram shaded according to a
supplied value.
Values are indicated via a color bar on the plot.
    Plots color markers on a target diagram according to their (X,Y) locations.
The color shading is accomplished by plotting the markers as a scatter
plot in (X,Y) with the colors of each point specified using Z as a
vector.
    The color range is controlled by option['cmapzdata'].
      option['colormap'] = 'on' :
        the scatter function maps the elements in Z to colors in the
        current colormap
      option['colormap'] = 'off' :
        the color axis is mapped to the range [min(Z), max(Z)]
      option['locationcolorbar'] :
        location for the colorbar, 'NorthOutside' or 'EastOutside'
The color bar is titled using the content of option['titleColorBar']
(if non-empty string).
INPUTS:
x : x-coordinates of markers
y : y-coordinates of markers
z : z-coordinates of markers (used for color shading)
option : dictionary containing option values.
option['colormap'] : 'on'/'off' switch to map color shading of markers
to colormap ('on') or min to max range of Z values ('off').
option['titleColorBar'] : title for the color bar
OUTPUTS:
None.
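
    EXAMPLE:
    A minimal, illustrative call (these option values are assumptions for
    demonstration only; in SkillMetrics the dictionary is normally produced
    by the diagram option helpers):

        option = {'colormap': 'on', 'locationcolorbar': 'NorthOutside',
                  'markersize': 10, 'titlecolorbar': 'Bias',
                  'cmapzdata': Z}
        plot_pattern_diagram_colorbar(X, Y, Z, option)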
Created on Nov 30, 2016
Revised on Jan 1, 2019
Author: Peter A. Rochford
Symplectic, LLC
www.thesymplectic.com
[email protected]
'''
'''
Plot color shaded data points using scatter plot
Keyword s defines marker size in points^2
c defines the sequence of numbers to be mapped to colors
using the cmap and norm
'''
fontSize = rcParams.get('font.size')
cxscale = fontSize/10 # scale color bar by font size
markerSize = option['markersize']**2
hp = plt.scatter(X,Y, s=markerSize, c=Z, marker = 'd')
hp.set_facecolor(hp.get_edgecolor())
# Set parameters for color bar location
location = option['locationcolorbar'].lower()
xscale= 1.0
labelpad = -25
if location == 'northoutside':
orientation = 'horizontal'
aspect = 6
fraction = 0.04
elif location == 'eastoutside':
orientation = 'vertical'
aspect = 25
fraction = 0.15
if 'checkstats' in option:
# Taylor diagram
xscale = 0.5
cxscale = 6*fontSize/10
labelpad = -30
else:
raise ValueError('Invalid color bar location: ' + option['locationcolorbar']);
# Add color bar to plot
if option['colormap'] == 'on':
# map color shading of markers to colormap
hc = plt.colorbar(orientation = orientation, aspect = aspect,
fraction = fraction, pad=0.06)
# Limit number of ticks on color bar to reasonable number
if orientation == 'horizontal':
_setColorBarTicks(hc,5,20)
elif option['colormap'] == 'off':
# map color shading of markers to min to max range of Z values
if len(Z) > 1:
plt.clim(min(Z), max(Z))
hc = plt.colorbar(orientation = orientation, aspect = aspect,
fraction = fraction, pad=0.06, ticks=[min(Z), max(Z)])
# Label just min/max range
hc.set_ticklabels(['Min.', 'Max.'])
else:
raise ValueError('Invalid option for option.colormap: ' +
option['colormap']);
if orientation == 'horizontal':
location = _getColorBarLocation(hc, option, xscale = xscale,
yscale = 7.5, cxscale = cxscale)
else:
location = _getColorBarLocation(hc, option, xscale = xscale,
yscale = 1.0, cxscale = cxscale)
hc.ax.set_position(location) # set new position
hc.ax.tick_params(labelsize=fontSize) # set tick label size
hc.ax.xaxis.set_ticks_position('top')
hc.ax.xaxis.set_label_position('top')
# Title the color bar
if option['titlecolorbar']:
if orientation == 'horizontal':
hc.set_label(option['titlecolorbar'],fontsize=fontSize)
else:
hc.set_label(option['titlecolorbar'],fontsize=fontSize,
labelpad=labelpad, y=1.05, rotation=0)
else:
        hc.set_label('Color Scale', fontsize=fontSize)
def _getColorBarLocation(hc,option,**kwargs):
'''
Determine location for color bar.
Determines location to place color bar for type of plot:
target diagram and Taylor diagram. Optional scale arguments
(xscale,yscale,cxscale) can be supplied to adjust the placement of
the colorbar to accommodate different situations.
INPUTS:
hc : handle returned by colorbar function
option : dictionary containing option values. (Refer to
display_target_diagram_options function for more
information.)
OUTPUTS:
location : x, y, width, height for color bar
KEYWORDS:
xscale : scale factor to adjust x-position of color bar
yscale : scale factor to adjust y-position of color bar
cxscale : scale factor to adjust thickness of color bar
'''
# Check for optional arguments and set defaults if required
if 'xscale' in kwargs:
xscale = kwargs['xscale']
else:
xscale = 1.0
if 'yscale' in kwargs:
yscale = kwargs['yscale']
else:
yscale = 1.0
if 'cxscale' in kwargs:
cxscale = kwargs['cxscale']
else:
cxscale = 1.0
    # Get the original position of the color bar (not the position modified
    # by Axes.apply_aspect being called).
cp = hc.ax.get_position(original=True)
# Calculate location : [left, bottom, width, height]
if 'checkstats' in option:
# Taylor diagram
location = [cp.x0 + xscale*0.5*(1+math.cos(math.radians(45)))*cp.width, yscale*cp.y0,
cxscale*cp.width/6, cp.height]
else:
# target diagram
location = [cp.x0 + xscale*0.5*(1+math.cos(math.radians(60)))*cp.width, yscale*cp.y0,
cxscale*cp.width/6, cxscale*cp.height]
return location
def _setColorBarTicks(hc,numBins,lenTick):
'''
Determine number of ticks for color bar.
Determines number of ticks for colorbar so tick labels do not
overlap.
INPUTS:
hc : handle of colorbar
numBins : number of bins to use for determining number of
tick values using ticker.MaxNLocator
lenTick : maximum number of characters for all the tick labels
OUTPUTS:
None
'''
maxChar = 10
lengthTick = lenTick
while lengthTick > maxChar:
# Limit number of ticks on color bar to numBins-1
hc.locator = ticker.MaxNLocator(nbins=numBins, prune = 'both')
hc.update_ticks()
# Check number of characters in tick labels is
# acceptable, otherwise reduce number of bins
locs = str(hc.get_ticks())
locs = locs[1:-1].split()
lengthTick = 0
for tick in locs:
tickStr = str(tick).rstrip('.')
lengthTick += len(tickStr)
if lengthTick > maxChar: numBins -=1
| gpl-3.0 |
f3r/scikit-learn | sklearn/linear_model/__init__.py | 270 | 3096 | """
The :mod:`sklearn.linear_model` module implements generalized linear models. It
includes Ridge regression, Bayesian Regression, Lasso and Elastic Net
estimators computed with Least Angle Regression and coordinate descent. It also
implements Stochastic Gradient Descent related algorithms.
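
A minimal usage sketch (illustrative only; ``X_train``, ``y_train`` and
``X_test`` stand for user-supplied arrays)::

    from sklearn.linear_model import Ridge

    model = Ridge(alpha=1.0).fit(X_train, y_train)
    y_pred = model.predict(X_test)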
"""
# See http://scikit-learn.sourceforge.net/modules/sgd.html and
# http://scikit-learn.sourceforge.net/modules/linear_model.html for
# complete documentation.
from .base import LinearRegression
from .bayes import BayesianRidge, ARDRegression
from .least_angle import (Lars, LassoLars, lars_path, LarsCV, LassoLarsCV,
LassoLarsIC)
from .coordinate_descent import (Lasso, ElasticNet, LassoCV, ElasticNetCV,
lasso_path, enet_path, MultiTaskLasso,
MultiTaskElasticNet, MultiTaskElasticNetCV,
MultiTaskLassoCV)
from .sgd_fast import Hinge, Log, ModifiedHuber, SquaredLoss, Huber
from .stochastic_gradient import SGDClassifier, SGDRegressor
from .ridge import (Ridge, RidgeCV, RidgeClassifier, RidgeClassifierCV,
ridge_regression)
from .logistic import (LogisticRegression, LogisticRegressionCV,
logistic_regression_path)
from .omp import (orthogonal_mp, orthogonal_mp_gram, OrthogonalMatchingPursuit,
OrthogonalMatchingPursuitCV)
from .passive_aggressive import PassiveAggressiveClassifier
from .passive_aggressive import PassiveAggressiveRegressor
from .perceptron import Perceptron
from .randomized_l1 import (RandomizedLasso, RandomizedLogisticRegression,
lasso_stability_path)
from .ransac import RANSACRegressor
from .theil_sen import TheilSenRegressor
__all__ = ['ARDRegression',
'BayesianRidge',
'ElasticNet',
'ElasticNetCV',
'Hinge',
'Huber',
'Lars',
'LarsCV',
'Lasso',
'LassoCV',
'LassoLars',
'LassoLarsCV',
'LassoLarsIC',
'LinearRegression',
'Log',
'LogisticRegression',
'LogisticRegressionCV',
'ModifiedHuber',
'MultiTaskElasticNet',
'MultiTaskElasticNetCV',
'MultiTaskLasso',
'MultiTaskLassoCV',
'OrthogonalMatchingPursuit',
'OrthogonalMatchingPursuitCV',
'PassiveAggressiveClassifier',
'PassiveAggressiveRegressor',
'Perceptron',
'RandomizedLasso',
'RandomizedLogisticRegression',
'Ridge',
'RidgeCV',
'RidgeClassifier',
'RidgeClassifierCV',
'SGDClassifier',
'SGDRegressor',
'SquaredLoss',
'TheilSenRegressor',
'enet_path',
'lars_path',
'lasso_path',
'lasso_stability_path',
'logistic_regression_path',
'orthogonal_mp',
'orthogonal_mp_gram',
'ridge_regression',
'RANSACRegressor']
| bsd-3-clause |
wanggang3333/scikit-learn | sklearn/linear_model/tests/test_coordinate_descent.py | 114 | 25281 | # Authors: Olivier Grisel <[email protected]>
# Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
from sys import version_info
import numpy as np
from scipy import interpolate, sparse
from copy import deepcopy
from sklearn.datasets import load_boston
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import SkipTest
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import TempMemmap
from sklearn.linear_model.coordinate_descent import Lasso, \
LassoCV, ElasticNet, ElasticNetCV, MultiTaskLasso, MultiTaskElasticNet, \
MultiTaskElasticNetCV, MultiTaskLassoCV, lasso_path, enet_path
from sklearn.linear_model import LassoLarsCV, lars_path
from sklearn.utils import check_array
def check_warnings():
if version_info < (2, 6):
raise SkipTest("Testing for warnings is not supported in versions \
older than Python 2.6")
def test_lasso_zero():
# Check that the lasso can handle zero data without crashing
X = [[0], [0], [0]]
y = [0, 0, 0]
clf = Lasso(alpha=0.1).fit(X, y)
pred = clf.predict([[1], [2], [3]])
assert_array_almost_equal(clf.coef_, [0])
assert_array_almost_equal(pred, [0, 0, 0])
assert_almost_equal(clf.dual_gap_, 0)
def test_lasso_toy():
# Test Lasso on a toy example for various values of alpha.
    # When validating this against glmnet, note that glmnet divides the
    # objective by nobs.
X = [[-1], [0], [1]]
Y = [-1, 0, 1] # just a straight line
T = [[2], [3], [4]] # test sample
clf = Lasso(alpha=1e-8)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [1])
assert_array_almost_equal(pred, [2, 3, 4])
assert_almost_equal(clf.dual_gap_, 0)
clf = Lasso(alpha=0.1)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [.85])
assert_array_almost_equal(pred, [1.7, 2.55, 3.4])
assert_almost_equal(clf.dual_gap_, 0)
clf = Lasso(alpha=0.5)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [.25])
assert_array_almost_equal(pred, [0.5, 0.75, 1.])
assert_almost_equal(clf.dual_gap_, 0)
clf = Lasso(alpha=1)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [.0])
assert_array_almost_equal(pred, [0, 0, 0])
assert_almost_equal(clf.dual_gap_, 0)
def test_enet_toy():
# Test ElasticNet for various parameters of alpha and l1_ratio.
    # Actually, the parameter alpha = 0 should not be allowed. However,
# we test it as a border case.
# ElasticNet is tested with and without precomputed Gram matrix
X = np.array([[-1.], [0.], [1.]])
Y = [-1, 0, 1] # just a straight line
T = [[2.], [3.], [4.]] # test sample
# this should be the same as lasso
clf = ElasticNet(alpha=1e-8, l1_ratio=1.0)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [1])
assert_array_almost_equal(pred, [2, 3, 4])
assert_almost_equal(clf.dual_gap_, 0)
clf = ElasticNet(alpha=0.5, l1_ratio=0.3, max_iter=100,
precompute=False)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.50819], decimal=3)
assert_array_almost_equal(pred, [1.0163, 1.5245, 2.0327], decimal=3)
assert_almost_equal(clf.dual_gap_, 0)
clf.set_params(max_iter=100, precompute=True)
clf.fit(X, Y) # with Gram
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.50819], decimal=3)
assert_array_almost_equal(pred, [1.0163, 1.5245, 2.0327], decimal=3)
assert_almost_equal(clf.dual_gap_, 0)
clf.set_params(max_iter=100, precompute=np.dot(X.T, X))
clf.fit(X, Y) # with Gram
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.50819], decimal=3)
assert_array_almost_equal(pred, [1.0163, 1.5245, 2.0327], decimal=3)
assert_almost_equal(clf.dual_gap_, 0)
clf = ElasticNet(alpha=0.5, l1_ratio=0.5)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.45454], 3)
assert_array_almost_equal(pred, [0.9090, 1.3636, 1.8181], 3)
assert_almost_equal(clf.dual_gap_, 0)
def build_dataset(n_samples=50, n_features=200, n_informative_features=10,
n_targets=1):
"""
build an ill-posed linear regression problem with many noisy features and
comparatively few samples
"""
random_state = np.random.RandomState(0)
if n_targets > 1:
w = random_state.randn(n_features, n_targets)
else:
w = random_state.randn(n_features)
w[n_informative_features:] = 0.0
X = random_state.randn(n_samples, n_features)
y = np.dot(X, w)
X_test = random_state.randn(n_samples, n_features)
y_test = np.dot(X_test, w)
return X, y, X_test, y_test
def test_lasso_cv():
X, y, X_test, y_test = build_dataset()
max_iter = 150
clf = LassoCV(n_alphas=10, eps=1e-3, max_iter=max_iter).fit(X, y)
assert_almost_equal(clf.alpha_, 0.056, 2)
clf = LassoCV(n_alphas=10, eps=1e-3, max_iter=max_iter, precompute=True)
clf.fit(X, y)
assert_almost_equal(clf.alpha_, 0.056, 2)
# Check that the lars and the coordinate descent implementation
# select a similar alpha
lars = LassoLarsCV(normalize=False, max_iter=30).fit(X, y)
    # for this we check that the two selected alphas are no more than one
    # grid point apart in clf.alphas_
assert_true(np.abs(
np.searchsorted(clf.alphas_[::-1], lars.alpha_)
- np.searchsorted(clf.alphas_[::-1], clf.alpha_)) <= 1)
# check that they also give a similar MSE
mse_lars = interpolate.interp1d(lars.cv_alphas_, lars.cv_mse_path_.T)
np.testing.assert_approx_equal(mse_lars(clf.alphas_[5]).mean(),
clf.mse_path_[5].mean(), significant=2)
# test set
assert_greater(clf.score(X_test, y_test), 0.99)
def test_lasso_cv_positive_constraint():
X, y, X_test, y_test = build_dataset()
max_iter = 500
# Ensure the unconstrained fit has a negative coefficient
clf_unconstrained = LassoCV(n_alphas=3, eps=1e-1, max_iter=max_iter, cv=2,
n_jobs=1)
clf_unconstrained.fit(X, y)
assert_true(min(clf_unconstrained.coef_) < 0)
# On same data, constrained fit has non-negative coefficients
clf_constrained = LassoCV(n_alphas=3, eps=1e-1, max_iter=max_iter,
positive=True, cv=2, n_jobs=1)
clf_constrained.fit(X, y)
assert_true(min(clf_constrained.coef_) >= 0)
def test_lasso_path_return_models_vs_new_return_gives_same_coefficients():
# Test that lasso_path with lars_path style output gives the
# same result
# Some toy data
X = np.array([[1, 2, 3.1], [2.3, 5.4, 4.3]]).T
y = np.array([1, 2, 3.1])
alphas = [5., 1., .5]
# Use lars_path and lasso_path(new output) with 1D linear interpolation
    # to compute the same path
alphas_lars, _, coef_path_lars = lars_path(X, y, method='lasso')
coef_path_cont_lars = interpolate.interp1d(alphas_lars[::-1],
coef_path_lars[:, ::-1])
alphas_lasso2, coef_path_lasso2, _ = lasso_path(X, y, alphas=alphas,
return_models=False)
coef_path_cont_lasso = interpolate.interp1d(alphas_lasso2[::-1],
coef_path_lasso2[:, ::-1])
assert_array_almost_equal(
coef_path_cont_lasso(alphas), coef_path_cont_lars(alphas),
decimal=1)
def test_enet_path():
# We use a large number of samples and of informative features so that
# the l1_ratio selected is more toward ridge than lasso
X, y, X_test, y_test = build_dataset(n_samples=200, n_features=100,
n_informative_features=100)
max_iter = 150
# Here we have a small number of iterations, and thus the
# ElasticNet might not converge. This is to speed up tests
clf = ElasticNetCV(alphas=[0.01, 0.05, 0.1], eps=2e-3,
l1_ratio=[0.5, 0.7], cv=3,
max_iter=max_iter)
ignore_warnings(clf.fit)(X, y)
# Well-conditioned settings, we should have selected our
# smallest penalty
assert_almost_equal(clf.alpha_, min(clf.alphas_))
    # Non-sparse ground truth: we should have selected an elastic-net
# that is closer to ridge than to lasso
assert_equal(clf.l1_ratio_, min(clf.l1_ratio))
clf = ElasticNetCV(alphas=[0.01, 0.05, 0.1], eps=2e-3,
l1_ratio=[0.5, 0.7], cv=3,
max_iter=max_iter, precompute=True)
ignore_warnings(clf.fit)(X, y)
# Well-conditioned settings, we should have selected our
# smallest penalty
assert_almost_equal(clf.alpha_, min(clf.alphas_))
    # Non-sparse ground truth: we should have selected an elastic-net
# that is closer to ridge than to lasso
assert_equal(clf.l1_ratio_, min(clf.l1_ratio))
# We are in well-conditioned settings with low noise: we should
# have a good test-set performance
assert_greater(clf.score(X_test, y_test), 0.99)
# Multi-output/target case
X, y, X_test, y_test = build_dataset(n_features=10, n_targets=3)
clf = MultiTaskElasticNetCV(n_alphas=5, eps=2e-3, l1_ratio=[0.5, 0.7],
cv=3, max_iter=max_iter)
ignore_warnings(clf.fit)(X, y)
# We are in well-conditioned settings with low noise: we should
# have a good test-set performance
assert_greater(clf.score(X_test, y_test), 0.99)
assert_equal(clf.coef_.shape, (3, 10))
# Mono-output should have same cross-validated alpha_ and l1_ratio_
# in both cases.
X, y, _, _ = build_dataset(n_features=10)
clf1 = ElasticNetCV(n_alphas=5, eps=2e-3, l1_ratio=[0.5, 0.7])
clf1.fit(X, y)
clf2 = MultiTaskElasticNetCV(n_alphas=5, eps=2e-3, l1_ratio=[0.5, 0.7])
clf2.fit(X, y[:, np.newaxis])
assert_almost_equal(clf1.l1_ratio_, clf2.l1_ratio_)
assert_almost_equal(clf1.alpha_, clf2.alpha_)
def test_path_parameters():
X, y, _, _ = build_dataset()
max_iter = 100
clf = ElasticNetCV(n_alphas=50, eps=1e-3, max_iter=max_iter,
l1_ratio=0.5, tol=1e-3)
clf.fit(X, y) # new params
assert_almost_equal(0.5, clf.l1_ratio)
assert_equal(50, clf.n_alphas)
assert_equal(50, len(clf.alphas_))
def test_warm_start():
X, y, _, _ = build_dataset()
clf = ElasticNet(alpha=0.1, max_iter=5, warm_start=True)
ignore_warnings(clf.fit)(X, y)
ignore_warnings(clf.fit)(X, y) # do a second round with 5 iterations
clf2 = ElasticNet(alpha=0.1, max_iter=10)
ignore_warnings(clf2.fit)(X, y)
assert_array_almost_equal(clf2.coef_, clf.coef_)
def test_lasso_alpha_warning():
X = [[-1], [0], [1]]
Y = [-1, 0, 1] # just a straight line
clf = Lasso(alpha=0)
assert_warns(UserWarning, clf.fit, X, Y)
def test_lasso_positive_constraint():
X = [[-1], [0], [1]]
y = [1, 0, -1] # just a straight line with negative slope
lasso = Lasso(alpha=0.1, max_iter=1000, positive=True)
lasso.fit(X, y)
assert_true(min(lasso.coef_) >= 0)
lasso = Lasso(alpha=0.1, max_iter=1000, precompute=True, positive=True)
lasso.fit(X, y)
assert_true(min(lasso.coef_) >= 0)
def test_enet_positive_constraint():
X = [[-1], [0], [1]]
y = [1, 0, -1] # just a straight line with negative slope
enet = ElasticNet(alpha=0.1, max_iter=1000, positive=True)
enet.fit(X, y)
assert_true(min(enet.coef_) >= 0)
def test_enet_cv_positive_constraint():
X, y, X_test, y_test = build_dataset()
max_iter = 500
# Ensure the unconstrained fit has a negative coefficient
enetcv_unconstrained = ElasticNetCV(n_alphas=3, eps=1e-1,
max_iter=max_iter,
cv=2, n_jobs=1)
enetcv_unconstrained.fit(X, y)
assert_true(min(enetcv_unconstrained.coef_) < 0)
# On same data, constrained fit has non-negative coefficients
enetcv_constrained = ElasticNetCV(n_alphas=3, eps=1e-1, max_iter=max_iter,
cv=2, positive=True, n_jobs=1)
enetcv_constrained.fit(X, y)
assert_true(min(enetcv_constrained.coef_) >= 0)
def test_uniform_targets():
enet = ElasticNetCV(fit_intercept=True, n_alphas=3)
m_enet = MultiTaskElasticNetCV(fit_intercept=True, n_alphas=3)
lasso = LassoCV(fit_intercept=True, n_alphas=3)
m_lasso = MultiTaskLassoCV(fit_intercept=True, n_alphas=3)
models_single_task = (enet, lasso)
models_multi_task = (m_enet, m_lasso)
rng = np.random.RandomState(0)
X_train = rng.random_sample(size=(10, 3))
X_test = rng.random_sample(size=(10, 3))
y1 = np.empty(10)
y2 = np.empty((10, 2))
for model in models_single_task:
for y_values in (0, 5):
y1.fill(y_values)
assert_array_equal(model.fit(X_train, y1).predict(X_test), y1)
assert_array_equal(model.alphas_, [np.finfo(float).resolution]*3)
for model in models_multi_task:
for y_values in (0, 5):
y2[:, 0].fill(y_values)
y2[:, 1].fill(2 * y_values)
assert_array_equal(model.fit(X_train, y2).predict(X_test), y2)
assert_array_equal(model.alphas_, [np.finfo(float).resolution]*3)
def test_multi_task_lasso_and_enet():
X, y, X_test, y_test = build_dataset()
Y = np.c_[y, y]
# Y_test = np.c_[y_test, y_test]
clf = MultiTaskLasso(alpha=1, tol=1e-8).fit(X, Y)
assert_true(0 < clf.dual_gap_ < 1e-5)
assert_array_almost_equal(clf.coef_[0], clf.coef_[1])
clf = MultiTaskElasticNet(alpha=1, tol=1e-8).fit(X, Y)
assert_true(0 < clf.dual_gap_ < 1e-5)
assert_array_almost_equal(clf.coef_[0], clf.coef_[1])
def test_lasso_readonly_data():
X = np.array([[-1], [0], [1]])
Y = np.array([-1, 0, 1]) # just a straight line
T = np.array([[2], [3], [4]]) # test sample
with TempMemmap((X, Y)) as (X, Y):
clf = Lasso(alpha=0.5)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [.25])
assert_array_almost_equal(pred, [0.5, 0.75, 1.])
assert_almost_equal(clf.dual_gap_, 0)
def test_multi_task_lasso_readonly_data():
X, y, X_test, y_test = build_dataset()
Y = np.c_[y, y]
with TempMemmap((X, Y)) as (X, Y):
Y = np.c_[y, y]
clf = MultiTaskLasso(alpha=1, tol=1e-8).fit(X, Y)
assert_true(0 < clf.dual_gap_ < 1e-5)
assert_array_almost_equal(clf.coef_[0], clf.coef_[1])
def test_enet_multitarget():
n_targets = 3
X, y, _, _ = build_dataset(n_samples=10, n_features=8,
n_informative_features=10, n_targets=n_targets)
estimator = ElasticNet(alpha=0.01, fit_intercept=True)
estimator.fit(X, y)
coef, intercept, dual_gap = (estimator.coef_, estimator.intercept_,
estimator.dual_gap_)
for k in range(n_targets):
estimator.fit(X, y[:, k])
assert_array_almost_equal(coef[k, :], estimator.coef_)
assert_array_almost_equal(intercept[k], estimator.intercept_)
assert_array_almost_equal(dual_gap[k], estimator.dual_gap_)
def test_multioutput_enetcv_error():
X = np.random.randn(10, 2)
y = np.random.randn(10, 2)
clf = ElasticNetCV()
assert_raises(ValueError, clf.fit, X, y)
def test_multitask_enet_and_lasso_cv():
X, y, _, _ = build_dataset(n_features=100, n_targets=3)
clf = MultiTaskElasticNetCV().fit(X, y)
assert_almost_equal(clf.alpha_, 0.00556, 3)
clf = MultiTaskLassoCV().fit(X, y)
assert_almost_equal(clf.alpha_, 0.00278, 3)
X, y, _, _ = build_dataset(n_targets=3)
clf = MultiTaskElasticNetCV(n_alphas=50, eps=1e-3, max_iter=100,
l1_ratio=[0.3, 0.5], tol=1e-3)
clf.fit(X, y)
assert_equal(0.5, clf.l1_ratio_)
assert_equal((3, X.shape[1]), clf.coef_.shape)
assert_equal((3, ), clf.intercept_.shape)
assert_equal((2, 50, 3), clf.mse_path_.shape)
assert_equal((2, 50), clf.alphas_.shape)
X, y, _, _ = build_dataset(n_targets=3)
clf = MultiTaskLassoCV(n_alphas=50, eps=1e-3, max_iter=100, tol=1e-3)
clf.fit(X, y)
assert_equal((3, X.shape[1]), clf.coef_.shape)
assert_equal((3, ), clf.intercept_.shape)
assert_equal((50, 3), clf.mse_path_.shape)
assert_equal(50, len(clf.alphas_))
def test_1d_multioutput_enet_and_multitask_enet_cv():
X, y, _, _ = build_dataset(n_features=10)
y = y[:, np.newaxis]
clf = ElasticNetCV(n_alphas=5, eps=2e-3, l1_ratio=[0.5, 0.7])
clf.fit(X, y[:, 0])
clf1 = MultiTaskElasticNetCV(n_alphas=5, eps=2e-3, l1_ratio=[0.5, 0.7])
clf1.fit(X, y)
assert_almost_equal(clf.l1_ratio_, clf1.l1_ratio_)
assert_almost_equal(clf.alpha_, clf1.alpha_)
assert_almost_equal(clf.coef_, clf1.coef_[0])
assert_almost_equal(clf.intercept_, clf1.intercept_[0])
def test_1d_multioutput_lasso_and_multitask_lasso_cv():
X, y, _, _ = build_dataset(n_features=10)
y = y[:, np.newaxis]
clf = LassoCV(n_alphas=5, eps=2e-3)
clf.fit(X, y[:, 0])
clf1 = MultiTaskLassoCV(n_alphas=5, eps=2e-3)
clf1.fit(X, y)
assert_almost_equal(clf.alpha_, clf1.alpha_)
assert_almost_equal(clf.coef_, clf1.coef_[0])
assert_almost_equal(clf.intercept_, clf1.intercept_[0])
def test_sparse_input_dtype_enet_and_lassocv():
X, y, _, _ = build_dataset(n_features=10)
clf = ElasticNetCV(n_alphas=5)
clf.fit(sparse.csr_matrix(X), y)
clf1 = ElasticNetCV(n_alphas=5)
clf1.fit(sparse.csr_matrix(X, dtype=np.float32), y)
assert_almost_equal(clf.alpha_, clf1.alpha_, decimal=6)
assert_almost_equal(clf.coef_, clf1.coef_, decimal=6)
clf = LassoCV(n_alphas=5)
clf.fit(sparse.csr_matrix(X), y)
clf1 = LassoCV(n_alphas=5)
clf1.fit(sparse.csr_matrix(X, dtype=np.float32), y)
assert_almost_equal(clf.alpha_, clf1.alpha_, decimal=6)
assert_almost_equal(clf.coef_, clf1.coef_, decimal=6)
def test_precompute_invalid_argument():
X, y, _, _ = build_dataset()
for clf in [ElasticNetCV(precompute="invalid"),
LassoCV(precompute="invalid")]:
assert_raises(ValueError, clf.fit, X, y)
def test_warm_start_convergence():
X, y, _, _ = build_dataset()
model = ElasticNet(alpha=1e-3, tol=1e-3).fit(X, y)
n_iter_reference = model.n_iter_
# This dataset is not trivial enough for the model to converge in one pass.
assert_greater(n_iter_reference, 2)
# Check that n_iter_ is invariant to multiple calls to fit
# when warm_start=False, all else being equal.
model.fit(X, y)
n_iter_cold_start = model.n_iter_
assert_equal(n_iter_cold_start, n_iter_reference)
# Fit the same model again, using a warm start: the optimizer just performs
# a single pass before checking that it has already converged
model.set_params(warm_start=True)
model.fit(X, y)
n_iter_warm_start = model.n_iter_
assert_equal(n_iter_warm_start, 1)
def test_warm_start_convergence_with_regularizer_decrement():
boston = load_boston()
X, y = boston.data, boston.target
# Train a model to converge on a lightly regularized problem
final_alpha = 1e-5
low_reg_model = ElasticNet(alpha=final_alpha).fit(X, y)
# Fitting a new model on a more regularized version of the same problem.
    # Fitting with high regularization is easier, so it should converge faster
# in general.
high_reg_model = ElasticNet(alpha=final_alpha * 10).fit(X, y)
assert_greater(low_reg_model.n_iter_, high_reg_model.n_iter_)
# Fit the solution to the original, less regularized version of the
# problem but from the solution of the highly regularized variant of
# the problem as a better starting point. This should also converge
# faster than the original model that starts from zero.
warm_low_reg_model = deepcopy(high_reg_model)
warm_low_reg_model.set_params(warm_start=True, alpha=final_alpha)
warm_low_reg_model.fit(X, y)
assert_greater(low_reg_model.n_iter_, warm_low_reg_model.n_iter_)
def test_random_descent():
# Test that both random and cyclic selection give the same results.
# Ensure that the test models fully converge and check a wide
# range of conditions.
# This uses the coordinate descent algo using the gram trick.
X, y, _, _ = build_dataset(n_samples=50, n_features=20)
clf_cyclic = ElasticNet(selection='cyclic', tol=1e-8)
clf_cyclic.fit(X, y)
clf_random = ElasticNet(selection='random', tol=1e-8, random_state=42)
clf_random.fit(X, y)
assert_array_almost_equal(clf_cyclic.coef_, clf_random.coef_)
assert_almost_equal(clf_cyclic.intercept_, clf_random.intercept_)
# This uses the descent algo without the gram trick
clf_cyclic = ElasticNet(selection='cyclic', tol=1e-8)
clf_cyclic.fit(X.T, y[:20])
clf_random = ElasticNet(selection='random', tol=1e-8, random_state=42)
clf_random.fit(X.T, y[:20])
assert_array_almost_equal(clf_cyclic.coef_, clf_random.coef_)
assert_almost_equal(clf_cyclic.intercept_, clf_random.intercept_)
# Sparse Case
clf_cyclic = ElasticNet(selection='cyclic', tol=1e-8)
clf_cyclic.fit(sparse.csr_matrix(X), y)
clf_random = ElasticNet(selection='random', tol=1e-8, random_state=42)
clf_random.fit(sparse.csr_matrix(X), y)
assert_array_almost_equal(clf_cyclic.coef_, clf_random.coef_)
assert_almost_equal(clf_cyclic.intercept_, clf_random.intercept_)
# Multioutput case.
new_y = np.hstack((y[:, np.newaxis], y[:, np.newaxis]))
clf_cyclic = MultiTaskElasticNet(selection='cyclic', tol=1e-8)
clf_cyclic.fit(X, new_y)
clf_random = MultiTaskElasticNet(selection='random', tol=1e-8,
random_state=42)
clf_random.fit(X, new_y)
assert_array_almost_equal(clf_cyclic.coef_, clf_random.coef_)
assert_almost_equal(clf_cyclic.intercept_, clf_random.intercept_)
# Raise error when selection is not in cyclic or random.
clf_random = ElasticNet(selection='invalid')
assert_raises(ValueError, clf_random.fit, X, y)
def test_deprecation_precompute_enet():
# Test that setting precompute="auto" gives a Deprecation Warning.
X, y, _, _ = build_dataset(n_samples=20, n_features=10)
clf = ElasticNet(precompute="auto")
assert_warns(DeprecationWarning, clf.fit, X, y)
clf = Lasso(precompute="auto")
assert_warns(DeprecationWarning, clf.fit, X, y)
def test_enet_path_positive():
# Test that the coefs returned by positive=True in enet_path are positive
X, y, _, _ = build_dataset(n_samples=50, n_features=50)
for path in [enet_path, lasso_path]:
pos_path_coef = path(X, y, positive=True)[1]
assert_true(np.all(pos_path_coef >= 0))
def test_sparse_dense_descent_paths():
# Test that dense and sparse input give the same input for descent paths.
X, y, _, _ = build_dataset(n_samples=50, n_features=20)
csr = sparse.csr_matrix(X)
for path in [enet_path, lasso_path]:
_, coefs, _ = path(X, y, fit_intercept=False)
_, sparse_coefs, _ = path(csr, y, fit_intercept=False)
assert_array_almost_equal(coefs, sparse_coefs)
def test_check_input_false():
X, y, _, _ = build_dataset(n_samples=20, n_features=10)
X = check_array(X, order='F', dtype='float64')
y = check_array(X, order='F', dtype='float64')
clf = ElasticNet(selection='cyclic', tol=1e-8)
# Check that no error is raised if data is provided in the right format
clf.fit(X, y, check_input=False)
X = check_array(X, order='F', dtype='float32')
clf.fit(X, y, check_input=True)
# Check that an error is raised if data is provided in the wrong format,
# because of check bypassing
assert_raises(ValueError, clf.fit, X, y, check_input=False)
    # With no input checking, providing X in C order should result in
    # incorrect computation
X = check_array(X, order='C', dtype='float64')
clf.fit(X, y, check_input=False)
coef_false = clf.coef_
clf.fit(X, y, check_input=True)
coef_true = clf.coef_
assert_raises(AssertionError, assert_array_almost_equal,
coef_true, coef_false)
def test_overrided_gram_matrix():
X, y, _, _ = build_dataset(n_samples=20, n_features=10)
Gram = X.T.dot(X)
clf = ElasticNet(selection='cyclic', tol=1e-8, precompute=Gram,
fit_intercept=True)
assert_warns_message(UserWarning,
"Gram matrix was provided but X was centered"
" to fit intercept, "
"or X was normalized : recomputing Gram matrix.",
clf.fit, X, y)
| bsd-3-clause |
stargaser/astropy | astropy/visualization/tests/test_norm.py | 3 | 9643 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import pytest
import numpy as np
from numpy import ma
from numpy.testing import assert_allclose, assert_equal
from astropy.visualization.mpl_normalize import ImageNormalize, simple_norm, imshow_norm
from astropy.visualization.interval import ManualInterval, PercentileInterval
from astropy.visualization.stretch import SqrtStretch
try:
import matplotlib # pylint: disable=W0611
from matplotlib import pyplot as plt
HAS_MATPLOTLIB = True
except ImportError:
HAS_MATPLOTLIB = False
DATA = np.linspace(0., 15., 6)
DATA2 = np.arange(3)
DATA2SCL = 0.5 * DATA2
@pytest.mark.skipif('HAS_MATPLOTLIB')
def test_normalize_error_message():
with pytest.raises(ImportError) as exc:
ImageNormalize()
assert (exc.value.args[0] == "matplotlib is required in order to use "
"this class.")
@pytest.mark.skipif('not HAS_MATPLOTLIB')
class TestNormalize:
def test_invalid_interval(self):
with pytest.raises(TypeError):
ImageNormalize(vmin=2., vmax=10., interval=ManualInterval,
clip=True)
def test_invalid_stretch(self):
with pytest.raises(TypeError):
ImageNormalize(vmin=2., vmax=10., stretch=SqrtStretch,
clip=True)
def test_stretch_none(self):
with pytest.raises(ValueError):
ImageNormalize(vmin=2., vmax=10., stretch=None)
def test_scalar(self):
norm = ImageNormalize(vmin=2., vmax=10., stretch=SqrtStretch(),
clip=True)
norm2 = ImageNormalize(data=6, interval=ManualInterval(2, 10),
stretch=SqrtStretch(), clip=True)
assert_allclose(norm(6), 0.70710678)
assert_allclose(norm(6), norm2(6))
def test_clip(self):
norm = ImageNormalize(vmin=2., vmax=10., stretch=SqrtStretch(),
clip=True)
norm2 = ImageNormalize(DATA, interval=ManualInterval(2, 10),
stretch=SqrtStretch(), clip=True)
output = norm(DATA)
expected = [0., 0.35355339, 0.70710678, 0.93541435, 1., 1.]
assert_allclose(output, expected)
assert_allclose(output.mask, [0, 0, 0, 0, 0, 0])
assert_allclose(output, norm2(DATA))
def test_noclip(self):
norm = ImageNormalize(vmin=2., vmax=10., stretch=SqrtStretch(),
clip=False)
norm2 = ImageNormalize(DATA, interval=ManualInterval(2, 10),
stretch=SqrtStretch(), clip=False)
output = norm(DATA)
expected = [np.nan, 0.35355339, 0.70710678, 0.93541435, 1.11803399,
1.27475488]
assert_allclose(output, expected)
assert_allclose(output.mask, [0, 0, 0, 0, 0, 0])
assert_allclose(norm.inverse(norm(DATA))[1:], DATA[1:])
assert_allclose(output, norm2(DATA))
def test_implicit_autoscale(self):
norm = ImageNormalize(vmin=None, vmax=10., stretch=SqrtStretch(),
clip=False)
norm2 = ImageNormalize(DATA, interval=ManualInterval(None, 10),
stretch=SqrtStretch(), clip=False)
output = norm(DATA)
assert norm.vmin == np.min(DATA)
assert norm.vmax == 10.
assert_allclose(output, norm2(DATA))
norm = ImageNormalize(vmin=2., vmax=None, stretch=SqrtStretch(),
clip=False)
norm2 = ImageNormalize(DATA, interval=ManualInterval(2, None),
stretch=SqrtStretch(), clip=False)
output = norm(DATA)
assert norm.vmin == 2.
assert norm.vmax == np.max(DATA)
assert_allclose(output, norm2(DATA))
def test_masked_clip(self):
mdata = ma.array(DATA, mask=[0, 0, 1, 0, 0, 0])
norm = ImageNormalize(vmin=2., vmax=10., stretch=SqrtStretch(),
clip=True)
norm2 = ImageNormalize(mdata, interval=ManualInterval(2, 10),
stretch=SqrtStretch(), clip=True)
output = norm(mdata)
expected = [0., 0.35355339, 1., 0.93541435, 1., 1.]
assert_allclose(output.filled(-10), expected)
assert_allclose(output.mask, [0, 0, 0, 0, 0, 0])
assert_allclose(output, norm2(mdata))
def test_masked_noclip(self):
mdata = ma.array(DATA, mask=[0, 0, 1, 0, 0, 0])
norm = ImageNormalize(vmin=2., vmax=10., stretch=SqrtStretch(),
clip=False)
norm2 = ImageNormalize(mdata, interval=ManualInterval(2, 10),
stretch=SqrtStretch(), clip=False)
output = norm(mdata)
expected = [np.nan, 0.35355339, -10, 0.93541435, 1.11803399,
1.27475488]
assert_allclose(output.filled(-10), expected)
assert_allclose(output.mask, [0, 0, 1, 0, 0, 0])
assert_allclose(norm.inverse(norm(DATA))[1:], DATA[1:])
assert_allclose(output, norm2(mdata))
def test_invalid_data(self):
data = np.arange(25.).reshape((5, 5))
data[2, 2] = np.nan
data[1, 2] = np.inf
percent = 85.0
interval = PercentileInterval(percent)
# initialized without data
norm = ImageNormalize(interval=interval)
norm(data) # sets vmin/vmax
assert_equal((norm.vmin, norm.vmax), (1.65, 22.35))
# initialized with data
norm2 = ImageNormalize(data, interval=interval)
assert_equal((norm2.vmin, norm2.vmax), (norm.vmin, norm.vmax))
norm3 = simple_norm(data, 'linear', percent=percent)
assert_equal((norm3.vmin, norm3.vmax), (norm.vmin, norm.vmax))
assert_allclose(norm(data), norm2(data))
assert_allclose(norm(data), norm3(data))
norm4 = ImageNormalize()
norm4(data) # sets vmin/vmax
assert_equal((norm4.vmin, norm4.vmax), (0, 24))
norm5 = ImageNormalize(data)
assert_equal((norm5.vmin, norm5.vmax), (norm4.vmin, norm4.vmax))
@pytest.mark.skipif('not HAS_MATPLOTLIB')
class TestImageScaling:
def test_linear(self):
"""Test linear scaling."""
norm = simple_norm(DATA2, stretch='linear')
assert_allclose(norm(DATA2), DATA2SCL, atol=0, rtol=1.e-5)
def test_sqrt(self):
"""Test sqrt scaling."""
norm = simple_norm(DATA2, stretch='sqrt')
assert_allclose(norm(DATA2), np.sqrt(DATA2SCL), atol=0, rtol=1.e-5)
def test_power(self):
"""Test power scaling."""
power = 3.0
norm = simple_norm(DATA2, stretch='power', power=power)
assert_allclose(norm(DATA2), DATA2SCL ** power, atol=0, rtol=1.e-5)
def test_log(self):
"""Test log10 scaling."""
norm = simple_norm(DATA2, stretch='log')
ref = np.log10(1000 * DATA2SCL + 1.0) / np.log10(1001.0)
assert_allclose(norm(DATA2), ref, atol=0, rtol=1.e-5)
def test_log_with_log_a(self):
"""Test log10 scaling with a custom log_a."""
log_a = 100
norm = simple_norm(DATA2, stretch='log', log_a=log_a)
ref = np.log10(log_a * DATA2SCL + 1.0) / np.log10(log_a + 1)
assert_allclose(norm(DATA2), ref, atol=0, rtol=1.e-5)
def test_asinh(self):
"""Test asinh scaling."""
norm = simple_norm(DATA2, stretch='asinh')
ref = np.arcsinh(10 * DATA2SCL) / np.arcsinh(10)
assert_allclose(norm(DATA2), ref, atol=0, rtol=1.e-5)
def test_asinh_with_asinh_a(self):
"""Test asinh scaling with a custom asinh_a."""
asinh_a = 0.5
norm = simple_norm(DATA2, stretch='asinh', asinh_a=asinh_a)
ref = np.arcsinh(DATA2SCL / asinh_a) / np.arcsinh(1. / asinh_a)
assert_allclose(norm(DATA2), ref, atol=0, rtol=1.e-5)
def test_min(self):
"""Test linear scaling."""
norm = simple_norm(DATA2, stretch='linear', min_cut=1., clip=True)
assert_allclose(norm(DATA2), [0., 0., 1.], atol=0, rtol=1.e-5)
def test_percent(self):
"""Test percent keywords."""
norm = simple_norm(DATA2, stretch='linear', percent=99., clip=True)
assert_allclose(norm(DATA2), DATA2SCL, atol=0, rtol=1.e-5)
norm2 = simple_norm(DATA2, stretch='linear', min_percent=0.5,
max_percent=99.5, clip=True)
assert_allclose(norm(DATA2), norm2(DATA2), atol=0, rtol=1.e-5)
def test_invalid_stretch(self):
"""Test invalid stretch keyword."""
with pytest.raises(ValueError):
simple_norm(DATA2, stretch='invalid')
@pytest.mark.skipif('not HAS_MATPLOTLIB')
def test_imshow_norm():
image = np.random.randn(10, 10)
ax = plt.subplot(label='test_imshow_norm')
imshow_norm(image, ax=ax)
with pytest.raises(ValueError):
# X and data are the same, can't give both
imshow_norm(image, X=image, ax=ax)
with pytest.raises(ValueError):
# illegal to manually pass in normalization since that defeats the point
imshow_norm(image, ax=ax, norm=ImageNormalize())
imshow_norm(image, ax=ax, vmin=0, vmax=1)
# vmin/vmax "shadow" the MPL versions, so imshow_only_kwargs allows direct-setting
imshow_norm(image, ax=ax, imshow_only_kwargs=dict(vmin=0, vmax=1))
# but it should fail for an argument that is not in ImageNormalize
with pytest.raises(ValueError):
imshow_norm(image, ax=ax, imshow_only_kwargs=dict(cmap='jet'))
# make sure the pyplot version works
imres, norm = imshow_norm(image, ax=None)
assert isinstance(norm, ImageNormalize)
plt.close('all')
| bsd-3-clause |
stephenliu1989/msmbuilder | msmbuilder/lumping/pcca.py | 1 | 4096 | from __future__ import print_function, division, absolute_import
import numpy as np
from ..msm import MarkovStateModel
class PCCA(MarkovStateModel):
"""Perron Cluster Cluster Analysis (PCCA) for coarse-graining (lumping)
microstates into macrostates.
Parameters
----------
n_macrostates : int
The desired number of macrostates in the lumped model.
kwargs : optional
Additional keyword arguments to be passed to MarkovStateModel. See
msmbuilder.msm.MarkovStateModel for possible options.
Notes
-----
PCCA is a subclass of MarkovStateModel. However, the MSM properties
and attributes on PCCA refer to the MICROSTATE properties--e.g.
pcca.transmat_ is the microstate transition matrix. To get the
macrostate transition matrix, you must fit a new MarkovStateModel
object on the output (assignments) of PCCA().
"""
def __init__(self, n_macrostates, pcca_tolerance=1e-5, **kwargs):
self.n_macrostates = n_macrostates
self.pcca_tolerance = pcca_tolerance
super(PCCA, self).__init__(**kwargs)
def fit(self, sequences, y=None):
"""Fit a PCCA lumping model using a sequence of cluster assignments.
Parameters
----------
sequences : list(np.ndarray(dtype='int'))
List of arrays of cluster assignments
y : None
Unused, present for sklearn compatibility only.
Returns
-------
self
"""
super(PCCA, self).fit(sequences, y=y)
self._do_lumping()
return self
def _do_lumping(self):
"""Do the PCCA lumping.
Notes
-------
1. Iterate over the eigenvectors, starting with the slowest.
2. Calculate the spread of that eigenvector within each existing
macrostate.
3. Pick the macrostate with the largest eigenvector spread.
4. Split the macrostate based on the sign of the eigenvector.
"""
# Extract non-perron eigenvectors
right_eigenvectors = self.right_eigenvectors_[:, 1:]
assert self.n_states_ > 0
microstate_mapping = np.zeros(self.n_states_, dtype=int)
def spread(x):
return x.max() - x.min()
for i in range(self.n_macrostates - 1):
v = right_eigenvectors[:, i]
all_spreads = np.array([spread(v[microstate_mapping == k])
for k in range(i + 1)])
state_to_split = np.argmax(all_spreads)
inds = ((microstate_mapping == state_to_split) &
(v >= self.pcca_tolerance))
microstate_mapping[inds] = i + 1
self.microstate_mapping_ = microstate_mapping
def partial_transform(self, sequence, mode='clip'):
trimmed_sequence = super(PCCA, self).partial_transform(sequence, mode)
if mode == 'clip':
return [self.microstate_mapping_[seq] for seq in trimmed_sequence]
elif mode == 'fill':
def nan_get(x):
try:
x = int(x)
return self.microstate_mapping_[x]
except ValueError:
return np.nan
return np.asarray([nan_get(x) for x in trimmed_sequence])
else:
raise ValueError
@classmethod
def from_msm(cls, msm, n_macrostates):
"""Create and fit lumped model from pre-existing MSM.
Parameters
----------
        msm : msmbuilder.msm.MarkovStateModel
The input microstate msm to use.
n_macrostates : int
The number of macrostates
Returns
-------
lumper : cls
The fit PCCA(+) object.
"""
params = msm.get_params()
lumper = cls(n_macrostates, **params)
lumper.transmat_ = msm.transmat_
lumper.populations_ = msm.populations_
lumper.mapping_ = msm.mapping_
lumper.countsmat_ = msm.countsmat_
lumper.n_states_ = msm.n_states_
lumper._do_lumping()
return lumper
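# A minimal usage sketch (illustrative only; `assignments` is a hypothetical
# list of integer microstate trajectories, and the scikit-learn style calls
# follow the MarkovStateModel API). As the class docstring notes, the
# macrostate transition matrix comes from fitting a fresh MarkovStateModel
# on the lumped assignments:
#
#   micro_msm = MarkovStateModel(lag_time=10).fit(assignments)
#   pcca = PCCA.from_msm(micro_msm, n_macrostates=4)
#   macro_msm = MarkovStateModel(lag_time=10).fit(pcca.transform(assignments))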
| lgpl-2.1 |
spacecowboy/article-annriskgroups-source | classensemble.py | 1 | 5786 | # -*- coding: utf-8 -*-
'''
Utility methods for doing survival group ensembles
'''
from ann.ensemble import Ensemble
import numpy as np
import pandas as pd
def ordered_bagging(length, count=None):
    '''Draw `count` indices (with replacement) from range(length) and return
    them sorted, so that indexing data with the result preserves the original
    ordering. An example: for the original list [0, 1, 2, 3, 4, 5] a possible
    sample is [0, 0, 1, 4, 5, 5]; note the result maintains the same sorting.
'''
if count is None:
count = length
r = np.random.randint(0, length, count)
r.sort() # sorts inplace
return r
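# A minimal usage sketch (illustrative comment, not part of the original
# module): because the drawn indices come back sorted, fancy indexing with
# them keeps already-sorted data sorted.
#
#   import numpy as np
#   data = np.arange(10, 20)            # hypothetical, already sorted data
#   idx = ordered_bagging(len(data))    # e.g. array([0, 0, 2, 5, 5, 8, 9, 9, 9, 9])
#   bagged = data[idx]                  # still non-decreasing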
class ClassEnsemble(Ensemble):
def __init__(self, high_nets=None, low_nets=None, netgen=None):
'''
Arguments, either one of these sets:
high_nets, low_nets - lists of networks which will find high risk
groups and low risk groups respectively.
netgen - A function that generates the previous two lists, with fresh networks.
'''
self.netgen = netgen
if not high_nets or not low_nets:
self.networks = []
self.high_nets = []
self.low_nets = []
else:
self.networks = high_nets + low_nets
self.high_nets = high_nets
self.low_nets = low_nets
if len(high_nets) % 2 == 0 or len(low_nets) % 2 == 0:
raise ValueError("Please supply an odd number of each network type to resolve tie issues.")
if len(high_nets) != len(low_nets):
raise ValueError("Please supply an equal amount of each network")
if 2 != self.networks[0].output_count:
raise ValueError("This class will not do what you think if networks don't have 2 output neurons.")
def predict_class(self, indata):
'''
        Predict the class of a data point using majority voting.
        Arguments:
        indata - Data point to predict.
        Each high-risk network votes 'high' or 'hr' (not high); each low-risk
        network votes 'low' or 'lr' (not low). The winning vote of each
        sub-ensemble is then combined: 'hr' together with 'lr' yields the
        middle group ('mid'); 'lr' with a 'high' winner yields 'high'; 'hr'
        with a 'low' winner yields 'low'. If both sub-ensembles vote for
        their own positive class, the label with more votes wins and an
        exact tie falls back to 'mid'.
'''
votes = {}
hwin = None
lwin = None
for n in self.high_nets:
g = 'high' if n.predict_class(indata) == 0 else 'hr'
votes[g] = votes.get(g, 0) + 1
if hwin is None:
hwin = g
elif votes[g] > votes[hwin]:
hwin = g
for n in self.low_nets:
g = 'low' if n.predict_class(indata) == 0 else 'lr'
votes[g] = votes.get(g, 0) + 1
if lwin is None:
lwin = g
elif votes[g] > votes[lwin]:
lwin = g
if lwin == 'lr' and hwin == 'hr':
# Answer is mid=1
return 'mid'
elif lwin == 'lr':
# Answer is high risk
return 'high'
elif hwin == 'hr':
# Answer is low risk
return 'low'
# No mid group
elif votes[lwin] == votes[hwin]:
# True tie, return mid
return 'mid'
elif votes[lwin] > votes[hwin]:
return 'low'
else: # votes[hwin] > votes[lwin]
return 'high'
def learn(self, datain, dataout, limit=None):
'''
Learn using ordered bagging (maintains sort order).
Set limit to train on smaller bagging subsets.
Data is sorted before training.
'''
# First make sure data is sorted
asc = dataout[:, 0].argsort()
for net in self.networks:
# Create new data using bagging. Combine the data into one array
bag = ordered_bagging(datain.shape[0], count=limit)
net.learn(datain[asc][bag], dataout[asc][bag])
def fit(self, df, duration_col, event_col):
'''
Same as learn, but instead conforms to the interface defined by
        Lifelines and accepts a data frame as the data. Also generates
        new networks using self.netgen if it was defined.
'''
if self.netgen is not None:
self.high_nets, self.low_nets = self.netgen(df)
self.networks = self.high_nets + self.low_nets
# Save columns for prediction later
self.x_cols = df.columns - [duration_col, event_col]
self.learn(df[self.x_cols].values,
df[[duration_col, event_col]].values)
def predict_classes(self, df):
'''
        Predict the classes of an entire DataFrame.
Returns a DataFrame.
'''
labels, m = self.label_data(df)
res = pd.DataFrame(index=df.index, columns=['group'])
res.iloc[:, 0] = labels
return res
def label_data(self, df):
'''
Returns the group labels of each input pattern in the DataFrame.
It must be a dataframe to guarantee that the same column ordering
is used.
Returns:
(grouplabels, members)
'''
grouplabels = []
members = {}
for idx, tin in enumerate(df[self.x_cols].values):
label = self.predict_class(tin)
grouplabels.append(label)
# Add index to member list
if label not in members:
members[label] = []
members[label].append(idx)
grouplabels = np.array(grouplabels)
return grouplabels, members
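# A minimal usage sketch (illustrative only; `my_netgen` and `df` are
# hypothetical). `df` is a pandas DataFrame holding the covariates plus a
# duration column and an event column:
#
#   ens = ClassEnsemble(netgen=my_netgen)
#   ens.fit(df, duration_col='time', event_col='event')
#   groups = ens.predict_classes(df)      # DataFrame with a 'group' column
#   labels, members = ens.label_data(df)  # labels plus index lists per group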
| gpl-3.0 |
JacekPierzchlewski/RxCS | examples/signals/randMult_ex2.py | 1 | 3859 | """
This script is an example of how to use the Random Multitone Signal
Generator module. |br|
In this example 1 random multitone signal is generated. |br|
Time of the signal is 10 us, the signal representation sampling frequency is
5 MHz. The highest possible frequency of a tone in the signal is 2 MHz,
the signal spectrum resolution is 100 kHz. |br|
There are 3 completely random tones in the signal. |br|
The power of the signal is not regulated. |br|
The noise is not added to the signal. |br|
After the generation, the spectrum of the signal is analyzed with an FFT
and plotted.
*Author*:
Jacek Pierzchlewski, Aalborg University, Denmark. <[email protected]>
*Version*:
0.1 | 15-MAY-2014 : * Initial version. |br|
0.2 | 21-MAY-2014 : * Docstrings added and PEP8 adjustments. |br|
1.0 | 21-MAY-2014 : * Version 1.0 released. |br|
1.1 | 22-MAY-2014 : * Specified frequencies removed, 3 fully random tones
in the signal. |br|
1.2 | 15-JUL-2015 : * Adjusted to new name of random multitone gen. |br|
2.0 | 21-JUL-2015 : * Version 2.0 released (adjusted to v2.0 of the generator) |br|
2.0r1 | 04-AUG-2015 : * File name changed |br|
*License*:
BSD 2-Clause
"""
from __future__ import division
import rxcs
import numpy as np
import matplotlib.pyplot as plt
def _randMult_ex2():
# Put the generator on board
gen = rxcs.sig.randMult()
# Settings for the generator
gen.tS = 10e-6 # Time of the signal is 10 us
gen.fR = 5e6 # The signal representation sampling frequency is 5 MHz
gen.fMax = 2e6 # The highest possible frequency in the signal is 2 MHz
gen.fRes = 100e3 # The signal spectrum resolution is 100 kHz
# Random tones in the signal
gen.nTones = 3 # The number of random tones
# Allowed amplitudes in the random tones:
gen.iMinAmp = 0.2 # Minimum amplitude of random tones
gen.iGraAmp = 0.1 # Gradation of amplitude of random tones
gen.iMaxAmp = 0.4 # Maximum amplitude of random tones
# Allowed phases in the random tones:
gen.iMinPhs = 0 # Minimum phase of random tones
gen.iGraPhs = 1 # Gradation of phase of random tones
gen.iMaxPhs = 90 # Maximum phase of random tones
# Run the generator and get the output
gen.run()
vSig = gen.mSig[0, :] # Get the generated signal
fFFTR = gen.fFFTR # Signal FFT frequency resolution
# -----------------------------------------------------------------
vFFT = np.fft.fft(vSig) # Analyze the spectrum of the signal
iS = vFFT.size # Get the size of the spectrum
# Compute the amplitudes of tones
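    # (Single-sided amplitude scaling: each bin in the lower half of the FFT
    # is doubled and divided by the FFT length; strictly the DC bin should
    # not be doubled, but the generated multitone signal has no DC component.)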
vFFTa = 2*np.abs(vFFT[np.arange(iS/2).astype(int)])/iS
# Create a vector with frequencies of the signal spectrum
vF = fFFTR * np.arange(iS/2)
# -----------------------------------------------------------------
# Plot half of the spectrum
hFig1 = plt.figure(1)
hSubPlot1 = hFig1.add_subplot(111)
hSubPlot1.grid(True)
hSubPlot1.set_title('Spectrum of a random multitone signal')
hSubPlot1.set_xlabel('Frequency [Hz]')
(markerline, stemlines, baseline) = hSubPlot1.stem(vF, vFFTa,
linefmt='b-',
markerfmt='bo',
basefmt='r-')
hSubPlot1.set_xlim(-100e3, 2.5e6)
hSubPlot1.set_ylim(-0.1, 1.1)
plt.setp(stemlines, color='b', linewidth=2.0)
plt.setp(markerline, color='b', markersize=10.0)
plt.show(block=True)
# =====================================================================
# Trigger when start as a script
# =====================================================================
if __name__ == '__main__':
_randMult_ex2()
| bsd-2-clause |
droythorne/folium | folium/folium.py | 4 | 50182 | # -*- coding: utf-8 -*-
"""
Folium
-------
Make beautiful, interactive maps with Python and Leaflet.js
"""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import codecs
import functools
import json
from uuid import uuid4
from jinja2 import Environment, PackageLoader
from pkg_resources import resource_string
from folium import utilities
from folium.six import text_type, binary_type, iteritems
import sys
import base64
ENV = Environment(loader=PackageLoader('folium', 'templates'))
def initialize_notebook():
"""Initialize the IPython notebook display elements."""
try:
from IPython.core.display import display, HTML
except ImportError:
print("IPython Notebook could not be loaded.")
lib_css = ENV.get_template('ipynb_init_css.html')
lib_js = ENV.get_template('ipynb_init_js.html')
leaflet_dvf = ENV.get_template('leaflet-dvf.markers.min.js')
display(HTML(lib_css.render()))
display(HTML(lib_js.render({'leaflet_dvf': leaflet_dvf.render()})))
def iter_obj(type):
"""Decorator to keep count of different map object types in self.mk_cnt."""
def decorator(func):
@functools.wraps(func)
def wrapper(self, *args, **kwargs):
self.mark_cnt[type] = self.mark_cnt.get(type, 0) + 1
func_result = func(self, *args, **kwargs)
return func_result
return wrapper
return decorator
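# Illustrative note (not part of the original module): each decorated method
# first bumps the per-type counter on the Map instance, e.g.
#
#   m = Map(location=[45.5, -122.3])
#   m.simple_marker([45.5, -122.3])
#   m.simple_marker([45.52, -122.31])
#   # now m.mark_cnt['simple'] == 2 and the markers are named marker_1, marker_2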
class Map(object):
"""Create a Map with Folium."""
def __init__(self, location=None, width='100%', height='100%',
tiles='OpenStreetMap', API_key=None, max_zoom=18, min_zoom=1,
zoom_start=10, attr=None, min_lat=-90, max_lat=90,
min_lon=-180, max_lon=180):
"""Create a Map with Folium and Leaflet.js
Generate a base map of given width and height with either default
tilesets or a custom tileset URL. The following tilesets are built-in
to Folium. Pass any of the following to the "tiles" keyword:
- "OpenStreetMap"
- "MapQuest Open"
- "MapQuest Open Aerial"
- "Mapbox Bright" (Limited levels of zoom for free tiles)
- "Mapbox Control Room" (Limited levels of zoom for free tiles)
- "Stamen" (Terrain, Toner, and Watercolor)
- "Cloudmade" (Must pass API key)
- "Mapbox" (Must pass API key)
- "CartoDB" (positron and dark_matter)
You can pass a custom tileset to Folium by passing a Leaflet-style
URL to the tiles parameter:
http://{s}.yourtiles.com/{z}/{x}/{y}.png
Parameters
----------
location: tuple or list, default None
Latitude and Longitude of Map (Northing, Easting).
width: pixel int or percentage string (default: '100%')
Width of the map.
height: pixel int or percentage string (default: '100%')
Height of the map.
tiles: str, default 'OpenStreetMap'
Map tileset to use. Can use defaults or pass a custom URL.
API_key: str, default None
API key for Cloudmade or Mapbox tiles.
max_zoom: int, default 18
Maximum zoom depth for the map.
zoom_start: int, default 10
Initial zoom level for the map.
attr: string, default None
Map tile attribution; only required if passing custom tile URL.
Returns
-------
Folium Map Object
Examples
--------
>>>map = folium.Map(location=[45.523, -122.675], width=750, height=500)
>>>map = folium.Map(location=[45.523, -122.675],
tiles='Mapbox Control Room')
>>>map = folium.Map(location=(45.523, -122.675), max_zoom=20,
tiles='Cloudmade', API_key='YourKey')
>>>map = folium.Map(location=[45.523, -122.675], zoom_start=2,
tiles=('http://{s}.tiles.mapbox.com/v3/'
'mapbox.control-room/{z}/{x}/{y}.png'),
attr='Mapbox attribution')
"""
# Inits.
self.map_path = None
self.render_iframe = False
self.map_type = 'base'
self.map_id = '_'.join(['folium', uuid4().hex])
# Mark counter, JSON, Plugins.
self.mark_cnt = {}
self.json_data = {}
self.plugins = {}
# No location means we will use automatic bounds and ignore zoom
self.location = location
# If location is not passed, we center the map at 0,0
if not location:
location = [0, 0]
zoom_start = min_zoom
# Map Size Parameters.
try:
if isinstance(width, int):
width_type = 'px'
assert width > 0
else:
width_type = '%'
width = int(width.strip('%'))
assert 0 <= width <= 100
except:
msg = "Cannot parse width {!r} as {!r}".format
raise ValueError(msg(width, width_type))
self.width = width
try:
if isinstance(height, int):
height_type = 'px'
assert height > 0
else:
height_type = '%'
height = int(height.strip('%'))
assert 0 <= height <= 100
except:
msg = "Cannot parse height {!r} as {!r}".format
raise ValueError(msg(height, height_type))
self.height = height
self.map_size = {'width': width, 'height': height}
self._size = ('style="width: {0}{1}; height: {2}{3}"'
.format(width, width_type, height, height_type))
# Templates.
self.env = ENV
self.template_vars = dict(lat=location[0],
lon=location[1],
size=self._size,
max_zoom=max_zoom,
zoom_level=zoom_start,
map_id=self.map_id,
min_zoom=min_zoom,
min_lat=min_lat,
max_lat=max_lat,
min_lon=min_lon,
max_lon=max_lon)
# Tiles.
self.tiles = ''.join(tiles.lower().strip().split())
if self.tiles in ('cloudmade', 'mapbox') and not API_key:
raise ValueError('You must pass an API key if using Cloudmade'
' or non-default Mapbox tiles.')
self.default_tiles = ['openstreetmap', 'mapboxcontrolroom',
'mapquestopen', 'mapquestopenaerial',
'mapboxbright', 'mapbox', 'cloudmade',
'stamenterrain', 'stamentoner',
'stamenwatercolor',
'cartodbpositron', 'cartodbdark_matter']
self.tile_types = {}
for tile in self.default_tiles:
tile_path = 'tiles/%s' % tile
self.tile_types[tile] = {
'templ': self.env.get_template('%s/%s' % (tile_path,
'tiles.txt')),
'attr': self.env.get_template('%s/%s' % (tile_path,
'attr.txt')),
}
if self.tiles in self.tile_types:
self.template_vars['Tiles'] = (self.tile_types[self.tiles]['templ']
.render(API_key=API_key))
self.template_vars['attr'] = (self.tile_types[self.tiles]['attr']
.render())
else:
self.template_vars['Tiles'] = tiles
if not attr:
raise ValueError('Custom tiles must'
' also be passed an attribution')
if isinstance(attr, binary_type):
attr = text_type(attr, 'utf8')
self.template_vars['attr'] = attr
self.tile_types.update({'Custom': {'template': tiles,
'attr': attr}})
self.added_layers = []
self.template_vars.setdefault('wms_layers', [])
self.template_vars.setdefault('tile_layers', [])
self.template_vars.setdefault('image_layers', [])
@iter_obj('simple')
def add_tile_layer(self, tile_name=None, tile_url=None, active=False):
"""Adds a simple tile layer.
Parameters
----------
tile_name: string
name of the tile layer
tile_url: string
url location of the tile layer
active: boolean
should the layer be active when added
"""
if tile_name not in self.added_layers:
tile_name = tile_name.replace(" ", "_")
tile_temp = self.env.get_template('tile_layer.js')
tile = tile_temp.render({'tile_name': tile_name,
'tile_url': tile_url})
self.template_vars.setdefault('tile_layers', []).append((tile))
self.added_layers.append({tile_name: tile_url})
@iter_obj('simple')
def add_wms_layer(self, wms_name=None, wms_url=None, wms_format=None,
wms_layers=None, wms_transparent=True):
"""Adds a simple tile layer.
Parameters
----------
wms_name: string
name of wms layer
wms_url : string
url of wms layer
"""
if wms_name not in self.added_layers:
wms_name = wms_name.replace(" ", "_")
wms_temp = self.env.get_template('wms_layer.js')
wms = wms_temp.render({
'wms_name': wms_name,
'wms_url': wms_url,
'wms_format': wms_format,
'wms_layer_names': wms_layers,
'wms_transparent': str(wms_transparent).lower()})
self.template_vars.setdefault('wms_layers', []).append((wms))
self.added_layers.append({wms_name: wms_url})
@iter_obj('simple')
def add_layers_to_map(self):
"""
Required function to actually add the layers to the HTML packet.
"""
layers_temp = self.env.get_template('add_layers.js')
data_string = ''
for i, layer in enumerate(self.added_layers):
name = list(layer.keys())[0]
            if i < len(self.added_layers)-1:
                term_string = ",\n"
            else:
                term_string = "\n"
            data_string += '\"{0}\": {0}{1}'.format(name, term_string)
data_layers = layers_temp.render({'layers': data_string})
self.template_vars.setdefault('data_layers', []).append((data_layers))
@iter_obj('simple')
def simple_marker(self, location=None, popup=None,
marker_color='blue', marker_icon='info-sign',
clustered_marker=False, icon_angle=0, popup_width=300):
"""Create a simple stock Leaflet marker on the map, with optional
popup text or Vincent visualization.
Parameters
----------
location: tuple or list, default None
Latitude and Longitude of Marker (Northing, Easting)
popup: string or tuple, default 'Pop Text'
Input text or visualization for object. Can pass either text,
or a tuple of the form (Vincent object, 'vis_path.json')
It is possible to adjust the width of text/HTML popups
using the optional keywords `popup_width` (default is 300px).
marker_color
color of marker you want
marker_icon
icon from (http://getbootstrap.com/components/) you want on the
marker
clustered_marker
boolean of whether or not you want the marker clustered with
other markers
Returns
-------
Marker names and HTML in obj.template_vars
Example
-------
>>>map.simple_marker(location=[45.5, -122.3], popup='Portland, OR')
>>>map.simple_marker(location=[45.5, -122.3], popup=(vis, 'vis.json'))
"""
count = self.mark_cnt['simple']
mark_temp = self.env.get_template('simple_marker.js')
marker_num = 'marker_{0}'.format(count)
add_line = "{'icon':"+marker_num+"_icon}"
icon_temp = self.env.get_template('simple_icon.js')
icon = icon_temp.render({'icon': marker_icon,
'icon_name': marker_num+"_icon",
'markerColor': marker_color,
'icon_angle': icon_angle})
# Get marker and popup.
marker = mark_temp.render({'marker': 'marker_' + str(count),
'lat': location[0],
'lon': location[1],
'icon': add_line
})
popup_out = self._popup_render(popup=popup, mk_name='marker_',
count=count, width=popup_width)
if clustered_marker:
add_mark = 'clusteredmarkers.addLayer(marker_{0})'.format(count)
name = 'cluster_markers'
else:
add_mark = 'map.addLayer(marker_{0})'.format(count)
name = 'custom_markers'
append = (icon, marker, popup_out, add_mark)
self.template_vars.setdefault(name, []).append(append)
@iter_obj('div_mark')
def div_markers(self, locations=None, popups=None,
marker_size=10, popup_width=300):
"""Create a simple div marker on the map, with optional
popup text or Vincent visualization. Useful for marking points along a
line.
Parameters
----------
locations: list of locations, where each location is an array
Latitude and Longitude of Marker (Northing, Easting)
popup: list of popups, each popup should be a string or tuple.
Default 'Pop Text'
Input text or visualization for object. Can pass either text,
or a tuple of the form (Vincent object, 'vis_path.json')
It is possible to adjust the width of text/HTML popups
using the optional keywords `popup_width`.
(Leaflet default is 300px.)
marker_size
default is 5
Returns
-------
Marker names and HTML in obj.template_vars
Example
-------
>>> map.div_markers(locations=[[37.421114, -122.128314],
... [37.391637, -122.085416],
... [37.388832, -122.087709]],
... popups=['1437494575531',
... '1437492135937',
... '1437493590434'])
"""
call_cnt = self.mark_cnt['div_mark']
if locations is None or popups is None:
raise RuntimeError("Both locations and popups are mandatory")
for (point_cnt, (location, popup)) in enumerate(zip(locations,
popups)):
marker_num = 'div_marker_{0}_{1}'.format(call_cnt, point_cnt)
icon_temp = self.env.get_template('static_div_icon.js')
icon_name = marker_num+"_icon"
icon = icon_temp.render({'icon_name': icon_name,
'size': marker_size})
mark_temp = self.env.get_template('simple_marker.js')
# Get marker and popup.
marker = mark_temp.render({'marker': marker_num,
'lat': location[0],
'lon': location[1],
'icon': "{'icon':"+icon_name+"}"
})
mk_name = 'div_marker_{0}_'.format(call_cnt)
popup_out = self._popup_render(popup=popup,
mk_name=mk_name,
count=point_cnt, width=popup_width)
add_mark = 'map.addLayer(div_marker_{0}_{1})'.format(call_cnt,
point_cnt)
append = (icon, marker, popup_out, add_mark)
self.template_vars.setdefault('div_markers', []).append(append)
@iter_obj('line')
def line(self, locations,
line_color=None, line_opacity=None, line_weight=None,
popup=None, popup_width=300):
"""Add a line to the map with optional styles.
Parameters
----------
locations: list of points (latitude, longitude)
Latitude and Longitude of line (Northing, Easting)
line_color: string, default Leaflet's default ('#03f')
line_opacity: float, default Leaflet's default (0.5)
line_weight: float, default Leaflet's default (5)
popup: string or tuple, default 'Pop Text'
Input text or visualization for object. Can pass either text,
or a tuple of the form (Vincent object, 'vis_path.json')
It is possible to adjust the width of text/HTML popups
using the optional keywords `popup_width` (default is 300px).
Note: If the optional styles are omitted, they will not be included
in the HTML output and will obtain the Leaflet defaults listed above.
Example
-------
>>>map.line(locations=[(45.5, -122.3), (42.3, -71.0)])
>>>map.line(locations=[(45.5, -122.3), (42.3, -71.0)],
line_color='red', line_opacity=1.0)
"""
count = self.mark_cnt['line']
line_temp = self.env.get_template('polyline.js')
polyline_opts = {'color': line_color, 'weight': line_weight,
'opacity': line_opacity}
varname = 'line_{}'.format(count)
line_rendered = line_temp.render({'line': varname,
'locations': locations,
'options': polyline_opts})
popup_out = self._popup_render(popup=popup, mk_name='line_',
count=count, width=popup_width)
add_line = 'map.addLayer({});'.format(varname)
append = (line_rendered, popup_out, add_line)
self.template_vars.setdefault('lines', []).append((append))
@iter_obj('multiline')
def multiline(self, locations, line_color=None, line_opacity=None,
line_weight=None):
"""Add a multiPolyline to the map with optional styles.
        A multiPolyline is a single layer that consists of several polylines that
share styling/popup.
Parameters
----------
locations: list of lists of points (latitude, longitude)
Latitude and Longitude of line (Northing, Easting)
line_color: string, default Leaflet's default ('#03f')
line_opacity: float, default Leaflet's default (0.5)
line_weight: float, default Leaflet's default (5)
Note: If the optional styles are omitted, they will not be included
in the HTML output and will obtain the Leaflet defaults listed above.
Example
-------
# FIXME: Add another example.
>>> m.multiline(locations=[[(45.5236, -122.675), (45.5236, -122.675)],
[(45.5237, -122.675), (45.5237, -122.675)],
[(45.5238, -122.675), (45.5238, -122.675)]])
>>> m.multiline(locations=[[(45.5236, -122.675), (45.5236, -122.675)],
[(45.5237, -122.675), (45.5237, -122.675)],
[(45.5238, -122.675), (45.5238, -122.675)]],
line_color='red', line_weight=2,
line_opacity=1.0)
"""
count = self.mark_cnt['multiline']
multiline_temp = self.env.get_template('multi_polyline.js')
multiline_opts = {'color': line_color, 'weight': line_weight,
'opacity': line_opacity}
varname = 'multiline_{}'.format(count)
multiline_rendered = multiline_temp.render({'multiline': varname,
'locations': locations,
'options': multiline_opts})
add_multiline = 'map.addLayer({});'.format(varname)
append = (multiline_rendered, add_multiline)
self.template_vars.setdefault('multilines', []).append(append)
@iter_obj('circle')
def circle_marker(self, location=None, radius=500, popup=None,
line_color='black', fill_color='black',
fill_opacity=0.6, popup_width=300):
"""Create a simple circle marker on the map, with optional popup text
or Vincent visualization.
Parameters
----------
location: tuple or list, default None
Latitude and Longitude of Marker (Northing, Easting)
radius: int, default 500
Circle radius, in pixels
popup: string or tuple, default 'Pop Text'
Input text or visualization for object. Can pass either text,
or a tuple of the form (Vincent object, 'vis_path.json')
It is possible to adjust the width of text/HTML popups
using the optional keywords `popup_width` (default is 300px).
line_color: string, default black
Line color. Can pass hex value here as well.
fill_color: string, default black
Fill color. Can pass hex value here as well.
fill_opacity: float, default 0.6
Circle fill opacity
Returns
-------
Circle names and HTML in obj.template_vars
Example
-------
>>>map.circle_marker(location=[45.5, -122.3],
radius=1000, popup='Portland, OR')
>>>map.circle_marker(location=[45.5, -122.3],
radius=1000, popup=(bar_chart, 'bar_data.json'))
"""
count = self.mark_cnt['circle']
circle_temp = self.env.get_template('circle_marker.js')
circle = circle_temp.render({'circle': 'circle_' + str(count),
'radius': radius,
'lat': location[0], 'lon': location[1],
'line_color': line_color,
'fill_color': fill_color,
'fill_opacity': fill_opacity})
popup_out = self._popup_render(popup=popup, mk_name='circle_',
count=count, width=popup_width)
add_mark = 'map.addLayer(circle_{0})'.format(count)
self.template_vars.setdefault('markers', []).append((circle,
popup_out,
add_mark))
@iter_obj('polygon')
def polygon_marker(self, location=None, line_color='black', line_opacity=1,
line_weight=2, fill_color='blue', fill_opacity=1,
num_sides=4, rotation=0, radius=15, popup=None,
popup_width=300):
"""Custom markers using the Leaflet Data Vis Framework.
Parameters
----------
location: tuple or list, default None
Latitude and Longitude of Marker (Northing, Easting)
line_color: string, default 'black'
Marker line color
line_opacity: float, default 1
Line opacity, scale 0-1
line_weight: int, default 2
Stroke weight in pixels
fill_color: string, default 'blue'
Marker fill color
fill_opacity: float, default 1
Marker fill opacity
num_sides: int, default 4
Number of polygon sides
rotation: int, default 0
Rotation angle in degrees
radius: int, default 15
Marker radius, in pixels
popup: string or tuple, default 'Pop Text'
Input text or visualization for object. Can pass either text,
or a tuple of the form (Vincent object, 'vis_path.json')
It is possible to adjust the width of text/HTML popups
using the optional keywords `popup_width` (default is 300px).
Returns
-------
Polygon marker names and HTML in obj.template_vars
"""
count = self.mark_cnt['polygon']
poly_temp = self.env.get_template('poly_marker.js')
polygon = poly_temp.render({'marker': 'polygon_' + str(count),
'lat': location[0],
'lon': location[1],
'line_color': line_color,
'line_opacity': line_opacity,
'line_weight': line_weight,
'fill_color': fill_color,
'fill_opacity': fill_opacity,
'num_sides': num_sides,
'rotation': rotation,
'radius': radius})
popup_out = self._popup_render(popup=popup, mk_name='polygon_',
count=count, width=popup_width)
add_mark = 'map.addLayer(polygon_{0})'.format(count)
self.template_vars.setdefault('markers', []).append((polygon,
popup_out,
add_mark))
# Update JS/CSS and other Plugin files.
js_temp = self.env.get_template('dvf_js_ref.txt').render()
self.template_vars.update({'dvf_js': js_temp})
polygon_js = resource_string('folium',
'plugins/leaflet-dvf.markers.min.js')
self.plugins.update({'leaflet-dvf.markers.min.js': polygon_js})
def lat_lng_popover(self):
"""Enable popovers to display Lat and Lon on each click."""
latlng_temp = self.env.get_template('lat_lng_popover.js')
self.template_vars.update({'lat_lng_pop': latlng_temp.render()})
def click_for_marker(self, popup=None):
"""Enable the addition of markers via clicking on the map. The marker
popup defaults to Lat/Lon, but custom text can be passed via the
popup parameter. Double click markers to remove them.
Parameters
----------
popup:
Custom popup text
Example
-------
>>>map.click_for_marker(popup='Your Custom Text')
"""
latlng = '"Latitude: " + lat + "<br>Longitude: " + lng '
click_temp = self.env.get_template('click_for_marker.js')
if popup:
popup_txt = ''.join(['"', popup, '"'])
else:
popup_txt = latlng
click_str = click_temp.render({'popup': popup_txt})
self.template_vars.update({'click_pop': click_str})
def fit_bounds(self, bounds, padding_top_left=None,
padding_bottom_right=None, padding=None, max_zoom=None):
"""Fit the map to contain a bounding box with the maximum zoom level possible.
Parameters
----------
bounds: list of (latitude, longitude) points
Bounding box specified as two points [southwest, northeast]
padding_top_left: (x, y) point, default None
Padding in the top left corner. Useful if some elements in
the corner, such as controls, might obscure objects you're zooming
to.
padding_bottom_right: (x, y) point, default None
Padding in the bottom right corner.
padding: (x, y) point, default None
Equivalent to setting both top left and bottom right padding to
the same value.
max_zoom: int, default None
Maximum zoom to be used.
Example
-------
>>> map.fit_bounds([[52.193636, -2.221575], [52.636878, -1.139759]])
"""
options = {
'paddingTopLeft': padding_top_left,
'paddingBottomRight': padding_bottom_right,
'padding': padding,
'maxZoom': max_zoom,
}
fit_bounds_options = {}
for key, opt in options.items():
if opt:
fit_bounds_options[key] = opt
fit_bounds = self.env.get_template('fit_bounds.js')
fit_bounds_str = fit_bounds.render({
'bounds': json.dumps(bounds),
'fit_bounds_options': json.dumps(fit_bounds_options,
sort_keys=True),
})
self.template_vars.update({'fit_bounds': fit_bounds_str})
def add_plugin(self, plugin):
"""Adds a plugin to the map.
Parameters
----------
plugin: folium.plugins object
A plugin to be added to the map. It has to implement the
methods `render_html`, `render_css` and `render_js`.
"""
plugin.add_to_map(self)
def _auto_bounds(self):
if 'fit_bounds' in self.template_vars:
return
# Get count for each feature type
ft_names = ["marker", "line", "circle", "polygon", "multiline"]
ft_names = [i for i in ft_names if i in self.mark_cnt]
# Make a comprehensive list of all the features we want to fit
feat_str = ["{name}_{count}".format(name=ft_name,
count=self.mark_cnt[ft_name])
for ft_name in ft_names for
count in range(1, self.mark_cnt[ft_name]+1)]
feat_str = "[" + ', '.join(feat_str) + "]"
fit_bounds = self.env.get_template('fit_bounds.js')
fit_bounds_str = fit_bounds.render({
'autobounds': not self.location,
'features': feat_str,
'fit_bounds_options': json.dumps({'padding': [30, 30]}),
})
self.template_vars.update({'fit_bounds': fit_bounds_str.strip()})
def _popup_render(self, popup=None, mk_name=None, count=None,
width=300):
"""Popup renderer: either text or Vincent/Vega.
Parameters
----------
popup: str or Vincent tuple, default None
String for text popup, or tuple of (Vincent object, json_path)
mk_name: str, default None
Type of marker. Simple, Circle, etc.
count: int, default None
Count of marker
"""
if not popup:
return ''
else:
if sys.version_info >= (3, 0):
utype, stype = str, bytes
else:
utype, stype = unicode, str
if isinstance(popup, (utype, stype)):
popup_temp = self.env.get_template('simple_popup.js')
if isinstance(popup, utype):
popup_txt = popup.encode('ascii', 'xmlcharrefreplace')
else:
popup_txt = popup
if sys.version_info >= (3, 0):
popup_txt = popup_txt.decode()
pop_txt = json.dumps(str(popup_txt))
return popup_temp.render({'pop_name': mk_name + str(count),
'pop_txt': pop_txt, 'width': width})
elif isinstance(popup, tuple):
# Update template with JS libs.
vega_temp = self.env.get_template('vega_ref.txt').render()
jquery_temp = self.env.get_template('jquery_ref.txt').render()
d3_temp = self.env.get_template('d3_ref.txt').render()
vega_parse = self.env.get_template('vega_parse.js').render()
self.template_vars.update({'vega': vega_temp,
'd3': d3_temp,
'jquery': jquery_temp,
'vega_parse': vega_parse})
# Parameters for Vega template.
vega = popup[0]
mark = ''.join([mk_name, str(count)])
json_out = popup[1]
div_id = popup[1].split('.')[0]
width = vega.width
height = vega.height
if isinstance(vega.padding, dict):
width += vega.padding['left']+vega.padding['right']
height += vega.padding['top']+vega.padding['bottom']
else:
width += 75
height += 50
max_width = max([self.map_size['width'], width])
vega_id = '#' + div_id
popup_temp = self.env.get_template('vega_marker.js')
return popup_temp.render({'mark': mark, 'div_id': div_id,
'width': width, 'height': height,
'max_width': max_width,
'json_out': json_out,
'vega_id': vega_id})
else:
raise TypeError("Unrecognized popup type: {!r}".format(popup))
@iter_obj('geojson')
def geo_json(self, geo_path=None, geo_str=None, data_out='data.json',
data=None, columns=None, key_on=None, threshold_scale=None,
fill_color='blue', fill_opacity=0.6, line_color='black',
line_weight=1, line_opacity=1, legend_name=None,
topojson=None, reset=False):
"""Apply a GeoJSON overlay to the map.
Plot a GeoJSON overlay on the base map. There is no requirement
to bind data (passing just a GeoJSON plots a single-color overlay),
but there is a data binding option to map your columnar data to
different feature objects with a color scale.
If data is passed as a Pandas dataframe, the "columns" and "key-on"
keywords must be included, the first to indicate which DataFrame
columns to use, the second to indicate the layer in the GeoJSON
on which to key the data. The 'columns' keyword does not need to be
passed for a Pandas series.
Colors are generated from color brewer (http://colorbrewer2.org/)
sequential palettes on a D3 threshold scale. The scale defaults to the
following quantiles: [0, 0.5, 0.75, 0.85, 0.9]. A custom scale can be
passed to `threshold_scale` of length <=6, in order to match the
color brewer range.
TopoJSONs can be passed as "geo_path", but the "topojson" keyword must
also be passed with the reference to the topojson objects to convert.
See the topojson.feature method in the TopoJSON API reference:
https://github.com/mbostock/topojson/wiki/API-Reference
Parameters
----------
geo_path: string, default None
URL or File path to your GeoJSON data
geo_str: string, default None
String of GeoJSON, alternative to geo_path
data_out: string, default 'data.json'
Path to write Pandas DataFrame/Series to JSON if binding data
data: Pandas DataFrame or Series, default None
Data to bind to the GeoJSON.
columns: dict or tuple, default None
If the data is a Pandas DataFrame, the columns of data to be bound.
Must pass column 1 as the key, and column 2 the values.
key_on: string, default None
Variable in the GeoJSON file to bind the data to. Must always
            start with 'feature' and be in JavaScript object notation.
Ex: 'feature.id' or 'feature.properties.statename'.
threshold_scale: list, default None
Data range for D3 threshold scale. Defaults to the following range
of quantiles: [0, 0.5, 0.75, 0.85, 0.9], rounded to the nearest
order-of-magnitude integer. Ex: 270 rounds to 200, 5600 to 6000.
fill_color: string, default 'blue'
Area fill color. Can pass a hex code, color name, or if you are
binding data, one of the following color brewer palettes:
'BuGn', 'BuPu', 'GnBu', 'OrRd', 'PuBu', 'PuBuGn', 'PuRd', 'RdPu',
'YlGn', 'YlGnBu', 'YlOrBr', and 'YlOrRd'.
fill_opacity: float, default 0.6
Area fill opacity, range 0-1.
line_color: string, default 'black'
GeoJSON geopath line color.
line_weight: int, default 1
GeoJSON geopath line weight.
line_opacity: float, default 1
GeoJSON geopath line opacity, range 0-1.
legend_name: string, default None
Title for data legend. If not passed, defaults to columns[1].
topojson: string, default None
If using a TopoJSON, passing "objects.yourfeature" to the topojson
keyword argument will enable conversion to GeoJSON.
reset: boolean, default False
Remove all current geoJSON layers, start with new layer
Output
------
GeoJSON data layer in obj.template_vars
Example
-------
>>> m.geo_json(geo_path='us-states.json', line_color='blue',
line_weight=3)
>>> m.geo_json(geo_path='geo.json', data=df,
columns=['Data 1', 'Data 2'],
key_on='feature.properties.myvalue', fill_color='PuBu',
threshold_scale=[0, 20, 30, 40, 50, 60])
>>> m.geo_json(geo_path='countries.json', topojson='objects.countries')
"""
if reset:
reset_vars = ['json_paths', 'func_vars', 'color_scales',
'geo_styles', 'gjson_layers', 'map_legends',
'topo_convert']
for var in reset_vars:
self.template_vars.update({var: []})
self.mark_cnt['geojson'] = 1
def json_style(style_cnt, line_color, line_weight, line_opacity,
fill_color, fill_opacity, quant_fill):
"""Generate JSON styling function from template"""
style_temp = self.env.get_template('geojson_style.js')
style = style_temp.render({'style': style_cnt,
'line_color': line_color,
'line_weight': line_weight,
'line_opacity': line_opacity,
'fill_color': fill_color,
'fill_opacity': fill_opacity,
'quantize_fill': quant_fill})
return style
# Set map type to geojson.
self.map_type = 'geojson'
# Get JSON map layer template pieces, convert TopoJSON if necessary.
# geo_str is really a hack.
if geo_path:
geo_path = ".defer(d3.json, '{0}')".format(geo_path)
elif geo_str:
fmt = (".defer(function(callback)"
"{{callback(null, JSON.parse('{}'))}})").format
geo_path = fmt(geo_str)
if topojson is None:
map_var = '_'.join(['gjson', str(self.mark_cnt['geojson'])])
layer_var = map_var
else:
map_var = '_'.join(['tjson', str(self.mark_cnt['geojson'])])
topo_obj = '.'.join([map_var, topojson])
layer_var = '_'.join(['topo', str(self.mark_cnt['geojson'])])
topo_templ = self.env.get_template('topo_func.js')
topo_func = topo_templ.render({'map_var': layer_var,
't_var': map_var,
't_var_obj': topo_obj})
topo_lib = self.env.get_template('topojson_ref.txt').render()
self.template_vars.update({'topojson': topo_lib})
self.template_vars.setdefault('topo_convert',
[]).append(topo_func)
style_count = '_'.join(['style', str(self.mark_cnt['geojson'])])
# Get Data binding pieces if available.
if data is not None:
import pandas as pd
# Create DataFrame with only the relevant columns.
if isinstance(data, pd.DataFrame):
data = pd.concat([data[columns[0]], data[columns[1]]], axis=1)
# Save data to JSON.
self.json_data[data_out] = utilities.transform_data(data)
# Add data to queue.
d_path = ".defer(d3.json, '{0}')".format(data_out)
self.template_vars.setdefault('json_paths', []).append(d_path)
# Add data variable to makeMap function.
data_var = '_'.join(['data', str(self.mark_cnt['geojson'])])
self.template_vars.setdefault('func_vars', []).append(data_var)
# D3 Color scale.
series = data[columns[1]]
if threshold_scale and len(threshold_scale) > 6:
                raise ValueError('The threshold scale must be of length <= 6 '
                                 'to match the color brewer range.')
domain = threshold_scale or utilities.split_six(series=series)
if len(domain) > 253:
raise ValueError('The threshold scale must be length <= 253')
if not utilities.color_brewer(fill_color):
raise ValueError('Please pass a valid color brewer code to '
                                 'fill_color. See docstring for valid codes.')
palette = utilities.color_brewer(fill_color, len(domain))
d3range = palette[0: len(domain) + 1]
tick_labels = utilities.legend_scaler(domain)
color_temp = self.env.get_template('d3_threshold.js')
d3scale = color_temp.render({'domain': domain,
'range': d3range})
self.template_vars.setdefault('color_scales', []).append(d3scale)
# Create legend.
name = legend_name or columns[1]
leg_templ = self.env.get_template('d3_map_legend.js')
legend = leg_templ.render({'lin_max': int(domain[-1]*1.1),
'tick_labels': tick_labels,
'caption': name})
self.template_vars.setdefault('map_legends', []).append(legend)
# Style with color brewer colors.
matchColor = 'color(matchKey({0}, {1}))'.format(key_on, data_var)
style = json_style(style_count, line_color, line_weight,
line_opacity, None, fill_opacity, matchColor)
else:
style = json_style(style_count, line_color, line_weight,
line_opacity, fill_color, fill_opacity, None)
layer = ('gJson_layer_{0} = L.geoJson({1}, {{style: {2},'
'onEachFeature: onEachFeature}}).addTo(map)'
.format(self.mark_cnt['geojson'], layer_var, style_count))
self.template_vars.setdefault('json_paths', []).append(geo_path)
self.template_vars.setdefault('func_vars', []).append(map_var)
self.template_vars.setdefault('geo_styles', []).append(style)
self.template_vars.setdefault('gjson_layers', []).append(layer)
@iter_obj('image_overlay')
def image_overlay(self, data, opacity=0.25, min_lat=-90.0, max_lat=90.0,
min_lon=-180.0, max_lon=180.0, image_name=None,
filename=None):
"""
Simple image overlay of raster data from a numpy array. This is a
lightweight way to overlay geospatial data on top of a map. If your
data is high res, consider implementing a WMS server and adding a WMS
layer.
This function works by generating a PNG file from a numpy array. If
you do not specify a filename, it will embed the image inline.
Otherwise, it saves the file in the current directory, and then adds
it as an image overlay layer in leaflet.js. By default, the image is
placed and stretched using bounds that cover the entire globe.
Parameters
----------
data: numpy array OR url string, required.
if numpy array, must be a image format,
i.e., NxM (mono), NxMx3 (rgb), or NxMx4 (rgba)
if url, must be a valid url to a image (local or external)
opacity: float, default 0.25
Image layer opacity in range 0 (transparent) to 1 (opaque)
min_lat: float, default -90.0
max_lat: float, default 90.0
min_lon: float, default -180.0
max_lon: float, default 180.0
image_name: string, default None
The name of the layer object in leaflet.js
filename: string, default None
Optional file name of output.png for image overlay.
Use `None` for inline PNG.
Output
------
Image overlay data layer in obj.template_vars
Examples
-------
# assumes a map object `m` has been created
>>> import numpy as np
>>> data = np.random.random((100,100))
# to make a rgba from a specific matplotlib colormap:
>>> import matplotlib.cm as cm
        >>> cmapper = cm.ScalarMappable(cmap='jet')
>>> data2 = cmapper.to_rgba(np.random.random((100,100)))
>>> # Place the data over all of the globe (will be pretty pixelated!)
>>> m.image_overlay(data)
>>> # Put it only over a single city (Paris).
>>> m.image_overlay(data, min_lat=48.80418, max_lat=48.90970,
... min_lon=2.25214, max_lon=2.44731)
"""
if isinstance(data, str):
filename = data
else:
try:
png_str = utilities.write_png(data)
except Exception as e:
raise e
if filename is not None:
with open(filename, 'wb') as fd:
fd.write(png_str)
else:
png = "data:image/png;base64,{}".format
filename = png(base64.b64encode(png_str).decode('utf-8'))
if image_name not in self.added_layers:
if image_name is None:
image_name = "Image_Overlay"
else:
image_name = image_name.replace(" ", "_")
image_url = filename
image_bounds = [[min_lat, min_lon], [max_lat, max_lon]]
image_opacity = opacity
image_temp = self.env.get_template('image_layer.js')
image = image_temp.render({'image_name': image_name,
'image_url': image_url,
'image_bounds': image_bounds,
'image_opacity': image_opacity})
self.template_vars['image_layers'].append(image)
self.added_layers.append(image_name)
def _build_map(self, html_templ=None, templ_type='string'):
        """Build HTML/JS/CSS from Templates given current map type."""
        self._auto_bounds()
if html_templ is None:
map_types = {'base': 'fol_template.html',
'geojson': 'geojson_template.html'}
# Check current map type.
type_temp = map_types[self.map_type]
html_templ = self.env.get_template(type_temp)
else:
if templ_type == 'string':
html_templ = self.env.from_string(html_templ)
self.HTML = html_templ.render(self.template_vars, plugins=self.plugins)
def create_map(self, path='map.html', plugin_data_out=True, template=None):
"""Write Map output to HTML and data output to JSON if available.
Parameters:
-----------
path: string, default 'map.html'
Path for HTML output for map
plugin_data_out: boolean, default True
If using plugins such as awesome markers, write all plugin
data such as JS/CSS/images to path
template: string, default None
Custom template to render
"""
self.map_path = path
self._build_map(template)
with codecs.open(path, 'w', 'utf8') as f:
f.write(self.HTML)
if self.json_data:
for path, data in iteritems(self.json_data):
with open(path, 'w') as g:
json.dump(data, g)
if self.plugins and plugin_data_out:
for name, plugin in iteritems(self.plugins):
with open(name, 'w') as f:
if isinstance(plugin, binary_type):
plugin = text_type(plugin, 'utf8')
f.write(plugin)
def _repr_html_(self):
"""Build the HTML representation for IPython."""
map_types = {'base': 'ipynb_repr.html',
'geojson': 'ipynb_iframe.html'}
# Check current map type.
type_temp = map_types[self.map_type]
if self.render_iframe:
type_temp = 'ipynb_iframe.html'
templ = self.env.get_template(type_temp)
self._build_map(html_templ=templ, templ_type='temp')
if self.map_type == 'geojson' or self.render_iframe:
if not self.map_path:
raise ValueError('Use create_map to set the path!')
return templ.render(path=self.map_path, width=self.width,
height=self.height)
return self.HTML
def display(self):
"""Display the visualization inline in the IPython notebook.
This is deprecated, use the following instead::
from IPython.display import display
display(viz)
"""
from IPython.core.display import display, HTML
display(HTML(self._repr_html_()))
| mit |
deeplook/bokeh | bokeh/charts/builder/histogram_builder.py | 43 | 9142 | """This is the Bokeh charts interface. It gives you a high level API to build
complex plots in a simple way.
This is the Histogram class which lets you build your histograms by just passing
the arguments to the Chart class and calling the proper functions.
"""
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2014, Continuum Analytics, Inc. All rights reserved.
#
# Powered by the Bokeh Development Team.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from __future__ import absolute_import
try:
import scipy.special
_is_scipy = True
except ImportError as e:
_is_scipy = False
import numpy as np
from ..utils import chunk, cycle_colors
from .._builder import Builder, create_and_build
from ...models import ColumnDataSource, GlyphRenderer, Range1d
from ...models.glyphs import Line, Quad
from ...properties import Bool, Float, Int
#-----------------------------------------------------------------------------
# Classes and functions
#-----------------------------------------------------------------------------
def Histogram(values, bins, mu=None, sigma=None, density=True, **kws):
""" Create a histogram chart using :class:`HistogramBuilder <bokeh.charts.builder.histogram_builder.HistogramBuilder>`
to render the geometry from values, bins, sigma and density.
Args:
values (iterable): iterable 2d representing the data series
values matrix.
bins (int): number of bins to use in the Histogram building.
mu (float, optional): theoretical mean value for the normal
distribution. (default: None)
sigma (float, optional): theoretical sigma value for the
normal distribution. (default: None)
density (bool, optional): If False, the result will contain
the number of samples in each bin. If True, the result
is the value of the probability *density* function at
the bin, normalized such that the *integral* over the
range is 1. For more info check numpy.histogram
function documentation. (default: True)
    In addition to the parameters specific to this chart,
:ref:`userguide_charts_generic_arguments` are also accepted as keyword parameters.
Returns:
a new :class:`Chart <bokeh.charts.Chart>`
Examples:
.. bokeh-plot::
:source-position: above
import pandas as pd
from bokeh.charts import Histogram, output_file, show
# (dict, OrderedDict, lists, arrays and DataFrames are valid inputs)
xyvalues = pd.DataFrame(dict(normal=[1, 2, 3, 1], lognormal=[5, 4, 4, 1]))
hm = Histogram(xyvalues, bins=5, title='Histogram')
output_file('histogram.html')
show(hm)
"""
return create_and_build(
HistogramBuilder, values, bins=bins, mu=mu, sigma=sigma, density=density,
**kws
)
class HistogramBuilder(Builder):
"""This is the Histogram class and it is in charge of plotting
histograms in an easy and intuitive way.
Essentially, we provide a way to ingest the data, make the proper
calculations and push the references into a source object.
We additionally make calculations for the ranges.
And finally add the needed glyphs (quads and lines) taking the
references from the source.
"""
bins = Int(10, help="""
Number of bins to use for the histogram. (default: 10)
""")
mu = Float(help="""
Theoretical mean value for the normal distribution. (default: None)
""")
sigma = Float(help="""
Theoretical standard deviation value for the normal distribution.
(default: None)
""")
density = Bool(True, help="""
Whether to normalize the histogram. (default: True)
If True, the result is the value of the probability *density* function
at the bin, normalized such that the *integral* over the range is 1. If
False, the result will contain the number of samples in each bin.
For more info check ``numpy.histogram`` function documentation.
""")
def _process_data(self):
"""Take the Histogram data from the input **value.
It calculates the chart properties accordingly. Then build a dict
containing references to all the calculated points to be used by
the quad and line glyphs inside the ``_yield_renderers`` method.
"""
        # list to save all the groups available in the incoming input
self._groups.extend(self._values.keys())
# fill the data dictionary with the proper values
for i, (val, values) in enumerate(self._values.items()):
self.set_and_get("", val, values)
            # build the histogram using the configured number of bins
hist, edges = np.histogram(
np.array(values), density=self.density, bins=self.bins
)
self.set_and_get("hist", val, hist)
self.set_and_get("edges", val, edges)
self.set_and_get("left", val, edges[:-1])
self.set_and_get("right", val, edges[1:])
self.set_and_get("bottom", val, np.zeros(len(hist)))
self._mu_and_sigma = False
if self.mu is not None and self.sigma is not None:
if _is_scipy:
self._mu_and_sigma = True
self.set_and_get("x", val, np.linspace(-2, 2, len(self._data[val])))
den = 2 * self.sigma ** 2
x_val = self._data["x" + val]
x_val_mu = x_val - self.mu
sigsqr2pi = self.sigma * np.sqrt(2 * np.pi)
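                    # Closed-form normal density and distribution function:
                    #   pdf(x) = exp(-(x - mu)**2 / (2*sigma**2)) / (sigma*sqrt(2*pi))
                    #   cdf(x) = (1 + erf((x - mu) / (sigma*sqrt(2)))) / 2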
pdf = 1 / (sigsqr2pi) * np.exp(-x_val_mu ** 2 / den)
self.set_and_get("pdf", val, pdf)
self._groups.append("pdf")
cdf = (1 + scipy.special.erf(x_val_mu / np.sqrt(den))) / 2
self.set_and_get("cdf", val, cdf)
self._groups.append("cdf")
else:
print("You need scipy to get the theoretical probability distributions.")
def _set_sources(self):
"""Push the Histogram data into the ColumnDataSource and calculate
the proper ranges."""
self._source = ColumnDataSource(data=self._data)
if not self._mu_and_sigma:
x_names, y_names = self._attr[2::6], self._attr[1::6]
else:
x_names, y_names = self._attr[2::9], self._attr[1::9]
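        # Each input series contributes 6 entries to self._attr (values, hist,
        # edges, left, right, bottom), or 9 when the pdf/cdf are added, so the
        # strided slices above select the "edges" (x) and "hist" (y) columns
        # of every series.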
endx = max(max(self._data[i]) for i in x_names)
startx = min(min(self._data[i]) for i in x_names)
self.x_range = Range1d(start=startx - 0.1 * (endx - startx),
end=endx + 0.1 * (endx - startx))
endy = max(max(self._data[i]) for i in y_names)
self.y_range = Range1d(start=0, end=1.1 * endy)
def _yield_renderers(self):
"""Use the several glyphs to display the Histogram and pdf/cdf.
It uses the quad (and line) glyphs to display the Histogram
bars, taking as reference points the data loaded at the
ColumnDataSurce.
"""
if not self._mu_and_sigma:
sextets = list(chunk(self._attr, 6))
colors = cycle_colors(sextets, self.palette)
# TODO (bev) this is a perfect use for a namedtuple
# sextet: values, his, edges, left, right, bottom
for i, sextet in enumerate(sextets):
glyph = Quad(
top=sextet[1], bottom=sextet[5], left=sextet[3], right=sextet[4],
fill_color=colors[i], fill_alpha=0.7,
line_color="white", line_alpha=1.0
)
renderer = GlyphRenderer(data_source=self._source, glyph=glyph)
self._legends.append((self._groups[i], [renderer]))
yield renderer
else:
nonets = list(chunk(self._attr, 9))
colors = cycle_colors(nonets, self.palette)
# TODO (bev) this is a perfect use for a namedtuple
# nonet: values, his, edges, left, right, bottom, x, pdf, cdf
for i, nonet in enumerate(nonets):
glyph = Quad(
top=nonet[1], bottom=nonet[5], left=nonet[3], right=nonet[4],
fill_color=colors[i], fill_alpha=0.7,
line_color="white", line_alpha=1.0
)
renderer = GlyphRenderer(data_source=self._source, glyph=glyph)
self._legends.append((self._groups[i], [renderer]))
yield renderer
glyph = Line(x=nonet[6], y=nonet[7], line_color="black")
yield GlyphRenderer(data_source=self._source, glyph=glyph)
glyph = Line(x=nonet[6], y=nonet[8], line_color="blue")
yield GlyphRenderer(data_source=self._source, glyph=glyph)
| bsd-3-clause |
lthurlow/Network-Grapher | proj/external/matplotlib-1.2.1/lib/matplotlib/backends/backend_ps.py | 2 | 60197 | """
A PostScript backend, which can produce both PostScript .ps and .eps
"""
# PY3KTODO: Get rid of "print >>fh" syntax
from __future__ import division, print_function
import glob, math, os, shutil, sys, time
def _fn_name(): return sys._getframe(1).f_code.co_name
import io
if sys.version_info[0] < 3:
import cStringIO
try:
from hashlib import md5
except ImportError:
from md5 import md5 #Deprecated in 2.5
from tempfile import mkstemp
from matplotlib import verbose, __version__, rcParams
from matplotlib._pylab_helpers import Gcf
from matplotlib.afm import AFM
from matplotlib.backend_bases import RendererBase, GraphicsContextBase,\
FigureManagerBase, FigureCanvasBase
from matplotlib.cbook import is_string_like, get_realpath_and_stat, \
is_writable_file_like, maxdict
from matplotlib.mlab import quad2cubic
from matplotlib.figure import Figure
from matplotlib.font_manager import findfont, is_opentype_cff_font
from matplotlib.ft2font import FT2Font, KERNING_DEFAULT, LOAD_NO_HINTING
from matplotlib.ttconv import convert_ttf_to_ps
from matplotlib.mathtext import MathTextParser
from matplotlib._mathtext_data import uni2type1
from matplotlib.text import Text
from matplotlib.path import Path
from matplotlib.transforms import Affine2D
from matplotlib.backends.backend_mixed import MixedModeRenderer
import numpy as np
import binascii
import re
try:
set
except NameError:
from sets import Set as set
if sys.platform.startswith('win'): cmd_split = '&'
else: cmd_split = ';'
backend_version = 'Level II'
debugPS = 0
class PsBackendHelper(object):
def __init__(self):
self._cached = {}
@property
def gs_exe(self):
"""
executable name of ghostscript.
"""
try:
return self._cached["gs_exe"]
except KeyError:
pass
if sys.platform == 'win32': gs_exe = 'gswin32c'
else: gs_exe = 'gs'
self._cached["gs_exe"] = gs_exe
return gs_exe
@property
def gs_version(self):
"""
version of ghostscript.
"""
try:
return self._cached["gs_version"]
except KeyError:
pass
from subprocess import Popen, PIPE
pipe = Popen(self.gs_exe + " --version",
shell=True, stdout=PIPE).stdout
if sys.version_info[0] >= 3:
ver = pipe.read().decode('ascii')
else:
ver = pipe.read()
gs_version = tuple(map(int, ver.strip().split(".")))
self._cached["gs_version"] = gs_version
return gs_version
@property
def supports_ps2write(self):
"""
True if the installed ghostscript supports ps2write device.
"""
return self.gs_version[0] >= 9
ps_backend_helper = PsBackendHelper()
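# Minimal usage sketch (assumes a ghostscript binary is installed and on PATH):
#   >>> ps_backend_helper.gs_exe             # 'gswin32c' on win32, 'gs' elsewhere
#   >>> ps_backend_helper.gs_version         # e.g. (9, 5), parsed from "gs --version"
#   >>> ps_backend_helper.supports_ps2write  # True for gs >= 9
# Both gs_exe and gs_version are computed once and memoised in self._cached.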
papersize = {'letter': (8.5,11),
'legal': (8.5,14),
'ledger': (11,17),
'a0': (33.11,46.81),
'a1': (23.39,33.11),
'a2': (16.54,23.39),
'a3': (11.69,16.54),
'a4': (8.27,11.69),
'a5': (5.83,8.27),
'a6': (4.13,5.83),
'a7': (2.91,4.13),
'a8': (2.07,2.91),
'a9': (1.457,2.05),
'a10': (1.02,1.457),
'b0': (40.55,57.32),
'b1': (28.66,40.55),
'b2': (20.27,28.66),
'b3': (14.33,20.27),
'b4': (10.11,14.33),
'b5': (7.16,10.11),
'b6': (5.04,7.16),
'b7': (3.58,5.04),
'b8': (2.51,3.58),
'b9': (1.76,2.51),
'b10': (1.26,1.76)}
def _get_papertype(w, h):
keys = papersize.keys()
keys.sort()
keys.reverse()
for key in keys:
if key.startswith('l'): continue
pw, ph = papersize[key]
if (w < pw) and (h < ph): return key
else:
return 'a0'
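# Hand-traced example of the lookup above: _get_papertype(7, 10) walks the keys in
# reverse string order (so 'b10' sorts between 'b1' and 'b2'), skips the
# letter/legal/ledger sizes, and returns 'b5', the first size whose width and
# height both exceed the request; if nothing fits, the loop's else returns 'a0'.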
def _num_to_str(val):
if is_string_like(val): return val
ival = int(val)
if val==ival: return str(ival)
s = "%1.3f"%val
s = s.rstrip("0")
s = s.rstrip(".")
return s
def _nums_to_str(*args):
return ' '.join(map(_num_to_str,args))
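# Formatting sketch, derived from the two helpers above:
#   >>> _num_to_str(2.0)
#   '2'
#   >>> _num_to_str(1.5)
#   '1.5'
#   >>> _nums_to_str(612, 792, 0, 0)
#   '612 792 0 0'
# i.e. integral floats lose the decimal point and everything else is trimmed to at
# most three decimals, which keeps the emitted PostScript compact.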
def quote_ps_string(s):
"Quote dangerous characters of S for use in a PostScript string constant."
s=s.replace("\\", "\\\\")
s=s.replace("(", "\\(")
s=s.replace(")", "\\)")
s=s.replace("'", "\\251")
s=s.replace("`", "\\301")
s=re.sub(r"[^ -~\n]", lambda x: r"\%03o"%ord(x.group()), s)
return s
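# Example of the escaping above (illustrative input, not from a test): passing the
# text 50% (draft) through quote_ps_string yields 50% \(draft\) -- parentheses and
# backslashes are escaped, and any byte outside printable ASCII is emitted as an
# octal \ddd escape so the result can sit inside a literal PostScript string.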
def seq_allequal(seq1, seq2):
"""
seq1 and seq2 are either None or sequences or arrays
Return True if both are None or both are seqs with identical
elements
"""
if seq1 is None:
return seq2 is None
if seq2 is None:
return False
#ok, neither are None:, assuming iterable
if len(seq1) != len(seq2): return False
return np.alltrue(np.equal(seq1, seq2))
class RendererPS(RendererBase):
"""
The renderer handles all the drawing primitives using a graphics
context instance that controls the colors/styles.
"""
fontd = maxdict(50)
afmfontd = maxdict(50)
def __init__(self, width, height, pswriter, imagedpi=72):
"""
Although postscript itself is dpi independent, we need to
inform the image code about a requested dpi to generate high
res images and then scale them before embedding them
"""
RendererBase.__init__(self)
self.width = width
self.height = height
self._pswriter = pswriter
if rcParams['text.usetex']:
self.textcnt = 0
self.psfrag = []
self.imagedpi = imagedpi
# current renderer state (None=uninitialised)
self.color = None
self.linewidth = None
self.linejoin = None
self.linecap = None
self.linedash = None
self.fontname = None
self.fontsize = None
self._hatches = {}
self.image_magnification = imagedpi/72.0
self._clip_paths = {}
self._path_collection_id = 0
self.used_characters = {}
self.mathtext_parser = MathTextParser("PS")
self._afm_font_dir = os.path.join(
rcParams['datapath'], 'fonts', 'afm')
def track_characters(self, font, s):
"""Keeps track of which characters are required from
each font."""
realpath, stat_key = get_realpath_and_stat(font.fname)
used_characters = self.used_characters.setdefault(
stat_key, (realpath, set()))
used_characters[1].update([ord(x) for x in s])
def merge_used_characters(self, other):
for stat_key, (realpath, charset) in other.iteritems():
used_characters = self.used_characters.setdefault(
stat_key, (realpath, set()))
used_characters[1].update(charset)
def set_color(self, r, g, b, store=1):
if (r,g,b) != self.color:
if r==g and r==b:
self._pswriter.write("%1.3f setgray\n"%r)
else:
self._pswriter.write("%1.3f %1.3f %1.3f setrgbcolor\n"%(r,g,b))
if store: self.color = (r,g,b)
def set_linewidth(self, linewidth, store=1):
if linewidth != self.linewidth:
self._pswriter.write("%1.3f setlinewidth\n"%linewidth)
if store: self.linewidth = linewidth
def set_linejoin(self, linejoin, store=1):
if linejoin != self.linejoin:
self._pswriter.write("%d setlinejoin\n"%linejoin)
if store: self.linejoin = linejoin
def set_linecap(self, linecap, store=1):
if linecap != self.linecap:
self._pswriter.write("%d setlinecap\n"%linecap)
if store: self.linecap = linecap
def set_linedash(self, offset, seq, store=1):
if self.linedash is not None:
oldo, oldseq = self.linedash
if seq_allequal(seq, oldseq): return
if seq is not None and len(seq):
s="[%s] %d setdash\n"%(_nums_to_str(*seq), offset)
self._pswriter.write(s)
else:
self._pswriter.write("[] 0 setdash\n")
if store: self.linedash = (offset,seq)
def set_font(self, fontname, fontsize, store=1):
if rcParams['ps.useafm']: return
if (fontname,fontsize) != (self.fontname,self.fontsize):
out = ("/%s findfont\n"
"%1.3f scalefont\n"
"setfont\n" % (fontname,fontsize))
self._pswriter.write(out)
if store: self.fontname = fontname
if store: self.fontsize = fontsize
def create_hatch(self, hatch):
sidelen = 72
if hatch in self._hatches:
return self._hatches[hatch]
name = 'H%d' % len(self._hatches)
self._pswriter.write("""\
<< /PatternType 1
/PaintType 2
/TilingType 2
/BBox[0 0 %(sidelen)d %(sidelen)d]
/XStep %(sidelen)d
/YStep %(sidelen)d
/PaintProc {
pop
0 setlinewidth
""" % locals())
self._pswriter.write(
self._convert_path(Path.hatch(hatch), Affine2D().scale(72.0),
simplify=False))
self._pswriter.write("""\
stroke
} bind
>>
matrix
makepattern
/%(name)s exch def
""" % locals())
self._hatches[hatch] = name
return name
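# Sketch of what the block above emits (abridged): each distinct hatch string gets
# a PostScript pattern named H0, H1, ... defined once via makepattern; _draw_ps
# later references it with "<name> setcolor fill", so repeated hatches cost only a
# dictionary lookup here instead of a new pattern definition.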
def get_canvas_width_height(self):
'return the canvas width and height in display coords'
return self.width, self.height
def get_text_width_height_descent(self, s, prop, ismath):
"""
get the width and height in display coords of the string s
with FontProperty prop
"""
if rcParams['text.usetex']:
texmanager = self.get_texmanager()
fontsize = prop.get_size_in_points()
w, h, d = texmanager.get_text_width_height_descent(s, fontsize,
renderer=self)
return w, h, d
if ismath:
width, height, descent, pswriter, used_characters = \
self.mathtext_parser.parse(s, 72, prop)
return width, height, descent
if rcParams['ps.useafm']:
if ismath: s = s[1:-1]
font = self._get_font_afm(prop)
l,b,w,h,d = font.get_str_bbox_and_descent(s)
fontsize = prop.get_size_in_points()
scale = 0.001*fontsize
w *= scale
h *= scale
d *= scale
return w, h, d
font = self._get_font_ttf(prop)
font.set_text(s, 0.0, flags=LOAD_NO_HINTING)
w, h = font.get_width_height()
w /= 64.0 # convert from subpixels
h /= 64.0
d = font.get_descent()
d /= 64.0
#print s, w, h
return w, h, d
def flipy(self):
'return true if small y numbers are top for renderer'
return False
def _get_font_afm(self, prop):
key = hash(prop)
font = self.afmfontd.get(key)
if font is None:
fname = findfont(prop, fontext='afm', directory=self._afm_font_dir)
if fname is None:
fname = findfont(
"Helvetica", fontext='afm', directory=self._afm_font_dir)
font = self.afmfontd.get(fname)
if font is None:
with open(fname, 'rb') as fh:
font = AFM(fh)
self.afmfontd[fname] = font
self.afmfontd[key] = font
return font
def _get_font_ttf(self, prop):
key = hash(prop)
font = self.fontd.get(key)
if font is None:
fname = findfont(prop)
font = self.fontd.get(fname)
if font is None:
font = FT2Font(str(fname))
self.fontd[fname] = font
self.fontd[key] = font
font.clear()
size = prop.get_size_in_points()
font.set_size(size, 72.0)
return font
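# Caching note (inferred from the two-step lookup above): fonts are memoised under
# both hash(prop) and the resolved filename, so different FontProperties resolving
# to the same file share one FT2Font instance; maxdict(50) keeps each cache bounded.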
def _rgba(self, im):
return im.as_rgba_str()
def _rgb(self, im):
h,w,s = im.as_rgba_str()
rgba = np.fromstring(s, np.uint8)
rgba.shape = (h, w, 4)
rgb = rgba[:,:,:3]
return h, w, rgb.tostring()
def _gray(self, im, rc=0.3, gc=0.59, bc=0.11):
rgbat = im.as_rgba_str()
rgba = np.fromstring(rgbat[2], np.uint8)
rgba.shape = (rgbat[0], rgbat[1], 4)
rgba_f = rgba.astype(np.float32)
r = rgba_f[:,:,0]
g = rgba_f[:,:,1]
b = rgba_f[:,:,2]
gray = (r*rc + g*gc + b*bc).astype(np.uint8)
return rgbat[0], rgbat[1], gray.tostring()
def _hex_lines(self, s, chars_per_line=128):
s = binascii.b2a_hex(s)
nhex = len(s)
lines = []
for i in range(0,nhex,chars_per_line):
limit = min(i+chars_per_line, nhex)
lines.append(s[i:limit])
return lines
def get_image_magnification(self):
"""
Get the factor by which to magnify images passed to draw_image.
Allows a backend to have images at a different resolution to other
artists.
"""
return self.image_magnification
def option_scale_image(self):
"""
The ps backend supports arbitrary scaling of images.
"""
return True
def _get_image_h_w_bits_command(self, im):
if im.is_grayscale:
h, w, bits = self._gray(im)
imagecmd = "image"
else:
h, w, bits = self._rgb(im)
imagecmd = "false 3 colorimage"
return h, w, bits, imagecmd
def draw_image(self, gc, x, y, im, dx=None, dy=None, transform=None):
"""
Draw the Image instance into the current axes; x is the
distance in pixels from the left hand side of the canvas and y
is the distance from bottom
dx, dy is the width and height of the image. If a transform
(which must be an affine transform) is given, x, y, dx, dy are
interpreted as the coordinate of the transform.
"""
im.flipud_out()
h, w, bits, imagecmd = self._get_image_h_w_bits_command(im)
hexlines = b'\n'.join(self._hex_lines(bits)).decode('ascii')
if dx is None:
xscale = w / self.image_magnification
else:
xscale = dx
if dy is None:
yscale = h/self.image_magnification
else:
yscale = dy
if transform is None:
matrix = "1 0 0 1 0 0"
else:
matrix = " ".join(map(str, transform.to_values()))
figh = self.height*72
#print 'values', origin, flipud, figh, h, y
bbox = gc.get_clip_rectangle()
clippath, clippath_trans = gc.get_clip_path()
clip = []
if bbox is not None:
clipx,clipy,clipw,cliph = bbox.bounds
clip.append('%s clipbox' % _nums_to_str(clipw, cliph, clipx, clipy))
if clippath is not None:
id = self._get_clip_path(clippath, clippath_trans)
clip.append('%s' % id)
clip = '\n'.join(clip)
#y = figh-(y+h)
ps = """gsave
%(clip)s
[%(matrix)s] concat
%(x)s %(y)s translate
%(xscale)s %(yscale)s scale
/DataString %(w)s string def
%(w)s %(h)s 8 [ %(w)s 0 0 -%(h)s 0 %(h)s ]
{
currentfile DataString readhexstring pop
} bind %(imagecmd)s
%(hexlines)s
grestore
""" % locals()
self._pswriter.write(ps)
# unflip
im.flipud_out()
def _convert_path(self, path, transform, clip=False, simplify=None):
ps = []
last_points = None
if clip:
clip = (0.0, 0.0, self.width * 72.0,
self.height * 72.0)
else:
clip = None
for points, code in path.iter_segments(transform, clip=clip,
simplify=simplify):
if code == Path.MOVETO:
ps.append("%g %g m" % tuple(points))
elif code == Path.CLOSEPOLY:
ps.append("cl")
elif last_points is None:
# The other operations require a previous point
raise ValueError('Path lacks initial MOVETO')
elif code == Path.LINETO:
ps.append("%g %g l" % tuple(points))
elif code == Path.CURVE3:
points = quad2cubic(*(list(last_points[-2:]) + list(points)))
ps.append("%g %g %g %g %g %g c" %
tuple(points[2:]))
elif code == Path.CURVE4:
ps.append("%g %g %g %g %g %g c" % tuple(points))
last_points = points
ps = "\n".join(ps)
return ps
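# Output sketch, hand-derived from the branches above: a unit-square path with
# vertices (0,0), (1,0), (1,1), (0,1) and a CLOSEPOLY code becomes
#   "0 0 m\n1 0 l\n1 1 l\n0 1 l\ncl"
# where m/l/c/cl are the moveto/lineto/curveto/closepath procedures defined in
# psDefs at the bottom of this module.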
def _get_clip_path(self, clippath, clippath_transform):
id = self._clip_paths.get((clippath, clippath_transform))
if id is None:
id = 'c%x' % len(self._clip_paths)
ps_cmd = ['/%s {' % id]
ps_cmd.append(self._convert_path(clippath, clippath_transform,
simplify=False))
ps_cmd.extend(['clip', 'newpath', '} bind def\n'])
self._pswriter.write('\n'.join(ps_cmd))
self._clip_paths[(clippath, clippath_transform)] = id
return id
def draw_path(self, gc, path, transform, rgbFace=None):
"""
Draws a Path instance using the given affine transform.
"""
clip = (rgbFace is None and gc.get_hatch_path() is None)
simplify = path.should_simplify and clip
ps = self._convert_path(
path, transform, clip=clip, simplify=simplify)
self._draw_ps(ps, gc, rgbFace)
def draw_markers(self, gc, marker_path, marker_trans, path, trans, rgbFace=None):
"""
Draw the markers defined by path at each of the positions in x
and y. path coordinates are points, x and y coords will be
transformed by the transform
"""
if debugPS: self._pswriter.write('% draw_markers \n')
write = self._pswriter.write
if rgbFace:
if rgbFace[0]==rgbFace[1] and rgbFace[0]==rgbFace[2]:
ps_color = '%1.3f setgray' % rgbFace[0]
else:
ps_color = '%1.3f %1.3f %1.3f setrgbcolor' % rgbFace
# construct the generic marker command:
ps_cmd = ['/o {', 'gsave', 'newpath', 'translate'] # dont want the translate to be global
lw = gc.get_linewidth()
stroke = lw != 0.0
if stroke:
ps_cmd.append('%.1f setlinewidth' % lw)
jint = gc.get_joinstyle()
ps_cmd.append('%d setlinejoin' % jint)
cint = gc.get_capstyle()
ps_cmd.append('%d setlinecap' % cint)
ps_cmd.append(self._convert_path(marker_path, marker_trans,
simplify=False))
if rgbFace:
if stroke:
ps_cmd.append('gsave')
ps_cmd.extend([ps_color, 'fill'])
if stroke:
ps_cmd.append('grestore')
if stroke:
ps_cmd.append('stroke')
ps_cmd.extend(['grestore', '} bind def'])
for vertices, code in path.iter_segments(trans, simplify=False):
if len(vertices):
x, y = vertices[-2:]
ps_cmd.append("%g %g o" % (x, y))
ps = '\n'.join(ps_cmd)
self._draw_ps(ps, gc, rgbFace, fill=False, stroke=False)
def draw_path_collection(self, gc, master_transform, paths, all_transforms,
offsets, offsetTrans, facecolors, edgecolors,
linewidths, linestyles, antialiaseds, urls,
offset_position):
write = self._pswriter.write
path_codes = []
for i, (path, transform) in enumerate(self._iter_collection_raw_paths(
master_transform, paths, all_transforms)):
name = 'p%x_%x' % (self._path_collection_id, i)
ps_cmd = ['/%s {' % name,
'newpath', 'translate']
ps_cmd.append(self._convert_path(path, transform, simplify=False))
ps_cmd.extend(['} bind def\n'])
write('\n'.join(ps_cmd))
path_codes.append(name)
for xo, yo, path_id, gc0, rgbFace in self._iter_collection(
gc, master_transform, all_transforms, path_codes, offsets,
offsetTrans, facecolors, edgecolors, linewidths, linestyles,
antialiaseds, urls, offset_position):
ps = "%g %g %s" % (xo, yo, path_id)
self._draw_ps(ps, gc0, rgbFace)
self._path_collection_id += 1
def draw_tex(self, gc, x, y, s, prop, angle, ismath='TeX!'):
"""
draw a Text instance
"""
w, h, bl = self.get_text_width_height_descent(s, prop, ismath)
fontsize = prop.get_size_in_points()
thetext = 'psmarker%d' % self.textcnt
color = '%1.3f,%1.3f,%1.3f'% gc.get_rgb()[:3]
fontcmd = {'sans-serif' : r'{\sffamily %s}',
'monospace' : r'{\ttfamily %s}'}.get(
rcParams['font.family'], r'{\rmfamily %s}')
s = fontcmd % s
tex = r'\color[rgb]{%s} %s' % (color, s)
corr = 0#w/2*(fontsize-10)/10
if rcParams['text.latex.preview']:
# use baseline alignment!
pos = _nums_to_str(x-corr, y+bl)
self.psfrag.append(r'\psfrag{%s}[Bl][Bl][1][%f]{\fontsize{%f}{%f}%s}'%(thetext, angle, fontsize, fontsize*1.25, tex))
else:
# stick to the bottom alignment, but this may give incorrect baseline some times.
pos = _nums_to_str(x-corr, y)
self.psfrag.append(r'\psfrag{%s}[bl][bl][1][%f]{\fontsize{%f}{%f}%s}'%(thetext, angle, fontsize, fontsize*1.25, tex))
ps = """\
gsave
%(pos)s moveto
(%(thetext)s)
show
grestore
""" % locals()
self._pswriter.write(ps)
self.textcnt += 1
def draw_text(self, gc, x, y, s, prop, angle, ismath):
"""
draw a Text instance
"""
# local to avoid repeated attribute lookups
write = self._pswriter.write
if debugPS:
write("% text\n")
if ismath=='TeX':
return self.draw_tex(gc, x, y, s, prop, angle)
elif ismath:
return self.draw_mathtext(gc, x, y, s, prop, angle)
elif rcParams['ps.useafm']:
self.set_color(*gc.get_rgb())
font = self._get_font_afm(prop)
fontname = font.get_fontname()
fontsize = prop.get_size_in_points()
scale = 0.001*fontsize
thisx = 0
thisy = font.get_str_bbox_and_descent(s)[4] * scale
last_name = None
lines = []
for c in s:
name = uni2type1.get(ord(c), 'question')
try:
width = font.get_width_from_char_name(name)
except KeyError:
name = 'question'
width = font.get_width_char('?')
if last_name is not None:
kern = font.get_kern_dist_from_name(last_name, name)
else:
kern = 0
last_name = name
thisx += kern * scale
lines.append('%f %f m /%s glyphshow'%(thisx, thisy, name))
thisx += width * scale
thetext = "\n".join(lines)
ps = """\
gsave
/%(fontname)s findfont
%(fontsize)s scalefont
setfont
%(x)f %(y)f translate
%(angle)f rotate
%(thetext)s
grestore
""" % locals()
self._pswriter.write(ps)
else:
font = self._get_font_ttf(prop)
font.set_text(s, 0, flags=LOAD_NO_HINTING)
self.track_characters(font, s)
self.set_color(*gc.get_rgb())
self.set_font(font.get_sfnt()[(1,0,0,6)], prop.get_size_in_points())
cmap = font.get_charmap()
lastgind = None
#print 'text', s
lines = []
thisx = 0
thisy = font.get_descent() / 64.0
for c in s:
ccode = ord(c)
gind = cmap.get(ccode)
if gind is None:
ccode = ord('?')
name = '.notdef'
gind = 0
else:
name = font.get_glyph_name(gind)
glyph = font.load_char(ccode, flags=LOAD_NO_HINTING)
if lastgind is not None:
kern = font.get_kerning(lastgind, gind, KERNING_DEFAULT)
else:
kern = 0
lastgind = gind
thisx += kern/64.0
lines.append('%f %f m /%s glyphshow'%(thisx, thisy, name))
thisx += glyph.linearHoriAdvance/65536.0
thetext = '\n'.join(lines)
ps = """gsave
%(x)f %(y)f translate
%(angle)f rotate
%(thetext)s
grestore
""" % locals()
self._pswriter.write(ps)
def new_gc(self):
return GraphicsContextPS()
def draw_mathtext(self, gc,
x, y, s, prop, angle):
"""
Draw the math text using matplotlib.mathtext
"""
if debugPS:
self._pswriter.write("% mathtext\n")
width, height, descent, pswriter, used_characters = \
self.mathtext_parser.parse(s, 72, prop)
self.merge_used_characters(used_characters)
self.set_color(*gc.get_rgb())
thetext = pswriter.getvalue()
ps = """gsave
%(x)f %(y)f translate
%(angle)f rotate
%(thetext)s
grestore
""" % locals()
self._pswriter.write(ps)
def draw_gouraud_triangle(self, gc, points, colors, trans):
self.draw_gouraud_triangles(gc, points.reshape((1, 3, 2)),
colors.reshape((1, 3, 4)), trans)
def draw_gouraud_triangles(self, gc, points, colors, trans):
assert len(points) == len(colors)
assert points.ndim == 3
assert points.shape[1] == 3
assert points.shape[2] == 2
assert colors.ndim == 3
assert colors.shape[1] == 3
assert colors.shape[2] == 4
points = trans.transform(points)
shape = points.shape
flat_points = points.reshape((shape[0] * shape[1], 2))
flat_colors = colors.reshape((shape[0] * shape[1], 4))
points_min = np.min(flat_points, axis=0) - (1 << 8)
points_max = np.max(flat_points, axis=0) + (1 << 8)
factor = float(0xffffffff) / (points_max - points_min)
xmin, ymin = points_min
xmax, ymax = points_max
streamarr = np.empty(
(shape[0] * shape[1],),
dtype=[('flags', 'u1'),
('points', '>u4', (2,)),
('colors', 'u1', (3,))])
streamarr['flags'] = 0
streamarr['points'] = (flat_points - points_min) * factor
streamarr['colors'] = flat_colors[:, :3] * 255.0
stream = quote_ps_string(streamarr.tostring())
self._pswriter.write("""
gsave
<< /ShadingType 4
/ColorSpace [/DeviceRGB]
/BitsPerCoordinate 32
/BitsPerComponent 8
/BitsPerFlag 8
/AntiAlias true
/Decode [ %(xmin)f %(xmax)f %(ymin)f %(ymax)f 0 1 0 1 0 1 ]
/DataSource (%(stream)s)
>>
shfill
grestore
""" % locals())
def _draw_ps(self, ps, gc, rgbFace, fill=True, stroke=True, command=None):
"""
Emit the PostScript snippet 'ps' with all the attributes from 'gc'
applied. 'ps' must consist of PostScript commands to construct a path.
The fill and/or stroke kwargs can be set to False if the
'ps' string already includes filling and/or stroking, in
which case _draw_ps is just supplying properties and
clipping.
"""
# local variable eliminates all repeated attribute lookups
write = self._pswriter.write
if debugPS and command:
write("% "+command+"\n")
mightstroke = gc.shouldstroke()
stroke = stroke and mightstroke
fill = (fill and rgbFace is not None and
(len(rgbFace) <= 3 or rgbFace[3] != 0.0))
if mightstroke:
self.set_linewidth(gc.get_linewidth())
jint = gc.get_joinstyle()
self.set_linejoin(jint)
cint = gc.get_capstyle()
self.set_linecap(cint)
self.set_linedash(*gc.get_dashes())
self.set_color(*gc.get_rgb()[:3])
write('gsave\n')
cliprect = gc.get_clip_rectangle()
if cliprect:
x,y,w,h=cliprect.bounds
write('%1.4g %1.4g %1.4g %1.4g clipbox\n' % (w,h,x,y))
clippath, clippath_trans = gc.get_clip_path()
if clippath:
id = self._get_clip_path(clippath, clippath_trans)
write('%s\n' % id)
# Jochen, is the strip necessary? - this could be a honking big string
write(ps.strip())
write("\n")
if fill:
if stroke:
write("gsave\n")
self.set_color(store=0, *rgbFace[:3])
write("fill\n")
if stroke:
write("grestore\n")
hatch = gc.get_hatch()
if hatch:
hatch_name = self.create_hatch(hatch)
write("gsave\n")
write("[/Pattern [/DeviceRGB]] setcolorspace %f %f %f " % gc.get_rgb()[:3])
write("%s setcolor fill grestore\n" % hatch_name)
if stroke:
write("stroke\n")
write("grestore\n")
class GraphicsContextPS(GraphicsContextBase):
def get_capstyle(self):
return {'butt':0,
'round':1,
'projecting':2}[GraphicsContextBase.get_capstyle(self)]
def get_joinstyle(self):
return {'miter':0,
'round':1,
'bevel':2}[GraphicsContextBase.get_joinstyle(self)]
def shouldstroke(self):
return (self.get_linewidth() > 0.0 and
(len(self.get_rgb()) <= 3 or self.get_rgb()[3] != 0.0))
def new_figure_manager(num, *args, **kwargs):
FigureClass = kwargs.pop('FigureClass', Figure)
thisFig = FigureClass(*args, **kwargs)
return new_figure_manager_given_figure(num, thisFig)
def new_figure_manager_given_figure(num, figure):
"""
Create a new figure manager instance for the given figure.
"""
canvas = FigureCanvasPS(figure)
manager = FigureManagerPS(canvas, num)
return manager
class FigureCanvasPS(FigureCanvasBase):
_renderer_class = RendererPS
def draw(self):
pass
filetypes = {'ps' : 'Postscript',
'eps' : 'Encapsulated Postscript'}
def get_default_filetype(self):
return 'ps'
def print_ps(self, outfile, *args, **kwargs):
return self._print_ps(outfile, 'ps', *args, **kwargs)
def print_eps(self, outfile, *args, **kwargs):
return self._print_ps(outfile, 'eps', *args, **kwargs)
def _print_ps(self, outfile, format, *args, **kwargs):
papertype = kwargs.pop("papertype", rcParams['ps.papersize'])
papertype = papertype.lower()
if papertype == 'auto':
pass
elif papertype not in papersize:
raise RuntimeError( '%s is not a valid papertype. Use one \
of %s'% (papertype, ', '.join( papersize.iterkeys() )) )
orientation = kwargs.pop("orientation", "portrait").lower()
if orientation == 'landscape': isLandscape = True
elif orientation == 'portrait': isLandscape = False
else: raise RuntimeError('Orientation must be "portrait" or "landscape"')
self.figure.set_dpi(72) # Override the dpi kwarg
imagedpi = kwargs.pop("dpi", 72)
facecolor = kwargs.pop("facecolor", "w")
edgecolor = kwargs.pop("edgecolor", "w")
if rcParams['text.usetex']:
self._print_figure_tex(outfile, format, imagedpi, facecolor, edgecolor,
orientation, isLandscape, papertype,
**kwargs)
else:
self._print_figure(outfile, format, imagedpi, facecolor, edgecolor,
orientation, isLandscape, papertype,
**kwargs)
def _print_figure(self, outfile, format, dpi=72, facecolor='w', edgecolor='w',
orientation='portrait', isLandscape=False, papertype=None,
**kwargs):
"""
Render the figure to hardcopy. Set the figure patch face and
edge colors. This is useful because some of the GUIs have a
gray figure face color background and you'll probably want to
override this on hardcopy
If outfile is a string, it is interpreted as a file name.
If the extension matches .ep* write encapsulated postscript,
otherwise write a stand-alone PostScript file.
If outfile is a file object, a stand-alone PostScript file is
written into this file object.
"""
isEPSF = format == 'eps'
passed_in_file_object = False
if is_string_like(outfile):
title = outfile
elif is_writable_file_like(outfile):
title = None
passed_in_file_object = True
else:
raise ValueError("outfile must be a path or a file-like object")
# find the appropriate papertype
width, height = self.figure.get_size_inches()
if papertype == 'auto':
if isLandscape: papertype = _get_papertype(height, width)
else: papertype = _get_papertype(width, height)
if isLandscape: paperHeight, paperWidth = papersize[papertype]
else: paperWidth, paperHeight = papersize[papertype]
if rcParams['ps.usedistiller'] and not papertype == 'auto':
# distillers will improperly clip eps files if the pagesize is
# too small
if width>paperWidth or height>paperHeight:
if isLandscape:
papertype = _get_papertype(height, width)
paperHeight, paperWidth = papersize[papertype]
else:
papertype = _get_papertype(width, height)
paperWidth, paperHeight = papersize[papertype]
# center the figure on the paper
xo = 72*0.5*(paperWidth - width)
yo = 72*0.5*(paperHeight - height)
l, b, w, h = self.figure.bbox.bounds
llx = xo
lly = yo
urx = llx + w
ury = lly + h
rotation = 0
if isLandscape:
llx, lly, urx, ury = lly, llx, ury, urx
xo, yo = 72*paperHeight - yo, xo
rotation = 90
bbox = (llx, lly, urx, ury)
# generate PostScript code for the figure and store it in a string
origfacecolor = self.figure.get_facecolor()
origedgecolor = self.figure.get_edgecolor()
self.figure.set_facecolor(facecolor)
self.figure.set_edgecolor(edgecolor)
dryrun = kwargs.get("dryrun", False)
if dryrun:
class NullWriter(object):
def write(self, *kl, **kwargs):
pass
self._pswriter = NullWriter()
else:
if sys.version_info[0] >= 3:
self._pswriter = io.StringIO()
else:
self._pswriter = cStringIO.StringIO()
# mixed mode rendering
_bbox_inches_restore = kwargs.pop("bbox_inches_restore", None)
ps_renderer = self._renderer_class(width, height, self._pswriter,
imagedpi=dpi)
renderer = MixedModeRenderer(self.figure,
width, height, dpi, ps_renderer,
bbox_inches_restore=_bbox_inches_restore)
self.figure.draw(renderer)
if dryrun: # return immediately if dryrun (tightbbox=True)
return
self.figure.set_facecolor(origfacecolor)
self.figure.set_edgecolor(origedgecolor)
fd, tmpfile = mkstemp()
with io.open(fd, 'wb') as raw_fh:
if sys.version_info[0] >= 3:
fh = io.TextIOWrapper(raw_fh, encoding="ascii")
else:
fh = raw_fh
# write the PostScript headers
if isEPSF: print("%!PS-Adobe-3.0 EPSF-3.0", file=fh)
else: print("%!PS-Adobe-3.0", file=fh)
if title: print("%%Title: "+title, file=fh)
print(("%%Creator: matplotlib version "
+__version__+", http://matplotlib.org/"), file=fh)
print("%%CreationDate: "+time.ctime(time.time()), file=fh)
print("%%Orientation: " + orientation, file=fh)
if not isEPSF: print("%%DocumentPaperSizes: "+papertype, file=fh)
print("%%%%BoundingBox: %d %d %d %d" % bbox, file=fh)
if not isEPSF: print("%%Pages: 1", file=fh)
print("%%EndComments", file=fh)
Ndict = len(psDefs)
print("%%BeginProlog", file=fh)
if not rcParams['ps.useafm']:
Ndict += len(ps_renderer.used_characters)
print("/mpldict %d dict def"%Ndict, file=fh)
print("mpldict begin", file=fh)
for d in psDefs:
d=d.strip()
for l in d.split('\n'):
print(l.strip(), file=fh)
if not rcParams['ps.useafm']:
for font_filename, chars in ps_renderer.used_characters.itervalues():
if len(chars):
font = FT2Font(str(font_filename))
cmap = font.get_charmap()
glyph_ids = []
for c in chars:
gind = cmap.get(c) or 0
glyph_ids.append(gind)
fonttype = rcParams['ps.fonttype']
# Can not use more than 255 characters from a
# single font for Type 3
if len(glyph_ids) > 255:
fonttype = 42
# The ttf to ps (subsetting) support doesn't work for
# OpenType fonts that are Postscript inside (like the
# STIX fonts). This will simply turn that off to avoid
# errors.
if is_opentype_cff_font(font_filename):
raise RuntimeError("OpenType CFF fonts can not be saved using the internal Postscript backend at this time.\nConsider using the Cairo backend.")
else:
fh.flush()
convert_ttf_to_ps(font_filename, raw_fh, fonttype, glyph_ids)
print("end", file=fh)
print("%%EndProlog", file=fh)
if not isEPSF: print("%%Page: 1 1", file=fh)
print("mpldict begin", file=fh)
#print >>fh, "gsave"
print("%s translate"%_nums_to_str(xo, yo), file=fh)
if rotation: print("%d rotate"%rotation, file=fh)
print("%s clipbox"%_nums_to_str(width*72, height*72, 0, 0), file=fh)
# write the figure
print(self._pswriter.getvalue(), file=fh)
# write the trailer
#print >>fh, "grestore"
print("end", file=fh)
print("showpage", file=fh)
if not isEPSF: print("%%EOF", file=fh)
fh.flush()
if rcParams['ps.usedistiller'] == 'ghostscript':
gs_distill(tmpfile, isEPSF, ptype=papertype, bbox=bbox)
elif rcParams['ps.usedistiller'] == 'xpdf':
xpdf_distill(tmpfile, isEPSF, ptype=papertype, bbox=bbox)
if passed_in_file_object:
with open(tmpfile, 'rb') as fh:
outfile.write(fh.read())
else:
with open(outfile, 'w') as fh:
pass
mode = os.stat(outfile).st_mode
shutil.move(tmpfile, outfile)
os.chmod(outfile, mode)
def _print_figure_tex(self, outfile, format, dpi, facecolor, edgecolor,
orientation, isLandscape, papertype,
**kwargs):
"""
If text.usetex is True in rc, a temporary pair of tex/eps files
is created to allow tex to manage the text layout via the PSFrags
package. These files are processed to yield the final ps or eps file.
"""
isEPSF = format == 'eps'
title = outfile
self.figure.dpi = 72 # ignore the dpi kwarg
width, height = self.figure.get_size_inches()
xo = 0
yo = 0
l, b, w, h = self.figure.bbox.bounds
llx = xo
lly = yo
urx = llx + w
ury = lly + h
bbox = (llx, lly, urx, ury)
# generate PostScript code for the figure and store it in a string
origfacecolor = self.figure.get_facecolor()
origedgecolor = self.figure.get_edgecolor()
self.figure.set_facecolor(facecolor)
self.figure.set_edgecolor(edgecolor)
dryrun = kwargs.get("dryrun", False)
if dryrun:
class NullWriter(object):
def write(self, *kl, **kwargs):
pass
self._pswriter = NullWriter()
else:
if sys.version_info[0] >= 3:
self._pswriter = io.StringIO()
else:
self._pswriter = cStringIO.StringIO()
# mixed mode rendering
_bbox_inches_restore = kwargs.pop("bbox_inches_restore", None)
ps_renderer = self._renderer_class(width, height,
self._pswriter, imagedpi=dpi)
renderer = MixedModeRenderer(self.figure,
width, height, dpi, ps_renderer,
bbox_inches_restore=_bbox_inches_restore)
self.figure.draw(renderer)
if dryrun: # return immediately if dryrun (tightbbox=True)
return
self.figure.set_facecolor(origfacecolor)
self.figure.set_edgecolor(origedgecolor)
# write to a temp file, we'll move it to outfile when done
fd, tmpfile = mkstemp()
if sys.version_info[0] >= 3:
fh = io.open(fd, 'w', encoding='ascii')
else:
fh = io.open(fd, 'wb')
with fh:
# write the Encapsulated PostScript headers
print("%!PS-Adobe-3.0 EPSF-3.0", file=fh)
if title: print("%%Title: "+title, file=fh)
print(("%%Creator: matplotlib version "
+__version__+", http://matplotlib.org/"), file=fh)
print("%%CreationDate: "+time.ctime(time.time()), file=fh)
print("%%%%BoundingBox: %d %d %d %d" % bbox, file=fh)
print("%%EndComments", file=fh)
Ndict = len(psDefs)
print("%%BeginProlog", file=fh)
print("/mpldict %d dict def"%Ndict, file=fh)
print("mpldict begin", file=fh)
for d in psDefs:
d=d.strip()
for l in d.split('\n'):
print(l.strip(), file=fh)
print("end", file=fh)
print("%%EndProlog", file=fh)
print("mpldict begin", file=fh)
#print >>fh, "gsave"
print("%s translate"%_nums_to_str(xo, yo), file=fh)
print("%s clipbox"%_nums_to_str(width*72, height*72, 0, 0), file=fh)
# write the figure
print(self._pswriter.getvalue(), file=fh)
# write the trailer
#print >>fh, "grestore"
print("end", file=fh)
print("showpage", file=fh)
fh.flush()
if isLandscape: # now we are ready to rotate
isLandscape = True
width, height = height, width
bbox = (lly, llx, ury, urx)
# set the paper size to the figure size if isEPSF. The
# resulting ps file has the given size with correct bounding
# box so that there is no need to call 'pstoeps'
if isEPSF:
paperWidth, paperHeight = self.figure.get_size_inches()
if isLandscape:
paperWidth, paperHeight = paperHeight, paperWidth
else:
temp_papertype = _get_papertype(width, height)
if papertype=='auto':
papertype = temp_papertype
paperWidth, paperHeight = papersize[temp_papertype]
else:
paperWidth, paperHeight = papersize[papertype]
if (width>paperWidth or height>paperHeight) and isEPSF:
paperWidth, paperHeight = papersize[temp_papertype]
verbose.report('Your figure is too big to fit on %s paper. %s \
paper will be used to prevent clipping.'%(papertype, temp_papertype), 'helpful')
texmanager = ps_renderer.get_texmanager()
font_preamble = texmanager.get_font_preamble()
custom_preamble = texmanager.get_custom_preamble()
psfrag_rotated = convert_psfrags(tmpfile, ps_renderer.psfrag,
font_preamble,
custom_preamble, paperWidth, paperHeight,
orientation)
if rcParams['ps.usedistiller'] == 'ghostscript':
gs_distill(tmpfile, isEPSF, ptype=papertype, bbox=bbox,
rotated=psfrag_rotated)
elif rcParams['ps.usedistiller'] == 'xpdf':
xpdf_distill(tmpfile, isEPSF, ptype=papertype, bbox=bbox,
rotated=psfrag_rotated)
elif rcParams['text.usetex']:
if False: pass # for debugging
else: gs_distill(tmpfile, isEPSF, ptype=papertype, bbox=bbox,
rotated=psfrag_rotated)
is_file = False
if sys.version_info[0] >= 3:
if isinstance(outfile, io.IOBase):
is_file = True
else:
if isinstance(outfile, file):
is_file = True
if is_file:
with open(tmpfile, 'rb') as fh:
outfile.write(fh.read())
else:
with open(outfile, 'wb') as fh:
pass
mode = os.stat(outfile).st_mode
shutil.move(tmpfile, outfile)
os.chmod(outfile, mode)
def convert_psfrags(tmpfile, psfrags, font_preamble, custom_preamble,
paperWidth, paperHeight, orientation):
"""
When we want to use the LaTeX backend with postscript, we write PSFrag tags
to a temporary postscript file, each one marking a position for LaTeX to
render some text. convert_psfrags generates a LaTeX document containing the
commands to convert those tags to text. LaTeX/dvips produces the postscript
file that includes the actual text.
"""
tmpdir = os.path.split(tmpfile)[0]
epsfile = tmpfile+'.eps'
shutil.move(tmpfile, epsfile)
latexfile = tmpfile+'.tex'
outfile = tmpfile+'.output'
dvifile = tmpfile+'.dvi'
psfile = tmpfile+'.ps'
if orientation=='landscape': angle = 90
else: angle = 0
if rcParams['text.latex.unicode']:
unicode_preamble = r"""\usepackage{ucs}
\usepackage[utf8x]{inputenc}"""
else:
unicode_preamble = ''
s = r"""\documentclass{article}
%s
%s
%s
\usepackage[dvips, papersize={%sin,%sin}, body={%sin,%sin}, margin={0in,0in}]{geometry}
\usepackage{psfrag}
\usepackage[dvips]{graphicx}
\usepackage{color}
\pagestyle{empty}
\begin{document}
\begin{figure}
\centering
\leavevmode
%s
\includegraphics*[angle=%s]{%s}
\end{figure}
\end{document}
"""% (font_preamble, unicode_preamble, custom_preamble, paperWidth, paperHeight,
paperWidth, paperHeight,
'\n'.join(psfrags), angle, os.path.split(epsfile)[-1])
with io.open(latexfile, 'wb') as latexh:
if rcParams['text.latex.unicode']:
latexh.write(s.encode('utf8'))
else:
try:
latexh.write(s.encode('ascii'))
except UnicodeEncodeError:
verbose.report("You are using unicode and latex, but have "
"not enabled the matplotlib 'text.latex.unicode' "
"rcParam.", 'helpful')
raise
# the split drive part of the command is necessary for windows users with
# multiple drives
if sys.platform == 'win32': precmd = '%s &&'% os.path.splitdrive(tmpdir)[0]
else: precmd = ''
command = '%s cd "%s" && latex -interaction=nonstopmode "%s" > "%s"'\
%(precmd, tmpdir, latexfile, outfile)
verbose.report(command, 'debug')
exit_status = os.system(command)
with io.open(outfile, 'rb') as fh:
if exit_status:
raise RuntimeError('LaTeX was not able to process your file:\
\nHere is the full report generated by LaTeX: \n\n%s'% fh.read())
else:
verbose.report(fh.read(), 'debug')
os.remove(outfile)
command = '%s cd "%s" && dvips -q -R0 -o "%s" "%s" > "%s"'%(precmd, tmpdir,
os.path.split(psfile)[-1], os.path.split(dvifile)[-1], outfile)
verbose.report(command, 'debug')
exit_status = os.system(command)
with io.open(outfile, 'rb') as fh:
if exit_status:
raise RuntimeError('dvips was not able to \
process the following file:\n%s\nHere is the full report generated by dvips: \
\n\n'% dvifile + fh.read())
else:
verbose.report(fh.read(), 'debug')
os.remove(outfile)
os.remove(epsfile)
shutil.move(psfile, tmpfile)
# check if the dvips created a ps in landscape paper. Somehow,
# above latex+dvips results in a ps file in a landscape mode for a
# certain figure sizes (e.g., 8.3in,5.8in which is a5). And the
# bounding box of the final output got messed up. We check see if
# the generated ps file is in landscape and return this
# information. The return value is used in pstoeps step to recover
# the correct bounding box. 2010-06-05 JJL
with open(tmpfile) as fh:
if "Landscape" in fh.read(1000):
psfrag_rotated = True
else:
psfrag_rotated = False
if not debugPS:
for fname in glob.glob(tmpfile+'.*'):
os.remove(fname)
return psfrag_rotated
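# Pipeline sketch (latex and dvips are assumed to be on PATH): the figure is moved
# to <tmpfile>.eps, wrapped in a small LaTeX document full of \psfrag tags, run
# through latex and then dvips, and the resulting .ps replaces tmpfile in place.
# The boolean returned only records whether dvips produced landscape output so
# that pstoeps can later restore the intended bounding box.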
def gs_distill(tmpfile, eps=False, ptype='letter', bbox=None, rotated=False):
"""
Use ghostscript's pswrite or epswrite device to distill a file.
This yields smaller files without illegal encapsulated postscript
operators. The output is low-level, converting text to outlines.
"""
if eps: paper_option = "-dEPSCrop"
else: paper_option = "-sPAPERSIZE=%s" % ptype
psfile = tmpfile + '.ps'
outfile = tmpfile + '.output'
dpi = rcParams['ps.distiller.res']
gs_exe = ps_backend_helper.gs_exe
if ps_backend_helper.supports_ps2write: # gs version >= 9
device_name = "ps2write"
else:
device_name = "pswrite"
command = '%s -dBATCH -dNOPAUSE -r%d -sDEVICE=%s %s -sOutputFile="%s" \
"%s" > "%s"'% (gs_exe, dpi, device_name,
paper_option, psfile, tmpfile, outfile)
verbose.report(command, 'debug')
exit_status = os.system(command)
with io.open(outfile, 'rb') as fh:
if exit_status:
raise RuntimeError('ghostscript was not able to process \
your image.\nHere is the full report generated by ghostscript:\n\n' + fh.read())
else:
verbose.report(fh.read(), 'debug')
os.remove(outfile)
os.remove(tmpfile)
shutil.move(psfile, tmpfile)
# While it is best if above steps preserve the original bounding
# box, there seem to be cases when it is not. For those cases,
# the original bbox can be restored during the pstoeps step.
if eps:
# For some versions of gs, above steps result in an ps file
# where the original bbox is no more correct. Do not adjust
# bbox for now.
if ps_backend_helper.supports_ps2write:
# for gs version >= 9 w/ ps2write device
pstoeps(tmpfile, bbox, rotated=rotated)
else:
pstoeps(tmpfile)
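# The command assembled above amounts to (illustrative paths and resolution):
#   gs -dBATCH -dNOPAUSE -r6000 -sDEVICE=ps2write -sPAPERSIZE=letter \
#      -sOutputFile="tmp.ps" "tmp" > "tmp.output"
# with -dEPSCrop substituted for the paper size when eps=True, and pswrite used
# instead of ps2write for ghostscript releases older than 9.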
def xpdf_distill(tmpfile, eps=False, ptype='letter', bbox=None, rotated=False):
"""
Use ghostscript's ps2pdf and xpdf's/poppler's pdftops to distill a file.
This yields smaller files without illegal encapsulated postscript
operators. This distiller is preferred, generating high-level postscript
output that treats text as text.
"""
pdffile = tmpfile + '.pdf'
psfile = tmpfile + '.ps'
outfile = tmpfile + '.output'
if eps: paper_option = "-dEPSCrop"
else: paper_option = "-sPAPERSIZE=%s" % ptype
command = 'ps2pdf -dAutoFilterColorImages=false \
-sColorImageFilter=FlateEncode %s "%s" "%s" > "%s"'% \
(paper_option, tmpfile, pdffile, outfile)
if sys.platform == 'win32': command = command.replace('=', '#')
verbose.report(command, 'debug')
exit_status = os.system(command)
with io.open(outfile, 'rb') as fh:
if exit_status:
raise RuntimeError('ps2pdf was not able to process your \
image.\nHere is the report generated by ghostscript:\n\n' + fh.read())
else:
verbose.report(fh.read(), 'debug')
os.remove(outfile)
command = 'pdftops -paper match -level2 "%s" "%s" > "%s"'% \
(pdffile, psfile, outfile)
verbose.report(command, 'debug')
exit_status = os.system(command)
with io.open(outfile, 'rb') as fh:
if exit_status:
raise RuntimeError('pdftops was not able to process your \
image.\nHere is the full report generated by pdftops: \n\n' + fh.read())
else:
verbose.report(fh.read(), 'debug')
os.remove(outfile)
os.remove(tmpfile)
shutil.move(psfile, tmpfile)
if eps:
pstoeps(tmpfile)
for fname in glob.glob(tmpfile+'.*'):
os.remove(fname)
def get_bbox_header(lbrt, rotated=False):
"""
return a postscript header string for the given bbox lbrt=(l, b, r, t).
Optionally, return rotate command.
"""
l, b, r, t = lbrt
if rotated:
rotate = "%.2f %.2f translate\n90 rotate" % (l+r, 0)
else:
rotate = ""
bbox_info = '%%%%BoundingBox: %d %d %d %d' % (l, b, np.ceil(r), np.ceil(t))
hires_bbox_info = '%%%%HiResBoundingBox: %.6f %.6f %.6f %.6f' % (l, b, r, t)
return '\n'.join([bbox_info, hires_bbox_info]), rotate
# get_bbox is deprecated. I don't see any reason to use ghostscript to
# find the bounding box, as the required bounding box is already known.
def get_bbox(tmpfile, bbox):
"""
Use ghostscript's bbox device to find the center of the bounding box. Return
an appropriately sized bbox centered around that point. A bit of a hack.
"""
outfile = tmpfile + '.output'
if sys.platform == 'win32': gs_exe = 'gswin32c'
else: gs_exe = 'gs'
command = '%s -dBATCH -dNOPAUSE -sDEVICE=bbox "%s"' %\
(gs_exe, tmpfile)
verbose.report(command, 'debug')
stdin, stdout, stderr = os.popen3(command)
verbose.report(stdout.read(), 'debug-annoying')
bbox_info = stderr.read()
verbose.report(bbox_info, 'helpful')
bbox_found = re.search('%%HiResBoundingBox: .*', bbox_info)
if bbox_found:
bbox_info = bbox_found.group()
else:
raise RuntimeError('Ghostscript was not able to extract a bounding box.\
Here is the Ghostscript output:\n\n%s'% bbox_info)
l, b, r, t = [float(i) for i in bbox_info.split()[-4:]]
# this is a hack to deal with the fact that ghostscript does not return the
# intended bbox, but a tight bbox. For now, we just center the ink in the
# intended bbox. This is not ideal, users may intend the ink to not be
# centered.
if bbox is None:
l, b, r, t = (l-1, b-1, r+1, t+1)
else:
x = (l+r)/2
y = (b+t)/2
dx = (bbox[2]-bbox[0])/2
dy = (bbox[3]-bbox[1])/2
l,b,r,t = (x-dx, y-dy, x+dx, y+dy)
bbox_info = '%%%%BoundingBox: %d %d %d %d' % (l, b, np.ceil(r), np.ceil(t))
hires_bbox_info = '%%%%HiResBoundingBox: %.6f %.6f %.6f %.6f' % (l, b, r, t)
return '\n'.join([bbox_info, hires_bbox_info])
def pstoeps(tmpfile, bbox=None, rotated=False):
"""
Convert the postscript to encapsulated postscript. The bbox of
the eps file will be replaced with the given *bbox* argument. If
None, original bbox will be used.
"""
# if rotated==True, the output eps file need to be rotated
if bbox:
bbox_info, rotate = get_bbox_header(bbox, rotated=rotated)
else:
bbox_info, rotate = None, None
epsfile = tmpfile + '.eps'
with io.open(epsfile, 'wb') as epsh:
write = epsh.write
with io.open(tmpfile, 'rb') as tmph:
line = tmph.readline()
# Modify the header:
while line:
if line.startswith(b'%!PS'):
write(b"%!PS-Adobe-3.0 EPSF-3.0\n")
if bbox:
write(bbox_info.encode('ascii') + b'\n')
elif line.startswith(b'%%EndComments'):
write(line)
write(b'%%BeginProlog\n')
write(b'save\n')
write(b'countdictstack\n')
write(b'mark\n')
write(b'newpath\n')
write(b'/showpage {} def\n')
write(b'/setpagedevice {pop} def\n')
write(b'%%EndProlog\n')
write(b'%%Page 1 1\n')
if rotate:
write(rotate.encode('ascii') + b'\n')
break
elif bbox and (line.startswith(b'%%Bound') \
or line.startswith(b'%%HiResBound') \
or line.startswith(b'%%DocumentMedia') \
or line.startswith(b'%%Pages')):
pass
else:
write(line)
line = tmph.readline()
# Now rewrite the rest of the file, and modify the trailer.
# This is done in a second loop such that the header of the embedded
# eps file is not modified.
line = tmph.readline()
while line:
if line.startswith(b'%%Trailer'):
write(b'%%Trailer\n')
write(b'cleartomark\n')
write(b'countdictstack\n')
write(b'exch sub { end } repeat\n')
write(b'restore\n')
if rcParams['ps.usedistiller'] == 'xpdf':
# remove extraneous "end" operator:
line = tmph.readline()
elif line.startswith(b'%%PageBoundingBox'):
pass
else:
write(line)
line = tmph.readline()
os.remove(tmpfile)
shutil.move(epsfile, tmpfile)
class FigureManagerPS(FigureManagerBase):
pass
FigureManager = FigureManagerPS
# The following Python dictionary psDefs contains the entries for the
# PostScript dictionary mpldict. This dictionary implements most of
# the matplotlib primitives and some abbreviations.
#
# References:
# http://www.adobe.com/products/postscript/pdfs/PLRM.pdf
# http://www.mactech.com/articles/mactech/Vol.09/09.04/PostscriptTutorial/
# http://www.math.ubc.ca/people/faculty/cass/graphics/text/www/
#
# The usage comments use the notation of the operator summary
# in the PostScript Language reference manual.
psDefs = [
# x y *m* -
"/m { moveto } bind def",
# x y *l* -
"/l { lineto } bind def",
# x y *r* -
"/r { rlineto } bind def",
# x1 y1 x2 y2 x y *c* -
"/c { curveto } bind def",
# *closepath* -
"/cl { closepath } bind def",
# w h x y *box* -
"""/box {
m
1 index 0 r
0 exch r
neg 0 r
cl
} bind def""",
# w h x y *clipbox* -
"""/clipbox {
box
clip
newpath
} bind def""",
]
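# Worked example of the two procedures above: the "612 792 0 0 clipbox" emitted by
# _print_figure expands to box (a 612x792 rectangle anchored at the origin built
# from m/r/cl) followed by clip and newpath, i.e. it clips all later drawing to
# the page rectangle.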
| mit |
muLAn-project/muLAn | muLAn/models/aimc.py | 1 | 7412 | # -*-coding:Utf-8 -*
import os
os.environ["OMP_NUM_THREADS"] = "1"
import sys
import copy
import emcee
import pickle
import glob
import shutil
import datetime
import importlib
import subprocess
from multiprocessing import Pool
import numpy as np
import pandas as pd
from scipy import stats
from scipy import interpolate
from sklearn import linear_model
import muLAn.models as mulanmodels
import muLAn.packages.algebra as algebra
# Global variables to speed up the code
# -------------------------------------
global time_serie, fitp, constp, ndim
global fitpn, constpn, ndim, all_params
global instrument, algo, model_lib
# Ugly part of the code: some static arguments are loaded and declared as
# global to speed up the algorithms.
fname = 'args.h5'
data = pd.read_hdf(fname, 'data')
fitp = pd.read_hdf(fname, 'fit_params')
constp = pd.read_hdf(fname, 'const_params')
ndim = len(fitp)
fitpn = list(fitp.index)
constpn = list(constp.index)
instrument = np.unique(data['obs'])
algo = np.unique(data['model'])
all_params = fitp.to_dict()
all_params.update(constp.to_dict())
# Load library of models
model_lib = dict()
for i in range(algo.shape[0]):
name = 'muLAn.models.{:s}'.format(algo[i])
model_lib.update({algo[i]: importlib.import_module(name)})
# End of declaration of the global variables
def help():
text = "AIMC - Affine Invariant MCMC."
return text
# ----------------------------------------------------------------------
def communicate(cfg, verbose, text, opts=False, prefix=False, newline=False, tab=False):
if cfg.getint('Modelling', 'Verbose') >= verbose:
if prefix:
text = "[muLAn] " + text
if opts!=False:
text2=''
for a in opts:
text2 = text2 + a
text = text2 + text + printoption.reset
if tab:
text = " " + text
if newline:
text = "\n" + text
print(text)
else:
if tab:
text = " " + text
if newline:
text = "\n" + text
print(text)
# ----------------------------------------------------------------------
def zipdir(path, ziph):
# ziph is zipfile handle
for root, dirs, files in os.walk(path):
for file in files:
ziph.write(os.path.join(root, file))
def logprior(param_model):
p = 0
if param_model['t0'] < 0:
p = 1e12
if param_model['rho'] < 0:
p = 1e12
if param_model['rho'] > 1.0:
p = 1e12
if param_model['tE'] < 1e-10:
p = 1e12
if param_model['q'] < 1e-9:
p = 1e12
# if param_model['q'] > 1.0:
# p = 1e12
if param_model['s'] < 1e-10:
p = 1e12
if param_model['s'] > 10:
p = 1e12
return p
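# Note on the convention used here: the return value is a penalty, 0 for allowed
# parameters and 1e12 otherwise; logprob below subtracts it from the
# log-likelihood so that excluded regions get an essentially -infinite posterior.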
def loglike(all_params, cfg):
# For backwards compatibility (Deprecated)
tb = all_params['tb']
# Compute magnification
for j in range(len(instrument)):
mask1 = data['obs'] == instrument[j]
for i in range(algo.shape[0]):
mask = (data['obs'] == instrument[j])\
& (data['model'] == algo[i])
if mask.sum() > 0:
epochs = data.loc[mask, 'dates'].values
DsN = data.loc[mask, 'DsN'].values
DsE = data.loc[mask, 'DsE'].values
Ds = dict({'N': DsN, 'E': DsE})
try:
kwargs_method = dict(parser.items(algo[i]))
except:
kwargs_method = dict()
mag = model_lib[algo[i]].magnifcalc(epochs, all_params, Ds=Ds, tb=tb, **kwargs_method)
data.loc[mask,'amp'] = mag
fs, fb = algebra.fsfbwsig(data[mask1], None, blending=True)
data.loc[mask1,'fs'] = fs
data.loc[mask1,'fb'] = fb
data['flux_model'] = data['fs'] * data['amp'] + data['fb']
chi2pp = np.power((data['flux']-data['flux_model'])/data['err_flux'], 2)
chi2 = np.sum(chi2pp)
result = - 0.5 * chi2
return result
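# In short: for every instrument the model flux is fs * A(t) + fb, with fs and fb
# obtained per instrument from algebra.fsfbwsig, and the returned value is -chi2/2
# where chi2 = sum(((F_obs - F_model) / sigma_F)**2) -- a Gaussian log-likelihood
# up to an additive constant.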
def logprob(theta, cfg):
# Update the parameters
for i in range(ndim):
all_params[fitpn[i]] = theta[i]
# Evaluate the log likelihood minus the prior penalty
return loglike(all_params, cfg) - logprior(all_params)
# ----------------------------------------------------------------------
def ini_chains_gene(cfg):
nwalkers = cfg.getint('AIMC', 'walkers')
ninstr = len(instrument)
result = []
j = 0
while(j<nwalkers):
table = np.array([])
for i in range(ndim):
if i < ndim - 2*ninstr:
l = cfg.get('Modelling', fitpn[i])
a = abs(float(l.split(',')[1]))
b = abs(float(l.split(',')[2]))
c = float(l.split(',')[3])
aa = c-a
bb = c+b
x = (bb - aa) * np.random.random_sample() + aa
table = np.append(table, x)
else:
table = np.append(table, np.random.random_sample())
result.append(table)
j+=1
return result
def search(**kwargs):
# Declare global variables
global ndim, fitp
# Parse the arguments
if 'cfgsetup' in kwargs: cfgsetup = kwargs['cfgsetup']
# Create alias for paths
path_event = cfgsetup.get('FullPaths', 'Event')
path_mcmc = f"{path_event}/{cfgsetup.get('RelativePaths', 'Chains')}"
archive_name = cfgsetup.get('Controls', 'Archive')
# Extract flags from parser
flag_resume = cfgsetup.getboolean('AIMC', 'resume')
# Extract MCMC settings
nwalkers = cfgsetup.getint('AIMC', 'walkers')
length = cfgsetup.getint('AIMC', 'length')
ncpu = cfgsetup.getint('AIMC', 'cpu')
if nwalkers < 2*ndim:
nwalkers = 2*ndim
cfgsetup.set('AIMC', 'walkers', f"{nwalkers}")
txt = "{ndim} parameters will be fit (including the flux)."
txt = "{txt}\n The number of walkers increased to 2*nb_params."
# Create a file to check if user wants to stop MCMC
fn_lock = cfgsetup.get('FullPaths', 'Event') + '.lock'
if not os.path.exists(fn_lock): open(fn_lock, 'w').close()
if not flag_resume:
# Initialize folders tree
shutil.rmtree(path_mcmc)
if not os.path.exists(path_mcmc): os.makedirs(path_mcmc)
# Set up the backend
backend = emcee.backends.HDFBackend(f'{path_mcmc}/mcmc_sampler.h5')
backend.reset(nwalkers, ndim)
# Initialize chains
pos = ini_chains_gene(cfgsetup)
with Pool(processes=ncpu) as pool:
# Initialize the sampler
sampler = emcee.EnsembleSampler(nwalkers, ndim, logprob, args=[cfgsetup], backend=backend, pool=pool)
# Now we start sampling
for sample in sampler.sample(pos, iterations=length, progress=True):
# Check if user wants to stop the MCMC
if not os.path.exists(fn_lock): break
else:
backend = emcee.backends.HDFBackend(f'{path_mcmc}/mcmc_sampler.h5')
print("Initial size: {0}".format(backend.iteration))
with Pool(processes=ncpu) as pool:
sampler = emcee.EnsembleSampler(nwalkers, ndim, logprob, args=[cfgsetup], backend=backend, pool=pool)
sampler.run_mcmc(None, length, progress=True)
if os.path.exists(fn_lock): os.remove(fn_lock)
else: sys.exit("\nProcess stopped by the user.\n")
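# Run-control sketch: search() drops an empty "<event>.lock" file next to the event
# directory; deleting that file while a fresh run is sampling makes the loop above
# break after the current iteration, and because emcee's HDF backend writes every
# step to <Chains>/mcmc_sampler.h5 the run can be continued later with resume=True.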
| mit |
sinhrks/scikit-learn | examples/ensemble/plot_voting_probas.py | 316 | 2824 | """
===========================================================
Plot class probabilities calculated by the VotingClassifier
===========================================================
Plot the class probabilities of the first sample in a toy dataset
predicted by three different classifiers and averaged by the
`VotingClassifier`.
First, three exemplary classifiers are initialized (`LogisticRegression`,
`GaussianNB`, and `RandomForestClassifier`) and used to initialize a
soft-voting `VotingClassifier` with weights `[1, 1, 5]`, which means that
the predicted probabilities of the `RandomForestClassifier` count 5 times
as much as the weights of the other classifiers when the averaged probability
is calculated.
To visualize the probability weighting, we fit each classifier on the training
set and plot the predicted class probabilities for the first sample in this
example dataset.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import GaussianNB
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import VotingClassifier
clf1 = LogisticRegression(random_state=123)
clf2 = RandomForestClassifier(random_state=123)
clf3 = GaussianNB()
X = np.array([[-1.0, -1.0], [-1.2, -1.4], [-3.4, -2.2], [1.1, 1.2]])
y = np.array([1, 1, 2, 2])
eclf = VotingClassifier(estimators=[('lr', clf1), ('rf', clf2), ('gnb', clf3)],
voting='soft',
weights=[1, 1, 5])
# predict class probabilities for all classifiers
probas = [c.fit(X, y).predict_proba(X) for c in (clf1, clf2, clf3, eclf)]
# get class probabilities for the first sample in the dataset
class1_1 = [pr[0, 0] for pr in probas]
class2_1 = [pr[0, 1] for pr in probas]
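# For reference (hand-computed from the weights above): each VotingClassifier bar
# is the weighted mean of the three classifiers, e.g. for class 1 of sample 1
#   p_avg = (1 * class1_1[0] + 1 * class1_1[1] + 5 * class1_1[2]) / 7
# which probas[-1][0, 0] -- plotted as the fourth group below -- should reproduce.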
# plotting
N = 4 # number of groups
ind = np.arange(N) # group positions
width = 0.35 # bar width
fig, ax = plt.subplots()
# bars for classifier 1-3
p1 = ax.bar(ind, np.hstack(([class1_1[:-1], [0]])), width, color='green')
p2 = ax.bar(ind + width, np.hstack(([class2_1[:-1], [0]])), width, color='lightgreen')
# bars for VotingClassifier
p3 = ax.bar(ind, [0, 0, 0, class1_1[-1]], width, color='blue')
p4 = ax.bar(ind + width, [0, 0, 0, class2_1[-1]], width, color='steelblue')
# plot annotations
plt.axvline(2.8, color='k', linestyle='dashed')
ax.set_xticks(ind + width)
ax.set_xticklabels(['LogisticRegression\nweight 1',
'GaussianNB\nweight 1',
'RandomForestClassifier\nweight 5',
'VotingClassifier\n(average probabilities)'],
rotation=40,
ha='right')
plt.ylim([0, 1])
plt.title('Class probabilities for sample 1 by different classifiers')
plt.legend([p1[0], p2[0]], ['class 1', 'class 2'], loc='upper left')
plt.show()
| bsd-3-clause |
mayblue9/scikit-learn | sklearn/svm/tests/test_svm.py | 70 | 31674 | """
Testing for Support Vector Machine module (sklearn.svm)
TODO: remove hard coded numerical results when possible
"""
import numpy as np
import itertools
from numpy.testing import assert_array_equal, assert_array_almost_equal
from numpy.testing import assert_almost_equal
from scipy import sparse
from nose.tools import assert_raises, assert_true, assert_equal, assert_false
from sklearn.base import ChangedBehaviorWarning
from sklearn import svm, linear_model, datasets, metrics, base
from sklearn.cross_validation import train_test_split
from sklearn.datasets import make_classification, make_blobs
from sklearn.metrics import f1_score
from sklearn.metrics.pairwise import rbf_kernel
from sklearn.utils import check_random_state
from sklearn.utils import ConvergenceWarning
from sklearn.utils.validation import NotFittedError
from sklearn.utils.testing import assert_greater, assert_in, assert_less
from sklearn.utils.testing import assert_raises_regexp, assert_warns
from sklearn.utils.testing import assert_warns_message, assert_raise_message
from sklearn.utils.testing import ignore_warnings
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
Y = [1, 1, 1, 2, 2, 2]
T = [[-1, -1], [2, 2], [3, 2]]
true_result = [1, 2, 2]
# also load the iris dataset
iris = datasets.load_iris()
rng = check_random_state(42)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
def test_libsvm_parameters():
# Test parameters on classes that make use of libsvm.
clf = svm.SVC(kernel='linear').fit(X, Y)
assert_array_equal(clf.dual_coef_, [[-0.25, .25]])
assert_array_equal(clf.support_, [1, 3])
assert_array_equal(clf.support_vectors_, (X[1], X[3]))
assert_array_equal(clf.intercept_, [0.])
assert_array_equal(clf.predict(X), Y)
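# The hard-coded expectations above can be re-derived by hand: the two support
# vectors are X[1] = (-1, -1) and X[3] = (1, 1), the separating plane is
# x1 + x2 = 0 (hence a zero intercept) and the dual coefficients are -0.25/+0.25.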
def test_libsvm_iris():
# Check consistency on dataset iris.
# shuffle the dataset so that labels are not ordered
for k in ('linear', 'rbf'):
clf = svm.SVC(kernel=k).fit(iris.data, iris.target)
assert_greater(np.mean(clf.predict(iris.data) == iris.target), 0.9)
assert_array_equal(clf.classes_, np.sort(clf.classes_))
# check also the low-level API
model = svm.libsvm.fit(iris.data, iris.target.astype(np.float64))
pred = svm.libsvm.predict(iris.data, *model)
assert_greater(np.mean(pred == iris.target), .95)
model = svm.libsvm.fit(iris.data, iris.target.astype(np.float64),
kernel='linear')
pred = svm.libsvm.predict(iris.data, *model, kernel='linear')
assert_greater(np.mean(pred == iris.target), .95)
pred = svm.libsvm.cross_validation(iris.data,
iris.target.astype(np.float64), 5,
kernel='linear',
random_seed=0)
assert_greater(np.mean(pred == iris.target), .95)
# If random_seed >= 0, the libsvm rng is seeded (by calling `srand`), hence
    # we should get deterministic results (assuming that there is no other
# thread calling this wrapper calling `srand` concurrently).
pred2 = svm.libsvm.cross_validation(iris.data,
iris.target.astype(np.float64), 5,
kernel='linear',
random_seed=0)
assert_array_equal(pred, pred2)
@ignore_warnings
def test_single_sample_1d():
# Test whether SVCs work on a single sample given as a 1-d array
clf = svm.SVC().fit(X, Y)
clf.predict(X[0])
clf = svm.LinearSVC(random_state=0).fit(X, Y)
clf.predict(X[0])
def test_precomputed():
# SVC with a precomputed kernel.
# We test it with a toy dataset and with iris.
clf = svm.SVC(kernel='precomputed')
# Gram matrix for train data (square matrix)
# (we use just a linear kernel)
K = np.dot(X, np.array(X).T)
clf.fit(K, Y)
# Gram matrix for test data (rectangular matrix)
KT = np.dot(T, np.array(X).T)
pred = clf.predict(KT)
assert_raises(ValueError, clf.predict, KT.T)
assert_array_equal(clf.dual_coef_, [[-0.25, .25]])
assert_array_equal(clf.support_, [1, 3])
assert_array_equal(clf.intercept_, [0])
assert_array_almost_equal(clf.support_, [1, 3])
assert_array_equal(pred, true_result)
# Gram matrix for test data but compute KT[i,j]
# for support vectors j only.
KT = np.zeros_like(KT)
for i in range(len(T)):
for j in clf.support_:
KT[i, j] = np.dot(T[i], X[j])
pred = clf.predict(KT)
assert_array_equal(pred, true_result)
# same as before, but using a callable function instead of the kernel
# matrix. kernel is just a linear kernel
kfunc = lambda x, y: np.dot(x, y.T)
clf = svm.SVC(kernel=kfunc)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_equal(clf.dual_coef_, [[-0.25, .25]])
assert_array_equal(clf.intercept_, [0])
assert_array_almost_equal(clf.support_, [1, 3])
assert_array_equal(pred, true_result)
# test a precomputed kernel with the iris dataset
# and check parameters against a linear SVC
clf = svm.SVC(kernel='precomputed')
clf2 = svm.SVC(kernel='linear')
K = np.dot(iris.data, iris.data.T)
clf.fit(K, iris.target)
clf2.fit(iris.data, iris.target)
pred = clf.predict(K)
assert_array_almost_equal(clf.support_, clf2.support_)
assert_array_almost_equal(clf.dual_coef_, clf2.dual_coef_)
assert_array_almost_equal(clf.intercept_, clf2.intercept_)
assert_almost_equal(np.mean(pred == iris.target), .99, decimal=2)
# Gram matrix for test data but compute KT[i,j]
# for support vectors j only.
K = np.zeros_like(K)
for i in range(len(iris.data)):
for j in clf.support_:
K[i, j] = np.dot(iris.data[i], iris.data[j])
pred = clf.predict(K)
assert_almost_equal(np.mean(pred == iris.target), .99, decimal=2)
clf = svm.SVC(kernel=kfunc)
clf.fit(iris.data, iris.target)
assert_almost_equal(np.mean(pred == iris.target), .99, decimal=2)
def test_svr():
# Test Support Vector Regression
diabetes = datasets.load_diabetes()
for clf in (svm.NuSVR(kernel='linear', nu=.4, C=1.0),
svm.NuSVR(kernel='linear', nu=.4, C=10.),
svm.SVR(kernel='linear', C=10.),
svm.LinearSVR(C=10.),
svm.LinearSVR(C=10.),
):
clf.fit(diabetes.data, diabetes.target)
assert_greater(clf.score(diabetes.data, diabetes.target), 0.02)
# non-regression test; previously, BaseLibSVM would check that
# len(np.unique(y)) < 2, which must only be done for SVC
svm.SVR().fit(diabetes.data, np.ones(len(diabetes.data)))
svm.LinearSVR().fit(diabetes.data, np.ones(len(diabetes.data)))
def test_linearsvr():
# check that SVR(kernel='linear') and LinearSVC() give
# comparable results
diabetes = datasets.load_diabetes()
lsvr = svm.LinearSVR(C=1e3).fit(diabetes.data, diabetes.target)
score1 = lsvr.score(diabetes.data, diabetes.target)
svr = svm.SVR(kernel='linear', C=1e3).fit(diabetes.data, diabetes.target)
score2 = svr.score(diabetes.data, diabetes.target)
assert np.linalg.norm(lsvr.coef_ - svr.coef_) / np.linalg.norm(svr.coef_) < .1
assert np.abs(score1 - score2) < 0.1
def test_svr_errors():
X = [[0.0], [1.0]]
y = [0.0, 0.5]
# Bad kernel
clf = svm.SVR(kernel=lambda x, y: np.array([[1.0]]))
clf.fit(X, y)
assert_raises(ValueError, clf.predict, X)
def test_oneclass():
# Test OneClassSVM
clf = svm.OneClassSVM()
clf.fit(X)
pred = clf.predict(T)
assert_array_almost_equal(pred, [-1, -1, -1])
assert_array_almost_equal(clf.intercept_, [-1.008], decimal=3)
assert_array_almost_equal(clf.dual_coef_,
[[0.632, 0.233, 0.633, 0.234, 0.632, 0.633]],
decimal=3)
assert_raises(ValueError, lambda: clf.coef_)
def test_oneclass_decision_function():
# Test OneClassSVM decision function
clf = svm.OneClassSVM()
rnd = check_random_state(2)
# Generate train data
X = 0.3 * rnd.randn(100, 2)
X_train = np.r_[X + 2, X - 2]
# Generate some regular novel observations
X = 0.3 * rnd.randn(20, 2)
X_test = np.r_[X + 2, X - 2]
# Generate some abnormal novel observations
X_outliers = rnd.uniform(low=-4, high=4, size=(20, 2))
# fit the model
clf = svm.OneClassSVM(nu=0.1, kernel="rbf", gamma=0.1)
clf.fit(X_train)
# predict things
y_pred_test = clf.predict(X_test)
assert_greater(np.mean(y_pred_test == 1), .9)
y_pred_outliers = clf.predict(X_outliers)
assert_greater(np.mean(y_pred_outliers == -1), .9)
dec_func_test = clf.decision_function(X_test)
assert_array_equal((dec_func_test > 0).ravel(), y_pred_test == 1)
dec_func_outliers = clf.decision_function(X_outliers)
assert_array_equal((dec_func_outliers > 0).ravel(), y_pred_outliers == 1)
def test_tweak_params():
# Make sure some tweaking of parameters works.
# We change clf.dual_coef_ at run time and expect .predict() to change
# accordingly. Notice that this is not trivial since it involves a lot
# of C/Python copying in the libsvm bindings.
# The success of this test ensures that the mapping between libsvm and
# the python classifier is complete.
clf = svm.SVC(kernel='linear', C=1.0)
clf.fit(X, Y)
assert_array_equal(clf.dual_coef_, [[-.25, .25]])
assert_array_equal(clf.predict([[-.1, -.1]]), [1])
clf._dual_coef_ = np.array([[.0, 1.]])
assert_array_equal(clf.predict([[-.1, -.1]]), [2])
def test_probability():
# Predict probabilities using SVC
# This uses cross validation, so we use a slightly bigger testing set.
for clf in (svm.SVC(probability=True, random_state=0, C=1.0),
svm.NuSVC(probability=True, random_state=0)):
clf.fit(iris.data, iris.target)
prob_predict = clf.predict_proba(iris.data)
assert_array_almost_equal(
np.sum(prob_predict, 1), np.ones(iris.data.shape[0]))
assert_true(np.mean(np.argmax(prob_predict, 1)
== clf.predict(iris.data)) > 0.9)
assert_almost_equal(clf.predict_proba(iris.data),
np.exp(clf.predict_log_proba(iris.data)), 8)
def test_decision_function():
# Test decision_function
# Sanity check, test that decision_function implemented in python
# returns the same as the one in libsvm
# multi class:
clf = svm.SVC(kernel='linear', C=0.1,
decision_function_shape='ovo').fit(iris.data, iris.target)
dec = np.dot(iris.data, clf.coef_.T) + clf.intercept_
assert_array_almost_equal(dec, clf.decision_function(iris.data))
# binary:
clf.fit(X, Y)
dec = np.dot(X, clf.coef_.T) + clf.intercept_
prediction = clf.predict(X)
assert_array_almost_equal(dec.ravel(), clf.decision_function(X))
assert_array_almost_equal(
prediction,
clf.classes_[(clf.decision_function(X) > 0).astype(np.int)])
expected = np.array([-1., -0.66, -1., 0.66, 1., 1.])
assert_array_almost_equal(clf.decision_function(X), expected, 2)
# kernel binary:
clf = svm.SVC(kernel='rbf', gamma=1, decision_function_shape='ovo')
clf.fit(X, Y)
rbfs = rbf_kernel(X, clf.support_vectors_, gamma=clf.gamma)
dec = np.dot(rbfs, clf.dual_coef_.T) + clf.intercept_
assert_array_almost_equal(dec.ravel(), clf.decision_function(X))
def test_decision_function_shape():
# check that decision_function_shape='ovr' gives
# correct shape and is consistent with predict
clf = svm.SVC(kernel='linear', C=0.1,
decision_function_shape='ovr').fit(iris.data, iris.target)
dec = clf.decision_function(iris.data)
assert_equal(dec.shape, (len(iris.data), 3))
assert_array_equal(clf.predict(iris.data), np.argmax(dec, axis=1))
# with five classes:
X, y = make_blobs(n_samples=80, centers=5, random_state=0)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
clf = svm.SVC(kernel='linear', C=0.1,
decision_function_shape='ovr').fit(X_train, y_train)
dec = clf.decision_function(X_test)
assert_equal(dec.shape, (len(X_test), 5))
assert_array_equal(clf.predict(X_test), np.argmax(dec, axis=1))
    # check the shape of the decision function when decision_function_shape='ovo'
clf = svm.SVC(kernel='linear', C=0.1,
decision_function_shape='ovo').fit(X_train, y_train)
dec = clf.decision_function(X_train)
assert_equal(dec.shape, (len(X_train), 10))
# check deprecation warning
clf.decision_function_shape = None
msg = "change the shape of the decision function"
dec = assert_warns_message(ChangedBehaviorWarning, msg,
clf.decision_function, X_train)
assert_equal(dec.shape, (len(X_train), 10))
def test_svr_decision_function():
# Test SVR's decision_function
# Sanity check, test that decision_function implemented in python
# returns the same as the one in libsvm
X = iris.data
y = iris.target
# linear kernel
reg = svm.SVR(kernel='linear', C=0.1).fit(X, y)
dec = np.dot(X, reg.coef_.T) + reg.intercept_
assert_array_almost_equal(dec.ravel(), reg.decision_function(X).ravel())
# rbf kernel
reg = svm.SVR(kernel='rbf', gamma=1).fit(X, y)
rbfs = rbf_kernel(X, reg.support_vectors_, gamma=reg.gamma)
dec = np.dot(rbfs, reg.dual_coef_.T) + reg.intercept_
assert_array_almost_equal(dec.ravel(), reg.decision_function(X).ravel())
def test_weight():
# Test class weights
clf = svm.SVC(class_weight={1: 0.1})
# we give a small weights to class 1
clf.fit(X, Y)
# so all predicted values belong to class 2
assert_array_almost_equal(clf.predict(X), [2] * 6)
X_, y_ = make_classification(n_samples=200, n_features=10,
weights=[0.833, 0.167], random_state=2)
for clf in (linear_model.LogisticRegression(),
svm.LinearSVC(random_state=0), svm.SVC()):
clf.set_params(class_weight={0: .1, 1: 10})
clf.fit(X_[:100], y_[:100])
y_pred = clf.predict(X_[100:])
assert_true(f1_score(y_[100:], y_pred) > .3)
def test_sample_weights():
# Test weights on individual samples
# TODO: check on NuSVR, OneClass, etc.
clf = svm.SVC()
clf.fit(X, Y)
assert_array_equal(clf.predict([X[2]]), [1.])
sample_weight = [.1] * 3 + [10] * 3
clf.fit(X, Y, sample_weight=sample_weight)
assert_array_equal(clf.predict([X[2]]), [2.])
# test that rescaling all samples is the same as changing C
clf = svm.SVC()
clf.fit(X, Y)
dual_coef_no_weight = clf.dual_coef_
clf.set_params(C=100)
clf.fit(X, Y, sample_weight=np.repeat(0.01, len(X)))
assert_array_almost_equal(dual_coef_no_weight, clf.dual_coef_)
def test_auto_weight():
# Test class weights for imbalanced data
from sklearn.linear_model import LogisticRegression
# We take as dataset the two-dimensional projection of iris so
# that it is not separable and remove half of predictors from
# class 1.
# We add one to the targets as a non-regression test: class_weight="balanced"
    # used to work only when the labels were a range [0..K).
from sklearn.utils import compute_class_weight
X, y = iris.data[:, :2], iris.target + 1
unbalanced = np.delete(np.arange(y.size), np.where(y > 2)[0][::2])
classes = np.unique(y[unbalanced])
class_weights = compute_class_weight('balanced', classes, y[unbalanced])
assert_true(np.argmax(class_weights) == 2)
for clf in (svm.SVC(kernel='linear'), svm.LinearSVC(random_state=0),
LogisticRegression()):
# check that score is better when class='balanced' is set.
y_pred = clf.fit(X[unbalanced], y[unbalanced]).predict(X)
clf.set_params(class_weight='balanced')
y_pred_balanced = clf.fit(X[unbalanced], y[unbalanced],).predict(X)
assert_true(metrics.f1_score(y, y_pred, average='weighted')
<= metrics.f1_score(y, y_pred_balanced,
average='weighted'))
def test_bad_input():
# Test that it gives proper exception on deficient input
# impossible value of C
assert_raises(ValueError, svm.SVC(C=-1).fit, X, Y)
# impossible value of nu
clf = svm.NuSVC(nu=0.0)
assert_raises(ValueError, clf.fit, X, Y)
Y2 = Y[:-1] # wrong dimensions for labels
assert_raises(ValueError, clf.fit, X, Y2)
# Test with arrays that are non-contiguous.
for clf in (svm.SVC(), svm.LinearSVC(random_state=0)):
Xf = np.asfortranarray(X)
assert_false(Xf.flags['C_CONTIGUOUS'])
yf = np.ascontiguousarray(np.tile(Y, (2, 1)).T)
yf = yf[:, -1]
assert_false(yf.flags['F_CONTIGUOUS'])
assert_false(yf.flags['C_CONTIGUOUS'])
clf.fit(Xf, yf)
assert_array_equal(clf.predict(T), true_result)
    # error for precomputed kernels
clf = svm.SVC(kernel='precomputed')
assert_raises(ValueError, clf.fit, X, Y)
# sample_weight bad dimensions
clf = svm.SVC()
assert_raises(ValueError, clf.fit, X, Y, sample_weight=range(len(X) - 1))
# predict with sparse input when trained with dense
clf = svm.SVC().fit(X, Y)
assert_raises(ValueError, clf.predict, sparse.lil_matrix(X))
Xt = np.array(X).T
clf.fit(np.dot(X, Xt), Y)
assert_raises(ValueError, clf.predict, X)
clf = svm.SVC()
clf.fit(X, Y)
assert_raises(ValueError, clf.predict, Xt)
def test_sparse_precomputed():
clf = svm.SVC(kernel='precomputed')
sparse_gram = sparse.csr_matrix([[1, 0], [0, 1]])
try:
clf.fit(sparse_gram, [0, 1])
assert not "reached"
except TypeError as e:
assert_in("Sparse precomputed", str(e))
def test_linearsvc_parameters():
# Test possible parameter combinations in LinearSVC
# Generate list of possible parameter combinations
losses = ['hinge', 'squared_hinge', 'logistic_regression', 'foo']
penalties, duals = ['l1', 'l2', 'bar'], [True, False]
X, y = make_classification(n_samples=5, n_features=5)
for loss, penalty, dual in itertools.product(losses, penalties, duals):
clf = svm.LinearSVC(penalty=penalty, loss=loss, dual=dual)
if ((loss, penalty) == ('hinge', 'l1') or
(loss, penalty, dual) == ('hinge', 'l2', False) or
(penalty, dual) == ('l1', True) or
loss == 'foo' or penalty == 'bar'):
assert_raises_regexp(ValueError,
"Unsupported set of arguments.*penalty='%s.*"
"loss='%s.*dual=%s"
% (penalty, loss, dual),
clf.fit, X, y)
else:
clf.fit(X, y)
# Incorrect loss value - test if explicit error message is raised
assert_raises_regexp(ValueError, ".*loss='l3' is not supported.*",
svm.LinearSVC(loss="l3").fit, X, y)
# FIXME remove in 1.0
def test_linearsvx_loss_penalty_deprecations():
X, y = [[0.0], [1.0]], [0, 1]
msg = ("loss='%s' has been deprecated in favor of "
"loss='%s' as of 0.16. Backward compatibility"
" for the %s will be removed in %s")
# LinearSVC
# loss l1/L1 --> hinge
assert_warns_message(DeprecationWarning,
msg % ("l1", "hinge", "loss='l1'", "1.0"),
svm.LinearSVC(loss="l1").fit, X, y)
# loss l2/L2 --> squared_hinge
assert_warns_message(DeprecationWarning,
msg % ("L2", "squared_hinge", "loss='L2'", "1.0"),
svm.LinearSVC(loss="L2").fit, X, y)
# LinearSVR
# loss l1/L1 --> epsilon_insensitive
assert_warns_message(DeprecationWarning,
msg % ("L1", "epsilon_insensitive", "loss='L1'",
"1.0"),
svm.LinearSVR(loss="L1").fit, X, y)
# loss l2/L2 --> squared_epsilon_insensitive
assert_warns_message(DeprecationWarning,
msg % ("l2", "squared_epsilon_insensitive",
"loss='l2'", "1.0"),
svm.LinearSVR(loss="l2").fit, X, y)
# FIXME remove in 0.18
def test_linear_svx_uppercase_loss_penalty():
# Check if Upper case notation is supported by _fit_liblinear
# which is called by fit
X, y = [[0.0], [1.0]], [0, 1]
msg = ("loss='%s' has been deprecated in favor of "
"loss='%s' as of 0.16. Backward compatibility"
" for the uppercase notation will be removed in %s")
# loss SQUARED_hinge --> squared_hinge
assert_warns_message(DeprecationWarning,
msg % ("SQUARED_hinge", "squared_hinge", "0.18"),
svm.LinearSVC(loss="SQUARED_hinge").fit, X, y)
# penalty L2 --> l2
assert_warns_message(DeprecationWarning,
msg.replace("loss", "penalty")
% ("L2", "l2", "0.18"),
svm.LinearSVC(penalty="L2").fit, X, y)
# loss EPSILON_INSENSITIVE --> epsilon_insensitive
assert_warns_message(DeprecationWarning,
msg % ("EPSILON_INSENSITIVE", "epsilon_insensitive",
"0.18"),
svm.LinearSVR(loss="EPSILON_INSENSITIVE").fit, X, y)
def test_linearsvc():
# Test basic routines using LinearSVC
clf = svm.LinearSVC(random_state=0).fit(X, Y)
# by default should have intercept
assert_true(clf.fit_intercept)
assert_array_equal(clf.predict(T), true_result)
assert_array_almost_equal(clf.intercept_, [0], decimal=3)
# the same with l1 penalty
clf = svm.LinearSVC(penalty='l1', loss='squared_hinge', dual=False, random_state=0).fit(X, Y)
assert_array_equal(clf.predict(T), true_result)
# l2 penalty with dual formulation
clf = svm.LinearSVC(penalty='l2', dual=True, random_state=0).fit(X, Y)
assert_array_equal(clf.predict(T), true_result)
# l2 penalty, l1 loss
clf = svm.LinearSVC(penalty='l2', loss='hinge', dual=True, random_state=0)
clf.fit(X, Y)
assert_array_equal(clf.predict(T), true_result)
# test also decision function
dec = clf.decision_function(T)
res = (dec > 0).astype(np.int) + 1
assert_array_equal(res, true_result)
def test_linearsvc_crammer_singer():
# Test LinearSVC with crammer_singer multi-class svm
ovr_clf = svm.LinearSVC(random_state=0).fit(iris.data, iris.target)
cs_clf = svm.LinearSVC(multi_class='crammer_singer', random_state=0)
cs_clf.fit(iris.data, iris.target)
# similar prediction for ovr and crammer-singer:
assert_true((ovr_clf.predict(iris.data) ==
cs_clf.predict(iris.data)).mean() > .9)
# classifiers shouldn't be the same
assert_true((ovr_clf.coef_ != cs_clf.coef_).all())
# test decision function
assert_array_equal(cs_clf.predict(iris.data),
np.argmax(cs_clf.decision_function(iris.data), axis=1))
dec_func = np.dot(iris.data, cs_clf.coef_.T) + cs_clf.intercept_
assert_array_almost_equal(dec_func, cs_clf.decision_function(iris.data))
def test_crammer_singer_binary():
# Test Crammer-Singer formulation in the binary case
X, y = make_classification(n_classes=2, random_state=0)
for fit_intercept in (True, False):
acc = svm.LinearSVC(fit_intercept=fit_intercept,
multi_class="crammer_singer",
random_state=0).fit(X, y).score(X, y)
assert_greater(acc, 0.9)
def test_linearsvc_iris():
# Test that LinearSVC gives plausible predictions on the iris dataset
# Also, test symbolic class names (classes_).
target = iris.target_names[iris.target]
clf = svm.LinearSVC(random_state=0).fit(iris.data, target)
assert_equal(set(clf.classes_), set(iris.target_names))
assert_greater(np.mean(clf.predict(iris.data) == target), 0.8)
dec = clf.decision_function(iris.data)
pred = iris.target_names[np.argmax(dec, 1)]
assert_array_equal(pred, clf.predict(iris.data))
def test_dense_liblinear_intercept_handling(classifier=svm.LinearSVC):
# Test that dense liblinear honours intercept_scaling param
X = [[2, 1],
[3, 1],
[1, 3],
[2, 3]]
y = [0, 0, 1, 1]
clf = classifier(fit_intercept=True, penalty='l1', loss='squared_hinge',
dual=False, C=4, tol=1e-7, random_state=0)
assert_true(clf.intercept_scaling == 1, clf.intercept_scaling)
assert_true(clf.fit_intercept)
# when intercept_scaling is low the intercept value is highly "penalized"
# by regularization
clf.intercept_scaling = 1
clf.fit(X, y)
assert_almost_equal(clf.intercept_, 0, decimal=5)
# when intercept_scaling is sufficiently high, the intercept value
# is not affected by regularization
clf.intercept_scaling = 100
clf.fit(X, y)
intercept1 = clf.intercept_
assert_less(intercept1, -1)
# when intercept_scaling is sufficiently high, the intercept value
# doesn't depend on intercept_scaling value
clf.intercept_scaling = 1000
clf.fit(X, y)
intercept2 = clf.intercept_
assert_array_almost_equal(intercept1, intercept2, decimal=2)
def test_liblinear_set_coef():
# multi-class case
clf = svm.LinearSVC().fit(iris.data, iris.target)
values = clf.decision_function(iris.data)
clf.coef_ = clf.coef_.copy()
clf.intercept_ = clf.intercept_.copy()
values2 = clf.decision_function(iris.data)
assert_array_almost_equal(values, values2)
# binary-class case
X = [[2, 1],
[3, 1],
[1, 3],
[2, 3]]
y = [0, 0, 1, 1]
clf = svm.LinearSVC().fit(X, y)
values = clf.decision_function(X)
clf.coef_ = clf.coef_.copy()
clf.intercept_ = clf.intercept_.copy()
values2 = clf.decision_function(X)
assert_array_equal(values, values2)
def test_immutable_coef_property():
# Check that primal coef modification are not silently ignored
svms = [
svm.SVC(kernel='linear').fit(iris.data, iris.target),
svm.NuSVC(kernel='linear').fit(iris.data, iris.target),
svm.SVR(kernel='linear').fit(iris.data, iris.target),
svm.NuSVR(kernel='linear').fit(iris.data, iris.target),
svm.OneClassSVM(kernel='linear').fit(iris.data),
]
for clf in svms:
assert_raises(AttributeError, clf.__setattr__, 'coef_', np.arange(3))
assert_raises((RuntimeError, ValueError),
clf.coef_.__setitem__, (0, 0), 0)
def test_linearsvc_verbose():
# stdout: redirect
import os
stdout = os.dup(1) # save original stdout
os.dup2(os.pipe()[1], 1) # replace it
# actual call
clf = svm.LinearSVC(verbose=1)
clf.fit(X, Y)
# stdout: restore
os.dup2(stdout, 1) # restore original stdout
def test_svc_clone_with_callable_kernel():
# create SVM with callable linear kernel, check that results are the same
# as with built-in linear kernel
svm_callable = svm.SVC(kernel=lambda x, y: np.dot(x, y.T),
probability=True, random_state=0,
decision_function_shape='ovr')
# clone for checking clonability with lambda functions..
svm_cloned = base.clone(svm_callable)
svm_cloned.fit(iris.data, iris.target)
svm_builtin = svm.SVC(kernel='linear', probability=True, random_state=0,
decision_function_shape='ovr')
svm_builtin.fit(iris.data, iris.target)
assert_array_almost_equal(svm_cloned.dual_coef_,
svm_builtin.dual_coef_)
assert_array_almost_equal(svm_cloned.intercept_,
svm_builtin.intercept_)
assert_array_equal(svm_cloned.predict(iris.data),
svm_builtin.predict(iris.data))
assert_array_almost_equal(svm_cloned.predict_proba(iris.data),
svm_builtin.predict_proba(iris.data),
decimal=4)
assert_array_almost_equal(svm_cloned.decision_function(iris.data),
svm_builtin.decision_function(iris.data))
def test_svc_bad_kernel():
svc = svm.SVC(kernel=lambda x, y: x)
assert_raises(ValueError, svc.fit, X, Y)
def test_timeout():
a = svm.SVC(kernel=lambda x, y: np.dot(x, y.T), probability=True,
random_state=0, max_iter=1)
assert_warns(ConvergenceWarning, a.fit, X, Y)
def test_unfitted():
X = "foo!" # input validation not required when SVM not fitted
clf = svm.SVC()
assert_raises_regexp(Exception, r".*\bSVC\b.*\bnot\b.*\bfitted\b",
clf.predict, X)
clf = svm.NuSVR()
assert_raises_regexp(Exception, r".*\bNuSVR\b.*\bnot\b.*\bfitted\b",
clf.predict, X)
# ignore convergence warnings from max_iter=1
@ignore_warnings
def test_consistent_proba():
a = svm.SVC(probability=True, max_iter=1, random_state=0)
proba_1 = a.fit(X, Y).predict_proba(X)
a = svm.SVC(probability=True, max_iter=1, random_state=0)
proba_2 = a.fit(X, Y).predict_proba(X)
assert_array_almost_equal(proba_1, proba_2)
def test_linear_svc_convergence_warnings():
# Test that warnings are raised if model does not converge
lsvc = svm.LinearSVC(max_iter=2, verbose=1)
assert_warns(ConvergenceWarning, lsvc.fit, X, Y)
assert_equal(lsvc.n_iter_, 2)
def test_svr_coef_sign():
# Test that SVR(kernel="linear") has coef_ with the right sign.
# Non-regression test for #2933.
X = np.random.RandomState(21).randn(10, 3)
y = np.random.RandomState(12).randn(10)
for svr in [svm.SVR(kernel='linear'), svm.NuSVR(kernel='linear'),
svm.LinearSVR()]:
svr.fit(X, y)
assert_array_almost_equal(svr.predict(X),
np.dot(X, svr.coef_.ravel()) + svr.intercept_)
def test_linear_svc_intercept_scaling():
# Test that the right error message is thrown when intercept_scaling <= 0
for i in [-1, 0]:
lsvc = svm.LinearSVC(intercept_scaling=i)
msg = ('Intercept scaling is %r but needs to be greater than 0.'
' To disable fitting an intercept,'
' set fit_intercept=False.' % lsvc.intercept_scaling)
assert_raise_message(ValueError, msg, lsvc.fit, X, Y)
def test_lsvc_intercept_scaling_zero():
# Test that intercept_scaling is ignored when fit_intercept is False
lsvc = svm.LinearSVC(fit_intercept=False)
lsvc.fit(X, Y)
assert_equal(lsvc.intercept_, 0.)
def test_hasattr_predict_proba():
# Method must be (un)available before or after fit, switched by
# `probability` param
G = svm.SVC(probability=True)
assert_true(hasattr(G, 'predict_proba'))
G.fit(iris.data, iris.target)
assert_true(hasattr(G, 'predict_proba'))
G = svm.SVC(probability=False)
assert_false(hasattr(G, 'predict_proba'))
G.fit(iris.data, iris.target)
assert_false(hasattr(G, 'predict_proba'))
# Switching to `probability=True` after fitting should make
# predict_proba available, but calling it must not work:
G.probability = True
assert_true(hasattr(G, 'predict_proba'))
msg = "predict_proba is not available when fitted with probability=False"
assert_raise_message(NotFittedError, msg, G.predict_proba, iris.data)
| bsd-3-clause |
bloyl/mne-python | mne/viz/_brain/_scraper.py | 12 | 4173 | import os
import os.path as op
from distutils.version import LooseVersion
from ._brain import Brain
class _BrainScraper(object):
"""Scrape Brain objects."""
def __repr__(self):
return '<BrainScraper>'
def __call__(self, block, block_vars, gallery_conf):
rst = ''
for brain in list(block_vars['example_globals'].values()):
# Only need to process if it's a brain with a time_viewer
# with traces on and shown in the same window, otherwise
# PyVista and matplotlib scrapers can just do the work
if (not isinstance(brain, Brain)) or brain._closed:
continue
import matplotlib
from matplotlib import animation, pyplot as plt
from sphinx_gallery.scrapers import matplotlib_scraper
img = brain.screenshot(time_viewer=True)
dpi = 100.
figsize = (img.shape[1] / dpi, img.shape[0] / dpi)
fig = plt.figure(figsize=figsize, dpi=dpi)
ax = plt.Axes(fig, [0, 0, 1, 1])
fig.add_axes(ax)
img = ax.imshow(img)
movie_key = '# brain.save_movie'
if movie_key in block[1]:
kwargs = dict()
# Parse our parameters
lines = block[1].splitlines()
for li, line in enumerate(block[1].splitlines()):
if line.startswith(movie_key):
line = line[len(movie_key):].replace('..., ', '')
for ni in range(1, 5): # should be enough
if len(lines) > li + ni and \
lines[li + ni].startswith('# '):
line = line + lines[li + ni][1:].strip()
else:
break
assert line.startswith('(') and line.endswith(')')
kwargs.update(eval(f'dict{line}'))
for key, default in [('time_dilation', 4),
('framerate', 24),
('tmin', None),
('tmax', None),
('interpolation', None),
('time_viewer', False)]:
if key not in kwargs:
kwargs[key] = default
kwargs.pop('filename', None) # always omit this one
if brain.time_viewer:
assert kwargs['time_viewer'], 'Must use time_viewer=True'
frames = brain._make_movie_frames(callback=None, **kwargs)
# Turn them into an animation
def func(frame):
img.set_data(frame)
return [img]
anim = animation.FuncAnimation(
fig, func=func, frames=frames, blit=True,
interval=1000. / kwargs['framerate'])
# Out to sphinx-gallery:
#
# 1. A static image but hide it (useful for carousel)
if LooseVersion(matplotlib.__version__) >= \
LooseVersion('3.3.1') and \
animation.FFMpegWriter.isAvailable():
writer = 'ffmpeg'
elif animation.ImageMagickWriter.isAvailable():
writer = 'imagemagick'
else:
writer = None
static_fname = next(block_vars['image_path_iterator'])
static_fname = static_fname[:-4] + '.gif'
anim.save(static_fname, writer=writer, dpi=dpi)
rel_fname = op.relpath(static_fname, gallery_conf['src_dir'])
rel_fname = rel_fname.replace(os.sep, '/').lstrip('/')
rst += f'\n.. image:: /{rel_fname}\n :class: hidden\n'
# 2. An animation that will be embedded and visible
block_vars['example_globals']['_brain_anim_'] = anim
brain.close()
rst += matplotlib_scraper(block, block_vars, gallery_conf)
return rst
| bsd-3-clause |
e-sr/SDWirkungNi | DSP/functions.py | 1 | 7876 | import numpy as np
import matplotlib.pyplot as plt
from scipy.io import loadmat, wavfile
import json
import acoustics
from acoustics import Signal
import peakutils
import statsmodels
import statsmodels.api as sm
import sys  # needed for sys.stderr in train_speed
def signal_from_mat(matfile):
"""
Return signals contained in .mat file from EMPA
- for sections Q1 and Q4 the files contain the LS and MIC signals with time vector
- for sections Q2 and Q3 the files contain the MIC signal with time vector
param:
-----
matfile: str
path of the .mat file
return:
------
d: dict
dict containing the signals
"""
dataMat = loadmat(matfile)
d = {}
# structure of .mat files defined by EMPA
varKey =['LSVorbeifahrt', 'Sound_', 'Vorbeifahrt']
tKey =['LSVorbeifahrt_time', 'Sound__time', 'Vorbeifahrt_time']
print(dataMat.keys())
varName = ['LS', 'MIC', 'MIC']
for vk, tk, name in zip(varKey,tKey,varName):
try:
data = dataMat[vk]
except KeyError:
print('no key ' , vk )
else:
t = dataMat[tk].ravel()
sR = int(np.round(1/(t[1]-t[0])))
assert(sR == int(np.round(1/(t[-1]-t[-2]))))
d[name] = Signal(data.ravel(), sR, t.min())
return(d)
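# A minimal usage sketch of signal_from_mat (the file name below is
# hypothetical, following the 'Ereignisse/<ID>.mat' convention used in
# fill_passby_with_signals):
#
#     sigs = signal_from_mat('Ereignisse/Q1_001.mat')
#     mic = sigs['MIC']       # acoustics.Signal, sample rate recovered from t
#     ls = sigs.get('LS')     # only present for sections Q1 and Q4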
def fill_passby_with_signals(passby):
"""
load signals in passby dict
param:
-----
passby: dict
dict containing passby
effect:
------
fill passby with MIC and LS signals for given passby and section
"""
#for abschnitt and values in E1
for k, pB in passby.items():
print('ereigniss ', k ,':')
for abschnitt,v in pB.items():
if not abschnitt == 'Zugstyp':
#signal
                print('initiate abschnitt: ' + abschnitt + '; Index: ' + v['ID'], end=' | ')
matfile = 'Ereignisse/'+ v['ID'] + '.mat'
v['signals'] = signal_from_mat(matfile)
#correct LAF FAMOS time
# t0 = v['signals']['MIC'].t0
# v['LAF']['FAMOS'][0] = np.array(v['LAF']['FAMOS'][0])+t0
print('\n---------')
def detect_weel_times(LSsn, decimation = 8):
"""
Return the passby times of the train axes
param:
------
LSsn: Signal
LS signal
decimation: int pow of 2
decimate and filter the signal
return:
------
- tPeaks: np.array
passby times of the train axes
"""
s = LSsn.decimate(decimation)
y = np.array(s)
t = s.times()
# define the minimal possible distance in time between two axes
maxSpeed = 200/3.6
axleDistance = 1.5
dt = axleDistance/maxSpeed
# minimal distance in frames
min_dist = int(np.round(dt * s.fs))
# use peaksUtils module for detecting maxima
indexes = peakutils.indexes(y, thres = 0.05, min_dist = min_dist)
tPeaks = peakutils.interpolate(t, y, ind=indexes)
    print('Minimal time interval between maxima is:', dt, ', which is equivalent to', min_dist, 'samples')
return tPeaks
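# A minimal usage sketch of detect_weel_times, assuming `ls` is the light
# barrier ('LS') Signal returned by signal_from_mat (data are hypothetical):
#
#     tPeaks = detect_weel_times(ls, decimation=8)
#
# With the hard-coded limits above (max speed 200 km/h, axle distance 1.5 m)
# two detected maxima must be at least 1.5/(200/3.6) ~ 0.027 s apart.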
def train_speed(tPeaks, axleDistance = 1.8, plot = False):
"""
Calculate average train speed and delta speed using robust regression
params:
--------
tPeaks: np.array
array of axle passby times
axleDistance: float
distance in meter between two axes in a boogie
https://de.wikipedia.org/wiki/Drehgestelltypen_%28Schweiz%29
plot: bool
visualize calculations
return:
--------
meanV: float
average speed of the train
    deltaV: float
        speed change (delta speed) of the train over the passby
    (t, vkmh) or (t, vkmh, f, est): tuple
        bogie passby times and bogie speeds; if plot is True, additionally
        the matplotlib figure and the fitted statsmodels result
"""
v_calc = lambda t1, t2: 3.6*axleDistance/abs(t1-t2)
#control if nAxes even
nAxes = len(tPeaks)
try:
assert(nAxes%2==0)
except AssertionError:
        print('number of detected axles not a multiple of 2', file=sys.stderr)
#calc speed
vkmh = np.array([v_calc(t1,t2) for t1,t2 in tPeaks.reshape(nAxes//2,2)])
#calt time
t = np.array([np.mean(t) for t in tPeaks.reshape(nAxes//2,2)])
#stats sm
#weightning
#http://statsmodels.sourceforge.net/devel/examples/
X = sm.add_constant(t)
M = sm.robust.norms.TrimmedMean(1)
est = sm.RLM(vkmh, X, M)
est = est.fit()
#calculate the predicted values
vkmh_hat = est.predict(X)
# calculate delta speed
deltaV = np.round(3.6*est.params[1]*abs(tPeaks.min()-tPeaks.max()),1)
# calculate mean speed
meanV = vkmh_hat.mean()
# output if plot = True
if plot:
print(est.summary())
# plot
f,ax = plt.subplots()
ax.scatter(t,vkmh,label='bogiespeeds')
ax.set_ybound(80,120)
# Add the mean speed
ax.axhline(vkmh_hat.mean(),alpha = 0.5, lw=2, color = 'green',\
label='average speed of estimate')
# Add the regression line, colored in red
t_prime = np.linspace(tPeaks.min(), tPeaks.max(), 100)
ax.plot(t_prime, est.predict(sm.add_constant(t_prime)), 'r', alpha=0.7,\
lw=2, label='estimate')
# legend
ax.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=3,
ncol=2, mode="expand", borderaxespad=0.)
return meanV, deltaV, ( t, vkmh,f, est)
else:
return meanV, deltaV, ( t, vkmh)
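# Worked example of the per-bogie speed formula used in train_speed (numbers
# are hypothetical): with the default axleDistance of 1.8 m, two axles of one
# bogie passing 0.072 s apart give
#
#     v = 3.6 * 1.8 / 0.072 = 90.0 km/h
#
# train_speed then fits a robust line (RLM with a trimmed-mean norm) through
# the per-bogie speeds over time; the slope yields deltaV and the fitted
# values yield the mean speed.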
def level_from_octBank( octFilterBank, lType = 'leq' ):
"""
calculate level and spektrum given filtered signals
param:
-----
octFiltBank: Signal
multichannel signal where channels are the output of a filter bank
lType: str `sel` `leq`
type of integration on the signals
return:
------
spektrum: np.array
integrated filterbank
level: float
integrated spektrum levels
"""
if lType == 'sel':
spektrum = octFilterBank.sound_exposure_level().T
elif lType == 'leq':
spektrum = octFilterBank.leq().T
level = 10*np.log10((10**(spektrum/10)).sum())
return(np.round(spektrum,2) , np.round(level,2))
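# Numeric sketch of the energetic (dB) summation performed in
# level_from_octBank, using three hypothetical band levels:
#
#     bands = np.array([60.0, 63.0, 66.0])
#     10 * np.log10((10 ** (bands / 10)).sum())   # ~68.4 dB overall level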
def cut_third_oct_spectrum(octFilterBank, tInterval , lType = 'sel'):
"""
    Integrate octFilterBank signals to obtain spektrum and level. The integration intervals are passed with the dict tInterval.
param:
-----
octFilterBank: Signal
multichannel signal where channels are the output of a filter bank
lType: str `sel` `leq`
type of integration on the signals
tInterval:dict
dict with integration intervals
return:
------
spektrum: dict
dict containig spectrum calculations for given intervals
levels: dict
dict containig levels calculations
"""
level = {}
spektrum = {}
if not isinstance(tInterval, dict):
print('tInterval has to be a dict of tuples')
raise( TypeError())
t = octFilterBank.times()
for k,(t1,t2) in tInterval.items():
if k == 'full':
tInterval[k] = (t.min(),t.max())
mask = np.logical_and(t>t1 ,t<t2)
spektrum[k] , level[k] = level_from_octBank(octFilterBank[:,mask], lType)
return spektrum, level
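# A minimal usage sketch of cut_third_oct_spectrum, assuming `octBank` is a
# multichannel Signal coming out of a third-octave filter bank (names and
# interval values below are hypothetical):
#
#     tInterval = {'pre': (0.0, 1.2), 'passby': (1.2, 5.8)}
#     spektrum, level = cut_third_oct_spectrum(octBank, tInterval, lType='sel')
#     level['passby']   # integrated level for the passby interval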
class MyEncoder(json.JSONEncoder):
def default(self, obj):
"""If input object is an ndarray it will be converted into a dict
holding dtype, shape and the data, base64 encoded.
"""
if isinstance(obj, Signal):
return None
if isinstance(obj, np.ndarray):
return obj.tolist()
return json.JSONEncoder(self, obj)
REF_P = 2e-5
def level_from_spect(spect):
return 10*np.log10((10**(spect/10)).sum())
def level_from_LAF(LAF, dt=0.1):
return 10*np.log10((10**(LAF/10)).sum()/len(LAF))
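# Note: REF_P above is the standard reference sound pressure (20 micropascal).
# level_from_LAF returns the energy-equivalent average of an LAF time series,
# e.g. with hypothetical values:
#
#     LAF = np.array([70.0, 72.0, 71.0])
#     level_from_LAF(LAF)   # 10*log10(mean(10**(LAF/10))) ~= 71.1 dB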
| cc0-1.0 |
WangWenjun559/Weiss | summary/sumy/sklearn/decomposition/__init__.py | 99 | 1331 | """
The :mod:`sklearn.decomposition` module includes matrix decomposition
algorithms, including among others PCA, NMF or ICA. Most of the algorithms of
this module can be regarded as dimensionality reduction techniques.
"""
from .nmf import NMF, ProjectedGradientNMF
from .pca import PCA, RandomizedPCA
from .incremental_pca import IncrementalPCA
from .kernel_pca import KernelPCA
from .sparse_pca import SparsePCA, MiniBatchSparsePCA
from .truncated_svd import TruncatedSVD
from .fastica_ import FastICA, fastica
from .dict_learning import (dict_learning, dict_learning_online, sparse_encode,
DictionaryLearning, MiniBatchDictionaryLearning,
SparseCoder)
from .factor_analysis import FactorAnalysis
from ..utils.extmath import randomized_svd
__all__ = ['DictionaryLearning',
'FastICA',
'IncrementalPCA',
'KernelPCA',
'MiniBatchDictionaryLearning',
'MiniBatchSparsePCA',
'NMF',
'PCA',
'ProjectedGradientNMF',
'RandomizedPCA',
'SparseCoder',
'SparsePCA',
'dict_learning',
'dict_learning_online',
'fastica',
'randomized_svd',
'sparse_encode',
'FactorAnalysis',
'TruncatedSVD']
| apache-2.0 |
afloren/nipype | nipype/algorithms/metrics.py | 9 | 26230 | # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
'''
Image assessment algorithms. Typical overlap and error computation
measures to evaluate results from other processing units.
Change directory to provide relative paths for doctests
>>> import os
>>> filepath = os.path.dirname( os.path.realpath( __file__ ) )
>>> datadir = os.path.realpath(os.path.join(filepath, '../testing/data'))
>>> os.chdir(datadir)
'''
import os
import os.path as op
import nibabel as nb
import numpy as np
from math import floor, ceil
from scipy.ndimage.morphology import grey_dilation
from scipy.ndimage.morphology import binary_erosion
from scipy.spatial.distance import cdist, euclidean, dice, jaccard
from scipy.ndimage.measurements import center_of_mass, label
from scipy.special import legendre
import scipy.io as sio
import itertools
import scipy.stats as stats
from .. import logging
from ..utils.misc import package_check
from ..interfaces.base import (BaseInterface, traits, TraitedSpec, File,
InputMultiPath, OutputMultiPath,
BaseInterfaceInputSpec, isdefined)
from ..utils.filemanip import fname_presuffix, split_filename
iflogger = logging.getLogger('interface')
class DistanceInputSpec(BaseInterfaceInputSpec):
volume1 = File(exists=True, mandatory=True,
desc="Has to have the same dimensions as volume2.")
volume2 = File(
exists=True, mandatory=True,
desc="Has to have the same dimensions as volume1."
)
method = traits.Enum(
"eucl_min", "eucl_cog", "eucl_mean", "eucl_wmean", "eucl_max",
desc='""eucl_min": Euclidean distance between two closest points\
"eucl_cog": mean Euclidian distance between the Center of Gravity\
of volume1 and CoGs of volume2\
"eucl_mean": mean Euclidian minimum distance of all volume2 voxels\
to volume1\
"eucl_wmean": mean Euclidian minimum distance of all volume2 voxels\
to volume1 weighted by their values\
"eucl_max": maximum over minimum Euclidian distances of all volume2\
voxels to volume1 (also known as the Hausdorff distance)',
usedefault=True
)
mask_volume = File(
exists=True, desc="calculate overlap only within this mask.")
class DistanceOutputSpec(TraitedSpec):
distance = traits.Float()
point1 = traits.Array(shape=(3,))
point2 = traits.Array(shape=(3,))
histogram = File()
class Distance(BaseInterface):
"""Calculates distance between two volumes.
"""
input_spec = DistanceInputSpec
output_spec = DistanceOutputSpec
_hist_filename = "hist.pdf"
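    # A minimal usage sketch (file names are hypothetical; see
    # DistanceInputSpec above for the available ``method`` choices):
    #
    #     >>> dist = Distance(volume1='seg1.nii', volume2='seg2.nii',
    #     ...                 method='eucl_cog')
    #     >>> res = dist.run()  # doctest: +SKIP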
def _find_border(self, data):
eroded = binary_erosion(data)
border = np.logical_and(data, np.logical_not(eroded))
return border
def _get_coordinates(self, data, affine):
if len(data.shape) == 4:
data = data[:, :, :, 0]
indices = np.vstack(np.nonzero(data))
indices = np.vstack((indices, np.ones(indices.shape[1])))
coordinates = np.dot(affine, indices)
return coordinates[:3, :]
def _eucl_min(self, nii1, nii2):
origdata1 = nii1.get_data().astype(np.bool)
border1 = self._find_border(origdata1)
origdata2 = nii2.get_data().astype(np.bool)
border2 = self._find_border(origdata2)
set1_coordinates = self._get_coordinates(border1, nii1.get_affine())
set2_coordinates = self._get_coordinates(border2, nii2.get_affine())
dist_matrix = cdist(set1_coordinates.T, set2_coordinates.T)
(point1, point2) = np.unravel_index(
np.argmin(dist_matrix), dist_matrix.shape)
return (euclidean(set1_coordinates.T[point1, :],
set2_coordinates.T[point2, :]),
set1_coordinates.T[point1, :],
set2_coordinates.T[point2, :])
def _eucl_cog(self, nii1, nii2):
origdata1 = nii1.get_data().astype(np.bool)
cog_t = np.array(center_of_mass(origdata1)).reshape(-1, 1)
cog_t = np.vstack((cog_t, np.array([1])))
cog_t_coor = np.dot(nii1.get_affine(), cog_t)[:3, :]
origdata2 = nii2.get_data().astype(np.bool)
(labeled_data, n_labels) = label(origdata2)
cogs = np.ones((4, n_labels))
for i in range(n_labels):
cogs[:3, i] = np.array(center_of_mass(origdata2,
labeled_data, i + 1))
cogs_coor = np.dot(nii2.get_affine(), cogs)[:3, :]
dist_matrix = cdist(cog_t_coor.T, cogs_coor.T)
return np.mean(dist_matrix)
def _eucl_mean(self, nii1, nii2, weighted=False):
origdata1 = nii1.get_data().astype(np.bool)
border1 = self._find_border(origdata1)
origdata2 = nii2.get_data().astype(np.bool)
set1_coordinates = self._get_coordinates(border1, nii1.get_affine())
set2_coordinates = self._get_coordinates(origdata2, nii2.get_affine())
dist_matrix = cdist(set1_coordinates.T, set2_coordinates.T)
min_dist_matrix = np.amin(dist_matrix, axis=0)
import matplotlib.pyplot as plt
plt.figure()
plt.hist(min_dist_matrix, 50, normed=1, facecolor='green')
plt.savefig(self._hist_filename)
plt.clf()
plt.close()
if weighted:
return np.average(
min_dist_matrix,
weights=nii2.get_data()[origdata2].flat
)
else:
return np.mean(min_dist_matrix)
def _eucl_max(self, nii1, nii2):
origdata1 = nii1.get_data()
origdata1 = np.logical_not(
np.logical_or(origdata1 == 0, np.isnan(origdata1)))
origdata2 = nii2.get_data()
origdata2 = np.logical_not(
np.logical_or(origdata2 == 0, np.isnan(origdata2)))
if isdefined(self.inputs.mask_volume):
maskdata = nb.load(self.inputs.mask_volume).get_data()
maskdata = np.logical_not(
np.logical_or(maskdata == 0, np.isnan(maskdata)))
origdata1 = np.logical_and(maskdata, origdata1)
origdata2 = np.logical_and(maskdata, origdata2)
if origdata1.max() == 0 or origdata2.max() == 0:
return np.NaN
border1 = self._find_border(origdata1)
border2 = self._find_border(origdata2)
set1_coordinates = self._get_coordinates(border1, nii1.get_affine())
set2_coordinates = self._get_coordinates(border2, nii2.get_affine())
distances = cdist(set1_coordinates.T, set2_coordinates.T)
mins = np.concatenate(
(np.amin(distances, axis=0), np.amin(distances, axis=1)))
return np.max(mins)
def _run_interface(self, runtime):
nii1 = nb.load(self.inputs.volume1)
nii2 = nb.load(self.inputs.volume2)
if self.inputs.method == "eucl_min":
self._distance, self._point1, self._point2 = self._eucl_min(
nii1, nii2)
elif self.inputs.method == "eucl_cog":
self._distance = self._eucl_cog(nii1, nii2)
elif self.inputs.method == "eucl_mean":
self._distance = self._eucl_mean(nii1, nii2)
elif self.inputs.method == "eucl_wmean":
self._distance = self._eucl_mean(nii1, nii2, weighted=True)
elif self.inputs.method == "eucl_max":
self._distance = self._eucl_max(nii1, nii2)
return runtime
def _list_outputs(self):
outputs = self._outputs().get()
outputs['distance'] = self._distance
if self.inputs.method == "eucl_min":
outputs['point1'] = self._point1
outputs['point2'] = self._point2
elif self.inputs.method in ["eucl_mean", "eucl_wmean"]:
outputs['histogram'] = os.path.abspath(self._hist_filename)
return outputs
class OverlapInputSpec(BaseInterfaceInputSpec):
volume1 = File(exists=True, mandatory=True,
desc='Has to have the same dimensions as volume2.')
volume2 = File(exists=True, mandatory=True,
desc='Has to have the same dimensions as volume1.')
mask_volume = File(exists=True,
desc='calculate overlap only within this mask.')
bg_overlap = traits.Bool(False, usedefault=True, mandatory=True,
desc='consider zeros as a label')
out_file = File('diff.nii', usedefault=True)
weighting = traits.Enum('none', 'volume', 'squared_vol', usedefault=True,
desc=('\'none\': no class-overlap weighting is '
'performed. \'volume\': computed class-'
'overlaps are weighted by class volume '
'\'squared_vol\': computed class-overlaps '
'are weighted by the squared volume of '
'the class'))
vol_units = traits.Enum('voxel', 'mm', mandatory=True, usedefault=True,
desc='units for volumes')
class OverlapOutputSpec(TraitedSpec):
jaccard = traits.Float(desc='averaged jaccard index')
dice = traits.Float(desc='averaged dice index')
roi_ji = traits.List(traits.Float(),
desc=('the Jaccard index (JI) per ROI'))
roi_di = traits.List(traits.Float(), desc=('the Dice index (DI) per ROI'))
volume_difference = traits.Float(desc=('averaged volume difference'))
roi_voldiff = traits.List(traits.Float(),
desc=('volume differences of ROIs'))
labels = traits.List(traits.Int(),
desc=('detected labels'))
diff_file = File(exists=True,
desc='error map of differences')
class Overlap(BaseInterface):
"""
Calculates Dice and Jaccard's overlap measures between two ROI maps.
The interface is backwards compatible with the former version in
which only binary files were accepted.
The averaged values of overlap indices can be weighted. Volumes
now can be reported in :math:`mm^3`, although they are given in voxels
to keep backwards compatibility.
Example
-------
>>> overlap = Overlap()
>>> overlap.inputs.volume1 = 'cont1.nii'
>>> overlap.inputs.volume2 = 'cont2.nii'
>>> res = overlap.run() # doctest: +SKIP
"""
input_spec = OverlapInputSpec
output_spec = OverlapOutputSpec
def _bool_vec_dissimilarity(self, booldata1, booldata2, method):
methods = {'dice': dice, 'jaccard': jaccard}
if not (np.any(booldata1) or np.any(booldata2)):
return 0
return 1 - methods[method](booldata1.flat, booldata2.flat)
def _run_interface(self, runtime):
nii1 = nb.load(self.inputs.volume1)
nii2 = nb.load(self.inputs.volume2)
scale = 1.0
if self.inputs.vol_units == 'mm':
voxvol = nii1.get_header().get_zooms()
for i in xrange(nii1.get_data().ndim-1):
scale = scale * voxvol[i]
data1 = nii1.get_data()
data1[np.logical_or(data1 < 0, np.isnan(data1))] = 0
max1 = int(data1.max())
data1 = data1.astype(np.min_scalar_type(max1))
data2 = nii2.get_data().astype(np.min_scalar_type(max1))
data2[np.logical_or(data1 < 0, np.isnan(data1))] = 0
max2 = data2.max()
maxlabel = max(max1, max2)
if isdefined(self.inputs.mask_volume):
maskdata = nb.load(self.inputs.mask_volume).get_data()
maskdata = ~np.logical_or(maskdata == 0, np.isnan(maskdata))
data1[~maskdata] = 0
data2[~maskdata] = 0
res = []
volumes1 = []
volumes2 = []
labels = np.unique(data1[data1 > 0].reshape(-1)).tolist()
if self.inputs.bg_overlap:
labels.insert(0, 0)
for l in labels:
res.append(self._bool_vec_dissimilarity(data1 == l,
data2 == l, method='jaccard'))
volumes1.append(scale * len(data1[data1 == l]))
volumes2.append(scale * len(data2[data2 == l]))
results = dict(jaccard=[], dice=[])
results['jaccard'] = np.array(res)
results['dice'] = 2.0*results['jaccard'] / (results['jaccard'] + 1.0)
weights = np.ones((len(volumes1),), dtype=np.float32)
if self.inputs.weighting != 'none':
weights = weights / np.array(volumes1)
if self.inputs.weighting == 'squared_vol':
weights = weights**2
weights = weights / np.sum(weights)
both_data = np.zeros(data1.shape)
both_data[(data1 - data2) != 0] = 1
nb.save(nb.Nifti1Image(both_data, nii1.get_affine(),
nii1.get_header()), self.inputs.out_file)
self._labels = labels
self._ove_rois = results
self._vol_rois = ((np.array(volumes1) - np.array(volumes2)) /
np.array(volumes1))
self._dice = round(np.sum(weights*results['dice']), 5)
self._jaccard = round(np.sum(weights*results['jaccard']), 5)
self._volume = np.sum(weights*self._vol_rois)
return runtime
def _list_outputs(self):
outputs = self._outputs().get()
outputs['labels'] = self._labels
outputs['jaccard'] = self._jaccard
outputs['dice'] = self._dice
outputs['volume_difference'] = self._volume
outputs['roi_ji'] = self._ove_rois['jaccard'].tolist()
outputs['roi_di'] = self._ove_rois['dice'].tolist()
outputs['roi_voldiff'] = self._vol_rois.tolist()
outputs['diff_file'] = os.path.abspath(self.inputs.out_file)
return outputs
class FuzzyOverlapInputSpec(BaseInterfaceInputSpec):
in_ref = InputMultiPath( File(exists=True), mandatory=True,
desc='Reference image. Requires the same dimensions as in_tst.')
in_tst = InputMultiPath( File(exists=True), mandatory=True,
desc='Test image. Requires the same dimensions as in_ref.')
weighting = traits.Enum('none', 'volume', 'squared_vol', usedefault=True,
desc=('\'none\': no class-overlap weighting is '
'performed. \'volume\': computed class-'
'overlaps are weighted by class volume '
'\'squared_vol\': computed class-overlaps '
'are weighted by the squared volume of '
'the class'))
out_file = File('diff.nii', desc='alternative name for resulting difference-map', usedefault=True)
class FuzzyOverlapOutputSpec(TraitedSpec):
jaccard = traits.Float( desc='Fuzzy Jaccard Index (fJI), all the classes' )
dice = traits.Float( desc='Fuzzy Dice Index (fDI), all the classes' )
diff_file = File(exists=True, desc='resulting difference-map of all classes, using the chosen weighting' )
class_fji = traits.List( traits.Float(), desc='Array containing the fJIs of each computed class' )
class_fdi = traits.List( traits.Float(), desc='Array containing the fDIs of each computed class' )
class FuzzyOverlap(BaseInterface):
"""Calculates various overlap measures between two maps, using the fuzzy
definition proposed in: Crum et al., Generalized Overlap Measures for
Evaluation and Validation in Medical Image Analysis, IEEE Trans. Med.
Ima. 25(11),pp 1451-1461, Nov. 2006.
in_ref and in_tst are lists of 2/3D images, each element on the list
containing one volume fraction map of a class in a fuzzy partition
of the domain.
Example
-------
>>> overlap = FuzzyOverlap()
>>> overlap.inputs.in_ref = [ 'ref_class0.nii', 'ref_class1.nii' ]
>>> overlap.inputs.in_tst = [ 'tst_class0.nii', 'tst_class1.nii' ]
>>> overlap.inputs.weighting = 'volume'
>>> res = overlap.run() # doctest: +SKIP
"""
input_spec = FuzzyOverlapInputSpec
output_spec = FuzzyOverlapOutputSpec
def _run_interface(self, runtime):
ncomp = len(self.inputs.in_ref)
assert( ncomp == len(self.inputs.in_tst) )
weights = np.ones( shape=ncomp )
img_ref = np.array( [ nb.load( fname ).get_data() for fname in self.inputs.in_ref ] )
img_tst = np.array( [ nb.load( fname ).get_data() for fname in self.inputs.in_tst ] )
msk = np.sum(img_ref, axis=0)
msk[msk>0] = 1.0
tst_msk = np.sum(img_tst, axis=0)
tst_msk[tst_msk>0] = 1.0
#check that volumes are normalized
#img_ref[:][msk>0] = img_ref[:][msk>0] / (np.sum( img_ref, axis=0 ))[msk>0]
#img_tst[tst_msk>0] = img_tst[tst_msk>0] / np.sum( img_tst, axis=0 )[tst_msk>0]
self._jaccards = []
volumes = []
diff_im = np.zeros( img_ref.shape )
for ref_comp, tst_comp, diff_comp in zip( img_ref, img_tst, diff_im ):
num = np.minimum( ref_comp, tst_comp )
ddr = np.maximum( ref_comp, tst_comp )
diff_comp[ddr>0]+= 1.0-(num[ddr>0]/ddr[ddr>0])
self._jaccards.append( np.sum( num ) / np.sum( ddr ) )
volumes.append( np.sum( ref_comp ) )
self._dices = 2.0*np.array(self._jaccards) / (np.array(self._jaccards) +1.0 )
if self.inputs.weighting != "none":
weights = 1.0 / np.array(volumes)
if self.inputs.weighting == "squared_vol":
weights = weights**2
weights = weights / np.sum( weights )
setattr( self, '_jaccard', np.sum( weights * self._jaccards ) )
setattr( self, '_dice', np.sum( weights * self._dices ) )
diff = np.zeros( diff_im[0].shape )
for w,ch in zip(weights,diff_im):
ch[msk==0] = 0
diff+= w* ch
nb.save(nb.Nifti1Image(diff, nb.load( self.inputs.in_ref[0]).get_affine(),
nb.load(self.inputs.in_ref[0]).get_header()), self.inputs.out_file)
return runtime
def _list_outputs(self):
outputs = self._outputs().get()
for method in ("dice", "jaccard"):
outputs[method] = getattr(self, '_' + method)
#outputs['volume_difference'] = self._volume
outputs['diff_file'] = os.path.abspath(self.inputs.out_file)
        outputs['class_fji'] = np.array(self._jaccards).astype(float).tolist()
        outputs['class_fdi'] = self._dices.astype(float).tolist()
return outputs
class ErrorMapInputSpec(BaseInterfaceInputSpec):
in_ref = File(exists=True, mandatory=True,
desc="Reference image. Requires the same dimensions as in_tst.")
in_tst = File(exists=True, mandatory=True,
desc="Test image. Requires the same dimensions as in_ref.")
mask = File(exists=True, desc="calculate overlap only within this mask.")
metric = traits.Enum("sqeuclidean", "euclidean",
desc='error map metric (as implemented in scipy cdist)',
usedefault=True, mandatory=True)
out_map = File(desc="Name for the output file")
class ErrorMapOutputSpec(TraitedSpec):
out_map = File(exists=True, desc="resulting error map")
distance = traits.Float(desc="Average distance between volume 1 and 2")
class ErrorMap(BaseInterface):
""" Calculates the error (distance) map between two input volumes.
Example
-------
>>> errormap = ErrorMap()
>>> errormap.inputs.in_ref = 'cont1.nii'
>>> errormap.inputs.in_tst = 'cont2.nii'
>>> res = errormap.run() # doctest: +SKIP
"""
input_spec = ErrorMapInputSpec
output_spec = ErrorMapOutputSpec
_out_file = ''
def _run_interface(self, runtime):
# Get two numpy data matrices
nii_ref = nb.load(self.inputs.in_ref)
ref_data = np.squeeze(nii_ref.get_data())
tst_data = np.squeeze(nb.load(self.inputs.in_tst).get_data())
assert(ref_data.ndim == tst_data.ndim)
# Load mask
comps = 1
mapshape = ref_data.shape
if (ref_data.ndim == 4):
comps = ref_data.shape[-1]
mapshape = ref_data.shape[:-1]
if isdefined(self.inputs.mask):
msk = nb.load( self.inputs.mask ).get_data()
if (mapshape != msk.shape):
raise RuntimeError("Mask should match volume shape, \
mask is %s and volumes are %s" %
(list(msk.shape), list(mapshape)))
else:
msk = np.ones(shape=mapshape)
        # Flatten both volumes and compute the pixel difference
mskvector = msk.reshape(-1)
msk_idxs = np.where(mskvector==1)
refvector = ref_data.reshape(-1,comps)[msk_idxs].astype(np.float32)
tstvector = tst_data.reshape(-1,comps)[msk_idxs].astype(np.float32)
diffvector = (refvector-tstvector)
# Scale the difference
if self.inputs.metric == 'sqeuclidean':
errvector = diffvector**2
if (comps > 1):
errvector = np.sum(errvector, axis=1)
else:
errvector = np.squeeze(errvector)
elif self.inputs.metric == 'euclidean':
errvector = np.linalg.norm(diffvector, axis=1)
errvectorexp = np.zeros_like(mskvector, dtype=np.float32) # The default type is uint8
errvectorexp[msk_idxs] = errvector
# Get averaged error
self._distance = np.average(errvector) # Only average the masked voxels
errmap = errvectorexp.reshape(mapshape)
hdr = nii_ref.get_header().copy()
hdr.set_data_dtype(np.float32)
hdr['data_type'] = 16
hdr.set_data_shape(mapshape)
if not isdefined(self.inputs.out_map):
fname,ext = op.splitext(op.basename(self.inputs.in_tst))
if ext=='.gz':
fname,ext2 = op.splitext(fname)
ext = ext2 + ext
self._out_file = op.abspath(fname + "_errmap" + ext)
else:
self._out_file = self.inputs.out_map
nb.Nifti1Image(errmap.astype(np.float32), nii_ref.get_affine(),
hdr).to_filename(self._out_file)
return runtime
def _list_outputs(self):
outputs = self.output_spec().get()
outputs['out_map'] = self._out_file
outputs['distance'] = self._distance
return outputs
class SimilarityInputSpec(BaseInterfaceInputSpec):
volume1 = File(exists=True, desc="3D/4D volume", mandatory=True)
volume2 = File(exists=True, desc="3D/4D volume", mandatory=True)
mask1 = File(exists=True, desc="3D volume")
mask2 = File(exists=True, desc="3D volume")
metric = traits.Either(traits.Enum('cc', 'cr', 'crl1', 'mi', 'nmi', 'slr'),
traits.Callable(),
desc="""str or callable
Cost-function for assessing image similarity. If a string,
one of 'cc': correlation coefficient, 'cr': correlation
ratio, 'crl1': L1-norm based correlation ratio, 'mi': mutual
information, 'nmi': normalized mutual information, 'slr':
supervised log-likelihood ratio. If a callable, it should
take a two-dimensional array representing the image joint
histogram as an input and return a float.""", usedefault=True)
class SimilarityOutputSpec(TraitedSpec):
similarity = traits.List( traits.Float(desc="Similarity between volume 1 and 2, frame by frame"))
class Similarity(BaseInterface):
"""Calculates similarity between two 3D or 4D volumes. Both volumes have to be in
the same coordinate system, same space within that coordinate system and
with the same voxel dimensions.
.. note:: This interface is an extension of
:py:class:`nipype.interfaces.nipy.utils.Similarity` to support 4D files.
Requires :py:mod:`nipy`
Example
-------
>>> from nipype.algorithms.metrics import Similarity
>>> similarity = Similarity()
>>> similarity.inputs.volume1 = 'rc1s1.nii'
>>> similarity.inputs.volume2 = 'rc1s2.nii'
>>> similarity.inputs.mask1 = 'mask.nii'
>>> similarity.inputs.mask2 = 'mask.nii'
>>> similarity.inputs.metric = 'cr'
>>> res = similarity.run() # doctest: +SKIP
"""
input_spec = SimilarityInputSpec
output_spec = SimilarityOutputSpec
_have_nipy = True
def __init__(self, **inputs):
try:
package_check('nipy')
except Exception, e:
self._have_nipy = False
super(Similarity,self).__init__(**inputs)
def _run_interface(self, runtime):
if not self._have_nipy:
raise RuntimeError('nipy is not installed')
from nipy.algorithms.registration.histogram_registration import HistogramRegistration
from nipy.algorithms.registration.affine import Affine
vol1_nii = nb.load(self.inputs.volume1)
vol2_nii = nb.load(self.inputs.volume2)
dims = vol1_nii.get_data().ndim
if dims==3 or dims==2:
vols1 = [ vol1_nii ]
vols2 = [ vol2_nii ]
if dims==4:
vols1 = nb.four_to_three( vol1_nii )
vols2 = nb.four_to_three( vol2_nii )
if dims<2 or dims>4:
raise RuntimeError( 'Image dimensions not supported (detected %dD file)' % dims )
if isdefined(self.inputs.mask1):
mask1 = nb.load(self.inputs.mask1).get_data() == 1
else:
mask1 = None
if isdefined(self.inputs.mask2):
mask2 = nb.load(self.inputs.mask2).get_data() == 1
else:
mask2 = None
self._similarity = []
for ts1,ts2 in zip( vols1, vols2 ):
histreg = HistogramRegistration(from_img = ts1,
to_img = ts2,
similarity=self.inputs.metric,
from_mask = mask1,
to_mask = mask2)
self._similarity.append( histreg.eval(Affine()) )
return runtime
def _list_outputs(self):
outputs = self._outputs().get()
outputs['similarity'] = self._similarity
return outputs
| bsd-3-clause |
moutai/scikit-learn | examples/cross_decomposition/plot_compare_cross_decomposition.py | 55 | 4761 | """
===================================
Compare cross decomposition methods
===================================
Simple usage of various cross decomposition algorithms:
- PLSCanonical
- PLSRegression, with multivariate response, a.k.a. PLS2
- PLSRegression, with univariate response, a.k.a. PLS1
- CCA
Given 2 multivariate covarying two-dimensional datasets, X and Y,
PLS extracts the 'directions of covariance', i.e. the components of each
dataset that explain the most shared variance between both datasets.
This is apparent on the **scatterplot matrix** display: components 1 in
dataset X and dataset Y are maximally correlated (points lie around the
first diagonal). This is also true for components 2 in both datasets,
however, the correlation across datasets for different components is
weak: the point cloud is very spherical.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.cross_decomposition import PLSCanonical, PLSRegression, CCA
###############################################################################
# Dataset based latent variables model
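# Both X and Y are built from the same two latent variables (l1 and l2) plus
# independent Gaussian noise, so the cross decomposition methods below should
# recover two shared components.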
n = 500
# 2 latent vars:
l1 = np.random.normal(size=n)
l2 = np.random.normal(size=n)
latents = np.array([l1, l1, l2, l2]).T
X = latents + np.random.normal(size=4 * n).reshape((n, 4))
Y = latents + np.random.normal(size=4 * n).reshape((n, 4))
X_train = X[:n // 2]
Y_train = Y[:n // 2]
X_test = X[n // 2:]
Y_test = Y[n // 2:]
print("Corr(X)")
print(np.round(np.corrcoef(X.T), 2))
print("Corr(Y)")
print(np.round(np.corrcoef(Y.T), 2))
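# The printed correlation matrices should show a block structure: features
# generated from the same latent variable (columns 1-2, and columns 3-4)
# correlate at roughly 0.5, while the two blocks are nearly uncorrelated.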
###############################################################################
# Canonical (symmetric) PLS
# Transform data
# ~~~~~~~~~~~~~~
plsca = PLSCanonical(n_components=2)
plsca.fit(X_train, Y_train)
X_train_r, Y_train_r = plsca.transform(X_train, Y_train)
X_test_r, Y_test_r = plsca.transform(X_test, Y_test)
# Scatter plot of scores
# ~~~~~~~~~~~~~~~~~~~~~~
# 1) On diagonal plot X vs Y scores on each components
plt.figure(figsize=(12, 8))
plt.subplot(221)
plt.plot(X_train_r[:, 0], Y_train_r[:, 0], "ob", label="train")
plt.plot(X_test_r[:, 0], Y_test_r[:, 0], "or", label="test")
plt.xlabel("x scores")
plt.ylabel("y scores")
plt.title('Comp. 1: X vs Y (test corr = %.2f)' %
np.corrcoef(X_test_r[:, 0], Y_test_r[:, 0])[0, 1])
plt.xticks(())
plt.yticks(())
plt.legend(loc="best")
plt.subplot(224)
plt.plot(X_train_r[:, 1], Y_train_r[:, 1], "ob", label="train")
plt.plot(X_test_r[:, 1], Y_test_r[:, 1], "or", label="test")
plt.xlabel("x scores")
plt.ylabel("y scores")
plt.title('Comp. 2: X vs Y (test corr = %.2f)' %
np.corrcoef(X_test_r[:, 1], Y_test_r[:, 1])[0, 1])
plt.xticks(())
plt.yticks(())
plt.legend(loc="best")
# 2) Off diagonal plot components 1 vs 2 for X and Y
plt.subplot(222)
plt.plot(X_train_r[:, 0], X_train_r[:, 1], "*b", label="train")
plt.plot(X_test_r[:, 0], X_test_r[:, 1], "*r", label="test")
plt.xlabel("X comp. 1")
plt.ylabel("X comp. 2")
plt.title('X comp. 1 vs X comp. 2 (test corr = %.2f)'
% np.corrcoef(X_test_r[:, 0], X_test_r[:, 1])[0, 1])
plt.legend(loc="best")
plt.xticks(())
plt.yticks(())
plt.subplot(223)
plt.plot(Y_train_r[:, 0], Y_train_r[:, 1], "*b", label="train")
plt.plot(Y_test_r[:, 0], Y_test_r[:, 1], "*r", label="test")
plt.xlabel("Y comp. 1")
plt.ylabel("Y comp. 2")
plt.title('Y comp. 1 vs Y comp. 2 , (test corr = %.2f)'
% np.corrcoef(Y_test_r[:, 0], Y_test_r[:, 1])[0, 1])
plt.legend(loc="best")
plt.xticks(())
plt.yticks(())
plt.show()
###############################################################################
# PLS regression, with multivariate response, a.k.a. PLS2
n = 1000
q = 3
p = 10
X = np.random.normal(size=n * p).reshape((n, p))
B = np.array([[1, 2] + [0] * (p - 2)] * q).T
# each Yj = 1*X1 + 2*X2 + noise
Y = np.dot(X, B) + np.random.normal(size=n * q).reshape((n, q)) + 5
pls2 = PLSRegression(n_components=3)
pls2.fit(X, Y)
print("True B (such that: Y = XB + Err)")
print(B)
# compare pls2.coef_ with B
print("Estimated B")
print(np.round(pls2.coef_, 1))
pls2.predict(X)
###############################################################################
# PLS regression, with univariate response, a.k.a. PLS1
n = 1000
p = 10
X = np.random.normal(size=n * p).reshape((n, p))
y = X[:, 0] + 2 * X[:, 1] + np.random.normal(size=n * 1) + 5
pls1 = PLSRegression(n_components=3)
pls1.fit(X, y)
# note that the number of components exceeds 1 (the dimension of y)
print("Estimated betas")
print(np.round(pls1.coef_, 1))
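# Only the first two entries of pls1.coef_ should be clearly non-zero,
# mirroring the generating model y = 1*X1 + 2*X2 + noise.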
###############################################################################
# CCA (PLS mode B with symmetric deflation)
cca = CCA(n_components=2)
cca.fit(X_train, Y_train)
X_train_r, Y_train_r = cca.transform(X_train, Y_train)
X_test_r, Y_test_r = cca.transform(X_test, Y_test)
| bsd-3-clause |
ch3ll0v3k/scikit-learn | sklearn/metrics/tests/test_classification.py | 28 | 53546 | from __future__ import division, print_function
import numpy as np
from scipy import linalg
from functools import partial
from itertools import product
import warnings
from sklearn import datasets
from sklearn import svm
from sklearn.datasets import make_multilabel_classification
from sklearn.preprocessing import LabelBinarizer, MultiLabelBinarizer
from sklearn.preprocessing import label_binarize
from sklearn.utils.fixes import np_version
from sklearn.utils.validation import check_random_state
from sklearn.utils.testing import assert_raises, clean_warning_registry
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_no_warnings
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import ignore_warnings
from sklearn.metrics import accuracy_score
from sklearn.metrics import average_precision_score
from sklearn.metrics import classification_report
from sklearn.metrics import cohen_kappa_score
from sklearn.metrics import confusion_matrix
from sklearn.metrics import f1_score
from sklearn.metrics import fbeta_score
from sklearn.metrics import hamming_loss
from sklearn.metrics import hinge_loss
from sklearn.metrics import jaccard_similarity_score
from sklearn.metrics import log_loss
from sklearn.metrics import matthews_corrcoef
from sklearn.metrics import precision_recall_fscore_support
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn.metrics import zero_one_loss
from sklearn.metrics import brier_score_loss
from sklearn.metrics.classification import _check_targets
from sklearn.metrics.base import UndefinedMetricWarning
###############################################################################
# Utilities for testing
def make_prediction(dataset=None, binary=False):
"""Make some classification predictions on a toy dataset using a SVC
If binary is True restrict to a binary classification problem instead of a
multiclass classification problem
"""
if dataset is None:
# import some data to play with
dataset = datasets.load_iris()
X = dataset.data
y = dataset.target
if binary:
# restrict to a binary classification task
X, y = X[y < 2], y[y < 2]
n_samples, n_features = X.shape
p = np.arange(n_samples)
rng = check_random_state(37)
rng.shuffle(p)
X, y = X[p], y[p]
half = int(n_samples / 2)
# add noisy features to make the problem harder and avoid perfect results
rng = np.random.RandomState(0)
X = np.c_[X, rng.randn(n_samples, 200 * n_features)]
# run classifier, get class probabilities and label predictions
clf = svm.SVC(kernel='linear', probability=True, random_state=0)
probas_pred = clf.fit(X[:half], y[:half]).predict_proba(X[half:])
if binary:
# only interested in probabilities of the positive case
# XXX: do we really want a special API for the binary case?
probas_pred = probas_pred[:, 1]
y_pred = clf.predict(X[half:])
y_true = y[half:]
return y_true, y_pred, probas_pred
###############################################################################
# Tests
def test_multilabel_accuracy_score_subset_accuracy():
# Dense label indicator matrix format
y1 = np.array([[0, 1, 1], [1, 0, 1]])
y2 = np.array([[0, 0, 1], [1, 0, 1]])
assert_equal(accuracy_score(y1, y2), 0.5)
assert_equal(accuracy_score(y1, y1), 1)
assert_equal(accuracy_score(y2, y2), 1)
assert_equal(accuracy_score(y2, np.logical_not(y2)), 0)
assert_equal(accuracy_score(y1, np.logical_not(y1)), 0)
assert_equal(accuracy_score(y1, np.zeros(y1.shape)), 0)
assert_equal(accuracy_score(y2, np.zeros(y1.shape)), 0)
with ignore_warnings(): # sequence of sequences is deprecated
# List of tuple of label
y1 = [(1, 2,), (0, 2,)]
y2 = [(2,), (0, 2,)]
assert_equal(accuracy_score(y1, y2), 0.5)
assert_equal(accuracy_score(y1, y1), 1)
assert_equal(accuracy_score(y2, y2), 1)
assert_equal(accuracy_score(y2, [(), ()]), 0)
assert_equal(accuracy_score(y1, y2, normalize=False), 1)
assert_equal(accuracy_score(y1, y1, normalize=False), 2)
assert_equal(accuracy_score(y2, y2, normalize=False), 2)
assert_equal(accuracy_score(y2, [(), ()], normalize=False), 0)
def test_precision_recall_f1_score_binary():
# Test Precision Recall and F1 Score for binary classification task
y_true, y_pred, _ = make_prediction(binary=True)
# detailed measures for each class
p, r, f, s = precision_recall_fscore_support(y_true, y_pred, average=None)
assert_array_almost_equal(p, [0.73, 0.85], 2)
assert_array_almost_equal(r, [0.88, 0.68], 2)
assert_array_almost_equal(f, [0.80, 0.76], 2)
assert_array_equal(s, [25, 25])
# individual scoring function that can be used for grid search: in the
# binary class case the score is the value of the measure for the positive
# class (e.g. label == 1). This is deprecated for average != 'binary'.
assert_dep_warning = partial(assert_warns, DeprecationWarning)
for kwargs, my_assert in [({}, assert_no_warnings),
({'average': 'binary'}, assert_no_warnings),
({'average': 'micro'}, assert_dep_warning)]:
ps = my_assert(precision_score, y_true, y_pred, **kwargs)
assert_array_almost_equal(ps, 0.85, 2)
rs = my_assert(recall_score, y_true, y_pred, **kwargs)
assert_array_almost_equal(rs, 0.68, 2)
fs = my_assert(f1_score, y_true, y_pred, **kwargs)
assert_array_almost_equal(fs, 0.76, 2)
assert_almost_equal(my_assert(fbeta_score, y_true, y_pred, beta=2,
**kwargs),
(1 + 2 ** 2) * ps * rs / (2 ** 2 * ps + rs), 2)
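# (The identity checked above is the usual F-beta definition:
#  F_beta = (1 + beta**2) * precision * recall / (beta**2 * precision + recall).)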
@ignore_warnings
def test_precision_recall_f_binary_single_class():
# Test precision, recall and F1 score behave with a single positive or
# negative class
# Such a case may occur with non-stratified cross-validation
assert_equal(1., precision_score([1, 1], [1, 1]))
assert_equal(1., recall_score([1, 1], [1, 1]))
assert_equal(1., f1_score([1, 1], [1, 1]))
assert_equal(0., precision_score([-1, -1], [-1, -1]))
assert_equal(0., recall_score([-1, -1], [-1, -1]))
assert_equal(0., f1_score([-1, -1], [-1, -1]))
@ignore_warnings
def test_precision_recall_f_extra_labels():
"""Test handling of explicit additional (not in input) labels to PRF
"""
y_true = [1, 3, 3, 2]
y_pred = [1, 1, 3, 2]
y_true_bin = label_binarize(y_true, classes=np.arange(5))
y_pred_bin = label_binarize(y_pred, classes=np.arange(5))
data = [(y_true, y_pred),
(y_true_bin, y_pred_bin)]
for i, (y_true, y_pred) in enumerate(data):
# No average: zeros in array
actual = recall_score(y_true, y_pred, labels=[0, 1, 2, 3, 4],
average=None)
assert_array_almost_equal([0., 1., 1., .5, 0.], actual)
# Macro average is changed
actual = recall_score(y_true, y_pred, labels=[0, 1, 2, 3, 4],
average='macro')
assert_array_almost_equal(np.mean([0., 1., 1., .5, 0.]), actual)
# No effect otherwise
for average in ['micro', 'weighted', 'samples']:
if average == 'samples' and i == 0:
continue
assert_almost_equal(recall_score(y_true, y_pred,
labels=[0, 1, 2, 3, 4],
average=average),
recall_score(y_true, y_pred, labels=None,
average=average))
# Error when introducing invalid label in multilabel case
# (although it would only affect performance if average='macro'/None)
for average in [None, 'macro', 'micro', 'samples']:
assert_raises(ValueError, recall_score, y_true_bin, y_pred_bin,
labels=np.arange(6), average=average)
assert_raises(ValueError, recall_score, y_true_bin, y_pred_bin,
labels=np.arange(-1, 4), average=average)
@ignore_warnings
def test_precision_recall_f_ignored_labels():
"""Test a subset of labels may be requested for PRF"""
y_true = [1, 1, 2, 3]
y_pred = [1, 3, 3, 3]
y_true_bin = label_binarize(y_true, classes=np.arange(5))
y_pred_bin = label_binarize(y_pred, classes=np.arange(5))
data = [(y_true, y_pred),
(y_true_bin, y_pred_bin)]
for i, (y_true, y_pred) in enumerate(data):
recall_13 = partial(recall_score, y_true, y_pred, labels=[1, 3])
recall_all = partial(recall_score, y_true, y_pred, labels=None)
assert_array_almost_equal([.5, 1.], recall_13(average=None))
assert_almost_equal((.5 + 1.) / 2, recall_13(average='macro'))
assert_almost_equal((.5 * 2 + 1. * 1) / 3,
recall_13(average='weighted'))
assert_almost_equal(2. / 3, recall_13(average='micro'))
# ensure the above were meaningful tests:
for average in ['macro', 'weighted', 'micro']:
assert_not_equal(recall_13(average=average),
recall_all(average=average))
def test_average_precision_score_score_non_binary_class():
# Test that average_precision_score function returns an error when trying
# to compute average_precision_score for multiclass task.
rng = check_random_state(404)
y_pred = rng.rand(10)
# y_true contains three different class values
y_true = rng.randint(0, 3, size=10)
assert_raise_message(ValueError, "multiclass format is not supported",
average_precision_score, y_true, y_pred)
def test_average_precision_score_duplicate_values():
# Duplicate values with precision-recall require a different
# processing than when computing the AUC of a ROC, because the
# precision-recall curve is a decreasing curve
# The following situation corresponds to a perfect
# test statistic, the average_precision_score should be 1
y_true = [0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1]
y_score = [0, .1, .1, .4, .5, .6, .6, .9, .9, 1, 1]
assert_equal(average_precision_score(y_true, y_score), 1)
def test_average_precision_score_tied_values():
# Here if we go from left to right in y_true, the 0 values are
# separated from the 1 values, so it appears that we've
# correctly sorted our classifications. But in fact the first two
# values have the same score (0.5) and so the first two values
# could be swapped around, creating an imperfect sorting. This
# imperfection should come through in the end score, making it less
# than one.
y_true = [0, 1, 1]
y_score = [.5, .5, .6]
assert_not_equal(average_precision_score(y_true, y_score), 1.)
@ignore_warnings
def test_precision_recall_fscore_support_errors():
y_true, y_pred, _ = make_prediction(binary=True)
# Bad beta
assert_raises(ValueError, precision_recall_fscore_support,
y_true, y_pred, beta=0.0)
# Bad pos_label
assert_raises(ValueError, precision_recall_fscore_support,
y_true, y_pred, pos_label=2, average='macro')
# Bad average option
assert_raises(ValueError, precision_recall_fscore_support,
[0, 1, 2], [1, 2, 0], average='mega')
def test_confusion_matrix_binary():
# Test confusion matrix - binary classification case
y_true, y_pred, _ = make_prediction(binary=True)
def test(y_true, y_pred):
cm = confusion_matrix(y_true, y_pred)
assert_array_equal(cm, [[22, 3], [8, 17]])
tp, fp, fn, tn = cm.flatten()
num = (tp * tn - fp * fn)
den = np.sqrt((tp + fp) * (tp + fn) * (tn + fp) * (tn + fn))
true_mcc = 0 if den == 0 else num / den
mcc = matthews_corrcoef(y_true, y_pred)
assert_array_almost_equal(mcc, true_mcc, decimal=2)
assert_array_almost_equal(mcc, 0.57, decimal=2)
test(y_true, y_pred)
test([str(y) for y in y_true],
[str(y) for y in y_pred])
def test_cohen_kappa():
# These label vectors reproduce the contingency matrix from Artstein and
# Poesio (2008), Table 1: np.array([[20, 20], [10, 50]]).
y1 = np.array([0] * 40 + [1] * 60)
y2 = np.array([0] * 20 + [1] * 20 + [0] * 10 + [1] * 50)
kappa = cohen_kappa_score(y1, y2)
assert_almost_equal(kappa, .348, decimal=3)
assert_equal(kappa, cohen_kappa_score(y2, y1))
# Add spurious labels and ignore them.
y1 = np.append(y1, [2] * 4)
y2 = np.append(y2, [2] * 4)
assert_equal(cohen_kappa_score(y1, y2, labels=[0, 1]), kappa)
assert_almost_equal(cohen_kappa_score(y1, y1), 1.)
# Multiclass example: Artstein and Poesio, Table 4.
y1 = np.array([0] * 46 + [1] * 44 + [2] * 10)
y2 = np.array([0] * 52 + [1] * 32 + [2] * 16)
assert_almost_equal(cohen_kappa_score(y1, y2), .8013, decimal=4)
@ignore_warnings
def test_matthews_corrcoef_nan():
assert_equal(matthews_corrcoef([0], [1]), 0.0)
assert_equal(matthews_corrcoef([0, 0], [0, 1]), 0.0)
def test_precision_recall_f1_score_multiclass():
# Test Precision Recall and F1 Score for multiclass classification task
y_true, y_pred, _ = make_prediction(binary=False)
# compute scores with default labels introspection
p, r, f, s = precision_recall_fscore_support(y_true, y_pred, average=None)
assert_array_almost_equal(p, [0.83, 0.33, 0.42], 2)
assert_array_almost_equal(r, [0.79, 0.09, 0.90], 2)
assert_array_almost_equal(f, [0.81, 0.15, 0.57], 2)
assert_array_equal(s, [24, 31, 20])
# averaging tests
ps = precision_score(y_true, y_pred, pos_label=1, average='micro')
assert_array_almost_equal(ps, 0.53, 2)
rs = recall_score(y_true, y_pred, average='micro')
assert_array_almost_equal(rs, 0.53, 2)
fs = f1_score(y_true, y_pred, average='micro')
assert_array_almost_equal(fs, 0.53, 2)
ps = precision_score(y_true, y_pred, average='macro')
assert_array_almost_equal(ps, 0.53, 2)
rs = recall_score(y_true, y_pred, average='macro')
assert_array_almost_equal(rs, 0.60, 2)
fs = f1_score(y_true, y_pred, average='macro')
assert_array_almost_equal(fs, 0.51, 2)
ps = precision_score(y_true, y_pred, average='weighted')
assert_array_almost_equal(ps, 0.51, 2)
rs = recall_score(y_true, y_pred, average='weighted')
assert_array_almost_equal(rs, 0.53, 2)
fs = f1_score(y_true, y_pred, average='weighted')
assert_array_almost_equal(fs, 0.47, 2)
assert_raises(ValueError, precision_score, y_true, y_pred,
average="samples")
assert_raises(ValueError, recall_score, y_true, y_pred, average="samples")
assert_raises(ValueError, f1_score, y_true, y_pred, average="samples")
assert_raises(ValueError, fbeta_score, y_true, y_pred, average="samples",
beta=0.5)
# same prediction but with an explicit label ordering
p, r, f, s = precision_recall_fscore_support(
y_true, y_pred, labels=[0, 2, 1], average=None)
assert_array_almost_equal(p, [0.83, 0.41, 0.33], 2)
assert_array_almost_equal(r, [0.79, 0.90, 0.10], 2)
assert_array_almost_equal(f, [0.81, 0.57, 0.15], 2)
assert_array_equal(s, [24, 20, 31])
def test_precision_refcall_f1_score_multilabel_unordered_labels():
# test that labels need not be sorted in the multilabel case
y_true = np.array([[1, 1, 0, 0]])
y_pred = np.array([[0, 0, 1, 1]])
for average in ['samples', 'micro', 'macro', 'weighted', None]:
p, r, f, s = precision_recall_fscore_support(
y_true, y_pred, labels=[3, 0, 1, 2], warn_for=[], average=average)
assert_array_equal(p, 0)
assert_array_equal(r, 0)
assert_array_equal(f, 0)
if average is None:
assert_array_equal(s, [0, 1, 1, 0])
def test_precision_recall_f1_score_multiclass_pos_label_none():
# Test Precision Recall and F1 Score for multiclass classification task
# GH Issue #1296
# initialize data
y_true = np.array([0, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1])
y_pred = np.array([1, 1, 0, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1])
# compute scores with default labels introspection
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
pos_label=None,
average='weighted')
def test_zero_precision_recall():
# Check that pathological cases do not bring NaNs
old_error_settings = np.seterr(all='raise')
try:
y_true = np.array([0, 1, 2, 0, 1, 2])
y_pred = np.array([2, 0, 1, 1, 2, 0])
assert_almost_equal(precision_score(y_true, y_pred,
average='weighted'), 0.0, 2)
assert_almost_equal(recall_score(y_true, y_pred, average='weighted'),
0.0, 2)
assert_almost_equal(f1_score(y_true, y_pred, average='weighted'),
0.0, 2)
finally:
np.seterr(**old_error_settings)
def test_confusion_matrix_multiclass():
# Test confusion matrix - multi-class case
y_true, y_pred, _ = make_prediction(binary=False)
def test(y_true, y_pred, string_type=False):
# compute confusion matrix with default labels introspection
cm = confusion_matrix(y_true, y_pred)
assert_array_equal(cm, [[19, 4, 1],
[4, 3, 24],
[0, 2, 18]])
# compute confusion matrix with explicit label ordering
labels = ['0', '2', '1'] if string_type else [0, 2, 1]
cm = confusion_matrix(y_true,
y_pred,
labels=labels)
assert_array_equal(cm, [[19, 1, 4],
[0, 18, 2],
[4, 24, 3]])
test(y_true, y_pred)
test(list(str(y) for y in y_true),
list(str(y) for y in y_pred),
string_type=True)
def test_confusion_matrix_multiclass_subset_labels():
# Test confusion matrix - multi-class case with subset of labels
y_true, y_pred, _ = make_prediction(binary=False)
# compute confusion matrix with only first two labels considered
cm = confusion_matrix(y_true, y_pred, labels=[0, 1])
assert_array_equal(cm, [[19, 4],
[4, 3]])
# compute confusion matrix with explicit label ordering for only subset
# of labels
cm = confusion_matrix(y_true, y_pred, labels=[2, 1])
assert_array_equal(cm, [[18, 2],
[24, 3]])
def test_classification_report_multiclass():
# Test performance report
iris = datasets.load_iris()
y_true, y_pred, _ = make_prediction(dataset=iris, binary=False)
# print classification report with class names
expected_report = """\
precision recall f1-score support
setosa 0.83 0.79 0.81 24
versicolor 0.33 0.10 0.15 31
virginica 0.42 0.90 0.57 20
avg / total 0.51 0.53 0.47 75
"""
report = classification_report(
y_true, y_pred, labels=np.arange(len(iris.target_names)),
target_names=iris.target_names)
assert_equal(report, expected_report)
# print classification report with label detection
expected_report = """\
precision recall f1-score support
0 0.83 0.79 0.81 24
1 0.33 0.10 0.15 31
2 0.42 0.90 0.57 20
avg / total 0.51 0.53 0.47 75
"""
report = classification_report(y_true, y_pred)
assert_equal(report, expected_report)
def test_classification_report_multiclass_with_digits():
# Test performance report with added digits in floating point values
iris = datasets.load_iris()
y_true, y_pred, _ = make_prediction(dataset=iris, binary=False)
# print classification report with class names
expected_report = """\
precision recall f1-score support
setosa 0.82609 0.79167 0.80851 24
versicolor 0.33333 0.09677 0.15000 31
virginica 0.41860 0.90000 0.57143 20
avg / total 0.51375 0.53333 0.47310 75
"""
report = classification_report(
y_true, y_pred, labels=np.arange(len(iris.target_names)),
target_names=iris.target_names, digits=5)
assert_equal(report, expected_report)
# print classification report with label detection
expected_report = """\
precision recall f1-score support
0 0.83 0.79 0.81 24
1 0.33 0.10 0.15 31
2 0.42 0.90 0.57 20
avg / total 0.51 0.53 0.47 75
"""
report = classification_report(y_true, y_pred)
assert_equal(report, expected_report)
def test_classification_report_multiclass_with_string_label():
y_true, y_pred, _ = make_prediction(binary=False)
y_true = np.array(["blue", "green", "red"])[y_true]
y_pred = np.array(["blue", "green", "red"])[y_pred]
expected_report = """\
precision recall f1-score support
blue 0.83 0.79 0.81 24
green 0.33 0.10 0.15 31
red 0.42 0.90 0.57 20
avg / total 0.51 0.53 0.47 75
"""
report = classification_report(y_true, y_pred)
assert_equal(report, expected_report)
expected_report = """\
precision recall f1-score support
a 0.83 0.79 0.81 24
b 0.33 0.10 0.15 31
c 0.42 0.90 0.57 20
avg / total 0.51 0.53 0.47 75
"""
report = classification_report(y_true, y_pred,
target_names=["a", "b", "c"])
assert_equal(report, expected_report)
def test_classification_report_multiclass_with_unicode_label():
y_true, y_pred, _ = make_prediction(binary=False)
labels = np.array([u"blue\xa2", u"green\xa2", u"red\xa2"])
y_true = labels[y_true]
y_pred = labels[y_pred]
expected_report = u"""\
precision recall f1-score support
blue\xa2 0.83 0.79 0.81 24
green\xa2 0.33 0.10 0.15 31
red\xa2 0.42 0.90 0.57 20
avg / total 0.51 0.53 0.47 75
"""
if np_version[:3] < (1, 7, 0):
expected_message = ("NumPy < 1.7.0 does not implement"
" searchsorted on unicode data correctly.")
assert_raise_message(RuntimeError, expected_message,
classification_report, y_true, y_pred)
else:
report = classification_report(y_true, y_pred)
assert_equal(report, expected_report)
@ignore_warnings # sequence of sequences is deprecated
def test_multilabel_classification_report():
n_classes = 4
n_samples = 50
make_ml = make_multilabel_classification
_, y_true_ll = make_ml(n_features=1, n_classes=n_classes, random_state=0,
n_samples=n_samples)
_, y_pred_ll = make_ml(n_features=1, n_classes=n_classes, random_state=1,
n_samples=n_samples)
expected_report = """\
precision recall f1-score support
0 0.50 0.67 0.57 24
1 0.51 0.74 0.61 27
2 0.29 0.08 0.12 26
3 0.52 0.56 0.54 27
avg / total 0.45 0.51 0.46 104
"""
lb = MultiLabelBinarizer()
lb.fit([range(4)])
y_true_bi = lb.transform(y_true_ll)
y_pred_bi = lb.transform(y_pred_ll)
for y_true, y_pred in [(y_true_ll, y_pred_ll), (y_true_bi, y_pred_bi)]:
report = classification_report(y_true, y_pred)
assert_equal(report, expected_report)
def test_multilabel_zero_one_loss_subset():
# Dense label indicator matrix format
y1 = np.array([[0, 1, 1], [1, 0, 1]])
y2 = np.array([[0, 0, 1], [1, 0, 1]])
assert_equal(zero_one_loss(y1, y2), 0.5)
assert_equal(zero_one_loss(y1, y1), 0)
assert_equal(zero_one_loss(y2, y2), 0)
assert_equal(zero_one_loss(y2, np.logical_not(y2)), 1)
assert_equal(zero_one_loss(y1, np.logical_not(y1)), 1)
assert_equal(zero_one_loss(y1, np.zeros(y1.shape)), 1)
assert_equal(zero_one_loss(y2, np.zeros(y1.shape)), 1)
with ignore_warnings(): # sequence of sequences is deprecated
# List of tuple of label
y1 = [(1, 2,), (0, 2,)]
y2 = [(2,), (0, 2,)]
assert_equal(zero_one_loss(y1, y2), 0.5)
assert_equal(zero_one_loss(y1, y1), 0)
assert_equal(zero_one_loss(y2, y2), 0)
assert_equal(zero_one_loss(y2, [(), ()]), 1)
assert_equal(zero_one_loss(y2, [tuple(), (10, )]), 1)
def test_multilabel_hamming_loss():
# Dense label indicator matrix format
y1 = np.array([[0, 1, 1], [1, 0, 1]])
y2 = np.array([[0, 0, 1], [1, 0, 1]])
assert_equal(hamming_loss(y1, y2), 1 / 6)
assert_equal(hamming_loss(y1, y1), 0)
assert_equal(hamming_loss(y2, y2), 0)
assert_equal(hamming_loss(y2, np.logical_not(y2)), 1)
assert_equal(hamming_loss(y1, np.logical_not(y1)), 1)
assert_equal(hamming_loss(y1, np.zeros(y1.shape)), 4 / 6)
assert_equal(hamming_loss(y2, np.zeros(y1.shape)), 0.5)
with ignore_warnings(): # sequence of sequences is deprecated
# List of tuple of label
y1 = [(1, 2,), (0, 2,)]
y2 = [(2,), (0, 2,)]
assert_equal(hamming_loss(y1, y2), 1 / 6)
assert_equal(hamming_loss(y1, y1), 0)
assert_equal(hamming_loss(y2, y2), 0)
assert_equal(hamming_loss(y2, [(), ()]), 0.75)
assert_equal(hamming_loss(y1, [tuple(), (10, )]), 0.625)
assert_almost_equal(hamming_loss(y2, [tuple(), (10, )],
classes=np.arange(11)), 0.1818, 2)
def test_multilabel_jaccard_similarity_score():
# Dense label indicator matrix format
y1 = np.array([[0, 1, 1], [1, 0, 1]])
y2 = np.array([[0, 0, 1], [1, 0, 1]])
# size(y1 \inter y2) = [1, 2]
# size(y1 \union y2) = [2, 2]
assert_equal(jaccard_similarity_score(y1, y2), 0.75)
assert_equal(jaccard_similarity_score(y1, y1), 1)
assert_equal(jaccard_similarity_score(y2, y2), 1)
assert_equal(jaccard_similarity_score(y2, np.logical_not(y2)), 0)
assert_equal(jaccard_similarity_score(y1, np.logical_not(y1)), 0)
assert_equal(jaccard_similarity_score(y1, np.zeros(y1.shape)), 0)
assert_equal(jaccard_similarity_score(y2, np.zeros(y1.shape)), 0)
with ignore_warnings(): # sequence of sequences is deprecated
# List of tuple of label
y1 = [(1, 2,), (0, 2,)]
y2 = [(2,), (0, 2,)]
assert_equal(jaccard_similarity_score(y1, y2), 0.75)
assert_equal(jaccard_similarity_score(y1, y1), 1)
assert_equal(jaccard_similarity_score(y2, y2), 1)
assert_equal(jaccard_similarity_score(y2, [(), ()]), 0)
# |y3 inter y4 | = [0, 0, 0]
# |y3 union y4 | = [2, 2, 3]
y3 = [(0,), (1,), (3,)]
y4 = [(4,), (4,), (5, 6)]
assert_almost_equal(jaccard_similarity_score(y3, y4), 0)
# |y5 inter y6 | = [0, 1, 1]
# |y5 union y6 | = [2, 1, 3]
y5 = [(0,), (1,), (2, 3)]
y6 = [(1,), (1,), (2, 0)]
assert_almost_equal(jaccard_similarity_score(y5, y6), (1 + 1 / 3) / 3)
@ignore_warnings
def test_precision_recall_f1_score_multilabel_1():
# Test precision_recall_f1_score on a crafted multilabel example
# First crafted example
y_true_ll = [(0,), (1,), (2, 3)]
y_pred_ll = [(1,), (1,), (2, 0)]
lb = LabelBinarizer()
lb.fit([range(4)])
y_true_bi = lb.transform(y_true_ll)
y_pred_bi = lb.transform(y_pred_ll)
for y_true, y_pred in [(y_true_ll, y_pred_ll), (y_true_bi, y_pred_bi)]:
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average=None)
#tp = [0, 1, 1, 0]
#fn = [1, 0, 0, 1]
#fp = [1, 1, 0, 0]
# Check per class
assert_array_almost_equal(p, [0.0, 0.5, 1.0, 0.0], 2)
assert_array_almost_equal(r, [0.0, 1.0, 1.0, 0.0], 2)
assert_array_almost_equal(f, [0.0, 1 / 1.5, 1, 0.0], 2)
assert_array_almost_equal(s, [1, 1, 1, 1], 2)
f2 = fbeta_score(y_true, y_pred, beta=2, average=None)
support = s
assert_array_almost_equal(f2, [0, 0.83, 1, 0], 2)
# Check macro
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="macro")
assert_almost_equal(p, 1.5 / 4)
assert_almost_equal(r, 0.5)
assert_almost_equal(f, 2.5 / 1.5 * 0.25)
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
average="macro"),
np.mean(f2))
# Check micro
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="micro")
assert_almost_equal(p, 0.5)
assert_almost_equal(r, 0.5)
assert_almost_equal(f, 0.5)
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
average="micro"),
(1 + 4) * p * r / (4 * p + r))
# Check weighted
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="weighted")
assert_almost_equal(p, 1.5 / 4)
assert_almost_equal(r, 0.5)
assert_almost_equal(f, 2.5 / 1.5 * 0.25)
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
average="weighted"),
np.average(f2, weights=support))
# Check samples
# |h(x_i) inter y_i | = [0, 1, 1]
# |y_i| = [1, 1, 2]
# |h(x_i)| = [1, 1, 2]
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="samples")
assert_almost_equal(p, 0.5)
assert_almost_equal(r, 0.5)
assert_almost_equal(f, 0.5)
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
average="samples"),
0.5)
@ignore_warnings
def test_precision_recall_f1_score_multilabel_2():
# Test precision_recall_f1_score on a crafted multilabel example 2
# Second crafted example
y_true_ll = [(1,), (2,), (2, 3)]
y_pred_ll = [(4,), (4,), (2, 1)]
lb = LabelBinarizer()
lb.fit([range(1, 5)])
y_true_bi = lb.transform(y_true_ll)
y_pred_bi = lb.transform(y_pred_ll)
for y_true, y_pred in [(y_true_ll, y_pred_ll), (y_true_bi, y_pred_bi)]:
# tp = [ 0. 1. 0. 0.]
# fp = [ 1. 0. 0. 2.]
# fn = [ 1. 1. 1. 0.]
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average=None)
assert_array_almost_equal(p, [0.0, 1.0, 0.0, 0.0], 2)
assert_array_almost_equal(r, [0.0, 0.5, 0.0, 0.0], 2)
assert_array_almost_equal(f, [0.0, 0.66, 0.0, 0.0], 2)
assert_array_almost_equal(s, [1, 2, 1, 0], 2)
f2 = fbeta_score(y_true, y_pred, beta=2, average=None)
support = s
assert_array_almost_equal(f2, [0, 0.55, 0, 0], 2)
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="micro")
assert_almost_equal(p, 0.25)
assert_almost_equal(r, 0.25)
assert_almost_equal(f, 2 * 0.25 * 0.25 / 0.5)
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
average="micro"),
(1 + 4) * p * r / (4 * p + r))
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="macro")
assert_almost_equal(p, 0.25)
assert_almost_equal(r, 0.125)
assert_almost_equal(f, 2 / 12)
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
average="macro"),
np.mean(f2))
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="weighted")
assert_almost_equal(p, 2 / 4)
assert_almost_equal(r, 1 / 4)
assert_almost_equal(f, 2 / 3 * 2 / 4)
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
average="weighted"),
np.average(f2, weights=support))
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="samples")
# Check samples
# |h(x_i) inter y_i | = [0, 0, 1]
# |y_i| = [1, 1, 2]
# |h(x_i)| = [1, 1, 2]
assert_almost_equal(p, 1 / 6)
assert_almost_equal(r, 1 / 6)
assert_almost_equal(f, 2 / 4 * 1 / 3)
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
average="samples"),
0.1666, 2)
@ignore_warnings
def test_precision_recall_f1_score_with_an_empty_prediction():
y_true_ll = [(1,), (0,), (2, 1,)]
y_pred_ll = [tuple(), (3,), (2, 1)]
lb = LabelBinarizer()
lb.fit([range(4)])
y_true_bi = lb.transform(y_true_ll)
y_pred_bi = lb.transform(y_pred_ll)
for y_true, y_pred in [(y_true_ll, y_pred_ll), (y_true_bi, y_pred_bi)]:
# true_pos = [ 0. 1. 1. 0.]
# false_pos = [ 0. 0. 0. 1.]
# false_neg = [ 1. 1. 0. 0.]
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average=None)
assert_array_almost_equal(p, [0.0, 1.0, 1.0, 0.0], 2)
assert_array_almost_equal(r, [0.0, 0.5, 1.0, 0.0], 2)
assert_array_almost_equal(f, [0.0, 1 / 1.5, 1, 0.0], 2)
assert_array_almost_equal(s, [1, 2, 1, 0], 2)
f2 = fbeta_score(y_true, y_pred, beta=2, average=None)
support = s
assert_array_almost_equal(f2, [0, 0.55, 1, 0], 2)
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="macro")
assert_almost_equal(p, 0.5)
assert_almost_equal(r, 1.5 / 4)
assert_almost_equal(f, 2.5 / (4 * 1.5))
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
average="macro"),
np.mean(f2))
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="micro")
assert_almost_equal(p, 2 / 3)
assert_almost_equal(r, 0.5)
assert_almost_equal(f, 2 / 3 / (2 / 3 + 0.5))
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
average="micro"),
(1 + 4) * p * r / (4 * p + r))
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="weighted")
assert_almost_equal(p, 3 / 4)
assert_almost_equal(r, 0.5)
assert_almost_equal(f, (2 / 1.5 + 1) / 4)
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
average="weighted"),
np.average(f2, weights=support))
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="samples")
# |h(x_i) inter y_i | = [0, 0, 2]
# |y_i| = [1, 1, 2]
# |h(x_i)| = [0, 1, 2]
assert_almost_equal(p, 1 / 3)
assert_almost_equal(r, 1 / 3)
assert_almost_equal(f, 1 / 3)
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
average="samples"),
0.333, 2)
def test_precision_recall_f1_no_labels():
y_true = np.zeros((20, 3))
y_pred = np.zeros_like(y_true)
# tp = [0, 0, 0]
# fn = [0, 0, 0]
# fp = [0, 0, 0]
# support = [0, 0, 0]
# |y_hat_i inter y_i | = [0, 0, 0]
# |y_i| = [0, 0, 0]
# |y_hat_i| = [0, 0, 0]
for beta in [1]:
p, r, f, s = assert_warns(UndefinedMetricWarning,
precision_recall_fscore_support,
y_true, y_pred, average=None, beta=beta)
assert_array_almost_equal(p, [0, 0, 0], 2)
assert_array_almost_equal(r, [0, 0, 0], 2)
assert_array_almost_equal(f, [0, 0, 0], 2)
assert_array_almost_equal(s, [0, 0, 0], 2)
fbeta = assert_warns(UndefinedMetricWarning, fbeta_score,
y_true, y_pred, beta=beta, average=None)
assert_array_almost_equal(fbeta, [0, 0, 0], 2)
for average in ["macro", "micro", "weighted", "samples"]:
p, r, f, s = assert_warns(UndefinedMetricWarning,
precision_recall_fscore_support,
y_true, y_pred, average=average,
beta=beta)
assert_almost_equal(p, 0)
assert_almost_equal(r, 0)
assert_almost_equal(f, 0)
assert_equal(s, None)
fbeta = assert_warns(UndefinedMetricWarning, fbeta_score,
y_true, y_pred,
beta=beta, average=average)
assert_almost_equal(fbeta, 0)
def test_prf_warnings():
# average of per-label scores
f, w = precision_recall_fscore_support, UndefinedMetricWarning
my_assert = assert_warns_message
for average in [None, 'weighted', 'macro']:
msg = ('Precision and F-score are ill-defined and '
'being set to 0.0 in labels with no predicted samples.')
my_assert(w, msg, f, [0, 1, 2], [1, 1, 2], average=average)
msg = ('Recall and F-score are ill-defined and '
'being set to 0.0 in labels with no true samples.')
my_assert(w, msg, f, [1, 1, 2], [0, 1, 2], average=average)
# average of per-sample scores
msg = ('Precision and F-score are ill-defined and '
'being set to 0.0 in samples with no predicted labels.')
my_assert(w, msg, f, np.array([[1, 0], [1, 0]]),
np.array([[1, 0], [0, 0]]), average='samples')
msg = ('Recall and F-score are ill-defined and '
'being set to 0.0 in samples with no true labels.')
my_assert(w, msg, f, np.array([[1, 0], [0, 0]]),
np.array([[1, 0], [1, 0]]),
average='samples')
# single score: micro-average
msg = ('Precision and F-score are ill-defined and '
'being set to 0.0 due to no predicted samples.')
my_assert(w, msg, f, np.array([[1, 1], [1, 1]]),
np.array([[0, 0], [0, 0]]), average='micro')
msg = ('Recall and F-score are ill-defined and '
'being set to 0.0 due to no true samples.')
my_assert(w, msg, f, np.array([[0, 0], [0, 0]]),
np.array([[1, 1], [1, 1]]), average='micro')
# single positive label
msg = ('Precision and F-score are ill-defined and '
'being set to 0.0 due to no predicted samples.')
my_assert(w, msg, f, [1, 1], [-1, -1], average='macro')
msg = ('Recall and F-score are ill-defined and '
'being set to 0.0 due to no true samples.')
my_assert(w, msg, f, [-1, -1], [1, 1], average='macro')
def test_recall_warnings():
assert_no_warnings(recall_score,
np.array([[1, 1], [1, 1]]),
np.array([[0, 0], [0, 0]]),
average='micro')
clean_warning_registry()
with warnings.catch_warnings(record=True) as record:
warnings.simplefilter('always')
recall_score(np.array([[0, 0], [0, 0]]),
np.array([[1, 1], [1, 1]]),
average='micro')
assert_equal(str(record.pop().message),
'Recall is ill-defined and '
'being set to 0.0 due to no true samples.')
def test_precision_warnings():
clean_warning_registry()
with warnings.catch_warnings(record=True) as record:
warnings.simplefilter('always')
precision_score(np.array([[1, 1], [1, 1]]),
np.array([[0, 0], [0, 0]]),
average='micro')
assert_equal(str(record.pop().message),
'Precision is ill-defined and '
'being set to 0.0 due to no predicted samples.')
assert_no_warnings(precision_score,
np.array([[0, 0], [0, 0]]),
np.array([[1, 1], [1, 1]]),
average='micro')
def test_fscore_warnings():
clean_warning_registry()
with warnings.catch_warnings(record=True) as record:
warnings.simplefilter('always')
for score in [f1_score, partial(fbeta_score, beta=2)]:
score(np.array([[1, 1], [1, 1]]),
np.array([[0, 0], [0, 0]]),
average='micro')
assert_equal(str(record.pop().message),
'F-score is ill-defined and '
'being set to 0.0 due to no predicted samples.')
score(np.array([[0, 0], [0, 0]]),
np.array([[1, 1], [1, 1]]),
average='micro')
assert_equal(str(record.pop().message),
'F-score is ill-defined and '
'being set to 0.0 due to no true samples.')
def test_prf_average_compat():
# Ensure warning if f1_score et al.'s average is implicit for multiclass
y_true = [1, 2, 3, 3]
y_pred = [1, 2, 3, 1]
y_true_bin = [0, 1, 1]
y_pred_bin = [0, 1, 0]
for metric in [precision_score, recall_score, f1_score,
partial(fbeta_score, beta=2)]:
score = assert_warns(DeprecationWarning, metric, y_true, y_pred)
score_weighted = assert_no_warnings(metric, y_true, y_pred,
average='weighted')
assert_equal(score, score_weighted,
'average does not act like "weighted" by default')
# check binary passes without warning
assert_no_warnings(metric, y_true_bin, y_pred_bin)
# but binary with pos_label=None should behave like multiclass
score = assert_warns(DeprecationWarning, metric,
y_true_bin, y_pred_bin, pos_label=None)
score_weighted = assert_no_warnings(metric, y_true_bin, y_pred_bin,
pos_label=None, average='weighted')
assert_equal(score, score_weighted,
'average does not act like "weighted" by default with '
'binary data and pos_label=None')
@ignore_warnings # sequence of sequences is deprecated
def test__check_targets():
# Check that _check_targets correctly merges target types, squeezes
# output and fails if input lengths differ.
IND = 'multilabel-indicator'
SEQ = 'multilabel-sequences'
MC = 'multiclass'
BIN = 'binary'
CNT = 'continuous'
MMC = 'multiclass-multioutput'
MCN = 'continuous-multioutput'
# all of length 3
EXAMPLES = [
(IND, np.array([[0, 1, 1], [1, 0, 0], [0, 0, 1]])),
# must not be considered binary
(IND, np.array([[0, 1], [1, 0], [1, 1]])),
(SEQ, [[2, 3], [1], [3]]),
(MC, [2, 3, 1]),
(BIN, [0, 1, 1]),
(CNT, [0., 1.5, 1.]),
(MC, np.array([[2], [3], [1]])),
(BIN, np.array([[0], [1], [1]])),
(CNT, np.array([[0.], [1.5], [1.]])),
(MMC, np.array([[0, 2], [1, 3], [2, 3]])),
(MCN, np.array([[0.5, 2.], [1.1, 3.], [2., 3.]])),
]
# expected type given input types, or None for error
# (types will be tried in either order)
EXPECTED = {
(IND, IND): IND,
(SEQ, SEQ): IND,
(MC, MC): MC,
(BIN, BIN): BIN,
(IND, SEQ): None,
(MC, SEQ): None,
(BIN, SEQ): None,
(MC, IND): None,
(BIN, IND): None,
(BIN, MC): MC,
# Disallowed types
(CNT, CNT): None,
(MMC, MMC): None,
(MCN, MCN): None,
(IND, CNT): None,
(SEQ, CNT): None,
(MC, CNT): None,
(BIN, CNT): None,
(MMC, CNT): None,
(MCN, CNT): None,
(IND, MMC): None,
(SEQ, MMC): None,
(MC, MMC): None,
(BIN, MMC): None,
(MCN, MMC): None,
(IND, MCN): None,
(SEQ, MCN): None,
(MC, MCN): None,
(BIN, MCN): None,
}
for (type1, y1), (type2, y2) in product(EXAMPLES, repeat=2):
try:
expected = EXPECTED[type1, type2]
except KeyError:
expected = EXPECTED[type2, type1]
if expected is None:
assert_raises(ValueError, _check_targets, y1, y2)
if type1 != type2:
assert_raise_message(
ValueError,
"Can't handle mix of {0} and {1}".format(type1, type2),
_check_targets, y1, y2)
else:
if type1 not in (BIN, MC, SEQ, IND):
assert_raise_message(ValueError,
"{0} is not supported".format(type1),
_check_targets, y1, y2)
else:
merged_type, y1out, y2out = _check_targets(y1, y2)
assert_equal(merged_type, expected)
if merged_type.startswith('multilabel'):
assert_equal(y1out.format, 'csr')
assert_equal(y2out.format, 'csr')
else:
assert_array_equal(y1out, np.squeeze(y1))
assert_array_equal(y2out, np.squeeze(y2))
assert_raises(ValueError, _check_targets, y1[:-1], y2)
def test_hinge_loss_binary():
y_true = np.array([-1, 1, 1, -1])
pred_decision = np.array([-8.5, 0.5, 1.5, -0.3])
assert_equal(hinge_loss(y_true, pred_decision), 1.2 / 4)
y_true = np.array([0, 2, 2, 0])
pred_decision = np.array([-8.5, 0.5, 1.5, -0.3])
assert_equal(hinge_loss(y_true, pred_decision), 1.2 / 4)
def test_hinge_loss_multiclass():
pred_decision = np.array([
[0.36, -0.17, -0.58, -0.99],
[-0.54, -0.37, -0.48, -0.58],
[-1.45, -0.58, -0.38, -0.17],
[-0.54, -0.38, -0.48, -0.58],
[-2.36, -0.79, -0.27, 0.24],
[-1.45, -0.58, -0.38, -0.17]
])
y_true = np.array([0, 1, 2, 1, 3, 2])
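# Reference computation of the multiclass hinge loss: for each sample the
# loss is max(0, 1 - (decision value of the true class minus the highest
# decision value among the other classes)), and the final score is the mean.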
dummy_losses = np.array([
1 - pred_decision[0][0] + pred_decision[0][1],
1 - pred_decision[1][1] + pred_decision[1][2],
1 - pred_decision[2][2] + pred_decision[2][3],
1 - pred_decision[3][1] + pred_decision[3][2],
1 - pred_decision[4][3] + pred_decision[4][2],
1 - pred_decision[5][2] + pred_decision[5][3]
])
dummy_losses[dummy_losses <= 0] = 0
dummy_hinge_loss = np.mean(dummy_losses)
assert_equal(hinge_loss(y_true, pred_decision),
dummy_hinge_loss)
def test_hinge_loss_multiclass_missing_labels_with_labels_none():
y_true = np.array([0, 1, 2, 2])
pred_decision = np.array([
[1.27, 0.034, -0.68, -1.40],
[-1.45, -0.58, -0.38, -0.17],
[-2.36, -0.79, -0.27, 0.24],
[-2.36, -0.79, -0.27, 0.24]
])
error_message = ("Please include all labels in y_true "
"or pass labels as third argument")
assert_raise_message(ValueError,
error_message,
hinge_loss, y_true, pred_decision)
def test_hinge_loss_multiclass_with_missing_labels():
pred_decision = np.array([
[0.36, -0.17, -0.58, -0.99],
[-0.55, -0.38, -0.48, -0.58],
[-1.45, -0.58, -0.38, -0.17],
[-0.55, -0.38, -0.48, -0.58],
[-1.45, -0.58, -0.38, -0.17]
])
y_true = np.array([0, 1, 2, 1, 2])
labels = np.array([0, 1, 2, 3])
dummy_losses = np.array([
1 - pred_decision[0][0] + pred_decision[0][1],
1 - pred_decision[1][1] + pred_decision[1][2],
1 - pred_decision[2][2] + pred_decision[2][3],
1 - pred_decision[3][1] + pred_decision[3][2],
1 - pred_decision[4][2] + pred_decision[4][3]
])
dummy_losses[dummy_losses <= 0] = 0
dummy_hinge_loss = np.mean(dummy_losses)
assert_equal(hinge_loss(y_true, pred_decision, labels=labels),
dummy_hinge_loss)
def test_hinge_loss_multiclass_invariance_lists():
# Currently, invariance of string and integer labels cannot be tested
# in common invariance tests because invariance tests for multiclass
# decision functions is not implemented yet.
y_true = ['blue', 'green', 'red',
'green', 'white', 'red']
pred_decision = [
[0.36, -0.17, -0.58, -0.99],
[-0.55, -0.38, -0.48, -0.58],
[-1.45, -0.58, -0.38, -0.17],
[-0.55, -0.38, -0.48, -0.58],
[-2.36, -0.79, -0.27, 0.24],
[-1.45, -0.58, -0.38, -0.17]]
dummy_losses = np.array([
1 - pred_decision[0][0] + pred_decision[0][1],
1 - pred_decision[1][1] + pred_decision[1][2],
1 - pred_decision[2][2] + pred_decision[2][3],
1 - pred_decision[3][1] + pred_decision[3][2],
1 - pred_decision[4][3] + pred_decision[4][2],
1 - pred_decision[5][2] + pred_decision[5][3]
])
dummy_losses[dummy_losses <= 0] = 0
dummy_hinge_loss = np.mean(dummy_losses)
assert_equal(hinge_loss(y_true, pred_decision),
dummy_hinge_loss)
def test_log_loss():
# binary case with symbolic labels ("no" < "yes")
y_true = ["no", "no", "no", "yes", "yes", "yes"]
y_pred = np.array([[0.5, 0.5], [0.1, 0.9], [0.01, 0.99],
[0.9, 0.1], [0.75, 0.25], [0.001, 0.999]])
loss = log_loss(y_true, y_pred)
assert_almost_equal(loss, 1.8817971)
# multiclass case; adapted from http://bit.ly/RJJHWA
y_true = [1, 0, 2]
y_pred = [[0.2, 0.7, 0.1], [0.6, 0.2, 0.2], [0.6, 0.1, 0.3]]
loss = log_loss(y_true, y_pred, normalize=True)
assert_almost_equal(loss, 0.6904911)
# check that we got all the shapes and axes right
# by doubling the length of y_true and y_pred
y_true *= 2
y_pred *= 2
loss = log_loss(y_true, y_pred, normalize=False)
assert_almost_equal(loss, 0.6904911 * 6, decimal=6)
# check eps and handling of absolute zero and one probabilities
y_pred = np.asarray(y_pred) > .5
loss = log_loss(y_true, y_pred, normalize=True, eps=.1)
assert_almost_equal(loss, log_loss(y_true, np.clip(y_pred, .1, .9)))
# raise error if number of classes are not equal.
y_true = [1, 0, 2]
y_pred = [[0.2, 0.7], [0.6, 0.5], [0.4, 0.1]]
assert_raises(ValueError, log_loss, y_true, y_pred)
# case when y_true is a string array object
y_true = ["ham", "spam", "spam", "ham"]
y_pred = [[0.2, 0.7], [0.6, 0.5], [0.4, 0.1], [0.7, 0.2]]
loss = log_loss(y_true, y_pred)
assert_almost_equal(loss, 1.0383217, decimal=6)
def test_brier_score_loss():
# Check brier_score_loss function
y_true = np.array([0, 1, 1, 0, 1, 1])
y_pred = np.array([0.1, 0.8, 0.9, 0.3, 1., 0.95])
true_score = linalg.norm(y_true - y_pred) ** 2 / len(y_true)
assert_almost_equal(brier_score_loss(y_true, y_true), 0.0)
assert_almost_equal(brier_score_loss(y_true, y_pred), true_score)
assert_almost_equal(brier_score_loss(1. + y_true, y_pred),
true_score)
assert_almost_equal(brier_score_loss(2 * y_true - 1, y_pred),
true_score)
assert_raises(ValueError, brier_score_loss, y_true, y_pred[1:])
assert_raises(ValueError, brier_score_loss, y_true, y_pred + 1.)
assert_raises(ValueError, brier_score_loss, y_true, y_pred - 1.)
| bsd-3-clause |
CforED/Machine-Learning | sklearn/neighbors/approximate.py | 30 | 22370 | """Approximate nearest neighbor search"""
# Author: Maheshakya Wijewardena <[email protected]>
# Joel Nothman <[email protected]>
import numpy as np
import warnings
from scipy import sparse
from .base import KNeighborsMixin, RadiusNeighborsMixin
from ..base import BaseEstimator
from ..utils.validation import check_array
from ..utils import check_random_state
from ..metrics.pairwise import pairwise_distances
from ..random_projection import GaussianRandomProjection
__all__ = ["LSHForest"]
HASH_DTYPE = '>u4'
MAX_HASH_SIZE = np.dtype(HASH_DTYPE).itemsize * 8
def _find_matching_indices(tree, bin_X, left_mask, right_mask):
"""Finds indices in sorted array of integers.
Most significant h bits in the binary representations of the
integers are matched with the items' most significant h bits.
"""
left_index = np.searchsorted(tree, bin_X & left_mask)
right_index = np.searchsorted(tree, bin_X | right_mask,
side='right')
return left_index, right_index
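# For _find_matching_indices, tree[left_index:right_index] is exactly the
# block of sorted hashes sharing the query's h most significant bits: the
# left bound zeroes the trailing bits (bin_X & left_mask) and the right bound
# sets them all to one (bin_X | right_mask).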
def _find_longest_prefix_match(tree, bin_X, hash_size,
left_masks, right_masks):
"""Find the longest prefix match in tree for each query in bin_X
Most significant bits are considered as the prefix.
"""
hi = np.empty_like(bin_X, dtype=np.intp)
hi.fill(hash_size)
lo = np.zeros_like(bin_X, dtype=np.intp)
res = np.empty_like(bin_X, dtype=np.intp)
left_idx, right_idx = _find_matching_indices(tree, bin_X,
left_masks[hi],
right_masks[hi])
found = right_idx > left_idx
res[found] = lo[found] = hash_size
r = np.arange(bin_X.shape[0])
kept = r[lo < hi] # indices remaining in bin_X mask
while kept.shape[0]:
mid = (lo.take(kept) + hi.take(kept)) // 2
left_idx, right_idx = _find_matching_indices(tree,
bin_X.take(kept),
left_masks[mid],
right_masks[mid])
found = right_idx > left_idx
mid_found = mid[found]
lo[kept[found]] = mid_found + 1
res[kept[found]] = mid_found
hi[kept[~found]] = mid[~found]
kept = r[lo < hi]
return res
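# _find_longest_prefix_match performs a binary search over candidate prefix
# lengths (between 0 and hash_size bits) and returns, for every query, the
# longest prefix length at which at least one stored hash still matches.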
class ProjectionToHashMixin(object):
"""Turn a transformed real-valued array into a hash"""
@staticmethod
def _to_hash(projected):
if projected.shape[1] % 8 != 0:
raise ValueError('Require reduced dimensionality to be a multiple '
'of 8 for hashing')
# XXX: perhaps non-copying operation better
out = np.packbits((projected > 0).astype(int)).view(dtype=HASH_DTYPE)
return out.reshape(projected.shape[0], -1)
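# As used by LSHForest below (n_components = MAX_HASH_SIZE = 32), each
# sample's 32 sign bits (projected > 0) end up packed into a single
# big-endian uint32 hash.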
def fit_transform(self, X, y=None):
self.fit(X)
return self.transform(X)
def transform(self, X, y=None):
return self._to_hash(super(ProjectionToHashMixin, self).transform(X))
class GaussianRandomProjectionHash(ProjectionToHashMixin,
GaussianRandomProjection):
"""Use GaussianRandomProjection to produce a cosine LSH fingerprint"""
def __init__(self,
n_components=8,
random_state=None):
super(GaussianRandomProjectionHash, self).__init__(
n_components=n_components,
random_state=random_state)
def _array_of_arrays(list_of_arrays):
"""Creates an array of array from list of arrays."""
out = np.empty(len(list_of_arrays), dtype=object)
out[:] = list_of_arrays
return out
class LSHForest(BaseEstimator, KNeighborsMixin, RadiusNeighborsMixin):
"""Performs approximate nearest neighbor search using LSH forest.
LSH Forest: Locality Sensitive Hashing forest [1] is an alternative
method for vanilla approximate nearest neighbor search methods.
LSH forest data structure has been implemented using sorted
arrays and binary search and 32 bit fixed-length hashes.
Random projection is used as the hash family which approximates
cosine distance.
The cosine distance is defined as ``1 - cosine_similarity``: the lowest
value is 0 (identical point) but it is bounded above by 2 for the farthest
points. Its value does not depend on the norm of the vector points but
only on their relative angles.
Read more in the :ref:`User Guide <approximate_nearest_neighbors>`.
Parameters
----------
n_estimators : int (default = 10)
Number of trees in the LSH Forest.
min_hash_match : int (default = 4)
Lowest hash length to be searched when candidate selection is
performed for nearest neighbors.
n_candidates : int (default = 10)
Minimum number of candidates evaluated per estimator, assuming enough
items meet the `min_hash_match` constraint.
n_neighbors : int (default = 5)
Number of neighbors to be returned from query function when
it is not provided to the :meth:`kneighbors` method.
radius : float, optional (default = 1.0)
Radius from the data point to its neighbors. This is the parameter
space to use by default for the :meth:`radius_neighbors` queries.
radius_cutoff_ratio : float, optional (default = 0.9)
A value ranging from 0 to 1. Radius neighbors will be searched until
the ratio between total neighbors within the radius and the total
candidates becomes less than this value unless it is terminated by
hash length reaching `min_hash_match`.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Attributes
----------
hash_functions_ : list of GaussianRandomProjectionHash objects
Hash function g(p,x) for a tree is an array of 32 randomly generated
float arrays with the same dimension as the data set. This array is
stored in GaussianRandomProjectionHash object and can be obtained
from ``components_`` attribute.
trees_ : array, shape (n_estimators, n_samples)
Each tree (corresponding to a hash function) contains an array of
sorted hashed values. The array representation may change in future
versions.
original_indices_ : array, shape (n_estimators, n_samples)
Original indices of sorted hashed values in the fitted index.
References
----------
.. [1] M. Bawa, T. Condie and P. Ganesan, "LSH Forest: Self-Tuning
Indexes for Similarity Search", WWW '05 Proceedings of the
14th international conference on World Wide Web, 651-660,
2005.
Examples
--------
>>> from sklearn.neighbors import LSHForest
>>> X_train = [[5, 5, 2], [21, 5, 5], [1, 1, 1], [8, 9, 1], [6, 10, 2]]
>>> X_test = [[9, 1, 6], [3, 1, 10], [7, 10, 3]]
>>> lshf = LSHForest(random_state=42)
>>> lshf.fit(X_train) # doctest: +NORMALIZE_WHITESPACE
LSHForest(min_hash_match=4, n_candidates=50, n_estimators=10,
n_neighbors=5, radius=1.0, radius_cutoff_ratio=0.9,
random_state=42)
>>> distances, indices = lshf.kneighbors(X_test, n_neighbors=2)
>>> distances # doctest: +ELLIPSIS
array([[ 0.069..., 0.149...],
[ 0.229..., 0.481...],
[ 0.004..., 0.014...]])
>>> indices
array([[1, 2],
[2, 0],
[4, 0]])
"""
def __init__(self, n_estimators=10, radius=1.0, n_candidates=50,
n_neighbors=5, min_hash_match=4, radius_cutoff_ratio=.9,
random_state=None):
self.n_estimators = n_estimators
self.radius = radius
self.random_state = random_state
self.n_candidates = n_candidates
self.n_neighbors = n_neighbors
self.min_hash_match = min_hash_match
self.radius_cutoff_ratio = radius_cutoff_ratio
def _compute_distances(self, query, candidates):
"""Computes the cosine distance.
Distance is from the query to points in the candidates array.
Returns argsort of distances in the candidates
array and sorted distances.
"""
if candidates.shape == (0,):
# needed since _fit_X[np.array([])] doesn't work if _fit_X sparse
return np.empty(0, dtype=np.int), np.empty(0, dtype=float)
if sparse.issparse(self._fit_X):
candidate_X = self._fit_X[candidates]
else:
candidate_X = self._fit_X.take(candidates, axis=0, mode='clip')
distances = pairwise_distances(query, candidate_X,
metric='cosine')[0]
distance_positions = np.argsort(distances)
distances = distances.take(distance_positions, mode='clip', axis=0)
return distance_positions, distances
def _generate_masks(self):
"""Creates left and right masks for all hash lengths."""
tri_size = MAX_HASH_SIZE + 1
# Called once on fitting, output is independent of hashes
left_mask = np.tril(np.ones((tri_size, tri_size), dtype=int))[:, 1:]
right_mask = left_mask[::-1, ::-1]
self._left_mask = np.packbits(left_mask).view(dtype=HASH_DTYPE)
self._right_mask = np.packbits(right_mask).view(dtype=HASH_DTYPE)
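# For a prefix length h, _left_mask[h] keeps only the top h bits and
# _right_mask[h] sets the remaining low bits, so that
# (query & left_mask, query | right_mask) bound the block of sorted hashes
# sharing that h-bit prefix (see _find_matching_indices above).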
def _get_candidates(self, query, max_depth, bin_queries, n_neighbors):
"""Performs the Synchronous ascending phase.
Returns an array of candidates, their distance ranks and
distances.
"""
index_size = self._fit_X.shape[0]
# Number of candidates considered including duplicates
# XXX: not sure whether this is being calculated correctly wrt
# duplicates from different iterations through a single tree
n_candidates = 0
candidate_set = set()
min_candidates = self.n_candidates * self.n_estimators
while (max_depth > self.min_hash_match and
(n_candidates < min_candidates or
len(candidate_set) < n_neighbors)):
left_mask = self._left_mask[max_depth]
right_mask = self._right_mask[max_depth]
for i in range(self.n_estimators):
start, stop = _find_matching_indices(self.trees_[i],
bin_queries[i],
left_mask, right_mask)
n_candidates += stop - start
candidate_set.update(
self.original_indices_[i][start:stop].tolist())
max_depth -= 1
candidates = np.fromiter(candidate_set, count=len(candidate_set),
dtype=np.intp)
        # If there are not enough candidates, the candidate list is filled up
        # uniformly from the unselected indices.
if candidates.shape[0] < n_neighbors:
warnings.warn(
"Number of candidates is not sufficient to retrieve"
" %i neighbors with"
" min_hash_match = %i. Candidates are filled up"
" uniformly from unselected"
" indices." % (n_neighbors, self.min_hash_match))
remaining = np.setdiff1d(np.arange(0, index_size), candidates)
to_fill = n_neighbors - candidates.shape[0]
candidates = np.concatenate((candidates, remaining[:to_fill]))
ranks, distances = self._compute_distances(query,
candidates.astype(int))
return (candidates[ranks[:n_neighbors]],
distances[:n_neighbors])
def _get_radius_neighbors(self, query, max_depth, bin_queries, radius):
"""Finds radius neighbors from the candidates obtained.
Their distances from query are smaller than radius.
Returns radius neighbors and distances.
"""
ratio_within_radius = 1
threshold = 1 - self.radius_cutoff_ratio
total_candidates = np.array([], dtype=int)
total_neighbors = np.array([], dtype=int)
total_distances = np.array([], dtype=float)
while (max_depth > self.min_hash_match and
ratio_within_radius > threshold):
left_mask = self._left_mask[max_depth]
right_mask = self._right_mask[max_depth]
candidates = []
for i in range(self.n_estimators):
start, stop = _find_matching_indices(self.trees_[i],
bin_queries[i],
left_mask, right_mask)
candidates.extend(
self.original_indices_[i][start:stop].tolist())
candidates = np.setdiff1d(candidates, total_candidates)
total_candidates = np.append(total_candidates, candidates)
ranks, distances = self._compute_distances(query, candidates)
m = np.searchsorted(distances, radius, side='right')
positions = np.searchsorted(total_distances, distances[:m])
total_neighbors = np.insert(total_neighbors, positions,
candidates[ranks[:m]])
total_distances = np.insert(total_distances, positions,
distances[:m])
ratio_within_radius = (total_neighbors.shape[0] /
float(total_candidates.shape[0]))
max_depth = max_depth - 1
return total_neighbors, total_distances
def fit(self, X, y=None):
"""Fit the LSH forest on the data.
This creates binary hashes of input data points by getting the
dot product of input points and hash_function then
transforming the projection into a binary string array based
on the sign (positive/negative) of the projection.
A sorted array of binary hashes is created.
Parameters
----------
X : array_like or sparse (CSR) matrix, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
self : object
Returns self.
"""
self._fit_X = check_array(X, accept_sparse='csr')
# Creates a g(p,x) for each tree
self.hash_functions_ = []
self.trees_ = []
self.original_indices_ = []
rng = check_random_state(self.random_state)
int_max = np.iinfo(np.int32).max
for i in range(self.n_estimators):
# This is g(p,x) for a particular tree.
# Builds a single tree. Hashing is done on an array of data points.
# `GaussianRandomProjection` is used for hashing.
            # n_components = hash size and n_features = n_dim.
hasher = GaussianRandomProjectionHash(MAX_HASH_SIZE,
rng.randint(0, int_max))
hashes = hasher.fit_transform(self._fit_X)[:, 0]
original_index = np.argsort(hashes)
bin_hashes = hashes[original_index]
self.original_indices_.append(original_index)
self.trees_.append(bin_hashes)
self.hash_functions_.append(hasher)
self._generate_masks()
return self
def _query(self, X):
"""Performs descending phase to find maximum depth."""
# Calculate hashes of shape (n_samples, n_estimators, [hash_size])
bin_queries = np.asarray([hasher.transform(X)[:, 0]
for hasher in self.hash_functions_])
bin_queries = np.rollaxis(bin_queries, 1)
# descend phase
depths = [_find_longest_prefix_match(tree, tree_queries, MAX_HASH_SIZE,
self._left_mask, self._right_mask)
for tree, tree_queries in zip(self.trees_,
np.rollaxis(bin_queries, 1))]
return bin_queries, np.max(depths, axis=0)
def kneighbors(self, X, n_neighbors=None, return_distance=True):
"""Returns n_neighbors of approximate nearest neighbors.
Parameters
----------
X : array_like or sparse (CSR) matrix, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single query.
        n_neighbors : int, optional (default = None)
Number of neighbors required. If not provided, this will
return the number specified at the initialization.
        return_distance : boolean, optional (default = True)
Returns the distances of neighbors if set to True.
Returns
-------
dist : array, shape (n_samples, n_neighbors)
Array representing the cosine distances to each point,
only present if return_distance=True.
ind : array, shape (n_samples, n_neighbors)
Indices of the approximate nearest points in the population
matrix.
"""
if not hasattr(self, 'hash_functions_'):
raise ValueError("estimator should be fitted.")
if n_neighbors is None:
n_neighbors = self.n_neighbors
X = check_array(X, accept_sparse='csr')
neighbors, distances = [], []
bin_queries, max_depth = self._query(X)
for i in range(X.shape[0]):
neighs, dists = self._get_candidates(X[[i]], max_depth[i],
bin_queries[i],
n_neighbors)
neighbors.append(neighs)
distances.append(dists)
if return_distance:
return np.array(distances), np.array(neighbors)
else:
return np.array(neighbors)
def radius_neighbors(self, X, radius=None, return_distance=True):
"""Finds the neighbors within a given radius of a point or points.
Return the indices and distances of some points from the dataset
lying in a ball with size ``radius`` around the points of the query
array. Points lying on the boundary are included in the results.
The result points are *not* necessarily sorted by distance to their
query point.
LSH Forest being an approximate method, some true neighbors from the
indexed dataset might be missing from the results.
Parameters
----------
X : array_like or sparse (CSR) matrix, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single query.
radius : float
Limiting distance of neighbors to return.
(default is the value passed to the constructor).
        return_distance : boolean, optional (default = True)
Returns the distances of neighbors if set to True.
Returns
-------
dist : array, shape (n_samples,) of arrays
Each element is an array representing the cosine distances
to some points found within ``radius`` of the respective query.
Only present if ``return_distance=True``.
ind : array, shape (n_samples,) of arrays
Each element is an array of indices for neighbors within ``radius``
of the respective query.
"""
if not hasattr(self, 'hash_functions_'):
raise ValueError("estimator should be fitted.")
if radius is None:
radius = self.radius
X = check_array(X, accept_sparse='csr')
neighbors, distances = [], []
bin_queries, max_depth = self._query(X)
for i in range(X.shape[0]):
neighs, dists = self._get_radius_neighbors(X[[i]], max_depth[i],
bin_queries[i], radius)
neighbors.append(neighs)
distances.append(dists)
if return_distance:
return _array_of_arrays(distances), _array_of_arrays(neighbors)
else:
return _array_of_arrays(neighbors)
def partial_fit(self, X, y=None):
"""
Inserts new data into the already fitted LSH Forest.
Cost is proportional to new total size, so additions
should be batched.
Parameters
----------
X : array_like or sparse (CSR) matrix, shape (n_samples, n_features)
New data point to be inserted into the LSH Forest.
"""
X = check_array(X, accept_sparse='csr')
if not hasattr(self, 'hash_functions_'):
return self.fit(X)
if X.shape[1] != self._fit_X.shape[1]:
raise ValueError("Number of features in X and"
" fitted array does not match.")
n_samples = X.shape[0]
n_indexed = self._fit_X.shape[0]
for i in range(self.n_estimators):
bin_X = self.hash_functions_[i].transform(X)[:, 0]
# gets the position to be added in the tree.
positions = self.trees_[i].searchsorted(bin_X)
# adds the hashed value into the tree.
self.trees_[i] = np.insert(self.trees_[i],
positions, bin_X)
# add the entry into the original_indices_.
self.original_indices_[i] = np.insert(self.original_indices_[i],
positions,
np.arange(n_indexed,
n_indexed +
n_samples))
# adds the entry into the input_array.
if sparse.issparse(X) or sparse.issparse(self._fit_X):
self._fit_X = sparse.vstack((self._fit_X, X))
else:
self._fit_X = np.row_stack((self._fit_X, X))
return self
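# A minimal usage sketch (not part of the original module): the docstring
# example above covers ``kneighbors`` only, so this shows ``radius_neighbors``
# and ``partial_fit``.  The helper name is illustrative, not a library function.
def _demo_lshforest_extra_queries():
    rng = np.random.RandomState(42)
    X_index = rng.rand(50, 10)
    X_query = rng.rand(3, 10)
    lshf = LSHForest(random_state=42).fit(X_index)
    # approximate neighbors whose cosine distance to each query is <= 0.2
    distances, indices = lshf.radius_neighbors(X_query, radius=0.2)
    # index five more points without rebuilding the forest from scratch
    lshf.partial_fit(rng.rand(5, 10))
    return distances, indices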
| bsd-3-clause |
madjelan/scikit-learn | sklearn/kernel_approximation.py | 258 | 17973 | """
The :mod:`sklearn.kernel_approximation` module implements several
approximate kernel feature maps based on Fourier transforms.
"""
# Author: Andreas Mueller <[email protected]>
#
# License: BSD 3 clause
import warnings
import numpy as np
import scipy.sparse as sp
from scipy.linalg import svd
from .base import BaseEstimator
from .base import TransformerMixin
from .utils import check_array, check_random_state, as_float_array
from .utils.extmath import safe_sparse_dot
from .utils.validation import check_is_fitted
from .metrics.pairwise import pairwise_kernels
class RBFSampler(BaseEstimator, TransformerMixin):
"""Approximates feature map of an RBF kernel by Monte Carlo approximation
of its Fourier transform.
    It implements a variant of Random Kitchen Sinks [1].
Read more in the :ref:`User Guide <rbf_kernel_approx>`.
Parameters
----------
gamma : float
Parameter of RBF kernel: exp(-gamma * x^2)
n_components : int
Number of Monte Carlo samples per original feature.
Equals the dimensionality of the computed feature space.
random_state : {int, RandomState}, optional
If int, random_state is the seed used by the random number generator;
if RandomState instance, random_state is the random number generator.
Notes
-----
See "Random Features for Large-Scale Kernel Machines" by A. Rahimi and
Benjamin Recht.
[1] "Weighted Sums of Random Kitchen Sinks: Replacing
minimization with randomization in learning" by A. Rahimi and
Benjamin Recht.
(http://www.eecs.berkeley.edu/~brecht/papers/08.rah.rec.nips.pdf)
"""
def __init__(self, gamma=1., n_components=100, random_state=None):
self.gamma = gamma
self.n_components = n_components
self.random_state = random_state
def fit(self, X, y=None):
"""Fit the model with X.
Samples random projection according to n_features.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
            Training data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
self : object
Returns the transformer.
"""
X = check_array(X, accept_sparse='csr')
random_state = check_random_state(self.random_state)
n_features = X.shape[1]
self.random_weights_ = (np.sqrt(2 * self.gamma) * random_state.normal(
size=(n_features, self.n_components)))
self.random_offset_ = random_state.uniform(0, 2 * np.pi,
size=self.n_components)
return self
def transform(self, X, y=None):
"""Apply the approximate feature map to X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
            New data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
X_new : array-like, shape (n_samples, n_components)
"""
check_is_fitted(self, 'random_weights_')
X = check_array(X, accept_sparse='csr')
projection = safe_sparse_dot(X, self.random_weights_)
projection += self.random_offset_
np.cos(projection, projection)
projection *= np.sqrt(2.) / np.sqrt(self.n_components)
return projection
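# A minimal usage sketch (not part of the original module): map a toy dataset
# through the random Fourier features and fit a linear model on the result.
# ``SGDClassifier`` is used purely for illustration.
def _demo_rbf_sampler():
    from sklearn.linear_model import SGDClassifier
    X = [[0, 0], [1, 1], [1, 0], [0, 1]]
    y = [0, 0, 1, 1]
    rbf_feature = RBFSampler(gamma=1., n_components=100, random_state=1)
    X_features = rbf_feature.fit_transform(X)  # shape (4, 100)
    clf = SGDClassifier(random_state=0)
    clf.fit(X_features, y)
    return clf.score(X_features, y)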
class SkewedChi2Sampler(BaseEstimator, TransformerMixin):
"""Approximates feature map of the "skewed chi-squared" kernel by Monte
Carlo approximation of its Fourier transform.
Read more in the :ref:`User Guide <skewed_chi_kernel_approx>`.
Parameters
----------
skewedness : float
"skewedness" parameter of the kernel. Needs to be cross-validated.
n_components : int
number of Monte Carlo samples per original feature.
Equals the dimensionality of the computed feature space.
random_state : {int, RandomState}, optional
If int, random_state is the seed used by the random number generator;
if RandomState instance, random_state is the random number generator.
References
----------
See "Random Fourier Approximations for Skewed Multiplicative Histogram
Kernels" by Fuxin Li, Catalin Ionescu and Cristian Sminchisescu.
See also
--------
AdditiveChi2Sampler : A different approach for approximating an additive
variant of the chi squared kernel.
sklearn.metrics.pairwise.chi2_kernel : The exact chi squared kernel.
"""
def __init__(self, skewedness=1., n_components=100, random_state=None):
self.skewedness = skewedness
self.n_components = n_components
self.random_state = random_state
def fit(self, X, y=None):
"""Fit the model with X.
Samples random projection according to n_features.
Parameters
----------
X : array-like, shape (n_samples, n_features)
            Training data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
self : object
Returns the transformer.
"""
X = check_array(X)
random_state = check_random_state(self.random_state)
n_features = X.shape[1]
uniform = random_state.uniform(size=(n_features, self.n_components))
# transform by inverse CDF of sech
self.random_weights_ = (1. / np.pi
* np.log(np.tan(np.pi / 2. * uniform)))
self.random_offset_ = random_state.uniform(0, 2 * np.pi,
size=self.n_components)
return self
def transform(self, X, y=None):
"""Apply the approximate feature map to X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
            New data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
X_new : array-like, shape (n_samples, n_components)
"""
check_is_fitted(self, 'random_weights_')
X = as_float_array(X, copy=True)
X = check_array(X, copy=False)
if (X < 0).any():
raise ValueError("X may not contain entries smaller than zero.")
X += self.skewedness
np.log(X, X)
projection = safe_sparse_dot(X, self.random_weights_)
projection += self.random_offset_
np.cos(projection, projection)
projection *= np.sqrt(2.) / np.sqrt(self.n_components)
return projection
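# A minimal usage sketch (not part of the original module): the skewed
# chi-squared map only accepts non-negative input (e.g. histogram features).
def _demo_skewed_chi2_sampler():
    rng = np.random.RandomState(0)
    X = rng.rand(10, 5)  # non-negative by construction
    sampler = SkewedChi2Sampler(skewedness=1., n_components=50, random_state=0)
    X_features = sampler.fit_transform(X)
    return X_features.shape  # (10, 50)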
class AdditiveChi2Sampler(BaseEstimator, TransformerMixin):
"""Approximate feature map for additive chi2 kernel.
    Uses sampling of the Fourier transform of the kernel characteristic
at regular intervals.
Since the kernel that is to be approximated is additive, the components of
the input vectors can be treated separately. Each entry in the original
    space is transformed into 2*sample_steps-1 features, where sample_steps is
a parameter of the method. Typical values of sample_steps include 1, 2 and
3.
Optimal choices for the sampling interval for certain data ranges can be
computed (see the reference). The default values should be reasonable.
Read more in the :ref:`User Guide <additive_chi_kernel_approx>`.
Parameters
----------
sample_steps : int, optional
Gives the number of (complex) sampling points.
sample_interval : float, optional
Sampling interval. Must be specified when sample_steps not in {1,2,3}.
Notes
-----
This estimator approximates a slightly different version of the additive
    chi squared kernel than ``metric.additive_chi2`` computes.
See also
--------
SkewedChi2Sampler : A Fourier-approximation to a non-additive variant of
the chi squared kernel.
sklearn.metrics.pairwise.chi2_kernel : The exact chi squared kernel.
sklearn.metrics.pairwise.additive_chi2_kernel : The exact additive chi
squared kernel.
References
----------
See `"Efficient additive kernels via explicit feature maps"
<http://www.robots.ox.ac.uk/~vedaldi/assets/pubs/vedaldi11efficient.pdf>`_
A. Vedaldi and A. Zisserman, Pattern Analysis and Machine Intelligence,
2011
"""
def __init__(self, sample_steps=2, sample_interval=None):
self.sample_steps = sample_steps
self.sample_interval = sample_interval
def fit(self, X, y=None):
"""Set parameters."""
X = check_array(X, accept_sparse='csr')
if self.sample_interval is None:
# See reference, figure 2 c)
if self.sample_steps == 1:
self.sample_interval_ = 0.8
elif self.sample_steps == 2:
self.sample_interval_ = 0.5
elif self.sample_steps == 3:
self.sample_interval_ = 0.4
else:
raise ValueError("If sample_steps is not in [1, 2, 3],"
" you need to provide sample_interval")
else:
self.sample_interval_ = self.sample_interval
return self
def transform(self, X, y=None):
"""Apply approximate feature map to X.
Parameters
----------
X : {array-like, sparse matrix}, shape = (n_samples, n_features)
Returns
-------
X_new : {array, sparse matrix}, \
            shape = (n_samples, n_features * (2*sample_steps - 1))
            Whether the return value is an array or sparse matrix depends on
the type of the input X.
"""
msg = ("%(name)s is not fitted. Call fit to set the parameters before"
" calling transform")
check_is_fitted(self, "sample_interval_", msg=msg)
X = check_array(X, accept_sparse='csr')
sparse = sp.issparse(X)
# check if X has negative values. Doesn't play well with np.log.
if ((X.data if sparse else X) < 0).any():
raise ValueError("Entries of X must be non-negative.")
# zeroth component
# 1/cosh = sech
# cosh(0) = 1.0
transf = self._transform_sparse if sparse else self._transform_dense
return transf(X)
def _transform_dense(self, X):
non_zero = (X != 0.0)
X_nz = X[non_zero]
X_step = np.zeros_like(X)
X_step[non_zero] = np.sqrt(X_nz * self.sample_interval_)
X_new = [X_step]
log_step_nz = self.sample_interval_ * np.log(X_nz)
step_nz = 2 * X_nz * self.sample_interval_
for j in range(1, self.sample_steps):
factor_nz = np.sqrt(step_nz /
np.cosh(np.pi * j * self.sample_interval_))
X_step = np.zeros_like(X)
X_step[non_zero] = factor_nz * np.cos(j * log_step_nz)
X_new.append(X_step)
X_step = np.zeros_like(X)
X_step[non_zero] = factor_nz * np.sin(j * log_step_nz)
X_new.append(X_step)
return np.hstack(X_new)
def _transform_sparse(self, X):
indices = X.indices.copy()
indptr = X.indptr.copy()
data_step = np.sqrt(X.data * self.sample_interval_)
X_step = sp.csr_matrix((data_step, indices, indptr),
shape=X.shape, dtype=X.dtype, copy=False)
X_new = [X_step]
log_step_nz = self.sample_interval_ * np.log(X.data)
step_nz = 2 * X.data * self.sample_interval_
for j in range(1, self.sample_steps):
factor_nz = np.sqrt(step_nz /
np.cosh(np.pi * j * self.sample_interval_))
data_step = factor_nz * np.cos(j * log_step_nz)
X_step = sp.csr_matrix((data_step, indices, indptr),
shape=X.shape, dtype=X.dtype, copy=False)
X_new.append(X_step)
data_step = factor_nz * np.sin(j * log_step_nz)
X_step = sp.csr_matrix((data_step, indices, indptr),
shape=X.shape, dtype=X.dtype, copy=False)
X_new.append(X_step)
return sp.hstack(X_new)
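# A minimal usage sketch (not part of the original module): with
# ``sample_steps=2`` each input feature expands into 2*2 - 1 = 3 output
# features, so the (6, 4) input below becomes a (6, 12) feature matrix.
def _demo_additive_chi2_sampler():
    rng = np.random.RandomState(0)
    X = rng.rand(6, 4)  # entries must be non-negative
    chi2_feature = AdditiveChi2Sampler(sample_steps=2)
    X_transformed = chi2_feature.fit_transform(X)
    return X_transformed.shape  # (6, 12)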
class Nystroem(BaseEstimator, TransformerMixin):
"""Approximate a kernel map using a subset of the training data.
Constructs an approximate feature map for an arbitrary kernel
using a subset of the data as basis.
Read more in the :ref:`User Guide <nystroem_kernel_approx>`.
Parameters
----------
kernel : string or callable, default="rbf"
Kernel map to be approximated. A callable should accept two arguments
and the keyword arguments passed to this object as kernel_params, and
should return a floating point number.
n_components : int
Number of features to construct.
How many data points will be used to construct the mapping.
gamma : float, default=None
Gamma parameter for the RBF, polynomial, exponential chi2 and
sigmoid kernels. Interpretation of the default value is left to
the kernel; see the documentation for sklearn.metrics.pairwise.
Ignored by other kernels.
degree : float, default=3
Degree of the polynomial kernel. Ignored by other kernels.
coef0 : float, default=1
Zero coefficient for polynomial and sigmoid kernels.
Ignored by other kernels.
kernel_params : mapping of string to any, optional
Additional parameters (keyword arguments) for kernel function passed
as callable object.
random_state : {int, RandomState}, optional
If int, random_state is the seed used by the random number generator;
if RandomState instance, random_state is the random number generator.
Attributes
----------
components_ : array, shape (n_components, n_features)
Subset of training points used to construct the feature map.
component_indices_ : array, shape (n_components)
Indices of ``components_`` in the training set.
normalization_ : array, shape (n_components, n_components)
Normalization matrix needed for embedding.
Square root of the kernel matrix on ``components_``.
References
----------
* Williams, C.K.I. and Seeger, M.
"Using the Nystroem method to speed up kernel machines",
Advances in neural information processing systems 2001
* T. Yang, Y. Li, M. Mahdavi, R. Jin and Z. Zhou
"Nystroem Method vs Random Fourier Features: A Theoretical and Empirical
Comparison",
Advances in Neural Information Processing Systems 2012
See also
--------
RBFSampler : An approximation to the RBF kernel using random Fourier
features.
sklearn.metrics.pairwise.kernel_metrics : List of built-in kernels.
"""
def __init__(self, kernel="rbf", gamma=None, coef0=1, degree=3,
kernel_params=None, n_components=100, random_state=None):
self.kernel = kernel
self.gamma = gamma
self.coef0 = coef0
self.degree = degree
self.kernel_params = kernel_params
self.n_components = n_components
self.random_state = random_state
def fit(self, X, y=None):
"""Fit estimator to data.
Samples a subset of training points, computes kernel
on these and computes normalization matrix.
Parameters
----------
X : array-like, shape=(n_samples, n_feature)
Training data.
"""
X = check_array(X, accept_sparse='csr')
rnd = check_random_state(self.random_state)
n_samples = X.shape[0]
# get basis vectors
if self.n_components > n_samples:
# XXX should we just bail?
n_components = n_samples
warnings.warn("n_components > n_samples. This is not possible.\n"
"n_components was set to n_samples, which results"
" in inefficient evaluation of the full kernel.")
else:
n_components = self.n_components
n_components = min(n_samples, n_components)
inds = rnd.permutation(n_samples)
basis_inds = inds[:n_components]
basis = X[basis_inds]
basis_kernel = pairwise_kernels(basis, metric=self.kernel,
filter_params=True,
**self._get_kernel_params())
# sqrt of kernel matrix on basis vectors
U, S, V = svd(basis_kernel)
S = np.maximum(S, 1e-12)
self.normalization_ = np.dot(U * 1. / np.sqrt(S), V)
self.components_ = basis
self.component_indices_ = inds
return self
def transform(self, X):
"""Apply feature map to X.
Computes an approximate feature map using the kernel
between some training points and X.
Parameters
----------
X : array-like, shape=(n_samples, n_features)
Data to transform.
Returns
-------
X_transformed : array, shape=(n_samples, n_components)
Transformed data.
"""
check_is_fitted(self, 'components_')
X = check_array(X, accept_sparse='csr')
kernel_params = self._get_kernel_params()
embedded = pairwise_kernels(X, self.components_,
metric=self.kernel,
filter_params=True,
**kernel_params)
return np.dot(embedded, self.normalization_.T)
def _get_kernel_params(self):
params = self.kernel_params
if params is None:
params = {}
if not callable(self.kernel):
params['gamma'] = self.gamma
params['degree'] = self.degree
params['coef0'] = self.coef0
return params
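# A minimal usage sketch (not part of the original module): build a
# 20-component Nystroem approximation of an RBF kernel map and feed the
# transformed data to a linear SVM.  ``LinearSVC`` is used purely for
# illustration.
def _demo_nystroem():
    from sklearn.svm import LinearSVC
    rng = np.random.RandomState(0)
    X = rng.rand(40, 6)
    y = rng.randint(0, 2, size=40)
    feature_map = Nystroem(kernel='rbf', gamma=0.2, n_components=20,
                           random_state=0)
    X_transformed = feature_map.fit_transform(X)  # shape (40, 20)
    clf = LinearSVC(random_state=0)
    clf.fit(X_transformed, y)
    return clf.score(X_transformed, y)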
| bsd-3-clause |
nelango/ViralityAnalysis | model/lib/pandas/io/pytables.py | 9 | 156275 | """
High level interface to PyTables for reading and writing pandas data structures
to disk
"""
# pylint: disable-msg=E1101,W0613,W0603
from datetime import datetime, date
import time
import re
import copy
import itertools
import warnings
import os
import numpy as np
import pandas as pd
from pandas import (Series, DataFrame, Panel, Panel4D, Index,
MultiIndex, Int64Index, Timestamp)
from pandas.sparse.api import SparseSeries, SparseDataFrame, SparsePanel
from pandas.sparse.array import BlockIndex, IntIndex
from pandas.tseries.api import PeriodIndex, DatetimeIndex
from pandas.tseries.tdi import TimedeltaIndex
from pandas.core.base import StringMixin
from pandas.core.common import adjoin, pprint_thing
from pandas.core.algorithms import match, unique
from pandas.core.categorical import Categorical
from pandas.core.common import _asarray_tuplesafe
from pandas.core.internals import (BlockManager, make_block, _block2d_to_blocknd,
_factor_indexer, _block_shape)
from pandas.core.index import _ensure_index
from pandas.tseries.timedeltas import _coerce_scalar_to_timedelta_type
import pandas.core.common as com
from pandas.tools.merge import concat
from pandas import compat
from pandas.compat import u_safe as u, PY3, range, lrange, string_types, filter
from pandas.io.common import PerformanceWarning
from pandas.core.config import get_option
from pandas.computation.pytables import Expr, maybe_expression
import pandas.lib as lib
import pandas.algos as algos
import pandas.tslib as tslib
from contextlib import contextmanager
from distutils.version import LooseVersion
# versioning attribute
_version = '0.15.2'
### encoding ###
# PY3 encoding if we don't specify
_default_encoding = 'UTF-8'
def _ensure_decoded(s):
""" if we have bytes, decode them to unicode """
if isinstance(s, np.bytes_):
s = s.decode('UTF-8')
return s
def _ensure_encoding(encoding):
# set the encoding if we need
if encoding is None:
if PY3:
encoding = _default_encoding
return encoding
Term = Expr
def _ensure_term(where, scope_level):
"""
ensure that the where is a Term or a list of Term
this makes sure that we are capturing the scope of variables
that are passed
create the terms here with a frame_level=2 (we are 2 levels down)
"""
    # only consider list/tuple here as an ndarray is automatically a coordinate
# list
level = scope_level + 1
if isinstance(where, (list, tuple)):
wlist = []
for w in filter(lambda x: x is not None, where):
if not maybe_expression(w):
wlist.append(w)
else:
wlist.append(Term(w, scope_level=level))
where = wlist
elif maybe_expression(where):
where = Term(where, scope_level=level)
return where
class PossibleDataLossError(Exception):
pass
class ClosedFileError(Exception):
pass
class IncompatibilityWarning(Warning):
pass
incompatibility_doc = """
where criteria is being ignored as this version [%s] is too old (or
not-defined), read the file in and write it out to a new file to upgrade (with
the copy_to method)
"""
class AttributeConflictWarning(Warning):
pass
attribute_conflict_doc = """
the [%s] attribute of the existing index is [%s] which conflicts with the new
[%s], resetting the attribute to None
"""
class DuplicateWarning(Warning):
pass
duplicate_doc = """
duplicate entries in table, taking most recently appended
"""
performance_doc = """
your performance may suffer as PyTables will pickle object types that it cannot
map directly to c-types [inferred_type->%s,key->%s] [items->%s]
"""
# formats
_FORMAT_MAP = {
u('f'): 'fixed',
u('fixed'): 'fixed',
u('t'): 'table',
u('table'): 'table',
}
format_deprecate_doc = """
the table keyword has been deprecated
use the format='fixed(f)|table(t)' keyword instead
fixed(f) : specifies the Fixed format
and is the default for put operations
table(t) : specifies the Table format
and is the default for append operations
"""
# map object types
_TYPE_MAP = {
Series: u('series'),
SparseSeries: u('sparse_series'),
pd.TimeSeries: u('series'),
DataFrame: u('frame'),
SparseDataFrame: u('sparse_frame'),
Panel: u('wide'),
Panel4D: u('ndim'),
SparsePanel: u('sparse_panel')
}
# storer class map
_STORER_MAP = {
u('TimeSeries'): 'LegacySeriesFixed',
u('Series'): 'LegacySeriesFixed',
u('DataFrame'): 'LegacyFrameFixed',
u('DataMatrix'): 'LegacyFrameFixed',
u('series'): 'SeriesFixed',
u('sparse_series'): 'SparseSeriesFixed',
u('frame'): 'FrameFixed',
u('sparse_frame'): 'SparseFrameFixed',
u('wide'): 'PanelFixed',
u('sparse_panel'): 'SparsePanelFixed',
}
# table class map
_TABLE_MAP = {
u('generic_table'): 'GenericTable',
u('appendable_series'): 'AppendableSeriesTable',
u('appendable_multiseries'): 'AppendableMultiSeriesTable',
u('appendable_frame'): 'AppendableFrameTable',
u('appendable_multiframe'): 'AppendableMultiFrameTable',
u('appendable_panel'): 'AppendablePanelTable',
u('appendable_ndim'): 'AppendableNDimTable',
u('worm'): 'WORMTable',
u('legacy_frame'): 'LegacyFrameTable',
u('legacy_panel'): 'LegacyPanelTable',
}
# axes map
_AXES_MAP = {
DataFrame: [0],
Panel: [1, 2],
Panel4D: [1, 2, 3],
}
# register our configuration options
from pandas.core import config
dropna_doc = """
: boolean
drop ALL nan rows when appending to a table
"""
format_doc = """
: format
default format writing format, if None, then
put will default to 'fixed' and append will default to 'table'
"""
with config.config_prefix('io.hdf'):
config.register_option('dropna_table', False, dropna_doc,
validator=config.is_bool)
config.register_option(
'default_format', None, format_doc,
validator=config.is_one_of_factory(['fixed', 'table', None])
)
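# A minimal usage sketch (not part of the original module): the two options
# registered above are read and written through the regular pandas options API.
def _demo_io_hdf_options():
    pd.set_option('io.hdf.default_format', 'table')  # make Tables the default
    current = pd.get_option('io.hdf.default_format')
    pd.set_option('io.hdf.default_format', None)  # restore the built-in default
    return current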
# oh the troubles to reduce import time
_table_mod = None
_table_file_open_policy_is_strict = False
def _tables():
global _table_mod
global _table_file_open_policy_is_strict
if _table_mod is None:
import tables
_table_mod = tables
# version requirements
if LooseVersion(tables.__version__) < '3.0.0':
raise ImportError("PyTables version >= 3.0.0 is required")
# set the file open policy
# return the file open policy; this changes as of pytables 3.1
# depending on the HDF5 version
try:
_table_file_open_policy_is_strict = tables.file._FILE_OPEN_POLICY == 'strict'
except:
pass
return _table_mod
# interface to/from ###
def to_hdf(path_or_buf, key, value, mode=None, complevel=None, complib=None,
append=None, **kwargs):
""" store this object, close it if we opened it """
if append:
f = lambda store: store.append(key, value, **kwargs)
else:
f = lambda store: store.put(key, value, **kwargs)
if isinstance(path_or_buf, string_types):
with HDFStore(path_or_buf, mode=mode, complevel=complevel,
complib=complib) as store:
f(store)
else:
f(path_or_buf)
def read_hdf(path_or_buf, key=None, **kwargs):
""" read from the store, close it if we opened it
Retrieve pandas object stored in file, optionally based on where
criteria
Parameters
----------
path_or_buf : path (string), or buffer to read from
    key : group identifier in the store. Can be omitted if the HDF file contains
a single pandas object.
    where : list of Term (or convertible) objects, optional
start : optional, integer (defaults to None), row number to start
selection
stop : optional, integer (defaults to None), row number to stop
selection
columns : optional, a list of columns that if not None, will limit the
return columns
iterator : optional, boolean, return an iterator, default False
chunksize : optional, nrows to include in iteration, return an iterator
Returns
-------
The selected object
"""
# grab the scope
if 'where' in kwargs:
kwargs['where'] = _ensure_term(kwargs['where'], scope_level=1)
if isinstance(path_or_buf, string_types):
try:
exists = os.path.exists(path_or_buf)
#if filepath is too long
except (TypeError,ValueError):
exists = False
if not exists:
raise IOError('File %s does not exist' % path_or_buf)
# can't auto open/close if we are using an iterator
# so delegate to the iterator
store = HDFStore(path_or_buf, **kwargs)
auto_close = True
elif isinstance(path_or_buf, HDFStore):
if not path_or_buf.is_open:
raise IOError('The HDFStore must be open for reading.')
store = path_or_buf
auto_close = False
else:
raise NotImplementedError('Support for generic buffers has not been '
'implemented.')
try:
if key is None:
keys = store.keys()
if len(keys) != 1:
raise ValueError('key must be provided when HDF file contains '
'multiple datasets.')
key = keys[0]
return store.select(key, auto_close=auto_close, **kwargs)
except:
# if there is an error, close the store
try:
store.close()
except:
pass
raise
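# A minimal usage sketch (not part of the original module): round-trip a
# DataFrame through ``to_hdf``/``read_hdf`` and run a where-clause query.
# The file name 'demo.h5' is arbitrary.
def _demo_to_hdf_read_hdf():
    df = DataFrame({'a': [1, 2, 3], 'b': [4.0, 5.0, 6.0]})
    # 'table' format with 'a' as a data column is what makes the
    # where-clause query below possible
    to_hdf('demo.h5', 'df', df, mode='w', format='table', data_columns=['a'])
    # the key may be omitted because the file holds a single object
    roundtripped = read_hdf('demo.h5')
    # select only the rows where column 'a' is greater than 1
    subset = read_hdf('demo.h5', 'df', where='a > 1')
    return roundtripped, subset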
class HDFStore(StringMixin):
"""
dict-like IO interface for storing pandas objects in PyTables
either Fixed or Table format.
Parameters
----------
path : string
File path to HDF5 file
mode : {'a', 'w', 'r', 'r+'}, default 'a'
``'r'``
Read-only; no data can be modified.
``'w'``
Write; a new file is created (an existing file with the same
name would be deleted).
``'a'``
Append; an existing file is opened for reading and writing,
and if the file does not exist it is created.
``'r+'``
It is similar to ``'a'``, but the file must already exist.
complevel : int, 1-9, default 0
If a complib is specified compression will be applied
where possible
complib : {'zlib', 'bzip2', 'lzo', 'blosc', None}, default None
If complevel is > 0 apply compression to objects written
in the store wherever possible
fletcher32 : bool, default False
If applying compression use the fletcher32 checksum
Examples
--------
>>> from pandas import DataFrame
>>> from numpy.random import randn
>>> bar = DataFrame(randn(10, 4))
>>> store = HDFStore('test.h5')
>>> store['foo'] = bar # write to HDF5
>>> bar = store['foo'] # retrieve
>>> store.close()
"""
def __init__(self, path, mode=None, complevel=None, complib=None,
fletcher32=False, **kwargs):
try:
import tables
except ImportError as ex: # pragma: no cover
raise ImportError('HDFStore requires PyTables, "{ex}" problem importing'.format(ex=str(ex)))
if complib not in (None, 'blosc', 'bzip2', 'lzo', 'zlib'):
raise ValueError("complib only supports 'blosc', 'bzip2', lzo' "
"or 'zlib' compression.")
self._path = path
if mode is None:
mode = 'a'
self._mode = mode
self._handle = None
self._complevel = complevel
self._complib = complib
self._fletcher32 = fletcher32
self._filters = None
self.open(mode=mode, **kwargs)
@property
def root(self):
""" return the root node """
self._check_if_open()
return self._handle.root
@property
def filename(self):
return self._path
def __getitem__(self, key):
return self.get(key)
def __setitem__(self, key, value):
self.put(key, value)
def __delitem__(self, key):
return self.remove(key)
def __getattr__(self, name):
""" allow attribute access to get stores """
self._check_if_open()
try:
return self.get(name)
except:
pass
raise AttributeError("'%s' object has no attribute '%s'" %
(type(self).__name__, name))
def __contains__(self, key):
""" check for existance of this key
can match the exact pathname or the pathnm w/o the leading '/'
"""
node = self.get_node(key)
if node is not None:
name = node._v_pathname
if name == key or name[1:] == key:
return True
return False
def __len__(self):
return len(self.groups())
def __unicode__(self):
output = '%s\nFile path: %s\n' % (type(self), pprint_thing(self._path))
if self.is_open:
lkeys = sorted(list(self.keys()))
if len(lkeys):
keys = []
values = []
for k in lkeys:
try:
s = self.get_storer(k)
if s is not None:
keys.append(pprint_thing(s.pathname or k))
values.append(
pprint_thing(s or 'invalid_HDFStore node'))
except Exception as detail:
keys.append(k)
values.append("[invalid_HDFStore node: %s]"
% pprint_thing(detail))
output += adjoin(12, keys, values)
else:
output += 'Empty'
else:
output += "File is CLOSED"
return output
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
def keys(self):
"""
Return a (potentially unordered) list of the keys corresponding to the
objects stored in the HDFStore. These are ABSOLUTE path-names (e.g.
        have the leading '/')
"""
return [n._v_pathname for n in self.groups()]
def items(self):
"""
iterate on key->group
"""
for g in self.groups():
yield g._v_pathname, g
iteritems = items
def open(self, mode='a', **kwargs):
"""
Open the file in the specified mode
Parameters
----------
mode : {'a', 'w', 'r', 'r+'}, default 'a'
See HDFStore docstring or tables.open_file for info about modes
"""
tables = _tables()
if self._mode != mode:
# if we are changing a write mode to read, ok
if self._mode in ['a', 'w'] and mode in ['r', 'r+']:
pass
elif mode in ['w']:
# this would truncate, raise here
if self.is_open:
raise PossibleDataLossError(
"Re-opening the file [{0}] with mode [{1}] "
"will delete the current file!"
.format(self._path, self._mode)
)
self._mode = mode
# close and reopen the handle
if self.is_open:
self.close()
if self._complib is not None:
if self._complevel is None:
self._complevel = 9
self._filters = _tables().Filters(self._complevel,
self._complib,
fletcher32=self._fletcher32)
try:
self._handle = tables.open_file(self._path, self._mode, **kwargs)
except (IOError) as e: # pragma: no cover
if 'can not be written' in str(e):
print('Opening %s in read-only mode' % self._path)
self._handle = tables.open_file(self._path, 'r', **kwargs)
else:
raise
except (ValueError) as e:
# trap PyTables >= 3.1 FILE_OPEN_POLICY exception
# to provide an updated message
if 'FILE_OPEN_POLICY' in str(e):
e = ValueError("PyTables [{version}] no longer supports opening multiple files\n"
"even in read-only mode on this HDF5 version [{hdf_version}]. You can accept this\n"
"and not open the same file multiple times at once,\n"
"upgrade the HDF5 version, or downgrade to PyTables 3.0.0 which allows\n"
"files to be opened multiple times at once\n".format(version=tables.__version__,
hdf_version=tables.get_hdf5_version()))
raise e
except (Exception) as e:
            # trying to read from a non-existent file causes an error which
# is not part of IOError, make it one
if self._mode == 'r' and 'Unable to open/create file' in str(e):
raise IOError(str(e))
raise
def close(self):
"""
Close the PyTables file handle
"""
if self._handle is not None:
self._handle.close()
self._handle = None
@property
def is_open(self):
"""
return a boolean indicating whether the file is open
"""
if self._handle is None:
return False
return bool(self._handle.isopen)
def flush(self, fsync=False):
"""
Force all buffered modifications to be written to disk.
Parameters
----------
fsync : bool (default False)
call ``os.fsync()`` on the file handle to force writing to disk.
Notes
-----
Without ``fsync=True``, flushing may not guarantee that the OS writes
to disk. With fsync, the operation will block until the OS claims the
file has been written; however, other caching layers may still
interfere.
"""
if self._handle is not None:
self._handle.flush()
if fsync:
try:
os.fsync(self._handle.fileno())
except:
pass
def get(self, key):
"""
Retrieve pandas object stored in file
Parameters
----------
key : object
Returns
-------
obj : type of object stored in file
"""
group = self.get_node(key)
if group is None:
raise KeyError('No object named %s in the file' % key)
return self._read_group(group)
def select(self, key, where=None, start=None, stop=None, columns=None,
iterator=False, chunksize=None, auto_close=False, **kwargs):
"""
Retrieve pandas object stored in file, optionally based on where
criteria
Parameters
----------
key : object
        where : list of Term (or convertible) objects, optional
start : integer (defaults to None), row number to start selection
stop : integer (defaults to None), row number to stop selection
columns : a list of columns that if not None, will limit the return
columns
iterator : boolean, return an iterator, default False
chunksize : nrows to include in iteration, return an iterator
auto_close : boolean, should automatically close the store when
finished, default is False
Returns
-------
The selected object
"""
group = self.get_node(key)
if group is None:
raise KeyError('No object named %s in the file' % key)
# create the storer and axes
where = _ensure_term(where, scope_level=1)
s = self._create_storer(group)
s.infer_axes()
# function to call on iteration
def func(_start, _stop, _where):
return s.read(start=_start, stop=_stop,
where=_where,
columns=columns, **kwargs)
# create the iterator
it = TableIterator(self, s, func, where=where, nrows=s.nrows, start=start,
stop=stop, iterator=iterator, chunksize=chunksize,
auto_close=auto_close)
return it.get_result()
def select_as_coordinates(
self, key, where=None, start=None, stop=None, **kwargs):
"""
return the selection as an Index
Parameters
----------
key : object
        where : list of Term (or convertible) objects, optional
start : integer (defaults to None), row number to start selection
stop : integer (defaults to None), row number to stop selection
"""
where = _ensure_term(where, scope_level=1)
return self.get_storer(key).read_coordinates(where=where, start=start,
stop=stop, **kwargs)
def select_column(self, key, column, **kwargs):
"""
return a single column from the table. This is generally only useful to
select an indexable
Parameters
----------
key : object
column: the column of interest
Exceptions
----------
raises KeyError if the column is not found (or key is not a valid
store)
raises ValueError if the column can not be extracted individually (it
is part of a data block)
"""
return self.get_storer(key).read_column(column=column, **kwargs)
def select_as_multiple(self, keys, where=None, selector=None, columns=None,
start=None, stop=None, iterator=False,
chunksize=None, auto_close=False, **kwargs):
""" Retrieve pandas objects from multiple tables
Parameters
----------
keys : a list of the tables
selector : the table to apply the where criteria (defaults to keys[0]
if not supplied)
columns : the columns I want back
start : integer (defaults to None), row number to start selection
stop : integer (defaults to None), row number to stop selection
iterator : boolean, return an iterator, default False
chunksize : nrows to include in iteration, return an iterator
Exceptions
----------
raises KeyError if keys or selector is not found or keys is empty
raises TypeError if keys is not a list or tuple
raises ValueError if the tables are not ALL THE SAME DIMENSIONS
"""
# default to single select
where = _ensure_term(where, scope_level=1)
if isinstance(keys, (list, tuple)) and len(keys) == 1:
keys = keys[0]
if isinstance(keys, string_types):
return self.select(key=keys, where=where, columns=columns,
start=start, stop=stop, iterator=iterator,
chunksize=chunksize, **kwargs)
if not isinstance(keys, (list, tuple)):
raise TypeError("keys must be a list/tuple")
if not len(keys):
raise ValueError("keys must have a non-zero length")
if selector is None:
selector = keys[0]
# collect the tables
tbls = [self.get_storer(k) for k in keys]
s = self.get_storer(selector)
# validate rows
nrows = None
for t, k in itertools.chain([(s,selector)], zip(tbls, keys)):
if t is None:
raise KeyError("Invalid table [%s]" % k)
if not t.is_table:
raise TypeError(
"object [%s] is not a table, and cannot be used in all "
"select as multiple" % t.pathname
)
if nrows is None:
nrows = t.nrows
elif t.nrows != nrows:
raise ValueError(
"all tables must have exactly the same nrows!")
        # axis is the concatenation axis
axis = list(set([t.non_index_axes[0][0] for t in tbls]))[0]
def func(_start, _stop, _where):
# retrieve the objs, _where is always passed as a set of coordinates here
objs = [t.read(where=_where, columns=columns, **kwargs) for t in tbls]
# concat and return
return concat(objs, axis=axis,
verify_integrity=False).consolidate()
# create the iterator
it = TableIterator(self, s, func, where=where, nrows=nrows, start=start,
stop=stop, iterator=iterator, chunksize=chunksize,
auto_close=auto_close)
return it.get_result(coordinates=True)
def put(self, key, value, format=None, append=False, **kwargs):
"""
Store object in HDFStore
Parameters
----------
key : object
value : {Series, DataFrame, Panel}
format : 'fixed(f)|table(t)', default is 'fixed'
fixed(f) : Fixed format
Fast writing/reading. Not-appendable, nor searchable
table(t) : Table format
Write as a PyTables Table structure which may perform
worse but allow more flexible operations like searching
/ selecting subsets of the data
append : boolean, default False
This will force Table format, append the input data to the
existing.
encoding : default None, provide an encoding for strings
dropna : boolean, default False, do not write an ALL nan row to
the store settable by the option 'io.hdf.dropna_table'
"""
if format is None:
format = get_option("io.hdf.default_format") or 'fixed'
kwargs = self._validate_format(format, kwargs)
self._write_to_group(key, value, append=append, **kwargs)
def remove(self, key, where=None, start=None, stop=None):
"""
Remove pandas object partially by specifying the where condition
Parameters
----------
key : string
Node to remove or delete rows from
        where : list of Term (or convertible) objects, optional
start : integer (defaults to None), row number to start selection
stop : integer (defaults to None), row number to stop selection
Returns
-------
number of rows removed (or None if not a Table)
Exceptions
----------
raises KeyError if key is not a valid store
"""
where = _ensure_term(where, scope_level=1)
try:
s = self.get_storer(key)
except:
if where is not None:
raise ValueError(
"trying to remove a node with a non-None where clause!")
# we are actually trying to remove a node (with children)
s = self.get_node(key)
if s is not None:
s._f_remove(recursive=True)
return None
if s is None:
raise KeyError('No object named %s in the file' % key)
# remove the node
if where is None and start is None and stop is None:
s.group._f_remove(recursive=True)
# delete from the table
else:
if not s.is_table:
raise ValueError(
'can only remove with where on objects written as tables')
return s.delete(where=where, start=start, stop=stop)
def append(self, key, value, format=None, append=True, columns=None,
dropna=None, **kwargs):
"""
Append to Table in file. Node must already exist and be Table
format.
Parameters
----------
key : object
value : {Series, DataFrame, Panel, Panel4D}
format: 'table' is the default
table(t) : table format
Write as a PyTables Table structure which may perform
worse but allow more flexible operations like searching
/ selecting subsets of the data
append : boolean, default True, append the input data to the
existing
data_columns : list of columns to create as data columns, or True to
use all columns
min_itemsize : dict of columns that specify minimum string sizes
        nan_rep : string to use as string nan representation
chunksize : size to chunk the writing
expectedrows : expected TOTAL row size of this table
encoding : default None, provide an encoding for strings
dropna : boolean, default False, do not write an ALL nan row to
the store settable by the option 'io.hdf.dropna_table'
Notes
-----
Does *not* check if data being appended overlaps with existing
data in the table, so be careful
"""
if columns is not None:
raise TypeError("columns is not a supported keyword in append, "
"try data_columns")
if dropna is None:
dropna = get_option("io.hdf.dropna_table")
if format is None:
format = get_option("io.hdf.default_format") or 'table'
kwargs = self._validate_format(format, kwargs)
self._write_to_group(key, value, append=append, dropna=dropna,
**kwargs)
def append_to_multiple(self, d, value, selector, data_columns=None,
axes=None, dropna=False, **kwargs):
"""
Append to multiple tables
Parameters
----------
d : a dict of table_name to table_columns, None is acceptable as the
values of one node (this will get all the remaining columns)
value : a pandas object
selector : a string that designates the indexable table; all of its
columns will be designed as data_columns, unless data_columns is
passed, in which case these are used
data_columns : list of columns to create as data columns, or True to
use all columns
dropna : if evaluates to True, drop rows from all tables if any single
row in each table has all NaN. Default False.
Notes
-----
axes parameter is currently not accepted
"""
if axes is not None:
raise TypeError("axes is currently not accepted as a parameter to"
" append_to_multiple; you can create the "
"tables independently instead")
if not isinstance(d, dict):
raise ValueError(
"append_to_multiple must have a dictionary specified as the "
"way to split the value"
)
if selector not in d:
raise ValueError(
"append_to_multiple requires a selector that is in passed dict"
)
# figure out the splitting axis (the non_index_axis)
axis = list(set(range(value.ndim)) - set(_AXES_MAP[type(value)]))[0]
# figure out how to split the value
remain_key = None
remain_values = []
for k, v in d.items():
if v is None:
if remain_key is not None:
raise ValueError(
"append_to_multiple can only have one value in d that "
"is None"
)
remain_key = k
else:
remain_values.extend(v)
if remain_key is not None:
ordered = value.axes[axis]
ordd = ordered.difference(Index(remain_values))
ordd = sorted(ordered.get_indexer(ordd))
d[remain_key] = ordered.take(ordd)
# data_columns
if data_columns is None:
data_columns = d[selector]
# ensure rows are synchronized across the tables
if dropna:
idxs = (value[cols].dropna(how='all').index for cols in d.values())
valid_index = next(idxs)
for index in idxs:
valid_index = valid_index.intersection(index)
value = value.ix[valid_index]
# append
for k, v in d.items():
dc = data_columns if k == selector else None
# compute the val
val = value.reindex_axis(v, axis=axis)
self.append(k, val, data_columns=dc, **kwargs)
def create_table_index(self, key, **kwargs):
""" Create a pytables index on the table
        Parameters
----------
key : object (the node to index)
Exceptions
----------
raises if the node is not a table
"""
# version requirements
_tables()
s = self.get_storer(key)
if s is None:
return
if not s.is_table:
raise TypeError(
"cannot create table index on a Fixed format store")
s.create_index(**kwargs)
def groups(self):
"""return a list of all the top-level nodes (that are not themselves a
pandas storage object)
"""
_tables()
self._check_if_open()
return [
g for g in self._handle.walk_nodes()
if (getattr(g._v_attrs, 'pandas_type', None) or
getattr(g, 'table', None) or
(isinstance(g, _table_mod.table.Table) and
g._v_name != u('table')))
]
def get_node(self, key):
""" return the node with the key or None if it does not exist """
self._check_if_open()
try:
if not key.startswith('/'):
key = '/' + key
return self._handle.get_node(self.root, key)
except:
return None
def get_storer(self, key):
""" return the storer object for a key, raise if not in the file """
group = self.get_node(key)
if group is None:
return None
s = self._create_storer(group)
s.infer_axes()
return s
def copy(self, file, mode='w', propindexes=True, keys=None, complib=None,
complevel=None, fletcher32=False, overwrite=True):
""" copy the existing store to a new file, upgrading in place
Parameters
----------
propindexes: restore indexes in copied file (defaults to True)
keys : list of keys to include in the copy (defaults to all)
overwrite : overwrite (remove and replace) existing nodes in the
new store (default is True)
mode, complib, complevel, fletcher32 same as in HDFStore.__init__
Returns
-------
open file handle of the new store
"""
new_store = HDFStore(
file,
mode=mode,
complib=complib,
complevel=complevel,
fletcher32=fletcher32)
if keys is None:
keys = list(self.keys())
if not isinstance(keys, (tuple, list)):
keys = [keys]
for k in keys:
s = self.get_storer(k)
if s is not None:
if k in new_store:
if overwrite:
new_store.remove(k)
data = self.select(k)
if s.is_table:
index = False
if propindexes:
index = [a.name for a in s.axes if a.is_indexed]
new_store.append(
k, data, index=index,
data_columns=getattr(s, 'data_columns', None),
encoding=s.encoding
)
else:
new_store.put(k, data, encoding=s.encoding)
return new_store
# private methods ######
def _check_if_open(self):
if not self.is_open:
raise ClosedFileError("{0} file is not open!".format(self._path))
def _validate_format(self, format, kwargs):
""" validate / deprecate formats; return the new kwargs """
kwargs = kwargs.copy()
# validate
try:
kwargs['format'] = _FORMAT_MAP[format.lower()]
except:
raise TypeError("invalid HDFStore format specified [{0}]"
.format(format))
return kwargs
def _create_storer(self, group, format=None, value=None, append=False,
**kwargs):
""" return a suitable class to operate """
def error(t):
raise TypeError(
"cannot properly create the storer for: [%s] [group->%s,"
"value->%s,format->%s,append->%s,kwargs->%s]"
% (t, group, type(value), format, append, kwargs)
)
pt = _ensure_decoded(getattr(group._v_attrs, 'pandas_type', None))
tt = _ensure_decoded(getattr(group._v_attrs, 'table_type', None))
# infer the pt from the passed value
if pt is None:
if value is None:
_tables()
if (getattr(group, 'table', None) or
isinstance(group, _table_mod.table.Table)):
pt = u('frame_table')
tt = u('generic_table')
else:
raise TypeError(
"cannot create a storer if the object is not existing "
"nor a value are passed")
else:
try:
pt = _TYPE_MAP[type(value)]
except:
error('_TYPE_MAP')
# we are actually a table
if format == 'table':
pt += u('_table')
# a storer node
if u('table') not in pt:
try:
return globals()[_STORER_MAP[pt]](self, group, **kwargs)
except:
error('_STORER_MAP')
# existing node (and must be a table)
if tt is None:
            # if we are a writer, determine the tt
if value is not None:
if pt == u('series_table'):
index = getattr(value, 'index', None)
if index is not None:
if index.nlevels == 1:
tt = u('appendable_series')
elif index.nlevels > 1:
tt = u('appendable_multiseries')
elif pt == u('frame_table'):
index = getattr(value, 'index', None)
if index is not None:
if index.nlevels == 1:
tt = u('appendable_frame')
elif index.nlevels > 1:
tt = u('appendable_multiframe')
elif pt == u('wide_table'):
tt = u('appendable_panel')
elif pt == u('ndim_table'):
tt = u('appendable_ndim')
else:
                    # distinguish between a frame/table
tt = u('legacy_panel')
try:
fields = group.table._v_attrs.fields
if len(fields) == 1 and fields[0] == u('value'):
tt = u('legacy_frame')
except:
pass
try:
return globals()[_TABLE_MAP[tt]](self, group, **kwargs)
except:
error('_TABLE_MAP')
def _write_to_group(self, key, value, format, index=True, append=False,
complib=None, encoding=None, **kwargs):
group = self.get_node(key)
# remove the node if we are not appending
if group is not None and not append:
self._handle.remove_node(group, recursive=True)
group = None
        # we don't want to store a table node at all if our object is 0-len
        # as there are no dtypes
if getattr(value, 'empty', None) and (format == 'table' or append):
return
if group is None:
paths = key.split('/')
# recursively create the groups
path = '/'
for p in paths:
if not len(p):
continue
new_path = path
if not path.endswith('/'):
new_path += '/'
new_path += p
group = self.get_node(new_path)
if group is None:
group = self._handle.create_group(path, p)
path = new_path
s = self._create_storer(group, format, value, append=append,
encoding=encoding, **kwargs)
if append:
# raise if we are trying to append to a Fixed format,
# or a table that exists (and we are putting)
if (not s.is_table or
(s.is_table and format == 'fixed' and s.is_exists)):
raise ValueError('Can only append to Tables')
if not s.is_exists:
s.set_object_info()
else:
s.set_object_info()
if not s.is_table and complib:
raise ValueError(
'Compression not supported on Fixed format stores'
)
# write the object
s.write(obj=value, append=append, complib=complib, **kwargs)
if s.is_table and index:
s.create_index(columns=index)
def _read_group(self, group, **kwargs):
s = self._create_storer(group)
s.infer_axes()
return s.read(**kwargs)
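# A minimal usage sketch (not part of the original module): append two batches
# of rows to a single Table node and query it with a where clause.  The file
# name 'store_demo.h5' is arbitrary.
def _demo_hdfstore_append_select():
    df1 = DataFrame({'x': [1, 2], 'y': ['a', 'b']}, index=[0, 1])
    df2 = DataFrame({'x': [3, 4], 'y': ['c', 'd']}, index=[2, 3])
    with HDFStore('store_demo.h5', mode='w') as store:
        store.append('frames', df1, data_columns=['x'])  # creates the Table
        store.append('frames', df2)  # appends to the existing Table
        result = store.select('frames', where='x > 2')  # rows with x == 3, 4
    return result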
def get_store(path, **kwargs):
""" Backwards compatible alias for ``HDFStore``
"""
return HDFStore(path, **kwargs)
class TableIterator(object):
""" define the iteration interface on a table
Parameters
----------
store : the reference store
s : the refered storer
func : the function to execute the query
where : the where of the query
nrows : the rows to iterate on
start : the passed start value (default is None)
stop : the passed stop value (default is None)
iterator : boolean, whether to use the default iterator
chunksize : the passed chunking value (default is 50000)
auto_close : boolean, automatically close the store at the end of
iteration, default is False
kwargs : the passed kwargs
"""
def __init__(self, store, s, func, where, nrows, start=None, stop=None,
iterator=False, chunksize=None, auto_close=False):
self.store = store
self.s = s
self.func = func
self.where = where
self.nrows = nrows or 0
self.start = start or 0
if stop is None:
stop = self.nrows
self.stop = min(self.nrows, stop)
self.coordinates = None
if iterator or chunksize is not None:
if chunksize is None:
chunksize = 100000
self.chunksize = int(chunksize)
else:
self.chunksize = None
self.auto_close = auto_close
def __iter__(self):
# iterate
current = self.start
while current < self.stop:
stop = min(current + self.chunksize, self.stop)
value = self.func(None, None, self.coordinates[current:stop])
current = stop
if value is None or not len(value):
continue
yield value
self.close()
def close(self):
if self.auto_close:
self.store.close()
def get_result(self, coordinates=False):
# return the actual iterator
if self.chunksize is not None:
if not self.s.is_table:
raise TypeError(
"can only use an iterator or chunksize on a table")
self.coordinates = self.s.read_coordinates(where=self.where)
return self
        # if specified, read via coordinates (necessary for multiple selections)
if coordinates:
where = self.s.read_coordinates(where=self.where)
else:
where = self.where
# directly return the result
results = self.func(self.start, self.stop, where)
self.close()
return results
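# A minimal usage sketch (not part of the original module): TableIterator is
# not instantiated directly; it is what ``HDFStore.select`` returns when
# ``chunksize`` (or ``iterator=True``) is passed.  The file name 'iter_demo.h5'
# is arbitrary.
def _demo_chunked_select():
    df = DataFrame({'v': np.arange(10)})
    with HDFStore('iter_demo.h5', mode='w') as store:
        store.append('df', df)
        # read the table back four rows at a time
        chunks = list(store.select('df', chunksize=4))
    return chunks  # DataFrames of 4, 4 and 2 rows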
class IndexCol(StringMixin):
""" an index column description class
Parameters
----------
axis : axis which I reference
values : the ndarray like converted values
kind : a string description of this type
typ : the pytables type
pos : the position in the pytables
"""
is_an_indexable = True
is_data_indexable = True
_info_fields = ['freq', 'tz', 'index_name']
def __init__(self, values=None, kind=None, typ=None, cname=None,
itemsize=None, name=None, axis=None, kind_attr=None,
pos=None, freq=None, tz=None, index_name=None, **kwargs):
self.values = values
self.kind = kind
self.typ = typ
self.itemsize = itemsize
self.name = name
self.cname = cname
self.kind_attr = kind_attr
self.axis = axis
self.pos = pos
self.freq = freq
self.tz = tz
self.index_name = index_name
self.table = None
self.meta = None
self.metadata = None
if name is not None:
self.set_name(name, kind_attr)
if pos is not None:
self.set_pos(pos)
def set_name(self, name, kind_attr=None):
""" set the name of this indexer """
self.name = name
self.kind_attr = kind_attr or "%s_kind" % name
if self.cname is None:
self.cname = name
return self
def set_axis(self, axis):
""" set the axis over which I index """
self.axis = axis
return self
def set_pos(self, pos):
""" set the position of this column in the Table """
self.pos = pos
if pos is not None and self.typ is not None:
self.typ._v_pos = pos
return self
def set_table(self, table):
self.table = table
return self
def __unicode__(self):
temp = tuple(
map(pprint_thing,
(self.name,
self.cname,
self.axis,
self.pos,
self.kind)))
return "name->%s,cname->%s,axis->%s,pos->%s,kind->%s" % temp
def __eq__(self, other):
""" compare 2 col items """
return all([getattr(self, a, None) == getattr(other, a, None)
for a in ['name', 'cname', 'axis', 'pos']])
def __ne__(self, other):
return not self.__eq__(other)
@property
def is_indexed(self):
""" return whether I am an indexed column """
try:
return getattr(self.table.cols, self.cname).is_indexed
except:
return False
def copy(self):
new_self = copy.copy(self)
return new_self
def infer(self, handler):
"""infer this column from the table: create and return a new object"""
table = handler.table
new_self = self.copy()
new_self.set_table(table)
new_self.get_attr()
new_self.read_metadata(handler)
return new_self
def convert(self, values, nan_rep, encoding):
""" set the values from this selection: take = take ownership """
try:
values = values[self.cname]
except:
pass
values = _maybe_convert(values, self.kind, encoding)
kwargs = dict()
if self.freq is not None:
kwargs['freq'] = _ensure_decoded(self.freq)
if self.index_name is not None:
kwargs['name'] = _ensure_decoded(self.index_name)
try:
self.values = Index(values, **kwargs)
except:
# if the output freq is different than what we recorded,
# it should be None (see also 'doc example part 2')
if 'freq' in kwargs:
kwargs['freq'] = None
self.values = Index(values, **kwargs)
self.values = _set_tz(self.values, self.tz)
return self
def take_data(self):
""" return the values & release the memory """
self.values, values = None, self.values
return values
@property
def attrs(self):
return self.table._v_attrs
@property
def description(self):
return self.table.description
@property
def col(self):
""" return my current col description """
return getattr(self.description, self.cname, None)
@property
def cvalues(self):
""" return my cython values """
return self.values
def __iter__(self):
return iter(self.values)
def maybe_set_size(self, min_itemsize=None, **kwargs):
""" maybe set a string col itemsize:
min_itemsize can be an interger or a dict with this columns name
with an integer size """
if _ensure_decoded(self.kind) == u('string'):
if isinstance(min_itemsize, dict):
min_itemsize = min_itemsize.get(self.name)
if min_itemsize is not None and self.typ.itemsize < min_itemsize:
self.typ = _tables(
).StringCol(itemsize=min_itemsize, pos=self.pos)
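# Sketch of how ``min_itemsize`` typically reaches this method from the public
# API (column name 'strings' and the sizes are hypothetical): a dict pre-sizes
# the named string column, a bare integer applies to all string columns:
#
#   store.append('df', df, min_itemsize={'strings': 50})
#   store.append('df', df, min_itemsize=50)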
def validate(self, handler, append, **kwargs):
self.validate_names()
def validate_names(self):
pass
def validate_and_set(self, handler, append, **kwargs):
self.set_table(handler.table)
self.validate_col()
self.validate_attr(append)
self.validate_metadata(handler)
self.write_metadata(handler)
self.set_attr()
def validate_col(self, itemsize=None):
""" validate this column: return the compared against itemsize """
# validate this column for string truncation (or reset to the max size)
if _ensure_decoded(self.kind) == u('string'):
c = self.col
if c is not None:
if itemsize is None:
itemsize = self.itemsize
if c.itemsize < itemsize:
raise ValueError(
"Trying to store a string with len [%s] in [%s] "
"column but\nthis column has a limit of [%s]!\n"
"Consider using min_itemsize to preset the sizes on "
"these columns" % (itemsize, self.cname, c.itemsize))
return c.itemsize
return None
def validate_attr(self, append):
# check for backwards incompatibility
if append:
existing_kind = getattr(self.attrs, self.kind_attr, None)
if existing_kind is not None and existing_kind != self.kind:
raise TypeError("incompatible kind in col [%s - %s]" %
(existing_kind, self.kind))
def update_info(self, info):
""" set/update the info for this indexable with the key/value
if there is a conflict raise/warn as needed """
for key in self._info_fields:
value = getattr(self, key, None)
idx = _get_info(info, self.name)
existing_value = idx.get(key)
if key in idx and value is not None and existing_value != value:
# frequency/name just warn
if key in ['freq', 'index_name']:
ws = attribute_conflict_doc % (key, existing_value, value)
warnings.warn(ws, AttributeConflictWarning, stacklevel=6)
# reset
idx[key] = None
setattr(self, key, None)
else:
raise ValueError(
"invalid info for [%s] for [%s], existing_value [%s] "
"conflicts with new value [%s]"
% (self.name, key, existing_value, value))
else:
if value is not None or existing_value is not None:
idx[key] = value
return self
def set_info(self, info):
""" set my state from the passed info """
idx = info.get(self.name)
if idx is not None:
self.__dict__.update(idx)
def get_attr(self):
""" set the kind for this colummn """
self.kind = getattr(self.attrs, self.kind_attr, None)
def set_attr(self):
""" set the kind for this colummn """
setattr(self.attrs, self.kind_attr, self.kind)
def read_metadata(self, handler):
""" retrieve the metadata for this columns """
self.metadata = handler.read_metadata(self.cname)
def validate_metadata(self, handler):
""" validate that kind=category does not change the categories """
if self.meta == 'category':
new_metadata = self.metadata
cur_metadata = handler.read_metadata(self.cname)
if new_metadata is not None and cur_metadata is not None \
and not com.array_equivalent(new_metadata, cur_metadata):
raise ValueError("cannot append a categorical with different categories"
" to the existing")
def write_metadata(self, handler):
""" set the meta data """
if self.metadata is not None:
handler.write_metadata(self.cname,self.metadata)
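# Hedged sketch of what ``validate_metadata`` above enforces: appending a
# categorical column whose categories differ from those already stored raises
# (key 'df' and the category values are hypothetical):
#
#   store.append('df', df1)   # df1['cat'] has categories ['a', 'b']
#   store.append('df', df2)   # df2['cat'] has categories ['a', 'c'] ->
#   # ValueError: cannot append a categorical with different categories
#   #             to the existing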
class GenericIndexCol(IndexCol):
""" an index which is not represented in the data of the table """
@property
def is_indexed(self):
return False
def convert(self, values, nan_rep, encoding):
""" set the values from this selection: take = take ownership """
self.values = Int64Index(np.arange(self.table.nrows))
return self
def get_attr(self):
pass
def set_attr(self):
pass
class DataCol(IndexCol):
""" a data holding column, by definition this is not indexable
Parameters
----------
data : the actual data
cname : the column name in the table to hold the data (typically
values)
meta : a string description of the metadata
metadata : the actual metadata
"""
is_an_indexable = False
is_data_indexable = False
_info_fields = ['tz','ordered']
@classmethod
def create_for_block(
cls, i=None, name=None, cname=None, version=None, **kwargs):
""" return a new datacol with the block i """
if cname is None:
cname = name or 'values_block_%d' % i
if name is None:
name = cname
# prior to 0.10.1, we named values blocks like: values_block_0 and the
# name values_0
try:
if version[0] == 0 and version[1] <= 10 and version[2] == 0:
m = re.search("values_block_(\d+)", name)
if m:
name = "values_%s" % m.groups()[0]
except:
pass
return cls(name=name, cname=cname, **kwargs)
def __init__(self, values=None, kind=None, typ=None,
cname=None, data=None, meta=None, metadata=None, block=None, **kwargs):
super(DataCol, self).__init__(
values=values, kind=kind, typ=typ, cname=cname, **kwargs)
self.dtype = None
self.dtype_attr = u("%s_dtype" % self.name)
self.meta = meta
self.meta_attr = u("%s_meta" % self.name)
self.set_data(data)
self.set_metadata(metadata)
def __unicode__(self):
temp = tuple(
map(pprint_thing,
(self.name,
self.cname,
self.dtype,
self.kind,
self.shape)))
return "name->%s,cname->%s,dtype->%s,kind->%s,shape->%s" % temp
def __eq__(self, other):
""" compare 2 col items """
return all([getattr(self, a, None) == getattr(other, a, None)
for a in ['name', 'cname', 'dtype', 'pos']])
def set_data(self, data, dtype=None):
self.data = data
if data is not None:
if dtype is not None:
self.dtype = dtype
self.set_kind()
elif self.dtype is None:
self.dtype = data.dtype.name
self.set_kind()
def take_data(self):
""" return the data & release the memory """
self.data, data = None, self.data
return data
def set_metadata(self, metadata):
""" record the metadata """
if metadata is not None:
metadata = np.array(metadata,copy=False).ravel()
self.metadata = metadata
def set_kind(self):
# set my kind if we can
if self.dtype is not None:
dtype = _ensure_decoded(self.dtype)
if dtype.startswith(u('string')) or dtype.startswith(u('bytes')):
self.kind = 'string'
elif dtype.startswith(u('float')):
self.kind = 'float'
elif dtype.startswith(u('complex')):
self.kind = 'complex'
elif dtype.startswith(u('int')) or dtype.startswith(u('uint')):
self.kind = 'integer'
elif dtype.startswith(u('date')):
self.kind = 'datetime'
elif dtype.startswith(u('timedelta')):
self.kind = 'timedelta'
elif dtype.startswith(u('bool')):
self.kind = 'bool'
else:
raise AssertionError(
"cannot interpret dtype of [%s] in [%s]" % (dtype, self))
# set my typ if we need
if self.typ is None:
self.typ = getattr(self.description, self.cname, None)
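# For reference, ``set_kind`` above maps numpy dtype names onto the coarse
# 'kind' strings used throughout this module, roughly:
#   'string'/'bytes'   -> 'string'       'float*'       -> 'float'
#   'complex*'         -> 'complex'      'int*'/'uint*' -> 'integer'
#   'datetime64[ns]'   -> 'datetime'     'timedelta64'  -> 'timedelta'
#   'bool'             -> 'bool'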
def set_atom(self, block, block_items, existing_col, min_itemsize,
nan_rep, info, encoding=None, **kwargs):
""" create and setup my atom from the block b """
self.values = list(block_items)
# short-cut certain block types
if block.is_categorical:
return self.set_atom_categorical(block, items=block_items, info=info)
elif block.is_datetimetz:
return self.set_atom_datetime64tz(block, info=info)
elif block.is_datetime:
return self.set_atom_datetime64(block)
elif block.is_timedelta:
return self.set_atom_timedelta64(block)
elif block.is_complex:
return self.set_atom_complex(block)
dtype = block.dtype.name
inferred_type = lib.infer_dtype(block.values)
if inferred_type == 'date':
raise TypeError(
"[date] is not implemented as a table column")
elif inferred_type == 'datetime':
# after 8260
# this would only be hit for a multi-timezone dtype
# which is an error
raise TypeError(
"too many timezones in this block, create separate "
"data columns"
)
elif inferred_type == 'unicode':
raise TypeError(
"[unicode] is not implemented as a table column")
# this is basically a catchall; if say a datetime64 has nans then will
# end up here ###
elif inferred_type == 'string' or dtype == 'object':
self.set_atom_string(
block, block_items,
existing_col,
min_itemsize,
nan_rep,
encoding)
# set as a data block
else:
self.set_atom_data(block)
def get_atom_string(self, block, itemsize):
return _tables().StringCol(itemsize=itemsize, shape=block.shape[0])
def set_atom_string(self, block, block_items, existing_col, min_itemsize,
nan_rep, encoding):
# fill nan items with myself, don't disturb the blocks by
# trying to downcast
block = block.fillna(nan_rep, downcast=False)
if isinstance(block, list):
block = block[0]
data = block.values
# see if we have a valid string type
inferred_type = lib.infer_dtype(data.ravel())
if inferred_type != 'string':
# we cannot serialize this data, so report an exception on a column
# by column basis
for i, item in enumerate(block_items):
col = block.iget(i)
inferred_type = lib.infer_dtype(col.ravel())
if inferred_type != 'string':
raise TypeError(
"Cannot serialize the column [%s] because\n"
"its data contents are [%s] object dtype"
% (item, inferred_type)
)
# itemsize is the maximum length of a string (along any dimension)
data_converted = _convert_string_array(data, encoding)
itemsize = data_converted.itemsize
# specified min_itemsize?
if isinstance(min_itemsize, dict):
min_itemsize = int(min_itemsize.get(
self.name) or min_itemsize.get('values') or 0)
itemsize = max(min_itemsize or 0, itemsize)
# check for conflicts with an existing column in the values
if existing_col is not None:
eci = existing_col.validate_col(itemsize)
if eci > itemsize:
itemsize = eci
self.itemsize = itemsize
self.kind = 'string'
self.typ = self.get_atom_string(block, itemsize)
self.set_data(data_converted.astype('|S%d' % itemsize, copy=False))
def get_atom_coltype(self, kind=None):
""" return the PyTables column class for this column """
if kind is None:
kind = self.kind
if self.kind.startswith('uint'):
col_name = "UInt%sCol" % kind[4:]
else:
col_name = "%sCol" % kind.capitalize()
return getattr(_tables(), col_name)
def get_atom_data(self, block, kind=None):
return self.get_atom_coltype(kind=kind)(shape=block.shape[0])
def set_atom_complex(self, block):
self.kind = block.dtype.name
itemsize = int(self.kind.split('complex')[-1]) // 8
self.typ = _tables().ComplexCol(itemsize=itemsize, shape=block.shape[0])
self.set_data(block.values.astype(self.typ.type, copy=False))
def set_atom_data(self, block):
self.kind = block.dtype.name
self.typ = self.get_atom_data(block)
self.set_data(block.values.astype(self.typ.type, copy=False))
def set_atom_categorical(self, block, items, info=None, values=None):
# currently only supports a 1-D categorical
# in a 1-D block
values = block.values
codes = values.codes
self.kind = 'integer'
self.dtype = codes.dtype.name
if values.ndim > 1:
raise NotImplementedError("only support 1-d categoricals")
if len(items) > 1:
raise NotImplementedError("only support single block categoricals")
# write the codes; must be in a block shape
self.ordered = values.ordered
self.typ = self.get_atom_data(block, kind=codes.dtype.name)
self.set_data(_block_shape(codes))
# write the categories
self.meta = 'category'
self.set_metadata(block.values.categories)
# update the info
self.update_info(info)
def get_atom_datetime64(self, block):
return _tables().Int64Col(shape=block.shape[0])
def set_atom_datetime64(self, block, values=None):
self.kind = 'datetime64'
self.typ = self.get_atom_datetime64(block)
if values is None:
values = block.values.view('i8')
self.set_data(values, 'datetime64')
def set_atom_datetime64tz(self, block, info, values=None):
if values is None:
values = block.values
# convert this column to i8 in UTC, and save the tz
values = values.asi8.reshape(block.shape)
# store a converted timezone
self.tz = _get_tz(block.values.tz)
self.update_info(info)
self.kind = 'datetime64'
self.typ = self.get_atom_datetime64(block)
self.set_data(values, 'datetime64')
def get_atom_timedelta64(self, block):
return _tables().Int64Col(shape=block.shape[0])
def set_atom_timedelta64(self, block, values=None):
self.kind = 'timedelta64'
self.typ = self.get_atom_timedelta64(block)
if values is None:
values = block.values.view('i8')
self.set_data(values, 'timedelta64')
@property
def shape(self):
return getattr(self.data, 'shape', None)
@property
def cvalues(self):
""" return my cython values """
return self.data
def validate_attr(self, append):
"""validate that we have the same order as the existing & same dtype"""
if append:
existing_fields = getattr(self.attrs, self.kind_attr, None)
if (existing_fields is not None and
existing_fields != list(self.values)):
raise ValueError("appended items do not match existing items"
" in table!")
existing_dtype = getattr(self.attrs, self.dtype_attr, None)
if (existing_dtype is not None and
existing_dtype != self.dtype):
raise ValueError("appended items dtype do not match existing "
"items dtype in table!")
def convert(self, values, nan_rep, encoding):
"""set the data from this selection (and convert to the correct dtype
if we can)
"""
try:
values = values[self.cname]
except:
pass
self.set_data(values)
# use the meta if needed
meta = _ensure_decoded(self.meta)
# convert to the correct dtype
if self.dtype is not None:
dtype = _ensure_decoded(self.dtype)
# reverse converts
if dtype == u('datetime64'):
# recreate with tz if indicated
self.data = _set_tz(self.data, self.tz, coerce=True)
elif dtype == u('timedelta64'):
self.data = np.asarray(self.data, dtype='m8[ns]')
elif dtype == u('date'):
try:
self.data = np.asarray(
[date.fromordinal(v) for v in self.data], dtype=object)
except ValueError:
self.data = np.asarray(
[date.fromtimestamp(v) for v in self.data],
dtype=object)
elif dtype == u('datetime'):
self.data = np.asarray(
[datetime.fromtimestamp(v) for v in self.data],
dtype=object)
elif meta == u('category'):
# we have a categorical
categories = self.metadata
self.data = Categorical.from_codes(self.data.ravel(),
categories=categories,
ordered=self.ordered)
else:
try:
self.data = self.data.astype(dtype, copy=False)
except:
self.data = self.data.astype('O', copy=False)
# convert nans / decode
if _ensure_decoded(self.kind) == u('string'):
self.data = _unconvert_string_array(
self.data, nan_rep=nan_rep, encoding=encoding)
return self
def get_attr(self):
""" get the data for this colummn """
self.values = getattr(self.attrs, self.kind_attr, None)
self.dtype = getattr(self.attrs, self.dtype_attr, None)
self.meta = getattr(self.attrs, self.meta_attr, None)
self.set_kind()
def set_attr(self):
""" set the data for this colummn """
setattr(self.attrs, self.kind_attr, self.values)
setattr(self.attrs, self.meta_attr, self.meta)
if self.dtype is not None:
setattr(self.attrs, self.dtype_attr, self.dtype)
class DataIndexableCol(DataCol):
""" represent a data column that can be indexed """
is_data_indexable = True
def validate_names(self):
if not Index(self.values).is_object():
raise ValueError("cannot have non-object label DataIndexableCol")
def get_atom_string(self, block, itemsize):
return _tables().StringCol(itemsize=itemsize)
def get_atom_data(self, block, kind=None):
return self.get_atom_coltype(kind=kind)()
def get_atom_datetime64(self, block):
return _tables().Int64Col()
def get_atom_timedelta64(self, block):
return _tables().Int64Col()
class GenericDataIndexableCol(DataIndexableCol):
""" represent a generic pytables data column """
def get_attr(self):
pass
class Fixed(StringMixin):
""" represent an object in my store
facilitate read/write of various types of objects
this is an abstract base class
Parameters
----------
parent : my parent HDFStore
group : the group node where the table resides
"""
pandas_kind = None
obj_type = None
ndim = None
is_table = False
def __init__(self, parent, group, encoding=None, **kwargs):
self.parent = parent
self.group = group
self.encoding = _ensure_encoding(encoding)
self.set_version()
@property
def is_old_version(self):
return (self.version[0] <= 0 and self.version[1] <= 10 and
self.version[2] < 1)
def set_version(self):
""" compute and set our version """
version = _ensure_decoded(
getattr(self.group._v_attrs, 'pandas_version', None))
try:
self.version = tuple([int(x) for x in version.split('.')])
if len(self.version) == 2:
self.version = self.version + (0,)
except:
self.version = (0, 0, 0)
@property
def pandas_type(self):
return _ensure_decoded(getattr(self.group._v_attrs,
'pandas_type', None))
@property
def format_type(self):
return 'fixed'
def __unicode__(self):
""" return a pretty representation of myself """
self.infer_axes()
s = self.shape
if s is not None:
if isinstance(s, (list, tuple)):
s = "[%s]" % ','.join([pprint_thing(x) for x in s])
return "%-12.12s (shape->%s)" % (self.pandas_type, s)
return self.pandas_type
def set_object_info(self):
""" set my pandas type & version """
self.attrs.pandas_type = str(self.pandas_kind)
self.attrs.pandas_version = str(_version)
self.set_version()
def copy(self):
new_self = copy.copy(self)
return new_self
@property
def storage_obj_type(self):
return self.obj_type
@property
def shape(self):
return self.nrows
@property
def pathname(self):
return self.group._v_pathname
@property
def _handle(self):
return self.parent._handle
@property
def _filters(self):
return self.parent._filters
@property
def _complevel(self):
return self.parent._complevel
@property
def _fletcher32(self):
return self.parent._fletcher32
@property
def _complib(self):
return self.parent._complib
@property
def attrs(self):
return self.group._v_attrs
def set_attrs(self):
""" set our object attributes """
pass
def get_attrs(self):
""" get our object attributes """
pass
@property
def storable(self):
""" return my storable """
return self.group
@property
def is_exists(self):
return False
@property
def nrows(self):
return getattr(self.storable, 'nrows', None)
def validate(self, other):
""" validate against an existing storable """
if other is None:
return
return True
def validate_version(self, where=None):
""" are we trying to operate on an old version? """
return True
def infer_axes(self):
""" infer the axes of my storer
return a boolean indicating if we have a valid storer or not """
s = self.storable
if s is None:
return False
self.get_attrs()
return True
def read(self, **kwargs):
raise NotImplementedError(
"cannot read on an abstract storer: subclasses should implement")
def write(self, **kwargs):
raise NotImplementedError(
"cannot write on an abstract storer: sublcasses should implement")
def delete(self, where=None, start=None, stop=None, **kwargs):
""" support fully deleting the node in its entirety (only) - where specification must be None """
if where is None and start is None and stop is None:
self._handle.remove_node(self.group, recursive=True)
return None
raise TypeError("cannot delete on an abstract storer")
class GenericFixed(Fixed):
""" a generified fixed version """
_index_type_map = {DatetimeIndex: 'datetime', PeriodIndex: 'period'}
_reverse_index_map = dict([(v, k)
for k, v in compat.iteritems(_index_type_map)])
attributes = []
# indexer helpers
def _class_to_alias(self, cls):
return self._index_type_map.get(cls, '')
def _alias_to_class(self, alias):
if isinstance(alias, type): # pragma: no cover
# compat: for a short period of time master stored types
return alias
return self._reverse_index_map.get(alias, Index)
def _get_index_factory(self, klass):
if klass == DatetimeIndex:
def f(values, freq=None, tz=None):
return DatetimeIndex._simple_new(values, None, freq=freq,
tz=tz)
return f
return klass
def validate_read(self, kwargs):
if kwargs.get('columns') is not None:
raise TypeError("cannot pass a column specification when reading "
"a Fixed format store. this store must be "
"selected in its entirety")
if kwargs.get('where') is not None:
raise TypeError("cannot pass a where specification when reading "
"from a Fixed format store. this store must be "
"selected in its entirety")
@property
def is_exists(self):
return True
def set_attrs(self):
""" set our object attributes """
self.attrs.encoding = self.encoding
def get_attrs(self):
""" retrieve our attributes """
self.encoding = _ensure_encoding(getattr(self.attrs, 'encoding', None))
for n in self.attributes:
setattr(self, n, _ensure_decoded(getattr(self.attrs, n, None)))
def write(self, obj, **kwargs):
self.set_attrs()
def read_array(self, key):
""" read an array for the specified node (off of group """
import tables
node = getattr(self.group, key)
data = node[:]
attrs = node._v_attrs
transposed = getattr(attrs, 'transposed', False)
if isinstance(node, tables.VLArray):
ret = data[0]
else:
dtype = getattr(attrs, 'value_type', None)
shape = getattr(attrs, 'shape', None)
if shape is not None:
# length 0 axis
ret = np.empty(shape, dtype=dtype)
else:
ret = data
if dtype == u('datetime64'):
# reconstruct a timezone if indicated
ret = _set_tz(ret, getattr(attrs, 'tz', None), coerce=True)
elif dtype == u('timedelta64'):
ret = np.asarray(ret, dtype='m8[ns]')
if transposed:
return ret.T
else:
return ret
def read_index(self, key):
variety = _ensure_decoded(getattr(self.attrs, '%s_variety' % key))
if variety == u('multi'):
return self.read_multi_index(key)
elif variety == u('block'):
return self.read_block_index(key)
elif variety == u('sparseint'):
return self.read_sparse_intindex(key)
elif variety == u('regular'):
_, index = self.read_index_node(getattr(self.group, key))
return index
else: # pragma: no cover
raise TypeError('unrecognized index variety: %s' % variety)
def write_index(self, key, index):
if isinstance(index, MultiIndex):
setattr(self.attrs, '%s_variety' % key, 'multi')
self.write_multi_index(key, index)
elif isinstance(index, BlockIndex):
setattr(self.attrs, '%s_variety' % key, 'block')
self.write_block_index(key, index)
elif isinstance(index, IntIndex):
setattr(self.attrs, '%s_variety' % key, 'sparseint')
self.write_sparse_intindex(key, index)
else:
setattr(self.attrs, '%s_variety' % key, 'regular')
converted = _convert_index(index, self.encoding,
self.format_type).set_name('index')
self.write_array(key, converted.values)
node = getattr(self.group, key)
node._v_attrs.kind = converted.kind
node._v_attrs.name = index.name
if isinstance(index, (DatetimeIndex, PeriodIndex)):
node._v_attrs.index_class = self._class_to_alias(type(index))
if hasattr(index, 'freq'):
node._v_attrs.freq = index.freq
if hasattr(index, 'tz') and index.tz is not None:
node._v_attrs.tz = _get_tz(index.tz)
def write_block_index(self, key, index):
self.write_array('%s_blocs' % key, index.blocs)
self.write_array('%s_blengths' % key, index.blengths)
setattr(self.attrs, '%s_length' % key, index.length)
def read_block_index(self, key):
length = getattr(self.attrs, '%s_length' % key)
blocs = self.read_array('%s_blocs' % key)
blengths = self.read_array('%s_blengths' % key)
return BlockIndex(length, blocs, blengths)
def write_sparse_intindex(self, key, index):
self.write_array('%s_indices' % key, index.indices)
setattr(self.attrs, '%s_length' % key, index.length)
def read_sparse_intindex(self, key):
length = getattr(self.attrs, '%s_length' % key)
indices = self.read_array('%s_indices' % key)
return IntIndex(length, indices)
def write_multi_index(self, key, index):
setattr(self.attrs, '%s_nlevels' % key, index.nlevels)
for i, (lev, lab, name) in enumerate(zip(index.levels,
index.labels,
index.names)):
# write the level
level_key = '%s_level%d' % (key, i)
conv_level = _convert_index(lev, self.encoding,
self.format_type).set_name(level_key)
self.write_array(level_key, conv_level.values)
node = getattr(self.group, level_key)
node._v_attrs.kind = conv_level.kind
node._v_attrs.name = name
# write the name
setattr(node._v_attrs, '%s_name%d' % (key, i), name)
# write the labels
label_key = '%s_label%d' % (key, i)
self.write_array(label_key, lab)
def read_multi_index(self, key):
nlevels = getattr(self.attrs, '%s_nlevels' % key)
levels = []
labels = []
names = []
for i in range(nlevels):
level_key = '%s_level%d' % (key, i)
name, lev = self.read_index_node(getattr(self.group, level_key))
levels.append(lev)
names.append(name)
label_key = '%s_label%d' % (key, i)
lab = self.read_array(label_key)
labels.append(lab)
return MultiIndex(levels=levels, labels=labels, names=names,
verify_integrity=True)
def read_index_node(self, node):
data = node[:]
# If the index was an empty array write_array_empty() will
# have written a sentinel. Here we replace it with the original.
if ('shape' in node._v_attrs and
self._is_empty_array(getattr(node._v_attrs, 'shape'))):
data = np.empty(getattr(node._v_attrs, 'shape'),
dtype=getattr(node._v_attrs, 'value_type'))
kind = _ensure_decoded(node._v_attrs.kind)
name = None
if 'name' in node._v_attrs:
name = node._v_attrs.name
index_class = self._alias_to_class(getattr(node._v_attrs,
'index_class', ''))
factory = self._get_index_factory(index_class)
kwargs = {}
if u('freq') in node._v_attrs:
kwargs['freq'] = node._v_attrs['freq']
if u('tz') in node._v_attrs:
kwargs['tz'] = node._v_attrs['tz']
if kind in (u('date'), u('datetime')):
index = factory(
_unconvert_index(data, kind, encoding=self.encoding),
dtype=object, **kwargs)
else:
index = factory(
_unconvert_index(data, kind, encoding=self.encoding), **kwargs)
index.name = name
return name, index
def write_array_empty(self, key, value):
""" write a 0-len array """
# ugly hack for length 0 axes
arr = np.empty((1,) * value.ndim)
self._handle.create_array(self.group, key, arr)
getattr(self.group, key)._v_attrs.value_type = str(value.dtype)
getattr(self.group, key)._v_attrs.shape = value.shape
def _is_empty_array(self, shape):
"""Returns true if any axis is zero length."""
return any(x == 0 for x in shape)
def write_array(self, key, value, items=None):
if key in self.group:
self._handle.remove_node(self.group, key)
# Transform needed to interface with pytables row/col notation
empty_array = self._is_empty_array(value.shape)
transposed = False
if com.is_categorical_dtype(value):
raise NotImplementedError("cannot store a category dtype")
if not empty_array:
value = value.T
transposed = True
if self._filters is not None:
atom = None
try:
# get the atom for this datatype
atom = _tables().Atom.from_dtype(value.dtype)
except ValueError:
pass
if atom is not None:
# create an empty chunked array and fill it from value
if not empty_array:
ca = self._handle.create_carray(self.group, key, atom,
value.shape,
filters=self._filters)
ca[:] = value
getattr(self.group, key)._v_attrs.transposed = transposed
else:
self.write_array_empty(key, value)
return
if value.dtype.type == np.object_:
# infer the type, warn if we have a non-string type here (for
# performance)
inferred_type = lib.infer_dtype(value.ravel())
if empty_array:
pass
elif inferred_type == 'string':
pass
else:
try:
items = list(items)
except:
pass
ws = performance_doc % (inferred_type, key, items)
warnings.warn(ws, PerformanceWarning, stacklevel=7)
vlarr = self._handle.create_vlarray(self.group, key,
_tables().ObjectAtom())
vlarr.append(value)
else:
if empty_array:
self.write_array_empty(key, value)
else:
if com.is_datetime64_dtype(value.dtype):
self._handle.create_array(self.group, key, value.view('i8'))
getattr(
self.group, key)._v_attrs.value_type = 'datetime64'
elif com.is_datetime64tz_dtype(value.dtype):
# store as UTC
# with a zone
self._handle.create_array(self.group, key,
value.asi8)
node = getattr(self.group, key)
node._v_attrs.tz = _get_tz(value.tz)
node._v_attrs.value_type = 'datetime64'
elif com.is_timedelta64_dtype(value.dtype):
self._handle.create_array(self.group, key, value.view('i8'))
getattr(
self.group, key)._v_attrs.value_type = 'timedelta64'
else:
self._handle.create_array(self.group, key, value)
getattr(self.group, key)._v_attrs.transposed = transposed
class LegacyFixed(GenericFixed):
def read_index_legacy(self, key):
node = getattr(self.group, key)
data = node[:]
kind = node._v_attrs.kind
return _unconvert_index_legacy(data, kind, encoding=self.encoding)
class LegacySeriesFixed(LegacyFixed):
def read(self, **kwargs):
self.validate_read(kwargs)
index = self.read_index_legacy('index')
values = self.read_array('values')
return Series(values, index=index)
class LegacyFrameFixed(LegacyFixed):
def read(self, **kwargs):
self.validate_read(kwargs)
index = self.read_index_legacy('index')
columns = self.read_index_legacy('columns')
values = self.read_array('values')
return DataFrame(values, index=index, columns=columns)
class SeriesFixed(GenericFixed):
pandas_kind = u('series')
attributes = ['name']
@property
def shape(self):
try:
return len(getattr(self.group, 'values')),
except:
return None
def read(self, **kwargs):
self.validate_read(kwargs)
index = self.read_index('index')
values = self.read_array('values')
return Series(values, index=index, name=self.name)
def write(self, obj, **kwargs):
super(SeriesFixed, self).write(obj, **kwargs)
self.write_index('index', obj.index)
self.write_array('values', obj.values)
self.attrs.name = obj.name
class SparseSeriesFixed(GenericFixed):
pandas_kind = u('sparse_series')
attributes = ['name', 'fill_value', 'kind']
def read(self, **kwargs):
self.validate_read(kwargs)
index = self.read_index('index')
sp_values = self.read_array('sp_values')
sp_index = self.read_index('sp_index')
return SparseSeries(sp_values, index=index, sparse_index=sp_index,
kind=self.kind or u('block'),
fill_value=self.fill_value,
name=self.name)
def write(self, obj, **kwargs):
super(SparseSeriesFixed, self).write(obj, **kwargs)
self.write_index('index', obj.index)
self.write_index('sp_index', obj.sp_index)
self.write_array('sp_values', obj.sp_values)
self.attrs.name = obj.name
self.attrs.fill_value = obj.fill_value
self.attrs.kind = obj.kind
class SparseFrameFixed(GenericFixed):
pandas_kind = u('sparse_frame')
attributes = ['default_kind', 'default_fill_value']
def read(self, **kwargs):
self.validate_read(kwargs)
columns = self.read_index('columns')
sdict = {}
for c in columns:
key = 'sparse_series_%s' % c
s = SparseSeriesFixed(self.parent, getattr(self.group, key))
s.infer_axes()
sdict[c] = s.read()
return SparseDataFrame(sdict, columns=columns,
default_kind=self.default_kind,
default_fill_value=self.default_fill_value)
def write(self, obj, **kwargs):
""" write it as a collection of individual sparse series """
super(SparseFrameFixed, self).write(obj, **kwargs)
for name, ss in compat.iteritems(obj):
key = 'sparse_series_%s' % name
if key not in self.group._v_children:
node = self._handle.create_group(self.group, key)
else:
node = getattr(self.group, key)
s = SparseSeriesFixed(self.parent, node)
s.write(ss)
self.attrs.default_fill_value = obj.default_fill_value
self.attrs.default_kind = obj.default_kind
self.write_index('columns', obj.columns)
class SparsePanelFixed(GenericFixed):
pandas_kind = u('sparse_panel')
attributes = ['default_kind', 'default_fill_value']
def read(self, **kwargs):
self.validate_read(kwargs)
items = self.read_index('items')
sdict = {}
for name in items:
key = 'sparse_frame_%s' % name
s = SparseFrameFixed(self.parent, getattr(self.group, key))
s.infer_axes()
sdict[name] = s.read()
return SparsePanel(sdict, items=items, default_kind=self.default_kind,
default_fill_value=self.default_fill_value)
def write(self, obj, **kwargs):
super(SparsePanelFixed, self).write(obj, **kwargs)
self.attrs.default_fill_value = obj.default_fill_value
self.attrs.default_kind = obj.default_kind
self.write_index('items', obj.items)
for name, sdf in compat.iteritems(obj):
key = 'sparse_frame_%s' % name
if key not in self.group._v_children:
node = self._handle.create_group(self.group, key)
else:
node = getattr(self.group, key)
s = SparseFrameFixed(self.parent, node)
s.write(sdf)
class BlockManagerFixed(GenericFixed):
attributes = ['ndim', 'nblocks']
is_shape_reversed = False
@property
def shape(self):
try:
ndim = self.ndim
# items
items = 0
for i in range(self.nblocks):
node = getattr(self.group, 'block%d_items' % i)
shape = getattr(node, 'shape', None)
if shape is not None:
items += shape[0]
# data shape
node = getattr(self.group, 'block0_values')
shape = getattr(node, 'shape', None)
if shape is not None:
shape = list(shape[0:(ndim - 1)])
else:
shape = []
shape.append(items)
# hacky - this works for frames, but is reversed for panels
if self.is_shape_reversed:
shape = shape[::-1]
return shape
except:
return None
def read(self, **kwargs):
self.validate_read(kwargs)
axes = []
for i in range(self.ndim):
ax = self.read_index('axis%d' % i)
axes.append(ax)
items = axes[0]
blocks = []
for i in range(self.nblocks):
blk_items = self.read_index('block%d_items' % i)
values = self.read_array('block%d_values' % i)
blk = make_block(values,
placement=items.get_indexer(blk_items))
blocks.append(blk)
return self.obj_type(BlockManager(blocks, axes))
def write(self, obj, **kwargs):
super(BlockManagerFixed, self).write(obj, **kwargs)
data = obj._data
if not data.is_consolidated():
data = data.consolidate()
self.attrs.ndim = data.ndim
for i, ax in enumerate(data.axes):
if i == 0:
if not ax.is_unique:
raise ValueError("Columns index has to be unique for fixed format")
self.write_index('axis%d' % i, ax)
# Supporting mixed-type DataFrame objects...nontrivial
self.attrs.nblocks = len(data.blocks)
for i, blk in enumerate(data.blocks):
# I have no idea why, but writing values before items fixed #2299
blk_items = data.items.take(blk.mgr_locs)
self.write_array('block%d_values' % i, blk.values, items=blk_items)
self.write_index('block%d_items' % i, blk_items)
class FrameFixed(BlockManagerFixed):
pandas_kind = u('frame')
obj_type = DataFrame
class PanelFixed(BlockManagerFixed):
pandas_kind = u('wide')
obj_type = Panel
is_shape_reversed = True
def write(self, obj, **kwargs):
obj._consolidate_inplace()
return super(PanelFixed, self).write(obj, **kwargs)
class Table(Fixed):
""" represent a table:
facilitate read/write of various types of tables
Attrs in Table Node
-------------------
These are attributes that are stored in the main table node; they are
necessary to recreate these tables when read back in.
index_axes : a list of tuples of the (original indexing axis and
index column)
non_index_axes: a list of tuples of the (original index axis and
columns on a non-indexing axis)
values_axes : a list of the columns which comprise the data of this
table
data_columns : a list of the columns that we are allowing indexing
(these become single columns in values_axes), or True to force all
columns
nan_rep : the string to use for nan representations for string
objects
levels : the names of levels
metadata : the names of the metadata columns
"""
pandas_kind = u('wide_table')
table_type = None
levels = 1
is_table = True
is_shape_reversed = False
def __init__(self, *args, **kwargs):
super(Table, self).__init__(*args, **kwargs)
self.index_axes = []
self.non_index_axes = []
self.values_axes = []
self.data_columns = []
self.metadata = []
self.info = dict()
self.nan_rep = None
self.selection = None
@property
def table_type_short(self):
return self.table_type.split('_')[0]
@property
def format_type(self):
return 'table'
def __unicode__(self):
""" return a pretty representatgion of myself """
self.infer_axes()
dc = ",dc->[%s]" % ','.join(
self.data_columns) if len(self.data_columns) else ''
ver = ''
if self.is_old_version:
ver = "[%s]" % '.'.join([str(x) for x in self.version])
return "%-12.12s%s (typ->%s,nrows->%s,ncols->%s,indexers->[%s]%s)" % (
self.pandas_type, ver, self.table_type_short, self.nrows,
self.ncols, ','.join([a.name for a in self.index_axes]), dc
)
def __getitem__(self, c):
""" return the axis for c """
for a in self.axes:
if c == a.name:
return a
return None
def validate(self, other):
""" validate against an existing table """
if other is None:
return
if other.table_type != self.table_type:
raise TypeError("incompatible table_type with existing [%s - %s]" %
(other.table_type, self.table_type))
for c in ['index_axes', 'non_index_axes', 'values_axes']:
sv = getattr(self, c, None)
ov = getattr(other, c, None)
if sv != ov:
# show the error for the specific axes
for i, sax in enumerate(sv):
oax = ov[i]
if sax != oax:
raise ValueError(
"invalid combinate of [%s] on appending data [%s] "
"vs current table [%s]" % (c, sax, oax))
# should never get here
raise Exception(
"invalid combinate of [%s] on appending data [%s] vs "
"current table [%s]" % (c, sv, ov))
@property
def is_multi_index(self):
"""the levels attribute is 1 or a list in the case of a multi-index"""
return isinstance(self.levels, list)
def validate_metadata(self, existing):
""" create / validate metadata """
self.metadata = [ c.name for c in self.values_axes if c.metadata is not None ]
def validate_multiindex(self, obj):
"""validate that we can store the multi-index; reset and return the
new object
"""
levels = [l if l is not None else "level_{0}".format(i)
for i, l in enumerate(obj.index.names)]
try:
return obj.reset_index(), levels
except ValueError:
raise ValueError("duplicate names/columns in the multi-index when "
"storing as a table")
@property
def nrows_expected(self):
""" based on our axes, compute the expected nrows """
return np.prod([i.cvalues.shape[0] for i in self.index_axes])
@property
def is_exists(self):
""" has this table been created """
return u('table') in self.group
@property
def storable(self):
return getattr(self.group, 'table', None)
@property
def table(self):
""" return the table group (this is my storable) """
return self.storable
@property
def dtype(self):
return self.table.dtype
@property
def description(self):
return self.table.description
@property
def axes(self):
return itertools.chain(self.index_axes, self.values_axes)
@property
def ncols(self):
""" the number of total columns in the values axes """
return sum([len(a.values) for a in self.values_axes])
@property
def is_transposed(self):
return False
@property
def data_orientation(self):
"""return a tuple of my permutated axes, non_indexable at the front"""
return tuple(itertools.chain([int(a[0]) for a in self.non_index_axes],
[int(a.axis) for a in self.index_axes]))
def queryables(self):
""" return a dict of the kinds allowable columns for this object """
# compute the values_axes queryables
return dict(
[(a.cname, a) for a in self.index_axes] +
[(self.storage_obj_type._AXIS_NAMES[axis], None)
for axis, values in self.non_index_axes] +
[(v.cname, v) for v in self.values_axes
if v.name in set(self.data_columns)]
)
def index_cols(self):
""" return a list of my index cols """
return [(i.axis, i.cname) for i in self.index_axes]
def values_cols(self):
""" return a list of my values cols """
return [i.cname for i in self.values_axes]
def _get_metadata_path(self, key):
""" return the metadata pathname for this key """
return "{group}/meta/{key}/meta".format(group=self.group._v_pathname,
key=key)
def write_metadata(self, key, values):
"""
write out a metadata array to the key as a fixed-format Series
Parameters
----------
key : string
values : ndarray
"""
values = Series(values)
self.parent.put(self._get_metadata_path(key), values, format='table',
encoding=self.encoding, nan_rep=self.nan_rep)
def read_metadata(self, key):
""" return the meta data array for this key """
if getattr(getattr(self.group,'meta',None),key,None) is not None:
return self.parent.select(self._get_metadata_path(key))
return None
def set_info(self):
""" update our table index info """
self.attrs.info = self.info
def set_attrs(self):
""" set our table type & indexables """
self.attrs.table_type = str(self.table_type)
self.attrs.index_cols = self.index_cols()
self.attrs.values_cols = self.values_cols()
self.attrs.non_index_axes = self.non_index_axes
self.attrs.data_columns = self.data_columns
self.attrs.nan_rep = self.nan_rep
self.attrs.encoding = self.encoding
self.attrs.levels = self.levels
self.attrs.metadata = self.metadata
self.set_info()
def get_attrs(self):
""" retrieve our attributes """
self.non_index_axes = getattr(
self.attrs, 'non_index_axes', None) or []
self.data_columns = getattr(
self.attrs, 'data_columns', None) or []
self.info = getattr(
self.attrs, 'info', None) or dict()
self.nan_rep = getattr(self.attrs, 'nan_rep', None)
self.encoding = _ensure_encoding(
getattr(self.attrs, 'encoding', None))
self.levels = getattr(
self.attrs, 'levels', None) or []
self.index_axes = [
a.infer(self) for a in self.indexables if a.is_an_indexable
]
self.values_axes = [
a.infer(self) for a in self.indexables if not a.is_an_indexable
]
self.metadata = getattr(
self.attrs, 'metadata', None) or []
def validate_version(self, where=None):
""" are we trying to operate on an old version? """
if where is not None:
if (self.version[0] <= 0 and self.version[1] <= 10 and
self.version[2] < 1):
ws = incompatibility_doc % '.'.join(
[str(x) for x in self.version])
warnings.warn(ws, IncompatibilityWarning)
def validate_min_itemsize(self, min_itemsize):
"""validate the min_itemisze doesn't contain items that are not in the
axes this needs data_columns to be defined
"""
if min_itemsize is None:
return
if not isinstance(min_itemsize, dict):
return
q = self.queryables()
for k, v in min_itemsize.items():
# ok, apply generally
if k == 'values':
continue
if k not in q:
raise ValueError(
"min_itemsize has the key [%s] which is not an axis or "
"data_column" % k)
@property
def indexables(self):
""" create/cache the indexables if they don't exist """
if self._indexables is None:
self._indexables = []
# index columns
self._indexables.extend([
IndexCol(name=name, axis=axis, pos=i)
for i, (axis, name) in enumerate(self.attrs.index_cols)
])
# values columns
dc = set(self.data_columns)
base_pos = len(self._indexables)
def f(i, c):
klass = DataCol
if c in dc:
klass = DataIndexableCol
return klass.create_for_block(i=i, name=c, pos=base_pos + i,
version=self.version)
self._indexables.extend(
[f(i, c) for i, c in enumerate(self.attrs.values_cols)])
return self._indexables
def create_index(self, columns=None, optlevel=None, kind=None):
"""
Create a pytables index on the specified columns
note: cannot index Time64Col() or ComplexCol currently;
PyTables must be >= 3.0
Parameters
----------
columns : False (don't create an index), True (create all columns
index), None or list_like (the indexers to index)
optlevel: optimization level (defaults to 6)
kind : kind of index (defaults to 'medium')
Exceptions
----------
raises if the node is not a table
"""
if not self.infer_axes():
return
if columns is False:
return
# index all indexables and data_columns
if columns is None or columns is True:
columns = [a.cname for a in self.axes if a.is_data_indexable]
if not isinstance(columns, (tuple, list)):
columns = [columns]
kw = dict()
if optlevel is not None:
kw['optlevel'] = optlevel
if kind is not None:
kw['kind'] = kind
table = self.table
for c in columns:
v = getattr(table.cols, c, None)
if v is not None:
# remove the index if the kind/optlevel have changed
if v.is_indexed:
index = v.index
cur_optlevel = index.optlevel
cur_kind = index.kind
if kind is not None and cur_kind != kind:
v.remove_index()
else:
kw['kind'] = cur_kind
if optlevel is not None and cur_optlevel != optlevel:
v.remove_index()
else:
kw['optlevel'] = cur_optlevel
# create the index
if not v.is_indexed:
if v.type.startswith('complex'):
raise TypeError('Columns containing complex values can be stored but cannot'
' be indexed when using table format. Either use fixed '
'format, set index=False, or do not include the columns '
'containing complex values to data_columns when '
'initializing the table.')
v.create_index(**kw)
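# Sketch of the public entry points that reach ``create_index`` (key 'df' and
# column 'B' are hypothetical). Data columns are indexed by default on append;
# an index can also be built or tuned explicitly afterwards:
#
#   store.append('df', df, data_columns=['B'], index=False)
#   store.create_table_index('df', columns=['B'], optlevel=9, kind='full')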
def read_axes(self, where, **kwargs):
"""create and return the axes sniffed from the table: return boolean
for success
"""
# validate the version
self.validate_version(where)
# infer the data kind
if not self.infer_axes():
return False
# create the selection
self.selection = Selection(self, where=where, **kwargs)
values = self.selection.select()
# convert the data
for a in self.axes:
a.set_info(self.info)
a.convert(values, nan_rep=self.nan_rep, encoding=self.encoding)
return True
def get_object(self, obj):
""" return the data for this obj """
return obj
def validate_data_columns(self, data_columns, min_itemsize):
"""take the input data_columns and min_itemize and create a data
columns spec
"""
if not len(self.non_index_axes):
return []
axis, axis_labels = self.non_index_axes[0]
info = self.info.get(axis, dict())
if info.get('type') == 'MultiIndex' and data_columns:
raise ValueError("cannot use a multi-index on axis [{0}] with "
"data_columns {1}".format(axis, data_columns))
# evaluate the passed data_columns, True == use all columns
# take only valid axis labels
if data_columns is True:
data_columns = axis_labels
elif data_columns is None:
data_columns = []
# if min_itemsize is a dict, add the keys (exclude 'values')
if isinstance(min_itemsize, dict):
existing_data_columns = set(data_columns)
data_columns.extend([
k for k in min_itemsize.keys()
if k != 'values' and k not in existing_data_columns
])
# return valid columns in the order of our axis
return [c for c in data_columns if c in axis_labels]
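# Usage sketch for ``data_columns`` (names are hypothetical): the listed
# columns are written out as individual queryable columns, so they can appear
# in a ``where`` clause; ``data_columns=True`` makes every column queryable:
#
#   store.append('df', df, data_columns=['A', 'B'])
#   store.select('df', where='A > 0 & B == "foo"')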
def create_axes(self, axes, obj, validate=True, nan_rep=None,
data_columns=None, min_itemsize=None, **kwargs):
""" create and return the axes
legacy tables create an indexable column, indexable index,
non-indexable fields
Parameters:
-----------
axes: a list of the axes in order to create (names or numbers of
the axes)
obj : the object to create axes on
validate: validate the obj against an existing object already
written
min_itemsize: a dict of the min size for a column in bytes
nan_rep : a values to use for string column nan_rep
encoding : the encoding for string values
data_columns : a list of columns that we want to create separate to
allow indexing (or True will force all columns)
"""
# set the default axes if needed
if axes is None:
try:
axes = _AXES_MAP[type(obj)]
except:
raise TypeError("cannot properly create the storer for: "
"[group->%s,value->%s]"
% (self.group._v_name, type(obj)))
# map axes to numbers
axes = [obj._get_axis_number(a) for a in axes]
# do we have an existing table (if so, use its axes & data_columns)
if self.infer_axes():
existing_table = self.copy()
existing_table.infer_axes()
axes = [a.axis for a in existing_table.index_axes]
data_columns = existing_table.data_columns
nan_rep = existing_table.nan_rep
self.encoding = existing_table.encoding
self.info = copy.copy(existing_table.info)
else:
existing_table = None
# currently we only support ndim-1 indexers
if len(axes) != self.ndim - 1:
raise ValueError(
"currently only support ndim-1 indexers in an AppendableTable")
# create according to the new data
self.non_index_axes = []
self.data_columns = []
# nan_representation
if nan_rep is None:
nan_rep = 'nan'
self.nan_rep = nan_rep
# create axes to index and non_index
index_axes_map = dict()
for i, a in enumerate(obj.axes):
if i in axes:
name = obj._AXIS_NAMES[i]
index_axes_map[i] = _convert_index(
a, self.encoding, self.format_type
).set_name(name).set_axis(i)
else:
# we might be able to change the axes on the appending data if
# necessary
append_axis = list(a)
if existing_table is not None:
indexer = len(self.non_index_axes)
exist_axis = existing_table.non_index_axes[indexer][1]
if append_axis != exist_axis:
# ahah! -> reindex
if sorted(append_axis) == sorted(exist_axis):
append_axis = exist_axis
# the non_index_axes info
info = _get_info(self.info, i)
info['names'] = list(a.names)
info['type'] = a.__class__.__name__
self.non_index_axes.append((i, append_axis))
# set axis positions (based on the axes)
self.index_axes = [
index_axes_map[a].set_pos(j).update_info(self.info)
for j, a in enumerate(axes)
]
j = len(self.index_axes)
# check for column conflicts
if validate:
for a in self.axes:
a.maybe_set_size(min_itemsize=min_itemsize)
# reindex by our non_index_axes & compute data_columns
for a in self.non_index_axes:
obj = _reindex_axis(obj, a[0], a[1])
def get_blk_items(mgr, blocks):
return [mgr.items.take(blk.mgr_locs) for blk in blocks]
# figure out data_columns and get our blocks
block_obj = self.get_object(obj).consolidate()
blocks = block_obj._data.blocks
blk_items = get_blk_items(block_obj._data, blocks)
if len(self.non_index_axes):
axis, axis_labels = self.non_index_axes[0]
data_columns = self.validate_data_columns(
data_columns, min_itemsize)
if len(data_columns):
mgr = block_obj.reindex_axis(
Index(axis_labels).difference(Index(data_columns)),
axis=axis
)._data
blocks = list(mgr.blocks)
blk_items = get_blk_items(mgr, blocks)
for c in data_columns:
mgr = block_obj.reindex_axis([c], axis=axis)._data
blocks.extend(mgr.blocks)
blk_items.extend(get_blk_items(mgr, mgr.blocks))
# reorder the blocks in the same order as the existing_table if we can
if existing_table is not None:
by_items = dict([(tuple(b_items.tolist()), (b, b_items))
for b, b_items in zip(blocks, blk_items)])
new_blocks = []
new_blk_items = []
for ea in existing_table.values_axes:
items = tuple(ea.values)
try:
b, b_items = by_items.pop(items)
new_blocks.append(b)
new_blk_items.append(b_items)
except:
raise ValueError(
"cannot match existing table structure for [%s] on "
"appending data" % ','.join(com.pprint_thing(item) for
item in items))
blocks = new_blocks
blk_items = new_blk_items
# add my values
self.values_axes = []
for i, (b, b_items) in enumerate(zip(blocks, blk_items)):
# the shape of the data columns is given by the indexable axes
klass = DataCol
name = None
# we have a data_column
if (data_columns and len(b_items) == 1 and
b_items[0] in data_columns):
klass = DataIndexableCol
name = b_items[0]
self.data_columns.append(name)
# make sure that we match up the existing columns
# if we have an existing table
if existing_table is not None and validate:
try:
existing_col = existing_table.values_axes[i]
except:
raise ValueError("Incompatible appended table [%s] with "
"existing table [%s]"
% (blocks, existing_table.values_axes))
else:
existing_col = None
try:
col = klass.create_for_block(
i=i, name=name, version=self.version)
col.set_atom(block=b, block_items=b_items,
existing_col=existing_col,
min_itemsize=min_itemsize,
nan_rep=nan_rep,
encoding=self.encoding,
info=self.info,
**kwargs)
col.set_pos(j)
self.values_axes.append(col)
except (NotImplementedError, ValueError, TypeError) as e:
raise e
except Exception as detail:
raise Exception(
"cannot find the correct atom type -> "
"[dtype->%s,items->%s] %s"
% (b.dtype.name, b_items, str(detail))
)
j += 1
# validate our min_itemsize
self.validate_min_itemsize(min_itemsize)
# validate our metadata
self.validate_metadata(existing_table)
# validate the axes if we have an existing table
if validate:
self.validate(existing_table)
def process_axes(self, obj, columns=None):
""" process axes filters """
# make a copy to avoid side effects
if columns is not None:
columns = list(columns)
# make sure to include levels if we have them
if columns is not None and self.is_multi_index:
for n in self.levels:
if n not in columns:
columns.insert(0, n)
# reorder by any non_index_axes & limit to the select columns
for axis, labels in self.non_index_axes:
obj = _reindex_axis(obj, axis, labels, columns)
# apply the selection filters (but keep in the same order)
if self.selection.filter is not None:
for field, op, filt in self.selection.filter.format():
def process_filter(field, filt):
for axis_name in obj._AXIS_NAMES.values():
axis_number = obj._get_axis_number(axis_name)
axis_values = obj._get_axis(axis_name)
# see if the field is the name of an axis
if field == axis_name:
# if we have a multi-index, then need to include
# the levels
if self.is_multi_index:
filt = filt.union(Index(self.levels))
takers = op(axis_values, filt)
return obj.ix._getitem_axis(takers,
axis=axis_number)
# this might be the name of a field in an axis
elif field in axis_values:
# we need to filter on this dimension
values = _ensure_index(getattr(obj, field).values)
filt = _ensure_index(filt)
# hack until we support reversed dim flags
if isinstance(obj, DataFrame):
axis_number = 1 - axis_number
takers = op(values, filt)
return obj.ix._getitem_axis(takers,
axis=axis_number)
raise ValueError(
"cannot find the field [%s] for filtering!" % field)
obj = process_filter(field, filt)
return obj
def create_description(self, complib=None, complevel=None,
fletcher32=False, expectedrows=None):
""" create the description of the table from the axes & values """
# use the provided expectedrows if it was passed
if expectedrows is None:
expectedrows = max(self.nrows_expected, 10000)
d = dict(name='table', expectedrows=expectedrows)
# description from the axes & values
d['description'] = dict([(a.cname, a.typ) for a in self.axes])
if complib:
if complevel is None:
complevel = self._complevel or 9
filters = _tables().Filters(
complevel=complevel, complib=complib,
fletcher32=fletcher32 or self._fletcher32)
d['filters'] = filters
elif self._filters is not None:
d['filters'] = self._filters
return d
def read_coordinates(self, where=None, start=None, stop=None, **kwargs):
"""select coordinates (row numbers) from a table; return the
coordinates object
"""
# validate the version
self.validate_version(where)
# infer the data kind
if not self.infer_axes():
return False
# create the selection
self.selection = Selection(
self, where=where, start=start, stop=stop, **kwargs)
coords = self.selection.select_coords()
if self.selection.filter is not None:
for field, op, filt in self.selection.filter.format():
data = self.read_column(field, start=coords.min(), stop=coords.max()+1)
coords = coords[op(data.iloc[coords-coords.min()], filt).values]
return Index(coords)
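# Sketch of the coordinate-based workflow backed by ``read_coordinates`` (key
# 'df' is hypothetical): coordinates from one selection can be reused to pull
# the matching rows, e.g. across tables with aligned rows:
#
#   coords = store.select_as_coordinates('df', 'index > 5')
#   subset = store.select('df', where=coords)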
def read_column(self, column, where=None, start=None, stop=None, **kwargs):
"""return a single column from the table, generally only indexables
are interesting
"""
# validate the version
self.validate_version()
# infer the data kind
if not self.infer_axes():
return False
if where is not None:
raise TypeError("read_column does not currently accept a where "
"clause")
# find the axes
for a in self.axes:
if column == a.name:
if not a.is_data_indexable:
raise ValueError(
"column [%s] can not be extracted individually; it is "
"not data indexable" % column)
# column must be an indexable or a data column
c = getattr(self.table.cols, column)
a.set_info(self.info)
return Series(_set_tz(a.convert(c[start:stop],
nan_rep=self.nan_rep,
encoding=self.encoding
).take_data(),
a.tz, True), name=column)
raise KeyError("column [%s] not found in the table" % column)
class WORMTable(Table):
""" a write-once read-many table: this format DOES NOT ALLOW appending to a
    table. Writing is a one-time operation; the data are stored in a format
that allows for searching the data on disk
"""
table_type = u('worm')
def read(self, **kwargs):
""" read the indicies and the indexing array, calculate offset rows and
return """
raise NotImplementedError("WORMTable needs to implement read")
def write(self, **kwargs):
""" write in a format that we can search later on (but cannot append
        to): write out the indices and the values using _write_array
(e.g. a CArray) create an indexing table so that we can search
"""
raise NotImplementedError("WORKTable needs to implement write")
class LegacyTable(Table):
""" an appendable table: allow append/query/delete operations to a
        (possibly) already existing appendable table; this table ALLOWS
append (but doesn't require them), and stores the data in a format
that can be easily searched
"""
_indexables = [
IndexCol(name='index', axis=1, pos=0),
IndexCol(name='column', axis=2, pos=1, index_kind='columns_kind'),
DataCol(name='fields', cname='values', kind_attr='fields', pos=2)
]
table_type = u('legacy')
ndim = 3
def write(self, **kwargs):
raise TypeError("write operations are not allowed on legacy tables!")
def read(self, where=None, columns=None, **kwargs):
"""we have n indexable columns, with an arbitrary number of data
axes
"""
if not self.read_axes(where=where, **kwargs):
return None
factors = [Categorical.from_array(a.values, ordered=True) for a in self.index_axes]
levels = [f.categories for f in factors]
N = [len(f.categories) for f in factors]
labels = [f.codes for f in factors]
# compute the key
key = _factor_indexer(N[1:], labels)
objs = []
if len(unique(key)) == len(key):
sorter, _ = algos.groupsort_indexer(
com._ensure_int64(key), np.prod(N))
sorter = com._ensure_platform_int(sorter)
# create the objs
for c in self.values_axes:
# the data need to be sorted
sorted_values = c.take_data().take(sorter, axis=0)
if sorted_values.ndim == 1:
sorted_values = sorted_values.reshape((sorted_values.shape[0],1))
take_labels = [l.take(sorter) for l in labels]
items = Index(c.values)
block = _block2d_to_blocknd(
values=sorted_values, placement=np.arange(len(items)),
shape=tuple(N), labels=take_labels, ref_items=items)
# create the object
mgr = BlockManager([block], [items] + levels)
obj = self.obj_type(mgr)
# permute if needed
if self.is_transposed:
obj = obj.transpose(
*tuple(Series(self.data_orientation).argsort()))
objs.append(obj)
else:
warnings.warn(duplicate_doc, DuplicateWarning, stacklevel=5)
# reconstruct
long_index = MultiIndex.from_arrays(
[i.values for i in self.index_axes])
for c in self.values_axes:
lp = DataFrame(c.data, index=long_index, columns=c.values)
# need a better algorithm
tuple_index = long_index._tuple_index
unique_tuples = lib.fast_unique(tuple_index.values)
unique_tuples = _asarray_tuplesafe(unique_tuples)
indexer = match(unique_tuples, tuple_index)
indexer = com._ensure_platform_int(indexer)
new_index = long_index.take(indexer)
new_values = lp.values.take(indexer, axis=0)
lp = DataFrame(new_values, index=new_index, columns=lp.columns)
objs.append(lp.to_panel())
# create the composite object
if len(objs) == 1:
wp = objs[0]
else:
wp = concat(objs, axis=0, verify_integrity=False).consolidate()
# apply the selection filters & axis orderings
wp = self.process_axes(wp, columns=columns)
return wp
class LegacyFrameTable(LegacyTable):
""" support the legacy frame table """
pandas_kind = u('frame_table')
table_type = u('legacy_frame')
obj_type = Panel
def read(self, *args, **kwargs):
return super(LegacyFrameTable, self).read(*args, **kwargs)['value']
class LegacyPanelTable(LegacyTable):
""" support the legacy panel table """
table_type = u('legacy_panel')
obj_type = Panel
class AppendableTable(LegacyTable):
""" suppor the new appendable table formats """
_indexables = None
table_type = u('appendable')
def write(self, obj, axes=None, append=False, complib=None,
complevel=None, fletcher32=None, min_itemsize=None,
chunksize=None, expectedrows=None, dropna=False, **kwargs):
if not append and self.is_exists:
self._handle.remove_node(self.group, 'table')
# create the axes
self.create_axes(axes=axes, obj=obj, validate=append,
min_itemsize=min_itemsize,
**kwargs)
for a in self.axes:
a.validate(self, append)
if not self.is_exists:
# create the table
options = self.create_description(complib=complib,
complevel=complevel,
fletcher32=fletcher32,
expectedrows=expectedrows)
# set the table attributes
self.set_attrs()
# create the table
table = self._handle.create_table(self.group, **options)
else:
table = self.table
# update my info
self.set_info()
# validate the axes and set the kinds
for a in self.axes:
a.validate_and_set(self, append)
# add the rows
self.write_data(chunksize, dropna=dropna)
def write_data(self, chunksize, dropna=False):
""" we form the data into a 2-d including indexes,values,mask
write chunk-by-chunk """
names = self.dtype.names
nrows = self.nrows_expected
# if dropna==True, then drop ALL nan rows
if dropna:
masks = []
for a in self.values_axes:
# figure the mask: only do if we can successfully process this
# column, otherwise ignore the mask
mask = com.isnull(a.data).all(axis=0)
masks.append(mask.astype('u1', copy=False))
# consolidate masks
mask = masks[0]
for m in masks[1:]:
mask = mask & m
mask = mask.ravel()
else:
mask = None
# broadcast the indexes if needed
indexes = [a.cvalues for a in self.index_axes]
nindexes = len(indexes)
bindexes = []
for i, idx in enumerate(indexes):
# broadcast to all other indexes except myself
if i > 0 and i < nindexes:
repeater = np.prod(
[indexes[bi].shape[0] for bi in range(0, i)])
idx = np.tile(idx, repeater)
if i < nindexes - 1:
repeater = np.prod([indexes[bi].shape[0]
for bi in range(i + 1, nindexes)])
idx = np.repeat(idx, repeater)
bindexes.append(idx)
# transpose the values so first dimension is last
# reshape the values if needed
values = [a.take_data() for a in self.values_axes]
values = [v.transpose(np.roll(np.arange(v.ndim), v.ndim - 1))
for v in values]
bvalues = []
for i, v in enumerate(values):
new_shape = (nrows,) + self.dtype[names[nindexes + i]].shape
bvalues.append(values[i].reshape(new_shape))
# write the chunks
if chunksize is None:
chunksize = 100000
rows = np.empty(min(chunksize,nrows), dtype=self.dtype)
chunks = int(nrows / chunksize) + 1
for i in range(chunks):
start_i = i * chunksize
end_i = min((i + 1) * chunksize, nrows)
if start_i >= end_i:
break
self.write_data_chunk(
rows,
indexes=[a[start_i:end_i] for a in bindexes],
mask=mask[start_i:end_i] if mask is not None else None,
values=[v[start_i:end_i] for v in bvalues])
def write_data_chunk(self, rows, indexes, mask, values):
"""
Parameters
----------
rows : an empty memory space where we are putting the chunk
indexes : an array of the indexes
mask : an array of the masks
values : an array of the values
"""
# 0 len
for v in values:
if not np.prod(v.shape):
return
try:
nrows = indexes[0].shape[0]
if nrows != len(rows):
rows = np.empty(nrows, dtype=self.dtype)
names = self.dtype.names
nindexes = len(indexes)
# indexes
for i, idx in enumerate(indexes):
rows[names[i]] = idx
# values
for i, v in enumerate(values):
rows[names[i + nindexes]] = v
# mask
if mask is not None:
m = ~mask.ravel().astype(bool, copy=False)
if not m.all():
rows = rows[m]
except Exception as detail:
raise Exception("cannot create row-data -> %s" % detail)
try:
if len(rows):
self.table.append(rows)
self.table.flush()
except Exception as detail:
raise TypeError("tables cannot write this data -> %s" % detail)
def delete(self, where=None, start=None, stop=None, **kwargs):
# delete all rows (and return the nrows)
if where is None or not len(where):
if start is None and stop is None:
nrows = self.nrows
self._handle.remove_node(self.group, recursive=True)
else:
# pytables<3.0 would remove a single row with stop=None
if stop is None:
stop = self.nrows
nrows = self.table.remove_rows(start=start, stop=stop)
self.table.flush()
return nrows
# infer the data kind
if not self.infer_axes():
return None
# create the selection
table = self.table
self.selection = Selection(self, where, start=start, stop=stop, **kwargs)
values = self.selection.select_coords()
# delete the rows in reverse order
l = Series(values).sort_values()
ln = len(l)
if ln:
# construct groups of consecutive rows
diff = l.diff()
groups = list(diff[diff > 1].index)
# 1 group
if not len(groups):
groups = [0]
# final element
if groups[-1] != ln:
groups.append(ln)
# initial element
if groups[0] != 0:
groups.insert(0, 0)
# we must remove in reverse order!
pg = groups.pop()
for g in reversed(groups):
rows = l.take(lrange(g, pg))
table.remove_rows(start=rows[rows.index[0]
], stop=rows[rows.index[-1]] + 1)
pg = g
self.table.flush()
# return the number of rows removed
return ln
class AppendableFrameTable(AppendableTable):
""" suppor the new appendable table formats """
pandas_kind = u('frame_table')
table_type = u('appendable_frame')
ndim = 2
obj_type = DataFrame
@property
def is_transposed(self):
return self.index_axes[0].axis == 1
def get_object(self, obj):
""" these are written transposed """
if self.is_transposed:
obj = obj.T
return obj
def read(self, where=None, columns=None, **kwargs):
if not self.read_axes(where=where, **kwargs):
return None
info = (self.info.get(self.non_index_axes[0][0], dict())
if len(self.non_index_axes) else dict())
index = self.index_axes[0].values
frames = []
for a in self.values_axes:
# we could have a multi-index constructor here
            # _ensure_index doesn't recognize our list-of-tuples here
if info.get('type') == 'MultiIndex':
cols = MultiIndex.from_tuples(a.values)
else:
cols = Index(a.values)
names = info.get('names')
if names is not None:
cols.set_names(names, inplace=True)
if self.is_transposed:
values = a.cvalues
index_ = cols
cols_ = Index(index, name=getattr(index, 'name', None))
else:
values = a.cvalues.T
index_ = Index(index, name=getattr(index, 'name', None))
cols_ = cols
# if we have a DataIndexableCol, its shape will only be 1 dim
if values.ndim == 1 and isinstance(values, np.ndarray):
values = values.reshape((1, values.shape[0]))
block = make_block(values, placement=np.arange(len(cols_)))
mgr = BlockManager([block], [cols_, index_])
frames.append(DataFrame(mgr))
if len(frames) == 1:
df = frames[0]
else:
df = concat(frames, axis=1, verify_integrity=False).consolidate()
# apply the selection filters & axis orderings
df = self.process_axes(df, columns=columns)
return df
class AppendableSeriesTable(AppendableFrameTable):
""" support the new appendable table formats """
pandas_kind = u('series_table')
table_type = u('appendable_series')
ndim = 2
obj_type = Series
storage_obj_type = DataFrame
@property
def is_transposed(self):
return False
def get_object(self, obj):
return obj
def write(self, obj, data_columns=None, **kwargs):
""" we are going to write this as a frame table """
if not isinstance(obj, DataFrame):
name = obj.name or 'values'
obj = DataFrame({name: obj}, index=obj.index)
obj.columns = [name]
return super(AppendableSeriesTable, self).write(
obj=obj, data_columns=obj.columns, **kwargs)
def read(self, columns=None, **kwargs):
is_multi_index = self.is_multi_index
if columns is not None and is_multi_index:
for n in self.levels:
if n not in columns:
columns.insert(0, n)
s = super(AppendableSeriesTable, self).read(columns=columns, **kwargs)
if is_multi_index:
s.set_index(self.levels, inplace=True)
s = s.iloc[:, 0]
# remove the default name
if s.name == 'values':
s.name = None
return s
class AppendableMultiSeriesTable(AppendableSeriesTable):
""" support the new appendable table formats """
pandas_kind = u('series_table')
table_type = u('appendable_multiseries')
def write(self, obj, **kwargs):
""" we are going to write this as a frame table """
name = obj.name or 'values'
obj, self.levels = self.validate_multiindex(obj)
cols = list(self.levels)
cols.append(name)
obj.columns = cols
return super(AppendableMultiSeriesTable, self).write(obj=obj, **kwargs)
class GenericTable(AppendableFrameTable):
""" a table that read/writes the generic pytables table format """
pandas_kind = u('frame_table')
table_type = u('generic_table')
ndim = 2
obj_type = DataFrame
@property
def pandas_type(self):
return self.pandas_kind
@property
def storable(self):
return getattr(self.group, 'table', None) or self.group
def get_attrs(self):
""" retrieve our attributes """
self.non_index_axes = []
self.nan_rep = None
self.levels = []
self.index_axes = [a.infer(self)
for a in self.indexables if a.is_an_indexable]
self.values_axes = [a.infer(self)
for a in self.indexables if not a.is_an_indexable]
self.data_columns = [a.name for a in self.values_axes]
@property
def indexables(self):
""" create the indexables from the table description """
if self._indexables is None:
d = self.description
# the index columns is just a simple index
self._indexables = [GenericIndexCol(name='index', axis=0)]
for i, n in enumerate(d._v_names):
dc = GenericDataIndexableCol(
name=n, pos=i, values=[n], version=self.version)
self._indexables.append(dc)
return self._indexables
def write(self, **kwargs):
raise NotImplementedError("cannot write on an generic table")
class AppendableMultiFrameTable(AppendableFrameTable):
""" a frame with a multi-index """
table_type = u('appendable_multiframe')
obj_type = DataFrame
ndim = 2
_re_levels = re.compile("^level_\d+$")
@property
def table_type_short(self):
return u('appendable_multi')
def write(self, obj, data_columns=None, **kwargs):
if data_columns is None:
data_columns = []
elif data_columns is True:
data_columns = obj.columns[:]
obj, self.levels = self.validate_multiindex(obj)
for n in self.levels:
if n not in data_columns:
data_columns.insert(0, n)
return super(AppendableMultiFrameTable, self).write(
obj=obj, data_columns=data_columns, **kwargs)
def read(self, **kwargs):
df = super(AppendableMultiFrameTable, self).read(**kwargs)
df = df.set_index(self.levels)
# remove names for 'level_%d'
df.index = df.index.set_names([
None if self._re_levels.search(l) else l for l in df.index.names
])
return df
class AppendablePanelTable(AppendableTable):
""" suppor the new appendable table formats """
table_type = u('appendable_panel')
ndim = 3
obj_type = Panel
def get_object(self, obj):
""" these are written transposed """
if self.is_transposed:
obj = obj.transpose(*self.data_orientation)
return obj
@property
def is_transposed(self):
return self.data_orientation != tuple(range(self.ndim))
class AppendableNDimTable(AppendablePanelTable):
""" suppor the new appendable table formats """
table_type = u('appendable_ndim')
ndim = 4
obj_type = Panel4D
def _reindex_axis(obj, axis, labels, other=None):
ax = obj._get_axis(axis)
labels = _ensure_index(labels)
# try not to reindex even if other is provided
# if it equals our current index
if other is not None:
other = _ensure_index(other)
if (other is None or labels.equals(other)) and labels.equals(ax):
return obj
labels = _ensure_index(labels.unique())
if other is not None:
labels = labels & _ensure_index(other.unique())
if not labels.equals(ax):
slicer = [slice(None, None)] * obj.ndim
slicer[axis] = labels
obj = obj.loc[tuple(slicer)]
return obj
def _get_info(info, name):
""" get/create the info for this name """
try:
idx = info[name]
except:
idx = info[name] = dict()
return idx
### tz to/from coercion ###
def _get_tz(tz):
""" for a tz-aware type, return an encoded zone """
zone = tslib.get_timezone(tz)
if zone is None:
zone = tslib.tot_seconds(tz.utcoffset())
return zone
def _set_tz(values, tz, preserve_UTC=False, coerce=False):
"""
coerce the values to a DatetimeIndex if tz is set
preserve the input shape if possible
Parameters
----------
values : ndarray
tz : string/pickled tz object
preserve_UTC : boolean,
preserve the UTC of the result
coerce : if we do not have a passed timezone, coerce to M8[ns] ndarray
"""
if tz is not None:
values = values.ravel()
tz = tslib.get_timezone(_ensure_decoded(tz))
values = DatetimeIndex(values)
if values.tz is None:
values = values.tz_localize('UTC').tz_convert(tz)
if preserve_UTC:
if tz == 'UTC':
values = list(values)
elif coerce:
values = np.asarray(values, dtype='M8[ns]')
return values
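# Illustrative sketch of _set_tz behaviour (editorial note; the sample input below
# is an assumption, not taken from the original tests):
#   vals = np.array([1420070400000000000, 1420156800000000000], dtype='i8')
#   _set_tz(vals, 'US/Eastern')        # DatetimeIndex localized to UTC and then
#                                      # converted to US/Eastern
#   _set_tz(vals, None, coerce=True)   # plain M8[ns] ndarray, no tz handling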
def _convert_index(index, encoding=None, format_type=None):
index_name = getattr(index, 'name', None)
if isinstance(index, DatetimeIndex):
converted = index.asi8
return IndexCol(converted, 'datetime64', _tables().Int64Col(),
freq=getattr(index, 'freq', None),
tz=getattr(index, 'tz', None),
index_name=index_name)
elif isinstance(index, TimedeltaIndex):
converted = index.asi8
return IndexCol(converted, 'timedelta64', _tables().Int64Col(),
freq=getattr(index, 'freq', None),
index_name=index_name)
elif isinstance(index, (Int64Index, PeriodIndex)):
atom = _tables().Int64Col()
return IndexCol(
index.values, 'integer', atom, freq=getattr(index, 'freq', None),
index_name=index_name)
if isinstance(index, MultiIndex):
raise TypeError('MultiIndex not supported here!')
inferred_type = lib.infer_dtype(index)
values = np.asarray(index)
if inferred_type == 'datetime64':
converted = values.view('i8')
return IndexCol(converted, 'datetime64', _tables().Int64Col(),
freq=getattr(index, 'freq', None),
tz=getattr(index, 'tz', None),
index_name=index_name)
elif inferred_type == 'timedelta64':
converted = values.view('i8')
return IndexCol(converted, 'timedelta64', _tables().Int64Col(),
freq=getattr(index, 'freq', None),
index_name=index_name)
elif inferred_type == 'datetime':
converted = np.asarray([(time.mktime(v.timetuple()) +
v.microsecond / 1E6) for v in values],
dtype=np.float64)
return IndexCol(converted, 'datetime', _tables().Time64Col(),
index_name=index_name)
elif inferred_type == 'date':
converted = np.asarray([v.toordinal() for v in values],
dtype=np.int32)
return IndexCol(converted, 'date', _tables().Time32Col(),
index_name=index_name)
elif inferred_type == 'string':
# atom = _tables().ObjectAtom()
# return np.asarray(values, dtype='O'), 'object', atom
converted = _convert_string_array(values, encoding)
itemsize = converted.dtype.itemsize
return IndexCol(
converted, 'string', _tables().StringCol(itemsize),
itemsize=itemsize, index_name=index_name
)
elif inferred_type == 'unicode':
if format_type == 'fixed':
atom = _tables().ObjectAtom()
return IndexCol(np.asarray(values, dtype='O'), 'object', atom,
index_name=index_name)
raise TypeError(
"[unicode] is not supported as a in index type for [{0}] formats"
.format(format_type)
)
elif inferred_type == 'integer':
# take a guess for now, hope the values fit
atom = _tables().Int64Col()
return IndexCol(np.asarray(values, dtype=np.int64), 'integer', atom,
index_name=index_name)
elif inferred_type == 'floating':
atom = _tables().Float64Col()
return IndexCol(np.asarray(values, dtype=np.float64), 'float', atom,
index_name=index_name)
else: # pragma: no cover
atom = _tables().ObjectAtom()
return IndexCol(np.asarray(values, dtype='O'), 'object', atom,
index_name=index_name)
def _unconvert_index(data, kind, encoding=None):
kind = _ensure_decoded(kind)
if kind == u('datetime64'):
index = DatetimeIndex(data)
elif kind == u('timedelta64'):
index = TimedeltaIndex(data)
elif kind == u('datetime'):
index = np.asarray([datetime.fromtimestamp(v) for v in data],
dtype=object)
elif kind == u('date'):
try:
index = np.asarray(
[date.fromordinal(v) for v in data], dtype=object)
except (ValueError):
index = np.asarray(
[date.fromtimestamp(v) for v in data], dtype=object)
elif kind in (u('integer'), u('float')):
index = np.asarray(data)
elif kind in (u('string')):
index = _unconvert_string_array(data, nan_rep=None, encoding=encoding)
elif kind == u('object'):
index = np.asarray(data[0])
else: # pragma: no cover
raise ValueError('unrecognized index type %s' % kind)
return index
def _unconvert_index_legacy(data, kind, legacy=False, encoding=None):
kind = _ensure_decoded(kind)
if kind == u('datetime'):
index = lib.time64_to_datetime(data)
elif kind in (u('integer')):
index = np.asarray(data, dtype=object)
elif kind in (u('string')):
index = _unconvert_string_array(data, nan_rep=None, encoding=encoding)
else: # pragma: no cover
raise ValueError('unrecognized index type %s' % kind)
return index
def _convert_string_array(data, encoding, itemsize=None):
"""
we take a string-like that is object dtype and coerce to a fixed size string type
Parameters
----------
data : a numpy array of object dtype
encoding : None or string-encoding
itemsize : integer, optional, defaults to the max length of the strings
Returns
-------
data in a fixed-length string dtype, encoded to bytes if needed
"""
# encode if needed
if encoding is not None and len(data):
data = Series(data.ravel()).str.encode(encoding).values.reshape(data.shape)
# create the sized dtype
if itemsize is None:
itemsize = lib.max_len_string_array(com._ensure_object(data.ravel()))
data = np.asarray(data, dtype="S%d" % itemsize)
return data
def _unconvert_string_array(data, nan_rep=None, encoding=None):
"""
inverse of _convert_string_array
Parameters
----------
data : fixed length string dtyped array
nan_rep : the storage repr of NaN, optional
encoding : the encoding of the data, optional
Returns
-------
an object array of the decoded data
"""
shape = data.shape
data = np.asarray(data.ravel(), dtype=object)
# guard against a None encoding in PY3 (because of a legacy
# where the passed encoding is actually None)
encoding = _ensure_encoding(encoding)
if encoding is not None and len(data):
itemsize = lib.max_len_string_array(com._ensure_object(data))
if compat.PY3:
dtype = "U{0}".format(itemsize)
else:
dtype = "S{0}".format(itemsize)
if isinstance(data[0], compat.binary_type):
data = Series(data).str.decode(encoding).values
else:
data = data.astype(dtype, copy=False).astype(object, copy=False)
if nan_rep is None:
nan_rep = 'nan'
data = lib.string_array_replace_from_nan_rep(data, nan_rep)
return data.reshape(shape)
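# Round-trip sketch for the two string helpers above (editorial note; the values
# are illustrative assumptions only):
#   arr = np.array(['a', 'bb', 'ccc'], dtype=object)
#   packed = _convert_string_array(arr, encoding=None)       # fixed-width 'S3'
#   back = _unconvert_string_array(packed, encoding=None)    # object array again
# With an explicit encoding (e.g. 'utf-8') the strings are encoded to bytes
# before packing and decoded again on the way back.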
def _maybe_convert(values, val_kind, encoding):
if _need_convert(val_kind):
conv = _get_converter(val_kind, encoding)
# conv = np.frompyfunc(conv, 1, 1)
values = conv(values)
return values
def _get_converter(kind, encoding):
kind = _ensure_decoded(kind)
if kind == 'datetime64':
return lambda x: np.asarray(x, dtype='M8[ns]')
elif kind == 'datetime':
return lib.convert_timestamps
elif kind == 'string':
return lambda x: _unconvert_string_array(x, encoding=encoding)
else: # pragma: no cover
raise ValueError('invalid kind %s' % kind)
def _need_convert(kind):
kind = _ensure_decoded(kind)
if kind in (u('datetime'), u('datetime64'), u('string')):
return True
return False
class Selection(object):
"""
Carries out a selection operation on a tables.Table object.
Parameters
----------
table : a Table object
    where : list of Terms (or convertible to)
    start, stop: indices to start and/or stop selection
"""
def __init__(self, table, where=None, start=None, stop=None, **kwargs):
self.table = table
self.where = where
self.start = start
self.stop = stop
self.condition = None
self.filter = None
self.terms = None
self.coordinates = None
if com.is_list_like(where):
# see if we have a passed coordinate like
try:
inferred = lib.infer_dtype(where)
if inferred == 'integer' or inferred == 'boolean':
where = np.asarray(where)
if where.dtype == np.bool_:
start, stop = self.start, self.stop
if start is None:
start = 0
if stop is None:
stop = self.table.nrows
self.coordinates = np.arange(start, stop)[where]
elif issubclass(where.dtype.type, np.integer):
if ((self.start is not None and
(where < self.start).any()) or
(self.stop is not None and
(where >= self.stop).any())):
raise ValueError(
"where must have index locations >= start and "
"< stop"
)
self.coordinates = where
except:
pass
if self.coordinates is None:
self.terms = self.generate(where)
# create the numexpr & the filter
if self.terms is not None:
self.condition, self.filter = self.terms.evaluate()
def generate(self, where):
""" where can be a : dict,list,tuple,string """
if where is None:
return None
q = self.table.queryables()
try:
return Expr(where, queryables=q, encoding=self.table.encoding)
except NameError as detail:
# raise a nice message, suggesting that the user should use
# data_columns
raise ValueError(
"The passed where expression: {0}\n"
" contains an invalid variable reference\n"
" all of the variable refrences must be a "
"reference to\n"
" an axis (e.g. 'index' or 'columns'), or a "
"data_column\n"
" The currently defined references are: {1}\n"
.format(where, ','.join(q.keys()))
)
def select(self):
"""
generate the selection
"""
if self.condition is not None:
return self.table.table.read_where(self.condition.format(),
start=self.start, stop=self.stop)
elif self.coordinates is not None:
return self.table.table.read_coordinates(self.coordinates)
return self.table.table.read(start=self.start, stop=self.stop)
def select_coords(self):
"""
generate the selection
"""
start, stop = self.start, self.stop
nrows = self.table.nrows
if start is None:
start = 0
elif start < 0:
start += nrows
if self.stop is None:
stop = nrows
elif stop < 0:
stop += nrows
if self.condition is not None:
return self.table.table.get_where_list(self.condition.format(),
start=start, stop=stop,
sort=True)
elif self.coordinates is not None:
return self.coordinates
return np.arange(start, stop)
# utilities ###
def timeit(key, df, fn=None, remove=True, **kwargs):
if fn is None:
fn = 'timeit.h5'
store = HDFStore(fn, mode='w')
store.append(key, df, **kwargs)
store.close()
if remove:
os.remove(fn)
| mit |
JosmanPS/scikit-learn | sklearn/datasets/__init__.py | 176 | 3671 | """
The :mod:`sklearn.datasets` module includes utilities to load datasets,
including methods to load and fetch popular reference datasets. It also
features some artificial data generators.
"""
from .base import load_diabetes
from .base import load_digits
from .base import load_files
from .base import load_iris
from .base import load_linnerud
from .base import load_boston
from .base import get_data_home
from .base import clear_data_home
from .base import load_sample_images
from .base import load_sample_image
from .covtype import fetch_covtype
from .mlcomp import load_mlcomp
from .lfw import load_lfw_pairs
from .lfw import load_lfw_people
from .lfw import fetch_lfw_pairs
from .lfw import fetch_lfw_people
from .twenty_newsgroups import fetch_20newsgroups
from .twenty_newsgroups import fetch_20newsgroups_vectorized
from .mldata import fetch_mldata, mldata_filename
from .samples_generator import make_classification
from .samples_generator import make_multilabel_classification
from .samples_generator import make_hastie_10_2
from .samples_generator import make_regression
from .samples_generator import make_blobs
from .samples_generator import make_moons
from .samples_generator import make_circles
from .samples_generator import make_friedman1
from .samples_generator import make_friedman2
from .samples_generator import make_friedman3
from .samples_generator import make_low_rank_matrix
from .samples_generator import make_sparse_coded_signal
from .samples_generator import make_sparse_uncorrelated
from .samples_generator import make_spd_matrix
from .samples_generator import make_swiss_roll
from .samples_generator import make_s_curve
from .samples_generator import make_sparse_spd_matrix
from .samples_generator import make_gaussian_quantiles
from .samples_generator import make_biclusters
from .samples_generator import make_checkerboard
from .svmlight_format import load_svmlight_file
from .svmlight_format import load_svmlight_files
from .svmlight_format import dump_svmlight_file
from .olivetti_faces import fetch_olivetti_faces
from .species_distributions import fetch_species_distributions
from .california_housing import fetch_california_housing
from .rcv1 import fetch_rcv1
__all__ = ['clear_data_home',
'dump_svmlight_file',
'fetch_20newsgroups',
'fetch_20newsgroups_vectorized',
'fetch_lfw_pairs',
'fetch_lfw_people',
'fetch_mldata',
'fetch_olivetti_faces',
'fetch_species_distributions',
'fetch_california_housing',
'fetch_covtype',
'fetch_rcv1',
'get_data_home',
'load_boston',
'load_diabetes',
'load_digits',
'load_files',
'load_iris',
'load_lfw_pairs',
'load_lfw_people',
'load_linnerud',
'load_mlcomp',
'load_sample_image',
'load_sample_images',
'load_svmlight_file',
'load_svmlight_files',
'make_biclusters',
'make_blobs',
'make_circles',
'make_classification',
'make_checkerboard',
'make_friedman1',
'make_friedman2',
'make_friedman3',
'make_gaussian_quantiles',
'make_hastie_10_2',
'make_low_rank_matrix',
'make_moons',
'make_multilabel_classification',
'make_regression',
'make_s_curve',
'make_sparse_coded_signal',
'make_sparse_spd_matrix',
'make_sparse_uncorrelated',
'make_spd_matrix',
'make_swiss_roll',
'mldata_filename']
| bsd-3-clause |
deepchem/deepchem | examples/low_data/tox_rf_one_fold.py | 9 | 2037 | """
Train low-data Tox21 models with random forests. Test last fold only.
"""
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
import tempfile
import numpy as np
import deepchem as dc
from datasets import load_tox21_ecfp
from sklearn.ensemble import RandomForestClassifier
# 4-fold splits
K = 4
# num positive/negative ligands
n_pos = 10
n_neg = 10
# 20 trials on test-set
n_trials = 20
tox21_tasks, dataset, transformers = load_tox21_ecfp()
# Define metric
metric = dc.metrics.Metric(dc.metrics.roc_auc_score, mode="classification")
task_splitter = dc.splits.TaskSplitter()
fold_datasets = task_splitter.k_fold_split(dataset, K)
train_folds = fold_datasets[:-1]
train_dataset = dc.splits.merge_fold_datasets(train_folds)
test_dataset = fold_datasets[-1]
# Get supports on test-set
support_generator = dc.data.SupportGenerator(
test_dataset, n_pos, n_neg, n_trials)
# Compute accuracies
task_scores = {task: [] for task in range(len(test_dataset.get_task_names()))}
for (task, support) in support_generator:
# Train model on support
sklearn_model = RandomForestClassifier(
class_weight="balanced", n_estimators=100)
model = dc.models.SklearnModel(sklearn_model)
model.fit(support)
# Test model
task_dataset = dc.data.get_task_dataset_minus_support(
test_dataset, support, task)
y_pred = model.predict_proba(task_dataset)
score = metric.compute_metric(
task_dataset.y, y_pred, task_dataset.w)
print("Score on task %s is %s" % (str(task), str(score)))
task_scores[task].append(score)
# Join information for all tasks.
mean_task_scores = {}
std_task_scores = {}
for task in range(len(test_dataset.get_task_names())):
mean_task_scores[task] = np.mean(np.array(task_scores[task]))
std_task_scores[task] = np.std(np.array(task_scores[task]))
print("Mean scores")
print(mean_task_scores)
print("Standard Deviations")
print(std_task_scores)
print("Median of Mean Scores")
print(np.median(np.array(mean_task_scores.values())))
| mit |
ComputoCienciasUniandes/MetodosComputacionalesLaboratorio | 2017-1/lab7_EJ2/lab7SOL_EJ2/pca_room.py | 1 | 3617 |
# coding: utf-8
# In[7]:
import matplotlib.pyplot as plt
import numpy as np
# Load the data
data = np.loadtxt('room-temperature.csv', skiprows=1, usecols=[1,2,3,4],delimiter=',')
front_left = data[:,0]
front_right = data[:,1]
back_left = data[:,2]
back_right = data[:,3]
# Plot the data as a function of time
figur, axarr = plt.subplots(4, 1, figsize= (10,10))
axarr[0].plot(front_left)
axarr[0].set_title('Front left')
axarr[0].set_ylabel('Temperature (AU)')
axarr[0].set_xlim([0,len(front_left)])
axarr[0].set_ylim([290,300])
axarr[1].plot(front_right)
axarr[1].set_title('Front right')
axarr[1].set_ylabel('Temperature (AU)')
axarr[1].set_xlim([0,len(front_left)])
axarr[1].set_ylim([290,300])
axarr[2].plot(back_left)
axarr[2].set_title('Back left')
axarr[2].set_ylabel('Temperature (AU)')
axarr[2].set_xlim([0,len(front_left)])
axarr[2].set_ylim([290,300])
axarr[3].plot(back_right)
axarr[3].set_title('Back right')
axarr[3].set_xlabel('Time (AU)')
axarr[3].set_ylabel('Temperature (AU)')
axarr[3].set_xlim([0,len(front_left)])
axarr[3].set_ylim([290,300])
figur.subplots_adjust(hspace=0.5)
plt.savefig('room.pdf')
plt.close()
# Perform PCA; return the eigenvalues, the eigenvectors and the data expressed in the eigenvector basis (scores)
# Also prints the necessary messages
def pca(data_matrix):
'''data_matrix must be the data matrix by COLUMNS i.e. a column is a variable and a row is an observation'''
data_matrix = data_matrix.T
cov_matrix = np.cov(data_matrix)
    print('The covariance matrix is:')
print(cov_matrix)
print('')
values, vectors = np.linalg.eig(cov_matrix.T)
    print('The two principal components in ascending order are:')
    print(vectors[:,0], ' with value ', values[0])
    print(vectors[:,1], ' with value ', values[1])
total_values = np.sum(values)
    print('\nThe first component explains', values[0]/total_values * 100, '% of the variance')
    print('The second component explains', values[1]/total_values * 100, '% of the variance')
scores = np.dot(data_matrix.T, vectors)
return values, vectors, scores
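# Editorial addition (not part of the original script, never called): a small
# sanity check for pca(). In the eigenvector basis the scores are uncorrelated,
# so their covariance matrix should be (numerically) diagonal, with the
# eigenvalues on the diagonal.
def check_pca_scores(values, scores):
    '''Return True if cov(scores) is approximately diag(eigenvalues).'''
    cov_scores = np.cov(scores.T)
    return np.allclose(cov_scores, np.diag(values), atol=1e-6)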
# Scale and center the data; return the rescaled data matrix.
def center_scale(data):
data_scaled = np.zeros_like(data)
for i in range(len(data[0])):
av_col = np.mean(data[:,i])
std_col = np.std(data[:,i])
for j in range(len(data)):
data_scaled[j,i] = ( data[j,i] - av_col )/ std_col
return data_scaled
# Plot variable j vs. variable i of the data together with the two principal components projected onto
# the plane of those variables.
def plot_eigen(data, i, j, vectors, labels, name):
    '''Plot variables i and j of the data together with the two principal components'''
plt.scatter(data[:,i], data[:,j])
x = np.linspace(min(data[:,i]), max(data[:,i]))
    plt.plot(x, x*vectors[j,0]/vectors[i,0], linewidth = 1.0, c='r', label = 'First vector')
    plt.plot(x, x*vectors[j,1]/vectors[i,1], linewidth = 1.0, c='y', label = 'Second vector')
plt.title(labels[j]+ ' vs. '+ labels[i])
plt.xlabel(labels[i])
plt.ylabel(labels[j])
plt.ylim(min(data[:,j])-1, max(data[:,j])+1)
plt.legend(loc=0)
plt.savefig(name)
plt.close()
# Scale the data
data_matrix = center_scale(data)
# Perform PCA
values, vectors, scores = pca(data_matrix)
# Generate the plots of the vectors
labels = ['Front Left', 'Front Right', 'Back left', 'Back right']
plot_eigen(data_matrix, 0, 1, vectors, labels, 'pca_fr_fl.pdf')
plot_eigen(data_matrix, 0, 2, vectors, labels, 'pca_bl_fl.pdf')
| mit |
mbalassi/streaming-performance | src/test/resources/Performance/PerformanceTracker.py | 1 | 9203 | # -*- coding: utf-8 -*-
"""
Created on Wed Apr 30 15:40:17 2014
@author: gyfora
"""
import matplotlib.pyplot as plt
import pandas as pd
import os
import operator
import sys
linestyles = ['_', '-', '--', ':']
markers=['D','s', '|', '', 'x', '_', '^', ' ', 'd', 'h', '+', '*', ',', 'o', '.', '1', 'p', 'H', 'v', '>'];
colors = ['b', 'g', 'r', 'c', 'm', 'y', 'k']
def readFiles(csv_dir):
counters=[]
for fname in os.listdir(csv_dir):
if '.csv' in fname:
counters.append((fname.rstrip('.csv'),int(fname.rstrip('.csv').split('-')[-1])-1,pd.read_csv(os.path.join(csv_dir,fname),index_col='Time')))
return counters
def plotCounter(csv_dir, sname='', smooth=5,savePath=''):
counters= readFiles(csv_dir)
addSpeed(counters)
print counters
selectedCounters=[]
for (name, number, df) in counters:
if sname in name:
selectedCounters.append((name, number, df))
if sname=='':
sname='counters'
save=savePath!=''
plotDfs(selectedCounters,smooth,save,savePath+'/'+sname)
def plotDfs(counters,smooth,save,saveFile):
plt.figure(figsize=(12, 8), dpi=80)
plt.title('Counter')
for (name, number, df) in counters:
m=markers[number%len(markers)]
df.ix[:,0].plot(marker=m,markevery=10,markersize=10)
plt.legend([x[0] for x in counters])
if save:
plt.savefig(saveFile+'C.png')
plt.figure(figsize=(12, 8), dpi=80)
plt.title('dC/dT')
for (name, number, df) in counters:
m=markers[number%len(markers)]
pd.rolling_mean(df.speed,smooth).plot(marker=m,markevery=10,markersize=10)
plt.legend([x[0] for x in counters])
if save:
plt.savefig(saveFile+'D.png')
def addSpeed(counters):
for (tname, number, df) in counters:
speed=[0]
values=list(df.ix[:,0])
for i in range(1,len(values)):
speed.append(float(values[i]-values[i-1])/float(df.index[i]-df.index[i-1]+0.01))
df['speed']=speed
return counters
def plotThroughput(csv_dir,tasknames, smooth=5,savePath=''):
if type(tasknames)!=list:
tasknames=[tasknames]
for taskname in tasknames:
counters= readFiles(csv_dir)
addSpeed(counters)
selected={}
for (tname, number, df) in counters:
if taskname in tname:
if number in selected:
selected[number].append(df)
else:
selected[number]=[df]
plt.figure()
plt.title(taskname)
for i in selected:
if len(selected[i])>1:
selected[i]=reduce(operator.add,selected[i])
else:
selected[i]=selected[i][0]
m=markers[i%len(markers)]
selected[i].ix[:,0].plot(marker=m,markevery=10,markersize=10)
plt.legend(selected.keys())
if savePath !='':
plt.savefig(savePath+'/'+taskname+'C.png')
plt.figure()
plt.title(taskname+" - dC/dT")
for i in selected:
m=markers[i%len(markers)]
pd.rolling_mean(selected[i].speed,smooth).plot(marker=m,markevery=10,markersize=10)
plt.legend(selected.keys())
if savePath !='':
plt.savefig(savePath+'/'+taskname+'D.png')
def plotTimer(csv_dir,smooth=5,std=50):
dataframes= readFiles(csv_dir)
plt.figure(figsize=(12, 8), dpi=80)
plt.title('Timer')
for dataframe in dataframes:
m=markers[dataframe[1]%len(markers)]
pd.rolling_mean(dataframe[2].ix[:,0],smooth).plot(marker=m,markevery=10,markersize=10)
plt.legend([x[0] for x in dataframes])
plt.figure(figsize=(12, 8), dpi=80)
    plt.title('Standard deviation')
for dataframe in dataframes:
m=markers[dataframe[1]%len(markers)]
pd.rolling_std(dataframe[2].ix[:,0],std).plot(marker=m,markevery=10,markersize=10)
plt.legend([x[0] for x in dataframes])
def findInterval(num):
interv = 1
diff = 1
current = 1
while True:
if num < current + diff:
return interv
elif (current + diff) % (diff * 10) == 0:
interv = interv * 10
current = interv
diff = diff * 10
else:
current += diff
def csvJoin(csv_dir, sname = ''):
fnameList = []
for fname in os.listdir(csv_dir):
if '.csv' in fname and sname in fname:
if "-0" not in fname:
fnameList.append(fname)
if len(fnameList) == 0:
return
l = fnameList[0].split('-')
newFileName = l[0] + '-' + l[1] + "-0" + ".csv"
firstLine = ""
fObjList = []
for fname in fnameList:
fObjList.append(open(csv_dir + "/" + fname, "rt"))
firstLine = fObjList[-1].readline()
newFile = open(csv_dir + "/" + newFileName, "wt")
newFile.write(firstLine)
if len(fObjList) == 1:
fObj = fObjList[0]
for line in fObj:
newFile.write(line)
else:
currentInterval = 0
first = True
stop = False
while True:
lines = []
for fObj in fObjList:
line = fObj.readline()
if line == "":
stop = True
break
else:
lines.append(line)
if stop:
break
if first:
intervalDiff = findInterval(int(lines[0].split(',')[0]))
first = False
label = ""
valuePairs = []
for line in lines:
l = line.split(',')
time = int(l[0])
counter = int(l[1])
label = l[2]
valuePairs.append((time, counter))
newCounter = 0
for pair in valuePairs:
newCounter += pair[1]
newLine = ""
newLine += str(currentInterval)
newLine += ","
newLine += str(newCounter)
newLine += ","
newLine += label
newFile.write(newLine)
currentInterval += intervalDiff
for fObj in fObjList:
fObj.close()
newFile.close()
def joinAndPlotAll(csv_dir, smooth, save_dir):
fnameList = []
for fname in os.listdir(csv_dir):
if '.csv' in fname:
fnameList.append(fname)
argsList = []
for fname in fnameList:
l = fname.split('-')
if l[1] not in argsList:
argsList.append(l[1])
for args in argsList:
csvJoin(csv_dir, args)
plotCounter(csv_dir, args + '-0', smooth, save_dir)
plotCounter(csv_dir, '-0', smooth, save_dir)
def doAllFolders(folder_path, smooth):
for fname in os.listdir(folder_path):
path = folder_path + "/" + fname
joinAndPlotAll(path, smooth, path)
if __name__ == "__main__":
csv_dir = sys.argv[1]
smooth = int(sys.argv[2])
save_dir = sys.argv[3]
mode = "singleFolder"
if len(sys.argv) > 4:
mode = sys.argv[4]
if mode == "singleFolder":
joinAndPlotAll(csv_dir, smooth, save_dir)
elif mode == "multipleFolders":
doAllFolders(csv_dir, smooth)
#folder = '../testdata/0.6/newList'
#doAllFolders(folder, 5)
#joinAndPlotAll(folder, 5, folder)
#csvJoin('/home/tofi/git/stratosphere-streaming/src/test/resources/testdata', '1_1_4_2_2_2')
#plotCounter('/home/tofi/git/stratosphere-streaming/src/test/resources/testdata', '-0', 5, '/home/tofi/git/stratosphere-streaming/src/test/resources/testdata')
#print findInterval(1005)
#plotCounter('/home/tofi/git/stratosphere-streaming/src/test/resources/testdata', '8_2_2_2', 5, '/home/tofi/git/stratosphere-streaming/src/test/resources/testdata')
#plotCounter('/home/tofi/git/stratosphere-streaming/src/test/resources/testdata', '4_2_2_2-flush', 5, '/home/tofi/git/stratosphere-streaming/src/test/resources/testdata')
#plotCounter('/home/tofi/git/stratosphere-streaming/src/test/resources/testdata', '4_2_2_2-noflush', 5, '/home/tofi/git/stratosphere-streaming/src/test/resources/testdata')
#plotCounter('/home/tofi/git/stratosphere-streaming/src/test/resources/testdata', '4_4_4_2', 5, '/home/tofi/git/stratosphere-streaming/src/test/resources/testdata')
#plotCounter('/home/tofi/git/stratosphere-streaming/src/test/resources/testdata', '4_4_2_4', 5, '/home/tofi/git/stratosphere-streaming/src/test/resources/testdata')
#plotCounter('/home/tofi/git/stratosphere-streaming/src/test/resources/testdata', '4_2_4_4', 5, '/home/tofi/git/stratosphere-streaming/src/test/resources/testdata')
#plotCounter('/home/tofi/git/stratosphere-streaming/src/test/resources/testdata', '4_4_4_4', 5, '/home/tofi/git/stratosphere-streaming/src/test/resources/testdata') | apache-2.0 |
madjelan/scikit-learn | examples/manifold/plot_compare_methods.py | 259 | 4031 | """
=========================================
Comparison of Manifold Learning methods
=========================================
An illustration of dimensionality reduction on the S-curve dataset
with various manifold learning methods.
For a discussion and comparison of these algorithms, see the
:ref:`manifold module page <manifold>`
For a similar example, where the methods are applied to a
sphere dataset, see :ref:`example_manifold_plot_manifold_sphere.py`
Note that the purpose of the MDS is to find a low-dimensional
representation of the data (here 2D) in which the distances respect well
the distances in the original high-dimensional space. Unlike other
manifold-learning algorithms, it does not seek an isotropic
representation of the data in the low-dimensional space.
"""
# Author: Jake Vanderplas -- <[email protected]>
print(__doc__)
from time import time
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib.ticker import NullFormatter
from sklearn import manifold, datasets
# Next line to silence pyflakes. This import is needed.
Axes3D
n_points = 1000
X, color = datasets.samples_generator.make_s_curve(n_points, random_state=0)
n_neighbors = 10
n_components = 2
fig = plt.figure(figsize=(15, 8))
plt.suptitle("Manifold Learning with %i points, %i neighbors"
% (1000, n_neighbors), fontsize=14)
try:
# compatibility matplotlib < 1.0
ax = fig.add_subplot(251, projection='3d')
ax.scatter(X[:, 0], X[:, 1], X[:, 2], c=color, cmap=plt.cm.Spectral)
ax.view_init(4, -72)
except:
ax = fig.add_subplot(251, projection='3d')
plt.scatter(X[:, 0], X[:, 2], c=color, cmap=plt.cm.Spectral)
methods = ['standard', 'ltsa', 'hessian', 'modified']
labels = ['LLE', 'LTSA', 'Hessian LLE', 'Modified LLE']
for i, method in enumerate(methods):
t0 = time()
Y = manifold.LocallyLinearEmbedding(n_neighbors, n_components,
eigen_solver='auto',
method=method).fit_transform(X)
t1 = time()
print("%s: %.2g sec" % (methods[i], t1 - t0))
ax = fig.add_subplot(252 + i)
plt.scatter(Y[:, 0], Y[:, 1], c=color, cmap=plt.cm.Spectral)
plt.title("%s (%.2g sec)" % (labels[i], t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
t0 = time()
Y = manifold.Isomap(n_neighbors, n_components).fit_transform(X)
t1 = time()
print("Isomap: %.2g sec" % (t1 - t0))
ax = fig.add_subplot(257)
plt.scatter(Y[:, 0], Y[:, 1], c=color, cmap=plt.cm.Spectral)
plt.title("Isomap (%.2g sec)" % (t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
t0 = time()
mds = manifold.MDS(n_components, max_iter=100, n_init=1)
Y = mds.fit_transform(X)
t1 = time()
print("MDS: %.2g sec" % (t1 - t0))
ax = fig.add_subplot(258)
plt.scatter(Y[:, 0], Y[:, 1], c=color, cmap=plt.cm.Spectral)
plt.title("MDS (%.2g sec)" % (t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
t0 = time()
se = manifold.SpectralEmbedding(n_components=n_components,
n_neighbors=n_neighbors)
Y = se.fit_transform(X)
t1 = time()
print("SpectralEmbedding: %.2g sec" % (t1 - t0))
ax = fig.add_subplot(259)
plt.scatter(Y[:, 0], Y[:, 1], c=color, cmap=plt.cm.Spectral)
plt.title("SpectralEmbedding (%.2g sec)" % (t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
t0 = time()
tsne = manifold.TSNE(n_components=n_components, init='pca', random_state=0)
Y = tsne.fit_transform(X)
t1 = time()
print("t-SNE: %.2g sec" % (t1 - t0))
ax = fig.add_subplot(2, 5, 10)
plt.scatter(Y[:, 0], Y[:, 1], c=color, cmap=plt.cm.Spectral)
plt.title("t-SNE (%.2g sec)" % (t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
plt.show()
| bsd-3-clause |
carrillo/scikit-learn | sklearn/metrics/cluster/unsupervised.py | 230 | 8281 | """ Unsupervised evaluation metrics. """
# Authors: Robert Layton <[email protected]>
#
# License: BSD 3 clause
import numpy as np
from ...utils import check_random_state
from ..pairwise import pairwise_distances
def silhouette_score(X, labels, metric='euclidean', sample_size=None,
random_state=None, **kwds):
"""Compute the mean Silhouette Coefficient of all samples.
The Silhouette Coefficient is calculated using the mean intra-cluster
distance (``a``) and the mean nearest-cluster distance (``b``) for each
sample. The Silhouette Coefficient for a sample is ``(b - a) / max(a,
b)``. To clarify, ``b`` is the distance between a sample and the nearest
cluster that the sample is not a part of.
    Note that the Silhouette Coefficient is only defined if the number of labels
is 2 <= n_labels <= n_samples - 1.
This function returns the mean Silhouette Coefficient over all samples.
To obtain the values for each sample, use :func:`silhouette_samples`.
The best value is 1 and the worst value is -1. Values near 0 indicate
overlapping clusters. Negative values generally indicate that a sample has
been assigned to the wrong cluster, as a different cluster is more similar.
Read more in the :ref:`User Guide <silhouette_coefficient>`.
Parameters
----------
X : array [n_samples_a, n_samples_a] if metric == "precomputed", or, \
[n_samples_a, n_features] otherwise
Array of pairwise distances between samples, or a feature array.
labels : array, shape = [n_samples]
Predicted labels for each sample.
metric : string, or callable
The metric to use when calculating distance between instances in a
feature array. If metric is a string, it must be one of the options
allowed by :func:`metrics.pairwise.pairwise_distances
<sklearn.metrics.pairwise.pairwise_distances>`. If X is the distance
array itself, use ``metric="precomputed"``.
sample_size : int or None
The size of the sample to use when computing the Silhouette Coefficient
on a random subset of the data.
If ``sample_size is None``, no sampling is used.
random_state : integer or numpy.RandomState, optional
The generator used to randomly select a subset of samples if
``sample_size is not None``. If an integer is given, it fixes the seed.
Defaults to the global numpy random number generator.
`**kwds` : optional keyword parameters
Any further parameters are passed directly to the distance function.
If using a scipy.spatial.distance metric, the parameters are still
metric dependent. See the scipy docs for usage examples.
Returns
-------
silhouette : float
Mean Silhouette Coefficient for all samples.
References
----------
.. [1] `Peter J. Rousseeuw (1987). "Silhouettes: a Graphical Aid to the
Interpretation and Validation of Cluster Analysis". Computational
and Applied Mathematics 20: 53-65.
<http://www.sciencedirect.com/science/article/pii/0377042787901257>`_
.. [2] `Wikipedia entry on the Silhouette Coefficient
<http://en.wikipedia.org/wiki/Silhouette_(clustering)>`_
"""
n_labels = len(np.unique(labels))
n_samples = X.shape[0]
if not 1 < n_labels < n_samples:
raise ValueError("Number of labels is %d. Valid values are 2 "
"to n_samples - 1 (inclusive)" % n_labels)
if sample_size is not None:
random_state = check_random_state(random_state)
indices = random_state.permutation(X.shape[0])[:sample_size]
if metric == "precomputed":
X, labels = X[indices].T[indices].T, labels[indices]
else:
X, labels = X[indices], labels[indices]
return np.mean(silhouette_samples(X, labels, metric=metric, **kwds))
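# Worked example of the coefficient for a single sample (editorial note): with a
# mean intra-cluster distance a = 1.0 and a mean nearest-cluster distance
# b = 3.0, the score is (b - a) / max(a, b) = (3.0 - 1.0) / 3.0 ~= 0.67.
# A typical (hypothetical) call would be
#   labels = KMeans(n_clusters=3).fit_predict(X)
#   score = silhouette_score(X, labels, metric='euclidean')
# for a feature array X of shape (n_samples, n_features).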
def silhouette_samples(X, labels, metric='euclidean', **kwds):
"""Compute the Silhouette Coefficient for each sample.
The Silhouette Coefficient is a measure of how well samples are clustered
with samples that are similar to themselves. Clustering models with a high
Silhouette Coefficient are said to be dense, where samples in the same
cluster are similar to each other, and well separated, where samples in
different clusters are not very similar to each other.
The Silhouette Coefficient is calculated using the mean intra-cluster
distance (``a``) and the mean nearest-cluster distance (``b``) for each
sample. The Silhouette Coefficient for a sample is ``(b - a) / max(a,
b)``.
    Note that the Silhouette Coefficient is only defined if the number of labels
is 2 <= n_labels <= n_samples - 1.
This function returns the Silhouette Coefficient for each sample.
The best value is 1 and the worst value is -1. Values near 0 indicate
overlapping clusters.
Read more in the :ref:`User Guide <silhouette_coefficient>`.
Parameters
----------
X : array [n_samples_a, n_samples_a] if metric == "precomputed", or, \
[n_samples_a, n_features] otherwise
Array of pairwise distances between samples, or a feature array.
labels : array, shape = [n_samples]
label values for each sample
metric : string, or callable
The metric to use when calculating distance between instances in a
feature array. If metric is a string, it must be one of the options
allowed by :func:`sklearn.metrics.pairwise.pairwise_distances`. If X is
the distance array itself, use "precomputed" as the metric.
`**kwds` : optional keyword parameters
Any further parameters are passed directly to the distance function.
If using a ``scipy.spatial.distance`` metric, the parameters are still
metric dependent. See the scipy docs for usage examples.
Returns
-------
silhouette : array, shape = [n_samples]
Silhouette Coefficient for each samples.
References
----------
.. [1] `Peter J. Rousseeuw (1987). "Silhouettes: a Graphical Aid to the
Interpretation and Validation of Cluster Analysis". Computational
and Applied Mathematics 20: 53-65.
<http://www.sciencedirect.com/science/article/pii/0377042787901257>`_
.. [2] `Wikipedia entry on the Silhouette Coefficient
<http://en.wikipedia.org/wiki/Silhouette_(clustering)>`_
"""
distances = pairwise_distances(X, metric=metric, **kwds)
n = labels.shape[0]
A = np.array([_intra_cluster_distance(distances[i], labels, i)
for i in range(n)])
B = np.array([_nearest_cluster_distance(distances[i], labels, i)
for i in range(n)])
sil_samples = (B - A) / np.maximum(A, B)
return sil_samples
def _intra_cluster_distance(distances_row, labels, i):
"""Calculate the mean intra-cluster distance for sample i.
Parameters
----------
distances_row : array, shape = [n_samples]
Pairwise distance matrix between sample i and each sample.
labels : array, shape = [n_samples]
label values for each sample
i : int
Sample index being calculated. It is excluded from calculation and
used to determine the current label
Returns
-------
a : float
Mean intra-cluster distance for sample i
"""
mask = labels == labels[i]
mask[i] = False
if not np.any(mask):
# cluster of size 1
return 0
a = np.mean(distances_row[mask])
return a
def _nearest_cluster_distance(distances_row, labels, i):
"""Calculate the mean nearest-cluster distance for sample i.
Parameters
----------
distances_row : array, shape = [n_samples]
Pairwise distance matrix between sample i and each sample.
labels : array, shape = [n_samples]
label values for each sample
i : int
Sample index being calculated. It is used to determine the current
label.
Returns
-------
b : float
Mean nearest-cluster distance for sample i
"""
label = labels[i]
b = np.min([np.mean(distances_row[labels == cur_label])
for cur_label in set(labels) if not cur_label == label])
return b
| bsd-3-clause |
VHarisop/PyMP | experiments/plot_surface.py | 1 | 4585 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# plots the "border" enforced by a MorphologicalPerceptron using
# unitary parameters
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import rc
import pymp, argparse
def check_erosion_dilation_mp(limit, save_file=None):
"""
Generates a set of points in the grid [-limit, limit] to plot the decision
surface of an ErosionDilationPerceptron.
Optionally, saves the resulting plot to an .eps file.
Args:
limit (int, int) -- a tuple containing the range of the grid
save_file (str) -- the name of the file to save the figure in
"""
xs = (np.random.uniform(*limit) for _ in range(10000))
ys = (np.random.uniform(*limit) for _ in range(10000))
# Create an ErosionDilationPerceptron and compute its outputs
slmp = pymp.ErosionDilationPerceptron((-0.81, -0.2), (0.4, 0.31))
outputs = [
(slmp.response_at((x, y)), x, y) for x, y in zip(xs, ys)
]
green_points = [(x, y) for p, x, y in outputs if p >= 0]
red_points = [(x, y) for p, x, y in outputs if p < 0]
# plot all accepted points
plt.scatter(
*zip(*green_points),
color = 'g'
)
# plot all rejected points
plt.scatter(
*zip(*red_points),
color = 'r'
)
# Adjust the size of the bounding boxes
box = plt.subplot(111).get_position()
plt.subplot(111).set_xlim(list(limit))
plt.subplot(111).set_ylim(list(limit))
plt.grid()
min_w = '$w_x = %.2f, w_y = %.2f$' % tuple(slmp.erosion_weights)
max_w = '$m_x = %.2f, m_y = %.2f$' % tuple(slmp.dilation_weights)
plt.title('Decision surface by EDMP with: {0}, {1}'.format(min_w, max_w))
if save_file:
plt.savefig('{0}_edmp.eps'.format(save_file), format='eps')
else:
plt.show()
plt.clf()
def check_morphological_perceptron(limit, save_file = None):
"""
Generates a set of points in the grid [-limit, limit] to demonstrate
the decision surface generated by a morphological perceptron that is
initialized by randomly chosen parameters.
Optionally, saves the resulting figure to an .eps file.
Args:
limit (int, int) -- a tuple containing the limits of the grid
save_file (str) -- the name of the file to save the figure in
"""
# Choose 10,000 points randomly
xs = (np.random.uniform(*limit) for _ in range(10000))
ys = (np.random.uniform(*limit) for _ in range(10000))
# Create a randomly initialized morphological perceptron
slmp = pymp.MorphologicalPerceptron.random_init(2, limit)
# Calculate all outputs of the perceptron
outputs = [
(slmp.output_at(point), point[0], point[1]) \
for point in zip(xs, ys)
]
# Get the accepted and reject points
green_points = list((x, y) for p, x, y in outputs if p > 0)
red_points = list((x, y) for p, x, y in outputs if p <= 0)
# plot all red points
plt.scatter(
*zip(*red_points),
color = 'r'
)
# plot all green points
plt.scatter(
*zip(*green_points),
color = 'g'
)
# Adjust the size of the bounding boxes
box = plt.subplot(111).get_position()
plt.subplot(111).set_xlim(list(limit))
plt.subplot(111).set_ylim(list(limit))
min_w = '$w_x = %.2f, w_y = %.2f$' % tuple(slmp.min_weights)
max_w = '$m_x = %.2f, m_y = %.2f$' % tuple(slmp.max_weights)
plt.title('Decision surface by SLMP with: {0}, {1}'.format(min_w, max_w))
plt.grid()
if save_file:
plt.savefig('{0}_slmp.eps'.format(save_file), format='eps')
else:
plt.show()
plt.clf()
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description = 'Plots the decision surface for 2D SLMPs with \
randomly initialized weights.'
)
# optional range for the grid coordinates
parser.add_argument('-r', '--range',
help = 'The range of the grid ([-range, range] for each dimension)',
required = False,
type = int,
default = 1
)
# optional output file for the plot
parser.add_argument('-o', '--output',
help = 'The name of file to save the plot in EPS format',
required = False,
type = str
)
args = vars(parser.parse_args())
# set grid point limits
limit = (-args['range'], args['range'])
rc('text', usetex=True)
rc('font', family='serif')
check_morphological_perceptron(limit, args['output'])
plt.clf()
check_erosion_dilation_mp(limit, args['output'])
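# Example invocation (illustrative; the script name is hypothetical):
#   python plot_decision_surface.py --range 2 --output surface
# With --output given, the figures are written to surface_slmp.eps and
# surface_edmp.eps instead of being shown interactively.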
| gpl-3.0 |
adammenges/statsmodels | examples/python/kernel_density.py | 33 | 1805 |
## Kernel Density Estimation
import numpy as np
from scipy import stats
import statsmodels.api as sm
import matplotlib.pyplot as plt
from statsmodels.distributions.mixture_rvs import mixture_rvs
##### A univariate example.
np.random.seed(12345)
obs_dist1 = mixture_rvs([.25,.75], size=10000, dist=[stats.norm, stats.norm],
kwargs = (dict(loc=-1,scale=.5),dict(loc=1,scale=.5)))
kde = sm.nonparametric.KDEUnivariate(obs_dist1)
kde.fit()
fig = plt.figure(figsize=(12,8))
ax = fig.add_subplot(111)
ax.hist(obs_dist1, bins=50, normed=True, color='red')
ax.plot(kde.support, kde.density, lw=2, color='black');
obs_dist2 = mixture_rvs([.25,.75], size=10000, dist=[stats.norm, stats.beta],
kwargs = (dict(loc=-1,scale=.5),dict(loc=1,scale=1,args=(1,.5))))
kde2 = sm.nonparametric.KDEUnivariate(obs_dist2)
kde2.fit()
fig = plt.figure(figsize=(12,8))
ax = fig.add_subplot(111)
ax.hist(obs_dist2, bins=50, normed=True, color='red')
ax.plot(kde2.support, kde2.density, lw=2, color='black');
# The fitted KDE object is a full non-parametric distribution.
obs_dist3 = mixture_rvs([.25,.75], size=1000, dist=[stats.norm, stats.norm],
kwargs = (dict(loc=-1,scale=.5),dict(loc=1,scale=.5)))
kde3 = sm.nonparametric.KDEUnivariate(obs_dist3)
kde3.fit()
kde3.entropy
kde3.evaluate(-1)
##### CDF
fig = plt.figure(figsize=(12,8))
ax = fig.add_subplot(111)
ax.plot(kde3.support, kde3.cdf);
##### Cumulative Hazard Function
fig = plt.figure(figsize=(12,8))
ax = fig.add_subplot(111)
ax.plot(kde3.support, kde3.cumhazard);
##### Inverse CDF
fig = plt.figure(figsize=(12,8))
ax = fig.add_subplot(111)
ax.plot(kde3.support, kde3.icdf);
##### Survival Function
fig = plt.figure(figsize=(12,8))
ax = fig.add_subplot(111)
ax.plot(kde3.support, kde3.sf);
| bsd-3-clause |
gnuradio/gnuradio | gr-fec/python/fec/polar/decoder.py | 6 | 9766 | #!/usr/bin/env python
#
# Copyright 2015 Free Software Foundation, Inc.
#
# SPDX-License-Identifier: GPL-3.0-or-later
#
#
import numpy as np
from .common import PolarCommon
# for dev
from .encoder import PolarEncoder
from matplotlib import pyplot as plt
class PolarDecoder(PolarCommon):
def __init__(self, n, k, frozen_bit_position, frozenbits=None):
PolarCommon.__init__(self, n, k, frozen_bit_position, frozenbits)
self.error_probability = 0.1 # this is kind of a dummy value. usually chosen individually.
self.lrs = ((1 - self.error_probability) / self.error_probability, self.error_probability / (1 - self.error_probability))
self.llrs = np.log(self.lrs)
def _llr_bit(self, bit):
return self.llrs[bit]
def _llr_odd(self, la, lb):
# this functions uses the min-sum approximation
# exact formula: np.log((np.exp(la + lb) + 1) / (np.exp(la) + np.exp(lb)))
return np.sign(la) * np.sign(lb) * np.minimum(np.abs(la), np.abs(lb))
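    # Illustrative comparison (example values only, not part of the original
    # implementation): for la = 1.2, lb = -0.8 the min-sum rule above returns
    # sign(la) * sign(lb) * min(|la|, |lb|) = -0.8, while the exact formula
    # np.log((np.exp(la + lb) + 1) / (np.exp(la) + np.exp(lb))) gives about
    # -0.41, i.e. the approximation keeps the sign but overestimates magnitude.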
_f_vals = np.array((1.0, -1.0), dtype=float)
def _llr_even(self, la, lb, f):
return (la * self._f_vals[f]) + lb
def _llr_bit_decision(self, llr):
if llr < 0.0:
ui = int(1)
else:
ui = int(0)
return ui
def _retrieve_bit_from_llr(self, lr, pos):
f_index = np.where(self.frozen_bit_position == pos)[0]
if not f_index.size == 0:
ui = self.frozenbits[f_index][0]
else:
ui = self._llr_bit_decision(lr)
return ui
def _lr_bit(self, bit):
return self.lrs[bit]
def _lr_odd(self, la, lb):
# la is upper branch and lb is lower branch
return (la * lb + 1) / (la + lb)
def _lr_even(self, la, lb, f):
# la is upper branch and lb is lower branch, f is last decoded bit.
return (la ** (1 - (2 * f))) * lb
def _lr_bit_decision(self, lr):
if lr < 1:
return int(1)
return int(0)
def _get_even_indices_values(self, u_hat):
        # looks like overkill for some indexing, but zero and one based indexing mix-up gives you headaches.
return u_hat[1::2]
def _get_odd_indices_values(self, u_hat):
return u_hat[0::2]
def _calculate_lrs(self, y, u):
ue = self._get_even_indices_values(u)
uo = self._get_odd_indices_values(u)
ya = y[0:y.size//2]
yb = y[(y.size//2):]
la = self._lr_decision_element(ya, (ue + uo) % 2)
lb = self._lr_decision_element(yb, ue)
return la, lb
def _lr_decision_element(self, y, u):
if y.size == 1:
return self._llr_bit(y[0])
if u.size % 2 == 0: # use odd branch formula
la, lb = self._calculate_lrs(y, u)
return self._llr_odd(la, lb)
else:
ui = u[-1]
la, lb = self._calculate_lrs(y, u[0:-1])
return self._llr_even(la, lb, ui)
def _retrieve_bit_from_lr(self, lr, pos):
f_index = np.where(self.frozen_bit_position == pos)[0]
if not f_index.size == 0:
ui = self.frozenbits[f_index][0]
else:
ui = self._lr_bit_decision(lr)
return ui
def _lr_sc_decoder(self, y):
# this is the standard SC decoder as derived from the formulas. It sticks to natural bit order.
u = np.array([], dtype=int)
for i in range(y.size):
lr = self._lr_decision_element(y, u)
ui = self._retrieve_bit_from_llr(lr, i)
u = np.append(u, ui)
return u
def _llr_retrieve_bit(self, llr, pos):
f_index = np.where(self.frozen_bit_position == pos)[0]
if not f_index.size == 0:
ui = self.frozenbits[f_index][0]
else:
ui = self._llr_bit_decision(llr)
return ui
def _butterfly_decode_bits(self, pos, graph, u):
bit_num = u.size
llr = graph[pos][0]
ui = self._llr_retrieve_bit(llr, bit_num)
# ui = self._llr_bit_decision(llr)
u = np.append(u, ui)
lower_right = pos + (self.N // 2)
la = graph[pos][1]
lb = graph[lower_right][1]
graph[lower_right][0] = self._llr_even(la, lb, ui)
llr = graph[lower_right][0]
# ui = self._llr_bit_decision(llr)
ui = self._llr_retrieve_bit(llr, u.size)
u = np.append(u, ui)
return graph, u
def _lr_sc_decoder_efficient(self, y):
graph = np.full((self.N, self.power + 1), np.NaN, dtype=float)
for i in range(self.N):
graph[i][self.power] = self._llr_bit(y[i])
decode_order = self._vector_bit_reversed(np.arange(self.N), self.power)
decode_order = np.delete(decode_order, np.where(decode_order >= self.N // 2))
u = np.array([], dtype=int)
for pos in decode_order:
graph = self._butterfly(pos, 0, graph, u)
graph, u = self._butterfly_decode_bits(pos, graph, u)
return u
def _stop_propagation(self, bf_entry_row, stage):
# calculate break condition
modulus = 2 ** (self.power - stage)
# stage_size = self.N // (2 ** stage)
# half_stage_size = stage_size // 2
half_stage_size = self.N // (2 ** (stage + 1))
stage_pos = bf_entry_row % modulus
return stage_pos >= half_stage_size
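    # Worked example (illustrative): with N = 8 (power = 3) and stage = 0,
    # modulus = 8 and half_stage_size = 4, so _stop_propagation is True for
    # rows 4..7 -- the lower half of the butterfly at that stage.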
def _butterfly(self, bf_entry_row, stage, graph, u):
if not self.power > stage:
return graph
if self._stop_propagation(bf_entry_row, stage):
upper_right = bf_entry_row - self.N // (2 ** (stage + 1))
la = graph[upper_right][stage + 1]
lb = graph[bf_entry_row][stage + 1]
ui = u[-1]
graph[bf_entry_row][stage] = self._llr_even(la, lb, ui)
return graph
# activate right side butterflies
u_even = self._get_even_indices_values(u)
u_odd = self._get_odd_indices_values(u)
graph = self._butterfly(bf_entry_row, stage + 1, graph, (u_even + u_odd) % 2)
lower_right = bf_entry_row + self.N // (2 ** (stage + 1))
graph = self._butterfly(lower_right, stage + 1, graph, u_even)
la = graph[bf_entry_row][stage + 1]
lb = graph[lower_right][stage + 1]
graph[bf_entry_row][stage] = self._llr_odd(la, lb)
return graph
def decode(self, data, is_packed=False):
if not len(data) == self.N:
raise ValueError("len(data)={0} is not equal to n={1}!".format(len(data), self.N))
if is_packed:
data = np.unpackbits(data)
data = self._lr_sc_decoder_efficient(data)
data = self._extract_info_bits(data)
if is_packed:
data = np.packbits(data)
return data
def _extract_info_bits_reversed(self, y):
info_bit_positions_reversed = self._vector_bit_reversed(self.info_bit_position, self.power)
return y[info_bit_positions_reversed]
def decode_systematic(self, data):
if not len(data) == self.N:
raise ValueError("len(data)={0} is not equal to n={1}!".format(len(data), self.N))
# data = self._reverse_bits(data)
data = self._lr_sc_decoder_efficient(data)
data = self._encode_natural_order(data)
data = self._extract_info_bits_reversed(data)
return data
def test_systematic_decoder():
ntests = 1000
n = 16
k = 8
frozenbitposition = np.array((0, 1, 2, 3, 4, 5, 8, 9), dtype=int)
encoder = PolarEncoder(n, k, frozenbitposition)
decoder = PolarDecoder(n, k, frozenbitposition)
for i in range(ntests):
bits = np.random.randint(2, size=k)
y = encoder.encode_systematic(bits)
u_hat = decoder.decode_systematic(y)
assert (bits == u_hat).all()
def test_reverse_enc_dec():
n = 16
k = 8
frozenbits = np.zeros(n - k)
frozenbitposition = np.array((0, 1, 2, 3, 4, 5, 8, 9), dtype=int)
bits = np.random.randint(2, size=k)
encoder = PolarEncoder(n, k, frozenbitposition, frozenbits)
decoder = PolarDecoder(n, k, frozenbitposition, frozenbits)
encoded = encoder.encode(bits)
print('encoded:', encoded)
rx = decoder.decode(encoded)
print('bits:', bits)
print('rx :', rx)
print((bits == rx).all())
def compare_decoder_impls():
print('\nthis is decoder test')
n = 8
k = 4
frozenbits = np.zeros(n - k)
# frozenbitposition16 = np.array((0, 1, 2, 3, 4, 5, 8, 9), dtype=int)
frozenbitposition = np.array((0, 1, 2, 4), dtype=int)
bits = np.random.randint(2, size=k)
print('bits:', bits)
encoder = PolarEncoder(n, k, frozenbitposition, frozenbits)
decoder = PolarDecoder(n, k, frozenbitposition, frozenbits)
encoded = encoder.encode(bits)
print('encoded:', encoded)
rx_st = decoder._lr_sc_decoder(encoded)
rx_eff = decoder._lr_sc_decoder_efficient(encoded)
print('standard :', rx_st)
print('efficient:', rx_eff)
print((rx_st == rx_eff).all())
def main():
# power = 3
# n = 2 ** power
# k = 4
# frozenbits = np.zeros(n - k, dtype=int)
# frozenbitposition = np.array((0, 1, 2, 4), dtype=int)
# frozenbitposition4 = np.array((0, 1), dtype=int)
#
#
# encoder = PolarEncoder(n, k, frozenbitposition, frozenbits)
# decoder = PolarDecoder(n, k, frozenbitposition, frozenbits)
#
# bits = np.ones(k, dtype=int)
# print("bits: ", bits)
# evec = encoder.encode(bits)
# print("froz: ", encoder._insert_frozen_bits(bits))
# print("evec: ", evec)
#
# evec[1] = 0
# deced = decoder._lr_sc_decoder(evec)
# print('SC decoded:', deced)
#
# test_reverse_enc_dec()
# compare_decoder_impls()
test_systematic_decoder()
if __name__ == '__main__':
main()
| gpl-3.0 |
fatiando/fatiando | cookbook/seismic_wavefd_elastic_sh.py | 9 | 2247 | """
Seismic: 2D finite difference simulation of elastic SH wave propagation
"""
import numpy as np
from matplotlib import animation
from fatiando import gridder
from fatiando.seismic import wavefd
from fatiando.vis import mpl
# Set the parameters of the finite difference grid
shape = (150, 150)
area = [0, 60000, 0, 60000]
# Make a density and S wave velocity model
density = 2400 * np.ones(shape)
velocity = 3700
mu = wavefd.lame_mu(velocity, density)
# Make a wave source from a mexican hat wavelet
sources = [wavefd.MexHatSource(30000, 15000, area, shape, 100, 1, delay=2)]
# Get the iterator for the simulation
dt = wavefd.maxdt(area, shape, velocity)
duration = 20
maxit = int(duration / dt)
stations = [[50000, 0]] # x, z coordinate of the seismometer
snapshot = int(0.5 / dt) # Plot a snapshot of the simulation every 0.5 seconds
simulation = wavefd.elastic_sh(mu, density, area, dt, maxit, sources, stations,
snapshot, padding=50, taper=0.01)
# This part makes an animation using matplotlibs animation API
fig = mpl.figure(figsize=(14, 5))
ax = mpl.subplot(1, 2, 2)
mpl.title('Wavefield')
# Start with everything zero and grab the plot so that it can be updated later
wavefield_plt = mpl.imshow(np.zeros(shape), extent=area, vmin=-10 ** (-5),
vmax=10 ** (-5), cmap=mpl.cm.gray_r)
mpl.points(stations, '^b')
mpl.xlim(area[:2])
mpl.ylim(area[2:][::-1])
mpl.xlabel('x (km)')
mpl.ylabel('z (km)')
mpl.subplot(1, 2, 1)
seismogram_plt, = mpl.plot([], [], '-k')
mpl.xlim(0, duration)
mpl.ylim(-10 ** (-4), 10 ** (-4))
mpl.xlabel('time (s)')
mpl.ylabel('Amplitude')
times = np.linspace(0, duration, maxit)
# Update the plot everytime the simulation yields
def animate(i):
"""
    Grab the iteration number, displacement panel and seismograms
"""
t, u, seismograms = simulation.next()
mpl.title('time: %0.1f s' % (times[t]))
wavefield_plt.set_array(u[::-1]) # Revert the z axis so that 0 is top
seismogram_plt.set_data(times[:t + 1], seismograms[0][:t + 1])
return wavefield_plt, seismogram_plt
anim = animation.FuncAnimation(
fig, animate, frames=maxit / snapshot, interval=1)
# anim.save('sh_wave.mp4', fps=10, dpi=200, bitrate=4000)
mpl.show()
| bsd-3-clause |
mblondel/scikit-learn | sklearn/utils/random.py | 19 | 10413 | # Author: Hamzeh Alsalhi <[email protected]>
#
# License: BSD 3 clause
from __future__ import division
import numpy as np
import scipy.sparse as sp
import operator
import array
from sklearn.utils import check_random_state
from ._random import sample_without_replacement
__all__ = ['sample_without_replacement', 'choice']
# This is a backport of np.random.choice from numpy 1.7
# The function can be removed when we bump the requirements to >=1.7
def choice(a, size=None, replace=True, p=None, random_state=None):
"""
choice(a, size=None, replace=True, p=None)
Generates a random sample from a given 1-D array
.. versionadded:: 1.7.0
Parameters
-----------
a : 1-D array-like or int
If an ndarray, a random sample is generated from its elements.
If an int, the random sample is generated as if a was np.arange(n)
size : int or tuple of ints, optional
Output shape. Default is None, in which case a single value is
returned.
replace : boolean, optional
Whether the sample is with or without replacement.
p : 1-D array-like, optional
The probabilities associated with each entry in a.
        If not given the sample assumes a uniform distribution over all
entries in a.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
--------
samples : 1-D ndarray, shape (size,)
The generated random samples
Raises
-------
ValueError
If a is an int and less than zero, if a or p are not 1-dimensional,
if a is an array-like of size 0, if p is not a vector of
probabilities, if a and p have different lengths, or if
replace=False and the sample size is greater than the population
size
See Also
---------
randint, shuffle, permutation
Examples
---------
Generate a uniform random sample from np.arange(5) of size 3:
>>> np.random.choice(5, 3) # doctest: +SKIP
array([0, 3, 4])
>>> #This is equivalent to np.random.randint(0,5,3)
Generate a non-uniform random sample from np.arange(5) of size 3:
>>> np.random.choice(5, 3, p=[0.1, 0, 0.3, 0.6, 0]) # doctest: +SKIP
array([3, 3, 0])
Generate a uniform random sample from np.arange(5) of size 3 without
replacement:
>>> np.random.choice(5, 3, replace=False) # doctest: +SKIP
array([3,1,0])
>>> #This is equivalent to np.random.shuffle(np.arange(5))[:3]
Generate a non-uniform random sample from np.arange(5) of size
3 without replacement:
>>> np.random.choice(5, 3, replace=False, p=[0.1, 0, 0.3, 0.6, 0])
... # doctest: +SKIP
array([2, 3, 0])
Any of the above can be repeated with an arbitrary array-like
instead of just integers. For instance:
>>> aa_milne_arr = ['pooh', 'rabbit', 'piglet', 'Christopher']
>>> np.random.choice(aa_milne_arr, 5, p=[0.5, 0.1, 0.1, 0.3])
... # doctest: +SKIP
array(['pooh', 'pooh', 'pooh', 'Christopher', 'piglet'],
dtype='|S11')
"""
random_state = check_random_state(random_state)
# Format and Verify input
a = np.array(a, copy=False)
if a.ndim == 0:
try:
# __index__ must return an integer by python rules.
pop_size = operator.index(a.item())
except TypeError:
raise ValueError("a must be 1-dimensional or an integer")
if pop_size <= 0:
raise ValueError("a must be greater than 0")
elif a.ndim != 1:
raise ValueError("a must be 1-dimensional")
else:
pop_size = a.shape[0]
        if pop_size == 0:
            raise ValueError("a must be non-empty")
    if p is not None:
p = np.array(p, dtype=np.double, ndmin=1, copy=False)
if p.ndim != 1:
raise ValueError("p must be 1-dimensional")
if p.size != pop_size:
raise ValueError("a and p must have same size")
if np.any(p < 0):
raise ValueError("probabilities are not non-negative")
if not np.allclose(p.sum(), 1):
raise ValueError("probabilities do not sum to 1")
shape = size
if shape is not None:
size = np.prod(shape, dtype=np.intp)
else:
size = 1
# Actual sampling
if replace:
        if p is not None:
cdf = p.cumsum()
cdf /= cdf[-1]
uniform_samples = random_state.random_sample(shape)
idx = cdf.searchsorted(uniform_samples, side='right')
# searchsorted returns a scalar
idx = np.array(idx, copy=False)
else:
idx = random_state.randint(0, pop_size, size=shape)
else:
if size > pop_size:
raise ValueError("Cannot take a larger sample than "
"population when 'replace=False'")
        if p is not None:
if np.sum(p > 0) < size:
raise ValueError("Fewer non-zero entries in p than size")
n_uniq = 0
p = p.copy()
found = np.zeros(shape, dtype=np.int)
flat_found = found.ravel()
while n_uniq < size:
x = random_state.rand(size - n_uniq)
if n_uniq > 0:
p[flat_found[0:n_uniq]] = 0
cdf = np.cumsum(p)
cdf /= cdf[-1]
new = cdf.searchsorted(x, side='right')
_, unique_indices = np.unique(new, return_index=True)
unique_indices.sort()
new = new.take(unique_indices)
flat_found[n_uniq:n_uniq + new.size] = new
n_uniq += new.size
idx = found
else:
idx = random_state.permutation(pop_size)[:size]
if shape is not None:
idx.shape = shape
if shape is None and isinstance(idx, np.ndarray):
# In most cases a scalar will have been made an array
idx = idx.item(0)
# Use samples as indices for a if a is array-like
if a.ndim == 0:
return idx
if shape is not None and idx.ndim == 0:
# If size == () then the user requested a 0-d array as opposed to
# a scalar object when size is None. However a[idx] is always a
# scalar and not an array. So this makes sure the result is an
# array, taking into account that np.array(item) may not work
# for object arrays.
res = np.empty((), dtype=a.dtype)
res[()] = a[idx]
return res
return a[idx]
def random_choice_csc(n_samples, classes, class_probability=None,
random_state=None):
"""Generate a sparse random matrix given column class distributions
Parameters
----------
n_samples : int,
Number of samples to draw in each column.
classes : list of size n_outputs of arrays of size (n_classes,)
List of classes for each column.
class_probability : list of size n_outputs of arrays of size (n_classes,)
Optional (default=None). Class distribution of each column. If None the
uniform distribution is assumed.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
random_matrix : sparse csc matrix of size (n_samples, n_outputs)
"""
data = array.array('i')
indices = array.array('i')
indptr = array.array('i', [0])
for j in range(len(classes)):
classes[j] = np.asarray(classes[j])
if classes[j].dtype.kind != 'i':
raise ValueError("class dtype %s is not supported" %
classes[j].dtype)
classes[j] = classes[j].astype(int)
# use uniform distribution if no class_probability is given
if class_probability is None:
class_prob_j = np.empty(shape=classes[j].shape[0])
class_prob_j.fill(1 / classes[j].shape[0])
else:
class_prob_j = np.asarray(class_probability[j])
if np.sum(class_prob_j) != 1.0:
raise ValueError("Probability array at index {0} does not sum to "
"one".format(j))
if class_prob_j.shape[0] != classes[j].shape[0]:
raise ValueError("classes[{0}] (length {1}) and "
"class_probability[{0}] (length {2}) have "
"different length.".format(j,
classes[j].shape[0],
class_prob_j.shape[0]))
# If 0 is not present in the classes insert it with a probability 0.0
if 0 not in classes[j]:
classes[j] = np.insert(classes[j], 0, 0)
class_prob_j = np.insert(class_prob_j, 0, 0.0)
# If there are nonzero classes choose randomly using class_probability
if classes[j].shape[0] > 1:
p_nonzero = 1 - class_prob_j[classes[j] == 0]
nnz = int(n_samples * p_nonzero)
ind_sample = sample_without_replacement(n_population=n_samples,
n_samples=nnz,
random_state=random_state)
indices.extend(ind_sample)
# Normalize probabilites for the nonzero elements
classes_j_nonzero = classes[j] != 0
class_probability_nz = class_prob_j[classes_j_nonzero]
class_probability_nz_norm = (class_probability_nz /
np.sum(class_probability_nz))
classes_ind = np.searchsorted(class_probability_nz_norm.cumsum(),
np.random.rand(nnz))
data.extend(classes[j][classes_j_nonzero][classes_ind])
indptr.append(len(indices))
return sp.csc_matrix((data, indices, indptr),
(n_samples, len(classes)),
dtype=int)
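# Illustrative usage sketch (not part of the original module); the class lists,
# probabilities and sample count below are arbitrary example values.
def _example_random_choice_csc():
    classes = [np.array([0, 1, 2]), np.array([0, 3])]
    class_probability = [np.array([0.5, 0.3, 0.2]), np.array([0.9, 0.1])]
    sampled = random_choice_csc(n_samples=10, classes=classes,
                                class_probability=class_probability,
                                random_state=0)
    # One column per output, n_samples rows, zeros stored implicitly.
    return sampled.toarray()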
| bsd-3-clause |
mdepasca/miniature-adventure | classes_dir/plot/plot_k.py | 1 | 2399 | from matplotlib import pyplot as plt
import numpy as np
# import supernova
# import supernova_fit
# import utilities as util
# import classes
def plot_k(snObj, save=False, filename='plot_k.png'):
lambda_obs = [479.66, 638.26, 776.90, 910.82]
z = 0.10#(snObj.zSpec if snObj.zSpec else snObj.zPhotHost)
lambda_rest = [el/(1+z) for el in lambda_obs]
snrSortIdx = np.argsort(snObj.r.snr)
bandList = snObj.lcsDict.keys()
print snrSortIdx
print [snObj.lcsDict[el].mjd[snrSortIdx[-1]] for el in bandList]
flux = [snObj.lcsDict[el].flux[snrSortIdx[-1]] for el in bandList]
gKcor = np.interp(lambda_obs[0], lambda_rest[0:2], flux[0:2])
fig = plt.figure()
plt.subplots_adjust(top=0.93, right=0.96, left=0.08)
plt.suptitle('Source at z={:<3.2f}'.format(z))
plt.plot([0, lambda_obs[0], lambda_obs[0]], [gKcor, gKcor, 0], figure=fig,
color='black', linestyle='-.')
plt.plot(lambda_obs, flux, color='red', linestyle='-',
marker='o', markeredgecolor='red', markerfacecolor='red',
label='Obs. rest frame')
plt.plot(lambda_rest, flux, color='blue', linestyle='--',
marker='o', markeredgecolor='blue', markerfacecolor='blue',
figure=fig, label='SN rest frame')
plt.scatter(lambda_obs[0], gKcor, marker='o',
edgecolor='black', facecolor='black')
plt.xlim([np.floor(lambda_obs[0]/100.)*100, np.ceil(lambda_obs[-1]/100.)*100])
plt.ylim([min(flux)-2, max(flux)+2])
plt.xlabel('Wavelength [nm]')
plt.ylabel('Flux (at max S/N)')
plt.legend(loc='best', numpoints=2, handlelength=3)
if save:
plt.savefig(filename, dpi=300, format='png')
plt.show()
"""
either get the observation file or get a supernova object...
    - get a supernova object (independent of storing format!)
- needs:
.filter's central wavelength
.to check for observations in all bands at the same *integer* mjd
    - blueshifts filters to rest-frame wavelength
- plot same mjd flux measures (either max flux or max SNR)
- plot lines with dots
- plot observer rest frame central wavelength of each filter
- plot shift direction arrow
- title containing source redshift
- axes labels
"""
if __name__ == "__main__":
import utilities as util
snObj = util.get_sn_from_file('train_data/SIMGEN_PUBLIC_DES/DES_SN000017.DAT')
plot_k(snObj, save=True)
| unlicense |
Achuth17/scikit-learn | examples/missing_values.py | 233 | 3056 | """
======================================================
Imputing missing values before building an estimator
======================================================
This example shows that imputing the missing values can give better results
than discarding the samples containing any missing value.
Imputing does not always improve the predictions, so please check via cross-validation.
Sometimes dropping rows or using marker values is more effective.
Missing values can be replaced by the mean, the median or the most frequent
value using the ``strategy`` hyper-parameter.
The median is a more robust estimator for data with high magnitude variables
which could dominate results (otherwise known as a 'long tail').
Script output::
Score with the entire dataset = 0.56
Score without the samples containing missing values = 0.48
Score after imputation of the missing values = 0.55
In this case, imputing helps the classifier get close to the original score.
"""
import numpy as np
from sklearn.datasets import load_boston
from sklearn.ensemble import RandomForestRegressor
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import Imputer
from sklearn.cross_validation import cross_val_score
rng = np.random.RandomState(0)
dataset = load_boston()
X_full, y_full = dataset.data, dataset.target
n_samples = X_full.shape[0]
n_features = X_full.shape[1]
# Estimate the score on the entire dataset, with no missing values
estimator = RandomForestRegressor(random_state=0, n_estimators=100)
score = cross_val_score(estimator, X_full, y_full).mean()
print("Score with the entire dataset = %.2f" % score)
# Add missing values in 75% of the lines
missing_rate = 0.75
n_missing_samples = np.floor(n_samples * missing_rate)
missing_samples = np.hstack((np.zeros(n_samples - n_missing_samples,
dtype=np.bool),
np.ones(n_missing_samples,
dtype=np.bool)))
rng.shuffle(missing_samples)
missing_features = rng.randint(0, n_features, n_missing_samples)
# Estimate the score without the lines containing missing values
X_filtered = X_full[~missing_samples, :]
y_filtered = y_full[~missing_samples]
estimator = RandomForestRegressor(random_state=0, n_estimators=100)
score = cross_val_score(estimator, X_filtered, y_filtered).mean()
print("Score without the samples containing missing values = %.2f" % score)
# Estimate the score after imputation of the missing values
X_missing = X_full.copy()
X_missing[np.where(missing_samples)[0], missing_features] = 0
y_missing = y_full.copy()
estimator = Pipeline([("imputer", Imputer(missing_values=0,
strategy="mean",
axis=0)),
("forest", RandomForestRegressor(random_state=0,
n_estimators=100))])
score = cross_val_score(estimator, X_missing, y_missing).mean()
print("Score after imputation of the missing values = %.2f" % score)
| bsd-3-clause |
joelfrederico/SciSalt | scisalt/matplotlib/addlabel.py | 1 | 1328 | import os as _os
_on_rtd = _os.environ.get('READTHEDOCS', None) == 'True'
if not _on_rtd:
import matplotlib.pyplot as _plt
import logging as _logging
logger = _logging.getLogger(__name__)
def addlabel(ax=None, toplabel=None, xlabel=None, ylabel=None, zlabel=None, clabel=None, cb=None, windowlabel=None, fig=None, axes=None):
"""Adds labels to a plot."""
if (axes is None) and (ax is not None):
axes = ax
if (windowlabel is not None) and (fig is not None):
fig.canvas.set_window_title(windowlabel)
if fig is None:
fig = _plt.gcf()
if fig is not None and axes is None:
axes = fig.get_axes()
if axes == []:
logger.error('No axes found!')
if axes is not None:
if toplabel is not None:
axes.set_title(toplabel)
if xlabel is not None:
axes.set_xlabel(xlabel)
if ylabel is not None:
axes.set_ylabel(ylabel)
if zlabel is not None:
axes.set_zlabel(zlabel)
if (clabel is not None) or (cb is not None):
if (clabel is not None) and (cb is not None):
cb.set_label(clabel)
else:
if clabel is None:
logger.error('Missing colorbar label')
else:
logger.error('Missing colorbar instance')
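# Illustrative usage (hypothetical figure and labels, not part of the original
# module):
def _example_addlabel():
    fig, ax = _plt.subplots()
    ax.plot([0, 1], [0, 1])
    addlabel(ax=ax, fig=fig, toplabel='Example', xlabel='x [m]', ylabel='y [m]',
             windowlabel='addlabel demo')
    return fig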
| mit |
xuewei4d/scikit-learn | sklearn/decomposition/tests/test_online_lda.py | 11 | 14166 | import sys
import numpy as np
from scipy.linalg import block_diag
from scipy.sparse import csr_matrix
from scipy.special import psi
import pytest
from sklearn.decomposition import LatentDirichletAllocation
from sklearn.decomposition._lda import (_dirichlet_expectation_1d,
_dirichlet_expectation_2d)
from sklearn.utils._testing import assert_allclose
from sklearn.utils._testing import assert_array_almost_equal
from sklearn.utils._testing import assert_almost_equal
from sklearn.utils._testing import if_safe_multiprocessing_with_blas
from sklearn.exceptions import NotFittedError
from io import StringIO
def _build_sparse_mtx():
# Create 3 topics and each topic has 3 distinct words.
# (Each word only belongs to a single topic.)
n_components = 3
block = np.full((3, 3), n_components, dtype=int)
blocks = [block] * n_components
X = block_diag(*blocks)
X = csr_matrix(X)
return (n_components, X)
def test_lda_default_prior_params():
# default prior parameter should be `1 / topics`
# and verbose params should not affect result
n_components, X = _build_sparse_mtx()
prior = 1. / n_components
lda_1 = LatentDirichletAllocation(n_components=n_components,
doc_topic_prior=prior,
topic_word_prior=prior, random_state=0)
lda_2 = LatentDirichletAllocation(n_components=n_components,
random_state=0)
topic_distr_1 = lda_1.fit_transform(X)
topic_distr_2 = lda_2.fit_transform(X)
assert_almost_equal(topic_distr_1, topic_distr_2)
def test_lda_fit_batch():
# Test LDA batch learning_offset (`fit` method with 'batch' learning)
rng = np.random.RandomState(0)
n_components, X = _build_sparse_mtx()
lda = LatentDirichletAllocation(n_components=n_components,
evaluate_every=1, learning_method='batch',
random_state=rng)
lda.fit(X)
correct_idx_grps = [(0, 1, 2), (3, 4, 5), (6, 7, 8)]
for component in lda.components_:
# Find top 3 words in each LDA component
top_idx = set(component.argsort()[-3:][::-1])
assert tuple(sorted(top_idx)) in correct_idx_grps
def test_lda_fit_online():
# Test LDA online learning (`fit` method with 'online' learning)
rng = np.random.RandomState(0)
n_components, X = _build_sparse_mtx()
lda = LatentDirichletAllocation(n_components=n_components,
learning_offset=10., evaluate_every=1,
learning_method='online', random_state=rng)
lda.fit(X)
correct_idx_grps = [(0, 1, 2), (3, 4, 5), (6, 7, 8)]
for component in lda.components_:
# Find top 3 words in each LDA component
top_idx = set(component.argsort()[-3:][::-1])
assert tuple(sorted(top_idx)) in correct_idx_grps
def test_lda_partial_fit():
# Test LDA online learning (`partial_fit` method)
# (same as test_lda_batch)
rng = np.random.RandomState(0)
n_components, X = _build_sparse_mtx()
lda = LatentDirichletAllocation(n_components=n_components,
learning_offset=10., total_samples=100,
random_state=rng)
for i in range(3):
lda.partial_fit(X)
correct_idx_grps = [(0, 1, 2), (3, 4, 5), (6, 7, 8)]
for c in lda.components_:
top_idx = set(c.argsort()[-3:][::-1])
assert tuple(sorted(top_idx)) in correct_idx_grps
def test_lda_dense_input():
# Test LDA with dense input.
rng = np.random.RandomState(0)
n_components, X = _build_sparse_mtx()
lda = LatentDirichletAllocation(n_components=n_components,
learning_method='batch', random_state=rng)
lda.fit(X.toarray())
correct_idx_grps = [(0, 1, 2), (3, 4, 5), (6, 7, 8)]
for component in lda.components_:
# Find top 3 words in each LDA component
top_idx = set(component.argsort()[-3:][::-1])
assert tuple(sorted(top_idx)) in correct_idx_grps
def test_lda_transform():
# Test LDA transform.
# Transform result cannot be negative and should be normalized
rng = np.random.RandomState(0)
X = rng.randint(5, size=(20, 10))
n_components = 3
lda = LatentDirichletAllocation(n_components=n_components,
random_state=rng)
X_trans = lda.fit_transform(X)
assert (X_trans > 0.0).any()
assert_array_almost_equal(np.sum(X_trans, axis=1),
np.ones(X_trans.shape[0]))
@pytest.mark.parametrize('method', ('online', 'batch'))
def test_lda_fit_transform(method):
# Test LDA fit_transform & transform
# fit_transform and transform result should be the same
rng = np.random.RandomState(0)
X = rng.randint(10, size=(50, 20))
lda = LatentDirichletAllocation(n_components=5, learning_method=method,
random_state=rng)
X_fit = lda.fit_transform(X)
X_trans = lda.transform(X)
assert_array_almost_equal(X_fit, X_trans, 4)
def test_invalid_params():
# test `_check_params` method
X = np.ones((5, 10))
invalid_models = (
('n_components', LatentDirichletAllocation(n_components=0)),
('learning_method',
LatentDirichletAllocation(learning_method='unknown')),
('total_samples', LatentDirichletAllocation(total_samples=0)),
('learning_offset', LatentDirichletAllocation(learning_offset=-1)),
)
for param, model in invalid_models:
regex = r"^Invalid %r parameter" % param
with pytest.raises(ValueError, match=regex):
model.fit(X)
def test_lda_negative_input():
# test pass dense matrix with sparse negative input.
X = np.full((5, 10), -1.)
lda = LatentDirichletAllocation()
regex = r"^Negative values in data passed"
with pytest.raises(ValueError, match=regex):
lda.fit(X)
def test_lda_no_component_error():
# test `perplexity` before `fit`
rng = np.random.RandomState(0)
X = rng.randint(4, size=(20, 10))
lda = LatentDirichletAllocation()
regex = ("This LatentDirichletAllocation instance is not fitted yet. "
"Call 'fit' with appropriate arguments before using this "
"estimator.")
with pytest.raises(NotFittedError, match=regex):
lda.perplexity(X)
@if_safe_multiprocessing_with_blas
@pytest.mark.parametrize('method', ('online', 'batch'))
def test_lda_multi_jobs(method):
n_components, X = _build_sparse_mtx()
# Test LDA batch training with multi CPU
rng = np.random.RandomState(0)
lda = LatentDirichletAllocation(n_components=n_components, n_jobs=2,
learning_method=method,
evaluate_every=1, random_state=rng)
lda.fit(X)
correct_idx_grps = [(0, 1, 2), (3, 4, 5), (6, 7, 8)]
for c in lda.components_:
top_idx = set(c.argsort()[-3:][::-1])
assert tuple(sorted(top_idx)) in correct_idx_grps
@if_safe_multiprocessing_with_blas
def test_lda_partial_fit_multi_jobs():
# Test LDA online training with multi CPU
rng = np.random.RandomState(0)
n_components, X = _build_sparse_mtx()
lda = LatentDirichletAllocation(n_components=n_components, n_jobs=2,
learning_offset=5., total_samples=30,
random_state=rng)
for i in range(2):
lda.partial_fit(X)
correct_idx_grps = [(0, 1, 2), (3, 4, 5), (6, 7, 8)]
for c in lda.components_:
top_idx = set(c.argsort()[-3:][::-1])
assert tuple(sorted(top_idx)) in correct_idx_grps
def test_lda_preplexity_mismatch():
# test dimension mismatch in `perplexity` method
rng = np.random.RandomState(0)
n_components = rng.randint(3, 6)
n_samples = rng.randint(6, 10)
X = np.random.randint(4, size=(n_samples, 10))
lda = LatentDirichletAllocation(n_components=n_components,
learning_offset=5., total_samples=20,
random_state=rng)
lda.fit(X)
# invalid samples
invalid_n_samples = rng.randint(4, size=(n_samples + 1, n_components))
with pytest.raises(ValueError, match=r'Number of samples'):
lda._perplexity_precomp_distr(X, invalid_n_samples)
# invalid topic number
invalid_n_components = rng.randint(4, size=(n_samples, n_components + 1))
with pytest.raises(ValueError, match=r'Number of topics'):
lda._perplexity_precomp_distr(X, invalid_n_components)
@pytest.mark.parametrize('method', ('online', 'batch'))
def test_lda_perplexity(method):
# Test LDA perplexity for batch training
# perplexity should be lower after each iteration
n_components, X = _build_sparse_mtx()
lda_1 = LatentDirichletAllocation(n_components=n_components,
max_iter=1, learning_method=method,
total_samples=100, random_state=0)
lda_2 = LatentDirichletAllocation(n_components=n_components,
max_iter=10, learning_method=method,
total_samples=100, random_state=0)
lda_1.fit(X)
perp_1 = lda_1.perplexity(X, sub_sampling=False)
lda_2.fit(X)
perp_2 = lda_2.perplexity(X, sub_sampling=False)
assert perp_1 >= perp_2
perp_1_subsampling = lda_1.perplexity(X, sub_sampling=True)
perp_2_subsampling = lda_2.perplexity(X, sub_sampling=True)
assert perp_1_subsampling >= perp_2_subsampling
@pytest.mark.parametrize('method', ('online', 'batch'))
def test_lda_score(method):
# Test LDA score for batch training
# score should be higher after each iteration
n_components, X = _build_sparse_mtx()
lda_1 = LatentDirichletAllocation(n_components=n_components,
max_iter=1, learning_method=method,
total_samples=100, random_state=0)
lda_2 = LatentDirichletAllocation(n_components=n_components,
max_iter=10, learning_method=method,
total_samples=100, random_state=0)
lda_1.fit_transform(X)
score_1 = lda_1.score(X)
lda_2.fit_transform(X)
score_2 = lda_2.score(X)
assert score_2 >= score_1
def test_perplexity_input_format():
# Test LDA perplexity for sparse and dense input
# score should be the same for both dense and sparse input
n_components, X = _build_sparse_mtx()
lda = LatentDirichletAllocation(n_components=n_components, max_iter=1,
learning_method='batch',
total_samples=100, random_state=0)
lda.fit(X)
perp_1 = lda.perplexity(X)
perp_2 = lda.perplexity(X.toarray())
assert_almost_equal(perp_1, perp_2)
def test_lda_score_perplexity():
# Test the relationship between LDA score and perplexity
n_components, X = _build_sparse_mtx()
lda = LatentDirichletAllocation(n_components=n_components, max_iter=10,
random_state=0)
lda.fit(X)
perplexity_1 = lda.perplexity(X, sub_sampling=False)
score = lda.score(X)
perplexity_2 = np.exp(-1. * (score / np.sum(X.data)))
assert_almost_equal(perplexity_1, perplexity_2)
def test_lda_fit_perplexity():
# Test that the perplexity computed during fit is consistent with what is
# returned by the perplexity method
n_components, X = _build_sparse_mtx()
lda = LatentDirichletAllocation(n_components=n_components, max_iter=1,
learning_method='batch', random_state=0,
evaluate_every=1)
lda.fit(X)
# Perplexity computed at end of fit method
perplexity1 = lda.bound_
# Result of perplexity method on the train set
perplexity2 = lda.perplexity(X)
assert_almost_equal(perplexity1, perplexity2)
def test_lda_empty_docs():
"""Test LDA on empty document (all-zero rows)."""
Z = np.zeros((5, 4))
for X in [Z, csr_matrix(Z)]:
lda = LatentDirichletAllocation(max_iter=750).fit(X)
assert_almost_equal(lda.components_.sum(axis=0),
np.ones(lda.components_.shape[1]))
def test_dirichlet_expectation():
"""Test Cython version of Dirichlet expectation calculation."""
x = np.logspace(-100, 10, 10000)
expectation = np.empty_like(x)
_dirichlet_expectation_1d(x, 0, expectation)
assert_allclose(expectation, np.exp(psi(x) - psi(np.sum(x))),
atol=1e-19)
x = x.reshape(100, 100)
assert_allclose(_dirichlet_expectation_2d(x),
psi(x) - psi(np.sum(x, axis=1)[:, np.newaxis]),
rtol=1e-11, atol=3e-9)
def check_verbosity(verbose, evaluate_every, expected_lines,
expected_perplexities):
n_components, X = _build_sparse_mtx()
lda = LatentDirichletAllocation(n_components=n_components, max_iter=3,
learning_method='batch',
verbose=verbose,
evaluate_every=evaluate_every,
random_state=0)
out = StringIO()
old_out, sys.stdout = sys.stdout, out
try:
lda.fit(X)
finally:
sys.stdout = old_out
n_lines = out.getvalue().count('\n')
n_perplexity = out.getvalue().count('perplexity')
assert expected_lines == n_lines
assert expected_perplexities == n_perplexity
@pytest.mark.parametrize(
'verbose,evaluate_every,expected_lines,expected_perplexities',
[(False, 1, 0, 0),
(False, 0, 0, 0),
(True, 0, 3, 0),
(True, 1, 3, 3),
(True, 2, 3, 1)])
def test_verbosity(verbose, evaluate_every, expected_lines,
expected_perplexities):
check_verbosity(verbose, evaluate_every, expected_lines,
expected_perplexities)
| bsd-3-clause |
buguen/pylayers | pylayers/antprop/examples/ex_antenna51.py | 3 | 2027 | from pylayers.antprop.antenna import *
from pylayers.antprop.antvsh import *
import matplotlib.pylab as plt
from numpy import *
import pdb
"""
This test :
1 : loads a measured antenna
2 : applies an electrical delay obtained from data with getdelay method
3 : evaluates the antenna vsh coefficients with a downsampling factor of 2
4 : evaluates the relative error of reconstruction (vsh3) for various values of order l
5 : displays the results
"""
filename = 'S1R1.mat'
A = Antenna(filename,'ant/UWBAN/Matfile')
#plot(freq,angle(A.Ftheta[:,maxPowerInd[1],maxPowerInd[2]]*exp(2j*pi*freq.reshape(len(freq))*electricalDelay)))
freq = A.fa.reshape(104,1,1)
delayCandidates = arange(-10,10,0.001)
electricalDelay = A.getdelay(freq,delayCandidates)
disp('Electrical Delay = ' + str(electricalDelay)+' ns')
A.Ftheta = A.Ftheta*exp(2*1j*pi*freq*electricalDelay)
A.Fphi = A.Fphi*exp(2*1j*pi*freq*electricalDelay)
dsf = 2
A = vsh(A,dsf)
tn = []
tet = []
tep = []
te = []
tmse = []
l = 20
A.C.s1tos2(l)
u = np.shape(A.C.Br.s2)
Nf = u[0]
Nk = u[1]
tr = np.arange(2,Nk)
A.C.s2tos3_new(Nk)
UA = np.sum(A.C.Cr.s3*np.conj(A.C.Cr.s3),axis=0)
ua = A.C.Cr.ind3
da ={}
for k in range(Nk):
da[str(ua[k])]=UA[k]
tu = []
errelTha,errelPha,errela = A.errel(20,dsf,typ='s3')
print "a: nok",errela,errelPha,errelTha
for r in tr:
E = A.C.s2tos3_new(r)
errelTh,errelPh,errel = A.errel(20,dsf,typ='s3')
print 'r : ',r,errel,E
tet.append(errelTh)
tep.append(errelPh)
te.append(errel)
#
line1 = plt.plot(array(tr),10*log10(array(tep)),'b')
line2 = plt.plot(array(tr),10*log10(array(tet)),'r')
line3 = plt.plot(array(tr),10*log10(array(te)),'g')
#
plt.xlabel('order l')
plt.ylabel(u'$\epsilon_{rel}$ (dB)',fontsize=18)
plt.title('Evolution of reconstruction relative error wrt order')
plt.legend((u'$\epsilon_{rel}^{\phi}$',u'$\epsilon_{rel}^{\\theta}$',u'$\epsilon_{rel}^{total}$'))
plt.legend((line1,line2,line3),('a','b','c'))
plt.show()
plt.legend(('errel_phi','errel_theta','errel'))
| lgpl-3.0 |
awblocker/cplate | scripts/betas_to_bed.py | 1 | 1878 | """Converts cplate betas to a BED file."""
import argparse
import os
import re
import pandas as pd
COLUMNS = ['chrom', 'start', 'end', 'name', 'score', 'strand']
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('--summaries', dest='summaries',
help='cplate summaries file')
parser.add_argument('--gene_number', dest='gene_number', default=-1,
type=int, help='row number for gene')
parser.add_argument('--genes', dest='genes', help='gene definition file')
parser.add_argument('--output', dest='output', default='',
help='output BED file')
return parser.parse_args()
def main():
args = parse_args()
summaries = pd.read_table(args.summaries, delimiter=' ')
# Infer gene number if needed.
gene_number = args.gene_number
if gene_number < 0:
gene_number = (
int(re.search(r'gene(\d+)', args.summaries).group(1)) - 1)
# Infer output path if needed.
output = args.output
if output == '':
output = os.path.splitext(args.summaries)[0] + '_beta.bed'
genes = pd.read_csv(args.genes)
gene = genes.iloc[gene_number]
intervals = []
for i, row in summaries.iterrows():
start = i
end = i + 1
interval = {'start': start,
'end': end,
'name': '.',
'score': row['b'],
'strand': '.',
}
intervals.append(interval)
intervals = pd.DataFrame(intervals)
intervals['chrom'] = gene['Chrom']
intervals['start'] = intervals['start'] + gene['Start']
intervals['end'] = intervals['end'] + gene['Start']
intervals = intervals[COLUMNS]
intervals.to_csv(output, sep='\t', header=False, index=False, quoting=False)
if __name__ == '__main__':
main()
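# Example invocation (illustrative; the file names are hypothetical):
#   python betas_to_bed.py --summaries results/summaries_gene12.txt \
#       --genes genes.csv --output gene12_beta.bed
# If --gene_number or --output are omitted, main() infers them from the
# summaries path as shown above.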
| apache-2.0 |
rseubert/scikit-learn | examples/bicluster/plot_spectral_biclustering.py | 403 | 2011 | """
=============================================
A demo of the Spectral Biclustering algorithm
=============================================
This example demonstrates how to generate a checkerboard dataset and
bicluster it using the Spectral Biclustering algorithm.
The data is generated with the ``make_checkerboard`` function, then
shuffled and passed to the Spectral Biclustering algorithm. The rows
and columns of the shuffled matrix are rearranged to show the
biclusters found by the algorithm.
The outer product of the row and column label vectors shows a
representation of the checkerboard structure.
"""
print(__doc__)
# Author: Kemal Eren <[email protected]>
# License: BSD 3 clause
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import make_checkerboard
from sklearn.datasets import samples_generator as sg
from sklearn.cluster.bicluster import SpectralBiclustering
from sklearn.metrics import consensus_score
n_clusters = (4, 3)
data, rows, columns = make_checkerboard(
shape=(300, 300), n_clusters=n_clusters, noise=10,
shuffle=False, random_state=0)
plt.matshow(data, cmap=plt.cm.Blues)
plt.title("Original dataset")
data, row_idx, col_idx = sg._shuffle(data, random_state=0)
plt.matshow(data, cmap=plt.cm.Blues)
plt.title("Shuffled dataset")
model = SpectralBiclustering(n_clusters=n_clusters, method='log',
random_state=0)
model.fit(data)
score = consensus_score(model.biclusters_,
(rows[:, row_idx], columns[:, col_idx]))
print("consensus score: {:.1f}".format(score))
fit_data = data[np.argsort(model.row_labels_)]
fit_data = fit_data[:, np.argsort(model.column_labels_)]
plt.matshow(fit_data, cmap=plt.cm.Blues)
plt.title("After biclustering; rearranged to show biclusters")
plt.matshow(np.outer(np.sort(model.row_labels_) + 1,
np.sort(model.column_labels_) + 1),
cmap=plt.cm.Blues)
plt.title("Checkerboard structure of rearranged data")
plt.show()
| bsd-3-clause |
akionakamura/scikit-learn | sklearn/decomposition/__init__.py | 99 | 1331 | """
The :mod:`sklearn.decomposition` module includes matrix decomposition
algorithms, including among others PCA, NMF or ICA. Most of the algorithms of
this module can be regarded as dimensionality reduction techniques.
"""
from .nmf import NMF, ProjectedGradientNMF
from .pca import PCA, RandomizedPCA
from .incremental_pca import IncrementalPCA
from .kernel_pca import KernelPCA
from .sparse_pca import SparsePCA, MiniBatchSparsePCA
from .truncated_svd import TruncatedSVD
from .fastica_ import FastICA, fastica
from .dict_learning import (dict_learning, dict_learning_online, sparse_encode,
DictionaryLearning, MiniBatchDictionaryLearning,
SparseCoder)
from .factor_analysis import FactorAnalysis
from ..utils.extmath import randomized_svd
__all__ = ['DictionaryLearning',
'FastICA',
'IncrementalPCA',
'KernelPCA',
'MiniBatchDictionaryLearning',
'MiniBatchSparsePCA',
'NMF',
'PCA',
'ProjectedGradientNMF',
'RandomizedPCA',
'SparseCoder',
'SparsePCA',
'dict_learning',
'dict_learning_online',
'fastica',
'randomized_svd',
'sparse_encode',
'FactorAnalysis',
'TruncatedSVD']
| bsd-3-clause |
sssllliang/edx-analytics-pipeline | edx/analytics/tasks/tests/acceptance/test_internal_reporting_user.py | 2 | 2665 | """
End to end test of the internal reporting user table loading task.
"""
import os
import logging
import datetime
import pandas
from luigi.date_interval import Date
from edx.analytics.tasks.tests.acceptance import AcceptanceTestCase
from edx.analytics.tasks.url import url_path_join
log = logging.getLogger(__name__)
class InternalReportingUserLoadAcceptanceTest(AcceptanceTestCase):
"""End-to-end test of the workflow to load the internal reporting warehouse's user table."""
INPUT_FILE = 'location_by_course_tracking.log'
DATE_INTERVAL = Date(2014, 7, 21)
def setUp(self):
super(InternalReportingUserLoadAcceptanceTest, self).setUp()
# Set up the mock LMS databases.
self.execute_sql_fixture_file('load_auth_user_for_internal_reporting_user.sql')
self.execute_sql_fixture_file('load_auth_userprofile.sql')
# Put up the mock tracking log for user locations.
self.upload_tracking_log(self.INPUT_FILE, datetime.datetime(2014, 7, 21))
def test_internal_reporting_user(self):
"""Tests the workflow for the internal reporting user table, end to end."""
self.task.launch([
'LoadInternalReportingUserToWarehouse',
'--interval', self.DATE_INTERVAL.to_string(),
'--user-country-output', url_path_join(self.test_out, 'user'),
'--n-reduce-tasks', str(self.NUM_REDUCERS),
])
self.validate_output()
def validate_output(self):
"""Validates the output, comparing it to a csv of all the expected output from this workflow."""
with self.vertica.cursor() as cursor:
expected_output_csv = os.path.join(self.data_dir, 'output', 'acceptance_expected_d_user.csv')
expected = pandas.read_csv(expected_output_csv, parse_dates=True)
cursor.execute("SELECT * FROM {schema}.d_user".format(schema=self.vertica.schema_name))
response = cursor.fetchall()
d_user = pandas.DataFrame(response, columns=['user_id', 'user_year_of_birth', 'user_level_of_education',
'user_gender', 'user_email', 'user_username',
'user_account_creation_time',
'user_last_location_country_code'])
try: # A ValueError will be thrown if the column names don't match or the two data frames are not square.
self.assertTrue(all(d_user == expected))
except ValueError:
self.fail("Expected and returned data frames have different shapes or labels.")
| agpl-3.0 |
leonardbinet/Transilien-Api-ETL | api_etl/builder_feature_matrix.py | 2 | 35646 | """Module containing class to build feature matrices for prediction.
There are two kinds of features:
- features for the direct prediction model
- features for the recursive prediction model
Only the first one is used for now.
"""
from os import path, makedirs
import logging
from datetime import datetime, timedelta
import numpy as np
import pandas as pd
from api_etl.utils_misc import (
get_paris_local_datetime_now, DateConverter, S3Bucket
)
from api_etl.querier_schedule import DBQuerier
from api_etl.querier_realtime import ResultsSet
from api_etl.settings import __S3_BUCKETS__, __TRAINING_SET_FOLDER_PATH__, __RAW_DAYS_FOLDER_PATH__, __DATA_PATH__
logger = logging.getLogger(__name__)
pd.options.mode.chained_assignment = None
class DayMatrixBuilder:
"""Build features and label matrices from data available from schedule
and from realtime info.
1st step (init): get all information from day (schedule+realtime):
needs day parameter (else set to today).
2nd step: build matrices using only data available at given time: needs
time parameter (else set to time now).
Still "beta" functionality: provide df directly.
"""
def __init__(self, day=None, df=None):
""" Given a day, will query schedule and realtime information to
provide a dataframe containing all stops.
"""
# Arguments validation and parsing
if day:
# will raise error if wrong format
datetime.strptime(day, "%Y%m%d")
self.day = str(day)
else:
dt_today = get_paris_local_datetime_now()
self.day = dt_today.strftime("%Y%m%d")
logger.info("Day considered: %s" % self.day)
if isinstance(df, pd.DataFrame):
self._initial_df = df
self._builder_realtime_request_time = None
logger.info("Dataframe provided for day %s" % self.day)
else:
logger.info("Requesting data for day %s" % self.day)
self.querier = DBQuerier(scheduled_day=self.day)
# Get schedule
self.stops_results = self.querier.stoptimes(on_day=self.day, level=4)
self.serialized_stoptimes = ResultsSet(self.stops_results)
logger.info("Schedule queried.")
# Perform realtime queries
dt_realtime_request = get_paris_local_datetime_now()
self._builder_realtime_request_time = dt_realtime_request\
.strftime("%H:%M:%S")
self.serialized_stoptimes.batch_realtime_query(self.day)
logger.info("RealTime queried.")
# Export flat dict as dataframe
self._initial_df = pd\
.DataFrame(self.serialized_stoptimes.get_flat_dicts())
logger.info("Initial dataframe created.")
# Datetime considered as now
self.paris_datetime_now = get_paris_local_datetime_now()
self._clean_initial_df()
logger.info("Initial dataframe cleaned.")
self._compute_initial_dates()
logger.info("Initial dataframe calculations computed.")
def _clean_initial_df(self):
""" Set Nan values, and convert necessary columns as float.
"""
# Replace Unknown by Nan
self._initial_df.replace("Unknown", np.nan, inplace=True)
# Convert to numeric
cols_to_num = ["StopTime_stop_sequence", "RealTime_data_freshness"]
for col in cols_to_num:
self._initial_df.loc[:, col] = pd\
.to_numeric(self._initial_df.loc[:, col], errors="coerce")
def _compute_initial_dates(self):
""" Adds following columns:
- D_business_day: bool
- D_stop_special_day: scheduled_day str, day in special date (25h)
- D_total_sequence: int: number of stops scheduled per trip
- D_stop_scheduled_datetime: datetime of scheduled stoptime
- D_trip_passed_scheduled_stop: bool
"""
# Detect if working day
self._initial_df.loc[:, "D_business_day"] = bool(
len(pd.bdate_range(self.day, self.day)))
# Write stoptime_day
self._initial_df.loc[:, "D_stop_special_day"] = self.day
# Scheduled stop datetime
self._initial_df.loc[:, "D_stop_scheduled_datetime"] = self._initial_df\
.StopTime_departure_time\
.apply(lambda x: DateConverter(
special_time=x,
special_date=self.day,
force_regular_date=True
).dt
)
# Has really passed schedule
self._initial_df.loc[:, "D_trip_passed_scheduled_stop"] = self._initial_df.D_stop_scheduled_datetime\
.apply(lambda x:
(self.paris_datetime_now - x).total_seconds() >= 0
)
# Observed stop datetime
self._initial_df.loc[:, "D_stop_observed_datetime"] = self\
._initial_df[self._initial_df.RealTime_data_freshness.notnull()]\
.apply(lambda x: DateConverter(
special_time=x.RealTime_expected_passage_time,
special_date=x.RealTime_expected_passage_day
).dt,
axis=1
)
self._initial_df.loc[:, "D_trip_time_to_observed_stop"] = self\
._initial_df[self._initial_df.D_stop_observed_datetime.notnull()]\
.D_stop_observed_datetime\
.apply(lambda x:
(self.paris_datetime_now - x).total_seconds()
)
# Has really passed observed stop
self._initial_df.loc[:, "D_trip_passed_observed_stop"] = self\
._initial_df[self._initial_df.D_stop_observed_datetime.notnull()]\
.D_trip_time_to_observed_stop\
.apply(lambda x: (x >= 0))
# Trip delay
self._initial_df.loc[:, "D_trip_delay"] = self\
._initial_df[self._initial_df.RealTime_data_freshness.notnull()]\
.apply(
lambda x:
(x["D_stop_observed_datetime"] -
x["D_stop_scheduled_datetime"])
.total_seconds(),
axis=1
)
# Trips total number of stops
trips_total_number_stations = self._initial_df\
.groupby("Trip_trip_id")["Stop_stop_id"].count()
trips_total_number_stations.name = "D_trip_number_of_stops"
self._initial_df = self._initial_df\
.join(trips_total_number_stations, on="Trip_trip_id")
def stats(self):
message = """
SUMMARY FOR DAY %(day)s: based on information available and requested
at time %(request_time)s, and trips passage being evaluated given time
%(date_now)s
TRIPS
Number of trips today: %(trips_today)s
STOPTIMES
Number of stop times that day: %(stoptimes_today)s
- Passed:
- scheduled: %(stoptimes_passed)s
- observed: %(stoptimes_passed_observed)s
- Not passed yet:
- scheduled: %(stoptimes_not_passed)s
- observed (predictions on boards) %(stoptimes_not_passed_observed)s
"""
self.summary = {
"day": self.day,
"request_time": self._builder_realtime_request_time,
"date_now": self.paris_datetime_now,
"trips_today": len(self._initial_df.Trip_trip_id.unique()),
"stoptimes_today": self._initial_df.Trip_trip_id.count(),
"stoptimes_passed": self._initial_df
.D_trip_passed_scheduled_stop.sum(),
"stoptimes_passed_observed": self._initial_df.
D_trip_passed_observed_stop.sum(),
"stoptimes_not_passed": (~self._initial_df.D_trip_passed_scheduled_stop).sum(),
"stoptimes_not_passed_observed":
(self._initial_df.D_trip_passed_observed_stop == False).sum(),
}
print(message % self.summary)
def missing_data_per(self, per="Route_route_short_name"):
# per can be also "Stop_stop_id", "Route_route_short_name"
md = self._initial_df.copy()
md.loc[:, "observed"] = md\
.loc[:, "RealTime_day_train_num"]\
.notnull().apply(int)
group = md.groupby(per)["observed"]
agg_observed = group.sum()
agg_scheduled = group.count()
agg_ratio = group.mean()
agg = pd.concat([agg_observed, agg_scheduled, agg_ratio], axis=1)
agg.columns = ["Observed", "Scheduled", "Ratio"]
return agg
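# Minimal usage sketch (illustrative only): the day and time below are
# arbitrary, and building the matrices requires access to the schedule and
# realtime databases configured for the project.
def _example_direct_prediction_matrix():
    builder = DirectPredictionMatrix(day="20170215")
    builder.stats()
    builder.direct_compute_for_time(time="12:00:00")
    # builder.df now holds one row per scheduled stop with TS_* state columns.
    return builder.df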
class DirectPredictionMatrix(DayMatrixBuilder):
# CONFIGURATION
# Number of past seconds considered for station median delay
_secs = 1200
# Features columns
_feature_cols = [
"Route_route_short_name",
"TS_last_observed_delay",
"TS_line_station_median_delay",
"TS_line_median_delay",
"Trip_direction_id",
"TS_sequence_diff",
"TS_stations_scheduled_trip_time",
"TS_rolling_trips_on_line",
"RealTime_miss",
"D_business_day"
]
# Core identification columns
_id_cols = [
"TS_matrix_datetime",
"Route_route_short_name",
"RealTime_miss",
"Trip_trip_id",
"Stop_stop_id",
"TS_sequence_diff",
"TS_stations_scheduled_trip_time",
]
# Label columns
_label_cols = ["label", "label_ev"]
# Scoring columns
_scoring_cols = ["S_naive_pred_mae", "S_naive_pred_mse"]
# Prediction columns
_prediction_cols = ["P_api_pred", "P_api_pred_ev", "P_naive_pred"]
# Other useful columns
_other_useful_cols = [
"StopTime_departure_time",
"StopTime_stop_sequence",
"Stop_stop_name",
"RealTime_expected_passage_time",
"RealTime_data_freshness",
]
# For time debugging:
_time_debug_cols = [
"StopTime_departure_time", "RealTime_expected_passage_time",
'D_stop_special_day', 'D_stop_scheduled_datetime',
'D_trip_passed_scheduled_stop', 'D_stop_observed_datetime',
'D_trip_time_to_observed_stop', 'D_trip_passed_observed_stop',
'D_trip_delay', 'TS_matrix_datetime',
'TS_trip_passed_scheduled_stop', 'TS_observed_vs_matrix_datetime',
'TS_trip_passed_observed_stop', 'TS_observed_delay',
'TS_expected_delay', 'TS_trip_status'
]
def __init__(self, day=None, df=None):
DayMatrixBuilder.__init__(self, day=day, df=df)
self._state_at_time_computed = False
def direct_compute_for_time(self, time="12:00:00"):
"""Given the data obtained from schedule and realtime, this method will
compute network state at a given time, and provide prediction and label
matrices.
:param time:
"""
# Parameters parsing
full_str_dt = "%s%s" % (self.day, time)
# will raise error if wrong format
self.state_at_datetime = datetime\
.strptime(full_str_dt, "%Y%m%d%H:%M:%S")
self.time = time
logger.info(
"Building Matrix for day %s and time %s" % (
self.day, self.time)
)
# Recreate dataframe from initial one (deletes changes)
self.df = self._initial_df.copy()
# Computing
self._compute_trip_state()
logger.info("TripPredictor computed.")
self._trip_level()
logger.info("Trip level computations performed.")
self._line_level()
logger.info("Line level computations performed.")
# Will add labels if information is available
self._compute_labels()
logger.info("Labels assigned.")
self._compute_api_pred()
logger.info("Api and naive predictions assigned.")
self._compute_pred_scores()
logger.info("Naive predictions scored.")
def _compute_trip_state(self):
"""Computes:
- TS_matrix_datetime: datetime
= datetime for which state is computed
- TS_trip_passed_scheduled_stop: Bool
= at matrix datetime, has train passed scheduled stop?
- TS_observed_vs_matrix_datetime: int (seconds)
- TS_trip_passed_observed_stop: Bool
= at matrix datetime, has train passed observed stop?
- TS_observed_delay: int (seconds)
- TS_expected_delay: int (seconds)
"""
self.df.loc[:, "TS_matrix_datetime"] = self.state_at_datetime\
.strftime("%Y%m%d-%H:%M:%S")
# Has passed scheduled stop at state datetime
self.df.loc[:, "TS_trip_passed_scheduled_stop"] = self.df\
.D_stop_scheduled_datetime\
.apply(lambda x:
((self.state_at_datetime - x).total_seconds() >= 0),
)
# Time between matrix datetime (for which we compute the prediction
# features matrix), and stop times observed passages (only for observed
# passages). <0 means passed, >0 means not passed yet at the given time
self.df.loc[:, "TS_observed_vs_matrix_datetime"] = self\
.df[self.df["D_stop_observed_datetime"].notnull()]\
.D_stop_observed_datetime\
.apply(lambda x: (self.state_at_datetime - x).total_seconds())
# Has passed observed stop time at state datetime
self.df.loc[:, "TS_trip_passed_observed_stop"] = self\
.df[self.df["TS_observed_vs_matrix_datetime"]
.notnull()]\
.loc[:, "TS_observed_vs_matrix_datetime"]\
.apply(lambda x: (x >= 0))
# TripState_observed_delay
self.df.loc[:, "TS_observed_delay"] = self\
.df[self.df["TS_trip_passed_observed_stop"] == True]\
.D_trip_delay
# TripState_expected_delay
self.df.loc[:, "TS_expected_delay"] = self\
.df.query("(TS_trip_passed_observed_stop != True) & (RealTime_data_freshness.notnull())")\
.D_trip_delay
self._state_at_time_computed = True
def _trip_level(self):
"""Compute trip level information:
- TS_trip_status: 0<=x<=1: proportion of passed stations at time
- D_total_sequence: number of stops scheduled for this trip
- TS_last_sequence_number: last observed stop sequence for this trip at
time
- TS_last_observed_delay: delay observed at that last stop
"""
# Trips total number of stops
trips_total_number_stations = self.df\
.groupby("Trip_trip_id")["Stop_stop_id"].count()
# already added to day matrix
# Trips status at time
trips_number_passed_stations = self.df\
.groupby("Trip_trip_id")["TS_trip_passed_scheduled_stop"].sum()
trips_status = trips_number_passed_stations \
/ trips_total_number_stations
trips_status.name = "TS_trip_status"
self.trips_status = trips_status
self.df = self.df.join(trips_status, on="Trip_trip_id")
# Trips last observed stop_sequence
last_sequence_number = self\
.df.query("(TS_trip_status < 1) & (TS_trip_status > 0) & (TS_trip_passed_observed_stop == True)")\
.groupby("Trip_trip_id")["StopTime_stop_sequence"].max()
last_sequence_number.name = "TS_last_sequence_number"
self.df = self.df.join(last_sequence_number, on="Trip_trip_id")
# Compute number of stops between last observed station and predicted
# station.
self.df.loc[:, "TS_sequence_diff"] = self.df.StopTime_stop_sequence - \
self.df.loc[:, "TS_last_sequence_number"]
# Trips last observed delay
last_observed_delay = self.df\
.query("TS_last_sequence_number==StopTime_stop_sequence")\
.loc[:, ["Trip_trip_id", "TS_observed_delay"]]
last_observed_delay.set_index("Trip_trip_id", inplace=True)
last_observed_delay.columns = ["TS_last_observed_delay"]
self.df = self.df.join(last_observed_delay, on="Trip_trip_id")
# Trips last observed scheduled departure time
# useful to know how much time was scheduled between stations
last_observed_scheduled_dep_time = self.df\
.query("TS_last_sequence_number==StopTime_stop_sequence")\
.loc[:, ["Trip_trip_id", "StopTime_departure_time"]]
last_observed_scheduled_dep_time\
.set_index("Trip_trip_id", inplace=True)
last_observed_scheduled_dep_time.columns = [
"TS_last_observed_scheduled_dep_time"]
self.df = self.df\
.join(last_observed_scheduled_dep_time, on="Trip_trip_id")
# Compute number of seconds between last observed passed trip scheduled
# departure time, and departure time of the predicted station
self.df.loc[:, "TS_stations_scheduled_trip_time"] = self.df\
.query("TS_last_observed_scheduled_dep_time.notnull()")\
.apply(lambda x:
DateConverter(dt=x["D_stop_scheduled_datetime"])
.compute_delay_from(
special_date=self.day,
special_time=x["TS_last_observed_scheduled_dep_time"],
force_regular_date=True
),
axis=1
)
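# Example of the two distances computed above: if the last observed stop of a
# trip has sequence 5 and the stop to predict has sequence 8, then
# TS_sequence_diff = 3, and TS_stations_scheduled_trip_time is the number of
# scheduled seconds between the two departure times.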
def _line_level(self):
""" Computes line level information:
- median delay on line on last n seconds
- median delay on line station on last n seconds
- number of currently rolling trips on line
Requires time to now (_add_time_to_now_col).
"""
# Compute delays on last n seconds (defined in init self._secs)
# Line aggregation
line_median_delay = self.df\
.query("(TS_observed_vs_matrix_datetime<%s) & (TS_observed_vs_matrix_datetime>=0) " % self._secs)\
.groupby("Route_route_short_name")\
.TS_observed_delay.median()
line_median_delay.name = "TS_line_median_delay"
self.df = self.df\
.join(line_median_delay, on="Route_route_short_name")
self.line_median_delay = line_median_delay
# Line and station aggregation
# the same station can have different values depending on which lines
# it is located on.
line_station_median_delay = self.df\
.query("(TS_observed_vs_matrix_datetime < %s) & (TS_observed_vs_matrix_datetime>=0) " % self._secs)\
.groupby(["Route_route_short_name", "Stop_stop_id"])\
.TS_observed_delay.median()
line_station_median_delay.name = "TS_line_station_median_delay"
self.df = self.df\
.join(line_station_median_delay, on=["Route_route_short_name", "Stop_stop_id"])
self.line_station_median_delay = line_station_median_delay
# Number of currently rolling trips
rolling_trips_on_line = self\
.df.query("TS_trip_status>0 & TS_trip_status<1")\
.groupby("Route_route_short_name")\
.Trip_trip_id\
.count()
rolling_trips_on_line.name = "TS_rolling_trips_on_line"
self.df = self.df\
.join(rolling_trips_on_line, on="Route_route_short_name")
self.rolling_trips_on_line = rolling_trips_on_line
def _compute_labels(self):
"""Two main logics:
- either retroactive: then TripState_expected_delay is real one: label.
- either realtime (not retroactive): then we don't have real label, but
we have a api prediction.
Retroactive:
Adds two columns:
- label: observed delay at stop: real one.
- label_ev: observed delay evolution (difference between the observed
delay at the predicted stop and the delay at the last observed stop)
Not retroactive: realtime:
Adds two columns:
- P_api_pred: predicted delay from api.
- P_api_pred_ev: predicted evolution (from api) of delay.
"""
# if the stop time really occurred, then the expected delay (extracted
# from the api) is the real one
self.df.loc[:, "label"] = self.df\
.query("D_trip_passed_observed_stop==True")\
.TS_expected_delay
# Evolution of delay between last observed station and predicted
# station
self.df.loc[:, "label_ev"] = self.df\
.query("D_trip_passed_observed_stop == True")\
.apply(lambda x: x.label - x["TS_last_observed_delay"], axis=1)
def _compute_api_pred(self):
"""This method provides two predictions if possible:
- naive pred: delay translation (last observed delay)
- api prediction
"""
# if not passed: it is the api-prediction
self.df.loc[:, "P_api_pred"] = self.df\
.query("D_trip_passed_observed_stop != True")\
.TS_expected_delay
# api delay evolution prediction (based on P_api_pred, since label is
# only available for already passed stops)
self.df.loc[:, "P_api_pred_ev"] = self.df\
.query("D_trip_passed_observed_stop != True")\
.apply(lambda x: x["P_api_pred"] - x["TS_last_observed_delay"], axis=1)
self.df.loc[:, "P_naive_pred"] = self.df.loc[
:, "TS_last_observed_delay"]
def _compute_pred_scores(self):
"""
We can compute score only for stoptimes for which we have real
information.
At no point will we be able to have both the real information and the
api prediction, so we only compute scores for the naive prediction.
NAIVE PREDICTION:
Naive prediction assumes that delay does not evolve:
- evolution of delay = 0
- delay predicted = last_observed_delay
=> error = real_delay - naive_pred
= label - last_observed_delay
= label_ev
Scores for the naive prediction of delay can be:
- naive_pred_mae: mean absolute error: |label_ev|
- naive_pred_mse: mean square error: (label_ev)**2
"""
self.df.loc[:, "S_naive_pred_mae"] = self.df["label_ev"].abs()
self.df.loc[:, "S_naive_pred_mse"] = self.df["label_ev"]**2
def stats(self):
DayMatrixBuilder.stats(self)
if not self._state_at_time_computed:
return None
message = """
SUMMARY FOR DAY %(day)s AT TIME %(time)s
TRIPS
Number of trips today: %(trips_today)s
Number of trips currently rolling: %(trips_now)s (these are the trips for which we will try to make predictions)
Number of trips currently rolling for which we observed at least one stop: %(trips_now_observed)s
STOPTIMES
Number of stop times that day: %(stoptimes_today)s
- Passed:
- scheduled: %(stoptimes_passed)s
- observed: %(stoptimes_passed_observed)s
- Not passed yet:
- scheduled: %(stoptimes_not_passed)s
- observed (predictions on boards): %(stoptimes_not_passed_observed)s
STOPTIMES FOR ROLLING TRIPS
Total number of stops for rolling trips: %(stoptimes_now)s
- Passed: those we will use to make our prediction
- scheduled: %(stoptimes_now_passed)s
- observed: %(stoptimes_now_passed_observed)s
- Not passed yet: those for which we want to make a prediction
- scheduled: %(stoptimes_now_not_passed)s
- already observed on boards (prediction): %(stoptimes_now_not_passed_observed)s
PREDICTIONS
Number of stop times for which we want to make a prediction (not passed yet): %(stoptimes_now_not_passed)s
Number of trips currently rolling for which we observed at least one stop: %(trips_now_observed)s
Representing %(stoptimes_predictable)s stop times for which we can provide a prediction.
LABELED
Given that retroactive is %(retroactive)s, we have %(stoptimes_predictable_labeled)s labeled to_predict stoptimes for training.
"""
self.summary = {
"day": self.day,
"time": self.time,
"trips_today": len(self.df.Trip_trip_id.unique()),
"trips_now": self.df
.query("(TS_trip_status > 0) & (TS_trip_status < 1)")
.Trip_trip_id.unique().shape[0],
"trips_now_observed": self.df
.query("(TS_trip_status > 0) & (TS_trip_status < 1) & (TS_sequence_diff.notnull())")
.Trip_trip_id.unique().shape[0],
"stoptimes_today": self.df.Trip_trip_id.count(),
"stoptimes_passed": self.df.TS_trip_passed_scheduled_stop.sum(),
"stoptimes_passed_observed": self
.df.TS_trip_passed_observed_stop.sum(),
"stoptimes_not_passed": (~self.df.TS_trip_passed_scheduled_stop).sum(),
"stoptimes_not_passed_observed":
(self.df.TS_trip_passed_observed_stop == False).sum(),
"stoptimes_now": self.df
.query("(TS_trip_status > 0) & (TS_trip_status < 1)")
.Trip_trip_id.count(),
"stoptimes_now_passed": self.df
.query("(TS_trip_status > 0) & (TS_trip_status < 1) &(TS_trip_passed_scheduled_stop==True)")
.Trip_trip_id.count(),
"stoptimes_now_passed_observed": self.df
.query("(TS_trip_status > 0) & (TS_trip_status < 1) &(TS_trip_passed_observed_stop==True)")
.Trip_trip_id.count(),
"stoptimes_now_not_passed": self.df
.query("(TS_trip_status > 0) & (TS_trip_status < 1) &(TS_trip_passed_scheduled_stop==False)")
.Trip_trip_id.count(),
"stoptimes_now_not_passed_observed": self.df
.query("(TS_trip_status > 0) & (TS_trip_status < 1) &(TS_trip_passed_observed_stop==False)")
.Trip_trip_id.count(),
"stoptimes_predictable": self.df
.query("(TS_trip_status > 0) & (TS_trip_status < 1) &(TS_trip_passed_scheduled_stop==False) &"
" (TS_sequence_diff.notnull())")
.Trip_trip_id.count(),
"stoptimes_predictable_labeled": self.df
.query("(TS_trip_status > 0) & (TS_trip_status < 1) &(TS_trip_passed_scheduled_stop==False) &"
" (TS_sequence_diff.notnull()) &(label.notnull())")
.Trip_trip_id.count(),
}
print(message % self.summary)
def get_predictable(self,
all_features_required=True,
labeled_only=True,
col_filter_level=2,
split_datasets=False,
set_index=True,
provided_df=None):
"""Return to_predict stop times.
:param all_features_required: keep only rows where all feature columns are set
:param labeled_only: keep only rows that have a label
:param col_filter_level: 0 (no filtering), 1 (necessary plus other useful columns), 2 (necessary columns only)
:param split_datasets: return a dict of feature/label/prediction/score dataframes instead of a single dataframe
:param set_index: copy identification columns into the index
:param provided_df: dataframe to use instead of self.df
"""
assert self._state_at_time_computed
if isinstance(provided_df, pd.DataFrame):
rdf = provided_df
else:
rdf = self.df
# Filter running trips, stations not passed yet
# Basic Conditions:
# - trip_status strictly between 0 and 1,
# - has not yet passed the scheduled stop (not True)
# - has not yet passed the observed stop (not True: it can be NaN or False)
rdf = rdf.query(
"TS_trip_status < 1 & TS_trip_status > 0 & TS_trip_passed_scheduled_stop !=\
True & TS_trip_passed_observed_stop != True")
if all_features_required:
# Only elements that have all features
for feature in self._feature_cols:
rdf = rdf.query("%s.notnull()" % feature)
if labeled_only:
rdf = rdf.query("label.notnull()")
if set_index:
rdf = self._df_set_index(rdf)
if col_filter_level:
# col_filter_level 0 keeps all columns; 1 or 2 apply column filtering
rdf = self._df_filter_cols(rdf, col_filter_level=col_filter_level)
if split_datasets:
# return dict
rdf = self._split_datasets(rdf)
logger.info("Predictable with labeled_only=%s, has a total of %s rows." % (labeled_only, len(rdf))
)
return rdf
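# Possible call of the method above on a DirectPredictionMatrix instance m,
# once direct_compute_for_time has run (keyword values shown are just one
# combination):
#   rdf = m.get_predictable(labeled_only=True, col_filter_level=2,
#                           split_datasets=False)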
def _df_filter_cols(self, rdf, col_filter_level):
# We need at least: index, features, and label
filtered_cols = self._feature_cols\
+ self._id_cols\
+ self._label_cols\
+ self._prediction_cols\
+ self._scoring_cols
if col_filter_level == 2:
# high filter: only necessary fields
return rdf[filtered_cols]
elif col_filter_level == 1:
# medium filter: add some useful cols
filtered_cols += self._other_useful_cols
return rdf[filtered_cols]
else:
raise ValueError("col_filter_level must be 0, 1 or 2")
def _df_set_index(self, rdf):
# copy columns so that it is available as value or index
# value columns are then filtered
assert isinstance(rdf, pd.DataFrame)
index_suffix = "_ix"
rdf.reset_index()
for col in self._id_cols:
rdf[col + index_suffix] = rdf[col]
new_ix = list(map(lambda x: x + index_suffix, self._id_cols))
rdf.set_index(new_ix, inplace=True)
return rdf
def _split_datasets(self, rdf):
res = {
"X": rdf[self._feature_cols],
"y_real": rdf[self._label_cols],
"y_pred": rdf[self._prediction_cols],
"y_score": rdf[self._scoring_cols]
}
return res
def compute_multiple_times_of_day(self, begin="00:00:00", end="23:59:00", min_diff=60, flush_former=True, **kwargs):
"""Compute dataframes for different times of day.
Default: begins at 00:00:00 and ends at 23:59:00 with a step of one
hour.
:param begin: start time of day ("HH:MM:SS")
:param end: end time of day ("HH:MM:SS")
:param min_diff: step between two computations, in minutes
:param flush_former: if True, discard previously concatenated results
"""
assert isinstance(min_diff, int)
diff = timedelta(minutes=min_diff)
# will raise error if wrong format
begin_dt = datetime.strptime(begin, "%H:%M:%S")
end_dt = datetime.strptime(end, "%H:%M:%S")
if flush_former:
self._flush_result_concat()
step_dt = begin_dt
while (end_dt >= step_dt):
step = step_dt.strftime("%H:%M:%S")
self.direct_compute_for_time(step)
step_df = self.get_predictable(**kwargs)
self._concat_dataframes(step_df)
step_dt += diff
return self.result_concat
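# Usage sketch for the loop above, on a DirectPredictionMatrix instance m:
# compute the matrix every 30 minutes over a morning window and collect the
# predictable rows (times are placeholders):
#   df = m.compute_multiple_times_of_day(begin="06:00:00", end="10:00:00",
#                                        min_diff=30, labeled_only=True)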
def _concat_dataframes(self, df):
assert isinstance(df, pd.DataFrame)
# if no former result df, create empty df
if not hasattr(self, "result_concat"):
self.result_concat = pd.DataFrame()
# concat with previous results
self.result_concat = pd.concat([self.result_concat, df])
def _flush_result_concat(self):
self.result_concat = pd.DataFrame()
class RecursivePredictionMatrix(DayMatrixBuilder):
"""
NOT COMPLETED NOR USED FOR NOW
"""
def __init__(self, day=None, df=None):
DayMatrixBuilder.__init__(self, day=day, df=df)
def compute_all_possibles_sets(self):
"""Given the data obtained from schedule and realtime, this method will
compute data sets for recursive prediction.
Recursive predictions (to the contrary of direct predictions) are
relatively time agnostic. They primarily depend on previous stops.
The elements to be computed are:
- R_trip_previous_station_delay: the train previous stop delay:
-- will only accept previous stop
- R_previous_trip_last_station_delay: the forward train last estimated stop delay: difficult to compute?
-- RS_data_freshness
- R_: make a score of route section blocking potential
"""
self.df = self._initial_df.copy()
self._trip_previous_station()
def _trip_previous_station(self):
self.df.loc[:, "R_previous_station_sequence"] = self.df\
.query("StopTime_stop_sequence>0")\
.StopTime_stop_sequence - 1
previous_station = self.df\
.set_index(["Trip_trip_id", "StopTime_stop_sequence"])\
.loc[:, ["D_trip_delay", "StopTime_departure_time"]]\
.dropna()
self.df = self.df\
.join(
previous_station,
on=["Trip_trip_id", "R_previous_station_sequence"],
how="left", rsuffix="_previous_station"
)
class TripVizBuilder(DayMatrixBuilder):
def __init__(self, day=None, df=None):
DayMatrixBuilder.__init__(self, day=day, df=df)
def annote_for_route_section(self, passes_by_all=None, passes_by_one=None):
"""
Adds a column: stop custom sequence. It represents the station sequence
on the given route, on the given section.
Trip directions will be separated.
Filters trips passing by chosen stations.
To compute custom route sequence, we have to assign to each relevant
stop_id a sequence number.
Ideally we would like to select by stop_name, but they are not unique.
:param passes_by_all:
:param passes_by_one:
"""
pass
class TrainingSetBuilder:
def __init__(self, start, end, tempo=30):
"""
Start and end included.
:param start: first day (any date format accepted by pandas date_range)
:param end: last day (same format as start)
:param tempo: step in minutes between two matrix computations
:return:
"""
dti = pd.date_range(start=start, end=end, freq="D")
self.days = dti.map(lambda x: x.strftime("%Y%m%d")).tolist()
assert isinstance(tempo, int)
self.tempo = tempo
self.bucket_name = __S3_BUCKETS__["training-sets"]
self._bucket_provider = S3Bucket(
self.bucket_name,
create_if_absent=True
)
def _create_day_training_set(self, day, save_s3):
mat = DirectPredictionMatrix(day)
mat.compute_multiple_times_of_day(min_diff=self.tempo)
__FULL_TRAINING_SET_FOLDER__ = __TRAINING_SET_FOLDER_PATH__ % self.tempo
if not path.exists(__RAW_DAYS_FOLDER_PATH__):
makedirs(__RAW_DAYS_FOLDER_PATH__)
if not path.exists(__FULL_TRAINING_SET_FOLDER__):
makedirs(__FULL_TRAINING_SET_FOLDER__)
__RAW_FILE_NAME__ = "%s.pickle" % day
__RAW_FILE_PATH__ = path.join(__RAW_DAYS_FOLDER_PATH__, __RAW_FILE_NAME__)
__TRAINING_SET_FILE_NAME__ = "%s.pickle" % day
__TRAINING_SET_FILE_PATH__ = path.join(__FULL_TRAINING_SET_FOLDER__, __TRAINING_SET_FILE_NAME__)
logger.info("Saving data in %s." % __RAW_DAYS_FOLDER_PATH__)
mat._initial_df.to_pickle(__RAW_FILE_PATH__)
mat.result_concat.to_pickle(__TRAINING_SET_FILE_PATH__)
if save_s3:
self._bucket_provider.send_file(
file_local_path=__RAW_FILE_PATH__,
file_remote_path=path.relpath(__RAW_FILE_PATH__, __DATA_PATH__)
)
self._bucket_provider.send_file(
file_local_path=__TRAINING_SET_FILE_PATH__,
file_remote_path=path.relpath(__TRAINING_SET_FILE_PATH__, __DATA_PATH__)
)
def create_training_sets(self, save_s3=True):
for day in self.days:
self._create_day_training_set(
day=day,
save_s3=save_s3
)
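# Minimal usage sketch for the builder above (dates are placeholders, and
# save_s3=False skips the S3 upload step):
#   tsb = TrainingSetBuilder(start="20170201", end="20170207", tempo=30)
#   tsb.create_training_sets(save_s3=False)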
# TODO
# A - take into account trip direction when computing delays on line
# DONE: B - handle cases where no realtime information is found
# DONE: C - when retroactive is False, give api prediction as feature
# D - ability to save results
# E - investigate missing values/stations
# F - perform trip_id train_num comparison on RATP lines
# G - improve speed by having an option to make computations only on running
# trains
| mit |
gmatteo/pymatgen | pymatgen/io/abinit/tests/test_abiinspect.py | 5 | 4156 | # coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
import os
import tempfile
from pymatgen.io.abinit.abiinspect import *
from pymatgen.util.testing import PymatgenTest
_test_dir = os.path.join(os.path.dirname(__file__), "..", "..", "..", "..", "test_files", "abinit")
try:
import matplotlib
have_matplotlib = "DISPLAY" in os.environ
except ImportError:
have_matplotlib = False
def ref_file(filename):
return os.path.join(_test_dir, filename)
def ref_files(*filenames):
return list(map(ref_file, filenames))
class YamlTokenizerTest(PymatgenTest):
"""Test YamlTokenizer."""
def test_base(self):
string = """---
none: [~, null]
bool: [true, false, on, off]
int: 42
float: 3.14159
list: [LITE, RES_ACID, SUS_DEXT]
dict: {hp: 13, sp: 5}
...
this is not a YAML document!
and the tokenizer will ignore it
--- !Monster
name: Cave spider
hp: [2,6] # 2d6
ac: 16
attacks: [BITE, HURT]
...
This is not a proper document since it does not start with ---
the end tag below is ignored
...
--- !Monster
name: Dragon
hp: [2,6] # 2d6
ac: 32
attacks: [BITE, HURT]
...
"""
# for i, line in enumerate(string.splitlines()): print(i, line)
fd, filename = tempfile.mkstemp(text=True)
with open(filename, "w") as fh:
fh.write(string)
doc_tags = [None, "!Monster", "!Monster"]
doc_linenos = [1, 13, 23]
with YamlTokenizer(filename) as r:
# Iterate the docs
n = 0
for i, doc in enumerate(r):
n += 1
print("doc", doc)
self.assertTrue(doc.tag == doc_tags[i])
self.assertTrue(doc.lineno == doc_linenos[i])
self.assertTrue(n == len(doc_tags))
# Read all docs present in the file.
r.seek(0)
all_docs = r.all_yaml_docs()
# print(all_docs)
self.assertTrue(len(all_docs) == 3)
# We should be at the beginning of the file.
self.assertTrue(all_docs == r.all_yaml_docs())
# Find documents by tag.
r.seek(0)
monster = r.next_doc_with_tag("!Monster")
# print("monster",monster)
self.assertTrue(monster == all_docs[1])
monster = r.next_doc_with_tag("!Monster")
self.assertTrue(monster == all_docs[2])
# this should raise StopIteration
with self.assertRaises(StopIteration):
monster = r.next_doc_with_tag("!Monster")
# os.remove(filename)
class AbinitInpectTest(PymatgenTest):
def test_scfcycle(self):
"""Testing ScfCycle."""
cycle = GroundStateScfCycle.from_file(ref_file("mgb2_scf.abo"))
str(cycle)
cycle.to_string(verbose=2)
assert cycle.num_iterations == 6
last = cycle.last_iteration
assert last["Etot(hartree)"] == -7.1476241568657 and last["vres2"] == 3.879e-08
assert list(cycle["vres2"]) == [
1.769e02,
7.920e-01,
1.570e-01,
4.259e-03,
4.150e-05,
3.879e-08,
]
# TODO: Reactivate
# if have_matplotlib:
# assert cycle.plot(show=False)
# Testing CyclesPlotter.
p = CyclesPlotter()
p.add_label_cycle("mgb2 SCF", cycle)
p.add_label_cycle("same SCF", cycle)
# TODO: Reactivate
# if have_matplotlib:
# assert p.combiplot(show=False)
# p.slideshow()
def test_relaxation(self):
"""Testing Relaxation object."""
relaxation = Relaxation.from_file(ref_file("sic_relax.abo"))
print(relaxation)
assert len(relaxation) == 4
assert relaxation[0]["Etot(hartree)"][-1] == -8.8077409200473
assert relaxation[-1]["Etot(hartree)"][-1] == -8.8234906607147
for scf_step in relaxation:
print(scf_step.num_iterations)
# TODO: Reactivate
# if have_matplotlib:
# relaxation.plot(show=False)
# relaxation.slideshow(show=False)
| mit |
aymeric-spiga/planetoplot | bin/pp.py | 1 | 7891 | #! /usr/bin/env python
##############################################
## A MINIMAL PP.PY SCRIPT USING PPCLASS.PY ##
## Author: A. Spiga 03/2013 ##
##############################################
from optparse import OptionParser ### TBR by argparse
from ppclass import pp, inspect
import ppplot
import sys
##############################################
# NB: this is a convenient command-line script
# ... but ppclass is more versatile
# ... than what is proposed here
# ... e.g. differences between files,
# ... complex operations,
# ... see sample scripts
######################################
# define parser with version and usage
######################################
parser = OptionParser()
parser.version = \
'''**************************************************
******** PLANETOPLOT (for help: pp.py -h) ********
**************************************************'''
parser.usage = \
'''pp.py [options] netCDF file(s)
(NB: no options --> simple inspection of variables and dimensions in netCDF files)
-------------------
PLANETOPLOT
--> command line tool to make nice & quick plots from netCDF files
--> based on python + numpy + scipy + matplotlib + basemap + netCDF4
--> Author: A. Spiga (LMD/UPMC) [email protected]
-------------------'''
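# Example invocations (file and variable names are placeholders):
# pp.py file.nc --> inspect variables and dimensions
# pp.py -v temp file.nc --> plot variable "temp"
# pp.py -v temp -t 0.5 -z 10. file.nc --> plot "temp" at a given time and level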
########################################
# set options for the pp.py command line
########################################
parser.add_option('--verbose',action='store_true',dest='verbose',default=False,help='make the program verbose')
# field --> lower case
parser.add_option('-v','--var',action='append',dest='var',type="string",default=None,help="'variable' or ['var1','var2',etc]")
parser.add_option('-x','--lon',action='append',dest='x',type="string",default=None,help="x axis value. one value; or val1,val2 (computations)")
parser.add_option('-y','--lat',action='append',dest='y',type="string",default=None,help="y axis value. one value; or val1,val2 (computations)")
parser.add_option('-z','--vert',action='append',dest='z',type="string",default=None,help="z axis value. one value; or val1,val2 (computations)")
parser.add_option('-t','--time',action='append',dest='t',type="string",default=None,help="t axis value. one value; or val1,val2 (computations)")
parser.add_option('-u','--compute',action='store',dest='compute',type="string",default="mean",help="computation: mean, min, max, meanarea")
parser.add_option('-c','--contour',action='store',dest='contour',type="string",default=None,help="one 'variable' for contour")
parser.add_option('-i','--vecx',action='store',dest='vecx',type="string",default=None,help="one 'variable' for wind vector x component")
parser.add_option('-j','--vecy',action='store',dest='vecy',type="string",default=None,help="one 'variable' for wind vector y component")
parser.add_option('-m','--mult',action='store',dest='mult',type="float",default=None,help="multiplicative factor on field")
parser.add_option('--pow',action='store',dest='pow',type="float",default=None,help="exponent power on field")
parser.add_option('-a','--add',action='store',dest='add',type="float",default=None,help="additive factor on field")
parser.add_option('-o','--output',action='store',dest='filename',type="string",default=None,help="name of output files")
parser.add_option('-d','--directory',action='store',dest='folder',type="string",default="./",help="directory of output files")
parser.add_option('-s','--changetime',action='store',dest='changetime',type="string",default=None,\
help="transformation on time axis : [None] | correctls | mars_sol2ls | mars_dayini | mars_meso_ls | mars_meso_sol | mars_meso_utc | mars_meso_lt ")
parser.add_option('-p','--print',action='store_true',dest='savtxt',default=False,help="[1D] output field+coord in an ASCII file")
parser.add_option('--sx',action='store',dest='sx',type="int",default=1,help="Load data every sx grid points over x dimension")
parser.add_option('--sy',action='store',dest='sy',type="int",default=1,help="Load data every sy grid points over y dimension")
parser.add_option('--sz',action='store',dest='sz',type="int",default=1,help="Load data every sz grid points over z dimension")
parser.add_option('--st',action='store',dest='st',type="int",default=1,help="Load data every st grid points over t dimension")
parser.add_option('--namex',action='store',dest='name_x',type="string",default=None,help="Choice of x axis")
parser.add_option('--namey',action='store',dest='name_y',type="string",default=None,help="Choice of y axis")
parser.add_option('--namez',action='store',dest='name_z',type="string",default=None,help="Choice of z axis")
parser.add_option('--namet',action='store',dest='name_t',type="string",default=None,help="Choice of t axis")
parser.add_option('--useindex',action='store',dest="useindex",type="string",default="0000",help="Use index for arrays and not values of dimensions 1/0 as tzyx")
parser.add_option('--kind3d',action='store',dest='kind3d',type="string",default="tyx",help="dimensions if rank<4: tzy, tyx (default)")
# plot options --> upper case. see ppplot.
parser = ppplot.opt(parser)
parser = ppplot.opt1d(parser)
parser = ppplot.opt2d(parser)
###########################
(opt,args) = parser.parse_args()
# remains G R
if (len(args) == 0):
parser.print_version()
######################################
# get arguments (one or several files)
######################################
if args is None:
print "Stop here! I need file(s) as argument(s)!" ; exit()
else:
files = args
#############################################
# a possibility to simply inspect the file(s)
#############################################
if opt.var is None:
for filename in files: inspect(filename)
exit()
######################################
# use ppclass to get field and plot it
######################################
# treat the case of additional vectors or contours (contours must be before vectors)
var = [] ; vargoal = []
for element in opt.var:
var.append(element) ; vargoal.append("main")
if opt.contour is not None: var.append(opt.contour) ; vargoal.append("contour")
if opt.vecx is not None: var.append(opt.vecx) ; vargoal.append("vector")
if opt.vecy is not None: var.append(opt.vecy) ; vargoal.append("vector")
# set pp object
user = pp()
user.file = files
user.var = var ; user.vargoal = vargoal
user.x = opt.x ; user.y = opt.y
user.z = opt.z ; user.t = opt.t
user.verbose = opt.verbose
if not user.verbose: user.quiet = True
user.compute = opt.compute
user.changetime = opt.changetime
user.useindex = opt.useindex
user.sx = opt.sx ; user.sy = opt.sy
user.sz = opt.sz ; user.st = opt.st
user.name_x = opt.name_x ; user.name_y = opt.name_y
user.name_z = opt.name_z ; user.name_t = opt.name_t
user.svx = opt.svx ; user.svy = opt.svy
user.savtxt = opt.savtxt
user.kind3d = opt.kind3d
if opt.xp is not None: user.xp = opt.xp
if opt.yp is not None: user.yp = opt.yp
# define field
user.define()
# retrieve field
user.retrieve()
# some possible operations
if opt.add is not None: user = user + opt.add
if opt.mult is not None: user = user * opt.mult
if opt.pow is not None: user = user ** opt.pow
# get some options
user.superpose = opt.superpose
user.filename = opt.filename
user.folder = opt.folder
user.out = opt.out
user.proj = opt.proj
user.res = opt.res
user.showcb = opt.showcb
# define plot
user.defineplot()
# user-defined plot settings
# ... shouldn't this be before defineplot?
user.getopt(opt)
# make plot
user.makeplot()
####################################
# save a .sh file with the command #
####################################
command = ""
for arg in sys.argv:
command = command + arg + ' '
if opt.filename is not None:
try:
f = open(opt.folder+'/'+opt.filename+'.sh', 'w')
f.write(command)
except IOError:
print "!! WARNING !! pp.py command not saved. Probably do not have permission to write here."
| gpl-2.0 |
arbuz001/sms-tools | software/models_interface/spsModel_function.py | 21 | 3527 | # function to call the main analysis/synthesis functions in software/models/spsModel.py
import sys, os
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../models/'))
import numpy as np
import matplotlib.pyplot as plt
from scipy.signal import get_window
import spsModel as SPS
import utilFunctions as UF
def main(inputFile='../../sounds/bendir.wav', window='hamming', M=2001, N=2048, t=-80, minSineDur=0.02,
maxnSines=150, freqDevOffset=10, freqDevSlope=0.001, stocf=0.2):
"""
inputFile: input sound file (monophonic with sampling rate of 44100)
window: analysis window type (rectangular, hanning, hamming, blackman, blackmanharris)
M: analysis window size; N: fft size (power of two, bigger or equal than M)
t: magnitude threshold of spectral peaks; minSineDur: minimum duration of sinusoidal tracks
maxnSines: maximum number of parallel sinusoids
freqDevOffset: frequency deviation allowed in the sinusoids from frame to frame at frequency 0
freqDevSlope: slope of the frequency deviation, higher frequencies have bigger deviation
stocf: decimation factor used for the stochastic approximation
"""
# size of fft used in synthesis
Ns = 512
# hop size (has to be 1/4 of Ns)
H = 128
# read input sound
(fs, x) = UF.wavread(inputFile)
# compute analysis window
w = get_window(window, M)
# perform sinusoidal+stochastic analysis
tfreq, tmag, tphase, stocEnv = SPS.spsModelAnal(x, fs, w, N, H, t, minSineDur, maxnSines, freqDevOffset, freqDevSlope, stocf)
# synthesize sinusoidal+stochastic model
y, ys, yst = SPS.spsModelSynth(tfreq, tmag, tphase, stocEnv, Ns, H, fs)
# output sound file (monophonic with sampling rate of 44100)
outputFileSines = 'output_sounds/' + os.path.basename(inputFile)[:-4] + '_spsModel_sines.wav'
outputFileStochastic = 'output_sounds/' + os.path.basename(inputFile)[:-4] + '_spsModel_stochastic.wav'
outputFile = 'output_sounds/' + os.path.basename(inputFile)[:-4] + '_spsModel.wav'
# write sounds files for sinusoidal, residual, and the sum
UF.wavwrite(ys, fs, outputFileSines)
UF.wavwrite(yst, fs, outputFileStochastic)
UF.wavwrite(y, fs, outputFile)
# create figure to plot
plt.figure(figsize=(12, 9))
# frequency range to plot
maxplotfreq = 10000.0
# plot the input sound
plt.subplot(3,1,1)
plt.plot(np.arange(x.size)/float(fs), x)
plt.axis([0, x.size/float(fs), min(x), max(x)])
plt.ylabel('amplitude')
plt.xlabel('time (sec)')
plt.title('input sound: x')
plt.subplot(3,1,2)
numFrames = int(stocEnv[:,0].size)
sizeEnv = int(stocEnv[0,:].size)
frmTime = H*np.arange(numFrames)/float(fs)
binFreq = (.5*fs)*np.arange(sizeEnv*maxplotfreq/(.5*fs))/sizeEnv
plt.pcolormesh(frmTime, binFreq, np.transpose(stocEnv[:,:sizeEnv*maxplotfreq/(.5*fs)+1]))
plt.autoscale(tight=True)
# plot sinusoidal frequencies on top of stochastic component
if (tfreq.shape[1] > 0):
sines = tfreq*np.less(tfreq,maxplotfreq)
sines[sines==0] = np.nan
numFrames = int(sines[:,0].size)
frmTime = H*np.arange(numFrames)/float(fs)
plt.plot(frmTime, sines, color='k', ms=3, alpha=1)
plt.xlabel('time(s)')
plt.ylabel('Frequency(Hz)')
plt.autoscale(tight=True)
plt.title('sinusoidal + stochastic spectrogram')
# plot the output sound
plt.subplot(3,1,3)
plt.plot(np.arange(y.size)/float(fs), y)
plt.axis([0, y.size/float(fs), min(y), max(y)])
plt.ylabel('amplitude')
plt.xlabel('time (sec)')
plt.title('output sound: y')
plt.tight_layout()
plt.show()
if __name__ == "__main__":
main()
| agpl-3.0 |
joshloyal/scikit-learn | examples/mixture/plot_gmm_covariances.py | 89 | 4724 | """
===============
GMM covariances
===============
Demonstration of several covariances types for Gaussian mixture models.
See :ref:`gmm` for more information on the estimator.
Although GMM are often used for clustering, we can compare the obtained
clusters with the actual classes from the dataset. We initialize the means
of the Gaussians with the means of the classes from the training set to make
this comparison valid.
We plot predicted labels on both training and held out test data using a
variety of GMM covariance types on the iris dataset.
We compare GMMs with spherical, diagonal, full, and tied covariance
matrices in increasing order of performance. Although one would
expect full covariance to perform best in general, it is prone to
overfitting on small datasets and does not generalize well to held out
test data.
On the plots, train data is shown as dots, while test data is shown as
crosses. The iris dataset is four-dimensional. Only the first two
dimensions are shown here, and thus some points are separated in other
dimensions.
"""
# Author: Ron Weiss <[email protected]>, Gael Varoquaux
# Modified by Thierry Guillemot <[email protected]>
# License: BSD 3 clause
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
from sklearn import datasets
from sklearn.mixture import GaussianMixture
from sklearn.model_selection import StratifiedKFold
print(__doc__)
colors = ['navy', 'turquoise', 'darkorange']
def make_ellipses(gmm, ax):
for n, color in enumerate(colors):
if gmm.covariance_type == 'full':
covariances = gmm.covariances_[n][:2, :2]
elif gmm.covariance_type == 'tied':
covariances = gmm.covariances_[:2, :2]
elif gmm.covariance_type == 'diag':
covariances = np.diag(gmm.covariances_[n][:2])
elif gmm.covariance_type == 'spherical':
covariances = np.eye(gmm.means_.shape[1]) * gmm.covariances_[n]
v, w = np.linalg.eigh(covariances)
u = w[0] / np.linalg.norm(w[0])
angle = np.arctan2(u[1], u[0])
angle = 180 * angle / np.pi # convert to degrees
v = 2. * np.sqrt(2.) * np.sqrt(v)
ell = mpl.patches.Ellipse(gmm.means_[n, :2], v[0], v[1],
180 + angle, color=color)
ell.set_clip_box(ax.bbox)
ell.set_alpha(0.5)
ax.add_artist(ell)
iris = datasets.load_iris()
# Break up the dataset into non-overlapping training (75%) and testing
# (25%) sets.
skf = StratifiedKFold(n_splits=4)
# Only take the first fold.
train_index, test_index = next(iter(skf.split(iris.data, iris.target)))
X_train = iris.data[train_index]
y_train = iris.target[train_index]
X_test = iris.data[test_index]
y_test = iris.target[test_index]
n_classes = len(np.unique(y_train))
# Try GMMs using different types of covariances.
estimators = dict((cov_type, GaussianMixture(n_components=n_classes,
covariance_type=cov_type, max_iter=20, random_state=0))
for cov_type in ['spherical', 'diag', 'tied', 'full'])
n_estimators = len(estimators)
plt.figure(figsize=(3 * n_estimators // 2, 6))
plt.subplots_adjust(bottom=.01, top=0.95, hspace=.15, wspace=.05,
left=.01, right=.99)
for index, (name, estimator) in enumerate(estimators.items()):
# Since we have class labels for the training data, we can
# initialize the GMM parameters in a supervised manner.
estimator.means_init = np.array([X_train[y_train == i].mean(axis=0)
for i in range(n_classes)])
# Train the other parameters using the EM algorithm.
estimator.fit(X_train)
h = plt.subplot(2, n_estimators // 2, index + 1)
make_ellipses(estimator, h)
for n, color in enumerate(colors):
data = iris.data[iris.target == n]
plt.scatter(data[:, 0], data[:, 1], s=0.8, color=color,
label=iris.target_names[n])
# Plot the test data with crosses
for n, color in enumerate(colors):
data = X_test[y_test == n]
plt.scatter(data[:, 0], data[:, 1], marker='x', color=color)
y_train_pred = estimator.predict(X_train)
train_accuracy = np.mean(y_train_pred.ravel() == y_train.ravel()) * 100
plt.text(0.05, 0.9, 'Train accuracy: %.1f' % train_accuracy,
transform=h.transAxes)
y_test_pred = estimator.predict(X_test)
test_accuracy = np.mean(y_test_pred.ravel() == y_test.ravel()) * 100
plt.text(0.05, 0.8, 'Test accuracy: %.1f' % test_accuracy,
transform=h.transAxes)
plt.xticks(())
plt.yticks(())
plt.title(name)
plt.legend(scatterpoints=1, loc='lower right', prop=dict(size=12))
plt.show()
| bsd-3-clause |
josenavas/QiiTa | qiita_pet/handlers/rest/study_samples.py | 3 | 4239 | # -----------------------------------------------------------------------------
# Copyright (c) 2014--, The Qiita Development Team.
#
# Distributed under the terms of the BSD 3-clause License.
#
# The full license is in the file LICENSE, distributed with this software.
# -----------------------------------------------------------------------------
from tornado.escape import json_encode, json_decode
import pandas as pd
from qiita_db.handlers.oauth2 import authenticate_oauth
from .rest_handler import RESTHandler
class StudySamplesHandler(RESTHandler):
@authenticate_oauth
def get(self, study_id):
study = self.safe_get_study(study_id)
if study is None:
return
if study.sample_template is None:
samples = []
else:
samples = list(study.sample_template.keys())
self.write(json_encode(samples))
self.finish()
@authenticate_oauth
def patch(self, study_id):
study = self.safe_get_study(study_id)
if study is None:
return
if study.sample_template is None:
self.fail('No sample information found', 404)
return
else:
sample_info = study.sample_template.to_dataframe()
data = pd.DataFrame.from_dict(json_decode(self.request.body),
orient='index')
if len(data.index) == 0:
self.fail('No samples provided', 400)
return
categories = set(study.sample_template.categories())
if set(data.columns) != categories:
if set(data.columns).issubset(categories):
self.fail('Not all sample information categories provided',
400)
else:
unknown = set(data.columns) - categories
self.fail("Some categories do not exist in the sample "
"information", 400,
categories_not_found=sorted(unknown))
return
existing_samples = set(sample_info.index)
overlapping_ids = set(data.index).intersection(existing_samples)
new_ids = set(data.index) - existing_samples
status = 500
# warnings generated are not currently caught
# see https://github.com/biocore/qiita/issues/2096
if overlapping_ids:
to_update = data.loc[overlapping_ids]
study.sample_template.update(to_update)
status = 200
if new_ids:
to_extend = data.loc[new_ids]
study.sample_template.extend(to_extend)
status = 201
self.set_status(status)
self.finish()
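# Illustrative PATCH body for the handler above: a JSON object mapping sample
# ids to {category: value} dicts. Sample ids and category names below are
# placeholders; categories must match the study's sample information:
#   {"sample-1": {"host_age": "31", "env_package": "soil"},
#    "sample-2": {"host_age": "44", "env_package": "soil"}}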
class StudySamplesCategoriesHandler(RESTHandler):
@authenticate_oauth
def get(self, study_id, categories):
if not categories:
self.fail('No categories specified', 405)
return
study = self.safe_get_study(study_id)
if study is None:
return
categories = categories.split(',')
if study.sample_template is None:
self.fail('Study does not have sample information', 404)
return
available_categories = set(study.sample_template.categories())
not_found = set(categories) - available_categories
if not_found:
self.fail('Category not found', 404,
categories_not_found=sorted(not_found))
return
blob = {'header': categories,
'samples': {}}
df = study.sample_template.to_dataframe()
for idx, row in df[categories].iterrows():
blob['samples'][idx] = list(row)
self.write(json_encode(blob))
self.finish()
class StudySamplesInfoHandler(RESTHandler):
@authenticate_oauth
def get(self, study_id):
study = self.safe_get_study(study_id)
if study is None:
return
st = study.sample_template
if st is None:
info = {'number-of-samples': 0,
'categories': []}
else:
info = {'number-of-samples': len(st),
'categories': st.categories()}
self.write(json_encode(info))
self.finish()
| bsd-3-clause |
lbeltrame/bcbio-nextgen | bcbio/structural/validate.py | 4 | 19051 | """Provide validation of structural variations against truth sets.
"""
import csv
import os
import six
import toolz as tz
import numpy as np
import pandas as pd
import pybedtools
from bcbio.log import logger
from bcbio import utils
from bcbio.bam import ref
from bcbio.pipeline import datadict as dd
from bcbio.provenance import do
from bcbio.structural import convert
from bcbio.distributed.transaction import file_transaction
from bcbio.variation import vcfutils, ploidy, validateplot
from bcbio.pipeline import config_utils
mpl = utils.LazyImport("matplotlib")
plt = utils.LazyImport("matplotlib.pyplot")
sns = utils.LazyImport("seaborn")
# -- VCF based validation
def _evaluate_vcf(calls, truth_vcf, work_dir, data):
out_file = os.path.join(work_dir, os.path.join("%s-sv-validate.csv" % dd.get_sample_name(data)))
if not utils.file_exists(out_file):
with file_transaction(data, out_file) as tx_out_file:
with open(tx_out_file, "w") as out_handle:
writer = csv.writer(out_handle)
writer.writerow(["sample", "caller", "vtype", "metric", "value"])
for call in calls:
detail_dir = utils.safe_makedir(os.path.join(work_dir, call["variantcaller"]))
if call.get("vrn_file"):
for stats in _validate_caller_vcf(call["vrn_file"], truth_vcf, dd.get_sample_callable(data),
call["variantcaller"], detail_dir, data):
writer.writerow(stats)
return out_file
def _validate_caller_vcf(call_vcf, truth_vcf, callable_bed, svcaller, work_dir, data):
"""Validate a caller VCF against truth within callable regions using SURVIVOR.
Combines files with SURVIVOR merge and counts (https://github.com/fritzsedlazeck/SURVIVOR/)
"""
stats = _calculate_comparison_stats(truth_vcf)
call_vcf = _prep_vcf(call_vcf, callable_bed, dd.get_sample_name(data), dd.get_sample_name(data),
stats, work_dir, data)
truth_vcf = _prep_vcf(truth_vcf, callable_bed, vcfutils.get_samples(truth_vcf)[0],
"%s-truth" % dd.get_sample_name(data), stats, work_dir, data)
cmp_vcf = _survivor_merge(call_vcf, truth_vcf, stats, work_dir, data)
return _comparison_stats_from_merge(cmp_vcf, stats, svcaller, data)
def _comparison_stats_from_merge(in_file, stats, svcaller, data):
"""Extract true/false positive/negatives from a merged SURIVOR VCF.
"""
truth_stats = {"tp": [], "fn": [], "fp": []}
samples = ["truth" if x.endswith("-truth") else "eval" for x in vcfutils.get_samples(in_file)]
with open(in_file) as in_handle:
for call in (l.rstrip().split("\t") for l in in_handle if not l.startswith("#")):
supp_vec_str = [x for x in call[7].split(";") if x.startswith("SUPP_VEC=")][0]
_, supp_vec = supp_vec_str.split("=")
calls = dict(zip(samples, [int(x) for x in supp_vec]))
if calls["truth"] and calls["eval"]:
metric = "tp"
elif calls["truth"]:
metric = "fn"
else:
metric = "fp"
truth_stats[metric].append(_summarize_call(call))
return _to_csv(truth_stats, stats, dd.get_sample_name(data), svcaller)
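# Example of the SUPP_VEC logic above: with samples ordered as
# ["eval", "truth"], "SUPP_VEC=11" counts as a true positive, "SUPP_VEC=01"
# as a false negative and "SUPP_VEC=10" as a false positive.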
def _survivor_merge(call_vcf, truth_vcf, stats, work_dir, data):
"""Perform a merge of two callsets using SURVIVOR,
"""
out_file = os.path.join(work_dir, "eval-merge.vcf")
if not utils.file_uptodate(out_file, call_vcf):
in_call_vcf = call_vcf.replace(".vcf.gz", ".vcf")
if not utils.file_exists(in_call_vcf):
with file_transaction(data, in_call_vcf) as tx_in_call_vcf:
do.run("gunzip -c {call_vcf} > {tx_in_call_vcf}".format(**locals()))
in_truth_vcf = truth_vcf.replace(".vcf.gz", ".vcf")
if not utils.file_exists(in_truth_vcf):
with file_transaction(data, in_truth_vcf) as tx_in_truth_vcf:
do.run("gunzip -c {truth_vcf} > {tx_in_truth_vcf}".format(**locals()))
in_list_file = os.path.join(work_dir, "eval-inputs.txt")
with open(in_list_file, "w") as out_handle:
out_handle.write("%s\n%s\n" % (in_call_vcf, in_truth_vcf))
with file_transaction(data, out_file) as tx_out_file:
cmd = ("SURVIVOR merge {in_list_file} {stats[merge_size]} 1 0 0 0 {stats[min_size]} {tx_out_file}")
do.run(cmd.format(**locals()), "Merge SV files for validation: %s" % dd.get_sample_name(data))
return out_file
def _to_csv(truth_stats, stats, sample, svcaller):
out = []
for metric, vals in truth_stats.items():
for svtype in sorted(list(stats["svtypes"])):
count = len([x for x in vals if x["svtype"] == svtype])
out.append([sample, svcaller, svtype, metric, count])
for start, end in stats["ranges"]:
count = len([x for x in vals if (x["svtype"] == svtype
and x["size"] >= start and x["size"] < end)])
out.append([sample, svcaller, "%s_%s-%s" % (svtype, start, end), metric, count])
return out
def _calculate_comparison_stats(truth_vcf):
"""Identify calls to validate from the input truth VCF.
"""
# Avoid very small events for average calculations
min_stat_size = 50
min_median_size = 250
sizes = []
svtypes = set([])
with utils.open_gzipsafe(truth_vcf) as in_handle:
for call in (l.rstrip().split("\t") for l in in_handle if not l.startswith("#")):
stats = _summarize_call(call)
if stats["size"] > min_stat_size:
sizes.append(stats["size"])
svtypes.add(stats["svtype"])
pct10 = int(np.percentile(sizes, 10))
pct25 = int(np.percentile(sizes, 25))
pct50 = int(np.percentile(sizes, 50))
pct75 = int(np.percentile(sizes, 75))
ranges_detailed = [(int(min(sizes)), pct10), (pct10, pct25), (pct25, pct50),
(pct50, pct75), (pct75, max(sizes))]
ranges_split = [(int(min(sizes)), pct50), (pct50, max(sizes))]
return {"min_size": int(min(sizes) * 0.95), "max_size": int(max(sizes) + 1.05),
"svtypes": svtypes, "merge_size": int(np.percentile([x for x in sizes if x > min_median_size], 50)),
"ranges": []}
def _get_start_end(parts, index=7):
"""Retrieve start and end for a VCF record, skips BNDs without END coords
"""
start = parts[1]
end = [x.split("=")[-1] for x in parts[index].split(";") if x.startswith("END=")]
if end:
end = end[0]
return start, end
return None, None
def _summarize_call(parts):
"""Provide summary metrics on size and svtype for a SV call.
"""
svtype = [x.split("=")[1] for x in parts[7].split(";") if x.startswith("SVTYPE=")]
svtype = svtype[0] if svtype else ""
start, end = _get_start_end(parts)
return {"svtype": svtype, "size": int(end) - int(start)}
def _prep_vcf(in_file, region_bed, sample, new_sample, stats, work_dir, data):
"""Prepare VCF for SV validation:
- Subset to passing variants
- Subset to genotyped variants -- removes reference and no calls
- Selects and names samples
- Subset to callable regions
- Remove larger annotations which slow down VCF processing
"""
in_file = vcfutils.bgzip_and_index(in_file, data, remove_orig=False)
out_file = os.path.join(work_dir, "%s-vprep.vcf.gz" % utils.splitext_plus(os.path.basename(in_file))[0])
if not utils.file_uptodate(out_file, in_file):
callable_bed = _prep_callable_bed(region_bed, work_dir, stats, data)
with file_transaction(data, out_file) as tx_out_file:
ann_remove = _get_anns_to_remove(in_file)
ann_str = " | bcftools annotate -x {ann_remove}" if ann_remove else ""
cmd = ("bcftools view -T {callable_bed} -f 'PASS,.' --min-ac '1:nref' -s {sample} {in_file} "
+ ann_str +
r"| sed 's|\t{sample}|\t{new_sample}|' "
"| bgzip -c > {out_file}")
do.run(cmd.format(**locals()), "Create SV validation VCF for %s" % new_sample)
return vcfutils.bgzip_and_index(out_file, data["config"])
def _prep_callable_bed(in_file, work_dir, stats, data):
"""Sort and merge callable BED regions to prevent SV double counting
"""
out_file = os.path.join(work_dir, "%s-merge.bed.gz" % utils.splitext_plus(os.path.basename(in_file))[0])
gsort = config_utils.get_program("gsort", data)
if not utils.file_uptodate(out_file, in_file):
with file_transaction(data, out_file) as tx_out_file:
fai_file = ref.fasta_idx(dd.get_ref_file(data))
cmd = ("{gsort} {in_file} {fai_file} | bedtools merge -i - -d {stats[merge_size]} | "
"bgzip -c > {tx_out_file}")
do.run(cmd.format(**locals()), "Prepare SV callable BED regions")
return vcfutils.bgzip_and_index(out_file, data["config"])
def _get_anns_to_remove(in_file):
"""Find larger annotations, if present in VCF, that slow down processing.
"""
to_remove = ["ANN", "LOF"]
to_remove_str = tuple(["##INFO=<ID=%s" % x for x in to_remove])
cur_remove = []
with utils.open_gzipsafe(in_file) as in_handle:
for line in in_handle:
if not line.startswith("#"):
break
elif line.startswith(to_remove_str):
cur_id = line.split("ID=")[-1].split(",")[0]
cur_remove.append("INFO/%s" % cur_id)
return ",".join(cur_remove)
# -- BED based evaluation
EVENT_SIZES = [(100, 450), (450, 2000), (2000, 4000), (4000, 20000), (20000, 60000),
(60000, int(1e6))]
def _stat_str(x, n):
if n > 0:
val = float(x) / float(n) * 100.0
return {"label": "%.1f%% (%s / %s)" % (val, x, n), "val": val}
else:
return {"label": "", "val": 0}
def cnv_to_event(name, data):
"""Convert a CNV to an event name.
"""
cur_ploidy = ploidy.get_ploidy([data])
if name.startswith("cnv"):
num = max([int(x) for x in name.split("_")[0].replace("cnv", "").split(";")])
if num < cur_ploidy:
return "DEL"
elif num > cur_ploidy:
return "DUP"
else:
return name
else:
return name
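# Example of the conversion above, assuming a diploid sample (ploidy 2) and a
# hypothetical call name format: "cnv1_cnvkit" maps to "DEL", "cnv3_cnvkit"
# to "DUP", and "cnv2_cnvkit" is returned unchanged.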
def _evaluate_one(caller, svtype, size_range, ensemble, truth, data):
"""Compare a ensemble results for a caller against a specific caller and SV type.
"""
def cnv_matches(name):
return cnv_to_event(name, data) == svtype
def is_breakend(name):
return name.startswith("BND")
def in_size_range(max_buffer=0):
def _work(feat):
minf, maxf = size_range
buffer = min(max_buffer, int(((maxf + minf) / 2.0) / 10.0))
size = feat.end - feat.start
return size >= max([0, minf - buffer]) and size < maxf + buffer
return _work
def is_caller_svtype(feat):
for name in feat.name.split(","):
if ((name.startswith(svtype) or cnv_matches(name) or is_breakend(name))
and (caller == "sv-ensemble" or name.endswith(caller))):
return True
return False
minf, maxf = size_range
efeats = pybedtools.BedTool(ensemble).filter(in_size_range(0)).filter(is_caller_svtype).saveas().sort().merge()
tfeats = pybedtools.BedTool(truth).filter(in_size_range(0)).sort().merge().saveas()
etotal = efeats.count()
ttotal = tfeats.count()
match = efeats.intersect(tfeats, u=True).sort().merge().saveas().count()
return {"sensitivity": _stat_str(match, ttotal),
"precision": _stat_str(match, etotal)}
def _evaluate_multi(calls, truth_svtypes, work_dir, data):
base = os.path.join(work_dir, "%s-sv-validate" % (dd.get_sample_name(data)))
out_file = base + ".csv"
df_file = base + "-df.csv"
if any((not utils.file_uptodate(out_file, x["vrn_file"])
or not utils.file_uptodate(df_file, x["vrn_file"])) for x in calls):
with file_transaction(data, out_file) as tx_out_file:
with open(tx_out_file, "w") as out_handle:
with open(df_file, "w") as df_out_handle:
writer = csv.writer(out_handle)
dfwriter = csv.writer(df_out_handle)
writer.writerow(["svtype", "size", "caller", "sensitivity", "precision"])
dfwriter.writerow(["svtype", "size", "caller", "metric", "value", "label"])
for svtype, truth in truth_svtypes.items():
for size in EVENT_SIZES:
str_size = "%s-%s" % size
for call in calls:
call_bed = convert.to_bed(call, dd.get_sample_name(data), work_dir, calls, data)
if utils.file_exists(call_bed):
evalout = _evaluate_one(call["variantcaller"], svtype, size, call_bed,
truth, data)
writer.writerow([svtype, str_size, call["variantcaller"],
evalout["sensitivity"]["label"], evalout["precision"]["label"]])
for metric in ["sensitivity", "precision"]:
dfwriter.writerow([svtype, str_size, call["variantcaller"], metric,
evalout[metric]["val"], evalout[metric]["label"]])
return out_file, df_file
def _plot_evaluation(df_csv):
if mpl is None or plt is None or sns is None:
not_found = ", ".join([x for x in ['mpl', 'plt', 'sns'] if eval(x) is None])
logger.info("No validation plot. Missing imports: %s" % not_found)
return None
mpl.use('Agg', force=True)
df = pd.read_csv(df_csv).fillna("0%")
out = {}
for event in df["svtype"].unique():
out[event] = _plot_evaluation_event(df_csv, event)
return out
def _plot_evaluation_event(df_csv, svtype):
"""Provide plot of evaluation metrics for an SV event, stratified by event size.
"""
titles = {"INV": "Inversions", "DEL": "Deletions", "DUP": "Duplications",
"INS": "Insertions"}
out_file = "%s-%s.png" % (os.path.splitext(df_csv)[0], svtype)
sns.set(style='white')
if not utils.file_uptodate(out_file, df_csv):
metrics = ["sensitivity", "precision"]
df = pd.read_csv(df_csv).fillna("0%")
df = df[(df["svtype"] == svtype)]
event_sizes = _find_events_to_include(df, EVENT_SIZES)
fig, axs = plt.subplots(len(event_sizes), len(metrics), tight_layout=True)
if len(event_sizes) == 1:
axs = [axs]
callers = sorted(df["caller"].unique())
if "sv-ensemble" in callers:
callers.remove("sv-ensemble")
callers.append("sv-ensemble")
for i, size in enumerate(event_sizes):
size_label = "%s to %sbp" % size
size = "%s-%s" % size
for j, metric in enumerate(metrics):
ax = axs[i][j]
ax.get_xaxis().set_ticks([])
ax.spines['bottom'].set_visible(False)
ax.spines['left'].set_visible(False)
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.set_xlim(0, 125.0)
if i == 0:
ax.set_title(metric, size=12, y=1.2)
vals, labels = _get_plot_val_labels(df, size, metric, callers)
ax.barh(range(1,len(vals)+1), vals)
if j == 0:
ax.tick_params(axis='y', which='major', labelsize=8)
ax.locator_params(axis="y", tight=True)
ax.set_yticks(range(1,len(callers)+1,1))
ax.set_yticklabels(callers, va="center")
ax.text(100, len(callers)+1, size_label, fontsize=10)
else:
ax.get_yaxis().set_ticks([])
for ai, (val, label) in enumerate(zip(vals, labels)):
ax.annotate(label, (val + 0.75, ai + 1), va='center', size=7)
if svtype in titles:
fig.text(0.025, 0.95, titles[svtype], size=14)
fig.set_size_inches(7, len(event_sizes) + 1)
fig.savefig(out_file)
return out_file
def _find_events_to_include(df, event_sizes):
out = []
for size in event_sizes:
str_size = "%s-%s" % size
curdf = df[(df["size"] == str_size) & (df["metric"] == "sensitivity")]
for val in list(curdf["label"]):
if val != "0%":
out.append(size)
break
return out
def _get_plot_val_labels(df, size, metric, callers):
curdf = df[(df["size"] == size) & (df["metric"] == metric)]
vals, labels = [], []
for caller in callers:
row = curdf[curdf["caller"] == caller]
val = list(row["value"])[0]
if val == 0:
val = 0.1
vals.append(val)
labels.append(list(row["label"])[0])
return vals, labels
# -- general functionality
def evaluate(data):
"""Provide evaluations for multiple callers split by structural variant type.
"""
work_dir = utils.safe_makedir(os.path.join(data["dirs"]["work"], "structural",
dd.get_sample_name(data), "validate"))
truth_sets = tz.get_in(["config", "algorithm", "svvalidate"], data)
if truth_sets and data.get("sv"):
if isinstance(truth_sets, dict):
val_summary, df_csv = _evaluate_multi(data["sv"], truth_sets, work_dir, data)
summary_plots = _plot_evaluation(df_csv)
data["sv-validate"] = {"csv": val_summary, "plot": summary_plots, "df": df_csv}
else:
assert isinstance(truth_sets, six.string_types) and utils.file_exists(truth_sets), truth_sets
val_summary = _evaluate_vcf(data["sv"], truth_sets, work_dir, data)
title = "%s structural variants" % dd.get_sample_name(data)
summary_plots = validateplot.classifyplot_from_valfile(val_summary, outtype="png", title=title)
data["sv-validate"] = {"csv": val_summary, "plot": summary_plots[0] if len(summary_plots) > 0 else None}
return data
if __name__ == "__main__":
#_, df_csv = _evaluate_multi(["lumpy", "delly", "wham", "sv-ensemble"],
# {"DEL": "synthetic_challenge_set3_tumor_20pctmasked_truth_sv_DEL.bed"},
# "syn3-tumor-ensemble-filter.bed", "sv_exclude.bed")
#_, df_csv = _evaluate_multi(["lumpy", "delly", "cn_mops", "sv-ensemble"],
# {"DEL": "NA12878.50X.ldgp.molpb_val.20140508.bed"},
# "NA12878-ensemble.bed", "LCR.bed.gz")
import sys
_plot_evaluation(sys.argv[1])
| mit |
gavruskin/fitlands | models_HIV_2007.py | 1 | 9879 | import pandas
import numpy
from three_way_epistasis import get_next_ordering, ordering_to_fitness, epistasis_positive, epistasis_negative
__author__ = '@gavruskin'
# Gives the probabilities output[i][j] = p_{i, j}, where i, j are in {1, ..., 8}, given trial lists, using
# p_{i, j} = P(W_i < W_j) = \sum_x P(W_j = x) * P(W_i < x):
def ranking_probabilities(fit_data_list):
output = [[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0]]
for j in range(8):
for i in range(8):
if i != j:
probability_j = 1 / float(len(fit_data_list[j])) # That's P(W_j = x), which doesn't depend on x.
# Now find probability_i = \sum_x P(W_i < x):
count = 0
for x in range(len(fit_data_list[j])): # This is \sum_x
for r in range(len(fit_data_list[i])):
if fit_data_list[i][r] < fit_data_list[j][x]:
count += 1
probability_i = count / float(len(fit_data_list[i]))
# Multiply the running probability by p_{i, j}:
output[i][j] = probability_j * probability_i
return output
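# Illustrative sketch (hypothetical replicate data, not used by the analysis): with a
# single replicate per genotype the pairwise probabilities reduce to 0/1 indicators of
# the observed ordering.
def _example_ranking_probabilities():
    fit_data = [[0.10], [0.30], [0.20], [0.50],
                [0.15], [0.25], [0.45], [0.05]]
    p = ranking_probabilities(fit_data)
    # p[i][j] estimates P(W_{i+1} < W_{j+1}); e.g. p[7][3] == 1.0 because the single
    # value for genotype 8 (0.05) lies below the single value for genotype 4 (0.50).
    return p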
# The comparison (competition experiment) model.
# Returns the probability of epistasis given the trial data.
def epistasis_probability_from_comparisons(fit_data_list, threshold_prob):
# Compute probabilities P(W_i < W_j) = p_{i,j}:
p_ij = ranking_probabilities(fit_data_list)
# Loop through all rankings:
ordering = [1, 1, 1, 1, 1, 1, 1, 1]
ranking = [1, 2, 3, 4, 5, 6, 7, 8]
positives = {1, 5, 6, 7}
negatives = {4, 3, 2, 8}
repetitions = [1, 1, 1, 1, 1, 1, 1, 1]
positive_epi_prob = 0
negative_epi_prob = 0
total_prob_mass = 0
positive_rankings = []
negative_rankings = []
non_informative_rankings = []
# Compute the probability of the ranking as \Pi_{i, j} p_{i, j}.
rank_prob = 1
for j in range(len(ranking)):
for i in range(j):
rank_prob *= p_ij[ranking[i] - 1][ranking[j] - 1]
total_prob_mass += rank_prob
if epistasis_positive(ranking, positives, negatives, repetitions):
positive_epi_prob += rank_prob
if rank_prob > threshold_prob:
positive_rankings.append([ranking, rank_prob])
elif epistasis_negative(ranking, positives, negatives, repetitions):
negative_epi_prob += rank_prob
if rank_prob > threshold_prob:
negative_rankings.append([ranking, rank_prob])
elif rank_prob > threshold_prob:
non_informative_rankings.append([ranking, rank_prob])
while ordering != [8, 7, 6, 5, 4, 3, 2, 1]:
ordering = get_next_ordering(ordering)
ranking = ordering_to_fitness(ordering)
rank_prob = 1
for j in range(len(ranking)):
for i in range(j):
rank_prob *= p_ij[ranking[i] - 1][ranking[j] - 1]
total_prob_mass += rank_prob
if epistasis_positive(ranking, positives, negatives, repetitions):
positive_epi_prob += rank_prob
if rank_prob > threshold_prob:
positive_rankings.append([ranking, rank_prob])
elif epistasis_negative(ranking, positives, negatives, repetitions):
negative_epi_prob += rank_prob
if rank_prob > threshold_prob:
negative_rankings.append([ranking, rank_prob])
elif rank_prob > threshold_prob:
non_informative_rankings.append([ranking, rank_prob])
positive_epi_prob /= total_prob_mass
negative_epi_prob /= total_prob_mass
# print "Probability of positive epistasis: " + str(positive_epi_prob)
# print "Probability of negative epistasis: " + str(negative_epi_prob)
top_pos_epi_prob = 0
for i in range(len(positive_rankings)):
top_pos_epi_prob += positive_rankings[i][1]
top_neg_epi_prob = 0
for i in range(len(negative_rankings)):
top_neg_epi_prob += negative_rankings[i][1]
top_non_info_prob = 0
for i in range(len(non_informative_rankings)):
top_non_info_prob += non_informative_rankings[i][1]
print("Threshold probability: " + str(threshold_prob))
print("Top rankings with positive epistasis: " + str(positive_rankings))
print(str(len(positive_rankings)) + " in total")
print("With total probability: " + str(top_pos_epi_prob))
print("Top rankings with negative epistasis: " + str(negative_rankings))
print(str(len(negative_rankings)) + " in total")
print("With total probability: " + str(top_neg_epi_prob))
print("Top non-informative rankings: " + str(non_informative_rankings))
print(str(len(non_informative_rankings)) + " in total")
print("With total probability: " + str(top_non_info_prob) + "\n")
return [[positive_epi_prob, negative_epi_prob],
[positive_rankings, negative_rankings, non_informative_rankings]]
# Returns the probability of positive and negative epistasis from the fitness measurements.
def epistasis_from_values(fit_data_list):
epi_pos = epi_neg = total_count = 0
for w0 in range(len(fit_data_list[0])):
for w1 in range(len(fit_data_list[1])):
for w2 in range(len(fit_data_list[2])):
for w3 in range(len(fit_data_list[3])):
for w4 in range(len(fit_data_list[4])):
for w5 in range(len(fit_data_list[5])):
for w6 in range(len(fit_data_list[6])):
for w7 in range(len(fit_data_list[7])):
total_count += 1
e = (fit_data_list[0][w0] + fit_data_list[4][w4] + fit_data_list[5][w5] +
fit_data_list[6][w6]) - \
(fit_data_list[1][w1] + fit_data_list[2][w2] + fit_data_list[3][w3] +
fit_data_list[7][w7])
if e > 0:
epi_pos += 1
elif e < 0:
epi_neg += 1
epi_pos_prob = epi_pos / float(total_count)
epi_neg_prob = epi_neg / float(total_count)
return [epi_pos_prob, epi_neg_prob, epi_pos, epi_neg, total_count]
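# Illustrative sketch (hypothetical fitness values): with one measurement per genotype,
# ordered [f000, f001, f010, f100, f011, f101, f110, f111] as produced by
# datafile_hiv_process, the sign of
#   e = (f000 + f011 + f101 + f110) - (f001 + f010 + f100 + f111)
# decides the call; this example yields e = -1.0 and so returns [0.0, 1.0, 0, 1, 1].
def _example_epistasis_from_values():
    fit_data = [[1.0], [1.1], [1.2], [1.3], [1.4], [1.5], [1.6], [2.9]]
    # e = (1.0 + 1.4 + 1.5 + 1.6) - (1.1 + 1.2 + 1.3 + 2.9) = -1.0 < 0
    return epistasis_from_values(fit_data)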
# Returns k closest entries to the mean for each component of fit_data_list:
def closest_to_mean(fit_data_list, k, mean_type="mean"):
if mean_type == "mean":
means = [numpy.mean(fit_data_list[j]) for j in range(len(fit_data_list))]
    elif mean_type == "median":
        means = [numpy.median(fit_data_list[j]) for j in range(len(fit_data_list))]
    else:
        raise ValueError("mean_type must be 'mean' or 'median', got %r" % (mean_type,))
fit_data_list_copy = []
for r in range(len(fit_data_list)):
copy = [fit_data_list[r][l] for l in range(len(fit_data_list[r]))]
fit_data_list_copy.append(copy)
output = []
for r in range(k):
output_r = []
for l in range(len(means)):
close_to_mean_index = (numpy.abs(fit_data_list_copy[l] - means[l])).argmin()
output_r.append(fit_data_list_copy[l][close_to_mean_index])
del fit_data_list_copy[l][close_to_mean_index]
output.append(output_r)
return output
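# Illustrative sketch (hypothetical numbers): pick the k=2 values closest to the mean,
# without replacement, for a single genotype. The mean of [1, 2, 3, 10] is 4, so the
# closest value is 3, then 2.
def _example_closest_to_mean():
    return closest_to_mean([[1, 2, 3, 10]], 2)   # -> [[3], [2]]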
# Returns the list of lists of fitness values for the eight genotypes corresponding to the hard-coded mutations:
def datafile_hiv_process():
data_file = "2007_HIV_data.csv"
mutations = [["L", "M"], # mutations: L to M, M to V, t to Y
["M", "V"],
["t", "Y"]]
sites = [88, 244, 275] # sites: PRO L90M, RT M184V, RT T215Y
sites = [0] + [1] + sites # This is specific to the data file. Column 0 contains fitness, column 1 names.
values = pandas.read_csv(data_file, usecols=sites)
values.iloc[:, 0] = numpy.log10(values.iloc[:, 0]) # log10 scale of fitness values.
size = len(values.iloc[:, 1])
f000 = []
f001 = []
f010 = []
f100 = []
f011 = []
f101 = []
f110 = []
f111 = []
for m in range(size):
if (values.iloc[m, 2] == mutations[0][0]) & (values.iloc[m, 3] == mutations[1][0]) & \
(values.iloc[m, 4] == mutations[2][0]):
f000.append(values.iloc[m, 0])
elif (values.iloc[m, 2] == mutations[0][0]) & (values.iloc[m, 3] == mutations[1][0]) & \
(values.iloc[m, 4] == mutations[2][1]):
f001.append(values.iloc[m, 0])
elif (values.iloc[m, 2] == mutations[0][0]) & (values.iloc[m, 3] == mutations[1][1]) & \
(values.iloc[m, 4] == mutations[2][0]):
f010.append(values.iloc[m, 0])
elif (values.iloc[m, 2] == mutations[0][1]) & (values.iloc[m, 3] == mutations[1][0]) & \
(values.iloc[m, 4] == mutations[2][0]):
f100.append(values.iloc[m, 0])
elif (values.iloc[m, 2] == mutations[0][0]) & (values.iloc[m, 3] == mutations[1][1]) & \
(values.iloc[m, 4] == mutations[2][1]):
f011.append(values.iloc[m, 0])
elif (values.iloc[m, 2] == mutations[0][1]) & (values.iloc[m, 3] == mutations[1][0]) & \
(values.iloc[m, 4] == mutations[2][1]):
f101.append(values.iloc[m, 0])
elif (values.iloc[m, 2] == mutations[0][1]) & (values.iloc[m, 3] == mutations[1][1]) & \
(values.iloc[m, 4] == mutations[2][0]):
f110.append(values.iloc[m, 0])
elif (values.iloc[m, 2] == mutations[0][1]) & (values.iloc[m, 3] == mutations[1][1]) & \
(values.iloc[m, 4] == mutations[2][1]):
f111.append(values.iloc[m, 0])
return [f000, f001, f010, f100, f011, f101, f110, f111]
| gpl-3.0 |
lbishal/scikit-learn | sklearn/utils/validation.py | 16 | 26075 | """Utilities for input validation"""
# Authors: Olivier Grisel
# Gael Varoquaux
# Andreas Mueller
# Lars Buitinck
# Alexandre Gramfort
# Nicolas Tresegnie
# License: BSD 3 clause
import warnings
import numbers
import numpy as np
import scipy.sparse as sp
from ..externals import six
from ..utils.fixes import signature
from .deprecation import deprecated
from ..exceptions import DataConversionWarning as DataConversionWarning_
from ..exceptions import NonBLASDotWarning as NonBLASDotWarning_
from ..exceptions import NotFittedError as NotFittedError_
class DataConversionWarning(DataConversionWarning_):
pass
DataConversionWarning = deprecated("DataConversionWarning has been moved "
"into the sklearn.exceptions module. "
"It will not be available here from "
"version 0.19")(DataConversionWarning)
class NonBLASDotWarning(NonBLASDotWarning_):
pass
NonBLASDotWarning = deprecated("NonBLASDotWarning has been moved "
"into the sklearn.exceptions module. "
"It will not be available here from "
"version 0.19")(NonBLASDotWarning)
class NotFittedError(NotFittedError_):
pass
NotFittedError = deprecated("NotFittedError has been moved into the "
"sklearn.exceptions module. It will not be "
"available here from version 0.19")(NotFittedError)
FLOAT_DTYPES = (np.float64, np.float32, np.float16)
# Silenced by default to reduce verbosity. Turn on at runtime for
# performance profiling.
warnings.simplefilter('ignore', NonBLASDotWarning_)
def _assert_all_finite(X):
"""Like assert_all_finite, but only for ndarray."""
X = np.asanyarray(X)
# First try an O(n) time, O(1) space solution for the common case that
# everything is finite; fall back to O(n) space np.isfinite to prevent
# false positives from overflow in sum method.
if (X.dtype.char in np.typecodes['AllFloat'] and not np.isfinite(X.sum())
and not np.isfinite(X).all()):
raise ValueError("Input contains NaN, infinity"
" or a value too large for %r." % X.dtype)
def assert_all_finite(X):
"""Throw a ValueError if X contains NaN or infinity.
Input MUST be an np.ndarray instance or a scipy.sparse matrix."""
_assert_all_finite(X.data if sp.issparse(X) else X)
def as_float_array(X, copy=True, force_all_finite=True):
"""Converts an array-like to an array of floats
The new dtype will be np.float32 or np.float64, depending on the original
type. The function can create a copy or modify the argument depending
on the argument copy.
Parameters
----------
X : {array-like, sparse matrix}
copy : bool, optional
If True, a copy of X will be created. If False, a copy may still be
returned if X's dtype is not a floating point type.
force_all_finite : boolean (default=True)
Whether to raise an error on np.inf and np.nan in X.
Returns
-------
XT : {array, sparse matrix}
An array of type np.float
"""
if isinstance(X, np.matrix) or (not isinstance(X, np.ndarray)
and not sp.issparse(X)):
return check_array(X, ['csr', 'csc', 'coo'], dtype=np.float64,
copy=copy, force_all_finite=force_all_finite,
ensure_2d=False)
elif sp.issparse(X) and X.dtype in [np.float32, np.float64]:
return X.copy() if copy else X
elif X.dtype in [np.float32, np.float64]: # is numpy array
return X.copy('F' if X.flags['F_CONTIGUOUS'] else 'C') if copy else X
else:
return X.astype(np.float32 if X.dtype == np.int32 else np.float64)
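# Illustrative usage sketch (hypothetical helper, not part of the public API): integer
# input is promoted to the matching float width rather than always to float64.
def _example_as_float_array():
    X_int = np.arange(6, dtype=np.int32).reshape(2, 3)
    X_float = as_float_array(X_int)        # int32 -> float32
    assert X_float.dtype == np.float32
    return X_float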
def _is_arraylike(x):
"""Returns whether the input is array-like"""
return (hasattr(x, '__len__') or
hasattr(x, 'shape') or
hasattr(x, '__array__'))
def _num_samples(x):
"""Return number of samples in array-like x."""
if hasattr(x, 'fit'):
        # Don't get num_samples from an ensemble's length!
raise TypeError('Expected sequence or array-like, got '
'estimator %s' % x)
if not hasattr(x, '__len__') and not hasattr(x, 'shape'):
if hasattr(x, '__array__'):
x = np.asarray(x)
else:
raise TypeError("Expected sequence or array-like, got %s" %
type(x))
if hasattr(x, 'shape'):
if len(x.shape) == 0:
raise TypeError("Singleton array %r cannot be considered"
" a valid collection." % x)
return x.shape[0]
else:
return len(x)
def _shape_repr(shape):
"""Return a platform independent reprensentation of an array shape
Under Python 2, the `long` type introduces an 'L' suffix when using the
default %r format for tuples of integers (typically used to store the shape
of an array).
Under Windows 64 bit (and Python 2), the `long` type is used by default
in numpy shapes even when the integer dimensions are well below 32 bit.
The platform specific type causes string messages or doctests to change
from one platform to another which is not desirable.
Under Python 3, there is no more `long` type so the `L` suffix is never
introduced in string representation.
>>> _shape_repr((1, 2))
'(1, 2)'
>>> one = 2 ** 64 / 2 ** 64 # force an upcast to `long` under Python 2
>>> _shape_repr((one, 2 * one))
'(1, 2)'
>>> _shape_repr((1,))
'(1,)'
>>> _shape_repr(())
'()'
"""
if len(shape) == 0:
return "()"
joined = ", ".join("%d" % e for e in shape)
if len(shape) == 1:
# special notation for singleton tuples
joined += ','
return "(%s)" % joined
def check_consistent_length(*arrays):
"""Check that all arrays have consistent first dimensions.
Checks whether all objects in arrays have the same shape or length.
Parameters
----------
*arrays : list or tuple of input objects.
Objects that will be checked for consistent length.
"""
uniques = np.unique([_num_samples(X) for X in arrays if X is not None])
if len(uniques) > 1:
raise ValueError("Found arrays with inconsistent numbers of samples: "
"%s" % str(uniques))
def indexable(*iterables):
"""Make arrays indexable for cross-validation.
Checks consistent length, passes through None, and ensures that everything
can be indexed by converting sparse matrices to csr and converting
    non-iterable objects to arrays.
Parameters
----------
*iterables : lists, dataframes, arrays, sparse matrices
List of objects to ensure sliceability.
"""
result = []
for X in iterables:
if sp.issparse(X):
result.append(X.tocsr())
elif hasattr(X, "__getitem__") or hasattr(X, "iloc"):
result.append(X)
elif X is None:
result.append(X)
else:
result.append(np.array(X))
check_consistent_length(*result)
return result
def _ensure_sparse_format(spmatrix, accept_sparse, dtype, copy,
force_all_finite):
"""Convert a sparse matrix to a given format.
Checks the sparse format of spmatrix and converts if necessary.
Parameters
----------
spmatrix : scipy sparse matrix
Input to validate and convert.
accept_sparse : string, list of string or None (default=None)
String[s] representing allowed sparse matrix formats ('csc',
'csr', 'coo', 'dok', 'bsr', 'lil', 'dia'). None means that sparse
matrix input will raise an error. If the input is sparse but not in
the allowed format, it will be converted to the first listed format.
dtype : string, type or None (default=none)
Data type of result. If None, the dtype of the input is preserved.
copy : boolean (default=False)
Whether a forced copy will be triggered. If copy=False, a copy might
be triggered by a conversion.
force_all_finite : boolean (default=True)
Whether to raise an error on np.inf and np.nan in X.
Returns
-------
spmatrix_converted : scipy sparse matrix.
Matrix that is ensured to have an allowed type.
"""
if accept_sparse in [None, False]:
raise TypeError('A sparse matrix was passed, but dense '
'data is required. Use X.toarray() to '
'convert to a dense numpy array.')
if dtype is None:
dtype = spmatrix.dtype
changed_format = False
if (isinstance(accept_sparse, (list, tuple))
and spmatrix.format not in accept_sparse):
# create new with correct sparse
spmatrix = spmatrix.asformat(accept_sparse[0])
changed_format = True
if dtype != spmatrix.dtype:
# convert dtype
spmatrix = spmatrix.astype(dtype)
elif copy and not changed_format:
# force copy
spmatrix = spmatrix.copy()
if force_all_finite:
if not hasattr(spmatrix, "data"):
warnings.warn("Can't check %s sparse matrix for nan or inf."
% spmatrix.format)
else:
_assert_all_finite(spmatrix.data)
return spmatrix
def check_array(array, accept_sparse=None, dtype="numeric", order=None,
copy=False, force_all_finite=True, ensure_2d=True,
allow_nd=False, ensure_min_samples=1, ensure_min_features=1,
warn_on_dtype=False, estimator=None):
"""Input validation on an array, list, sparse matrix or similar.
    By default, the input is converted to an at least 2D numpy array.
If the dtype of the array is object, attempt converting to float,
raising on failure.
Parameters
----------
array : object
Input object to check / convert.
accept_sparse : string, list of string or None (default=None)
String[s] representing allowed sparse matrix formats, such as 'csc',
'csr', etc. None means that sparse matrix input will raise an error.
If the input is sparse but not in the allowed format, it will be
converted to the first listed format.
dtype : string, type, list of types or None (default="numeric")
Data type of result. If None, the dtype of the input is preserved.
If "numeric", dtype is preserved unless array.dtype is object.
If dtype is a list of types, conversion on the first type is only
performed if the dtype of the input is not in the list.
order : 'F', 'C' or None (default=None)
Whether an array will be forced to be fortran or c-style.
copy : boolean (default=False)
Whether a forced copy will be triggered. If copy=False, a copy might
be triggered by a conversion.
force_all_finite : boolean (default=True)
Whether to raise an error on np.inf and np.nan in X.
ensure_2d : boolean (default=True)
Whether to make X at least 2d.
allow_nd : boolean (default=False)
Whether to allow X.ndim > 2.
ensure_min_samples : int (default=1)
Make sure that the array has a minimum number of samples in its first
axis (rows for a 2D array). Setting to 0 disables this check.
ensure_min_features : int (default=1)
Make sure that the 2D array has some minimum number of features
(columns). The default value of 1 rejects empty datasets.
This check is only enforced when the input data has effectively 2
dimensions or is originally 1D and ``ensure_2d`` is True. Setting to 0
disables this check.
warn_on_dtype : boolean (default=False)
Raise DataConversionWarning if the dtype of the input data structure
does not match the requested dtype, causing a memory copy.
estimator : str or estimator instance (default=None)
If passed, include the name of the estimator in warning messages.
Returns
-------
X_converted : object
The converted and validated X.
"""
if isinstance(accept_sparse, str):
accept_sparse = [accept_sparse]
# store whether originally we wanted numeric dtype
dtype_numeric = dtype == "numeric"
dtype_orig = getattr(array, "dtype", None)
if not hasattr(dtype_orig, 'kind'):
# not a data type (e.g. a column named dtype in a pandas DataFrame)
dtype_orig = None
if dtype_numeric:
if dtype_orig is not None and dtype_orig.kind == "O":
# if input is object, convert to float.
dtype = np.float64
else:
dtype = None
if isinstance(dtype, (list, tuple)):
if dtype_orig is not None and dtype_orig in dtype:
# no dtype conversion required
dtype = None
else:
# dtype conversion required. Let's select the first element of the
# list of accepted types.
dtype = dtype[0]
if estimator is not None:
if isinstance(estimator, six.string_types):
estimator_name = estimator
else:
estimator_name = estimator.__class__.__name__
else:
estimator_name = "Estimator"
context = " by %s" % estimator_name if estimator is not None else ""
if sp.issparse(array):
array = _ensure_sparse_format(array, accept_sparse, dtype, copy,
force_all_finite)
else:
array = np.array(array, dtype=dtype, order=order, copy=copy)
if ensure_2d:
if array.ndim == 1:
if ensure_min_samples >= 2:
raise ValueError("%s expects at least 2 samples provided "
"in a 2 dimensional array-like input"
% estimator_name)
warnings.warn(
"Passing 1d arrays as data is deprecated in 0.17 and will "
"raise ValueError in 0.19. Reshape your data either using "
"X.reshape(-1, 1) if your data has a single feature or "
"X.reshape(1, -1) if it contains a single sample.",
DeprecationWarning)
array = np.atleast_2d(array)
# To ensure that array flags are maintained
array = np.array(array, dtype=dtype, order=order, copy=copy)
        # make sure we actually converted to numeric:
if dtype_numeric and array.dtype.kind == "O":
array = array.astype(np.float64)
if not allow_nd and array.ndim >= 3:
raise ValueError("Found array with dim %d. %s expected <= 2."
% (array.ndim, estimator_name))
if force_all_finite:
_assert_all_finite(array)
shape_repr = _shape_repr(array.shape)
if ensure_min_samples > 0:
n_samples = _num_samples(array)
if n_samples < ensure_min_samples:
raise ValueError("Found array with %d sample(s) (shape=%s) while a"
" minimum of %d is required%s."
% (n_samples, shape_repr, ensure_min_samples,
context))
if ensure_min_features > 0 and array.ndim == 2:
n_features = array.shape[1]
if n_features < ensure_min_features:
raise ValueError("Found array with %d feature(s) (shape=%s) while"
" a minimum of %d is required%s."
% (n_features, shape_repr, ensure_min_features,
context))
if warn_on_dtype and dtype_orig is not None and array.dtype != dtype_orig:
msg = ("Data with input dtype %s was converted to %s%s."
% (dtype_orig, array.dtype, context))
warnings.warn(msg, DataConversionWarning_)
return array
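# Illustrative usage sketch (hypothetical helper, not part of the public API): a nested
# list with the default dtype="numeric" comes back as a 2-d float64 ndarray.
def _example_check_array():
    X = check_array([[1, 2], [3, 4.5]])
    assert X.dtype == np.float64 and X.shape == (2, 2)
    return X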
def check_X_y(X, y, accept_sparse=None, dtype="numeric", order=None, copy=False,
force_all_finite=True, ensure_2d=True, allow_nd=False,
multi_output=False, ensure_min_samples=1,
ensure_min_features=1, y_numeric=False,
warn_on_dtype=False, estimator=None):
"""Input validation for standard estimators.
Checks X and y for consistent length, enforces X 2d and y 1d.
Standard input checks are only applied to y, such as checking that y
does not have np.nan or np.inf targets. For multi-label y, set
multi_output=True to allow 2d and sparse y. If the dtype of X is
object, attempt converting to float, raising on failure.
Parameters
----------
X : nd-array, list or sparse matrix
Input data.
y : nd-array, list or sparse matrix
Labels.
accept_sparse : string, list of string or None (default=None)
String[s] representing allowed sparse matrix formats, such as 'csc',
'csr', etc. None means that sparse matrix input will raise an error.
If the input is sparse but not in the allowed format, it will be
converted to the first listed format.
dtype : string, type, list of types or None (default="numeric")
Data type of result. If None, the dtype of the input is preserved.
If "numeric", dtype is preserved unless array.dtype is object.
If dtype is a list of types, conversion on the first type is only
performed if the dtype of the input is not in the list.
order : 'F', 'C' or None (default=None)
Whether an array will be forced to be fortran or c-style.
copy : boolean (default=False)
Whether a forced copy will be triggered. If copy=False, a copy might
be triggered by a conversion.
force_all_finite : boolean (default=True)
Whether to raise an error on np.inf and np.nan in X. This parameter
does not influence whether y can have np.inf or np.nan values.
ensure_2d : boolean (default=True)
Whether to make X at least 2d.
allow_nd : boolean (default=False)
Whether to allow X.ndim > 2.
multi_output : boolean (default=False)
Whether to allow 2-d y (array or sparse matrix). If false, y will be
validated as a vector. y cannot have np.nan or np.inf values if
multi_output=True.
ensure_min_samples : int (default=1)
Make sure that X has a minimum number of samples in its first
axis (rows for a 2D array).
ensure_min_features : int (default=1)
Make sure that the 2D array has some minimum number of features
(columns). The default value of 1 rejects empty datasets.
This check is only enforced when X has effectively 2 dimensions or
is originally 1D and ``ensure_2d`` is True. Setting to 0 disables
this check.
y_numeric : boolean (default=False)
Whether to ensure that y has a numeric type. If dtype of y is object,
it is converted to float64. Should only be used for regression
algorithms.
warn_on_dtype : boolean (default=False)
Raise DataConversionWarning if the dtype of the input data structure
does not match the requested dtype, causing a memory copy.
estimator : str or estimator instance (default=None)
If passed, include the name of the estimator in warning messages.
Returns
-------
X_converted : object
The converted and validated X.
y_converted : object
The converted and validated y.
"""
X = check_array(X, accept_sparse, dtype, order, copy, force_all_finite,
ensure_2d, allow_nd, ensure_min_samples,
ensure_min_features, warn_on_dtype, estimator)
if multi_output:
y = check_array(y, 'csr', force_all_finite=True, ensure_2d=False,
dtype=None)
else:
y = column_or_1d(y, warn=True)
_assert_all_finite(y)
if y_numeric and y.dtype.kind == 'O':
y = y.astype(np.float64)
check_consistent_length(X, y)
return X, y
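# Illustrative usage sketch (hypothetical helper): a column-vector y is ravelled to
# shape (n_samples,) with a DataConversionWarning, while X is validated as a 2-d
# float array of consistent length.
def _example_check_X_y():
    X, y = check_X_y([[0., 1.], [1., 0.], [2., 2.]], [[0], [1], [1]])
    assert X.shape == (3, 2) and y.shape == (3,)
    return X, y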
def column_or_1d(y, warn=False):
""" Ravel column or 1d numpy array, else raises an error
Parameters
----------
y : array-like
warn : boolean, default False
To control display of warnings.
Returns
-------
y : array
"""
shape = np.shape(y)
if len(shape) == 1:
return np.ravel(y)
if len(shape) == 2 and shape[1] == 1:
if warn:
warnings.warn("A column-vector y was passed when a 1d array was"
" expected. Please change the shape of y to "
"(n_samples, ), for example using ravel().",
DataConversionWarning_, stacklevel=2)
return np.ravel(y)
raise ValueError("bad input shape {0}".format(shape))
def check_random_state(seed):
"""Turn seed into a np.random.RandomState instance
If seed is None, return the RandomState singleton used by np.random.
If seed is an int, return a new RandomState instance seeded with seed.
If seed is already a RandomState instance, return it.
Otherwise raise ValueError.
"""
if seed is None or seed is np.random:
return np.random.mtrand._rand
if isinstance(seed, (numbers.Integral, np.integer)):
return np.random.RandomState(seed)
if isinstance(seed, np.random.RandomState):
return seed
raise ValueError('%r cannot be used to seed a numpy.random.RandomState'
' instance' % seed)
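# Illustrative usage sketch (hypothetical helper) covering the three accepted seed types.
def _example_check_random_state():
    rng = check_random_state(0)                                  # int -> new seeded RandomState
    assert check_random_state(rng) is rng                        # RandomState -> passed through
    assert check_random_state(None) is np.random.mtrand._rand    # None -> global RandomState
    return rng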
def has_fit_parameter(estimator, parameter):
"""Checks whether the estimator's fit method supports the given parameter.
Examples
--------
>>> from sklearn.svm import SVC
>>> has_fit_parameter(SVC(), "sample_weight")
True
"""
return parameter in signature(estimator.fit).parameters
def check_symmetric(array, tol=1E-10, raise_warning=True,
raise_exception=False):
"""Make sure that array is 2D, square and symmetric.
If the array is not symmetric, then a symmetrized version is returned.
Optionally, a warning or exception is raised if the matrix is not
symmetric.
Parameters
----------
array : nd-array or sparse matrix
Input object to check / convert. Must be two-dimensional and square,
otherwise a ValueError will be raised.
tol : float
Absolute tolerance for equivalence of arrays. Default = 1E-10.
raise_warning : boolean (default=True)
If True then raise a warning if conversion is required.
raise_exception : boolean (default=False)
If True then raise an exception if array is not symmetric.
Returns
-------
array_sym : ndarray or sparse matrix
Symmetrized version of the input array, i.e. the average of array
and array.transpose(). If sparse, then duplicate entries are first
summed and zeros are eliminated.
"""
if (array.ndim != 2) or (array.shape[0] != array.shape[1]):
raise ValueError("array must be 2-dimensional and square. "
"shape = {0}".format(array.shape))
if sp.issparse(array):
diff = array - array.T
# only csr, csc, and coo have `data` attribute
if diff.format not in ['csr', 'csc', 'coo']:
diff = diff.tocsr()
symmetric = np.all(abs(diff.data) < tol)
else:
symmetric = np.allclose(array, array.T, atol=tol)
if not symmetric:
if raise_exception:
raise ValueError("Array must be symmetric")
if raise_warning:
warnings.warn("Array is not symmetric, and will be converted "
"to symmetric by average with its transpose.")
if sp.issparse(array):
conversion = 'to' + array.format
array = getattr(0.5 * (array + array.T), conversion)()
else:
array = 0.5 * (array + array.T)
return array
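# Illustrative usage sketch (hypothetical helper): an asymmetric dense matrix is replaced
# by the average of itself and its transpose (raise_warning=False keeps the example quiet).
def _example_check_symmetric():
    A = np.array([[0., 1.], [3., 0.]])
    A_sym = check_symmetric(A, raise_warning=False)
    assert np.allclose(A_sym, [[0., 2.], [2., 0.]])
    return A_sym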
def check_is_fitted(estimator, attributes, msg=None, all_or_any=all):
"""Perform is_fitted validation for estimator.
Checks if the estimator is fitted by verifying the presence of
"all_or_any" of the passed attributes and raises a NotFittedError with the
given message.
Parameters
----------
estimator : estimator instance.
estimator instance for which the check is performed.
attributes : attribute name(s) given as string or a list/tuple of strings
Eg. : ["coef_", "estimator_", ...], "coef_"
msg : string
The default error message is, "This %(name)s instance is not fitted
yet. Call 'fit' with appropriate arguments before using this method."
For custom messages if "%(name)s" is present in the message string,
it is substituted for the estimator name.
Eg. : "Estimator, %(name)s, must be fitted before sparsifying".
all_or_any : callable, {all, any}, default all
Specify whether all or any of the given attributes must exist.
"""
if msg is None:
msg = ("This %(name)s instance is not fitted yet. Call 'fit' with "
"appropriate arguments before using this method.")
if not hasattr(estimator, 'fit'):
raise TypeError("%s is not an estimator instance." % (estimator))
if not isinstance(attributes, (list, tuple)):
attributes = [attributes]
if not all_or_any([hasattr(estimator, attr) for attr in attributes]):
# FIXME NotFittedError_ --> NotFittedError in 0.19
raise NotFittedError_(msg % {'name': type(estimator).__name__})
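# Illustrative usage sketch with a minimal, hypothetical estimator: the check raises
# before fit() has set the trailing-underscore attribute and passes afterwards.
def _example_check_is_fitted():
    class _Dummy(object):
        def fit(self):
            self.coef_ = 0.0
            return self
    est = _Dummy()
    try:
        check_is_fitted(est, "coef_")
    except NotFittedError_:
        pass                                  # not fitted yet
    check_is_fitted(est.fit(), "coef_")       # fitted: no exception
    return est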
def check_non_negative(X, whom):
"""
Check if there is any negative value in an array.
Parameters
----------
X : array-like or sparse matrix
Input data.
whom : string
Who passed X to this function.
"""
X = X.data if sp.issparse(X) else X
if (X < 0).any():
raise ValueError("Negative values in data passed to %s" % whom)
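# Illustrative usage sketch (hypothetical helper): non-negative data passes silently,
# while any negative entry raises ValueError.
def _example_check_non_negative():
    check_non_negative(np.array([[0., 1.], [2., 3.]]), "example caller")
    try:
        check_non_negative(np.array([-1., 2.]), "example caller")
    except ValueError:
        pass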
| bsd-3-clause |
scikit-hep/uproot | uproot3/tree.py | 1 | 103142 | #!/usr/bin/env python
# BSD 3-Clause License; see https://github.com/scikit-hep/uproot3/blob/master/LICENSE
from __future__ import absolute_import
import base64
import codecs
import glob
import importlib
import inspect
import itertools
import math
import numbers
import os
import re
import struct
import sys
import threading
from collections import namedtuple
from collections import OrderedDict
try:
from urlparse import urlparse
except ImportError:
from urllib.parse import urlparse
import numpy
import cachetools
import awkward0
import uproot_methods.profiles
import uproot3.rootio
from uproot3.rootio import _bytesid
from uproot3.rootio import _memsize
from uproot3.rootio import nofilter
from uproot3.rootio import _safename
from uproot3.interp.auto import interpret
from uproot3.interp.numerical import asdtype
from uproot3.interp.jagged import asjagged
from uproot3.interp.objects import asobj
from uproot3.interp.objects import asgenobj
from uproot3.source.cursor import Cursor
from uproot3.source.memmap import MemmapSource
from uproot3.source.xrootd import XRootDSource
from uproot3.source.http import HTTPSource
if sys.version_info[0] <= 2:
string_types = (unicode, str)
else:
string_types = (str, bytes)
def _delayedraise(excinfo):
if excinfo is not None:
cls, err, trc = excinfo
if sys.version_info[0] <= 2:
exec("raise cls, err, trc")
else:
raise err.with_traceback(trc)
def _filename_explode(x):
if isinstance(x, getattr(os, "PathLike", ())):
x = os.fspath(x)
elif hasattr(x, "__fspath__"):
x = x.__fspath__()
elif x.__class__.__module__ == "pathlib":
import pathlib
if isinstance(x, pathlib.Path):
x = str(x)
parsed = urlparse(x)
if _bytesid(parsed.scheme) == b"file" or len(parsed.scheme) == 0 or (os.name == "nt" and _filename_explode._windows_absolute.match(x) is not None):
if not (os.name == "nt" and _filename_explode._windows_absolute.match(x) is not None):
path = parsed.netloc + parsed.path
else:
path = x
pattern = os.path.expanduser(path)
if "*" in pattern or "?" in pattern or "[" in pattern:
out = sorted(glob.glob(pattern))
if len(out) == 0:
raise TypeError("no matches for filename {0}".format(repr(pattern)))
else:
out = [pattern]
return out
else:
return [x]
_filename_explode._windows_absolute = re.compile(r"^[A-Za-z]:\\")
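# Illustrative sketch (hypothetical paths, not used by the module): local filenames are
# expanded (globs and "~"), while URLs with a non-file scheme are passed through as
# single-element lists.
def _example_filename_explode():
    assert _filename_explode("/tmp/events.root") == ["/tmp/events.root"]
    assert _filename_explode("root://host//store/file.root") == ["root://host//store/file.root"]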
def _normalize_awkwardlib(awkwardlib):
if awkwardlib is None:
return awkward0
elif isinstance(awkwardlib, str):
return importlib.import_module(awkwardlib)
else:
return awkwardlib
def _normalize_entrystartstop(numentries, entrystart, entrystop):
if entrystart is None:
entrystart = 0
elif entrystart < 0:
entrystart += numentries
entrystart = min(numentries, max(0, entrystart))
if entrystop is None:
entrystop = numentries
elif entrystop < 0:
entrystop += numentries
entrystop = min(numentries, max(0, entrystop))
if entrystop < entrystart:
raise IndexError("entrystop must be greater than or equal to entrystart")
return int(entrystart), int(entrystop)
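# Illustrative sketch (hypothetical numbers): bounds behave like Python slice indices,
# with negative values counted from the end and everything clamped to [0, numentries].
def _example_normalize_entrystartstop():
    assert _normalize_entrystartstop(100, None, None) == (0, 100)
    assert _normalize_entrystartstop(100, -10, None) == (90, 100)
    assert _normalize_entrystartstop(100, 50, 10**6) == (50, 100)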
################################################################ high-level interface
def iterate(path, treepath, branches=None, entrysteps=float("inf"), outputtype=dict, namedecode=None, reportpath=False, reportfile=False, reportentries=False, flatten=False, flatname=None, awkwardlib=None, cache=None, basketcache=None, keycache=None, executor=None, blocking=True, localsource=MemmapSource.defaults, xrootdsource=XRootDSource.defaults, httpsource=HTTPSource.defaults, **options):
awkward0 = _normalize_awkwardlib(awkwardlib)
for tree, branchesinterp, globalentrystart, thispath, thisfile in _iterate(path, treepath, branches, awkward0, localsource, xrootdsource, httpsource, **options):
for start, stop, arrays in tree.iterate(branches=branchesinterp, entrysteps=entrysteps, outputtype=outputtype, namedecode=namedecode, reportentries=True, entrystart=0, entrystop=tree.numentries, flatten=flatten, flatname=flatname, awkwardlib=awkward0, cache=cache, basketcache=basketcache, keycache=keycache, executor=executor, blocking=blocking):
if getattr(outputtype, "__name__", None) == "DataFrame" and getattr(outputtype, "__module__", None) == "pandas.core.frame":
if type(arrays.index).__name__ == "MultiIndex":
if hasattr(arrays.index.levels[0], "array"):
index = arrays.index.levels[0].array # pandas>=0.24.0
else:
index = arrays.index.levels[0].values # pandas<0.24.0
awkward0.numpy.add(index, globalentrystart, out=index)
elif type(arrays.index).__name__ == "RangeIndex":
if hasattr(arrays.index, "start") and hasattr(arrays.index, "stop"):
indexstart = arrays.index.start # pandas>=0.25.0
indexstop = arrays.index.stop
else:
indexstart = arrays.index._start # pandas<0.25.0
indexstop = arrays.index._stop
arrays.index = type(arrays.index)(indexstart + globalentrystart, indexstop + globalentrystart)
else:
if hasattr(arrays.index, "array"):
index = arrays.index.array # pandas>=0.24.0
else:
index = arrays.index.values # pandas<0.24.0
awkward0.numpy.add(index, globalentrystart, out=index)
out = (arrays,)
if reportentries:
out = (globalentrystart + start, globalentrystart + stop) + out
if reportfile:
out = (thisfile,) + out
if reportpath:
out = (thispath,) + out
if len(out) == 1:
yield out[0]
else:
yield out
def _iterate(path, treepath, branches, awkward0, localsource, xrootdsource, httpsource, **options):
if isinstance(path, string_types):
paths = _filename_explode(path)
else:
paths = [y for x in path for y in _filename_explode(x)]
globalentrystart = 0
for path in paths:
file = uproot3.rootio.open(path, localsource=localsource, xrootdsource=xrootdsource, httpsource=httpsource, **options)
try:
tree = file[treepath]
except KeyError:
continue
branchesinterp = OrderedDict()
for branch, interpretation in tree._normalize_branches(branches, awkward0):
branchesinterp[branch.name] = interpretation
yield tree, branchesinterp, globalentrystart, path, file
globalentrystart += tree.numentries
################################################################ methods for TTree
class TTreeMethods(object):
# makes __doc__ attribute mutable before Python 3.3
__metaclass__ = type.__new__(type, "type", (uproot3.rootio.ROOTObject.__metaclass__,), {})
_copycontext = True
_vector_regex = re.compile(b"^vector<(.+)>$")
_objectpointer_regex = re.compile(br"\(([^()]*)\)$")
def _attachstreamer(self, branch, streamer, streamerinfosmap, isTClonesArray):
if streamer is None:
m = re.match(self._vector_regex, getattr(branch, "_fClassName", b""))
if m is None:
if branch.name in streamerinfosmap:
streamer = streamerinfosmap[branch.name]
else:
return
else:
if m.group(1) in streamerinfosmap:
substreamer = streamerinfosmap[m.group(1)]
if isinstance(substreamer, uproot3.rootio.TStreamerInfo):
streamer = uproot3.rootio.TStreamerSTL.vector(None, substreamer._fName)
else:
streamer = uproot3.rootio.TStreamerSTL.vector(substreamer._fType, substreamer._fTypeName)
else:
return
if isinstance(streamer, uproot3.rootio.TStreamerInfo):
if len(streamer._fElements) == 1 and isinstance(streamer._fElements[0], uproot3.rootio.TStreamerBase) and streamer._fElements[0]._fName == b"TObjArray":
if streamer._fName == b"TClonesArray":
return self._attachstreamer(branch, streamerinfosmap.get(branch._fClonesName, None), streamerinfosmap, True)
else:
# FIXME: can only determine streamer by reading some values?
return
elif len(streamer._fElements) == 1 and isinstance(streamer._fElements[0], uproot3.rootio.TStreamerSTL) and streamer._fElements[0]._fName == b"This":
return self._attachstreamer(branch, streamer._fElements[0], streamerinfosmap, isTClonesArray)
if isinstance(streamer, uproot3.rootio.TStreamerObject):
if streamer._fTypeName == b"TClonesArray":
return self._attachstreamer(branch, streamerinfosmap.get(branch._fClonesName, None), streamerinfosmap, True)
else:
return self._attachstreamer(branch, streamerinfosmap.get(streamer._fTypeName, None), streamerinfosmap, True)
branch._streamer = streamer
branch._isTClonesArray = isTClonesArray
if isinstance(streamer, uproot3.rootio.TStreamerSTL) and streamer._fSTLtype == uproot3.const.kSTLvector:
branch._vecstreamer = streamerinfosmap.get(re.match(self._vector_regex, streamer._fTypeName).group(1), None)
isTClonesArray = True
else:
branch._vecstreamer = None
digDeeperTypes = (uproot3.rootio.TStreamerObject, uproot3.rootio.TStreamerObjectAny, uproot3.rootio.TStreamerObjectPointer, uproot3.rootio.TStreamerObjectAnyPointer)
members = None
if isinstance(streamer, uproot3.rootio.TStreamerInfo):
members = streamer.members
elif isinstance(streamer, digDeeperTypes):
typename = streamer._fTypeName.rstrip(b"*")
if typename in streamerinfosmap:
m = self._objectpointer_regex.search(streamer._fTitle)
if typename == b'TClonesArray' and m is not None:
typename = m.group(1)
members = streamerinfosmap[typename].members
elif isinstance(streamer, uproot3.rootio.TStreamerSTL):
try:
# FIXME: string manipulation only works for one-parameter templates
typename = streamer._fTypeName[streamer._fTypeName.index(b"<") + 1 : streamer._fTypeName.rindex(b">")].rstrip(b"*")
except ValueError:
pass
else:
if typename in streamerinfosmap:
members = streamerinfosmap[typename].members
if members is not None:
for subbranch in branch.itervalues(recursive=True):
name = subbranch._fName
if name.startswith(branch._fName + b"."): # drop parent branch's name
name = name[len(branch._fName) + 1:]
submembers = members
while True: # drop nested struct names one at a time
try:
index = name.index(b".")
except ValueError:
break
else:
base, name = name[:index], name[index + 1:]
if base in submembers and isinstance(submembers[base], digDeeperTypes):
key = submembers[base]._fTypeName.rstrip(b"*")
try:
submembers = streamerinfosmap[key].members
except KeyError:
for regex, substitution in uproot3.interp.auto.streamer_aliases:
new_key, n_matched = regex.subn(substitution, key)
if n_matched:
submembers = streamerinfosmap[new_key].members
self._context.classes[_safename(key)] = self._context.classes[_safename(new_key)]
break
else:
raise
try:
name = name[:name.index(b"[")]
except ValueError:
pass
self._attachstreamer(subbranch, submembers.get(name, None), streamerinfosmap, isTClonesArray)
def _addprovenance(self, branch, context, parents = None):
if parents is None:
parents = [context.treename]
if len(branch._provenance) == 0:
branch._provenance = parents
for x in branch.itervalues():
x._provenance = parents + [branch.name]
self._addprovenance(x, context, x._provenance)
def _postprocess(self, source, cursor, context, parent):
self._context = context
self._context.treename = self.name
self._context.speedbump = True
for branch in self._fBranches:
self._attachstreamer(branch, context.streamerinfosmap.get(getattr(branch, "_fClassName", None), None), context.streamerinfosmap, False)
self._addprovenance(branch, context)
self._branchlookup = {}
self._fill_branchlookup(self._branchlookup)
leaf2branch = {}
for branch in self.itervalues(recursive=True):
if len(branch._fLeaves) == 1:
leaf2branch[id(branch._fLeaves[0])] = branch
for branch in self.itervalues(recursive=True):
if len(branch._fLeaves) > 0:
branch._countleaf = branch._fLeaves[0]._fLeafCount
if branch._countleaf is not None:
branch._countbranch = leaf2branch.get(id(branch._countleaf), None)
if getattr(self, "_fAliases", None) is None:
self.aliases = {}
else:
self.aliases = dict((alias._fName, alias._fTitle) for alias in self._fAliases)
def _fill_branchlookup(self, branchlookup):
for subbranch in self._fBranches:
subbranch._fill_branchlookup(branchlookup)
branchlookup[subbranch.name] = subbranch
@property
def name(self):
return self._fName
@property
def title(self):
return self._fTitle
@property
def numentries(self):
return int(self._fEntries)
@property
def numbranches(self):
count = 0
for x in self.itervalues(recursive=True):
count += 1
return count
def iterkeys(self, recursive=False, filtername=nofilter, filtertitle=nofilter, aliases=True):
for branch_name, branch in self.iteritems(recursive, filtername, filtertitle, aliases):
yield branch_name
def itervalues(self, recursive=False, filtername=nofilter, filtertitle=nofilter):
for branch_name, branch in self.iteritems(recursive, filtername, filtertitle, aliases=False):
yield branch
def iteritems(self, recursive=False, filtername=nofilter, filtertitle=nofilter, aliases=True):
for branch in self._fBranches:
branch_name = branch.name
if aliases:
branch_name = self.aliases.get(branch_name, branch_name)
if filtername(branch_name) and filtertitle(branch.title):
yield branch_name, branch
if recursive:
try:
iterator = branch.iteritems(recursive, filtername, filtertitle, aliases=aliases)
except TypeError:
                    # Probably an unknown `aliases` parameter.
                    # Try again without it.
iterator = branch.iteritems(recursive, filtername, filtertitle)
for n, b in iterator:
if recursive == '/':
n = branch_name + b'/' + n
yield n, b
def keys(self, recursive=False, filtername=nofilter, filtertitle=nofilter, aliases=True):
return list(self.iterkeys(recursive=recursive, filtername=filtername, filtertitle=filtertitle, aliases=aliases))
def _ipython_key_completions_(self):
"Support for completion of keys in an IPython kernel"
return [item.decode("ascii") for item in self.iterkeys()]
def values(self, recursive=False, filtername=nofilter, filtertitle=nofilter):
return list(self.itervalues(recursive=recursive, filtername=filtername, filtertitle=filtertitle))
def items(self, recursive=False, filtername=nofilter, filtertitle=nofilter, aliases=True):
return list(self.iteritems(recursive=recursive, filtername=filtername, filtertitle=filtertitle, aliases=aliases))
def allkeys(self, filtername=nofilter, filtertitle=nofilter, aliases=True):
return self.keys(recursive=True, filtername=filtername, filtertitle=filtertitle, aliases=aliases)
def allvalues(self, filtername=nofilter, filtertitle=nofilter):
return self.values(recursive=True, filtername=filtername, filtertitle=filtertitle)
def allitems(self, filtername=nofilter, filtertitle=nofilter, aliases=True):
return self.items(recursive=True, filtername=filtername, filtertitle=filtertitle, aliases=aliases)
def _get(self, name, recursive=True, filtername=nofilter, filtertitle=nofilter, aliases=True):
if b'/' in name:
# Look for exact subbranch
recursive = '/'
for n, b in self.iteritems(recursive=recursive, filtername=filtername, filtertitle=filtertitle, aliases=aliases):
if n == name:
self._branchlookup[name] = b
return b
raise uproot3.rootio._KeyError("not found: {0}\n in file: {1}".format(repr(name), self._context.sourcepath))
def get(self, name, recursive=True, filtername=nofilter, filtertitle=nofilter, aliases=True):
name = _bytesid(name)
try:
return self._branchlookup[name]
except KeyError:
return self._get(name, recursive, filtername, filtertitle, aliases)
def __contains__(self, name):
try:
self.get(name)
except KeyError:
return False
else:
return True
def mempartitions(self, numbytes, branches=None, entrystart=None, entrystop=None, keycache=None, linear=True):
m = _memsize(numbytes)
if m is not None:
numbytes = m
if numbytes <= 0:
raise ValueError("target numbytes must be positive")
awkward0 = _normalize_awkwardlib(None)
branches = list(self._normalize_branches(branches, awkward0))
entrystart, entrystop = _normalize_entrystartstop(self.numentries, entrystart, entrystop)
if not linear:
raise NotImplementedError("non-linear mempartition has not been implemented")
relevant_numbytes = 0.0
for branch, interpretation in branches:
if branch._recoveredbaskets is None:
branch._tryrecover()
for i, key in enumerate(branch._threadsafe_iterate_keys(keycache, False)):
start, stop = branch._entryoffsets[i], branch._entryoffsets[i + 1]
if entrystart < stop and start < entrystop:
this_numbytes = key._fObjlen * (min(stop, entrystop) - max(start, entrystart)) / float(stop - start)
assert this_numbytes >= 0.0
relevant_numbytes += this_numbytes
entrysteps = max(1, int(round(math.ceil((entrystop - entrystart) * numbytes / relevant_numbytes))))
start, stop = entrystart, entrystart
while stop < entrystop:
stop = min(stop + entrysteps, entrystop)
if stop > start:
yield start, stop
start = stop
def clusters(self, branches=None, entrystart=None, entrystop=None, strict=False):
awkward0 = _normalize_awkwardlib(None)
branches = list(self._normalize_branches(branches, awkward0))
# convenience class; simplifies presentation of the algorithm
class BranchCursor(object):
def __init__(self, branch):
self.branch = branch
self.basketstart = 0
self.basketstop = 0
@property
def entrystart(self):
return self.branch.basket_entrystart(self.basketstart)
@property
def entrystop(self):
return self.branch.basket_entrystop(self.basketstop)
cursors = [BranchCursor(branch) for branch, interpretation in branches if branch.numbaskets > 0]
if len(cursors) == 0:
yield _normalize_entrystartstop(self.numentries, entrystart, entrystop)
else:
# everybody starts at the same entry number; if there is no such place before someone runs out of baskets, there will be an exception
leadingstart = max(cursor.entrystart for cursor in cursors)
while not all(cursor.entrystart == leadingstart for cursor in cursors):
for cursor in cursors:
while cursor.entrystart < leadingstart:
cursor.basketstart += 1
cursor.basketstop += 1
leadingstart = max(cursor.entrystart for cursor in cursors)
entrystart, entrystop = _normalize_entrystartstop(self.numentries, entrystart, entrystop)
# move all cursors forward, yielding a (start, stop) pair if their baskets line up
while any(cursor.basketstop < cursor.branch.numbaskets for cursor in cursors):
# move all subleading baskets forward until they are no longer subleading
leadingstop = max(cursor.entrystop for cursor in cursors)
for cursor in cursors:
while cursor.entrystop < leadingstop:
cursor.basketstop += 1
# if they all line up, this is a good cluster
if all(cursor.entrystop == leadingstop for cursor in cursors):
# check to see if it's within the bounds the user requested (strictly or not strictly)
if strict:
if entrystart <= leadingstart and leadingstop <= entrystop:
yield leadingstart, leadingstop
else:
if entrystart < leadingstop and leadingstart < entrystop:
yield leadingstart, leadingstop
# anyway, move all the starts to the new stopping position and move all stops forward by one
leadingstart = leadingstop
for cursor in cursors:
cursor.basketstart = cursor.basketstop
cursor.basketstop += 1
# stop iterating if we're past all acceptable clusters
if leadingstart >= entrystop:
break
def array(self, branch, interpretation=None, entrystart=None, entrystop=None, flatten=False, awkwardlib=None, cache=None, basketcache=None, keycache=None, executor=None, blocking=True):
awkward0 = _normalize_awkwardlib(awkwardlib)
branches = list(self._normalize_branches(branch, awkward0))
if len(branches) == 1:
if interpretation is None:
tbranch, interpretation = branches[0]
else:
tbranch, _ = branches[0]
else:
raise ValueError("list of branch names or glob/regex matches more than one branch; use TTree.arrays (plural)")
return tbranch.array(interpretation=interpretation, entrystart=entrystart, entrystop=entrystop, flatten=flatten, awkwardlib=awkwardlib, cache=cache, basketcache=basketcache, keycache=keycache, executor=executor, blocking=blocking)
def arrays(self, branches=None, outputtype=dict, namedecode=None, entrystart=None, entrystop=None, flatten=False, flatname=None, awkwardlib=None, cache=None, basketcache=None, keycache=None, executor=None, blocking=True, recursive=True):
awkward0 = _normalize_awkwardlib(awkwardlib)
branches = list(self._normalize_branches(branches, awkward0))
for branch, interpretation in branches:
if branch._recoveredbaskets is None:
branch._tryrecover()
if flatten is None:
branches = [(branch, interpretation) for branch, interpretation in branches if not isinstance(interpretation, asjagged)]
flatten = False
# for the case of outputtype == pandas.DataFrame, do some preparation to fill DataFrames efficiently
ispandas = getattr(outputtype, "__name__", None) == "DataFrame" and getattr(outputtype, "__module__", None) == "pandas.core.frame"
entrystart, entrystop = _normalize_entrystartstop(self.numentries, entrystart, entrystop)
# start the job of filling the arrays
futures = None
if recursive and recursive is not True:
def wrap_name(branch, namedecode):
if len(branch._provenance) != 0:
if namedecode is None:
return recursive.join(branch._provenance + [branch.name])
else:
return recursive.join([p.decode(namedecode) for p in (branch._provenance + [branch.name])])
else:
return branch.name if namedecode is None else branch.name.decode(namedecode)
futures = [(wrap_name(branch, namedecode), interpretation, branch.array(interpretation=interpretation, entrystart=entrystart, entrystop=entrystop, flatten=(flatten and not ispandas), awkwardlib=awkward0, cache=cache, basketcache=basketcache, keycache=keycache, executor=executor, blocking=False)) for branch, interpretation in branches]
else:
futures = [(branch.name if namedecode is None else branch.name.decode(namedecode), interpretation, branch.array(interpretation=interpretation, entrystart=entrystart, entrystop=entrystop, flatten=(flatten and not ispandas), awkwardlib=awkward0, cache=cache, basketcache=basketcache, keycache=keycache, executor=executor, blocking=False)) for branch, interpretation in branches]
# make functions that wait for the filling job to be done and return the right outputtype
if outputtype == namedtuple:
outputtype = namedtuple("Arrays", [codecs.ascii_decode(branch.name, "replace")[0] if namedecode is None else branch.name.decode(namedecode) for branch, interpretation in branches])
def wait():
return outputtype(*[future() for name, interpretation, future in futures])
elif ispandas:
import uproot3._connect._pandas
def wait():
return uproot3._connect._pandas.futures2df(futures, outputtype, entrystart, entrystop, flatten, flatname, awkward0)
elif isinstance(outputtype, type) and issubclass(outputtype, dict):
def wait():
return outputtype((name, future()) for name, interpretation, future in futures)
elif isinstance(outputtype, type) and issubclass(outputtype, (list, tuple)):
def wait():
return outputtype(future() for name, interpretation, future in futures)
else:
def wait():
return outputtype(*[future() for name, interpretation, future in futures])
# if blocking, return the result of that function; otherwise, the function itself
if blocking:
return wait()
else:
return wait
def lazyarray(self, branch, interpretation=None, entrysteps=None, entrystart=None, entrystop=None, flatten=False, awkwardlib=None, cache=None, basketcache=None, keycache=None, executor=None, persistvirtual=False, chunked=True):
awkward0 = _normalize_awkwardlib(awkwardlib)
branches = list(self._normalize_branches(branch, awkward0))
if len(branches) == 1:
if interpretation is None:
tbranch, interpretation = branches[0]
else:
tbranch, _ = branches[0]
else:
raise ValueError("list of branch names or glob/regex matches more than one branch; use TTree.lazyarrays (plural)")
return tbranch.lazyarray(interpretation=interpretation, entrysteps=entrysteps, entrystart=entrystart, entrystop=entrystop, flatten=flatten, awkwardlib=awkwardlib, cache=cache, basketcache=basketcache, keycache=keycache, executor=executor, persistvirtual=persistvirtual, chunked=chunked)
def lazyarrays(self, branches=None, namedecode="utf-8", entrysteps=None, entrystart=None, entrystop=None, flatten=False, profile=None, awkwardlib=None, cache=None, basketcache=None, keycache=None, executor=None, persistvirtual=False, chunked=True):
entrystart, entrystop = _normalize_entrystartstop(self.numentries, entrystart, entrystop)
if not chunked and entrysteps is None:
entrysteps = float('inf')
entrysteps = list(self._normalize_entrysteps(entrysteps, branches, entrystart, entrystop, keycache))
awkward0 = _normalize_awkwardlib(awkwardlib)
branches = list(self._normalize_branches(branches, awkward0))
for branch, interpretation in branches:
if branch._recoveredbaskets is None:
branch._tryrecover()
lazytree = _LazyTree(self._context.sourcepath, self._context.treename, self, dict((b.name, x) for b, x in branches), flatten, awkward0.__name__, basketcache, keycache, executor)
out = awkward0.Table()
for branch, interpretation in branches:
inner = interpretation
while isinstance(inner, asjagged):
inner = inner.content
if isinstance(inner, asobj) and getattr(inner.cls, "_arraymethods", None) is not None:
VirtualArray = awkward0.Methods.mixin(inner.cls._arraymethods, awkward0.VirtualArray)
elif isinstance(inner, asgenobj) and getattr(inner.generator.cls, "_arraymethods", None) is not None:
VirtualArray = awkward0.Methods.mixin(inner.generator.cls._arraymethods, awkward0.VirtualArray)
else:
VirtualArray = awkward0.VirtualArray
name = branch.name.decode("ascii") if namedecode is None else branch.name.decode(namedecode)
if chunked:
chunks = []
counts = []
for start, stop in entrysteps:
chunks.append(VirtualArray(lazytree, (branch.name, start, stop), cache=cache, type=awkward0.type.ArrayType(stop - start, interpretation.type), persistvirtual=persistvirtual))
counts.append(stop - start)
out[name] = awkward0.ChunkedArray(chunks, counts)
out[name].__doc__ = branch.title.decode('ascii')
else:
start, stop = entrysteps[0]
out[name] = VirtualArray(lazytree, (branch.name, start, stop), cache=cache, type=awkward0.type.ArrayType(stop - start, interpretation.type), persistvirtual=persistvirtual)
out[name].__doc__ = branch.title.decode('ascii')
if profile is not None:
out = uproot_methods.profiles.transformer(profile)(out)
return out
def _normalize_entrysteps(self, entrysteps, branches, entrystart, entrystop, keycache):
numbytes = _memsize(entrysteps)
if numbytes is not None:
return self.mempartitions(numbytes, branches=branches, entrystart=entrystart, entrystop=entrystop, keycache=keycache, linear=True)
if isinstance(entrysteps, string_types):
raise ValueError("string {0} does not match the memory size pattern (number followed by B/kB/MB/GB/etc.)".format(repr(entrysteps)))
if entrysteps is None:
return self.clusters(branches, entrystart=entrystart, entrystop=entrystop, strict=False)
elif entrysteps == float("inf"):
return [(entrystart, min(entrystop, self.numentries))]
elif isinstance(entrysteps, (numbers.Integral, numpy.integer)):
entrystepsize = entrysteps
if entrystepsize <= 0:
raise ValueError("if an integer, entrysteps must be positive")
effectivestop = min(entrystop, self.numentries)
starts = numpy.arange(entrystart, effectivestop, entrystepsize)
stops = numpy.append(starts[1:], effectivestop)
return zip(starts, stops)
else:
try:
iter(entrysteps)
except TypeError:
raise TypeError("entrysteps must be None for cluster iteration, a positive integer for equal steps in number of entries (inf for maximal), a memory size string (number followed by B/kB/MB/GB/etc.), or an iterable of 2-tuples for explicit entry starts (inclusive) and stops (exclusive)")
return entrysteps
def iterate(self, branches=None, entrysteps=None, outputtype=dict, namedecode=None, reportentries=False, entrystart=None, entrystop=None, flatten=False, flatname=None, awkwardlib=None, cache=None, basketcache=None, keycache=None, executor=None, blocking=True):
if keycache is None:
keycache = {}
if basketcache is None:
basketcache = {}
explicit_basketcache = False
else:
explicit_basketcache = True
entrystart, entrystop = _normalize_entrystartstop(self.numentries, entrystart, entrystop)
entrysteps = self._normalize_entrysteps(entrysteps, branches, entrystart, entrystop, keycache)
awkward0 = _normalize_awkwardlib(awkwardlib)
branches = list(self._normalize_branches(branches, awkward0))
for branch, interpretation in branches:
if branch._recoveredbaskets is None:
branch._tryrecover()
# for the case of outputtype == pandas.DataFrame, do some preparation to fill DataFrames efficiently
ispandas = getattr(outputtype, "__name__", None) == "DataFrame" and getattr(outputtype, "__module__", None) == "pandas.core.frame"
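        # evaluate resolves one requested branch: reuse the cached "past" result
        # when available, otherwise finalize the "future" produced by
        # _step_array, optionally flattening jagged output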
def evaluate(branch, interpretation, future, past, cachekey, pythonize):
if future is None:
return past
else:
out = interpretation.finalize(future(), branch)
if cache is not None:
cache[cachekey] = out
if flatten and isinstance(interpretation, asjagged):
return out.flatten()
elif pythonize:
return list(out)
else:
return out
if outputtype == namedtuple:
outputtype = namedtuple("Arrays", [codecs.ascii_decode(branch.name, "replace")[0] if namedecode is None else branch.name.decode(namedecode) for branch, interpretation in branches])
def wrap_for_python_scope(futures, start, stop):
return lambda: outputtype(*[evaluate(branch, interpretation, future, past, cachekey, False) for branch, interpretation, future, past, cachekey in futures])
elif ispandas:
import uproot3._connect._pandas
def wrap_for_python_scope(futures, start, stop):
def wrap_again(branch, interpretation, future):
return lambda: interpretation.finalize(future(), branch)
return lambda: uproot3._connect._pandas.futures2df([(branch.name, interpretation, wrap_again(branch, interpretation, future)) for branch, interpretation, future, past, cachekey in futures], outputtype, start, stop, flatten, flatname, awkward0)
elif isinstance(outputtype, type) and issubclass(outputtype, dict):
def wrap_for_python_scope(futures, start, stop):
return lambda: outputtype((branch.name if namedecode is None else branch.name.decode(namedecode), evaluate(branch, interpretation, future, past, cachekey, False)) for branch, interpretation, future, past, cachekey in futures)
elif isinstance(outputtype, type) and issubclass(outputtype, (list, tuple)):
def wrap_for_python_scope(futures, start, stop):
return lambda: outputtype(evaluate(branch, interpretation, future, past, cachekey, False) for branch, interpretation, future, past, cachekey in futures)
else:
def wrap_for_python_scope(futures, start, stop):
return lambda: outputtype(*[evaluate(branch, interpretation, future, past, cachekey, False) for branch, interpretation, future, past, cachekey in futures])
for start, stop in entrysteps:
start = max(start, entrystart)
stop = min(stop, entrystop)
if start > stop:
continue
futures = []
for branch, interpretation in branches:
cachekey = branch._cachekey(interpretation, start, stop)
if branch.numbaskets == 0:
futures.append((branch, interpretation, interpretation.empty, None, cachekey))
else:
basketstart, basketstop = branch._basketstartstop(start, stop)
basket_itemoffset = branch._basket_itemoffset(interpretation, basketstart, basketstop, keycache)
basket_entryoffset = branch._basket_entryoffset(basketstart, basketstop)
if cache is not None:
out = cache.get(cachekey, None)
if out is not None:
futures.append((branch, interpretation, None, out, cachekey))
continue
future = branch._step_array(interpretation, basket_itemoffset, basket_entryoffset, start, stop, awkward0, basketcache, keycache, executor, explicit_basketcache)
futures.append((branch, interpretation, future, None, cachekey))
out = wrap_for_python_scope(futures, start, stop)
if blocking:
out = out()
if reportentries:
yield start, stop, out
else:
yield out
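    # An illustrative call pattern for iterate (branch and helper names are
    # hypothetical):
    #
    #     for chunk in tree.iterate(["px"], entrysteps=5000, namedecode="utf-8"):
    #         do_something(chunk["px"])    # chunk is a dict by default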
    def _format(self, foldnames=False):
        # TODO: add TTree data to the bottom of this
        # the flag is relayed to each TBranch's _format (it is not an indent string)
        out = []
        for branch in self._fBranches:
            out.extend(branch._format(foldnames))
        return out
def show(self, foldnames=False, stream=sys.stdout):
if stream is None:
return "\n".join(self._format(foldnames))
else:
for line in self._format(foldnames):
stream.write(line)
stream.write("\n")
def _recover(self):
for branch in self.allvalues():
branch._recover()
def matches(self, branches):
awkward0 = _normalize_awkwardlib(None)
return [b.name for b, i in self._normalize_branches(branches, awkward0, allownone=False, allowcallable=False, allowdict=False, allowstring=True)]
_branch_regex = re.compile(b"^/(.*)/([iLmsux]*)$")
@staticmethod
def _branch_flags(flags):
flagsbyte = 0
for flag in flags:
if flag == "i":
flagsbyte += re.I
elif flag == "L":
flagsbyte += re.L
elif flag == "m":
flagsbyte += re.M
elif flag == "s":
flagsbyte += re.S
elif flag == "u":
flagsbyte += re.U
elif flag == "x":
flagsbyte += re.X
return flagsbyte
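    # Branch specifications written as b"/pattern/flags" are treated as regular
    # expressions; _branch_flags maps the characters i, L, m, s, u, x onto the
    # corresponding re module flags. Other names may use *, ?, [] glob wildcards.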
def _normalize_branches(self, arg, awkward0, allownone=True, allowcallable=True, allowdict=True, allowstring=True, aliases=True):
if allownone and arg is None: # no specification; read all branches
for branch in self.allvalues(): # that have interpretations
interpretation = interpret(branch, awkward0)
if interpretation is not None:
yield branch, interpretation
elif allowcallable and callable(arg):
for branch in self.allvalues():
result = arg(branch)
if result is None or result is False:
pass
elif result is True: # function is a filter
interpretation = interpret(branch, awkward0)
if interpretation is not None:
yield branch, interpretation
else: # function is giving interpretations
yield branch, branch._normalize_dtype(result, awkward0)
elif allowdict and isinstance(arg, dict):
for word, interpretation in arg.items():
word = _bytesid(word)
isregex = re.match(self._branch_regex, word)
if isregex is not None:
regex, flags = isregex.groups()
for name, branch in self.iteritems(recursive=True, aliases=aliases):
if re.match(regex, name, self._branch_flags(flags)):
yield branch, branch._normalize_dtype(interpretation, awkward0)
elif b"*" in word or b"?" in word or b"[" in word:
for name, branch in self.iteritems(recursive=True, aliases=aliases):
if name == word or glob.fnmatch.fnmatchcase(name, word):
yield branch, branch._normalize_dtype(interpretation, awkward0)
else:
branch = self.get(word, aliases=aliases)
yield branch, branch._normalize_dtype(interpretation, awkward0)
elif allowstring and isinstance(arg, string_types):
for x in self._normalize_branches([arg], awkward0):
yield x
else:
try:
words = iter(arg) # only way to check for iterable (in general)
except Exception:
raise TypeError("'branches' argument not understood")
else:
for word in words:
word = _bytesid(word)
isregex = re.match(self._branch_regex, word)
if isregex is not None:
regex, flags = isregex.groups()
for name, branch in self.iteritems(recursive=True, aliases=aliases):
if re.match(regex, name, self._branch_flags(flags)):
interpretation = interpret(branch, awkward0)
if interpretation is None:
if name == word:
raise ValueError("cannot interpret branch {0} as a Python type\n in file: {1}".format(repr(branch.name), self._context.sourcepath))
else:
yield branch, interpretation
elif b"*" in word or b"?" in word or b"[" in word:
for name, branch in self.iteritems(recursive=True, aliases=aliases):
if name == word or glob.fnmatch.fnmatchcase(name, word):
interpretation = interpret(branch, awkward0)
if interpretation is None:
if name == word:
raise ValueError("cannot interpret branch {0} as a Python type\n in file: {1}".format(repr(branch.name), self._context.sourcepath))
else:
yield branch, interpretation
else:
branch = self.get(word, aliases=aliases)
interpretation = interpret(branch, awkward0)
if interpretation is None:
raise ValueError("cannot interpret branch {0} as a Python type\n in file: {1}".format(repr(branch.name), self._context.sourcepath))
else:
yield branch, interpretation
def __len__(self):
return self.numentries
def __getitem__(self, name):
return self.get(name)
def __iter__(self):
# prevent Python's attempt to interpret __len__ and __getitem__ as iteration
raise TypeError("'TTree' object is not iterable")
@property
def pandas(self):
import uproot3._connect._pandas
return uproot3._connect._pandas.TTreeMethods_pandas(self)
################################################################ methods for TBranch
class TBranchMethods(object):
# makes __doc__ attribute mutable before Python 3.3
__metaclass__ = type.__new__(type, "type", (uproot3.rootio.ROOTObject.__metaclass__,), {})
def _postprocess(self, source, cursor, context, parent):
self._source = source
self._context = context
self._streamer = None
self._interpretation = None
self._provenance = []
self._numgoodbaskets = 0
for i, x in enumerate(self._fBasketSeek):
if x == 0 or i == self._fWriteBasket:
break
self._numgoodbaskets += 1
if self.numentries == self._fBasketEntry[self._numgoodbaskets]:
self._recoveredbaskets = []
self._entryoffsets = self._fBasketEntry[: self._numgoodbaskets + 1].tolist()
self._recoverylock = None
else:
self._recoveredbaskets = None
self._entryoffsets = None
self._recoverylock = threading.Lock()
self._countbranch = None
self._tree_iofeatures = 0
if hasattr(parent, "_fIOFeatures"):
self._tree_iofeatures = parent._fIOFeatures._fIOBits
def _fill_branchlookup(self, branchlookup):
for subbranch in self._fBranches:
subbranch._fill_branchlookup(branchlookup)
branchlookup[subbranch.name] = subbranch
@property
def name(self):
return self._fName
@property
def title(self):
return self._fTitle
@property
def interpretation(self):
awkward0 = _normalize_awkwardlib(None)
if self._interpretation is None:
self._interpretation = interpret(self, awkward0)
return self._interpretation
@property
def countbranch(self):
return self._countbranch
@property
def countleaf(self):
return self._countleaf
@property
def numentries(self):
return int(self._fEntries) # or self._fEntryNumber?
@property
def numbranches(self):
count = 0
for x in self.itervalues(recursive=True):
count += 1
return count
def iterkeys(self, recursive=False, filtername=nofilter, filtertitle=nofilter):
for branch_name, branch in self.iteritems(recursive, filtername, filtertitle):
yield branch_name
def itervalues(self, recursive=False, filtername=nofilter, filtertitle=nofilter):
for branch_name, branch in self.iteritems(recursive, filtername, filtertitle):
yield branch
def iteritems(self, recursive=False, filtername=nofilter, filtertitle=nofilter):
for branch in self._fBranches:
branch_name = branch.name
if filtername(branch_name) and filtertitle(branch.title):
yield branch_name, branch
if recursive:
iterator = branch.iteritems(recursive, filtername, filtertitle)
for n, b in iterator:
if recursive == '/':
n = branch_name + b'/' + n
yield n, b
def keys(self, recursive=False, filtername=nofilter, filtertitle=nofilter):
return list(self.iterkeys(recursive=recursive, filtername=filtername, filtertitle=filtertitle))
def _ipython_key_completions_(self):
"Support for completion of keys in an IPython kernel"
return [item.decode("ascii") for item in self.iterkeys()]
def values(self, recursive=False, filtername=nofilter, filtertitle=nofilter):
return list(self.itervalues(recursive=recursive, filtername=filtername, filtertitle=filtertitle))
def items(self, recursive=False, filtername=nofilter, filtertitle=nofilter):
return list(self.iteritems(recursive=recursive, filtername=filtername, filtertitle=filtertitle))
def allkeys(self, recursive=False, filtername=nofilter, filtertitle=nofilter):
return self.keys(recursive=True, filtername=filtername, filtertitle=filtertitle)
def allvalues(self, filtername=nofilter, filtertitle=nofilter):
return self.values(recursive=True, filtername=filtername, filtertitle=filtertitle)
def allitems(self, filtername=nofilter, filtertitle=nofilter):
return self.items(recursive=True, filtername=filtername, filtertitle=filtertitle)
def _get(self, name, recursive=True, filtername=nofilter, filtertitle=nofilter):
if b'/' in name:
# Look for exact subbranch
recursive = '/'
for n, b in self.iteritems(recursive=recursive, filtername=filtername, filtertitle=filtertitle):
if n == name:
return b
raise uproot3.rootio._KeyError("not found: {0}\n in file: {1}".format(repr(name), self._context.sourcepath))
def get(self, name, recursive=True, filtername=nofilter, filtertitle=nofilter):
name = _bytesid(name)
return self._get(name, recursive, filtername, filtertitle)
@property
def numbaskets(self):
if self._recoveredbaskets is None:
self._tryrecover()
return self._numgoodbaskets + len(self._recoveredbaskets)
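    # the cache-key helpers below embed the file UUID, tree name, and branch
    # name (plus interpretation and entry range, or basket index), so cached
    # results are never confused across files, branches, or interpretations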
def _cachekey(self, interpretation, entrystart, entrystop):
return "{0};{1};{2};{3};{4}-{5}".format(base64.b64encode(self._context.uuid).decode("ascii"), self._context.treename.decode("ascii"), self.name.decode("ascii"), interpretation.identifier, entrystart, entrystop)
def _basketcachekey(self, i):
return "{0};{1};{2};{3};raw".format(base64.b64encode(self._context.uuid).decode("ascii"), self._context.treename.decode("ascii"), self.name.decode("ascii"), i)
def _keycachekey(self, i):
return "{0};{1};{2};{3};key".format(base64.b64encode(self._context.uuid).decode("ascii"), self._context.treename.decode("ascii"), self.name.decode("ascii"), i)
def _threadsafe_key(self, i, keycache, complete):
key = None
if keycache is not None:
key = keycache.get(self._keycachekey(i), None)
if key is None:
keysource = self._source.threadlocal()
try:
key = self._basketkey(keysource, i, complete)
if keycache is not None:
keycache[self._keycachekey(i)] = key
finally:
keysource.dismiss()
return key
def _threadsafe_iterate_keys(self, keycache, complete, basketstart=None, basketstop=None):
if basketstart is None:
basketstart = 0
if basketstop is None:
basketstop = self.numbaskets
done = False
if keycache is not None:
keys = [keycache.get(self._keycachekey(i), None) for i in range(basketstart, basketstop)]
if all(x is not None for x in keys):
if not complete or all(hasattr(x, "border") for x in keys):
for key in keys:
yield key
done = True
if not done:
keysource = self._source.threadlocal()
try:
for i in range(basketstart, basketstop):
key = None if keycache is None else keycache.get(self._keycachekey(i), None)
if key is None or (complete and not hasattr(key, "border")):
key = self._basketkey(keysource, i, complete)
if keycache is not None:
keycache[self._keycachekey(i)] = key
yield key
else:
yield key
finally:
keysource.dismiss()
def uncompressedbytes(self, keycache=None):
return sum(key._fObjlen for key in self._threadsafe_iterate_keys(keycache, False))
def compressedbytes(self, keycache=None):
return sum(key._fNbytes - key._fKeylen for key in self._threadsafe_iterate_keys(keycache, False))
def compressionratio(self, keycache=None):
numer, denom = 0, 0
for key in self._threadsafe_iterate_keys(keycache, False):
numer += key._fObjlen
denom += key._fNbytes - key._fKeylen
return float(numer) / float(denom)
def _normalize_dtype(self, interpretation, awkward0):
if inspect.isclass(interpretation) and issubclass(interpretation, awkward0.numpy.generic):
return self._normalize_dtype(awkward0.numpy.dtype(interpretation), awkward0)
elif isinstance(interpretation, awkward0.numpy.dtype): # user specified a Numpy dtype
default = interpret(self, awkward0)
if isinstance(default, (asdtype, asjagged)):
return default.to(interpretation)
else:
raise ValueError("cannot cast branch {0} (default interpretation {1}) as dtype {2}".format(repr(self.name), default, interpretation))
elif isinstance(interpretation, awkward0.numpy.ndarray): # user specified a Numpy array
default = interpret(self, awkward0)
if isinstance(default, asdtype):
return default.toarray(interpretation)
else:
raise ValueError("cannot cast branch {0} (default interpretation {1}) as dtype {2}".format(repr(self.name), default, interpretation))
elif not isinstance(interpretation, uproot3.interp.interp.Interpretation):
raise TypeError("branch interpretation must be an Interpretation, not {0} (type {1})".format(interpretation, type(interpretation)))
else:
return interpretation
def _normalize_interpretation(self, interpretation, awkward0):
if interpretation is None:
interpretation = interpret(self, awkward0)
else:
interpretation = self._normalize_dtype(interpretation, awkward0)
if interpretation is None:
raise ValueError("cannot interpret branch {0} as a Python type\n in file: {1}".format(repr(self.name), self._context.sourcepath))
if interpretation.awkward0 is not awkward0:
interpretation = interpretation.awkwardlib(awkward0)
return interpretation
def numitems(self, interpretation=None, keycache=None):
awkward0 = _normalize_awkwardlib(None)
interpretation = self._normalize_interpretation(interpretation, awkward0)
if interpretation is None:
raise ValueError("cannot interpret branch {0} as a Python type\n in file: {1}".format(repr(self.name), self._context.sourcepath))
if self._recoveredbaskets is None:
self._tryrecover()
return sum(interpretation.numitems(key.border, self.basket_numentries(i)) for i, key in enumerate(self._threadsafe_iterate_keys(keycache, True)))
@property
def compression(self):
try:
return uproot3.source.compressed.Compression(self._fCompress)
except ValueError:
return self._context.compression
def basket_entrystart(self, i):
if self._recoveredbaskets is None:
self._tryrecover()
if 0 <= i < self.numbaskets:
return self._entryoffsets[i]
else:
raise IndexError("index {0} out of range for branch with {1} baskets".format(i, self.numbaskets))
def basket_entrystop(self, i):
if self._recoveredbaskets is None:
self._tryrecover()
if 0 <= i < self.numbaskets:
return self._entryoffsets[i + 1]
else:
raise IndexError("index {0} out of range for branch with {1} baskets".format(i, self.numbaskets))
def basket_numentries(self, i):
if self._recoveredbaskets is None:
self._tryrecover()
if 0 <= i < self.numbaskets:
return self._entryoffsets[i + 1] - self._entryoffsets[i]
else:
raise IndexError("index {0} out of range for branch with {1} baskets".format(i, self.numbaskets))
def basket_uncompressedbytes(self, i, keycache=None):
if self._recoveredbaskets is None:
self._tryrecover()
return self._threadsafe_key(i, keycache, False)._fObjlen
def basket_compressedbytes(self, i, keycache=None):
if self._recoveredbaskets is None:
self._tryrecover()
key = self._threadsafe_key(i, keycache, False)
return key._fNbytes - key._fKeylen
def basket_numitems(self, i, interpretation=None, keycache=None):
if self._recoveredbaskets is None:
self._tryrecover()
awkward0 = _normalize_awkwardlib(None)
interpretation = self._normalize_interpretation(interpretation, awkward0)
key = self._threadsafe_key(i, keycache, True)
return interpretation.numitems(key.border, self.basket_numentries(i))
def _localentries(self, i, entrystart, entrystop):
local_entrystart = max(0, entrystart - self.basket_entrystart(i))
local_entrystop = max(0, min(entrystop - self.basket_entrystart(i), self.basket_entrystop(i) - self.basket_entrystart(i)))
return local_entrystart, local_entrystop
def _basket(self, i, interpretation, local_entrystart, local_entrystop, awkward0, basketcache, keycache):
basketdata = None
if basketcache is not None:
basketcachekey = self._basketcachekey(i)
basketdata = basketcache.get(basketcachekey, None)
key = self._threadsafe_key(i, keycache, True)
if basketdata is None:
basketdata = key.basketdata()
if basketcache is not None:
basketcache[basketcachekey] = basketdata
if key._fObjlen == key.border:
data, byteoffsets = basketdata, None
if self._countbranch is not None and awkward0.numpy.uint8(self._tree_iofeatures) & awkward0.numpy.uint8(uproot3.const.kGenerateOffsetMap) != 0:
counts = self._countbranch.array(entrystart=(local_entrystart + self.basket_entrystart(i)),
entrystop=(local_entrystop + self.basket_entrystart(i)))
itemsize = 1
if isinstance(interpretation, asjagged):
itemsize = interpretation.content.fromdtype.itemsize
awkward0.numpy.multiply(counts, itemsize, counts)
byteoffsets = awkward0.numpy.empty(len(counts) + 1, dtype=awkward0.numpy.int32)
byteoffsets[0] = 0
awkward0.numpy.cumsum(counts, out=byteoffsets[1:])
else:
data = basketdata[:key.border]
byteoffsets = awkward0.numpy.empty((key._fObjlen - key.border - 4) // 4, dtype=awkward0.numpy.int32) # native endian
byteoffsets[:-1] = basketdata[key.border + 4 : -4].view(">i4") # read as big-endian and convert
byteoffsets[-1] = key._fLast
awkward0.numpy.subtract(byteoffsets, key._fKeylen, byteoffsets)
return interpretation.fromroot(data, byteoffsets, local_entrystart, local_entrystop, key._fKeylen)
def basket(self, i, interpretation=None, entrystart=None, entrystop=None, flatten=False, awkwardlib=None, cache=None, basketcache=None, keycache=None):
awkward0 = _normalize_awkwardlib(awkwardlib)
interpretation = self._normalize_interpretation(interpretation, awkward0)
if interpretation is None:
raise ValueError("cannot interpret branch {0} as a Python type\n in file: {1}".format(repr(self.name), self._context.sourcepath))
if self._recoveredbaskets is None:
self._tryrecover()
if not 0 <= i < self.numbaskets:
raise IndexError("index {0} out of range for branch with {1} baskets".format(i, self.numbaskets))
entrystart, entrystop = _normalize_entrystartstop(self.numentries, entrystart, entrystop)
local_entrystart, local_entrystop = self._localentries(i, entrystart, entrystop)
entrystart = self.basket_entrystart(i) + local_entrystart
entrystop = self.basket_entrystart(i) + local_entrystop
numentries = local_entrystop - local_entrystart
if cache is not None:
cachekey = self._cachekey(interpretation, entrystart, entrystop)
out = cache.get(cachekey, None)
if out is not None:
if flatten and isinstance(interpretation, asjagged):
return out.content
else:
return out
source = self._basket(i, interpretation, local_entrystart, local_entrystop, awkward0, basketcache, keycache)
numitems = interpretation.source_numitems(source)
destination = interpretation.destination(numitems, numentries)
interpretation.fill(source, destination, 0, numitems, 0, numentries)
out = interpretation.finalize(destination, self)
if cache is not None:
cache[cachekey] = out
if flatten and isinstance(interpretation, asjagged):
return out.content
else:
return out
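    # _basketstartstop maps an entry range onto the half-open range of basket
    # indices [basketstart, basketstop) that overlap it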
def _basketstartstop(self, entrystart, entrystop):
basketstart, basketstop = None, None
for i in range(self.numbaskets):
if basketstart is None:
if entrystart < self.basket_entrystop(i) and self.basket_entrystart(i) < entrystop:
basketstart = i
basketstop = i
else:
if self.basket_entrystart(i) < entrystop:
basketstop = i
if basketstop is not None:
basketstop += 1 # stop is exclusive
return basketstart, basketstop
def baskets(self, interpretation=None, entrystart=None, entrystop=None, flatten=False, awkwardlib=None, cache=None, basketcache=None, keycache=None, reportentries=False, executor=None, blocking=True):
awkward0 = _normalize_awkwardlib(awkwardlib)
interpretation = self._normalize_interpretation(interpretation, awkward0)
if interpretation is None:
raise ValueError("cannot interpret branch {0} as a Python type\n in file: {1}".format(repr(self.name), self._context.sourcepath))
if self._recoveredbaskets is None:
self._tryrecover()
entrystart, entrystop = _normalize_entrystartstop(self.numentries, entrystart, entrystop)
basketstart, basketstop = self._basketstartstop(entrystart, entrystop)
if basketstart is None:
if blocking:
return []
else:
def wait():
return []
return wait
out = [None] * (basketstop - basketstart)
def fill(j):
try:
basket = self.basket(j + basketstart, interpretation=interpretation, entrystart=entrystart, entrystop=entrystop, flatten=flatten, awkwardlib=awkward0, cache=cache, basketcache=basketcache, keycache=keycache)
if reportentries:
local_entrystart, local_entrystop = self._localentries(j + basketstart, entrystart, entrystop)
basket = (local_entrystart + self.basket_entrystart(j + basketstart),
local_entrystop + self.basket_entrystart(j + basketstart),
basket)
except Exception:
return sys.exc_info()
else:
out[j] = basket
return None
if executor is None:
for j in range(basketstop - basketstart):
_delayedraise(fill(j))
excinfos = ()
else:
excinfos = executor.map(fill, range(basketstop - basketstart))
if blocking:
for excinfo in excinfos:
_delayedraise(excinfo)
return out
else:
def wait():
for excinfo in excinfos:
_delayedraise(excinfo)
return out
return wait
def iterate_baskets(self, interpretation=None, entrystart=None, entrystop=None, flatten=False, awkwardlib=None, cache=None, basketcache=None, keycache=None, reportentries=False):
awkward0 = _normalize_awkwardlib(awkwardlib)
interpretation = self._normalize_interpretation(interpretation, awkward0)
if interpretation is None:
raise ValueError("cannot interpret branch {0} as a Python type\n in file: {1}".format(repr(self.name), self._context.sourcepath))
if self._recoveredbaskets is None:
self._tryrecover()
entrystart, entrystop = _normalize_entrystartstop(self.numentries, entrystart, entrystop)
for i in range(self.numbaskets):
if entrystart < self.basket_entrystop(i) and self.basket_entrystart(i) < entrystop:
local_entrystart, local_entrystop = self._localentries(i, entrystart, entrystop)
if local_entrystop > local_entrystart:
if reportentries:
yield (local_entrystart + self.basket_entrystart(i),
local_entrystop + self.basket_entrystart(i),
self.basket(i, interpretation=interpretation, entrystart=entrystart, entrystop=entrystop, flatten=flatten, awkwardlib=awkward0, cache=cache, basketcache=basketcache, keycache=keycache))
else:
yield self.basket(i, interpretation=interpretation, entrystart=entrystart, entrystop=entrystop, flatten=flatten, awkwardlib=awkward0, cache=cache, basketcache=basketcache, keycache=keycache)
def _basket_itemoffset(self, interpretation, basketstart, basketstop, keycache):
basket_itemoffset = [0]
for j, key in enumerate(self._threadsafe_iterate_keys(keycache, True, basketstart, basketstop)):
i = basketstart + j
numitems = interpretation.numitems(key.border, self.basket_numentries(i))
basket_itemoffset.append(basket_itemoffset[-1] + numitems)
return basket_itemoffset
def _basket_entryoffset(self, basketstart, basketstop):
basket_entryoffset = [0]
for i in range(basketstart, basketstop):
basket_entryoffset.append(basket_entryoffset[-1] + self.basket_numentries(i))
return basket_entryoffset
def array(self, interpretation=None, entrystart=None, entrystop=None, flatten=False, awkwardlib=None, cache=None, basketcache=None, keycache=None, executor=None, blocking=True):
if self._recoveredbaskets is None:
self._tryrecover()
awkward0 = _normalize_awkwardlib(awkwardlib)
interpretation = self._normalize_interpretation(interpretation, awkward0)
if interpretation is None:
raise ValueError("cannot interpret branch {0} as a Python type\n in file: {1}".format(repr(self.name), self._context.sourcepath))
entrystart, entrystop = _normalize_entrystartstop(self.numentries, entrystart, entrystop)
basketstart, basketstop = self._basketstartstop(entrystart, entrystop)
if basketstart is not None and basketstop is not None and self._source.parent() is not None:
self._source.parent().preload([self._fBasketSeek[i] for i in range(basketstart, basketstop)])
if cache is not None:
cachekey = self._cachekey(interpretation, entrystart, entrystop)
out = cache.get(cachekey, None)
if out is not None:
if flatten and isinstance(interpretation, asjagged):
out = out.content
if blocking:
return out
else:
return lambda: out
if basketstart is None:
if blocking:
return interpretation.empty()
else:
def wait():
return interpretation.empty()
return wait
if keycache is None:
keycache = {}
basket_itemoffset = self._basket_itemoffset(interpretation, basketstart, basketstop, keycache)
basket_entryoffset = self._basket_entryoffset(basketstart, basketstop)
destination = interpretation.destination(basket_itemoffset[-1], basket_entryoffset[-1])
def fill(j):
try:
i = j + basketstart
local_entrystart, local_entrystop = self._localentries(i, entrystart, entrystop)
source = self._basket(i, interpretation, local_entrystart, local_entrystop, awkward0, basketcache, keycache)
expecteditems = basket_itemoffset[j + 1] - basket_itemoffset[j]
source_numitems = interpretation.source_numitems(source)
expectedentries = basket_entryoffset[j + 1] - basket_entryoffset[j]
source_numentries = local_entrystop - local_entrystart
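                # the first and last baskets may be only partially covered by the
                # requested entry range; shrink the expected offsets so that the
                # final clip matches what was actually filled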
if j + 1 == basketstop - basketstart:
if expecteditems > source_numitems:
basket_itemoffset[j + 1] -= expecteditems - source_numitems
if expectedentries > source_numentries:
basket_entryoffset[j + 1] -= expectedentries - source_numentries
elif j == 0:
if expecteditems > source_numitems:
basket_itemoffset[j] += expecteditems - source_numitems
if expectedentries > source_numentries:
basket_entryoffset[j] += expectedentries - source_numentries
interpretation.fill(source,
destination,
basket_itemoffset[j],
basket_itemoffset[j + 1],
basket_entryoffset[j],
basket_entryoffset[j + 1])
except Exception:
return sys.exc_info()
if executor is None:
for j in range(basketstop - basketstart):
_delayedraise(fill(j))
excinfos = ()
else:
excinfos = executor.map(fill, range(basketstop - basketstart))
def wait():
for excinfo in excinfos:
_delayedraise(excinfo)
clipped = interpretation.clip(destination,
basket_itemoffset[0],
basket_itemoffset[-1],
basket_entryoffset[0],
basket_entryoffset[-1])
out = interpretation.finalize(clipped, self)
if cache is not None:
cache[cachekey] = out
if flatten and isinstance(interpretation, asjagged):
return out.content
else:
return out
if blocking:
return wait()
else:
return wait
def _step_array(self, interpretation, basket_itemoffset, basket_entryoffset, entrystart, entrystop, awkward0, basketcache, keycache, executor, explicit_basketcache):
if interpretation is None:
raise ValueError("cannot interpret branch {0} as a Python type\n in file: {1}".format(repr(self.name), self._context.sourcepath))
if self._recoveredbaskets is None:
self._tryrecover()
basketstart, basketstop = self._basketstartstop(entrystart, entrystop)
if basketstart is None:
return lambda: interpretation.empty()
destination = interpretation.destination(basket_itemoffset[-1], basket_entryoffset[-1])
def fill(j):
try:
i = j + basketstart
local_entrystart, local_entrystop = self._localentries(i, entrystart, entrystop)
source = self._basket(i, interpretation, local_entrystart, local_entrystop, awkward0, basketcache, keycache)
expecteditems = basket_itemoffset[j + 1] - basket_itemoffset[j]
source_numitems = interpretation.source_numitems(source)
expectedentries = basket_entryoffset[j + 1] - basket_entryoffset[j]
source_numentries = local_entrystop - local_entrystart
if j + 1 == basketstop - basketstart:
if expecteditems > source_numitems:
basket_itemoffset[j + 1] -= expecteditems - source_numitems
if expectedentries > source_numentries:
basket_entryoffset[j + 1] -= expectedentries - source_numentries
elif j == 0:
if expecteditems > source_numitems:
basket_itemoffset[j] += expecteditems - source_numitems
if expectedentries > source_numentries:
basket_entryoffset[j] += expectedentries - source_numentries
interpretation.fill(source,
destination,
basket_itemoffset[j],
basket_itemoffset[j + 1],
basket_entryoffset[j],
basket_entryoffset[j + 1])
except Exception:
return sys.exc_info()
if executor is None:
for j in range(basketstop - basketstart):
_delayedraise(fill(j))
excinfos = ()
else:
excinfos = executor.map(fill, range(basketstop - basketstart))
def wait():
for excinfo in excinfos:
_delayedraise(excinfo)
if not explicit_basketcache:
for i in range(basketstop - 1): # not including the last real basket
try:
del basketcache[self._basketcachekey(i)]
except KeyError:
pass
return interpretation.clip(destination,
basket_itemoffset[0],
basket_itemoffset[-1],
basket_entryoffset[0],
basket_entryoffset[-1])
return wait
def mempartitions(self, numbytes, entrystart=None, entrystop=None, keycache=None, linear=True):
m = _memsize(numbytes)
if m is not None:
numbytes = m
if numbytes <= 0:
raise ValueError("target numbytes must be positive")
awkward0 = _normalize_awkwardlib(None)
entrystart, entrystop = _normalize_entrystartstop(self.numentries, entrystart, entrystop)
if not linear:
raise NotImplementedError("non-linear mempartition has not been implemented")
relevant_numbytes = 0.0
if self._recoveredbaskets is None:
self._tryrecover()
for i, key in enumerate(self._threadsafe_iterate_keys(keycache, False)):
start, stop = self._entryoffsets[i], self._entryoffsets[i + 1]
if entrystart < stop and start < entrystop:
this_numbytes = key._fObjlen * (min(stop, entrystop) - max(start, entrystart)) / float(stop - start)
assert this_numbytes >= 0.0
relevant_numbytes += this_numbytes
entrysteps = max(1, round(math.ceil((entrystop - entrystart) * numbytes / relevant_numbytes)))
start, stop = entrystart, entrystart
while stop < entrystop:
stop = min(stop + entrysteps, entrystop)
if stop > start:
yield start, stop
start = stop
def _normalize_entrysteps(self, entrysteps, entrystart, entrystop, keycache):
numbytes = _memsize(entrysteps)
if numbytes is not None:
return self.mempartitions(numbytes, entrystart=entrystart, entrystop=entrystop, keycache=keycache, linear=True)
if isinstance(entrysteps, string_types):
raise ValueError("string {0} does not match the memory size pattern (number followed by B/kB/MB/GB/etc.)".format(repr(entrysteps)))
if entrysteps is None:
if self._recoveredbaskets is None:
self._tryrecover()
return [(self._entryoffsets[i], self._entryoffsets[i + 1]) for i in range(self.numbaskets) if entrystart < self._entryoffsets[i + 1] and entrystop >= self._entryoffsets[i]]
elif entrysteps == float("inf"):
return [(entrystart, min(entrystop, self.numentries))]
elif isinstance(entrysteps, (numbers.Integral, numpy.integer)):
entrystepsize = entrysteps
if entrystepsize <= 0:
raise ValueError("if an integer, entrysteps must be positive")
effectivestop = min(entrystop, self.numentries)
starts = numpy.arange(entrystart, effectivestop, entrystepsize)
stops = numpy.append(starts[1:], effectivestop)
return zip(starts, stops)
else:
try:
iter(entrysteps)
except TypeError:
raise TypeError("entrysteps must be None for cluster iteration, a positive integer for equal steps in number of entries (inf for maximal), a memory size string (number followed by B/kB/MB/GB/etc.), or an iterable of 2-tuples for explicit entry starts (inclusive) and stops (exclusive)")
return entrysteps
def lazyarray(self, interpretation=None, entrysteps=None, entrystart=None, entrystop=None, flatten=False, awkwardlib=None, cache=None, basketcache=None, keycache=None, executor=None, persistvirtual=False, chunked=True):
if self._recoveredbaskets is None:
self._tryrecover()
awkward0 = _normalize_awkwardlib(awkwardlib)
interpretation = self._normalize_interpretation(interpretation, awkward0)
if interpretation is None:
raise ValueError("cannot interpret branch {0} as a Python type\n in file: {1}".format(repr(self.name), self._context.sourcepath))
entrystart, entrystop = _normalize_entrystartstop(self.numentries, entrystart, entrystop)
if not chunked and entrysteps is None:
entrysteps = float('inf')
entrysteps = self._normalize_entrysteps(entrysteps, entrystart, entrystop, keycache)
inner = interpretation
while isinstance(inner, asjagged):
inner = inner.content
if isinstance(inner, asobj) and getattr(inner.cls, "_arraymethods", None) is not None:
VirtualArray = awkward0.Methods.mixin(inner.cls._arraymethods, awkward0.VirtualArray)
chunkedarray = awkward0.Methods.mixin(inner.cls._arraymethods, awkward0.ChunkedArray)
elif isinstance(inner, asgenobj) and getattr(inner.generator.cls, "_arraymethods", None) is not None:
VirtualArray = awkward0.Methods.mixin(inner.generator.cls._arraymethods, awkward0.VirtualArray)
chunkedarray = awkward0.Methods.mixin(inner.generator.cls._arraymethods, awkward0.ChunkedArray)
else:
VirtualArray = awkward0.VirtualArray
chunkedarray = awkward0.ChunkedArray
lazybranch = _LazyBranch(self._context.sourcepath, self._context.treename, self.name, self, interpretation, flatten, awkward0.__name__, basketcache, keycache, executor)
if chunked:
chunks = []
counts = []
for start, stop in entrysteps:
numentries = stop - start
chunks.append(VirtualArray(lazybranch, (start, stop), cache=cache, type=awkward0.type.ArrayType(numentries, interpretation.type), persistvirtual=persistvirtual))
counts.append(numentries)
out = chunkedarray(chunks, counts)
out.__doc__ = self.title.decode('ascii')
return out
else:
start, stop = entrysteps[0]
out = VirtualArray(lazybranch, (start, stop), cache=cache, type=awkward0.type.ArrayType(stop - start, interpretation.type), persistvirtual=persistvirtual)
out.__doc__ = self.title.decode('ascii')
return out
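    # An illustrative call for a single branch (names are hypothetical):
    #
    #     px = tree["px"].lazyarray(entrysteps="100 kB")
    #     px[:10]    # only the chunks covering these entries are materialized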
class _BasketKey(object):
def __init__(self, source, cursor, compression, complete):
start = cursor.index
self._fNbytes, self._fVersion, self._fObjlen, self._fDatime, self._fKeylen, self._fCycle, self._fSeekKey, self._fSeekPdir = cursor.fields(source, TBranchMethods._BasketKey._format_small)
if self._fVersion > 1000:
cursor.index = start
self._fNbytes, self._fVersion, self._fObjlen, self._fDatime, self._fKeylen, self._fCycle, self._fSeekKey, self._fSeekPdir = cursor.fields(source, TBranchMethods._BasketKey._format_big)
if complete:
cursor.index = start + self._fKeylen - TBranchMethods._BasketKey._format_complete.size - 1
self._fVersion, self._fBufferSize, self._fNevBufSize, self._fNevBuf, self._fLast = cursor.fields(source, TBranchMethods._BasketKey._format_complete)
self.border = self._fLast - self._fKeylen
if source.size() is not None:
if source.size() - self._fSeekKey < self._fNbytes:
s = source
while s.parent() is not None and s.parent() is not s:
s = s.parent()
raise ValueError("TKey declares that object has {0} bytes but only {1} remain in the file\n in file: {2}".format(self._fNbytes, source.size() - self._fSeekKey, s.path))
if self._fObjlen != self._fNbytes - self._fKeylen:
self.source = uproot3.source.compressed.CompressedSource(compression, source, Cursor(self._fSeekKey + self._fKeylen), self._fNbytes - self._fKeylen, self._fObjlen)
self.cursor = Cursor(0)
else:
self.source = source
self.cursor = Cursor(self._fSeekKey + self._fKeylen)
_format_small = struct.Struct(">ihiIhhii")
_format_big = struct.Struct(">ihiIhhqq")
_format_complete = struct.Struct(">Hiiii")
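        # _format_small/_format_big describe the TKey header with 32-bit or
        # 64-bit seek pointers (the big form is used when fVersion > 1000);
        # _format_complete covers the trailing TBasket fields fVersion,
        # fBufferSize, fNevBufSize, fNevBuf, fLast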
@property
def fName(self):
return "TBranchMethods._BasketKey"
@property
def fTitle(self):
return "TBranchMethods._BasketKey"
@property
def fClassName(self):
return "TBasket"
def basketdata(self):
datasource = self.source.threadlocal()
try:
return self.cursor.copied().bytes(datasource, self._fObjlen)
finally:
datasource.dismiss()
class _RecoveredTBasket(uproot3.rootio.ROOTObject):
@classmethod
def _readinto(cls, self, source, cursor, context, parent):
start = cursor.index
self._fNbytes, self._fVersion, self._fObjlen, self._fDatime, self._fKeylen, self._fCycle = cursor.fields(source, cls._format1)
# skip the class name, name, and title
cursor.index = start + self._fKeylen - cls._format2.size - 1
self._fVersion, self._fBufferSize, self._fNevBufSize, self._fNevBuf, self._fLast = cursor.fields(source, cls._format2)
# one-byte terminator
cursor.skip(1)
# then if you have offsets data, read them in
if self._fNevBufSize > 8:
byteoffsets = cursor.bytes(source, self._fNevBuf * 4 + 8)
cursor.skip(-4)
# there's a second TKey here, but it doesn't contain any new information (in fact, less)
cursor.skip(self._fKeylen)
size = self.border = self._fLast - self._fKeylen
# the data (not including offsets)
self.contents = cursor.bytes(source, size)
# put the offsets back in, in the way that we expect it
if self._fNevBufSize > 8:
self.contents = numpy.concatenate((self.contents, byteoffsets))
size += byteoffsets.nbytes
self._fObjlen = size
self._fNbytes = self._fObjlen + self._fKeylen
return self
_format1 = struct.Struct(">ihiIhh")
_format2 = struct.Struct(">Hiiii")
def basketdata(self):
return self.contents
@property
def numentries(self):
return self._fNevBuf
def _recover(self):
recoveredbaskets = [x for x in uproot3.rootio.TObjArray.read(self._source, self._fBaskets._cursor, self._context, self, asclass=TBranchMethods._RecoveredTBasket) if x is not None]
if self._numgoodbaskets == 0:
entryoffsets = [0]
else:
entryoffsets = self._fBasketEntry[:self._numgoodbaskets + 1].tolist()
for basket in recoveredbaskets:
entryoffsets.append(entryoffsets[-1] + basket.numentries)
if entryoffsets[-1] == self.numentries:
with self._recoverylock:
self._recoveredbaskets = recoveredbaskets
self._entryoffsets = entryoffsets
else:
if self.interpretation is None:
self._recoveredbaskets = []
else:
raise ValueError("entries in recovered baskets (offsets {0}) don't add up to total number of entries ({1})\n in file: {2}".format(entryoffsets, self.numentries, self._context.sourcepath))
def _tryrecover(self):
if self._recoveredbaskets is None:
self._recover()
def _basketkey(self, source, i, complete):
if 0 <= i < self._numgoodbaskets:
return self._BasketKey(source.parent(), Cursor(self._fBasketSeek[i]), self.compression, complete)
elif self._numgoodbaskets <= i < self.numbaskets:
return self._recoveredbaskets[i - self._numgoodbaskets]
else:
raise IndexError("index {0} out of range for branch with {1} baskets".format(i, self.numbaskets))
def _format(self, foldnames, indent="", strip=""):
name = self._fName.decode("ascii")
if foldnames and name.startswith(strip + "."):
name = name[len(strip) + 1:]
if len(name) > 26:
out = [indent + name, indent + "{0:26s} {1:26s} {2}".format("", "(no streamer)" if self._streamer is None else self._streamer.__class__.__name__, self.interpretation)]
else:
out = [indent + "{0:26s} {1:26s} {2}".format(name, "(no streamer)" if self._streamer is None else self._streamer.__class__.__name__, self.interpretation)]
for branch in self._fBranches:
out.extend(branch._format(foldnames, indent + " " if foldnames else indent, self._fName))
if len(self._fBranches) > 0 and out[-1] != "":
out.append("")
return out
def show(self, foldnames=False, stream=sys.stdout):
if stream is None:
return "\n".join(self._format(foldnames))
else:
for line in self._format(foldnames):
stream.write(line)
stream.write("\n")
def __len__(self):
return self.numentries
def __getitem__(self, name):
return self.get(name)
def __iter__(self):
# prevent Python's attempt to interpret __len__ and __getitem__ as iteration
raise TypeError("'TBranch' object is not iterable")
################################################################ for lazy arrays
class _LazyFiles(object):
def __init__(self, paths, treepath, branches, entrysteps, flatten, awkwardlib, basketcache, keycache, executor, persistvirtual, localsource, xrootdsource, httpsource, options):
self.paths = paths
self.treepath = treepath
self.branches = branches
self.entrysteps = entrysteps
self.flatten = flatten
self.awkwardlib = awkwardlib
self.basketcache = basketcache
self.keycache = keycache
self.executor = executor
self.persistvirtual = persistvirtual
self.localsource = localsource
self.xrootdsource = xrootdsource
self.httpsource = httpsource
self.options = options
self._init()
def _init(self):
self.trees = cachetools.LRUCache(5) # last 5 TTrees
if self.basketcache is None:
self.basketcache = uproot3.cache.ThreadSafeArrayCache(1024**2) # 1 MB
if self.keycache is None:
self.keycache = cachetools.LRUCache(10000) # last 10000 TKeys
def __getstate__(self):
return {"paths": self.paths,
"treepath": self.treepath,
"branches": self.branches,
"entrysteps": self.entrysteps,
"flatten": self.flatten,
"awkwardlib": self.awkwardlib,
"persistvirtual": self.persistvirtual,
"localsource": self.localsource,
"xrootdsource": self.xrootdsource,
"httpsource": self.httpsource,
"options": self.options}
def __setstate__(self, state):
self.paths = state["paths"]
self.treepath = state["treepath"]
self.branches = state["branches"]
self.entrysteps = state["entrysteps"]
self.flatten = state["flatten"]
self.awkwardlib = state["awkwardlib"]
self.basketcache = None
self.keycache = None
self.executor = None
self.persistvirtual = state["persistvirtual"]
self.localsource = state["localsource"]
self.xrootdsource = state["xrootdsource"]
self.httpsource = state["httpsource"]
self.options = state["options"]
self._init()
def __call__(self, pathi, branchname):
awkward0 = _normalize_awkwardlib(self.awkwardlib)
tree = self.trees.get(self.paths[pathi], None)
if tree is None:
tree = self.trees[self.paths[pathi]] = uproot3.rootio.open(self.paths[pathi])[self.treepath]
tree.interpretations = dict((b.name, x) for b, x in tree._normalize_branches(self.branches, awkward0))
return tree[branchname].lazyarray(interpretation=tree.interpretations[branchname], entrysteps=self.entrysteps, entrystart=None, entrystop=None, flatten=self.flatten, awkwardlib=awkward0, cache=None, basketcache=self.basketcache, keycache=self.keycache, executor=self.executor, persistvirtual=self.persistvirtual)
class _LazyTree(object):
def __init__(self, path, treepath, tree, interpretation, flatten, awkwardlib, basketcache, keycache, executor):
self.path = path
self.treepath = treepath
self.tree = tree
self.interpretation = interpretation
self.flatten = flatten
self.awkwardlib = awkwardlib
self.basketcache = basketcache
self.keycache = keycache
self.executor = executor
self._init()
def _init(self):
if self.tree is None:
self.tree = uproot3.rootio.open(self.path)[self.treepath]
if self.basketcache is None:
self.basketcache = uproot3.cache.ThreadSafeArrayCache(1024**2) # 1 MB
if self.keycache is None:
self.keycache = {} # unlimited
def __getstate__(self):
return {"path": self.path,
"treepath": self.treepath,
"interpretation": self.interpretation,
"flatten": self.flatten,
"awkwardlib": self.awkwardlib}
def __setstate__(self, state):
self.path = state["path"]
self.treepath = state["treepath"]
self.tree = None
self.interpretation = state["interpretation"]
self.flatten = state["flatten"]
self.awkwardlib = state["awkwardlib"]
self.basketcache = None
self.keycache = None
self.executor = None
self._init()
def __call__(self, branch, entrystart, entrystop):
return self.tree[branch].array(interpretation=self.interpretation[branch], entrystart=entrystart, entrystop=entrystop, flatten=self.flatten, awkwardlib=self.awkwardlib, cache=None, basketcache=self.basketcache, keycache=self.keycache, executor=self.executor)
class _LazyBranch(object):
def __init__(self, path, treepath, branchname, branch, interpretation, flatten, awkwardlib, basketcache, keycache, executor):
self.path = path
self.treepath = treepath
self.branchname = branchname
self.branch = branch
self.interpretation = interpretation
self.flatten = flatten
self.awkwardlib = awkwardlib
self.basketcache = basketcache
self.keycache = keycache
self.executor = executor
self._init()
def _init(self):
if self.branch is None:
self.branch = uproot3.rootio.open(self.path)[self.treepath][self.branchname]
if self.basketcache is None:
self.basketcache = uproot3.cache.ThreadSafeArrayCache(1024**2) # 1 MB
if self.keycache is None:
self.keycache = {} # unlimited
def __getstate__(self):
return {"path": self.path,
"treepath": self.treepath,
"branchname": self.branchname,
"interpretation": self.interpretation,
"flatten": self.flatten,
"awkwardlib": self.awkwardlib}
def __setstate__(self, state):
self.path = state["path"]
self.treepath = state["treepath"]
self.branchname = state["branchname"]
self.branch = None
self.interpretation = state["interpretation"]
self.flatten = state["flatten"]
self.awkwardlib = state["awkwardlib"]
self.basketcache = None
self.keycache = None
self.executor = None
self._init()
def __call__(self, entrystart, entrystop):
return self.branch.array(interpretation=self.interpretation, entrystart=entrystart, entrystop=entrystop, flatten=self.flatten, awkwardlib=self.awkwardlib, cache=None, basketcache=self.basketcache, keycache=self.keycache, executor=self.executor, blocking=True)
def lazyarray(path, treepath, branchname, interpretation=None, namedecode="utf-8", entrysteps=float("inf"), flatten=False, awkwardlib=None, cache=None, basketcache=None, keycache=None, executor=None, persistvirtual=False, localsource=MemmapSource.defaults, xrootdsource=XRootDSource.defaults, httpsource=HTTPSource.defaults, **options):
if interpretation is None:
branches = branchname
else:
branches = {branchname: interpretation}
out = lazyarrays(path, treepath, branches=branches, namedecode=namedecode, entrysteps=entrysteps, flatten=flatten, profile=None, awkwardlib=awkwardlib, cache=cache, basketcache=basketcache, keycache=keycache, executor=executor, persistvirtual=persistvirtual, localsource=localsource, xrootdsource=xrootdsource, httpsource=httpsource, **options)
if len(out.columns) != 1:
raise ValueError("list of branch names or glob/regex matches more than one branch; use uproot3.lazyarrays (plural)")
return out[out.columns[0]]
def lazyarrays(path, treepath, branches=None, namedecode="utf-8", entrysteps=float("inf"), flatten=False, profile=None, awkwardlib=None, cache=None, basketcache=None, keycache=None, executor=None, persistvirtual=False, localsource=MemmapSource.defaults, xrootdsource=XRootDSource.defaults, httpsource=HTTPSource.defaults, **options):
awkward0 = _normalize_awkwardlib(awkwardlib)
if isinstance(path, string_types):
paths = _filename_explode(path)
else:
paths = [y for x in path for y in _filename_explode(x)]
path2count = numentries(path, treepath, total=False, localsource=localsource, xrootdsource=xrootdsource, httpsource=httpsource, executor=executor, blocking=True)
lazyfiles = _LazyFiles(paths, treepath, branches, entrysteps, flatten, awkward0.__name__, basketcache, keycache, executor, persistvirtual, localsource, xrootdsource, httpsource, options)
brancheslist = None
for path in paths:
file = uproot3.rootio.open(path, localsource=localsource, xrootdsource=xrootdsource, httpsource=httpsource, **options)
try:
tree = file[treepath]
except KeyError:
continue
brancheslist = list(tree._normalize_branches(branches, awkward0))
break
if brancheslist is None:
raise ValueError("no matching paths contained a tree named {0}".format(repr(treepath)))
out = awkward0.Table()
for branch, interpretation in brancheslist:
inner = interpretation
while isinstance(inner, asjagged):
inner = inner.content
if isinstance(inner, asobj) and getattr(inner.cls, "_arraymethods", None) is not None:
VirtualArray = awkward0.Methods.mixin(inner.cls._arraymethods, awkward0.VirtualArray)
elif isinstance(inner, asgenobj) and getattr(inner.generator.cls, "_arraymethods", None) is not None:
VirtualArray = awkward0.Methods.mixin(inner.generator.cls._arraymethods, awkward0.VirtualArray)
else:
VirtualArray = awkward0.VirtualArray
chunks = []
counts = []
for pathi, path in enumerate(paths):
chunks.append(VirtualArray(lazyfiles, (pathi, branch.name), cache=cache, type=awkward0.type.ArrayType(path2count[path], interpretation.type), persistvirtual=persistvirtual))
counts.append(path2count[path])
name = branch.name.decode("ascii") if namedecode is None else branch.name.decode(namedecode)
out[name] = awkward0.ChunkedArray(chunks, counts)
if profile is not None:
out = uproot_methods.profiles.transformer(profile)(out)
return out
def daskarray(path, treepath, branchname, interpretation=None, namedecode="utf-8", entrysteps=float("inf"), flatten=False, awkwardlib=None, cache=None, basketcache=None, keycache=None, executor=None, localsource=MemmapSource.defaults, xrootdsource=XRootDSource.defaults, httpsource=HTTPSource.defaults, **options):
out = lazyarray(path, treepath, branchname, interpretation=interpretation, namedecode=namedecode, entrysteps=entrysteps, flatten=flatten, awkwardlib=awkwardlib, cache=cache, basketcache=basketcache, keycache=keycache, executor=executor, persistvirtual=False, localsource=localsource, xrootdsource=xrootdsource, httpsource=httpsource, **options)
import dask.array
if len(out.shape) == 1:
return dask.array.from_array(out, out.shape, fancy=True)
else:
raise NotImplementedError("TODO: len(shape) > 1")
def daskframe(path, treepath, branches=None, namedecode="utf-8", entrysteps=float("inf"), flatten=False, awkwardlib=None, cache=None, basketcache=None, keycache=None, executor=None, localsource=MemmapSource.defaults, xrootdsource=XRootDSource.defaults, httpsource=HTTPSource.defaults, **options):
import dask.array
import dask.dataframe
out = lazyarrays(path, treepath, branches=branches, namedecode=namedecode, entrysteps=entrysteps, flatten=flatten, profile=None, awkwardlib=awkwardlib, cache=cache, basketcache=basketcache, keycache=keycache, executor=executor, persistvirtual=False, localsource=localsource, xrootdsource=xrootdsource, httpsource=httpsource, **options)
series = []
for n in out.columns:
x = out[n]
if len(x.shape) == 1:
array = dask.array.from_array(x, x.shape, fancy=True)
series.append(dask.dataframe.from_dask_array(array, columns=n))
else:
raise NotImplementedError("TODO: len(shape) > 1")
return dask.dataframe.concat(series, axis=1)
################################################################ for quickly getting numentries
def numentries(path, treepath, total=True, localsource=MemmapSource.defaults, xrootdsource={"timeout": None, "chunkbytes": 32*1024, "limitbytes": 1024**2, "parallel": False}, httpsource={"chunkbytes": 32*1024, "limitbytes": 1024**2, "parallel": False}, executor=None, blocking=True, **options):
if isinstance(path, string_types):
paths = _filename_explode(path)
else:
paths = [y for x in path for y in _filename_explode(x)]
return _numentries(paths, treepath, total, localsource, xrootdsource, httpsource, executor, blocking, [None] * len(paths), options)
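# An illustrative call (file and tree names are hypothetical):
#
#     uproot3.numentries("events.root", "Events")                      # grand total
#     uproot3.numentries(["a.root", "b.root"], "Events", total=False)  # per-file dict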
def _numentries(paths, treepath, total, localsource, xrootdsource, httpsource, executor, blocking, uuids, options):
class _TTreeForNumEntries(uproot3.rootio.ROOTStreamedObject):
@classmethod
def _readinto(cls, self, source, cursor, context, parent):
start, cnt, classversion = uproot3.rootio._startcheck(source, cursor)
tnamed = uproot3.rootio.Undefined.read(source, cursor, context, parent)
tattline = uproot3.rootio.Undefined.read(source, cursor, context, parent)
tattfill = uproot3.rootio.Undefined.read(source, cursor, context, parent)
tattmarker = uproot3.rootio.Undefined.read(source, cursor, context, parent)
self._fEntries, = cursor.fields(source, _TTreeForNumEntries._format1)
return self
_format1 = struct.Struct('>q')
out = [None] * len(paths)
def fill(i):
try:
file = uproot3.rootio.open(paths[i], localsource=localsource, xrootdsource=xrootdsource, httpsource=httpsource, read_streamers=False, **options)
except Exception:
return sys.exc_info()
else:
try:
source = file._context.source
file._context.classes["TTree"] = _TTreeForNumEntries
try:
out[i] = file[treepath]._fEntries
except KeyError:
out[i] = 0
uuids[i] = file._context.uuid
except Exception:
return sys.exc_info()
else:
return None
finally:
source.close()
if executor is None:
for i in range(len(paths)):
_delayedraise(fill(i))
excinfos = ()
else:
excinfos = executor.map(fill, range(len(paths)))
def wait():
for excinfo in excinfos:
_delayedraise(excinfo)
if total:
return sum(out)
else:
return OrderedDict(zip(paths, out))
if blocking:
return wait()
else:
return wait
| bsd-3-clause |
RayMick/scikit-learn | sklearn/linear_model/__init__.py | 270 | 3096 | """
The :mod:`sklearn.linear_model` module implements generalized linear models.
It includes Ridge regression, Bayesian regression, Lasso and Elastic Net
estimators computed with Least Angle Regression and coordinate descent. It
also implements Stochastic Gradient Descent-related algorithms.
"""
# See http://scikit-learn.sourceforge.net/modules/sgd.html and
# http://scikit-learn.sourceforge.net/modules/linear_model.html for
# complete documentation.
from .base import LinearRegression
from .bayes import BayesianRidge, ARDRegression
from .least_angle import (Lars, LassoLars, lars_path, LarsCV, LassoLarsCV,
LassoLarsIC)
from .coordinate_descent import (Lasso, ElasticNet, LassoCV, ElasticNetCV,
lasso_path, enet_path, MultiTaskLasso,
MultiTaskElasticNet, MultiTaskElasticNetCV,
MultiTaskLassoCV)
from .sgd_fast import Hinge, Log, ModifiedHuber, SquaredLoss, Huber
from .stochastic_gradient import SGDClassifier, SGDRegressor
from .ridge import (Ridge, RidgeCV, RidgeClassifier, RidgeClassifierCV,
ridge_regression)
from .logistic import (LogisticRegression, LogisticRegressionCV,
logistic_regression_path)
from .omp import (orthogonal_mp, orthogonal_mp_gram, OrthogonalMatchingPursuit,
OrthogonalMatchingPursuitCV)
from .passive_aggressive import PassiveAggressiveClassifier
from .passive_aggressive import PassiveAggressiveRegressor
from .perceptron import Perceptron
from .randomized_l1 import (RandomizedLasso, RandomizedLogisticRegression,
lasso_stability_path)
from .ransac import RANSACRegressor
from .theil_sen import TheilSenRegressor
__all__ = ['ARDRegression',
'BayesianRidge',
'ElasticNet',
'ElasticNetCV',
'Hinge',
'Huber',
'Lars',
'LarsCV',
'Lasso',
'LassoCV',
'LassoLars',
'LassoLarsCV',
'LassoLarsIC',
'LinearRegression',
'Log',
'LogisticRegression',
'LogisticRegressionCV',
'ModifiedHuber',
'MultiTaskElasticNet',
'MultiTaskElasticNetCV',
'MultiTaskLasso',
'MultiTaskLassoCV',
'OrthogonalMatchingPursuit',
'OrthogonalMatchingPursuitCV',
'PassiveAggressiveClassifier',
'PassiveAggressiveRegressor',
'Perceptron',
'RandomizedLasso',
'RandomizedLogisticRegression',
'Ridge',
'RidgeCV',
'RidgeClassifier',
'RidgeClassifierCV',
'SGDClassifier',
'SGDRegressor',
'SquaredLoss',
'TheilSenRegressor',
'enet_path',
'lars_path',
'lasso_path',
'lasso_stability_path',
'logistic_regression_path',
'orthogonal_mp',
'orthogonal_mp_gram',
'ridge_regression',
'RANSACRegressor']
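# --- illustrative sketch, not part of scikit-learn ---------------------------
# Every name in __all__ is importable straight from sklearn.linear_model; the
# hypothetical helper below shows a minimal fit/predict round trip with two of
# the re-exported estimators (the toy arrays are placeholders).
def _linear_model_usage_sketch():
    import numpy as np
    X = np.array([[0.0], [1.0], [2.0], [3.0]])
    ridge = Ridge(alpha=1.0).fit(X, np.array([0.1, 1.1, 1.9, 3.2]))
    clf = LogisticRegression().fit(X, np.array([0, 0, 1, 1]))
    return ridge.predict(X), clf.predict(X)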
| bsd-3-clause |
sameera2004/xray-vision | xray_vision/backend/__init__.py | 6 | 11809 | # ######################################################################
# Copyright (c) 2014, Brookhaven Science Associates, Brookhaven #
# National Laboratory. All rights reserved. #
# #
# Redistribution and use in source and binary forms, with or without #
# modification, are permitted provided that the following conditions #
# are met: #
# #
# * Redistributions of source code must retain the above copyright #
# notice, this list of conditions and the following disclaimer. #
# #
# * Redistributions in binary form must reproduce the above copyright #
# notice this list of conditions and the following disclaimer in #
# the documentation and/or other materials provided with the #
# distribution. #
# #
# * Neither the name of the Brookhaven Science Associates, Brookhaven #
# National Laboratory nor the names of its contributors may be used #
# to endorse or promote products derived from this software without #
# specific prior written permission. #
# #
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS #
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT #
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS #
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE #
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, #
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES #
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR #
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) #
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, #
# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OTHERWISE) ARISING #
# IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE #
# POSSIBILITY OF SUCH DAMAGE. #
########################################################################
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from .. import QtCore, QtGui
from collections import defaultdict
from six.moves import zip
import numpy as np
import logging
logger = logging.getLogger(__name__)
class AbstractDataView(object):
"""
AbstractDataView class docstring. Defaults to a single matplotlib axes
"""
default_dict_type = defaultdict
default_list_type = list
def __init__(self, data_list, key_list, *args, **kwargs):
"""
Parameters
----------
data_list : list
The data stored as a list
key_list : list
The order of keys to plot
"""
super(AbstractDataView, self).__init__(*args, **kwargs)
        # validate inputs before touching them: reject None explicitly first so
        # the caller gets the intended ValueError rather than a TypeError from
        # len(None), then compare the lengths
        if data_list is None:
            raise ValueError(("data_list cannot have a value of None. It must "
                              "be, at minimum, an empty list"))
        if key_list is None:
            raise ValueError(("key_list cannot have a value of None. It must "
                              "be, at minimum, an empty list"))
        if len(data_list) != len(key_list):
            raise ValueError(("lengths of data ({0}) and keys ({1}) must be the"
                              " same").format(len(data_list), len(key_list)))
# init the data dictionary
data_dict = self.default_dict_type()
if len(data_list) > 0:
# but only give it values if the data_list has any entries
for (k, v) in zip(key_list, data_list):
data_dict[k] = v
# stash the dict and keys
self._data_dict = data_dict
self._key_list = key_list
def replot(self):
"""
Do nothing in the abstract base class. Needs to be implemented
in the concrete classes
"""
raise NotImplementedError("Must override the replot() method in "
"the concrete base class")
def clear_data(self):
"""
Clear all data
"""
self._data_dict.clear()
self._key_list[:] = []
def remove_data(self, lbl_list):
"""
Remove the key:value pair from the dictionary as specified by the
labels in lbl_list
Parameters
----------
lbl_list : list
String
name(s) of dataset to remove
"""
for lbl in lbl_list:
try:
del self._data_dict[lbl]
self._key_list.remove(lbl)
except KeyError:
# do nothing
pass
class AbstractDataView1D(AbstractDataView):
"""
AbstractDataView1D class docstring.
"""
# no init because AbstractDataView1D contains no new attributes
def add_data(self, lbl_list, x_list, y_list, position=None):
"""
add data with the name 'lbl'. Will overwrite data if
'lbl' already exists in the data dictionary
Parameters
----------
        lbl_list : list
            names of the data sets
        x_list : list
            np.ndarray vectors of x-coordinates, one per label
        y_list : list
            np.ndarray vectors of y-coordinates, one per label
position: int
The position in the key list to begin inserting the data.
Default (None) behavior is to append to the end of the list
"""
# loop over the data passed in
if position is None:
position = len(self._key_list)
for counter, (lbl, x, y) in enumerate(zip(lbl_list, x_list, y_list)):
self._data_dict[lbl] = (x, y)
self._key_list.insert(position+counter, lbl)
def append_data(self, lbl_list, x_list, y_list):
"""
Append (x, y) coordinates to a dataset. If there is no dataset
called 'lbl', add the (x_data, y_data) tuple to a new entry
specified by 'lbl'
Parameters
----------
lbl : list
str
name of data set to append
x : list
np.ndarray
single vector of x-coordinates to add.
x_data must be the same length as y_data
y : list
np.ndarray
single vector of y-coordinates to add.
y_data must be the same length as x_data
"""
lbl_to_add = []
x_to_add = []
y_to_add = []
for (lbl, x, y) in zip(lbl_list, x_list, y_list):
lbl = str(lbl)
if lbl in self._data_dict:
# get the current vectors at 'lbl'
(prev_x, prev_y) = self._data_dict[lbl]
# set the concatenated data to 'lbl'
self._data_dict[lbl] = (np.concatenate((prev_x, x)),
np.concatenate((prev_y, y)))
else:
# key doesn't exist, append the data to lists
lbl_to_add.append(lbl)
x_to_add.append(x)
y_to_add.append(y)
if len(lbl_to_add) > 0:
self.add_data(lbl_list=lbl_to_add, x_list=x_to_add, y_list=y_to_add)
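# --- illustrative sketch, not part of xray-vision ----------------------------
# AbstractDataView1D only manages the label -> (x, y) bookkeeping; rendering is
# deferred to a concrete subclass via replot().  The hypothetical subclass and
# driver below show the intended add_data()/append_data() flow, assuming the
# module's Qt dependencies are importable.
class _LoggingView1DSketch(AbstractDataView1D):
    """Minimal concrete subclass used only to illustrate the API."""
    def replot(self):
        for lbl in self._key_list:
            x, y = self._data_dict[lbl]
            logger.debug("%s: %d points", lbl, len(x))

def _data_view_1d_usage_sketch():
    view = _LoggingView1DSketch(data_list=[], key_list=[])
    view.add_data(["trace"], [np.arange(3)], [np.arange(3) ** 2])
    view.append_data(["trace"], [np.arange(3, 6)], [np.arange(3, 6) ** 2])
    view.replot()
    return view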
class AbstractDataView2D(AbstractDataView):
"""
AbstractDataView2D class docstring
"""
def __init__(self, data_list, key_list, *args, **kwargs):
"""
Parameters
----------
data_dict : Dict
k:v pairs of data
key_list : List
ordered key list which defines the order that images appear in the
stack
corners_dict : Dict
k:v pairs of the location of the corners of each image
(x0, y0, x1, y1)
"""
super(AbstractDataView2D, self).__init__(data_list=data_list,
key_list=key_list, *args,
**kwargs)
def add_data(self, lbl_list, xy_list, corners_list=None, position=None):
"""
add data with the name 'lbl'. Will overwrite data if
'lbl' already exists in the data dictionary
Parameters
----------
        lbl_list : list
            names of the data sets
        xy_list : list
            2D np.ndarray images, one per label
        corners_list : list, optional
            (x0, y0, x1, y1) corner locations, one per image; computed with
            find_corners() when omitted
position: int
The position in the key list to begin inserting the data.
Default (None) behavior is to append to the end of the list
"""
# check for default corners_list behavior
if corners_list is None:
corners_list = self.default_list_type()
for xy in xy_list:
corners_list.append(self.find_corners(xy))
# declare a local loop index
counter = 0
# loop over the data passed in
for (lbl, xy, corners) in zip(lbl_list, xy_list, corners_list):
# stash the data
self._data_dict[lbl] = xy
# stash the corners
self._corners_dict[lbl] = corners
# insert the key into the desired position in the keys list
if position is None:
self._key_list.append(lbl)
else:
                # list.insert() takes positional arguments only
                self._key_list.insert(position + counter, lbl)
counter += 1
def append_data(self, lbl_list, xy_list, axis=[], append_to_end=[]):
"""
Append (x, y) coordinates to a dataset. If there is no dataset
called 'lbl', add the (x_data, y_data) tuple to a new entry
specified by 'lbl'
Parameters
----------
lbl : list
str
name of data set to append
xy : list
np.ndarray
List of 2D arrays
axis : list
int
axis == 0 is appending in the horizontal direction
axis == 1 is appending in the vertical direction
append_to_end : list
bool
if false, prepend to the dataset
"""
for (lbl, xy, ax, end) in zip(lbl_list, xy_list, axis, append_to_end):
try:
# set the concatenated data to 'lbl'
if end:
self._data_dict[lbl] = np.r_[str(ax),
self._data_dict[lbl],
xy]
# TODO: Need to update the corners_list also...
else:
self._data_dict[lbl] = np.r_[str(ax),
xy,
self._data_dict[lbl]]
# TODO: Need to update the corners_list also...
except KeyError:
# key doesn't exist, add data to a new entry called 'lbl'
                self.add_data([lbl], [xy])
def add_datum(self, lbl_list, x_list, y_list, val_list):
"""
Add a single data point to an array
Parameters
----------
lbl : list
str
name of the dataset to add one datum to
x : list
int
index of x coordinate
y : list
int
index of y coordinate
val : list
float
value of datum at the coordinates specified by (x,y)
"""
raise NotImplementedError("Not yet implemented") | bsd-3-clause |
cython-testbed/pandas | pandas/tests/indexes/timedeltas/test_tools.py | 5 | 6579 | import pytest
from datetime import time, timedelta
import numpy as np
import pandas as pd
import pandas.util.testing as tm
from pandas.util.testing import assert_series_equal
from pandas import Series, to_timedelta, isna, TimedeltaIndex
from pandas._libs.tslib import iNaT
class TestTimedeltas(object):
def test_to_timedelta(self):
def conv(v):
return v.astype('m8[ns]')
d1 = np.timedelta64(1, 'D')
assert (to_timedelta('1 days 06:05:01.00003', box=False) ==
conv(d1 + np.timedelta64(6 * 3600 + 5 * 60 + 1, 's') +
np.timedelta64(30, 'us')))
assert (to_timedelta('15.5us', box=False) ==
conv(np.timedelta64(15500, 'ns')))
# empty string
result = to_timedelta('', box=False)
assert result.astype('int64') == iNaT
result = to_timedelta(['', ''])
assert isna(result).all()
# pass thru
result = to_timedelta(np.array([np.timedelta64(1, 's')]))
expected = pd.Index(np.array([np.timedelta64(1, 's')]))
tm.assert_index_equal(result, expected)
# ints
result = np.timedelta64(0, 'ns')
expected = to_timedelta(0, box=False)
assert result == expected
# Series
expected = Series([timedelta(days=1), timedelta(days=1, seconds=1)])
result = to_timedelta(Series(['1d', '1days 00:00:01']))
tm.assert_series_equal(result, expected)
# with units
result = TimedeltaIndex([np.timedelta64(0, 'ns'), np.timedelta64(
10, 's').astype('m8[ns]')])
expected = to_timedelta([0, 10], unit='s')
tm.assert_index_equal(result, expected)
# single element conversion
v = timedelta(seconds=1)
result = to_timedelta(v, box=False)
expected = np.timedelta64(timedelta(seconds=1))
assert result == expected
v = np.timedelta64(timedelta(seconds=1))
result = to_timedelta(v, box=False)
expected = np.timedelta64(timedelta(seconds=1))
assert result == expected
# arrays of various dtypes
arr = np.array([1] * 5, dtype='int64')
result = to_timedelta(arr, unit='s')
expected = TimedeltaIndex([np.timedelta64(1, 's')] * 5)
tm.assert_index_equal(result, expected)
arr = np.array([1] * 5, dtype='int64')
result = to_timedelta(arr, unit='m')
expected = TimedeltaIndex([np.timedelta64(1, 'm')] * 5)
tm.assert_index_equal(result, expected)
arr = np.array([1] * 5, dtype='int64')
result = to_timedelta(arr, unit='h')
expected = TimedeltaIndex([np.timedelta64(1, 'h')] * 5)
tm.assert_index_equal(result, expected)
arr = np.array([1] * 5, dtype='timedelta64[s]')
result = to_timedelta(arr)
expected = TimedeltaIndex([np.timedelta64(1, 's')] * 5)
tm.assert_index_equal(result, expected)
arr = np.array([1] * 5, dtype='timedelta64[D]')
result = to_timedelta(arr)
expected = TimedeltaIndex([np.timedelta64(1, 'D')] * 5)
tm.assert_index_equal(result, expected)
        # Test with lists as input when box=False
expected = np.array(np.arange(3) * 1000000000, dtype='timedelta64[ns]')
result = to_timedelta(range(3), unit='s', box=False)
tm.assert_numpy_array_equal(expected, result)
result = to_timedelta(np.arange(3), unit='s', box=False)
tm.assert_numpy_array_equal(expected, result)
result = to_timedelta([0, 1, 2], unit='s', box=False)
tm.assert_numpy_array_equal(expected, result)
# Tests with fractional seconds as input:
expected = np.array(
[0, 500000000, 800000000, 1200000000], dtype='timedelta64[ns]')
result = to_timedelta([0., 0.5, 0.8, 1.2], unit='s', box=False)
tm.assert_numpy_array_equal(expected, result)
def test_to_timedelta_invalid(self):
# bad value for errors parameter
msg = "errors must be one of"
tm.assert_raises_regex(ValueError, msg, to_timedelta,
['foo'], errors='never')
# these will error
pytest.raises(ValueError, lambda: to_timedelta([1, 2], unit='foo'))
pytest.raises(ValueError, lambda: to_timedelta(1, unit='foo'))
# time not supported ATM
pytest.raises(ValueError, lambda: to_timedelta(time(second=1)))
assert to_timedelta(time(second=1), errors='coerce') is pd.NaT
pytest.raises(ValueError, lambda: to_timedelta(['foo', 'bar']))
tm.assert_index_equal(TimedeltaIndex([pd.NaT, pd.NaT]),
to_timedelta(['foo', 'bar'], errors='coerce'))
tm.assert_index_equal(TimedeltaIndex(['1 day', pd.NaT, '1 min']),
to_timedelta(['1 day', 'bar', '1 min'],
errors='coerce'))
# gh-13613: these should not error because errors='ignore'
invalid_data = 'apple'
assert invalid_data == to_timedelta(invalid_data, errors='ignore')
invalid_data = ['apple', '1 days']
tm.assert_numpy_array_equal(
np.array(invalid_data, dtype=object),
to_timedelta(invalid_data, errors='ignore'))
invalid_data = pd.Index(['apple', '1 days'])
tm.assert_index_equal(invalid_data, to_timedelta(
invalid_data, errors='ignore'))
invalid_data = Series(['apple', '1 days'])
tm.assert_series_equal(invalid_data, to_timedelta(
invalid_data, errors='ignore'))
def test_to_timedelta_via_apply(self):
# GH 5458
expected = Series([np.timedelta64(1, 's')])
result = Series(['00:00:01']).apply(to_timedelta)
tm.assert_series_equal(result, expected)
result = Series([to_timedelta('00:00:01')])
tm.assert_series_equal(result, expected)
def test_to_timedelta_on_missing_values(self):
# GH5438
timedelta_NaT = np.timedelta64('NaT')
actual = pd.to_timedelta(Series(['00:00:01', np.nan]))
expected = Series([np.timedelta64(1000000000, 'ns'),
timedelta_NaT], dtype='<m8[ns]')
assert_series_equal(actual, expected)
actual = pd.to_timedelta(Series(['00:00:01', pd.NaT]))
assert_series_equal(actual, expected)
actual = pd.to_timedelta(np.nan)
assert actual.value == timedelta_NaT.astype('int64')
actual = pd.to_timedelta(pd.NaT)
assert actual.value == timedelta_NaT.astype('int64')
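# --- illustrative sketch, not part of the pandas test suite ------------------
# The conversions exercised above, gathered in one hypothetical helper (input
# values are placeholders):
def _to_timedelta_usage_sketch():
    parsed = pd.to_timedelta("1 days 06:05:01.00003")            # string parsing
    from_ints = pd.to_timedelta([1, 2, 3], unit="s")              # integers + unit
    coerced = pd.to_timedelta(["1 day", "bad"], errors="coerce")  # bad entry -> NaT
    ignored = pd.to_timedelta("apple", errors="ignore")           # returned unchanged
    return parsed, from_ints, coerced, ignored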
| bsd-3-clause |
jdmcbr/geopandas | benchmarks/io.py | 2 | 3194 | import os
import shutil
import tempfile
import warnings
import numpy as np
from shapely.geometry import Point
from geopandas import GeoDataFrame, GeoSeries, read_file, read_parquet, read_feather
# TEMP: hide warning from to_parquet
warnings.filterwarnings("ignore", message=".*initial implementation of Parquet.*")
format_dict = {
"ESRI Shapefile": (
".shp",
lambda gdf, filename: gdf.to_file(filename, driver="ESRI Shapefile"),
lambda filename: read_file(filename, driver="ESRI Shapefile"),
),
"GeoJSON": (
".json",
lambda gdf, filename: gdf.to_file(filename, driver="GeoJSON"),
lambda filename: read_file(filename, driver="GeoJSON"),
),
"GPKG": (
".gpkg",
lambda gdf, filename: gdf.to_file(filename, driver="GeoJSON"),
lambda filename: read_file(filename, driver="GeoJSON"),
),
"Parquet": (
".parquet",
lambda gdf, filename: gdf.to_parquet(filename),
lambda filename: read_parquet(filename),
),
"Feather": (
".feather",
lambda gdf, filename: gdf.to_feather(filename),
lambda filename: read_feather(filename),
),
}
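# --- illustrative sketch, not part of the benchmark suite --------------------
# Each format maps to an (extension, writer, reader) triple, so a one-off round
# trip outside of asv is just a dictionary lookup; the hypothetical helper
# below writes a tiny frame to a temporary directory and reads it back.
def _roundtrip_sketch(file_format="GPKG"):
    ext, writer, reader = format_dict[file_format]
    tiny = GeoDataFrame({"geometry": [Point(0, 0), Point(1, 1)], "v": [1, 2]})
    tmpdir = tempfile.mkdtemp()
    path = os.path.join(tmpdir, "tiny" + ext)
    try:
        writer(tiny, path)
        return reader(path)
    finally:
        shutil.rmtree(tmpdir)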
class Bench:
params = ["ESRI Shapefile", "GeoJSON", "GPKG", "Parquet", "Feather"]
param_names = ["file_format"]
def setup(self, file_format):
self.ext, self.writer, self.reader = format_dict[file_format]
num_points = 20000
xs = np.random.rand(num_points)
ys = np.random.rand(num_points)
self.points = GeoSeries([Point(x, y) for (x, y) in zip(xs, ys)])
self.df = GeoDataFrame(
{
"geometry": self.points,
"x": xs,
"y": ys,
"s": np.zeros(num_points, dtype="object"),
}
)
self.tmpdir = tempfile.mkdtemp()
self.filename = os.path.join(self.tmpdir, "frame" + self.ext)
self.writer(self.df, self.filename)
def teardown(self, file_format):
shutil.rmtree(self.tmpdir)
class BenchFrame(Bench):
params = ["ESRI Shapefile", "GeoJSON", "GPKG", "Parquet", "Feather"]
param_names = ["file_format"]
def time_write(self, file_format):
with tempfile.TemporaryDirectory() as tmpdir:
out_filename = os.path.join(tmpdir, "frame" + self.ext)
self.writer(self.df, out_filename)
def time_read(self, file_format):
self.reader(self.filename)
class BenchSeries(Bench):
params = ["ESRI Shapefile", "GeoJSON", "GPKG"]
param_names = ["file_format"]
def setup(self, file_format):
super().setup(file_format)
self.filename_series = os.path.join(self.tmpdir, "series" + self.ext)
self.writer(self.points, self.filename_series)
def time_write_series(self, file_format):
with tempfile.TemporaryDirectory() as tmpdir:
out_filename = os.path.join(tmpdir, "series" + self.ext)
self.writer(self.points, out_filename)
def time_read_series(self, file_format):
GeoSeries.from_file(self.filename_series)
def time_read_series_from_frame(self, file_format):
GeoSeries.from_file(self.filename)
| bsd-3-clause |