metadata
{
    "source": "JohnGBaker/tess-short-binaries",
    "score": 3
}
#### File: src/mcmc/decimate.py
```python
import numpy as np
import argparse
import matplotlib
import matplotlib.pyplot as plt
#The idea here is that we successively pass over the data, decimating by progressive factors of two.
#In each pass, we perform a LLSF over the surrounding N points, then compare the likelihood of that fit
#both using the full data and using a factor-of-two decimation of the target points.
#If the difference is smaller than some tolerance (weighted by the original data rms err)
#then we keep the decimated version.
# data are in a np array of the form [t,x,sigma^2,tmin,tmax]
# line info is in the form [t0, x0, slope]
def chi2(data,line):
res=data[:,1]-line[1]-(data[:,0]-line[0])*line[2]
return np.sum(res*res/data[:,2])
def DoLLSF(data,tref=None):
sig2=1/np.sum(data[:,2]**(-1))
t0=sig2*np.sum(data[:,0]/data[:,2])
x0=sig2*np.sum(data[:,1]/data[:,2])
t2sum=np.sum((data[:,0]-t0)**2/data[:,2])
xtsum=np.sum((data[:,0]-t0)*(data[:,1]-x0)/data[:,2])
slope=xtsum/t2sum;
#print("\n1/sig2=",1/sig2,"t2sum=",t2sum,"xtsum=",xtsum)
#print("t0,x0,slope",t0,x0,slope)
#print(" data=",data)
if(tref is None):tref=t0
return np.array([tref,x0+(tref-t0)*slope,slope])
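#---- Added usage note (hedged sketch, not part of the original module) ----
#On noiseless straight-line data DoLLSF recovers the line exactly, e.g.
#  pts=np.array([[t, 2.0+3.0*t, 1.0, t, t] for t in np.linspace(0,1,5)])
#  DoLLSF(pts,tref=0.0)   # -> approximately [0.0, 2.0, 3.0]
#Only the first three columns [t, x, sigma^2] enter the weighted fit.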
def subData(data,line,dchitol):
#We will replace the data with a single point, requiring that
# 1. llsf fit for this data + other data is unchanged
# -require slope and x0 variations of delta chi2 vanish
# 2. the derivative of chi^2 wrt llsf intercept at mean time is preserved
#
# deltachi = sum[ (xi -x0 -(ti-t0)*s)^2 / sig2i ] - (xnew -x0 -(tnew-t0)*s)^2 / sig2new
#
# d^2deltachi/dx0^2 = 0 -> 1/sig2new = sum(1/sig2i)
# and
# d deltachi/dx0 = 0 -> xnew -x0 -s*(tnew-t0) = sig2new * sum((xi-x0-s*(ti-t0))/sig2i)
# = xd-x0
# where xd=sig2new*sum(xi/sig2i), and we write the line setting t0=t0d=sig2new*sum(ti/sig2i)
# and
# d deltachi/ds = 0 = -sum((ti-t0)*(xi-x0-s*(ti-t0))/sig2i) + (tnew-t0)*(xnew-x0-s*(tnew-t0))/sig2new
# = -sum((ti-t0)*ri/sig2i) + (tnew-t0)*(xd-x0)/sig2new
# where ri = xi-x0-s*(ti-t0)
#
# For the last equation, if xd!=x0, we can set tnew to solve, but we constrain tnew to be within the
# time limits of the data.
# We also constrain the size of the resulting deltachi to be below some limit, after solving as above
global nsub, nsubtfail,nsubchi2fail
nsub+=1
sig2new=1/np.sum(data[:,2]**(-1))
t0d=sig2new*np.sum(data[:,0]/data[:,2])
xd=sig2new*np.sum(data[:,1]/data[:,2])
slope=line[2]
x0=(t0d-line[0])*slope+line[1]
#print("line0=",t0d,x0,slope)
trel=data[:,0]-t0d;
res=(data[:,1]-x0-trel*slope)
#compute new t point to ensure that slope matches line
trsum=np.sum(trel*res/data[:,2])
#xsum=np.sum((data[:,1]-x0)/data[:,2])
xsum=(xd-x0)/sig2new
if(xsum==0):
if(trsum==0):toff=0
else: return data
else: toff=trsum/xsum
dataTmax=data[-1,4]
dataTmin=data[0,3]
if(dataTmax-t0d <= toff ):
#print("fail tmax")
nsubtfail+=1
return data
if(dataTmin-t0d >= toff ):
#print("fail tmin")
nsubtfail+=1
return data
tnew=t0d+toff
#compute new xval
xnew=xd+slope*(tnew-t0d)
#print("xd,tnew,xnew",xd,tnew,xnew)
dchi=(np.sum(res*res/data[:,2])-(xd-x0)**2/sig2new)
if(dchi>dchitol):
#print("fail dchi=",dchi,">",dchitol)
nsubchi2fail+=1
return data
return np.array([[tnew,xnew,sig2new,dataTmin,dataTmax]])
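#---- Added usage note (hedged sketch, not part of the original module) ----
#subData collapses a chunk to a single effective point while preserving the
#total inverse-variance weight and, within dchitol, the surrounding line fit:
#  zeroCounters()   #subData updates the global counters defined below
#  chunk=np.array([[t, 1.0, 0.01, t, t] for t in np.linspace(0,1,4)])
#  out=subData(chunk, DoLLSF(chunk), dchitol=1.0)
#out is either the original chunk (if a constraint failed) or a single row
#[tnew, xnew, sig2new, tmin, tmax] with sig2new == 1/np.sum(1/chunk[:,2]).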
def reduceDataChunk(segment,target,tol):
line=DoLLSF(segment)
n=len(segment)
if( n - len(target) < 2):
#in this case there is no real solution to the formal problem I pose
#if there is just 1 remaining point, then a solution could be found, but it
#will be set at the location of the remaining point and will not satisfy the
#time-range condition
return target
redchi2=chi2(segment,line)/(n-2)
global nchi2,nchi2fail
nchi2+=1
if(redchi2>1+tol):
#print("fail redchi2-1=",redchi2-1)
nchi2fail+=1
return target
return subData(target,line,tol*n)
def reduceDataPass(data,chunksize,tol,segwid=3):
ndata=len(data)
nchunk=int(ndata/chunksize)+1 #nchunk gives enough to cover the dataset+1
segsize=int(segwid*chunksize)
noff=int((nchunk*chunksize-ndata)/2) #half the amount of overhang beyond the data
#noff=int((nchunk*chunksize-ndata)*np.random.rand())
nfirst=chunksize #This gives the location of the first (leftmost) chunk boundary, i.e. the index of the first datapoint in the second chunk.
if(noff>0):nfirst-=noff
for i in range(nchunk):
#print("\n****\ni=",i)
#set the range of the target chunk constraining within bounds
itargleft=nfirst+(i-1)*chunksize
if(itargleft<0):itargleft=0
itargright=nfirst+i*chunksize
if(itargright>ndata):itargright=ndata
target=data[itargleft:itargright]
#time grouping test:
dtmax=0;dtmin=target[-1,0]-target[0,0]
for k in range(len(target)-1):
dt=target[k+1,0]-target[k,0]
if(dt>dtmax):dtmax=dt
#for the time grouping test dtmin we expand to the nearest neighbor points (if any)
for k in range(max(0,itargleft-1),min(ndata-1,itargright+1)):
dt=data[k+1,0]-data[k,0]
if(dt<dtmin):dtmin=dt
if(len(target)<2 or dtmax/dtmin > 30):
#target too short or times not grouped
replacement=target.copy()
else: #passed test so continue
#print(" target=",target)
#set the range of the surrounding segment
isegleft=int((itargleft+itargright-segsize)/2)
if(isegleft<0):isegleft=0
isegright=isegleft+segsize
if(isegright>ndata):isegright=ndata
#print(" ",isegleft,"--",itargleft,"++",itargright,"--",isegright)
segment=data[isegleft:isegright]
#print(" segment=",segment)
replacement=reduceDataChunk(segment,target,tol).copy()
#diagnostics:
#newseg=np.concatenate((data[isegleft:itargleft],replacement,data[itargright:isegright]),axis=0)
#llsf=DoLLSF(segment,tref=0)[1:3]
#nllsf=DoLLSF(newseg,tref=0)[1:3]
#print(" replacement=",replacement)
if(i==0):newdata=replacement
else: newdata=np.append(newdata,replacement,axis=0)
#print(" newdata=",newdata)
#print(" LLSF: ",llsf,"->",nllsf," delta=",llsf-nllsf)
return newdata
def zeroCounters():
global nchi2,nchi2fail,nsub,nsubtfail,nsubchi2fail
nchi2=0
nchi2fail=0
nsub=0
nsubtfail=0
nsubchi2fail=0
def decimate(origdata, lev, maxpass=1000, ntemper=20, csscale=1000, npretemper=0,verbose=False):
#first configure the data. Internally, we work with 5-column data:
# t, flux, err**2, tmin, tmax
#We also support 3 column data: t,flux,err
data=origdata.copy()
threecols=data.shape[1]==3
if threecols:
data=np.array([[d[0],d[1],d[2]**2,d[0],d[0]] for d in data])
#first we tune some parameters based on 'lev' option
#Note that I find similar levels of concentration [and net num of samples] on the peak region for segw=csmin*nwid~75 with csmin varying from 4->10
#These tests are done with
#segw=75,tol=0.25 segw=150,tol=0.25 segw=150,tol=0.5 segw=75,tol=0.5
#2: n=523 nev=321 F=.61 764 / 1182 = .64 533 / 799 = .67 338 / 476 = .71
#3: n=736 nev=472 F=.64 704 / 1158 = .61 523 / 823 = .64 330 / 487 = .68
#4: n=783 nev=421 F=.54 747 / 1196 = .62 536 / 909 = .59 368 / 659 = .56
#5: n=900 nev=494 F=.55 784 / 1389 = .56 617 /1174 = .53 386 / 744 = .52
#6: n=796 nev=425 F=.53 728 / 1306 = .62 670 /1140 = .59 437 / 782 = .56
#7: n=877 nev=485 F=.55 812 / 1409 = .58
#8: n=917 nev=512 F=.56 797 / 1324 = .60 684 /1253 = .55 384 / 769 = .50
#9: n=908 nev=504 F=.55
#10:n=908 nev=493 F=.54 787 / 1283 = .61 695 /1167 = .60
#11:n=1022 nev=476 F=.46
#12:n=926 nev=398 F=.43 753 / 1317 = .57 666 /1137 = .59
#14:n=1109 nev=513 F=.46 819 / 1433 = .57 664 /1188 = .56
segw=150;tol=0.2;csmin=10
#here we set up some scalings for these params blending between the following guides
#lev=0:segw=1000,tol=0.05,csmin=25 #few % red of lens reg. but next 10x reduced overall
#lev=5:segw=150,tol=0.2,csmin=10 #reduction by factor of ~30 overall
#lev=10:segw=60,tol=0.5,csmin=2 #reduction by factor ~100 overall
#lev=15:segw=25,tol=1.0,csmin=2 #reduction by factor >200 overall
if(lev<=5):
x=lev/5.0
segw=int(np.exp(np.log(1000)*(1-x)+np.log(150)*x))
tol=np.exp(np.log(0.05)*(1-x)+np.log(0.2)*x)
csmin=int(25*(1-x)+10*x)
#csmin=10
elif(lev<=10):
x=(lev-5)/5.0
segw=int(np.exp(np.log(150)*(1-x)+np.log(60)*x))
tol=np.exp(np.log(0.2)*(1-x)+np.log(0.5)*x)
csmin=int(10*(1-x)+2.0*x)
else:
x=(lev-10)/5.0
segw=int(np.exp(np.log(60)*(1-x)+np.log(25)*x))
tol=np.exp(np.log(0.5)*(1-x)+np.log(1.0)*x)
csmin=2
if(verbose):print("segw,csmin,tol:",segw,csmin,tol)
nwid=int(segw/csmin)
##Now for the actual decimation algorithm
lastcs=0
doneAtSize=False
for i in range(maxpass):
zeroCounters()
#with pretempering we begin with a pass of small chunk smoothing to make it less likely to cut small features.
if(i<npretemper):
chunksize=int(csmin*np.exp(np.log((1+csscale/csmin))*(i/(1.0+npretemper))))
ieff=0
else:
ieff=i-npretemper
chunksize=int(csmin+csscale/(ieff/ntemper*(1+ieff/ntemper)+1))
if(chunksize==lastcs and doneAtSize):
#already tried this case
continue
#print(i, "ieff=",ieff)
#print("Trying chunksize=",chunksize)
newdata = reduceDataPass(data,chunksize,tol,nwid)
#print("data size ",len(data),"->",len(newdata))
#print("fail rate: chi2:",nchi2fail/(nchi2+2e-18),"sub t:",nsubtfail/(nsub+2e-18),"sub chi2:",nsubchi2fail/(nsub+2e-18))
#datallsf=DoLLSF(origdata,tref=0)
#newdatallsf=DoLLSF(newdata,tref=0)
#print("llsf:",datallsf[1:3],"->",newdatallsf[1:3]," delta=",(newdatallsf-datallsf)[1:3])
#termination condition
if(len(newdata)==len(data) and lastcs==chunksize and i>npretemper):
if(chunksize<=csmin):
break
else: doneAtSize=True
else:doneAtSize=False
lastcs=chunksize
data=newdata
if threecols:
data=np.array([[d[0],d[1],np.sqrt(d[2])] for d in data])
return data
def main():
parser = argparse.ArgumentParser(description='Attempt to decimate data losing minimal information.')
parser.add_argument('fname', metavar='chain_file', type=str, help='Input file path')
parser.add_argument('-lev', default="5",help='Level of aggressiveness in data reduction')
    parser.add_argument('-anscol', type=int, default="-1",help='Column of noise-free answer data in the input file, used for error evaluation. (Default -1: none)')
parser.add_argument('-plot', action="store_true", help='Plot results instead of saving to file.')
parser.add_argument('-evalonly', action="store_true", help='Perform evaluation from precomputed results.')
parser.add_argument('-esterr', action="store_true", help='Roughly estimate error bar level from the first few points.')
parser.add_argument('-q', action="store_true", help='Run in quiet mode with minimal screen output.')
args = parser.parse_args()
lev=int(args.lev)
tag="lev"+str(lev)
data=np.loadtxt(args.fname) #Assume reading in t,x,sigma
#data=np.array([[t,np.random.normal(),1] for t in range(300)])#fake data
#data=np.array([[t,0.1*(t%2)+t,1] for t in range(10)])#fake data
#data=np.array([[d[0],d[1],d[2]**2,d[0],d[0]] for d in data])
#err=np.std([d[2] for d in data[:1600]])
#print("err=",err)
#err=np.std([d[2] for d in data[:400]])
#print("err=",err)
#err=np.std([d[2] for d in data[:100]])
#print("err=",err)
#print("err=",err)
tcol=0
dcol=1
if(args.anscol>=0):
ans=np.array([[d[0],d[args.anscol]] for d in data])
if(args.anscol<=tcol):tcol+=1
if(args.anscol<=dcol):dcol+=1
#print("ans:",ans.shape)
if(args.esterr):
err=np.std([d[2] for d in data[:25]])
if(not args.q):print("Using err=",err)
data=np.array([[d[tcol],d[dcol],err**2,d[tcol],d[tcol]] for d in data])
else:
data=np.array([[d[tcol],d[dcol],d[dcol+1]**2,d[tcol],d[tcol]] for d in data])
origdata=data.copy()
    newdata=decimate(data,lev,verbose=not args.q)
    if(not args.q):print("nsamples:",len(origdata),"->",len(newdata))
    if(args.plot):
plt.errorbar(origdata[:,0],origdata[:,1],yerr=np.sqrt(origdata[:,2]),fmt="+")
plt.errorbar(newdata[:,0],newdata[:,1],yerr=np.sqrt(newdata[:,2]),fmt=".")
icut=int(len(newdata)*9/10)
plt.errorbar(newdata[:icut,0],newdata[:icut,1],yerr=np.sqrt(newdata[:icut,2]),fmt=".")
icut=int(len(newdata)*4/5)
plt.errorbar(newdata[:icut,0],newdata[:icut,1],yerr=np.sqrt(newdata[:icut,2]),fmt=".")
icut=int(len(newdata)*3/5)
plt.errorbar(newdata[:icut,0],newdata[:icut,1],yerr=np.sqrt(newdata[:icut,2]),fmt=".")
icut=int(len(newdata)*2/5)
plt.errorbar(newdata[:icut,0],newdata[:icut,1],yerr=np.sqrt(newdata[:icut,2]),fmt=".")
icut=int(len(newdata)/5)
plt.errorbar(newdata[:icut,0],newdata[:icut,1],yerr=np.sqrt(newdata[:icut,2]),fmt=".")
icut=int(len(newdata)/10)
plt.errorbar(newdata[:icut,0],newdata[:icut,1],yerr=np.sqrt(newdata[:icut,2]),fmt=".")
if(args.anscol>=0 and False):
plt.plot(ans[:,0],ans[:,1],"k-",linewidth=2)
newdata=np.array([[d[0],d[1],np.sqrt(d[2])] for d in newdata])
if(".txt" in args.fname):
outfile=args.fname.replace(".txt","_"+tag+".dat")
elif(".dat" in args.fname):
outfile=args.fname.replace(".dat","_"+tag+".dat")
elif(".out" in args.fname):
outfile=args.fname.replace(".out","_"+tag+".dat")
else:
outfile=args.fname+".dat"
if(not args.plot and args.anscol>=0):
#Given a noise free 'answer' we estimate errors from the decimation in two ways.
#Method 1: Estimate the reduced chi2 deviation of the true data from the decimated data
# In this case the true values are linearly interpolated to the decimated sample points
# and the variance comes from the decimated data estimate.
diff=newdata[:,1]-np.interp(newdata[:,0],ans[:,0],ans[:,1])
var=newdata[:,2]**2
#print(diff,var)
ncount=len(diff)
rchi2a=np.sum(diff*diff/var)/ncount
#Method 2: Estimate the reduced chi2 deviation of the decimated data from the true data
# In this case the decimated values are linearly interpolated to the original sample points
# and the variance comes from the original data err estimate.
diff=ans[:,1]-np.interp(ans[:,0],newdata[:,0],newdata[:,1])
var=origdata[:,2]
#print(diff,var)
ncount=len(diff)
rchi2b=np.sum(diff*diff/var)/ncount
if(not args.q):
print("err estimates")
print(rchi2a,rchi2b)
else:
print(args.fname,len(origdata),len(newdata),rchi2a,rchi2b)
if(args.plot):
plt.show()
elif(not args.evalonly):
if(not args.q):print("outfile=",outfile)
np.savetxt(outfile,newdata)
if __name__ == "__main__": main()
```
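A minimal usage sketch for the decimate module above (the synthetic light curve and the chosen lev value are illustrative assumptions, not taken from the repository):
```python
import numpy as np
import decimate  # assumes src/mcmc/decimate.py is importable

# Three-column input [t, flux, err]; decimate() squares err internally and
# returns the reduced data in the same three-column form.
t = np.linspace(0.0, 10.0, 2000)
flux = 1.0 + 0.01*np.sin(2*np.pi*t/2.5) + np.random.normal(scale=0.001, size=t.size)
err = np.full_like(t, 0.001)
data = np.column_stack((t, flux, err))

reduced = decimate.decimate(data, lev=5, verbose=True)
print(len(data), '->', len(reduced))
```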
#### File: src/mcmc/HB_MCMC2.py
```python
from astropy.timeseries import LombScargle
import pandas as pd
import numpy as np
import json
#import matplotlib as mpl
#import matplotlib.pyplot as plt
import astroquery
from astroquery.mast import Catalogs,Observations
#import re
import sys
import os
#dirp='../../../TessSLB/src/LightCurveCode'
#if dirp not in sys.path: sys.path.append(dirp)
#dirp='../../../TessSLB/src/LightCurveCode/ptmcmc/cython'
#if dirp not in sys.path: sys.path.append(dirp)
dirp='../ptmcmc/python'
if dirp not in sys.path: sys.path.append(dirp)
dirp='ptmcmc/python'
if dirp not in sys.path: sys.path.append(dirp)
import ptmcmc
import ptmcmc_analysis
#import pyHB_Feb as pyHB for runs with code before Apr 7.
import pyHB
#import BrowseSLBs
import copy
#import warnings
import scipy
import glob
import pickle
import re
useM=False
def fold_lc(times,fluxes,errs,Pfold,downfac=1.0,decimate_level=None,rep=False):
#if decimate level is set, that overrides the native downsampling/binning
#method using the decimate.py approach with the specified level.
phases=(np.array(times)/Pfold)%1
isort=np.argsort(phases)
phases=phases[isort]
fluxes=np.array(fluxes)[isort]
errs=np.array(errs)[isort]
nold=len(times)
if decimate_level is not None and decimate_level>=0:
import decimate
if rep: print('nold,Pfold,decimate_lev:',nold,Pfold,decimate_level,times[0],"< t <",times[-1])
data=np.array([[phases[i],fluxes[i],errs[i]] for i in range(len(phases))])
newdata=decimate.decimate(data,lev=decimate_level,npretemper=0,verbose=True)
fphases=newdata[:,0]
ffluxes=newdata[:,1]
ferrs=newdata[:,2]
else:
if rep: print('nold,Pfold,downfac:',nold,Pfold,downfac,times[0],"< t <",times[-1])
groupwidth=(times[-1]-times[0])*(1+0.1/nold)/nold/Pfold #frac period bin size
groupwidth*=downfac
#print('mean errs=',errs.mean())
if rep: print('groupwidth=',groupwidth, 'mean group size=',groupwidth*nold)
fphases=[]
ffluxes=[]
ferrs=[]
i=0
j=0
while(i<nold):
#print(i,j)
xs=[]
ys=[]
es=[]
tr=phases[0]+groupwidth*j
while(i<nold and phases[i]<tr):
#print(i,times[i],tr)
xs.append(phases[i])
ys.append(fluxes[i])
es.append(errs[i])
i+=1
#print(tr,xs,ys,es)
if(len(xs)>0):
xs=np.array(xs)
ys=np.array(ys)
es=np.array(es)
ws=1/es**2
w=np.sum(ws)
x=np.sum(xs*ws)/w
y=np.sum(ys*ws)/w
v=np.sum((ys-y)**2*ws)/w
#print(ys)
#print(es)
#print(np.sqrt(1/w),np.sqrt(v/len(xs)),np.sqrt(np.sum((ys-y)**2)/len(xs)**2))
e=np.sqrt(1/w+v/len(xs))#Not 100% sure this is right
#if rep:print(xs,ys,es,'-->',x,y,e,1/w,v)
fphases.append(x)
ffluxes.append(y)
ferrs.append(e)
j+=1
fphases=np.array(fphases)
ffluxes=np.array(ffluxes)
ferrs=np.array(ferrs)
#print('mean err=',ferrs.mean())
return fphases,ffluxes,ferrs
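#---- Added usage note (hedged sketch, not part of the original code) ----
#Folding a synthetic ~2.5 d light curve and binning it down by roughly 4x:
#  t=np.linspace(0.,27.,20000)
#  f=1.0+0.01*np.sin(2*np.pi*t/2.5)
#  e=np.full_like(t,1e-3)
#  ph,ff,fe=fold_lc(t,f,e,Pfold=2.5,downfac=4.0)
#ph holds sorted phases in [0,1); each bin's error combines the propagated
#weight term 1/sum(w) with the in-bin scatter v, as in the formula above.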
def weighted_likelihood(ftimes,ffluxes,ferrs,x,sp,constraint_weight=10000,lctype=3,marginalized_noise_pars=None):
pars=sp.get_pars(x);
if sp.out_of_bounds(pars): #Hopefully doesn't happen?
lr=sp.live_ranges()
lmeans=np.mean(lr,axis=1)
lwidths=lr[:,1]-lr[:,0]
        print('A par is out of range: dpar/hwidth:\n',(x-lmeans)/lwidths*2)
parwt=np.sum((pars-sp.get_pars(lmeans))**2)
#print(lmeans,parwt)
return -2e18*(1+parwt*0)
else:
mlike=pyHB.likelihood(ftimes,ffluxes,ferrs,pars,lctype=lctype)
if marginalized_noise_pars is not None:
alpha,beta0=marginalized_noise_pars
mlike=-alpha*np.log(1-mlike/beta0)
if constraint_weight > 0:
roche_frac=pyHB.test_roche_lobe(pars)
mlike-=constraint_weight*max([0,roche_frac-1.0])
#print(x,mlike)
#print(roche_frac,pars)
return mlike
def adjust_sectors(data, verbose=False):
sector_tag='sector'
sectors=data[sector_tag].unique()
if verbose: print('sectors',sectors)
if(len(sectors)>1):
medians=np.array([np.median(data.flux[data[sector_tag]==sec]) for sec in sectors])
offsets=medians-medians.mean()
#print('offsets',offsets)
for i in range(len(sectors)):
data.loc[data[sector_tag]==sectors[i],'flux']/=1+offsets[i]/medians.mean()
if verbose:
print('Adjusted sector levels:',offsets)
print('Adjusted sector factors:',1+offsets/medians.mean())
return data
#*******************
# Approx symmetries
#*******************
def invert_binary_symmetry_transf(s, randoms):
sp=s.getSpace()
iinc=sp.requireIndex("inc")
parvals=s.get_params()
parvals[iinc]=np.pi-parvals[iinc]
return ptmcmc.state(s,parvals);
def back_view_symmetry_transf(s, randoms):
#This is based on an observed approximate symmetry if we switch to
#back side view (omega0->omega0+pi) and also swap the combination
    #logM+2*log_rad_resc between stars 1 and 2.
#We realize the latter by:
#logrr1 -> logrr2 + 0.5*(logM2-logM1)
#logrr2 -> sim
#i.e. preserving M1,M2
#
#The jacobian is trivial
sp=s.getSpace()
iom0=sp.requireIndex("omega0")
im1=sp.requireIndex("logM1")
im2=sp.requireIndex("logM2")
irr1=sp.requireIndex("log_rad1_resc")
irr2=sp.requireIndex("log_rad2_resc")
parvals=s.get_params()
parvals[iom0]+=np.pi
dm=(parvals[im1]-parvals[im2])/2
if parvals[iom0]>np.pi:parvals[iom0]-=2*np.pi
newrr1=parvals[irr2]-dm
newrr2=parvals[irr1]+dm
parvals[irr1]=newrr1
parvals[irr2]=newrr2
return ptmcmc.state(s,parvals);
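#---- Added note (hedged sketch, not part of the original code) ----
#Both maps above are involutions: inc -> pi-inc -> inc, omega0 shifts by pi
#and wraps back, and the rad_resc swap undoes itself because logM1,logM2 are
#unchanged. For example,
#  s2=back_view_symmetry_transf(back_view_symmetry_transf(s,None),None)
#should reproduce the parameters of s.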
#############################################################################
class HB_likelihood(ptmcmc.likelihood):
def __init__(self,id,data,period=None,Mstar=None,massTol=0,lensMax=0,eMax=None,maxperiod=14,fixperiod=None,downfac=1.0,constraint_weight=10000,outname="",rep=False,forceM1gtM2=False,rescalesdict={},rescalefac=1.0,viz=False,lctype=3,pins={},prior_dict={},min_per_bin=0,savePfig="",marginalize_noise=False,decimate_level=None,use_syms=False):
self.bestL=None
self.forceM1gtM2=forceM1gtM2
self.lctype=lctype
## Prepare data ##
if True:
data=adjust_sectors(data)
data[['time','flux','err']].to_csv(outname+'_adjusted.dat')
self.data=data
self.constraint_weight=constraint_weight
self.rep=rep
#dofold=(period is not None)
dofold=True
if dofold:
## Compute period and fold data ##
if period is not None and period>0: fperiod=period
else:
print("Computing folding period")
#For TESS data we set some reasonable limits on the Period
minimum_period=0.25
maximum_period=14
#Because our lightcurves are not nearly sinusoidal, it is
#essential to use more terms in the Fourier model underlying
#the Lomb-Scargle analysis. Otherwise a harmonic is likely to
#dominate. We also find the standard 5 samples per peak to be
#insufficient.
frequency, power = LombScargle(data['time'].values,data['flux'].values,nterms=15).autopower(minimum_frequency=1/maximum_period,maximum_frequency=1/minimum_period,samples_per_peak=50)
#print('LombScargle samples:',len(power))
#ilfcut=int(len(power)/20)+1
ilfcut=int(len(power))
if0=0
for i,f in enumerate(frequency):
if 1/f < maxperiod:
if0=i
break
imax=if0+np.argmax(power[if0:ilfcut])
pm,p0,pp=power[imax-1:imax+2]
eps=(pm-pp)/(pm+pp-2*p0)/2
f0=frequency[imax]
df=frequency[imax+1]-f0
fmax=f0+df*eps
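                #Added note: the eps shift above is standard parabolic (quadratic)
                #interpolation of the discrete peak: fitting a parabola through
                #(f0-df,pm),(f0,p0),(f0+df,pp) puts the maximum at f0+eps*df.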
if rep:
print('Lomb-Scargle raw f,P=',f0,1/f0)
print(' fit f,P=',fmax,1/fmax)
fperiod=1.0/fmax
if rep and viz:
import matplotlib.pyplot as plt
#print('Lomb-Scargle period',fperiod)
fig, ax1 = plt.subplots()
ax1.plot(frequency,power)
ax1.plot(frequency[if0:ilfcut],power[if0:ilfcut])
if True: #add inset
from mpl_toolkits.axes_grid.inset_locator import (inset_axes, InsetPosition,mark_inset)
#ax2=plt.axes([0,0,1,1])
#inspos=InsetPosition(ax1,[0.4,0.4,0.5,0.5])
#ax2.set_axes_locator(inspos)
#mark_inset(ax1, ax2, loc1=2, loc2=4, fc="none", ec='0.5')
ax2=ax1.inset_axes([0.45,0.45,0.5,0.5])
ax2.plot(frequency,power)
ax2.plot(frequency[if0:ilfcut],power[if0:ilfcut])
ax2.set_xlim(fmax*0.9,fmax*1.1)
ax1.indicate_inset_zoom(ax2)
ax1.set_title(str(id))
ax1.set_xlabel("frequency (1/day)")
if len(savePfig)>0 and savePfig!="None":
plt.savefig(savePfig)
plt.close()
else:
plt.show()
#sys.exit
doubler=1#set to 2 to fold on double period
if(fixperiod is not None):
ffold=fixperiod*doubler
fperiod=fixperiod
else:ffold=fperiod*doubler
self.fperiod=fperiod
times=data['time'].values
ndataraw=len(times)
dts=np.diff(times)
dt=np.percentile(dts,90)
obs_time=sum(dts[dts<dt*1.5])
if rep: print('typical dt=',dt,'observation time',obs_time)
obs_cycles=obs_time/self.fperiod
cycles=(times[-1]-times[0])/self.fperiod
n_per=cycles*downfac
while n_per<min_per_bin and n_per<ndataraw/2:
downfac*=2
n_per=cycles*downfac
if rep:
print('Folding period',ffold)
print('Data has',cycles,'cycles')
print('Estimated n per downsampled bin:',n_per,'>',min_per_bin)
self.fphases,self.ffluxes,self.ferrs=fold_lc(data['time'].values,data['flux'].values,data['err'].values,ffold,downfac=downfac,rep=rep,decimate_level=decimate_level)
self.ftimes=self.fphases*ffold+int(data['time'].values[0]/ffold)*ffold
if self.rep:
array=np.vstack((self.ftimes,self.ffluxes,self.ferrs)).T
pd.DataFrame(data=array,columns=['ftime','fflux','ferr']).to_csv(outname+'_folded.dat')
print('Folded data length is',len(self.ftimes))
else: #no fold
self.ftimes=data['time'].values
self.ffluxes=data['flux'].values
self.fperiod=period
#wts=1/self.ferrs**2
wts=1+0*self.ffluxes
ffmean=np.sum(self.ffluxes*wts)/np.sum(wts)
#logFmean=np.log10(ffmean+50)
if False and self.rep:
print('ffmean',ffmean)
print('ftimes',self.ftimes)
print('ffluxes',self.ffluxes)
## Set up parameter space
## This piggybacks on the parameter space tool from pyHB
if lctype==2:
sp=copy.deepcopy(pyHB.sp2)
elif lctype==3:
sp=copy.deepcopy(pyHB.sp3)
#Set pinned params
for name in pins:
names=sp.live_names()
if name in names:
val=pins[name]
if val is None:
val=np.mean(sp.live_ranges()[names.index(name)])
if rep: print('Pinning param: '+name+'='+str(val))
sp.pin(name,val)
#Allow periods within a factor of just over 2% of specified
sp.reset_range('logP',[np.log10(self.fperiod/1.02),np.log10(self.fperiod*1.02)])
sp.pin('logP',np.log10(self.fperiod))
if 'logTanom' in sp.live_names(): sp.pin('logTanom',0)
#sp.reset_range('log(F+50)',[logFmean-dlogF,logFmean+dlogF])
#if not allow_blend: sp.pin('blend_frac',-3)
if(Mstar is not None):
if massTol==0:
                sp.pin('logM1',np.log10(Mstar))
else:
sp.reset_range('logM1',[np.log10(Mstar/(1+massTol)),np.log10(Mstar*(1+massTol))])
if eMax is not None:
if eMax>0:sp.reset_range('e',[0,eMax])
else:sp.pin('e',0)
self.sp=sp
#T0 is not meaningful beyond modP
sp.reset_range('T0',[self.ftimes[0],self.ftimes[0]+self.fperiod])
#Expand the mass range for test
sp.reset_range('logM1',[-1.5,2.5])
        #Prep noise marginalization
self.marginalized_noise_pars=None
if marginalize_noise:
par='ln_noise_resc'
if par in sp.live_names():
#note: we assume zero mean on the log noise scaling
#otherwise we copy the steps from below to set the scale
#FIXME: It would be much better to move this after, but
                # then we also need to move the specification of
# the ptmcmc space to after that. Not hard...
scale = sp.live_ranges()[sp.live_names().index(par)][1]
if prior_dict is not None:
if 'ln_noise_resc' in prior_dict:
pardict=prior_dict[par]
if not isinstance(pardict,dict):raise ValueError('While processing user prior data for parameter "'+par+'". Expected value associated with this par to be a dict, but got '+str(pardict))
if 'scale' in pardict:
scale=pardict['scale']
sp.pin('ln_noise_resc',0)
sigma0 = scale / (len(data['time'].values)/len(self.ftimes)) #matches how prior is set below
alpha0 = 2 + 1/(np.exp(4*sigma0**2)-1)
beta0 = np.exp(6*sigma0**2) / ( np.exp(4*sigma0**2) - 1 )
alpha = alpha0 + len(self.ftimes)/2
self.marginalized_noise_pars=(alpha,beta0)
if rep: print('Noise level marginalization activated with sigma0=',sigma0,'-->alpha0,alpha,beta0=',alpha0,alpha,beta0)
else:
                raise ValueError('No free noise parameter to marginalize.')
###Compute SNR
#pars0=[-10,1,10000,0,0,0,0,logFmean,0]
#logMlens, Mstar, Pdays, e, sini, omgf, T0overP,logFp50,Fblend=pars0
#SNR=np.sqrt(-llike0*2)
print('sp:live',sp.live_names())
print('mins/maxs',sp.live_ranges().T)
print('pinvals',sp.pinvals)
#Set up stateSpace
names=sp.live_names()
ranges=sp.live_ranges()
npar=len(names)
space=ptmcmc.stateSpace(dim=npar);
space.set_names(names);
        wraps=['Omega','Omega0','T0']#T0 not meaningful beyond T0%period
centers=[0]*npar
scales=[1]*npar
for i in range(npar):
name=names[i]
xmin=ranges[i,0]
xmax=ranges[i,1]
if name in wraps:
space.set_bound(name,ptmcmc.boundary('wrap','wrap',xmin,xmax))
#else:
# space.set_bound(name,ptmcmc.boundary('limit','limit',xmin,xmax)) #May not be needed
#set prior info
centers[i]=(xmax+xmin)/2.0
scales[i]=(xmax-xmin)/2.0
types=['uni']*npar
types[names.index('inc')]='polar'
#These should be gaussian, if present
for pname in ['logTanom', 'mu_1', 'tau_1', 'mu_2', 'tau_2', 'alpha_ref_1', 'alpha_ref_2', 'ln_beam_resc_1', 'ln_beam_resc_2', 'ln_alp_Teff_1', 'ln_alp_Teff_2', 'flux_tune', 'ln_noise_resc', 'alp_Teff_1', 'alp_Teff_2','alp_rad1_resc','alp_rad2_resc']:
if pname in names:
types[names.index(pname)]='gaussian'
sp.reset_range(pname,[float('-inf'),float('inf')])
if prior_dict is not None:
for par in prior_dict:
if par in names:
ipar=names.index(par)
pardict=prior_dict[par]
if not isinstance(pardict,dict):raise ValueError('While processing user prior data for parameter "'+par+'". Expected value associated with this par to be a dict, but got '+str(pardict))
if 'center' in pardict:
centers[ipar]=pardict['center']
if 'scale' in pardict:
scales[ipar]=pardict['scale']
if 'type' in pardict:
types[ipar]=pardict['type']
#If ln_noise_scale fitting is included, we reduce the prior width if we have already downsampled the data
if 'ln_noise_resc' in names:
pname='ln_noise_resc'
print('Rescaling noise fitting prior scale[ln_noise_resc] =',scales[names.index(pname)],'by the folding factor.')
scales[names.index(pname)] /= len(data['time'].values)/len(self.ftimes)
#some rescaling for better Gaussian proposals
rescales=[1]*npar
for name in rescalesdict:
if name in names:
rescales[names.index(name)]=rescalesdict[name]
rescales=[val*rescalefac for val in rescales]
if use_syms:
#Add information about potential symmetries
if rep: print("Applying symmetry transform.")
space.addSymmetry(ptmcmc.involution(space,"invert_binary",0,invert_binary_symmetry_transf))
space.addSymmetry(ptmcmc.involution(space,"back_view",0,back_view_symmetry_transf))
print("HB_likelihood::setup: space="+space.show())
self.basic_setup(space, types, centers, scales, rescales);
def evaluate_log(self,s):
params=s.get_params()
done=False
if self.forceM1gtM2:
#Here we hard-code M1,M2 indices, could do better...
im1=0
im2=1
if params[im2]>params[im1]:
result = -1e100
done=True
if not done:
#print(params)
result=weighted_likelihood(self.ftimes,self.ffluxes,self.ferrs,params,self.sp,self.constraint_weight,self.lctype,marginalized_noise_pars=self.marginalized_noise_pars)
if False:
global count
print(count)
count+=1
print("state:",s.get_string())
print(" logL={0:.13g}".format(result))
if self.bestL is None or result>self.bestL:
self.bestX=params
self.bestL=result
return result
    def report(self):
        print('Best fit results:')
        print(' pars =',self.bestX)
        print(' chi2 =',-self.bestL)
        #print(' SNR =',self.SNR)  #would require self.SNR, which is not set here (the SNR computation above is commented out)
        #print(' fit percent = %5.2f'%((1-self.bestL/self.llike0)*100.0))  #would require self.llike0
def getModels(self,parslist):
if self.lctype==2:
models=[pyHB.lightcurve2(self.ftimes,self.sp.get_pars(pars)) for pars in parslist]
elif self.lctype==3:
models=[pyHB.lightcurve3(self.ftimes,self.sp.get_pars(pars)) for pars in parslist]
return models
count=0
def read_data_from_sector_files(id,basepath,edgeskip=0.5,allowsecs=None,trueerr=1.0,tmin=None,tmax=None):
if tmin is not None or tmax is not None: print('read_data_from_sector_files: Time limits are not yet implemented and will be ignored!')
if allowsecs is None:allowsecs=range(1,20)
#print('id=',id)
datafiles=glob.glob(basepath+'/*/*/tesslc_'+str(id)+'.pkl')
found_in_sectors=[]
df=pd.DataFrame(columns=['sector','time','flux','err'])
for path in datafiles:
data=pickle.load(open(path,'rb'))
sector=int(re.findall(r'sector_(\d*)',path)[0])
found_in_sectors+=[sector]
if not sector in allowsecs:continue
flux = data[6]
time = data[4]
fluxerr = data[8]*trueerr
dt=time[1]-time[0]
iedgeskip=int(edgeskip/dt)
#print('iedgeskip',iedgeskip)
if(iedgeskip>0):#process edge skips
keeps=np.array([True]*len(time))
keeps[0:iedgeskip]=False
keeps[-iedgeskip:]=False
for i in range(1,len(time)):
if keeps[i] and time[i]-time[i-1]>0.5: #gap >1/2 day
#print('cut detected at t =',time[i])
#print(time[i-1],time[i],time[i]-time[i-1])
keeps[i-iedgeskip:i]=False
#print('skipping from',time[i-iedgeskip],'to',time[i+iedgeskip])
keeps[i:i+iedgeskip]=False
flux=flux[keeps]
time=time[keeps]
fluxerr=fluxerr[keeps]
#print('time',time)
ddf=pd.DataFrame([[sector,time[i],flux[i],fluxerr[i]] for i in range(len(time))],columns=['sector','time','flux','err'])
#print(ddf)
        df=pd.concat([df,ddf],ignore_index=True)
df=df.sort_values('time')
#print(df)
print("Found in sectors",found_in_sectors)
return df
def read_data_from_file(id,path,edgeskip=0.5,trueerr=1.0,tmin=None,tmax=None,weight_err_dt=True,verbose=False):
#trueerr is our estimate of the 1-sigma error on the data at flux=1,
# otherwise err ~ sqrt(flux)
#This routine intended for data that are normalized to near-unity flux
#If weight_err_dt, then trueerr applies if the local cadence is
# 30min (1/48 day) otherwise the unit flux err is trueerr/sqrt(dt).
# At longer gaps dt is assumed 1/48.
if verbose: print('Reading data from file:',path)
if path.endswith(".fits"):
if verbose: print('This seems to be a FITS file.')
from astropy.io import fits
f=fits.open(path)
time=f[1].data["TIME"]
flux=f[1].data["CORR_FLUX"]
flux=flux/np.median(flux)
if verbose: print('time.shape',time.shape)
data=np.column_stack((time,flux,0*flux)) #Maybe there is useful error info??
if verbose: print('data.shape',data.shape)
else:
if verbose: print('Will read as text data.')
data=np.genfromtxt(path,skip_header=1)
#assumed format t flux other_flux flag
data=data[data[:,3]==0] #clean out the flagged rows
if tmin is not None: data=data[data[:,0]>=tmin]
if tmax is not None: data=data[data[:,0]<=tmax]
flux = data[:,1]
time = data[:,0]
cadfac=np.diff(time,prepend=2*time[0]-time[1])*48
fluxerr = np.sqrt(flux/np.minimum(1,cadfac))*trueerr
#dt=time[1]-time[0]
#iedgeskip=int(edgeskip/dt)
#print('iedgeskip',iedgeskip)
if(edgeskip>0):#process edge skips
#keeps=np.array([True]*len(time))
#keeps[0:iedgeskip]=False
#keeps[-iedgeskip:]=False
keeps = np.logical_and( time-time[0]>edgeskip, time[-1]-time>edgeskip )
for i in range(1,len(time)):
if keeps[i] and time[i]-time[i-1]>0.5: #gap >1/2 day
if verbose:
print('cut detected at t =',time[i])
print(time[i-1],time[i],time[i]-time[i-1])
#keeps[i-iedgeskip:i]=False
#keeps[i:i+iedgeskip]=False
#print('skipping from',time[i-iedgeskip],'to',time[i+iedgeskip])
keeps=np.logical_and(keeps,
np.logical_or(
time<time[i-1]-edgeskip,
time>time[i]+edgeskip ) )
if verbose: print('skipping from',time[i-1]-edgeskip,'to',time[i]+edgeskip)
flux=flux[keeps]
time=time[keeps]
fluxerr=fluxerr[keeps]
sector=0
df=pd.DataFrame([[sector,time[i],flux[i],fluxerr[i]] for i in range(len(time))],columns=['sector','time','flux','err'])
return df
#//***************************************************************************************8
#//main test program
def main(argv):
ptmcmc.Init()
#//prep command-line options
#Options opt(true);
opt=ptmcmc.Options()
#//Add some command more line options
#data specific flags
opt.add("seed","Pseudo random number grenerator seed in [0,1). (Default=-1, use clock to seed.)","-1")
##opt.add("precision","Set output precision digits. (Default 13).","13")
opt.add("outname","Base name for output files (Default 'mcmc_output').","mcmc_output")
#data_model flags
opt.add("data_style","Provide data model flags as a json string","")
opt.add("id","TIC_ID","")
opt.add("period","Set fixed period for folding and model. (Default None)","None")
opt.add("datafile","Explicitly indicate the data file.","")
opt.add("Mstar","Override TIC (primary) star mass. (Default None)","None")
opt.add("sectors","Only use these sectors (comma-separated)","")
opt.add("tlimits","Set tmin,tmax time limits, outside which to ignore data. (def=none)","")
opt.add("noTIC","Set to 1 to skip any online query about TIC id.","0")
opt.add("trueerr","scalefactor of the error following JS's code. (def=1)","1")
opt.add('min_per_bin','Minimum mean number of samples per bin after folding and downsampling.(Default 0)','0')
    opt.add('decimate_level','Level (0-15) to apply in decimate.py data decimation algorithm. Larger number is more aggressive. Overrides native downsampling. (Default none.)','-1')
opt.add('edgeskip','Size of region to exclude from data near data gaps in days. (Default=0.5)','0.5')
#//Create the sampler
#ptmcmc_sampler mcmc;
#hb model style flags
opt.add("hb_style","Provide heartbeat model style flags as a json string","")
opt.add("datadir","directory where processed sector data files are located",".")
opt.add("eMax","Set max value for eccentricity. (Default 0.95)","0.95")
opt.add("massTol","Uniform confidence width factor for TIC mass. (Default 0.2)","0.2")
opt.add("plotSamples","File with samples to plot, (eg chain output)","")
opt.add("nPlot","If plotting samples, how many to sample curves to include","20")
opt.add("downfac","Extra downsampling factor in lightcurve folding.","1")
opt.add("Roche_wt","Weight factor for Roche-limit constraint (def 10000).","10000")
opt.add("M1gtM2","Set to 1 to force M1>M2. (def=0)","0")
#opt.add('blend','Set to 1 to vary the blending flux','0')
opt.add('lctype','Light curve model version. Options are 2 or 3. (Default 3)','3')
opt.add('pins','json formatted string with parname:pinvalue pairs','{}')
opt.add('marginalize_noise','Set to 1 to analytically marginalize noise scaling.','0')
opt.add('rescales','Rescaling factors to base proposals, etc., as json formatted string with parname:value pairs','{}')
#for MCMC
opt.add("mcmc_style","Provide mcmc flags as a json string","{}")
opt.add('rescalefac','Rescale factor for gaussian proposals. Default=1','1')
#Other
opt.add("savePfig","Location to save period fig file in plotting mode (Default: interactive display).","")
opt.add("saveLfig","Location to save lightcurve fig file in plotting mode (Default: interactive display).","")
s0=ptmcmc.sampler(opt)
rep=s0.reporting()
opt.parse(argv)
#Process flags:
intf=lambda x: int(x)
pval=lambda name:opt.value(name)
getpar=lambda name,typ:style.get(name,typ(opt.value(name)) if len(opt.value(name))>0 or typ==str else None)
getboolpar=lambda name:style.get(name,(opt.value(name)!='0'))
getNpar=lambda name,typ:style.get(name,typ(opt.value(name)) if opt.value(name)!='None' else None)
#basic
outname=opt.value('outname')
seed=float(opt.value('seed'))
#viz only option
do_plot = opt.value('plotSamples')!="" or int(opt.value('nPlot'))==0
ncurves=int(opt.value('nPlot'))
sampfiles=opt.value('plotSamples')
saveLfig=opt.value('saveLfig')
savePfig=opt.value('savePfig')
#data
style={}
if opt.value('data_style')!='':
style=opt.value('data_style')
if style.startswith('{'):
style=json.loads(style)
else:
with open(style,'r') as sfile:
style=json.load(sfile)
id=getpar('id',int)
datadir=getpar('datadir',str)
massTol=getpar('massTol',float)
noTIC=getboolpar('noTIC')
Mstar=getNpar('Mstar',float)
sectors=getpar('sectors',str)
tlimits=getpar('tlimits',str)
trueerr=getpar('trueerr',float)
decimate_level=getpar('decimate_level',int)
datafile=getpar('datafile',str)
period=getNpar('period',float)
if period is None and opt.value('period') != 'None':period=float(opt.value('period'))
downfac=getpar('downfac',float)
min_per_bin=getpar('min_per_bin',float)
if min_per_bin <=0 and opt.value('min_per_bin')!='0':min_per_bin=float(opt.value('min_per_bin'))
if rep:print('decimate-level',decimate_level)
edgeskip=getpar('edgeskip',float)
if edgeskip ==0.5 and opt.value('edgeskip')!='0.5':edgeskip=float(opt.value('edgeskip'))
datastyle=style
# HB model style
style={}
if rep: print('hb_style',opt.value('hb_style'))
if opt.value('hb_style')!='':
style=opt.value('hb_style')
if style.startswith('{'):
style=json.loads(style)
else:
with open(style,'r') as sfile:
style=json.load(sfile)
if rep: print('processed:',"'"+json.dumps(style)+"'")
Roche_wt=getpar('Roche_wt',float)
pindict=getpar('pins',json.loads)
eMax=getpar('eMax',float)
forceM1gtM2=getboolpar('M1gtM2')
marginalize_noise=getboolpar('marginalize_noise')
use_syms=False #May change based on mcmc_options
rescalesdict=getpar('rescales',json.loads)
#blend=getboolpar('blend')
lctype=getpar('lctype',int)
prior_dict=style.get('prior',{})
if rep: print('Roche_wt,emax,lctype:',Roche_wt,eMax,lctype)
#Process mcmc options
style=opt.value('mcmc_style')
if style.startswith('{'):
style=json.loads(style)
else:
with open(style,'r') as sfile:
style=json.load(sfile)
mcmc_options=style
hb_mcmc_flags=['rescalefac']
style={}
optlist=[]
no_arg_flags=['de_mixing','gauss_temp_scaled','prop_adapt_more','pt_reboot_grad']
keys=list(mcmc_options.keys())
for key in keys:
if key in hb_mcmc_flags:
style[key]= mcmc_options[key]
del mcmc_options[key]
for key in mcmc_options:
arg=mcmc_options[key]
if key in no_arg_flags:
if arg: optlist.append('--'+key)
else:
optlist.append('--'+key+'='+str(arg))
rescalefac=getpar('rescalefac',float)
if rep: print('rescalefac=',rescalefac)
if 'sym_prop_frac' in mcmc_options:
if rep: print('sym_prop_frac=',mcmc_options['sym_prop_frac'])
if mcmc_options['sym_prop_frac']>0:
use_syms=True
#Pass to ptmcmc
opt.parse(optlist)
#Get TIC catalog info:
if noTIC:
TICData = None
else:
try:
TICData = Catalogs.query_object('TIC '+str(id),radius=0.0011,catalog='TIC')#0.011 deg is 2 px
if rep: print(TICData['ID','Tmag','Vmag','ra','dec','d','objType','lumclass','Teff','mass','rad'][0])
#print(TICData.columns)
except:
if rep:print("**TIC Query Failed**")
if rep:print("id=",id)
TICData=None
if TICData is not None:
if rep:print('Vmag',TICData['Vmag'][0], 'Teff',TICData['Teff'][0])
Rstar=None
Mstar=None
global useM
if useM:
        if TICData is None:
useM=False
if rep:print('Cannot "useM" since I have no TIC data! Overriding')
if massTol==0 and str(TICData['rad'][0]).isnumeric: #Don't fix radius if we are varying the mass
Rstar=TICData['rad'][0]
if rep:print('Rstar=',Rstar)
if Mstar is None and not np.isnan(float(TICData['mass'][0])):
Mstar=TICData['mass'][0]
#print('float(Mstar)',float(Mstar))
if rep:print('Mstar(TIC)=',Mstar)
if rep:print('Mstar=',Mstar)
allowsecs=None
if sectors!='':
allowsecs=sectors.split(',')
allowsecs=[int(sec) for sec in allowsecs]
tmin=None
tmax=None
if tlimits!='':
tlims=tlimits.split(',')
if len(tlims)<2:tlims.append('')
if tlims[0].isnumeric():tmin=float(tlims[0])
if tlims[1].isnumeric():tmax=float(tlims[1])
if rep: print('Constraining',tmin,'< t <',tmax)
#Prepare the data:
if datafile=='':
dfg=read_data_from_sector_files(id,datadir,edgeskip=0.5,allowsecs=allowsecs,trueerr=trueerr,tmin=tmin,tmax=tmax)
else:
if datafile.startswith('/'):
filepath=datafile
else:
filepath=datadir+'/'+datafile
dfg=read_data_from_file(id,filepath,edgeskip=edgeskip,trueerr=trueerr,tmin=tmin,tmax=tmax,verbose=rep)
if rep:
print('Trimmed data length is',len(dfg))
dfg[['time','flux','err']].to_csv(outname+'_trimmed.dat')
#//Create the likelihood
fixperiod=None
if period is not None and period<0:
period=-period
fixperiod=period
like=HB_likelihood(id,dfg,period,Mstar,massTol=massTol,eMax=eMax,maxperiod=20,fixperiod=fixperiod,downfac=downfac,constraint_weight=Roche_wt,outname=outname,rep=rep,forceM1gtM2=forceM1gtM2,rescalesdict=rescalesdict,rescalefac=rescalefac,viz=do_plot,lctype=lctype,pins=pindict,prior_dict=prior_dict,min_per_bin=min_per_bin,savePfig=savePfig,marginalize_noise=marginalize_noise,decimate_level=decimate_level,use_syms=use_syms)
if fixperiod is None:
dataPfile="data_style_Pfit.json"
if len(savePfig)>0 or not os.path.exists(dataPfile):
#Only overwrite when savePfig flag is set
datastyle['period']=like.fperiod
datastyle['period-note']='period determined automatically by Lomb-Scargle'
with open(dataPfile,'w') as dpf:
json.dump(datastyle,dpf,indent=4)
do_residual=True
resid_rescaled=False
if(do_plot):
#Plot samples instead of running chains
t=like.ftimes
ts=np.linspace(t[0],t[-1],300)
data=like.ffluxes
if ncurves>0:
if sampfiles.startswith('[') and sampfiles.endswith(']'):
if ',' in sampfiles:
sampfiles=sampfiles[1:-1].split(',')
else:
sampfiles=sampfiles[1:-1].split()
else:
sampfiles=[sampfiles]
nmaxs=[None for x in sampfiles]
for i in range(len(sampfiles)):
if ':' in sampfiles[i]:
sampfiles[i],nmaxs[i]=sampfiles[i].split(':')
if i>0 and len(sampfiles[i])==0:sampfiles[i]=sampfiles[i-1]
if len(nmaxs[i])==0:nmaxs[i]=None
print('samples files:',sampfiles)
print('sample nmaxs:',nmaxs)
modelsets=[]
residsets=[]
for i in range(len(sampfiles)):
sfile=sampfiles[i]
n=nmaxs[i]
print('Processing',sfile)
chain=ptmcmc_analysis.chainData(sfile,useLike=True)
if n is None or not '%' in n and int(n)>chain.getSteps():
n=chain.getSteps()
elif '%' in n:
n=int(min(100,float(n[:-1]))/100*chain.getSteps())
else: n=int(n)
nmaxs[i]=str(n)
rows,samples=chain.get_samples(ncurves,nmax=n,good_length=n//10,return_rows=True)
print('sample_rows:',rows)
colnames=chain.names
for att in ['samp','post','like']:
if att in colnames:
print('mean',att,np.mean(chain.data[rows][:,colnames.index(att)]))
print('mean pars:',np.mean(samples,axis=0))
print('std pars:',np.std(samples,axis=0))
#print("samples:")
#for sample in samples:print(sample)
#cnames=chain.names[chain.names.index('post')+1:]
cnames=chain.names[chain.ipar0:]
idx=[cnames.index(name) for name in like.sp.live_names()]
print(idx,cnames,like.sp.live_names())
psamples=[like.sp.get_pars([pars[idx[i]] for i in range(len(idx))]) for pars in samples]
if lctype==2:
lightcurve=pyHB.lightcurve2
elif lctype==3:
lightcurve=pyHB.lightcurve3
models=[lightcurve(ts,p[:-1]) for p in psamples]
modelsets.append(models)
roches=[pyHB.test_roche_lobe(p,verbose=True) for p in psamples[-1:] ]
print('roche fracs:',roches)
if do_residual:
resc=[1]*len(psamples)
if 'ln_noise_resc' in cnames:
resid_rescaled=True
iresc=cnames.index('ln_noise_resc')
resc=np.exp([p[iresc] for p in psamples])
#resids=[(data-lightcurve(t,p[:-1])) for p in psamples]
resids=[(data-lightcurve(t,psamples[j][:-1]))/resc[j] for j in range(len(psamples))]
residsets.append(resids)
else: modelsets =[]
import matplotlib.pyplot as plt
if do_residual:
fig, axs = plt.subplots(2, 1, figsize=[6.4, 6.4],sharex=True)
fig.subplots_adjust(hspace=0)
ax=axs[0]
rax=axs[1]
else:
fig, axs = plt.subplots(1, 1)
ax=axs
plt.subplots_adjust(bottom=0.25)
lims0=None
lims1=None
ax.errorbar(t,data,yerr=like.ferrs,ls='None',label='data')
if do_residual:rax.errorbar(t,data*0,yerr=like.ferrs,ls='None')
colors=['r','b','g','y','m','c','k']
for i in range(len(modelsets)):
label=sampfiles[i]+':'+nmaxs[i]
col=colors[i]
for model in modelsets[i]:
ax.plot(ts,model,col,alpha=0.2,label=label)
lims0=autoscale(model,lims0)
label=None
if do_residual:
for resid in residsets[i]:
rax.plot(t,resid,col,ls='None',marker='.',alpha=0.2,label=label)
lims1=autoscale(resid,lims1)
rawftimes=like.data['time']%(like.fperiod)+int(like.data['time'][0]/like.fperiod)*like.fperiod
#-0*like.data['time'][0]%(like.fperiod)+like.ftimes[0]
ax.plot(rawftimes,like.data['flux'],'k.',ls='None',markersize=0.5,label='raw data')
lims0=autoscale(like.data['flux'],lims0)
ax.set_ylim(lims0)
rax.set_ylim(lims1)
leg=plt.figlegend(loc='upper center',fontsize='small',bbox_to_anchor=(0.5, 0.20))
#leg=ax.legend(loc='upper center', bbox_to_anchor=(0.5, -0.05),fancybox=True, shadow=True)
for lh in leg.legendHandles:
lh.set_alpha(1)
#Title
title=str(id)
if do_residual:
title+=' with residual'
if resid_rescaled:
title+=' (noise model scaled)'
ax.set_title(title)
#plt.tight_layout()
if len(saveLfig)>0:
plt.savefig(saveLfig)
plt.close()
else:
plt.show()
return
if seed<0:seed=np.random.random();
#//report
if rep:print("\noutname = '"+outname+"'")
#//Should probably move this to ptmcmc/bayesian
ptmcmc.resetRNGseed(seed);
space=like.getObjectStateSpace();
if rep:print("like.nativeSpace=\n"+space.show())
Npar=space.size();
if rep:print("Npar=",Npar)
s0.setup(like)
s=s0.clone();
s.initialize();
print('initialization done')
s.run(outname,0);
def autoscale(y,lims=None,tol=0.0025,expand=0.10):
#Cut up to tol fraction of the extreme data before autoscaling
ysort=np.sort(y)
icut=int(tol*len(y))
ymin,ymax=ysort[icut],ysort[-(1+icut)]
dy=(ymax-ymin)*expand
ymin=ymin-dy
ymax=ymax+dy
if lims is not None:
ymin=min(lims[0],ymin)
ymax=max(lims[1],ymax)
return [ymin,ymax]
if __name__ == "__main__":
import sys
argv=sys.argv[:]
del argv[0]
main(argv)
```
#### File: src/mcmc/HB_MCMC.py
```python
from astropy.timeseries import LombScargle
import pandas as pd
import numpy as np
#import matplotlib as mpl
#import matplotlib.pyplot as plt
import astroquery
from astroquery.mast import Catalogs,Observations
#import re
import sys
dirp='../../../TessSLB/src/LightCurveCode'
if dirp not in sys.path: sys.path.append(dirp)
dirp='../../../TessSLB/src/LightCurveCode/ptmcmc/cython'
if dirp not in sys.path: sys.path.append(dirp)
dirp='../../MCMC/ptmcmc/python'
if dirp not in sys.path: sys.path.append(dirp)
import ptmcmc
import ptmcmc_analysis
import pyAvst
#import BrowseSLBs
import copy
#import warnings
import scipy
import glob
import pickle
import re
#import scipy.linalg
#import scipy.optimize as opt
#warnings.simplefilter("ignore")
#importlib.reload(pyAvst)
useM=True
def fold_lc(times,fluxes,errs,Pfold,downfac=1.0):
phases=(np.array(times)/Pfold)%1
isort=np.argsort(phases)
phases=phases[isort]
fluxes=np.array(fluxes)[isort]
errs=np.array(errs)[isort]
nold=len(times)
groupwidth=(times[-1]-times[0])*(1+0.1/nold)/nold/Pfold
groupwidth*=downfac
#print('mean errs=',errs.mean())
#print('groupwidth=',groupwidth, 'mean group size=',groupwidth*nold)
fphases=[]
ffluxes=[]
ferrs=[]
i=0
j=0
while(i<nold):
#print(i,j)
xs=[]
ys=[]
es=[]
tr=phases[0]+groupwidth*j
while(i<nold and phases[i]<tr):
#print(i,times[i],tr)
xs.append(phases[i])
ys.append(fluxes[i])
es.append(errs[i])
i+=1
#print(tr,xs,ys,es)
if(len(xs)>0):
xs=np.array(xs)
ys=np.array(ys)
es=np.array(es)
ws=1/es**2
w=np.sum(ws)
x=np.sum(xs*ws)/w
y=np.sum(ys*ws)/w
v=np.sum((ys-y)**2*ws)/w
#print(ys)
#print(es)
#print(np.sqrt(1/w),np.sqrt(v/len(xs)),np.sqrt(np.sum((ys-y)**2)/len(xs)**2))
e=np.sqrt(1/w+v/len(xs))#Not 100% sure this is right
#print(xs,ys,es,'-->',x,y,e)
fphases.append(x)
ffluxes.append(y)
ferrs.append(e)
j+=1
fphases=np.array(fphases)
ffluxes=np.array(ffluxes)
ferrs=np.array(ferrs)
#print('mean err=',ferrs.mean())
return fphases,ffluxes,ferrs
def weighted_likelihood(ftimes,ffluxes,ferrs,x,sp,Rstar,constraint_weight=10000):
pars=sp.get_pars(x);
if sp.out_of_bounds(pars): #Hopefully doesn't happen?
lmeans=np.mean(sp.live_ranges(),axis=1)
parwt=np.sum((pars-sp.get_pars(lmeans))**2)
#print(lmeans,parwt)
        return -2e18*(1+parwt*0)
else:
mlike=pyAvst.likelihood(ftimes,ffluxes,ferrs,pars,ulambda=0)
if constraint_weight > 0:
roche_frac=pyAvst.test_roche_lobe(pars,Rstar=Rstar)
mlike-=constraint_weight*max([0,roche_frac-0.8])
#print(x,mlike)
#print(roche_frac,pars)
return mlike
def weighted_likelihood_lferr0(ftimes,ffluxes,lferr0,x,sp,Rstar,constraint_weight=10000):
pars=sp.get_pars(x);
if sp.out_of_bounds(pars): #Hopefully doesn't happen?
print('pars out of bounds:',pars)
lmeans=np.mean(sp.live_ranges(),axis=1)
parwt=np.sum((pars-sp.get_pars(lmeans))**2)
#print(lmeans,parwt)
        return -2e18*(1+parwt*0)
else:
mlike=pyAvst.likelihood_log10ferr0(ftimes,ffluxes,lferr0,pars,ulambda=0)
if constraint_weight > 0:
roche_frac=pyAvst.test_roche_lobe(pars,Rstar=Rstar)
mlike-=constraint_weight*max([0,roche_frac-0.8])
#print(x,mlike)
#print(roche_frac,pars)
return mlike
def adjust_sectors(data):
sector_tag='sector'
sectors=data[sector_tag].unique()
print('sectors',sectors)
if(len(sectors)>1):
medians=np.array([np.median(data.flux[data[sector_tag]==sec]) for sec in sectors])
offsets=medians-medians.mean()
#print('offsets',offsets)
for i in range(len(sectors)):
data.loc[data[sector_tag]==sectors[i],'flux']/=1+offsets[i]/medians.mean()
print('Adjusted sector levels:',offsets)
print('Adjusted sector factors:',1+offsets/medians.mean())
return data
class SLB_likelihood(ptmcmc.likelihood):
def __init__(self,id,data,period=None,lferr0=None,Mstar=None,Rstar=None,massTol=0,lensMax=0,eMax=None,maxperiod=14,fixperiod=None,dlogF=0.01,downfac=1.0,constraint_weight=10000,outname=""):
self.Rstar=Rstar
self.bestL=None
## Prepare data ##
if True: data=adjust_sectors(data)
self.data=data
self.constraint_weight=constraint_weight
#dofold=(period is None)
dofold=True
if dofold:
## Compute period and fold data ##
if period is not None: fperiod=period
else:
print("Computing folding period")
frequency, power = LombScargle(data['time'].values,data['flux'].values).autopower()
ilfcut=int(len(power)/20)+1
if0=0
for i,f in enumerate(frequency):
if 1/f < maxperiod:
if0=i
break
fmax=frequency[if0:ilfcut][np.argmax(power[if0:ilfcut])]
fperiod=1.0/fmax
doubler=1#set to 2 to fold on double period
if(fixperiod is not None):
ffold=fixperiod*doubler
fperiod=fixperiod
else:ffold=fperiod*doubler
self.fperiod=fperiod
print('Folding period',ffold)
cycles=len(data['time'].values)/48.0/(fperiod/doubler)
print('Data has',cycles,'cycles')
self.fphases,self.ffluxes,self.ferrs=fold_lc(data['time'].values,data['flux'].values,data['err'].values,ffold,downfac=downfac)
self.ftimes=self.fphases*ffold
array=np.vstack((self.ftimes,self.ffluxes,self.ferrs)).T
pd.DataFrame(data=array,columns=['ftime','fflux','ferr']).to_csv(outname+'_folded.dat')
else: #no fold
self.ftimes=data['time'].values
self.ffluxes=data['flux'].values
self.fperiod=period
#wts=1/self.ferrs**2
wts=1+0*self.ffluxes
ffmean=np.sum(self.ffluxes*wts)/np.sum(wts)
print('ffmean',ffmean)
logFmean=np.log10(ffmean+50)
print('logFmean',logFmean)
print('ftimes',self.ftimes)
print('ffluxes',self.ffluxes)
## Set up parameter space
## This piggybacks on the parameter space tool from pyAvst
sp=copy.deepcopy(pyAvst.sp)
#Allow periods within a factor of just over 2% of specified
sp.reset_range('Pdays',[self.fperiod/1.02,self.fperiod*1.02])
sp.pin('Pdays',self.fperiod)
sp.reset_range('log(F+50)',[logFmean-dlogF,logFmean+dlogF])
if(Mstar is not None):
if massTol==0:
sp.pin('Mstar',Mstar)
else:
sp.reset_range('Mstar',[Mstar/(1+massTol),Mstar*(1+massTol)])
if lensMax>0:sp.reset_range('logMlens',[-1.0,np.log10(lensMax)])
if eMax is not None:
if eMax>0:sp.reset_range('e',[0,eMax])
else:sp.pin('e',0)
self.sp=sp
###Compute SNR
pars0=[-10,1,10000,0,0,0,0,logFmean,0]
#logMlens, Mstar, Pdays, e, sini, omgf, T0overP,logFp50,Fblend=pars0
#llike0=pyAvst.likelihood(self.ftimes,self.ffluxes,self.ferrs,pars0,ulambda=0)
if lferr0 is None:
llike0=pyAvst.likelihood(self.ftimes,self.ffluxes,self.ferrs,pars0,ulambda=0)
else:
llike0=pyAvst.likelihood_log10ferr0(self.ftimes,self.ffluxes,lferr0,pars0,ulambda=0)
        self.lferr0=lferr0
        self.llike0=llike0
        self.SNR=np.sqrt(-llike0*2)
print('sp:live',sp.live_names())
print('mins/maxs',sp.live_ranges().T)
print('pinvals',sp.pinvals)
#Set up stateSpace
names=sp.live_names()
ranges=sp.live_ranges()
npar=len(names)
space=ptmcmc.stateSpace(dim=npar);
space.set_names(names);
wraps=['omgf','T0overP']
centers=[0]*npar
scales=[1]*npar
types=['uni']*npar
for i in range(npar):
name=names[i]
xmin=ranges[i,0]
xmax=ranges[i,1]
if name in wraps:
space.set_bound(name,ptmcmc.boundary('wrap','wrap',xmin,xmax))
#else:
# space.set_bound(name,ptmcmc.boundary('limit','limit',xmin,xmax)) #May not be needed
#set prior info
centers[i]=(xmax+xmin)/2.0
scales[i]=(xmax-xmin)/2.0
print("SLB_likelihood::setup: space="+space.show())
self.basic_setup(space, types, centers, scales);
def evaluate_log(self,s):
params=s.get_params()
#print(params)
if self.lferr0 is None:
result=weighted_likelihood(self.ftimes,self.ffluxes,self.ferrs,params,self.sp,self.Rstar,self.constraint_weight)
else:
result=weighted_likelihood_lferr0(self.ftimes,self.ffluxes,self.lferr0,params,self.sp,self.Rstar,self.constraint_weight)
if False:
global count
print(count)
count+=1
print("state:",s.get_string())
print(" logL={0:.13g}".format(result))
if self.bestL is None or result>self.bestL:
self.bestX=params
self.bestL=result
return result
    def report(self):
        print('Best fit results:')
        print(' pars =',self.bestX)
        print(' SNR =',self.SNR)
        print(' chi2 =',-self.bestL)
        print(' fit percent = %5.2f'%((1-self.bestL/self.llike0)*100.0))
def getModels(self,parslist):
models=[pyAvst.lightcurve(self.ftimes,self.sp.get_pars(pars)) for pars in parslist]
return models
count=0
def read_data_from_sector_files(id,basepath,edgeskip=0.5,allowsecs=None):
if allowsecs is None:allowsecs=range(1,20)
#print('id=',id)
datafiles=glob.glob(basepath+'/*/*/tesslc_'+str(id)+'.pkl')
found_in_sectors=[]
    df=pd.DataFrame(columns=['sector','time','flux','err'])
for path in datafiles:
data=pickle.load(open(path,'rb'))
sector=int(re.findall(r'sector_(\d*)',path)[0])
found_in_sectors+=[sector]
if not sector in allowsecs:continue
flux = data[6]
time = data[4]
fluxerr = data[8]
dt=time[1]-time[0]
iedgeskip=int(edgeskip/dt)
#print('iedgeskip',iedgeskip)
if(iedgeskip>0):#process edge skips
keeps=np.array([True]*len(time))
keeps[0:iedgeskip]=False
keeps[-iedgeskip:]=False
for i in range(1,len(time)):
if keeps[i] and time[i]-time[i-1]>0.5: #gap >1/2 day
#print('cut detected at t =',time[i])
#print(time[i-1],time[i],time[i]-time[i-1])
keeps[i-iedgeskip:i]=False
#print('skipping from',time[i-iedgeskip],'to',time[i+iedgeskip])
keeps[i:i+iedgeskip]=False
flux=flux[keeps]
time=time[keeps]
fluxerr=fluxerr[keeps]
#print('time',time)
ddf=pd.DataFrame([[sector,time[i],flux[i],fluxerr[i]] for i in range(len(time))],columns=['sector','time','flux','err'])
#print(ddf)
        df=pd.concat([df,ddf],ignore_index=True)
    df=df.sort_values('time')
    #print(df)
print("Found in sectors",found_in_sectors)
return df
#//***************************************************************************************8
#//main test program
def main(argv):
ptmcmc.Init()
#//prep command-line options
#Options opt(true);
opt=ptmcmc.Options()
#//Add some command more line options
##opt.add("nchains","Number of consequtive chain runs. Default 1","1")
opt.add("id","TIC_ID","")
opt.add("datadir","directory where processed sector data files are located",".")
opt.add("seed","Pseudo random number grenerator seed in [0,1). (Default=-1, use clock to seed.)","-1")
##opt.add("precision","Set output precision digits. (Default 13).","13")
opt.add("outname","Base name for output files (Default 'mcmc_output').","mcmc_output")
opt.add("period","Set fixed period for folding and model. (Default None)","None")
opt.add("eMax","Set max value for eccentricity. (Default 0.2)","0.2")
opt.add("dlogF","Prior halfwidth for log10(F). (Default 0.01)","0.01")
opt.add("Mstar","Override TIC star mass. (Default None)","None")
opt.add("massTol","Uniform confidence width factor for TIC mass. (Default 0.2)","0.2")
opt.add("plotSamples","File with samples to plot, (eg chain output)","")
opt.add("nPlot","If plotting samples, how many to sample curves to include","20")
opt.add("downfac","Extra downsampling factor in lightcurve folding.","1")
opt.add("Roche_wt","Weight factor for Roche-limit constraint (def 10000).","10000")
opt.add("secs","Only use these sectors (comma-separated)","")
opt.add("l10ferr","log10 of fractional flux err. (def =-3.25)","-3.25")
#int Nlead_args=1;
#//Create the sampler
#ptmcmc_sampler mcmc;
s0=ptmcmc.sampler(opt)
opt.parse(argv)
#Get TIC catalog info:
id=int(opt.value('id'))
datadir=opt.value('datadir')
outname=opt.value('outname')
massTol=float(opt.value('massTol'))
Roche_wt=float(opt.value('Roche_wt'))
try:
TICData = Catalogs.query_object('TIC '+str(id),radius=0.0011,catalog='TIC')#0.011 deg is 2 px
print(TICData['ID','Tmag','Vmag','ra','dec','d','objType','lumclass','Teff','mass','rad'][0])
#print(TICData.columns)
except:
print("**TIC Query Failed**")
print("id=",id)
TICData=None
if TICData is not None:
print('Vmag',TICData['Vmag'][0], 'Teff',TICData['Teff'][0])
Rstar=None
Mstar=None
if useM:
        if massTol==0 and not np.isnan(float(TICData['rad'][0])): #Don't fix radius if we are varying the mass
Rstar=TICData['rad'][0]
print('Rstar=',Rstar)
Mstar=None
if not np.isnan(float(TICData['mass'][0])):
Mstar=TICData['mass'][0]
#print('float(Mstar)',float(Mstar))
print('Mstar(TIC)=',Mstar)
if opt.value('Mstar')!='None':Mstar=float(opt.value('Mstar'))
print('Mstar=',Mstar)
allowsecs=None
if opt.value('secs')!="":
allowsecs=opt.value('secs').split(',')
allowsecs=[int(sec) for sec in allowsecs]
#Prepare the data:
dfg=read_data_from_sector_files(id,datadir,edgeskip=0.5,allowsecs=allowsecs)
#//Create the likelihood
fixperiod=None
if opt.value('period')=="None":
period=None
else:
period=float(opt.value('period'))
if period<0:
period=-period
fixperiod=period
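    # Note on the sign convention above (restated from the code for clarity): a negative
    # --period value (e.g. -3.5) sets the folding period to its absolute value and holds it
    # fixed via fixperiod, while a positive value is passed with fixperiod=None, presumably
    # leaving the period free to vary in the fit.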
eMax=float(opt.value('eMax'))
dlogF=float(opt.value('dlogF'))
downfac=float(opt.value('downfac'))
lferr0=float(opt.value('l10ferr'))
like=SLB_likelihood(id,dfg,period,lferr0,Mstar,Rstar,massTol=massTol,lensMax=0,eMax=eMax,maxperiod=20,fixperiod=fixperiod,dlogF=dlogF,downfac=downfac,constraint_weight=Roche_wt,outname=outname)
if(opt.value('plotSamples')!="" or int(opt.value('nPlot'))==0):
#Plot samples instead of running chains
ncurves=int(opt.value('nPlot'))
t=like.ftimes
ts=np.linspace(t[0],t[-1],300)
data=like.ffluxes
if ncurves>0:
chain=ptmcmc_analysis.chainData(opt.value('plotSamples'))
samples=chain.get_samples(ncurves)
print("samples:")
for sample in samples:print(sample)
models=[pyAvst.lightcurve(ts,like.sp.get_pars(pars)) for pars in samples]
roches=[pyAvst.test_roche_lobe(like.sp.get_pars(pars),Rstar=like.Rstar) for pars in samples]
print('roche fracs:',roches)
else: models =[]
import matplotlib.pyplot as plt
plt.errorbar(t,data,yerr=like.ferrs,ls='None')
for model in models:
plt.plot(ts,model,'r',alpha=0.2)
plt.plot(like.data['time']%(like.fperiod),like.data['flux'],'k.',ls='None',markersize=0.5)
plt.show()
return
seed=float(opt.value('seed'))
if seed<0:seed=random.random();
#istringstream(opt.value("precision"))>>output_precision;
#istringstream(opt.value("outname"))>>outname;
#//report
#cout.precision(output_precision);
print("\noutname = '"+outname+"'")
#cout<<"seed="<<seed<<endl;
#cout<<"Running on "<<omp_get_max_threads()<<" thread"<<(omp_get_max_threads()>1?"s":"")<<"."<<endl;
#//Should probably move this to ptmcmc/bayesian
ptmcmc.resetRNGseed(seed);
#globalRNG.reset(ProbabilityDist::getPRNG());//just for safety to keep us from deleting main RNG in debugging.
#//Get the space/prior for use here
#stateSpace space;
#shared_ptr<const sampleable_probability_function> prior;
space=like.getObjectStateSpace();
print("like.nativeSpace=\n"+space.show())
#prior=like->getObjectPrior();
#cout<<"Prior is:\n"<<prior->show()<<endl;
#valarray<double> scales;prior->getScales(scales);
#//Read Params
Npar=space.size();
print("Npar=",Npar)
#//Bayesian sampling [assuming mcmc]:
#//Set the proposal distribution
#int Ninit;
#proposal_distribution *prop=ptmcmc_sampler::new_proposal_distribution(Npar,Ninit,opt,prior.get(),&scales);
#cout<<"Proposal distribution is:\n"<<prop->show()<<endl;
#//set up the mcmc sampler (assuming mcmc)
#//mcmc.setup(Ninit,*like,*prior,*prop,output_precision);
#mcmc.setup(*like,*prior,output_precision);
#mcmc.select_proposal();
s0.setup(like)
#//Testing (will break testsuite)
#s=like.draw_from_prior();
#print("test state:",s.get_string())
#print("logL=",like.evaluate_log(s))
#//Prepare for chain output
#ss<<outname;
#string base=ss.str();
#//Loop over Nchains
#for(int ic=0;ic<Nchain;ic++){
s=s0.clone();
s.initialize();
print('initialization done')
s.run(outname,0);
# //s->analyze(base,ic,Nsigma,Nbest,*like);
#del s;
#}
#//Dump summary info
#cout<<"best_post "<<like->bestPost()<<", state="<<like->bestState().get_string()<<endl;
#//delete data;
#//delete signal;
#delete like;
#}
if __name__ == "__main__":
import sys
argv=sys.argv[:]
del argv[0]
main(argv)
``` |
{
"source": "JohnGDR08g/TelloWithLabVIEW",
"score": 3
} |
#### File: TelloWithLabVIEW/TelloVision_LV2018/videoOnly.py
```python
import cv2,time
udpAdd='udp://@192.168.10.1:11111'
cap=cv2.VideoCapture(udpAdd)
if not cap.isOpened():
print('VideoCapture not opened, try again...')
cap.open(udpAdd)
start=time.time()
while time.time()-start<25:
print('trying to grab a frame...')
ret, frame=cap.read()
if frame is not None:
break
time.sleep(0.2)
def startcap():
ret, frame=cap.read()
frame=cv2.resize(frame,(640,480),interpolation=cv2.INTER_NEAREST)
# frame=cv2.GaussianBlur(frame,(1,1),0)
return frame
def stopcap():
cap.release()
return 0
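# Standalone usage sketch (illustrative; in this repo the functions above are presumably
# called from the LabVIEW side rather than from Python directly):
#   frame = startcap()          # grab and resize one frame from the Tello video stream
#   cv2.imshow('tello', frame)
#   cv2.waitKey(1)
#   stopcap()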
``` |
{
"source": "JohnGemini/fuel-plugin-external-zabbix",
"score": 2
} |
#### File: files/scripts/check_api.py
```python
import urllib2
import sys
import simplejson as json
import ConfigParser
from zabbix_checks_logger import get_logger
CONF_FILE = '/etc/zabbix/check_api.conf'
class OSAPI(object):
"""Openstack API"""
def __init__(self, logger, config):
self.logger = logger
self.config = config
self.username = self.config.get('api', 'user')
self.password = self.config.get('api', 'password')
self.tenant_name = self.config.get('api', 'tenant')
self.endpoint_keystone = self.config.get('api',
'keystone_endpoints'
).split(',')
self.token = None
self.tenant_id = None
self.get_token()
def get_timeout(self, service):
try:
return int(self.config.get('api', '%s_timeout' % service))
except ConfigParser.NoOptionError:
return 1
def get_token(self):
data = json.dumps({
"auth":
{
'tenantName': self.tenant_name,
'passwordCredentials':
{
'username': self.username,
                    'password': self.password
}
}
})
for keystone in self.endpoint_keystone:
self.logger.info("Trying to get token from '%s'" % keystone)
try:
request = urllib2.Request(
'%s/tokens' % keystone,
data=data,
headers={
'Content-type': 'application/json'
})
data = json.loads(
urllib2.urlopen(
request, timeout=self.get_timeout('keystone')).read())
self.token = data['access']['token']['id']
self.tenant_id = data['access']['token']['tenant']['id']
self.logger.debug("Got token '%s'" % self.token)
return
except Exception as e:
self.logger.debug("Got exception '%s'" % e)
self.logger.critical(0)
sys.exit(1)
def check_api(self, url, service):
self.logger.info("Trying '%s' on '%s'" % (service, url))
try:
request = urllib2.Request(url,
headers={
'X-Auth-Token': self.token,
})
urllib2.urlopen(request, timeout=self.get_timeout(service))
except Exception as e:
self.logger.debug("Got exception from '%s' '%s'" % (service, e))
self.logger.critical(0)
sys.exit(1)
self.logger.critical(1)
def main():
config = ConfigParser.RawConfigParser()
config.read(CONF_FILE)
logger = get_logger(config.get('api', 'log_level'))
API = OSAPI(logger, config)
if len(sys.argv) < 5:
logger.critical('No argvs, dunno what to do')
sys.exit(1)
map = config.get('api', '%s_map' % sys.argv[1])
url = '%s://%s:%s/%s' % (sys.argv[2], sys.argv[3], sys.argv[4], map)
url = url % API.__dict__
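    # Illustrative expansion of the two lines above (the map value is hypothetical, not taken
    # from the shipped config): with sys.argv[1:] = ['nova', 'http', '192.168.0.2', '8774'] and
    # a config entry like nova_map = v2/%(tenant_id)s/servers, the final URL becomes
    # http://192.168.0.2:8774/v2/<tenant_id>/servers, where tenant_id is substituted from the
    # OSAPI instance attributes set in get_token().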
API.check_api(url, sys.argv[1])
if __name__ == "__main__":
main()
``` |
{
"source": "john-g-g/snitch-rules",
"score": 2
} |
#### File: john-g-g/snitch-rules/FOP.py
```python
VERSION = 3.8
# Import the key modules
import collections, filecmp, os, re, subprocess, sys
# Check the version of Python for language compatibility and subprocess.check_output()
MAJORREQUIRED = 3
MINORREQUIRED = 1
if sys.version_info < (MAJORREQUIRED, MINORREQUIRED):
raise RuntimeError(
"FOP requires Python {reqmajor}.{reqminor} or greater, but Python {ismajor}.{isminor} is being used to run this program.".format(
reqmajor=MAJORREQUIRED, reqminor=MINORREQUIRED, ismajor=sys.version_info.major,
isminor=sys.version_info.minor))
# Import a module only available in Python 3
from urllib.parse import urlparse
# Compile regular expressions to match important filter parts (derived from Wladimir Palant's Adblock Plus source code)
ELEMENTDOMAINPATTERN = re.compile(r"^([^\/\*\|\@\"\!]*?)#\@?#")
FILTERDOMAINPATTERN = re.compile(r"(?:\$|\,)domain\=([^\,\s]+)$")
ELEMENTPATTERN = re.compile(r"^([^\/\*\|\@\"\!]*?)(#\@?#)([^{}]+)$")
OPTIONPATTERN = re.compile(r"^(.*)\$(~?[\w\-]+(?:=[^,\s]+)?(?:,~?[\w\-]+(?:=[^,\s]+)?)*)$")
# Compile regular expressions that match element tags and pseudo classes and strings and tree selectors; "@" indicates either the beginning or the end of a selector
SELECTORPATTERN = re.compile(
r"(?<=[\s\[@])([a-zA-Z]*[A-Z][a-zA-Z0-9]*)((?=([\[\]\^\*\$=:@#\.]))|(?=(\s(?:[+>~]|\*|[a-zA-Z][a-zA-Z0-9]*[\[:@\s#\.]|[#\.][a-zA-Z][a-zA-Z0-9]*))))")
PSEUDOPATTERN = re.compile(r"(\:[a-zA-Z\-]*[A-Z][a-zA-Z\-]*)(?=([\(\:\@\s]))")
REMOVALPATTERN = re.compile(r"((?<=([>+~,]\s))|(?<=(@|\s|,)))(\*)(?=([#\.\[\:]))")
ATTRIBUTEVALUEPATTERN = re.compile(r"^([^\'\"\\]|\\.)*(\"(?:[^\"\\]|\\.)*\"|\'(?:[^\'\\]|\\.)*\')")
TREESELECTOR = re.compile(r"(\\.|[^\+\>\~\\\ \t])\s*([\+\>\~\ \t])\s*(\D)")
UNICODESELECTOR = re.compile(r"\\[0-9a-fA-F]{1,6}\s[a-zA-Z]*[A-Z]")
# Compile a regular expression that describes a completely blank line
BLANKPATTERN = re.compile(r"^\s*$")
# Compile a regular expression that validates commit comments
COMMITPATTERN = re.compile(r"^(A|M|P)\:\s(\((.+)\)\s)?(.*)$")
# List the files that should not be sorted, either because they have a special sorting system or because they are not filter files
IGNORE = (
"CC-BY-SA.txt", "easytest.txt", "GPL.txt", "MPL.txt", "filter.txt", "exclusions.txt", "general_extensions.txt",
"antiadblock.txt", "foreign.txt", "whitelist.txt", "general_js_api.txt", "whitelist_stealth.txt",
"enhancedstats-addon.txt", "fanboy-tracking", "firefox-regional", "other")
# List all Adblock Plus options (excepting domain, which is handled separately), as of version 1.3.9
KNOWNOPTIONS = ("document", "elemhide", "generichide", "genericblock",
"font", "image", "match-case", "object", "media", "protobuf",
"object-subrequest", "popup", "script", "websocket", "ther",
"stylesheet", "subdocument", "third-party", "xmlhttprequest",
"mp4", "urlblock", "empty", "jsinject", "content", "important")
# List the supported revision control system commands
REPODEF = collections.namedtuple("repodef",
"name, directory, locationoption, repodirectoryoption, checkchanges, difference, commit, pull, push")
GIT = REPODEF(["git"], "./.git/", "--work-tree=", "--git-dir=", ["status", "-s", "--untracked-files=no"], ["diff"],
["commit", "-m"], ["pull"], ["push"])
HG = REPODEF(["hg"], "./.hg/", "-R", None, ["stat", "-q"], ["diff"], ["commit", "-m"], ["pull"], ["push"])
REPOTYPES = (GIT, HG)
wait = input("PRESS ENTER TO START SORTING.")
def start():
""" Print a greeting message and run FOP in the directories
specified via the command line, or the current working directory if
no arguments have been passed."""
greeting = "FOP (Filter Orderer and Preener) version {version}".format(version=VERSION)
characters = len(str(greeting))
print("=" * characters)
print(greeting)
print("=" * characters)
# Convert the directory names to absolute references and visit each unique location
places = sys.argv[1:]
if places:
places = [os.path.abspath(place) for place in places]
for place in sorted(set(places)):
main(place)
print()
else:
main(os.getcwd())
def main(location):
""" Find and sort all the files in a given directory, committing
changes to a repository if one exists."""
# Check that the directory exists, otherwise return
if not os.path.isdir(location):
print("{location} does not exist or is not a folder.".format(location=location))
return
# Set the repository type based on hidden directories
repository = None
for repotype in REPOTYPES:
if os.path.isdir(os.path.join(location, repotype.directory)):
repository = repotype
break
# If this is a repository, record the initial changes; if this fails, give up trying to use the repository
if repository:
try:
basecommand = repository.name
if repository.locationoption.endswith("="):
basecommand.append(
"{locationoption}{location}".format(locationoption=repository.locationoption, location=location))
else:
basecommand.extend([repository.locationoption, location])
if repository.repodirectoryoption:
if repository.repodirectoryoption.endswith("="):
basecommand.append(
"{repodirectoryoption}{location}".format(repodirectoryoption=repository.repodirectoryoption,
location=os.path.normpath(
os.path.join(location, repository.directory))))
else:
basecommand.extend([repository.repodirectoryoption, location])
command = basecommand + repository.checkchanges
originaldifference = True if subprocess.check_output(command) else False
except(subprocess.CalledProcessError, OSError):
print(
"The command \"{command}\" was unable to run; FOP will therefore not attempt to use the repository tools. On Windows, this may be an indication that you do not have sufficient privileges to run FOP - the exact reason why is unknown. Please also ensure that your revision control system is installed correctly and understood by FOP.".format(
command=" ".join(command)))
repository = None
# Work through the directory and any subdirectories, ignoring hidden directories
print("\nPrimary location: {folder}".format(folder=os.path.join(os.path.abspath(location), "")))
for path, directories, files in os.walk(location):
for direct in directories[:]:
if direct.startswith(".") or direct in IGNORE:
directories.remove(direct)
print("Current directory: {folder}".format(folder=os.path.join(os.path.abspath(path), "")))
directories.sort()
for filename in sorted(files):
address = os.path.join(path, filename)
extension = os.path.splitext(filename)[1]
# Sort all text files that are not blacklisted
if extension == ".txt" and filename not in IGNORE:
fopsort(address)
# Delete unnecessary backups and temporary files
if extension == ".orig" or extension == ".temp":
try:
os.remove(address)
except(IOError, OSError):
# Ignore errors resulting from deleting files, as they likely indicate that the file has already been deleted
pass
# If in a repository, offer to commit any changes
if repository:
commit(repository, basecommand, originaldifference)
def fopsort(filename):
""" Sort the sections of the file and save any modifications."""
temporaryfile = "{filename}.temp".format(filename=filename)
CHECKLINES = 10
section = []
lineschecked = 1
filterlines = elementlines = 0
# Read in the input and output files concurrently to allow filters to be saved as soon as they are finished with
with open(filename, "r", encoding="utf-8", newline="\n") as inputfile, open(temporaryfile, "w", encoding="utf-8",
newline="\n") as outputfile:
# Combines domains for (further) identical rules
def combinefilters(uncombinedFilters, DOMAINPATTERN, domainseparator):
combinedFilters = []
for i in range(len(uncombinedFilters)):
domains1 = re.search(DOMAINPATTERN, uncombinedFilters[i])
if i + 1 < len(uncombinedFilters) and domains1:
domains2 = re.search(DOMAINPATTERN, uncombinedFilters[i + 1])
if not domains1 or i + 1 == len(uncombinedFilters) or not domains2 or len(
domains1.group(1)) == 0 or len(domains2.group(1)) == 0:
# last filter or filter didn't match regex or no domains
combinedFilters.append(uncombinedFilters[i])
elif domains1.group(0).replace(domains1.group(1), domains2.group(1), 1) != domains2.group(0):
# non-identical filters shouldn't be combined
combinedFilters.append(uncombinedFilters[i])
elif re.sub(DOMAINPATTERN, "", uncombinedFilters[i]) == re.sub(DOMAINPATTERN, "",
uncombinedFilters[i + 1]):
# identical filters. Try to combine them...
newDomains = "{d1}{sep}{d2}".format(d1=domains1.group(1), sep=domainseparator, d2=domains2.group(1))
newDomains = domainseparator.join(
sorted(set(newDomains.split(domainseparator)), key=lambda domain: domain.strip("~")))
if newDomains.count("~") > 0 and newDomains.count("~") != newDomains.count(domainseparator) + 1:
# skip combining rules with both included and excluded domains. It can go wrong in many ways and is not worth the code needed to do it correctly
combinedFilters.append(uncombinedFilters[i])
else:
domainssubstitute = domains1.group(0).replace(domains1.group(1), newDomains, 1)
uncombinedFilters[i + 1] = re.sub(DOMAINPATTERN, domainssubstitute, uncombinedFilters[i])
else:
# non-identical filters shouldn't be combined
combinedFilters.append(uncombinedFilters[i])
return combinedFilters
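        # Worked example of combinefilters (traced from the logic above): for the already-sorted
        # pair ["a.com##.ad", "b.com##.ad"], the rule bodies match once the domain part is
        # stripped, so the domains are merged and a single rule "a.com,b.com##.ad" is kept.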
# Writes the filter lines to the file
def writefilters():
if elementlines > filterlines:
uncombinedFilters = sorted(set(section), key=lambda rule: re.sub(ELEMENTDOMAINPATTERN, "", rule))
outputfile.write("{filters}\n".format(
filters="\n".join(combinefilters(uncombinedFilters, ELEMENTDOMAINPATTERN, ","))))
else:
uncombinedFilters = sorted(set(section), key=str.lower)
outputfile.write("{filters}\n".format(
filters="\n".join(combinefilters(uncombinedFilters, FILTERDOMAINPATTERN, "|"))))
for line in inputfile:
minlength = 5
curentline = line[:-1]
linelength = len(curentline)
if linelength == 0:
continue
if (linelength < minlength and curentline[0] != "!"):
minlinetext = re.sub("^\s+|\n|\r|\s+$", '', curentline)
print("***Warning***: The line length \"{minlinetext}\" is less than {minlength} ".format(
minlinetext=minlinetext, minlength=minlength))
line = line.strip()
if not re.match(BLANKPATTERN, line):
# Include comments verbatim and, if applicable, sort the preceding section of filters and save them in the new version of the file
if line[0] == "!" or line[:8] == "%include" or line[0] == "[" and line[-1] == "]":
if section:
writefilters()
section = []
lineschecked = 1
filterlines = elementlines = 0
outputfile.write("{line}\n".format(line=line))
else:
# Neaten up filters and, if necessary, check their type for the sorting algorithm
elementparts = re.match(ELEMENTPATTERN, line)
if elementparts:
domains = elementparts.group(1).lower()
if lineschecked <= CHECKLINES:
elementlines += 1
lineschecked += 1
line = elementtidy(domains, elementparts.group(2), elementparts.group(3))
else:
if lineschecked <= CHECKLINES:
filterlines += 1
lineschecked += 1
line = filtertidy(line)
# Add the filter to the section
section.append(line)
# At the end of the file, sort and save any remaining filters
if section:
writefilters()
# Replace the existing file with the new one only if alterations have been made
if not filecmp.cmp(temporaryfile, filename):
# Check the operating system and, if it is Windows, delete the old file to avoid an exception (it is not possible to rename files to names already in use on this operating system)
if os.name == "nt":
os.remove(filename)
os.rename(temporaryfile, filename)
print("Sorted: {filename}".format(filename=os.path.abspath(filename)))
else:
os.remove(temporaryfile)
def filtertidy(filterin):
""" Sort the options of blocking filters and make the filter text
lower case if applicable."""
optionsplit = re.match(OPTIONPATTERN, filterin)
if not optionsplit:
# Remove unnecessary asterisks from filters without any options and return them
return removeunnecessarywildcards(filterin)
else:
# If applicable, separate and sort the filter options in addition to the filter text
filtertext = removeunnecessarywildcards(optionsplit.group(1))
optionlist = optionsplit.group(2).lower().replace("_", "-").split(",")
domainlist = []
removeentries = []
for option in optionlist:
# Detect and separate domain options
if option[0:7] == "domain=":
domainlist.extend(option[7:].split("|"))
removeentries.append(option)
elif option.strip("~") not in KNOWNOPTIONS:
isReplace = len([i for i in optionlist if "replace=" in i]) > 0
                isProtoBuf = len([i for i in optionlist if "protobuf=" in i]) > 0
                isApp = len([i for i in optionlist if "app=" in i]) > 0
if (isReplace or isProtoBuf or isApp):
if (isReplace):
optionlist = optionsplit.group(2).replace("_", "-").split(",")
if (isApp):
optionlist = optionsplit.group(2).split(",")
else:
print(
"Warning: The option \"{option}\" used on the filter \"{problemfilter}\" is not recognised by FOP".format(
option=option, problemfilter=filterin))
# Sort all options other than domain alphabetically
# For identical options, the inverse always follows the non-inverse option ($image,~image instead of $~image,image)
optionlist = sorted(set(filter(lambda option: option not in removeentries, optionlist)),
key=lambda option: (option[1:] + "~") if option[0] == "~" else option)
# If applicable, sort domain restrictions and append them to the list of options
if domainlist:
optionlist.append("domain={domainlist}".format(
domainlist="|".join(sorted(set(domainlist), key=lambda domain: domain.strip("~")))))
# Return the full filter
return "{filtertext}${options}".format(filtertext=filtertext, options=",".join(optionlist))
def elementtidy(domains, separator, selector):
""" Sort the domains of element hiding rules, remove unnecessary
tags and make the relevant sections of the rule lower case."""
# Order domain names alphabetically, ignoring exceptions
if "," in domains:
domains = ",".join(sorted(set(domains.split(",")), key=lambda domain: domain.strip("~")))
# Mark the beginning and end of the selector with "@"
selector = "@{selector}@".format(selector=selector)
each = re.finditer
# Make sure we don't match items in strings (e.g., don't touch Width in ##[style="height:1px; Width: 123px;"])
selectorwithoutstrings = selector
selectoronlystrings = ""
while True:
stringmatch = re.match(ATTRIBUTEVALUEPATTERN, selectorwithoutstrings)
if stringmatch == None: break
selectorwithoutstrings = selectorwithoutstrings.replace(
"{before}{stringpart}".format(before=stringmatch.group(1), stringpart=stringmatch.group(2)),
"{before}".format(before=stringmatch.group(1)), 1)
selectoronlystrings = "{old}{new}".format(old=selectoronlystrings, new=stringmatch.group(2))
# Clean up tree selectors
for tree in each(TREESELECTOR, selector):
if tree.group(0) in selectoronlystrings or not tree.group(0) in selectorwithoutstrings: continue
replaceby = " {g2} ".format(g2=tree.group(2))
        if replaceby == "   ": replaceby = " "  # a bare descendant combinator ("   ") collapses to a single space
selector = selector.replace(tree.group(0), "{g1}{replaceby}{g3}".format(g1=tree.group(1), replaceby=replaceby,
g3=tree.group(3)), 1)
# Remove unnecessary tags
for untag in each(REMOVALPATTERN, selector):
untagname = untag.group(4)
if untagname in selectoronlystrings or not untagname in selectorwithoutstrings: continue
bc = untag.group(2)
if bc == None:
bc = untag.group(3)
ac = untag.group(5)
selector = selector.replace("{before}{untag}{after}".format(before=bc, untag=untagname, after=ac),
"{before}{after}".format(before=bc, after=ac), 1)
# Make the remaining tags lower case wherever possible
for tag in each(SELECTORPATTERN, selector):
tagname = tag.group(1)
if tagname in selectoronlystrings or not tagname in selectorwithoutstrings: continue
if re.search(UNICODESELECTOR, selectorwithoutstrings) != None: break
ac = tag.group(3)
if ac == None:
ac = tag.group(4)
selector = selector.replace("{tag}{after}".format(tag=tagname, after=ac),
"{tag}{after}".format(tag=tagname.lower(), after=ac), 1)
# Make pseudo classes lower case where possible
for pseudo in each(PSEUDOPATTERN, selector):
pseudoclass = pseudo.group(1)
if pseudoclass in selectoronlystrings or not pseudoclass in selectorwithoutstrings: continue
ac = pseudo.group(3)
selector = selector.replace("{pclass}{after}".format(pclass=pseudoclass, after=ac),
"{pclass}{after}".format(pclass=pseudoclass.lower(), after=ac), 1)
# Remove the markers from the beginning and end of the selector and return the complete rule
return "{domain}{separator}{selector}".format(domain=domains, separator=separator, selector=selector[1:-1])
def commit(repository, basecommand, userchanges):
""" Commit changes to a repository using the commands provided."""
difference = subprocess.check_output(basecommand + repository.difference)
if not difference:
print("\nNo changes have been recorded by the repository.")
return
print("\nThe following changes have been recorded by the repository:")
try:
print(difference.decode("utf-8"))
except UnicodeEncodeError:
print("\nERROR: DIFF CONTAINED UNKNOWN CHARACTER(S). Showing unformatted diff instead:\n");
print(difference)
try:
# Persistently request a suitable comment
while True:
comment = input("Please enter a valid commit comment or quit:\n")
if checkcomment(comment, userchanges):
break
# Allow users to abort the commit process if they do not approve of the changes
except (KeyboardInterrupt, SystemExit):
print("\nCommit aborted.")
return
print("Comment \"{comment}\" accepted.".format(comment=comment))
try:
# Commit the changes
command = basecommand + repository.commit + [comment]
subprocess.Popen(command).communicate()
print("\nConnecting to server. Please enter your password if required.")
# Update the server repository as required by the revision control system
for command in repository[7:]:
command = basecommand + command
subprocess.Popen(command).communicate()
print()
except(subprocess.CalledProcessError):
print("Unexpected error with the command \"{command}\".".format(command=command))
raise subprocess.CalledProcessError("Aborting FOP.")
except(OSError):
print("Unexpected error with the command \"{command}\".".format(command=command))
raise OSError("Aborting FOP.")
print("Completed commit process successfully.")
def isglobalelement(domains):
""" Check whether all domains are negations."""
for domain in domains.split(","):
if domain and not domain.startswith("~"):
return False
return True
def removeunnecessarywildcards(filtertext):
""" Where possible, remove unnecessary wildcards from the beginnings
and ends of blocking filters."""
whitelist = False
hadStar = False
if filtertext[0:2] == "@@":
whitelist = True
filtertext = filtertext[2:]
while len(filtertext) > 1 and filtertext[0] == "*" and not filtertext[1] == "|" and not filtertext[1] == "!":
filtertext = filtertext[1:]
hadStar = True
while len(filtertext) > 1 and filtertext[-1] == "*" and not filtertext[-2] == "|":
filtertext = filtertext[:-1]
hadStar = True
if hadStar and filtertext[0] == "/" and filtertext[-1] == "/":
filtertext = "{filtertext}*".format(filtertext=filtertext)
if filtertext == "*":
filtertext = ""
if whitelist:
filtertext = "@@{filtertext}".format(filtertext=filtertext)
return filtertext
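# Examples of removeunnecessarywildcards (traced from the code above): "*ads/banner.gif*"
# is reduced to "ads/banner.gif", and a bare "@@*" whitelist filter collapses to "@@";
# leading "*|" and "*!" sequences are deliberately left untouched.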
def checkcomment(comment, changed):
""" Check the commit comment and return True if the comment is
acceptable and False if it is not."""
sections = re.match(COMMITPATTERN, comment)
if sections == None:
print("The comment \"{comment}\" is not in the recognised format.".format(comment=comment))
else:
indicator = sections.group(1)
if indicator == "M":
# Allow modification comments to have practically any format
return True
elif indicator == "A" or indicator == "P":
if not changed:
print(
"You have indicated that you have added or removed a rule, but no changes were initially noted by the repository.")
else:
address = sections.group(4)
if not validurl(address):
print("Unrecognised address \"{address}\".".format(address=address))
else:
# The user has changed the subscription and has written a suitable comment message with a valid address
return True
print()
return False
def validurl(url):
""" Check that an address has a scheme (e.g. http), a domain name
(e.g. example.com) and a path (e.g. /), or relates to the internal
about system."""
addresspart = urlparse(url)
if addresspart.scheme and addresspart.netloc and addresspart.path:
return True
elif addresspart.scheme == "about":
return True
else:
return False
if __name__ == '__main__':
start()
print("DONE")
wait = input("PRESS ENTER TO EXIT.")
``` |
{
"source": "johngian/iam-profile-faker",
"score": 3
} |
#### File: iam-profile-faker/iam_profile_faker/factory.py
```python
import json
import random
from faker import Faker
class IAMFaker(object):
def __init__(self, locale=None):
self.fake = Faker(locale)
def schema(self):
"""Profile v2 schema faker."""
return 'https://person-api.sso.mozilla.com/schema/v2/profile'
def login_method(self):
"""Profile v2 login_method faker."""
login_methods = [
'email', 'github', 'google-oauth2', 'ad|Mozilla-LDAP', 'oauth2|firefoxaccounts'
]
return random.choice(login_methods)
def user_id(self, login_method=None):
"""Profile v2 user_id attribute faker."""
user_ids = [
'email|{}'.format(self.fake.pystr(min_chars=24, max_chars=24)),
'github|{}'.format(self.fake.pyint()),
'google-oauth2|{}'.format(self.fake.pyint()),
'ad|Mozilla-LDAP|{}'.format(self.fake.user_name()),
'oauth2|firefoxaccounts|{}'.format(self.fake.pystr(min_chars=32, max_chars=32))
]
if login_method:
for uid in user_ids:
if uid.startswith(login_method):
return uid
return random.choice(user_ids)
def usernames(self):
"""Profile v2 usernames faker."""
values = {}
for _ in range(random.randint(0, 5)):
values[self.fake.slug()] = self.fake.user_name()
return values
def identities(self):
"""Profile v2 identities faker."""
values = {}
for _ in range(random.randint(0, 5)):
values[self.fake.slug()] = self.fake.uri()
return values
def ssh_public_keys(self):
"""Profile v2 public SSH key faker."""
values = {}
for _ in range(random.randint(0, 5)):
content = self.fake.pystr(min_chars=250, max_chars=500)
email = self.fake.email()
values[self.fake.slug()] = 'ssh-rsa {} {}'.format(content, email)
return values
def pgp_public_keys(self):
"""Profile v2 public PGP key faker."""
values = {}
for _ in range(random.randint(0, 5)):
pgp_key = '-----BEGIN PGP PUBLIC KEY BLOCK-----\n\n'
pgp_key += self.fake.pystr(min_chars=250, max_chars=500)
pgp_key += '\n-----END PGP PUBLIC KEY BLOCK-----\n'
values[self.fake.slug()] = pgp_key
return values
def access_level(self):
"""Profile v2 access level faker."""
values = {}
for publisher in ['ldap', 'mozilliansorg']:
values[publisher] = {}
for _ in range(random.randint(0, 5)):
values[publisher][self.fake.slug()] = self.fake.pybool()
return values
def office_location(self):
"""Profile v2 office location faker."""
locations = [
'Berlin', 'Paris', 'London', 'Toronto', 'Mountain View',
'San Francisco', 'Vancouver', 'Portland', 'Beijing', 'Taipei'
]
return random.choice(locations)
def preferred_languages(self):
"""Profile v2 preferred languages faker."""
values = []
for _ in range(random.randint(0, 5)):
values.append(self.fake.language_code())
return values
def pronouns(self):
"""Profile v2 pronouns faker."""
return random.choice([None, 'he/him', 'she/her', 'they/them'])
def uris(self):
"""Profile v2 URIs faker."""
values = {}
for _ in range(random.randint(0, 5)):
values[self.fake.slug()] = self.fake.uri()
return values
def phone_numbers(self):
"""Profile v2 phone_numbers faker."""
values = {}
for _ in range(random.randint(0, 5)):
values[self.fake.slug()] = self.fake.phone_number()
return values
def create(self):
"""Method to generate fake profile v2 objects."""
login_method = self.login_method()
created = self.fake.date_time()
last_modified = self.fake.date_time_between_dates(datetime_start=created)
obj = {
'$schema': self.schema(),
'login_method': login_method,
'user_id': self.user_id(login_method=login_method),
'active': self.fake.pybool(),
'last_modified': last_modified.isoformat(),
'created': created.isoformat(),
'usernames': self.usernames(),
'first_name': self.fake.first_name(),
'last_name': self.fake.last_name(),
'primary_email': self.fake.email(),
'identities': self.identities(),
'ssh_public_keys': self.ssh_public_keys(),
'pgp_public_keys': self.pgp_public_keys(),
'access_information': self.access_level(),
'fun_title': self.fake.sentence(),
'description': self.fake.paragraph(),
'location_preference': self.fake.country(),
'office_location': self.office_location(),
'timezone': self.fake.timezone(),
'preferred_languages': self.preferred_languages(),
'tags': self.fake.words(),
'pronouns': self.pronouns(),
'picture': self.fake.image_url(),
'uris': self.uris(),
'phone_numbers': self.phone_numbers(),
'alternative_name': self.fake.name()
}
return obj
class V2ProfileFactory(object):
def create(self, export_json=False):
"""Generate fake profile v2 object."""
faker = IAMFaker()
output = faker.create()
if export_json:
return json.dumps(output)
return output
def create_batch(self, count, export_json=False):
"""Generate batch fake profile v2 objects."""
faker = IAMFaker()
batch = []
for _ in range(count):
obj = faker.create()
batch.append(obj)
if export_json:
return json.dumps(batch)
return batch
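# Minimal usage sketch (illustrative):
#   factory = V2ProfileFactory()
#   single = factory.create()                                # dict following the profile v2 layout above
#   batch_json = factory.create_batch(3, export_json=True)   # JSON string holding 3 profiles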
``` |
{
"source": "johngian/mozillians",
"score": 2
} |
#### File: mozillians/common/authbackend.py
```python
import base64
import hashlib
import re
from django.db import transaction
from django.conf import settings
from django.contrib import messages
from django.contrib.auth.models import User
from mozilla_django_oidc.auth import OIDCAuthenticationBackend
from mozillians.common.templatetags.helpers import get_object_or_none
from mozillians.users.models import IdpProfile
from mozillians.users.tasks import send_userprofile_to_cis
# Only allow the following login flows
# Passwordless > Google > Github, FxA > LDAP
# There is no way to downgrade
ALLOWED_IDP_FLOWS = {
IdpProfile.PROVIDER_PASSWORDLESS: IdpProfile.MFA_ACCOUNTS + [
IdpProfile.PROVIDER_PASSWORDLESS,
IdpProfile.PROVIDER_GOOGLE,
],
IdpProfile.PROVIDER_GOOGLE: IdpProfile.MFA_ACCOUNTS + [
IdpProfile.PROVIDER_PASSWORDLESS,
IdpProfile.PROVIDER_GOOGLE,
],
IdpProfile.PROVIDER_GITHUB: IdpProfile.MFA_ACCOUNTS,
IdpProfile.PROVIDER_FIREFOX_ACCOUNTS: IdpProfile.MFA_ACCOUNTS,
IdpProfile.PROVIDER_LDAP: [
IdpProfile.PROVIDER_LDAP
]
}
def calculate_username(email):
"""Calculate username from email address."""
email = email.split('@')[0]
username = re.sub(r'[^\w.@+-]', '-', email)
username = username[:settings.USERNAME_MAX_LENGTH]
suggested_username = username
count = 0
while User.objects.filter(username=suggested_username).exists():
count += 1
suggested_username = '%s%d' % (username, count)
if len(suggested_username) > settings.USERNAME_MAX_LENGTH:
# We failed to calculate a name for you, default to a
# email digest.
return base64.urlsafe_b64encode(hashlib.sha1(email).digest()).rstrip('=')
return suggested_username
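# Example of calculate_username (traced from the code above): an illustrative address
# 'jane.doe@example.com' yields 'jane.doe'; on collision 'jane.doe1', 'jane.doe2', ... are
# tried, and if the suggestion exceeds USERNAME_MAX_LENGTH a base64-encoded SHA1 digest of
# the email is returned instead.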
class MozilliansAuthBackend(OIDCAuthenticationBackend):
"""Override OIDCAuthenticationBackend to provide custom functionality."""
def create_user(self, claims):
user = super(MozilliansAuthBackend, self).create_user(claims)
# Ensure compatibility with OIDC conformant mode
auth0_user_id = claims.get('user_id') or claims.get('sub')
IdpProfile.objects.create(
profile=user.userprofile,
auth0_user_id=auth0_user_id,
email=claims.get('email'),
primary=True
)
return user
def filter_users_by_claims(self, claims):
"""Override default method to store claims."""
self.claims = claims
users = super(MozilliansAuthBackend, self).filter_users_by_claims(claims)
# Checking the primary email returned 0 users,
# before creating a new user we should check if the identity returned exists
if not users:
# Ensure compatibility with OIDC conformant mode
auth0_user_id = claims.get('user_id') or claims.get('sub')
idps = IdpProfile.objects.filter(auth0_user_id=auth0_user_id)
user_ids = idps.values_list('profile__user__id', flat=True).distinct()
return self.UserModel.objects.filter(id__in=user_ids)
return users
def check_authentication_method(self, user):
"""Check which Identity is used to login.
This method, depending on the current status of the IdpProfile
of a user, enforces MFA logins and creates the IdpProfiles.
Returns the object (user) it was passed unchanged.
"""
if not user:
return None
profile = user.userprofile
# Ensure compatibility with OIDC conformant mode
auth0_user_id = self.claims.get('user_id') or self.claims.get('sub')
email = self.claims.get('email')
# Get current_idp
current_idp = get_object_or_none(IdpProfile, profile=profile, primary=True)
# Get or create new `user_id`
obj, _ = IdpProfile.objects.get_or_create(
profile=profile,
email=email,
auth0_user_id=auth0_user_id)
# Update/Save the Github username
if 'github|' in auth0_user_id:
obj.username = self.claims.get('nickname', '')
obj.save()
# Do not allow downgrades.
if current_idp and obj.type < current_idp.type:
msg = u'Please use {0} as the login method to authenticate'
messages.error(self.request, msg.format(current_idp.get_type_display()))
return None
# Mark other `user_id` as `primary=False`
idp_q = IdpProfile.objects.filter(profile=profile)
with transaction.atomic():
idp_q.exclude(auth0_user_id=auth0_user_id, email=email).update(primary=False)
# Mark current `user_id` as `primary=True`
idp_q.filter(auth0_user_id=auth0_user_id, email=email).update(primary=True)
# Update CIS
send_userprofile_to_cis.delay(profile.pk)
return user
def authenticate(self, **kwargs):
"""Override default method to add multiple Identity Profiles in an account."""
user = super(MozilliansAuthBackend, self).authenticate(**kwargs)
return self.check_authentication_method(user)
```
#### File: mozillians/graphql/views.py
```python
from django.http import Http404
from django.views.decorators.csrf import csrf_exempt
import waffle
from graphene_django.views import GraphQLView
class MozilliansGraphQLView(GraphQLView):
"""Class Based View to handle GraphQL requests."""
@csrf_exempt
def dispatch(self, *args, **kwargs):
"""Override dispatch method to allow the use of multiple decorators."""
if not waffle.flag_is_active(self.request, 'enable_graphql'):
raise Http404()
return super(MozilliansGraphQLView, self).dispatch(*args, **kwargs)
``` |
{
"source": "johngian/remo",
"score": 2
} |
#### File: profiles/api/views.py
```python
from collections import namedtuple
from datetime import datetime, timedelta
from django.db.models import Count, Q
from django.contrib.auth.models import User
from django.shortcuts import get_object_or_404
from django.utils.timezone import now
import django_filters
from rest_framework.views import APIView
from rest_framework.viewsets import ReadOnlyModelViewSet
from rest_framework.response import Response
from remo.profiles.api.serializers import (PeopleKPISerializer,
UserProfileDetailedSerializer,
UserSerializer)
from remo.base.utils import get_quarter
from remo.profiles.models import UserProfile
KPI_WEEKS = 12
# Number of activities
CORE = 4
ACTIVE = 1
CASUAL = 1
INACTIVE = 0
# Activities Thresholds:
# The max period in which we are looking to find reports for a user
# both in past and future, measured in weeks
CASUAL_INACTIVE = 4
ACTIVE_CORE = 2
class UserProfileFilter(django_filters.FilterSet):
groups = django_filters.CharFilter(name='groups__name')
functional_areas = django_filters.CharFilter(
name='userprofile__functional_areas__name')
mentor = django_filters.CharFilter(name='userprofile__mentor')
city = django_filters.CharFilter(name='userprofile__city')
region = django_filters.CharFilter(name='userprofile__region')
country = django_filters.CharFilter(name='userprofile__country')
twitter = django_filters.CharFilter(name='userprofile__twitter_account')
jabber = django_filters.CharFilter(name='userprofile__jabber_id')
irc_name = django_filters.CharFilter(name='userprofile__irc_name')
wiki_profile = django_filters.CharFilter(
name='userprofile__wiki_profile_url')
irc_channels = django_filters.CharFilter(name='userprofile__irc_channels')
linkedin = django_filters.CharFilter(name='userprofile__linkedin_url')
facebook = django_filters.CharFilter(name='userprofile__facebook_url')
diaspora = django_filters.CharFilter(name='userprofile__diaspora_url')
class Meta:
model = User
fields = ('first_name', 'last_name')
class UserProfileViewSet(ReadOnlyModelViewSet):
"""Returns a list of Reps profiles."""
serializer_class = UserSerializer
model = User
queryset = User.objects.all()
filter_class = UserProfileFilter
def get_queryset(self):
queryset = self.queryset.filter(
groups__name='Rep', userprofile__registration_complete=True)
return queryset
def retrieve(self, request, pk):
user = get_object_or_404(self.get_queryset(), pk=pk)
serializer = UserProfileDetailedSerializer(
user.userprofile, context={'request': request})
return Response(serializer.data)
class PeopleKPIFilter(django_filters.FilterSet):
"""Filter for People KPI end-point."""
category = django_filters.CharFilter(
name='userprofile__functional_areas__name')
initiative = django_filters.CharFilter(
name='ng_reports__campaign__name')
country = django_filters.CharFilter(name='userprofile__country')
class Meta:
model = UserProfile
fields = ['country', 'category', 'initiative']
class PeopleKPIView(APIView):
def get(self, request):
"""Returns serialized data for People KPI."""
queryset = User.objects.filter(groups__name='Rep',
userprofile__registration_complete=True)
people = PeopleKPIFilter(request.query_params, queryset=queryset)
weeks = int(request.query_params.get('weeks', KPI_WEEKS))
# Total number of Reps
total = people.count()
# Total Reps added in the last quarter
joined_date = get_quarter()[1]
quarter_total = people.qs.filter(
userprofile__date_joined_program__gte=joined_date).count()
# Current quarter start
current_quarter_start = get_quarter()[1]
# Total Reps joined the previous quarter
previous_quarter_end = current_quarter_start - timedelta(days=1)
previous_quarter_start = get_quarter(previous_quarter_end)[1]
total_reps_range = [previous_quarter_start, previous_quarter_end]
previous_quarter_total = people.qs.filter(
userprofile__date_joined_program__range=total_reps_range).count()
diff = quarter_total - previous_quarter_total
try:
# Percentage change of events compared with previous week
quarter_ratio = diff/float(previous_quarter_total) * 100
except ZeroDivisionError:
if diff > 0:
quarter_ratio = 100
else:
quarter_ratio = 0
# Total Reps added this week
today = datetime.combine(now().date(), datetime.min.time())
current_week_start = today - timedelta(days=now().weekday())
prev_week_start = current_week_start - timedelta(weeks=1)
week_total = people.qs.filter(
userprofile__date_joined_program__gte=current_week_start).count()
# Total Reps added the previous week
query_range = [prev_week_start, current_week_start]
prev_week_total = people.qs.filter(
userprofile__date_joined_program__range=query_range).count()
diff = week_total - prev_week_total
try:
# Percentage change of events compared with previous week
week_ratio = diff/float(prev_week_total) * 100
except ZeroDivisionError:
if diff > 0:
week_ratio = 100
else:
week_ratio = 0
weekly_count = []
for i in range(weeks):
start = current_week_start - timedelta(weeks=i)
end = start + timedelta(weeks=1)
# Total number of reps (per week) for the past 6 weeks
count = people.qs.filter(
userprofile__date_joined_program__range=[start, end]).count()
weekly_count.append({'week': weeks-i, 'people': count})
# Get the number of reports for each user.
# Activity metrics:
# Inactive: No activity within 8 weeks (4 past, 4 future)
# Casual: 1 activity within 8 weeks (4 past, 4 future)
# Active: 1 activity within 4 weeks (2 past, 2 future)
# Core: 4 activities within 4 weeks (2 past, 2 future)
def get_activity_query(query, start_date=None, offset=0, invert=False):
if not start_date:
start_date = today
date_range = [start_date - timedelta(weeks=offset),
start_date + timedelta(weeks=offset)]
q_args = Q(ng_reports__report_date__range=date_range)
if invert:
q_args = ~q_args
return (query.filter(q_args).distinct()
.annotate(num_reports=Count('ng_reports')))
core_active_query = get_activity_query(people.qs, offset=ACTIVE_CORE)
# Active contributors - 8 weeks
active_contributors = core_active_query.filter(num_reports__gte=ACTIVE,
num_reports__lt=CORE)
num_active = active_contributors.count()
# Core contributors - 8 weeks
num_core = core_active_query.filter(num_reports__gte=CORE).count()
# Inactive contributors - 16 weeks
num_inactive = get_activity_query(people.qs,
offset=CASUAL_INACTIVE,
invert=True).count()
# Casual contributors
active_ids = core_active_query.values_list('id', flat=True)
num_casual = (get_activity_query(people.qs,
offset=CASUAL_INACTIVE)
.exclude(id__in=active_ids).count())
weekly_contribution = []
for i in range(weeks):
start = current_week_start - timedelta(weeks=i)
# Conversion points per week
core_active_query = get_activity_query(people.qs,
start_date=start,
offset=ACTIVE_CORE)
# Active contributors
active_contributors = core_active_query.filter(
num_reports__gte=ACTIVE, num_reports__lt=CORE)
active_weekly = active_contributors.count()
# Core contributors
core_weekly = core_active_query.filter(
num_reports__gte=CORE).count()
# Inactive contributors
inactive_weekly = get_activity_query(people.qs,
start_date=start,
offset=CASUAL_INACTIVE,
invert=True).count()
# Casual contributors
active_ids = core_active_query.values_list('id', flat=True)
casual_weekly = (get_activity_query(people.qs,
start_date=start,
offset=CASUAL_INACTIVE)
.exclude(id__in=active_ids).count())
weekly_contribution.append({'week': weeks-i,
'core': core_weekly,
'active': active_weekly,
'casual': casual_weekly,
'inactive': inactive_weekly
})
kwargs = {
'total': total,
'quarter_total': quarter_total,
'quarter_growth_percentage': quarter_ratio,
'week_total': week_total,
'week_growth_percentage': week_ratio,
'total_per_week': weekly_contribution,
'inactive_week': num_inactive,
'casual_week': num_casual,
'active_week': num_active,
'core_week': num_core
}
kpi = namedtuple('PeopleKPI', kwargs.keys())(*kwargs.values())
serializer = PeopleKPISerializer(kpi)
return Response(serializer.data)
```
#### File: django_browserid/tests/test_helpers.py
```python
from django.utils.functional import lazy
from mock import patch
from nose.tools import eq_
from django_browserid import helpers
from django_browserid.tests import TestCase
def _lazy_request_args():
return {'siteName': 'asdf'}
lazy_request_args = lazy(_lazy_request_args, dict)
class BrowserIDInfoTests(TestCase):
def setUp(self):
patcher = patch('django_browserid.helpers.render_to_string')
self.addCleanup(patcher.stop)
self.render_to_string = patcher.start()
def test_defaults(self):
with self.settings(BROWSERID_REQUEST_ARGS={'foo': 'bar', 'baz': 1}):
output = helpers.browserid_info()
eq_(output, self.render_to_string.return_value)
expected_info = {
'loginUrl': '/browserid/login/',
'logoutUrl': '/browserid/logout/',
'csrfUrl': '/browserid/csrf/',
'requestArgs': {'foo': 'bar', 'baz': 1},
}
        self.render_to_string.assert_called_with('browserid/info.html', {'info': expected_info})
def test_lazy_request_args(self):
with self.settings(BROWSERID_REQUEST_ARGS=lazy_request_args()):
output = helpers.browserid_info()
eq_(output, self.render_to_string.return_value)
expected_info = {
'loginUrl': '/browserid/login/',
'logoutUrl': '/browserid/logout/',
'csrfUrl': '/browserid/csrf/',
'requestArgs': {'siteName': 'asdf'},
}
        self.render_to_string.assert_called_with('browserid/info.html', {'info': expected_info})
class BrowserIDJSTests(TestCase):
def test_basic(self):
output = helpers.browserid_js()
self.assertHTMLEqual(output, """
<script type="text/javascript" src="https://login.persona.org/include.js"></script>
<script type="text/javascript" src="static/browserid/api.js"></script>
<script type="text/javascript" src="static/browserid/browserid.js"></script>
""")
def test_no_shim(self):
output = helpers.browserid_js(include_shim=False)
self.assertHTMLEqual(output, """
<script type="text/javascript" src="static/browserid/api.js"></script>
<script type="text/javascript" src="static/browserid/browserid.js"></script>
""")
def test_custom_shim(self):
with self.settings(BROWSERID_SHIM='http://example.com/test.js'):
output = helpers.browserid_js()
self.assertHTMLEqual(output, """
<script type="text/javascript" src="http://example.com/test.js"></script>
<script type="text/javascript" src="static/browserid/api.js"></script>
<script type="text/javascript" src="static/browserid/browserid.js"></script>
""")
def test_autologin_email(self):
"""
If BROWSERID_AUTOLOGIN_ENABLED is True, do not include the shim
and include the autologin mock script.
"""
with self.settings(BROWSERID_AUTOLOGIN_ENABLED=True):
output = helpers.browserid_js()
self.assertHTMLEqual(output, """
<script type="text/javascript" src="static/browserid/api.js"></script>
<script type="text/javascript" src="static/browserid/autologin.js"></script>
<script type="text/javascript" src="static/browserid/browserid.js"></script>
""")
class BrowserIDCSSTests(TestCase):
def test_basic(self):
output = helpers.browserid_css()
self.assertHTMLEqual(output, """
<link rel="stylesheet" href="static/browserid/persona-buttons.css" />
""")
class BrowserIDButtonTests(TestCase):
def test_basic(self):
button = helpers.browserid_button(text='asdf', next='1234', link_class='fake-button',
href="/test", attrs={'target': '_blank'})
self.assertHTMLEqual(button, """
<a href="/test" class="fake-button" data-next="1234" target="_blank">
<span>asdf</span>
</a>
""")
def test_json_attrs(self):
button = helpers.browserid_button(text='qwer', next='5678', link_class='fake-button',
attrs='{"target": "_blank"}')
self.assertHTMLEqual(button, """
<a href="#" class="fake-button" data-next="5678" target="_blank">
<span>qwer</span>
</a>
""")
class BrowserIDLoginTests(TestCase):
def test_login_class(self):
with self.settings(LOGIN_REDIRECT_URL='/'):
button = helpers.browserid_login(link_class='go button')
self.assertHTMLEqual(button, """
<a href="#" class="go button browserid-login" data-next="/">
<span>Sign in</span>
</a>
""")
def test_default_class(self):
"""
If no class is provided, it should default to
'browserid-login persona-button'.
"""
with self.settings(LOGIN_REDIRECT_URL='/'):
button = helpers.browserid_login()
self.assertHTMLEqual(button, """
<a href="#" class="browserid-login persona-button" data-next="/">
<span>Sign in</span>
</a>
""")
def test_color_class(self):
with self.settings(LOGIN_REDIRECT_URL='/'):
button = helpers.browserid_login(color='dark')
self.assertHTMLEqual(button, """
<a href="#" class="browserid-login persona-button dark" data-next="/">
<span>Sign in</span>
</a>
""")
def test_color_custom_class(self):
"""
If using a color and a custom link class, persona-button should
be added to the link class.
"""
with self.settings(LOGIN_REDIRECT_URL='/'):
button = helpers.browserid_login(link_class='go button', color='dark')
self.assertHTMLEqual(button, """
<a href="#" class="go button browserid-login persona-button dark" data-next="/">
<span>Sign in</span>
</a>
""")
def test_next(self):
button = helpers.browserid_login(next='/foo/bar')
self.assertHTMLEqual(button, """
<a href="#" class="browserid-login persona-button" data-next="/foo/bar">
<span>Sign in</span>
</a>
""")
def test_next_default(self):
"""next should default to LOGIN_REDIRECT_URL"""
with self.settings(LOGIN_REDIRECT_URL='/foo/bar'):
button = helpers.browserid_login()
self.assertHTMLEqual(button, """
<a href="#" class="browserid-login persona-button" data-next="/foo/bar">
<span>Sign in</span>
</a>
""")
class BrowserIDLogoutTests(TestCase):
def test_logout_class(self):
with self.settings(LOGOUT_REDIRECT_URL='/'):
button = helpers.browserid_logout(link_class='go button')
self.assertHTMLEqual(button, """
<a href="/browserid/logout/" class="go button browserid-logout" data-next="/">
<span>Sign out</span>
</a>
""")
def test_next(self):
button = helpers.browserid_logout(next='/foo/bar')
self.assertHTMLEqual(button, """
<a href="/browserid/logout/" class="browserid-logout" data-next="/foo/bar">
<span>Sign out</span>
</a>
""")
def test_next_default(self):
"""next should default to LOGOUT_REDIRECT_URL"""
with self.settings(LOGOUT_REDIRECT_URL='/foo/bar'):
button = helpers.browserid_logout()
self.assertHTMLEqual(button, """
<a href="/browserid/logout/" class="browserid-logout" data-next="/foo/bar">
<span>Sign out</span>
</a>
""")
``` |
{
"source": "JohnGiorgi/allennlp-multi-label-classification",
"score": 2
} |
#### File: allennlp-multi-label-classification/tests/test_model.py
```python
from pathlib import Path
import pytest
from allennlp.common.testing import ModelTestCase
class TestMultiLabelClassifier(ModelTestCase):
def setup_method(self):
super().setup_method()
# We need to override the path set by AllenNLP
self.FIXTURES_ROOT = Path("tests/fixtures")
self.set_up_model(
self.FIXTURES_ROOT / "experiment.jsonnet",
self.FIXTURES_ROOT / "data" / "reuters-21578" / "train.jsonl",
)
def test_forward_pass_runs_correctly(self):
training_tensors = self.dataset.as_tensor_dict()
output_dict = self.model(**training_tensors)
output_dict = self.model.make_output_human_readable(output_dict)
assert "logits" in output_dict.keys()
assert "probs" in output_dict.keys()
assert "labels" in output_dict.keys()
@pytest.mark.skip(reason="takes far too long to run")
def test_can_train_save_and_load(self):
self.ensure_model_can_train_save_and_load(self.param_file)
``` |
{
"source": "JohnGiorgi/ChemProt-to-Standoff",
"score": 3
} |
#### File: JohnGiorgi/ChemProt-to-Standoff/chemprot_to_standoff.py
```python
import argparse
import errno
import os
import re
from glob import glob
def main(**kwargs):
"""Converts the ChemProt corpus to a Brat-flavoured Standoff format.
"""
make_dir(kwargs['output'])
for filepath in glob(os.path.join(kwargs['input'], '*.tsv')):
filename = os.path.splitext(filepath)[0]
# Convert abstract file
if re.search('abstracts', filename):
with open(filepath, 'r') as in_file:
converted_abstracts = convert_abstracts_to_standoff(in_file)
write_to_disk(converted_abstracts, ext='txt')
# Convert entity file
elif re.search('entities', filename):
with open(filepath, 'r') as in_file:
converted_entities = convert_entities_to_standoff(in_file)
# Convert relation file
elif re.search('gold_standard', filename):
with open(filepath, 'r') as in_file:
converted_relations = convert_relations_to_standoff(in_file)
# Concat converted entities and relations so we can save one dict to disk
for pmid in converted_entities:
if pmid in converted_relations:
converted_entities[pmid] = f'{converted_entities[pmid]}\n{converted_relations[pmid]}'
write_to_disk(converted_entities, ext='ann')
def convert_abstracts_to_standoff(f):
"""Given a file open for reading, returns a dict containing abstract texts keyed by PMID.
Given a `*_abstracts.tsv` file from the ChemProt corpus which is open for reading, returns
a dictionary containing the abstract texts, keyed by PubMed IDs (PMIDs)
Args:
        f (TextIO): An `*_abstracts.tsv` file from the ChemProt corpus, open for reading.
Returns:
A dictionary containing the abstract texts from `f`, keyed by PubMed IDs (PMIDs).
"""
file_contents = f.read().strip().split('\n')
return {abst.split('\t')[0]: '\n'.join(abst.split('\t')[1:]) for abst in file_contents}
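# Example of the mapping built above (PMID and text are illustrative): the TSV line
# "10471277\tSome title\tSome abstract body" becomes {'10471277': 'Some title\nSome abstract body'}.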
def convert_entities_to_standoff(f):
"""Given a file open for reading, returns a dict containing entity annotations keyed by PMID.
Given a `*_entities.tsv` file from the ChemProt corpus which is open for reading, returns
a dictionary containing the entity annotations, keyed by PubMed IDs (PMIDs)
Args:
        f (TextIO): An `*_entities.tsv` file from the ChemProt corpus, open for reading.
Returns:
A dictionary containing the entity annotations from `f`, keyed by PubMed IDs (PMIDs).
"""
converted_entities = {}
for line in f:
if line:
pmid, T, label, start_offset, end_offset, text = line.strip().split('\t')
if pmid not in converted_entities:
converted_entities[pmid] = []
converted_entities[pmid].append(f'{T}\t{label} {start_offset} {end_offset}\t{text}')
# Convert list of annotations to a string
converted_entities = {pmid: '\n'.join(anns) for (pmid, anns) in converted_entities.items()}
return converted_entities
def convert_relations_to_standoff(f):
"""Given a file open for reading, returns a dict containing relation annotations keyed by PMID.
Given a `*_relations.tsv` file from the ChemProt corpus which is open for reading, returns
a dictionary containing the relation annotations, keyed by PubMed IDs (PMIDs)
Args:
        f (TextIO): An `*_relations.tsv` file from the ChemProt corpus, open for reading.
Returns:
A dictionary containing the relation annotations from `f`, keyed by PubMed IDs (PMIDs).
"""
R = 1 # Counter for the relation identifier in standoff format
converted_relations = {}
for line in f:
if line:
pmid, label, arg_1, arg_2 = line.strip().split('\t')
if pmid not in converted_relations:
converted_relations[pmid] = []
R = 1
converted_relations[pmid].append(f'R{R}\t{label} {arg_1} {arg_2}')
R += 1
# Convert list of annotations to a string
converted_relations = {pmid: '\n'.join(anns) for (pmid, anns) in converted_relations.items()}
return converted_relations
def write_to_disk(converted_dict, ext='txt'):
"""
"""
for pmid, item in converted_dict.items():
filename = os.path.join(kwargs['output'], f'{pmid}.{ext}')
with open(filename, 'w') as f:
f.write(item)
# https://stackoverflow.com/questions/273192/how-can-i-create-a-directory-if-it-does-not-exist#273227
def make_dir(directory):
"""Creates a directory at `directory` if it does not already exist.
"""
try:
os.makedirs(directory)
except OSError as err:
if err.errno != errno.EEXIST:
raise
if __name__ == '__main__':
description = ()
parser = argparse.ArgumentParser(description=description)
parser.add_argument('-i', '--input', help='Filepath to ChemProt corpus.',
type=str, required=True)
parser.add_argument('-o', '--output', help='Directory to save converted corpus.',
type=str, required=True)
kwargs = vars(parser.parse_args())
main(**kwargs)
``` |
{
"source": "JohnGiorgi/pubmed-lookup",
"score": 3
} |
#### File: pubmed-lookup/pubmed_lookup/command_line.py
```python
import argparse
import sys
from pubmed_lookup import PubMedLookup, Publication
def pubmed_citation(args=sys.argv[1:], out=sys.stdout):
"""Get a citation via the command line using a PubMed ID or PubMed URL"""
parser = argparse.ArgumentParser(
description='Get a citation using a PubMed ID or PubMed URL')
parser.add_argument('query', help='PubMed ID or PubMed URL')
parser.add_argument(
'-m', '--mini', action='store_true', help='get mini citation')
parser.add_argument(
'-e', '--email', action='store', help='set user email', default='')
args = parser.parse_args(args=args)
lookup = PubMedLookup(args.query, args.email)
publication = Publication(lookup, resolve_doi=False)
if args.mini:
out.write(publication.cite_mini() + '\n')
else:
out.write(publication.cite() + '\n')
def pubmed_url(args=sys.argv[1:], resolve_doi=True, out=sys.stdout):
"""
Get a publication URL via the command line using a PubMed ID or PubMed URL
"""
parser = argparse.ArgumentParser(
description='Get a publication URL using a PubMed ID or PubMed URL')
parser.add_argument('query', help='PubMed ID or PubMed URL')
parser.add_argument(
'-d', '--doi', action='store_false', help='get DOI URL')
parser.add_argument(
'-e', '--email', action='store', help='set user email', default='')
args = parser.parse_args(args=args)
lookup = PubMedLookup(args.query, args.email)
publication = Publication(lookup, resolve_doi=args.doi)
out.write(publication.url + '\n')
```
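A minimal usage sketch for the two console entry points above (illustrative only; it performs a live PubMed lookup, and the PMID is the one exercised by the test module that follows):

```python
from pubmed_lookup import command_line

# Print a full citation, then the DOI-based URL, for PubMed ID 22331878.
command_line.pubmed_citation(["22331878"])
command_line.pubmed_url(["-d", "22331878"])
```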
#### File: pubmed-lookup/pubmed_lookup/test_pubmed_lookup.py
```python
import copy
import os
import unittest
from io import StringIO
from pubmed_lookup import command_line, Publication, PubMedLookup
article_url = 'https://www.pnas.org/content/109/12/4674'
citation = (
'<NAME>, <NAME>, <NAME>, <NAME>, <NAME> (2012). '
'Arabidopsis synchronizes jasmonate-mediated defense with insect '
'circadian behavior. Proc Natl Acad Sci U S A 109(12): 4674-7.')
doi_url = 'http://dx.doi.org/10.1073/pnas.1116368109'
mini_citation = '<NAME> - Covington MF - 2012 - Proc Natl Acad Sci U S A'
pmid = '22331878'
class TestConsole(unittest.TestCase):
"""Test command-line tools."""
def setUp(self):
self.out = StringIO()
def test_pubmed_citation(self):
command_line.pubmed_citation([pmid], out=self.out)
output = self.out.getvalue()
self.assertEqual(output, citation + '\n')
def test_pubmed_citation_m(self):
command_line.pubmed_citation(['-m', pmid], out=self.out)
output = self.out.getvalue()
self.assertEqual(output, mini_citation + '\n')
def test_pubmed_citation_mini(self):
command_line.pubmed_citation(['--mini', pmid], out=self.out)
output = self.out.getvalue()
self.assertEqual(output, mini_citation + '\n')
def test_pubmed_url(self):
command_line.pubmed_url([pmid], out=self.out)
output = self.out.getvalue()
self.assertEqual(output, article_url + '\n')
def test_pubmed_url_d(self):
command_line.pubmed_url(['-d', pmid], out=self.out)
output = self.out.getvalue()
self.assertEqual(output, doi_url + '\n')
def test_pubmed_url_doi(self):
command_line.pubmed_url(['--doi', pmid], out=self.out)
output = self.out.getvalue()
self.assertEqual(output, doi_url + '\n')
class TestPublication(unittest.TestCase):
@classmethod
def setUpClass(cls):
# Get publication record
email = ''
cls.pmid = pmid
cls.lookup = PubMedLookup(cls.pmid, email)
cls.master_record = Publication(cls.lookup)
# Set frequently used expected results
cls.authors = '<NAME>, <NAME>, <NAME>, <NAME>, ' \
'<NAME>'
cls.issue = '12'
cls.journal = 'Proc Natl Acad Sci U S A'
cls.pages = '4674-7'
cls.title = 'Arabidopsis synchronizes jasmonate-mediated defense ' \
'with insect circadian behavior.'
cls.volume = '109'
cls.year = '2012'
cls.citation_data = {
'authors': cls.authors,
'year': cls.year,
'title': cls.title,
'journal': cls.journal,
'volume': cls.volume,
'issue': cls.issue,
'pages': cls.pages,
}
cls.base_citation = '{authors} ({year}). {title} {journal}'.format(
**cls.citation_data)
def setUp(self):
self.record = copy.copy(self.master_record)
def test_fields(self):
self.assertEqual(self.record.pmid, self.pmid)
self.assertEqual(
self.record.pubmed_url,
'http://www.ncbi.nlm.nih.gov/pubmed/{}'.format(pmid))
self.assertEqual(self.record.title, self.title)
self.assertEqual(self.record.authors, self.authors)
self.assertEqual(self.record.first_author, '<NAME>')
self.assertEqual(self.record.last_author, '<NAME>')
self.assertEqual(self.record.journal, self.journal)
self.assertEqual(self.record.volume, self.volume)
self.assertEqual(self.record.year, self.year)
self.assertEqual(self.record.month, 3)
self.assertEqual(self.record.day, '20')
self.assertEqual(self.record.issue, self.issue)
self.assertEqual(self.record.pages, self.pages)
self.assertEqual(len(self.record.abstract), 1604)
def test_authors_et_al(self):
self.assertEqual(self.record.authors_et_al(), self.authors)
self.assertEqual(
self.record.authors_et_al(max_authors=3),
'<NAME>, <NAME>, <NAME>, et al.')
self.assertEqual(
self.record.authors_et_al(max_authors=10), self.authors)
def test_cite_mini(self):
self.assertEqual(self.record.cite_mini(), mini_citation)
def test_cite(self):
self.assertEqual(
self.record.cite(), '{} {volume}({issue}): {pages}.'.format(
self.base_citation, **self.citation_data))
def test_cite_without_pages(self):
self.record.pages = ''
self.assertEqual(self.record.cite(), '{} {volume}({issue}).'.format(
self.base_citation, **self.citation_data))
def test_cite_without_issue(self):
self.record.issue = ''
self.assertEqual(self.record.cite(), '{} {volume}: {pages}.'.format(
self.base_citation, **self.citation_data))
def test_cite_without_issue_pages(self):
self.record.issue = ''
self.record.pages = ''
self.assertEqual(self.record.cite(), '{} {volume}.'.format(
self.base_citation, **self.citation_data))
def test_cite_without_issue_volume(self):
self.record.issue = ''
self.record.volume = ''
self.assertEqual(self.record.cite(), '{} {pages}.'.format(
self.base_citation, **self.citation_data))
def test_cite_without_issue_pages_volume(self):
self.record.issue = ''
self.record.pages = ''
self.record.volume = ''
self.assertEqual(self.record.cite(), '{}.'.format(self.base_citation))
@unittest.skipIf(
"TRAVIS" in os.environ and os.environ["TRAVIS"] == 'true',
"Skipping this test on Travis CI.")
def test_doi(self):
self.assertEqual(
self.record.url, 'https://www.pnas.org/content/109/12/4674')
def test_missing_doi(self):
del self.record.record['DOI']
self.record.set_article_url()
self.assertEqual(self.record.url, '')
def test_invalid_doi(self):
self.record.record.update({'DOI': 'not a valid DOI'})
self.record.set_article_url()
self.assertEqual(self.record.url, 'http://dx.doi.org/not a valid DOI')
def test_dont_resolve_doi(self):
record = Publication(self.lookup, resolve_doi=False)
self.assertEqual(
record.url, doi_url)
class TestPubMedLookup(unittest.TestCase):
def setUp(self):
self.email = ''
self.pubmed_url = 'http://www.ncbi.nlm.nih.gov/pubmed/{}'.format(pmid)
self.pmid = pmid
def test_pmid_and_url_return_same_record(self):
self.assertEqual(
PubMedLookup(self.pmid, self.email).record,
PubMedLookup(self.pubmed_url, self.email).record)
def test_parse_pubmed_url(self):
self.assertEqual(
PubMedLookup.parse_pubmed_url(self.pubmed_url), self.pmid)
def test_invalid_query(self):
with self.assertRaises(RuntimeError):
PubMedLookup('not a valid query', self.email)
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "JohnGiorgi/PyTorch-Image-Retrieval",
"score": 2
} |
#### File: JohnGiorgi/PyTorch-Image-Retrieval/data_loader.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from torch.utils.data import Dataset
from torchvision import transforms, datasets
from PIL import Image
def train_data_loader(data_path, img_size, use_augment=False):
if use_augment:
data_transforms = transforms.Compose([
transforms.RandomOrder([
transforms.RandomApply([transforms.ColorJitter(contrast=0.5)], .5),
transforms.Compose([
transforms.RandomApply([transforms.ColorJitter(saturation=0.5)], .5),
transforms.RandomApply([transforms.ColorJitter(hue=0.1)], .5),
])
]),
transforms.RandomApply([transforms.ColorJitter(brightness=0.125)], .5),
transforms.RandomApply([transforms.RandomRotation(15)], .5),
transforms.RandomResizedCrop(img_size),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])
else:
data_transforms = transforms.Compose([
transforms.RandomResizedCrop(img_size),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])
image_dataset = datasets.ImageFolder(data_path, data_transforms)
return image_dataset
def test_data_loader(data_path):
# return full path
queries_path = [os.path.join(data_path, 'query', path) for path in os.listdir(os.path.join(data_path, 'query'))]
references_path = [os.path.join(data_path, 'reference', path) for path in
os.listdir(os.path.join(data_path, 'reference'))]
return queries_path, references_path
def test_data_generator(data_path, img_size):
img_size = (img_size, img_size)
data_transforms = transforms.Compose([
transforms.Resize(img_size),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])
test_image_dataset = TestDataset(data_path, data_transforms)
return test_image_dataset
class TestDataset(Dataset):
def __init__(self, img_path_list, transform=None):
self.img_path_list = img_path_list
self.transform = transform
def __getitem__(self, index):
img_path = self.img_path_list[index]
img = Image.open(img_path)
if self.transform is not None:
img = self.transform(img)
return img_path, img
def __len__(self):
return len(self.img_path_list)
if __name__ == '__main__':
query, refer = test_data_loader('./')
print(query)
print(refer)
```
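A short sketch of how the loaders above could be wired together (illustrative only; the directory names, image size, and batch size are placeholder values, and the repository's own training script batches with a `BalancedBatchSampler` rather than plain shuffling):

```python
from torch.utils.data import DataLoader

from data_loader import test_data_generator, test_data_loader, train_data_loader

# Training: an augmented ImageFolder dataset, batched with a standard DataLoader.
train_dataset = train_data_loader("./train", img_size=224, use_augment=True)
train_loader = DataLoader(train_dataset, batch_size=64, shuffle=True, num_workers=4)

# Inference: collect query/reference image paths, then wrap them in the test dataset.
query_paths, reference_paths = test_data_loader("./test")
query_dataset = test_data_generator(query_paths, img_size=224)
```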
#### File: JohnGiorgi/PyTorch-Image-Retrieval/inference.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from torch.utils.data import DataLoader
from data_loader import test_data_generator
import numpy as np
def retrieve(model, queries, db, img_size, infer_batch_size):
query_paths = queries
reference_paths = db
query_img_dataset = test_data_generator(queries, img_size=img_size)
reference_img_dataset = test_data_generator(db, img_size=img_size)
query_loader = DataLoader(query_img_dataset, batch_size=infer_batch_size, shuffle=False, num_workers=4,
pin_memory=True)
reference_loader = DataLoader(reference_img_dataset, batch_size=infer_batch_size, shuffle=False, num_workers=4,
pin_memory=True)
model.eval()
model.cuda()
query_paths, query_vecs = batch_process(model, query_loader)
reference_paths, reference_vecs = batch_process(model, reference_loader)
assert query_paths == queries and reference_paths == db, "order of paths should be same"
# DBA and AQE
query_vecs, reference_vecs = db_augmentation(query_vecs, reference_vecs, top_k=10)
query_vecs, reference_vecs = average_query_expansion(query_vecs, reference_vecs, top_k=5)
sim_matrix = calculate_sim_matrix(query_vecs, reference_vecs)
indices = np.argsort(sim_matrix, axis=1)
indices = np.flip(indices, axis=1)
retrieval_results = {}
# Evaluation: mean average precision (mAP)
    # You can change this part to fit your evaluation scheme
for (i, query) in enumerate(query_paths):
query = query.split('/')[-1].split('.')[0]
ranked_list = [reference_paths[k].split('/')[-1].split('.')[0] for k in indices[i]]
ranked_list = ranked_list[:1000]
retrieval_results[query] = ranked_list
return retrieval_results
def db_augmentation(query_vecs, reference_vecs, top_k=10):
"""
Database-side feature augmentation (DBA)
<NAME>, et al. "End-to-end Learning of Deep Visual Representations for Image Retrieval,"
International Journal of Computer Vision. 2017.
https://link.springer.com/article/10.1007/s11263-017-1016-8
"""
weights = np.logspace(0, -2., top_k+1)
# Query augmentation
sim_mat = calculate_sim_matrix(query_vecs, reference_vecs)
indices = np.argsort(-sim_mat, axis=1)
top_k_ref = reference_vecs[indices[:, :top_k], :]
query_vecs = np.tensordot(weights, np.concatenate([np.expand_dims(query_vecs, 1), top_k_ref], axis=1), axes=(0, 1))
# Reference augmentation
sim_mat = calculate_sim_matrix(reference_vecs, reference_vecs)
indices = np.argsort(-sim_mat, axis=1)
top_k_ref = reference_vecs[indices[:, :top_k+1], :]
reference_vecs = np.tensordot(weights, top_k_ref, axes=(0, 1))
return query_vecs, reference_vecs
def average_query_expansion(query_vecs, reference_vecs, top_k=5):
"""
Average Query Expansion (AQE)
<NAME>, et al. "Total Recall: Automatic Query Expansion with a Generative Feature Model for Object Retrieval,"
International Conference of Computer Vision. 2007.
https://www.robots.ox.ac.uk/~vgg/publications/papers/chum07b.pdf
"""
# Query augmentation
sim_mat = calculate_sim_matrix(query_vecs, reference_vecs)
indices = np.argsort(-sim_mat, axis=1)
top_k_ref_mean = np.mean(reference_vecs[indices[:, :top_k], :], axis=1)
query_vecs = np.concatenate([query_vecs, top_k_ref_mean], axis=1)
# Reference augmentation
sim_mat = calculate_sim_matrix(reference_vecs, reference_vecs)
indices = np.argsort(-sim_mat, axis=1)
top_k_ref_mean = np.mean(reference_vecs[indices[:, 1:top_k+1], :], axis=1)
reference_vecs = np.concatenate([reference_vecs, top_k_ref_mean], axis=1)
return query_vecs, reference_vecs
def calculate_sim_matrix(query_vecs, reference_vecs):
query_vecs, reference_vecs = postprocess(query_vecs, reference_vecs)
return np.dot(query_vecs, reference_vecs.T)
def batch_process(model, loader):
feature_vecs = []
img_paths = []
for data in loader:
paths, inputs = data
feature_vec = _get_feature(model, inputs.cuda())
feature_vec = feature_vec.detach().cpu().numpy() # (batch_size, channels)
for i in range(feature_vec.shape[0]):
feature_vecs.append(feature_vec[i])
img_paths = img_paths + paths
return img_paths, np.asarray(feature_vecs)
def _get_features_from(model, x, feature_names):
features = {}
def save_feature(name):
def hook(m, i, o):
features[name] = o.data
return hook
for name, module in model.named_modules():
_name = name.split('.')[-1]
if _name in feature_names:
module.register_forward_hook(save_feature(_name))
model(x)
return features
def _get_feature(model, x):
model_name = model.__class__.__name__
if model_name == 'EmbeddingNetwork':
feature = model(x)
elif model_name == 'ResNet':
features = _get_features_from(model, x, ['fc'])
feature = features['fc']
elif model_name == 'DenseNet':
features = _get_features_from(model, x, ['classifier'])
feature = features['classifier']
else:
raise ValueError("Invalid model name: {}".format(model_name))
return feature
def postprocess(query_vecs, reference_vecs):
"""
Postprocessing:
1) Moving the origin of the feature space to the center of the feature vectors
2) L2-normalization
"""
# centerize
query_vecs, reference_vecs = _centerize(query_vecs, reference_vecs)
# l2 normalization
query_vecs = _l2_normalize(query_vecs)
reference_vecs = _l2_normalize(reference_vecs)
return query_vecs, reference_vecs
def _centerize(v1, v2):
concat = np.concatenate([v1, v2], axis=0)
center = np.mean(concat, axis=0)
return v1-center, v2-center
def _l2_normalize(v):
norm = np.expand_dims(np.linalg.norm(v, axis=1), axis=1)
if np.any(norm == 0):
return v
return v / norm
```
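A toy numerical sketch of the post-processing above (illustrative only): after centering and L2-normalization, the dot product computed in `calculate_sim_matrix` is exactly cosine similarity.

```python
import numpy as np

query_vecs = np.array([[3.0, 4.0]])                   # one query embedding
reference_vecs = np.array([[1.0, 0.0], [0.0, 2.0]])   # two reference embeddings

# Move the origin to the center of all vectors, then L2-normalize each row.
center = np.concatenate([query_vecs, reference_vecs], axis=0).mean(axis=0)
q = query_vecs - center
r = reference_vecs - center
q /= np.linalg.norm(q, axis=1, keepdims=True)
r /= np.linalg.norm(r, axis=1, keepdims=True)

# Each entry is the cosine of the angle between a query and a reference embedding.
sim_matrix = np.dot(q, r.T)
```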
#### File: JohnGiorgi/PyTorch-Image-Retrieval/main.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import torch
import torch.nn as nn
import torch.optim as optim
from data_loader import train_data_loader, test_data_loader
# Load initial models
from networks import EmbeddingNetwork
# Load batch sampler and train loss
from datasets import BalancedBatchSampler
from losses import BlendedLoss, MAIN_LOSS_CHOICES
from trainer import fit
from inference import retrieve
def load(file_path):
model.load_state_dict(torch.load(file_path))
print('model loaded!')
return model
def infer(model, queries, db):
retrieval_results = retrieve(model, queries, db, input_size, infer_batch_size)
return list(zip(range(len(retrieval_results)), retrieval_results.items()))
def get_arguments():
args = argparse.ArgumentParser()
args.add_argument('--dataset-path', type=str)
args.add_argument('--model-save-dir', type=str)
args.add_argument('--model-to-test', type=str)
# Hyperparameters
args.add_argument('--epochs', type=int, default=20)
args.add_argument('--model', type=str,
choices=['densenet161', 'resnet101', 'inceptionv3', 'seresnext'],
default='densenet161')
args.add_argument('--input-size', type=int, default=224, help='size of input image')
args.add_argument('--num-classes', type=int, default=64, help='number of classes for batch sampler')
args.add_argument('--num-samples', type=int, default=4, help='number of samples per class for batch sampler')
args.add_argument('--embedding-dim', type=int, default=128, help='size of embedding dimension')
args.add_argument('--feature-extracting', type=bool, default=False)
args.add_argument('--use-pretrained', type=bool, default=True)
args.add_argument('--lr', type=float, default=1e-4)
args.add_argument('--scheduler', type=str, choices=['StepLR', 'MultiStepLR'])
args.add_argument('--attention', action='store_true')
args.add_argument('--loss-type', type=str, choices=MAIN_LOSS_CHOICES)
args.add_argument('--cross-entropy', action='store_true')
args.add_argument('--use-augmentation', action='store_true')
# Mode selection
args.add_argument('--mode', type=str, default='train', help='mode selection: train or test.')
return args.parse_args()
if __name__ == '__main__':
config = get_arguments()
dataset_path = config.dataset_path
# Model parameters
model_name = config.model
input_size = config.input_size
embedding_dim = config.embedding_dim
feature_extracting = config.feature_extracting
use_pretrained = config.use_pretrained
attention_flag = config.attention
# Training parameters
nb_epoch = config.epochs
loss_type = config.loss_type
cross_entropy_flag = config.cross_entropy
scheduler_name = config.scheduler
lr = config.lr
# Mini-batch parameters
num_classes = config.num_classes
num_samples = config.num_samples
use_augmentation = config.use_augmentation
infer_batch_size = 64
log_interval = 50
""" Model """
model = EmbeddingNetwork(model_name=model_name,
embedding_dim=embedding_dim,
feature_extracting=feature_extracting,
use_pretrained=use_pretrained,
attention_flag=attention_flag,
cross_entropy_flag=cross_entropy_flag)
if torch.cuda.device_count() > 1:
model = nn.DataParallel(model)
if config.mode == 'train':
""" Load data """
print('dataset path', dataset_path)
train_dataset_path = dataset_path + '/train/train_data'
img_dataset = train_data_loader(data_path=train_dataset_path, img_size=input_size,
use_augment=use_augmentation)
# Balanced batch sampler and online train loader
train_batch_sampler = BalancedBatchSampler(img_dataset, n_classes=num_classes, n_samples=num_samples)
online_train_loader = torch.utils.data.DataLoader(img_dataset,
batch_sampler=train_batch_sampler,
num_workers=4,
pin_memory=True)
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# Gather the parameters to be optimized/updated.
params_to_update = model.parameters()
print("Params to learn:")
if feature_extracting:
params_to_update = []
for name, param in model.named_parameters():
if param.requires_grad:
params_to_update.append(param)
print("\t", name)
else:
for name, param in model.named_parameters():
if param.requires_grad:
print("\t", name)
# Send the model to GPU
model = model.to(device)
        optimizer = optim.Adam(params_to_update, lr=lr, weight_decay=1e-4)
if scheduler_name == 'StepLR':
scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=8, gamma=0.1)
elif scheduler_name == 'MultiStepLR':
if use_augmentation:
scheduler = optim.lr_scheduler.MultiStepLR(optimizer, milestones=[20, 30], gamma=0.1)
else:
scheduler = optim.lr_scheduler.MultiStepLR(optimizer, milestones=[10, 15, 20], gamma=0.1)
else:
raise ValueError('Invalid scheduler')
# Loss function
loss_fn = BlendedLoss(loss_type, cross_entropy_flag)
# Train (fine-tune) model
fit(online_train_loader, model, loss_fn, optimizer, scheduler, nb_epoch,
device=device, log_interval=log_interval, save_model_to=config.model_save_dir)
elif config.mode == 'test':
test_dataset_path = dataset_path + '/test/test_data'
queries, db = test_data_loader(test_dataset_path)
model = load(file_path=config.model_to_test)
result_dict = infer(model, queries, db)
```
#### File: JohnGiorgi/PyTorch-Image-Retrieval/networks.py
```python
import torch
import torch.nn as nn
import torch.nn.functional as F
from torchvision import models
from senet import se_resnext101_32x4d
class BaseNetwork(nn.Module):
""" Load Pretrained Module """
def __init__(self, model_name, embedding_dim, feature_extracting, use_pretrained):
super(BaseNetwork, self).__init__()
self.model_name = model_name
self.embedding_dim = embedding_dim
self.feature_extracting = feature_extracting
self.use_pretrained = use_pretrained
self.model_ft = initialize_model(self.model_name,
self.embedding_dim,
self.feature_extracting,
self.use_pretrained)
def forward(self, x):
out = self.model_ft(x)
return out
class SelfAttention(nn.Module):
""" Self attention Layer
https://github.com/heykeetae/Self-Attention-GAN"""
def __init__(self, in_dim, activation):
super(SelfAttention, self).__init__()
self.chanel_in = in_dim
self.activation = activation
self.query_conv = nn.Conv2d(in_channels=in_dim, out_channels=in_dim // 8, kernel_size=1)
self.key_conv = nn.Conv2d(in_channels=in_dim, out_channels=in_dim // 8, kernel_size=1)
self.value_conv = nn.Conv2d(in_channels=in_dim, out_channels=in_dim, kernel_size=1)
self.gamma = nn.Parameter(torch.zeros(1))
self.softmax = nn.Softmax(dim=-1)
def forward(self, x):
"""
inputs :
x : input feature maps( B X C X W X H)
returns :
out : self attention value + input feature
attention: B X N X N (N is Width*Height)
"""
m_batchsize, C, width, height = x.size()
proj_query = self.query_conv(x).view(m_batchsize, -1, width * height).permute(0, 2, 1) # B X CX(N)
proj_key = self.key_conv(x).view(m_batchsize, -1, width * height) # B X C x (*W*H)
energy = torch.bmm(proj_query, proj_key) # transpose check
attention = self.softmax(energy) # BX (N) X (N)
proj_value = self.value_conv(x).view(m_batchsize, -1, width * height) # B X C X N
out = torch.bmm(proj_value, attention.permute(0, 2, 1))
out = out.view(m_batchsize, C, width, height)
out = self.gamma * out + x
return out
class EmbeddingNetwork(BaseNetwork):
""" Wrapping Modules to the BaseNetwork """
def __init__(self, model_name, embedding_dim, feature_extracting, use_pretrained,
attention_flag=False, cross_entropy_flag=False, edge_cutting=False):
super(EmbeddingNetwork, self).__init__(model_name, embedding_dim, feature_extracting, use_pretrained)
self.attention_flag = attention_flag
self.cross_entropy_flag = cross_entropy_flag
self.edge_cutting = edge_cutting
self.model_ft_convs = nn.Sequential(*list(self.model_ft.children())[:-1])
self.model_ft_embedding = nn.Sequential(*list(self.model_ft.children())[-1:])
if self.attention_flag:
if self.model_name == 'densenet161':
self.attention = SelfAttention(2208, 'relu')
elif self.model_name == 'resnet101':
self.attention = SelfAttention(2048, 'relu')
elif self.model_name == 'inceptionv3':
self.attention = SelfAttention(2048, 'relu')
elif self.model_name == 'seresnext':
self.attention = SelfAttention(2048, 'relu')
if self.cross_entropy_flag:
self.fc_cross_entropy = nn.Linear(self.model_ft.classifier.in_features, 1000)
def forward(self, x):
x = self.model_ft_convs(x)
x = F.relu(x, inplace=True)
if self.attention_flag:
x = self.attention(x)
if self.edge_cutting:
x = F.adaptive_avg_pool2d(x[:, :, 1:-1, 1:-1], output_size=1).view(x.size(0), -1)
else:
x = F.adaptive_avg_pool2d(x, output_size=1).view(x.size(0), -1)
# x = gem(x).view(x.size(0), -1)
out_embedding = self.model_ft_embedding(x)
if self.cross_entropy_flag:
out_cross_entropy = self.fc_cross_entropy(x)
return out_embedding, out_cross_entropy
else:
return out_embedding
def set_parameter_requires_grad(model, feature_extracting):
if feature_extracting:
for param in model.parameters():
param.requires_grad = False
def initialize_model(model_name, embedding_dim, feature_extracting, use_pretrained=True):
if model_name == "densenet161":
model_ft = models.densenet161(pretrained=use_pretrained)
set_parameter_requires_grad(model_ft, feature_extracting)
num_features = model_ft.classifier.in_features
model_ft.classifier = nn.Linear(num_features, embedding_dim)
elif model_name == "resnet101":
model_ft = models.resnet101(pretrained=use_pretrained)
set_parameter_requires_grad(model_ft, feature_extracting)
num_features = model_ft.fc.in_features
model_ft.fc = nn.Linear(num_features, embedding_dim)
elif model_name == "inceptionv3":
model_ft = models.inception_v3(pretrained=use_pretrained)
set_parameter_requires_grad(model_ft, feature_extracting)
num_features = model_ft.fc.in_features
model_ft.fc = nn.Linear(num_features, embedding_dim)
elif model_name == "seresnext":
model_ft = se_resnext101_32x4d(num_classes=1000)
set_parameter_requires_grad(model_ft, feature_extracting)
num_features = model_ft.last_linear.in_features
model_ft.last_linear = nn.Linear(num_features, embedding_dim)
else:
raise ValueError
return model_ft
# GeM Pooling
def gem(x, p=3, eps=1e-6):
return F.adaptive_avg_pool2d(x.clamp(min=eps).pow(p), output_size=1).pow(1. / p)
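# GeM generalizes global pooling: with p = 1 it reduces to average pooling, and the result
# approaches max pooling as p grows. Here p is a fixed argument rather than a learned parameter.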
``` |
{
"source": "JohnGiorgi/seq2rel",
"score": 2
} |
#### File: JohnGiorgi/seq2rel/demo.py
```python
from typing import Tuple
import streamlit as st
import streamlit.components.v1 as components
from pyvis.network import Network
from seq2rel import Seq2Rel
from seq2rel.common import util
TEXT_EXAMPLES = {
"ade": (
"Hydroxyurea is a cytostatic agent used to treat myeloproliferative disorders and"
" long-term treatment is associated with mucocutaneous adverse events and nail hyperpigmentation"
),
"bc5cdr": (
"Neuroleptic malignant syndrome induced by ziprasidone on the second day of"
" treatment. Neuroleptic malignant syndrome (NMS) is the rarest and most serious of the"
" neuroleptic-induced movement disorders. We describe a case of neuroleptic malignant"
" syndrome (NMS) associated with the use of ziprasidone. Although conventional neuroleptics"
" are more frequently associated with NMS, atypical antipsychotic drugs like ziprasidone"
" may also be a cause. The patient is a 24-year-old male with a history of schizophrenia"
" who developed signs and symptoms of NMS after 2 days of treatment with an 80-mg/day dose"
" of orally administrated ziprasidone. This case is the earliest (second day of treatment)"
" NMS due to ziprasidone reported in the literature."
),
"biogrid": (
"DNA-dependent protein kinase (DNA-PK) is composed of a 460-kDa catalytic component (p460)"
" and a DNA-binding component Ku protein. Immunoblot analysis after treatment of Jurkat"
" cells with anti-Fas antibody demonstrated the cleavage of p460 concomitantly with an"
" increase in CPP32/Yama/apopain activity. Recombinant CPP32/Yama/apopain specifically"
" cleaved p460 in the DNA-PK preparation that had been purified from Raji cells into 230-"
" and 160-kDa polypeptides, the latter of which was detected in anti-Fas-treated Jurkat"
" cells. The regulatory component Ku protein was not significantly affected by"
" CPP32/Yama/apopain. DNA-PK activity was decreased with the disappearance of p460 in the"
" incubation of DNA-PK with CPP32/Yama/apopain. These results suggest that the catalytic"
" component of DNA-PK is one of the target proteins for CPP32/Yama/apopain in Fas-mediated"
"apoptosis."
),
"gda": (
"Functional gene polymorphisms in aggressive and chronic periodontitis. There is strong"
" evidence that genetic as well as environmental factors affect the development of"
" periodontitis, and some suggestion that aggressive and chronic forms of the disease share"
" the same genetic predisposition. This study addresses the hypothesis that there are both"
" shared and unique genetic associations in these forms of periodontitis. A sample of 51"
" patients with aggressive disease, 57 patients with chronic disease, and 100 healthy"
" controls was recruited for this study. Ten functional polymorphisms in 7 candidate genes"
" were genotyped. The results show statistically significant (p <or= 0.05) differences"
" between genotype frequencies in aggressive and controls (IL-1B +3954 IL-6 -174); chronic"
" and controls (IL-6 -174 VDR -1056); chronic and aggressive periodontitis (IL-1A -889);"
" and periodontitis as a whole and controls (VDR -1056, TLR-4 399 _ IL-6 -174). These"
" results suggest that there are in fact both shared and unique genetic associations in"
" aggressive and chronic periodontitis."
),
}
@st.cache(allow_output_mutation=True, max_entries=1, ttl=3600)
def load_model(model_name: str):
return Seq2Rel(
model_name,
overrides={"dataset_reader.type": "seq2rel"},
)
def process_ent(text: str, ents: Tuple[str, ...]) -> str:
matched_ents = []
for ent in ents:
try:
start = text.lower().index(ent.lower())
end = start + len(ent)
matched_ents.append(text[start:end])
except ValueError:
matched_ents.append(ent)
ent_text = f"{matched_ents[0]}"
if matched_ents[1:]:
ent_text += f"{util.COREF_SEP_SYMBOL} {f'{util.COREF_SEP_SYMBOL} '.join(matched_ents[1:])}"
return ent_text
st.sidebar.write(
f"""
# Seq2Rel
A demo for our [Seq2Rel](https://github.com/JohnGiorgi/seq2rel) models.
Seq2Rel is a sequence-to-sequence based architecture for joint entity and relation extraction.
1. Select a pretrained model below (it may take a few seconds to load).
2. Enter some text on the right, and the extracted entity mentions and relations will be visualized
below.
    Coreferent mentions will be separated by a semicolon (`{util.COREF_SEP_SYMBOL}`). Hover over nodes and
edges to see their predicted classes.
"""
)
model_name = st.sidebar.selectbox("Model name", ("ADE", "BC5CDR", "BioGRID", "GDA")).strip().lower()
st.sidebar.subheader("Additional Settings")
debug = st.sidebar.checkbox("Debug", False)
if model_name:
model = load_model(model_name)
input_text = st.text_area(
"Enter some text",
value=TEXT_EXAMPLES[model_name],
help="Enter some text here. This will be auto-filled with a model-specific example.",
)
if input_text:
net = Network(notebook=True)
output = model(input_text)
st.subheader("Input text")
st.write(input_text)
if debug:
st.subheader("Raw output")
st.write(output)
st.subheader("Extracted relations")
deserialize_annotations = util.deserialize_annotations(output)
for prediction in deserialize_annotations:
for rel_type, rels in prediction.items():
# TODO: This should be extended to n-ary relations.
for rel in rels:
ent_1, ent_1_type = process_ent(input_text, rel[0][0]), rel[0][1]
ent_2, ent_2_type = process_ent(input_text, rel[1][0]), rel[1][1]
net.add_node(ent_1, title=ent_1_type)
net.add_node(ent_2, title=ent_2_type)
net.add_edge(ent_1, ent_2, title=rel_type)
net.show("network.html")
HtmlFile = open("network.html", "r", encoding="utf-8")
source_code = HtmlFile.read()
components.html(source_code, height=1200, width=1000)
```
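A small sketch of what `process_ent` above does (illustrative only; it assumes `util.COREF_SEP_SYMBOL` is the semicolon mentioned in the sidebar text, and the mention strings are hypothetical):

```python
from demo import process_ent

text = "Neuroleptic malignant syndrome (NMS) is the rarest of these disorders."
# "nms" is matched case-insensitively, so the surface form "NMS" is recovered from the
# text; a mention that cannot be found in the text is kept verbatim.
process_ent(text, ("nms", "unmatched mention"))  # -> "NMS; unmatched mention"
```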
#### File: seq2rel/metrics/fbeta_measure_seq2rel.py
```python
from typing import List, Optional, Set
import torch
from allennlp.training.metrics.fbeta_measure import FBetaMeasure
from allennlp.training.metrics.metric import Metric
from seq2rel.common.util import EntityAnnotation, deserialize_annotations
def _fuzzy_cluster_match(
pred_rel: EntityAnnotation,
gold_rels: Set[EntityAnnotation],
threshold: float = 0.5,
ordered_ents: bool = False,
) -> bool:
"""Given some predicted relation `pred_rel`, returns True if there is a fuzzy match to any
relation in the ground truth relations `gold_rels`. A fuzzy match occurs if there exists a
ground truth relation where, for every predicted cluster, P, there is a gold cluster G such
that | P ∩ G | / |P| > cluster_threshold. The number of predicted clusters and their predicted
entity classes must exactly match the ground truth regardless of threshold.
"""
for gold_rel in gold_rels:
# If the number of gold and predicted clusters differ then we don't have a match.
if len(gold_rel) != len(pred_rel):
continue
matched_indices = []
for i, (pred_mentions, pred_label) in enumerate(pred_rel):
for j, (gold_mentions, gold_label) in enumerate(gold_rel):
# If `ordered_ents`, order of predicted clusters must match order of gold clusters.
if ordered_ents and i != j:
continue
# Avoid matching different predicted clusters to the same gold cluster.
if j in matched_indices:
continue
# Convert to a set, as we don't care about duplicates or order.
pred = set(pred_mentions)
gold = set(gold_mentions)
# A predicted cluster (P) matches a gold cluster (G) if:
# 1. | P ∩ G | / |P| > threshold
# 2. The predicted cluster label matches the gold cluster label
if (len(pred & gold) / len(pred)) > threshold and pred_label == gold_label:
matched_indices.append(j)
break
        # If every predicted cluster was matched to a distinct gold cluster, this counts as a fuzzy match.
if len(matched_indices) == len(pred_rel):
return True
return False
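# Illustration of the threshold check above (hypothetical clusters): with threshold=0.5,
# a predicted cluster P = {"nms"} matches a gold cluster G = {"nms", "neuroleptic malignant
# syndrome"} because |P ∩ G| / |P| = 1/1 > 0.5, whereas a two-mention predicted cluster with
# only one mention in G does not, since 1/2 is not strictly greater than 0.5.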
@Metric.register("fbeta_seq2rel")
class FBetaMeasureSeq2Rel(FBetaMeasure):
"""A thin wrapper around FBetaMeasure, which computes the precision, recall and F-measure for
    the output of a Seq2Rel model. Besides `labels` and `ordered_ents`, the parameters are the same as
the parent class. For details, please see:
[FBetaMeasure](https://github.com/allenai/allennlp/blob/main/allennlp/training/metrics/fbeta_measure.py)
# Parameters
labels: `list`
The set of labels to include (and their order if `average is None`.)
Labels present in the data can be excluded, for example to calculate a
multi-class average ignoring a majority negative class. Labels not present
in the data will result in 0 components in a macro or weighted average.
cluster_threshold : `float`, optional (default = `None`)
If `cluster_threshold`, use fuzzy matching, where a predicted cluster (P) is considered a
true positive if | P ∩ G | / | P | > `cluster_threshold` for at least one gold cluster (G).
A reasonable threshold value is `0.5`.
ordered_ents : `bool`, optional (default = `False`)
True if the entities should be considered ordered (e.g. there are distinct head and tail
entities). Defaults to False.
"""
supports_distributed = True
def __init__(
self,
labels: List[str],
cluster_threshold: Optional[float] = None,
ordered_ents: bool = False,
beta: float = 1.0,
average: Optional[str] = None,
) -> None:
super().__init__(beta=beta, average=average)
# Unlike the parent class, we require labels to be not None. To be compatible with
# the parent class, self._labels needs to be a list of integers representing the
# positions of each class. For our purposes, these labels can just be [0,...,len(labels)]
self._str_labels = labels
self._labels = list(range(len(labels)))
self._num_classes = len(self._labels)
if cluster_threshold is not None and (cluster_threshold <= 0 or cluster_threshold > 1):
raise ValueError(f"cluster_threshold must be between (0, 1]. Got {cluster_threshold}.")
self._cluster_threshold = cluster_threshold
self._ordered_ents = ordered_ents
def __call__(self, predictions: List[str], ground_truths: List[str]) -> None:
"""
# Parameters
predictions : `list`, required.
A list of predictions.
        ground_truths : `list`, required.
A list corresponding to some ground truths to evaluate against.
"""
if len(ground_truths) != len(predictions):
raise ValueError(
f"len(ground_truths) must equal len(predictions)."
f" Got {len(ground_truths)} and {len(predictions)}."
)
        # If `self._true_positive_sum` is None, the metric is being called for
        # the first time, so initialize the running sums.
if self._true_positive_sum is None: # type: ignore
self._true_positive_sum = torch.zeros(self._num_classes)
self._true_sum = torch.zeros(self._num_classes)
self._pred_sum = torch.zeros(self._num_classes)
self._total_sum = torch.zeros(self._num_classes)
pred_annotations = deserialize_annotations(predictions, ordered_ents=self._ordered_ents)
gold_annotations = deserialize_annotations(ground_truths, ordered_ents=self._ordered_ents)
        # Predictions and ground truths are equal-length lists, one element per instance in the batch.
for pred_ann, gold_ann in zip(pred_annotations, gold_annotations):
if gold_ann:
for rel_label, gold_rels in gold_ann.items():
# Filter out any labels not provided at instantiation.
if self._labels and rel_label not in self._str_labels:
continue
# Get the predicted relations for this label.
class_index = self._str_labels.index(rel_label)
pred_rels = pred_ann.get(rel_label, [])
# Convert to a set, as we don't care about duplicates or order.
dedup_pred_rels = set(pred_rels)
dedup_gold_rels = set(gold_rels)
# If cluster_threshold, use fuzzy matching to determine true positives.
if self._cluster_threshold:
for rel in dedup_pred_rels:
if _fuzzy_cluster_match(
rel,
dedup_gold_rels,
threshold=self._cluster_threshold,
ordered_ents=self._ordered_ents,
):
self._true_positive_sum[class_index] += 1 # type: ignore
self._pred_sum[class_index] += 1
else:
self._true_positive_sum[class_index] += len( # type: ignore
dedup_pred_rels & dedup_gold_rels
)
self._pred_sum[class_index] += len(dedup_pred_rels)
self._true_sum[class_index] += len(dedup_gold_rels)
# No corresponding gold annotation, so these are all false-positives.
else:
for rel_label, pred_rels in pred_ann.items():
dedup_pred_rels = set(pred_rels)
if self._labels and rel_label not in self._str_labels:
continue
class_index = self._str_labels.index(rel_label)
self._pred_sum[class_index] += len(dedup_pred_rels)
# We need to set the total sum to be compatible with the parent class.
# Because we do not support masking, it is equal to the "true sum".
self._total_sum = self._true_sum.detach().clone()
@Metric.register("f1_seq2rel")
class F1MeasureSeq2Rel(FBetaMeasureSeq2Rel):
def __init__(
self,
labels: List[str],
cluster_threshold: Optional[float] = None,
ordered_ents: bool = False,
average: Optional[str] = None,
) -> None:
super().__init__(
labels=labels,
cluster_threshold=cluster_threshold,
ordered_ents=ordered_ents,
beta=1.0,
average=average,
)
```
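A minimal call sketch for the metric above (illustrative only; `decoded_strings` and `target_strings` stand in for lists of serialized relation strings, the `"ADE"` label is hypothetical, and `get_metric` is inherited from AllenNLP's `FBetaMeasure`):

```python
from seq2rel.metrics.fbeta_measure_seq2rel import F1MeasureSeq2Rel

metric = F1MeasureSeq2Rel(labels=["ADE"], average="micro")

# Both arguments are equal-length lists of serialized annotations, one per instance.
metric(predictions=decoded_strings, ground_truths=target_strings)
scores = metric.get_metric(reset=True)  # precision, recall, and fscore
```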
#### File: modules/attention/test_multihead_attention.py
```python
import numpy
import torch
from allennlp.common import Params
from allennlp.common.testing.test_case import AllenNlpTestCase
from allennlp.modules.attention.attention import Attention
from numpy.testing import assert_almost_equal
from seq2rel.modules.attention.multihead_attention import MultiheadAttention
class TestMultiheadAttention(AllenNlpTestCase):
def test_can_init_multihead(self):
legacy_attention = Attention.from_params(
Params({"type": "multihead_attention", "embed_dim": 4, "num_heads": 2})
)
isinstance(legacy_attention, MultiheadAttention)
def test_multihead_similarity(self):
attn = MultiheadAttention(embed_dim=4, num_heads=2)
vector = torch.FloatTensor([[0, 0, 0, 0], [1, 1, 1, 1]])
matrix = torch.FloatTensor(
[[[1, 2, 3, 4], [5, 6, 7, 8]], [[9, 10, 11, 12], [13, 14, 15, 16]]]
)
with torch.no_grad():
output = attn(vector, matrix)
assert_almost_equal(
output.sum(dim=-1).numpy(),
numpy.array([1.0, 1.0]),
decimal=2,
)
```
#### File: training/callbacks/test_concatenation_augmentation.py
```python
from pathlib import Path
import pytest
from seq2rel.training.callbacks.concatenation_augmentation import ConcatenationAugmentationCallback
class TestConcatenationAugmentationCallback:
def test_aug_frac_value_error(self) -> None:
with pytest.raises(ValueError):
_ = ConcatenationAugmentationCallback(
serialization_dir="", train_data_path="", aug_frac=1.1
)
with pytest.raises(ValueError):
_ = ConcatenationAugmentationCallback(
serialization_dir="", train_data_path="", aug_frac=-0.1
)
def test_on_start(self, concatenation_augmentation: ConcatenationAugmentationCallback) -> None:
# Ensure that on object instantiation, there are two training examples.
train_data = (
Path(concatenation_augmentation._train_data_path).read_text().strip().splitlines()
)
assert len(train_data) == 2
# Ensure that on training start, there are two plus one training examples.
concatenation_augmentation.on_start(trainer="")
train_data = (
Path(concatenation_augmentation._train_data_path).read_text().strip().splitlines()
)
assert len(train_data) == 3
def test_on_epoch(self, concatenation_augmentation: ConcatenationAugmentationCallback) -> None:
# Ensure that on object instantiation, there are two training examples.
train_data = (
Path(concatenation_augmentation._train_data_path).read_text().strip().splitlines()
)
assert len(train_data) == 2
# Ensure that on epoch end, there are two plus one training examples.
concatenation_augmentation.on_epoch(trainer="")
train_data = (
Path(concatenation_augmentation._train_data_path).read_text().strip().splitlines()
)
assert len(train_data) == 3
def test_on_end(self, concatenation_augmentation: ConcatenationAugmentationCallback) -> None:
# This is the train data BEFORE any augmentation.
expected = (
Path(concatenation_augmentation._train_data_path).read_text().strip().splitlines()
)
# Purposefully modify the training data on disk, and check that `on_end` restores it
Path(concatenation_augmentation._train_data_path).write_text(expected[0].strip())
concatenation_augmentation.on_end(trainer="")
actual = Path(concatenation_augmentation._train_data_path).read_text().strip().splitlines()
assert actual == expected
def test_format_instance(
self, concatenation_augmentation: ConcatenationAugmentationCallback
) -> None:
first_instance = "I am the first instance"
second_instance = "I am the second instance"
# Test with no sep_token provided
sep_token = " "
expected = first_instance + sep_token + second_instance
actual = concatenation_augmentation._format_instance(first_instance, second_instance)
assert actual == expected
# Test with sep_token provided
concatenation_augmentation._sep_token = "[SEP]"
expected = first_instance + f" {concatenation_augmentation._sep_token} " + second_instance
actual = concatenation_augmentation._format_instance(first_instance, second_instance)
assert actual == expected
def test_augment(self, concatenation_augmentation: ConcatenationAugmentationCallback) -> None:
# Load the training data and create a concatenated example.
train_data = (
Path(concatenation_augmentation._train_data_path).read_text().strip().splitlines()
)
first_source, first_target = train_data[0].split("\t")
second_source, second_target = train_data[1].split("\t")
concatenated_one = f"{first_source} {second_source}\t{first_target} {second_target}"
concatenated_two = f"{second_source} {first_source}\t{second_target} {first_target}"
        # This works because there are only two possible augmented examples given
# `concatenation_augmentation._train_data` and `concatenation_augmentation._aug_frac`.
expected_one = train_data + [concatenated_one]
expected_two = train_data + [concatenated_two]
actual = concatenation_augmentation._augment()
assert actual == expected_one or actual == expected_two
``` |
{
"source": "johnglensc/pynmea",
"score": 3
} |
#### File: pynmea/tests/test_nmea_stream.py
```python
import os
from unittest import TestCase
from pynmea.streamer import NMEAStream
from pynmea.nmea import GPRMC
class TestStream(TestCase):
def test_splits_data_1(self):
test_data = '$foo,bar,baz*77\n$Meep,wibble,123,321\n'
streamer = NMEAStream()
result = streamer._split(test_data)
self.assertEqual(result, ['foo,bar,baz*77', 'Meep,wibble,123,321'])
def test_splits_data_2(self):
test_data = '$foo,bar,baz*77\r$Meep,wibble,123,321\r'
streamer = NMEAStream()
result = streamer._split(test_data)
self.assertEqual(result, ['foo,bar,baz*77', 'Meep,wibble,123,321'])
def test_splits_data_3(self):
test_data = '$foo,bar,baz*77\r\n$Meep,wibble,123,321'
streamer = NMEAStream()
result = streamer._split(test_data)
self.assertEqual(result, ['foo,bar,baz*77', 'Meep,wibble,123,321'])
def test_splits_data_4(self):
test_data = '$foo,bar,baz*77NOTHING$Meep,wibble,123,321NOTHING'
streamer = NMEAStream()
result = streamer._split(test_data, separator='NOTHING')
self.assertEqual(result, ['foo,bar,baz*77', 'Meep,wibble,123,321'])
def test__read(self):
test_file = os.path.join(os.path.dirname(__file__), 'test_data',
'test_data_small.gps')
expected_result = ['GPRMC,184332.07,A,1929.459,S,02410.381,E,74.00,16.78,210410,0.0,E,A*2B',
'GPGGA,184333.07,1929.439,S,02410.387,E,1,04,2.8,100.00,M,-33.9,M,,0000*65',
'GPRMC,184444.08,A,1928.041,S,02410.809,E,74.00,16.78,210410,0.0,E,A*25',
'GPGGA,184445.08,1928.021,S,02410.814,E,1,04,2.7,100.00,M,-33.9,M,,0000*6E',
'GPGLL,1928.001,S,02410.820,E,184446.08,A,A*79',
'GPVTG,16.78,T,,M,74.00,N,137.05,K,A*36',
'GPRMC,184448.08,A,1927.962,S,02410.832,E,74.00,16.78,210410,0.0,E,A*26',
'GPGGA,184449.08,1927.942,S,02410.838,E,1,04,1.7,100.00,M,-33.9,M,,0000*6C',
'GPGLL,1927.922,S,02410.844,E,184450.08,A,A*7B',
'GPVTG,16.78,T,,M,74.00,N,137.05,K,A*36']
with open(test_file, 'r') as test_file_fd:
streamer = NMEAStream(stream_obj=test_file_fd)
next_data = streamer._read()
data = []
while next_data:
data += next_data
next_data = streamer._read()
self.assertEqual(data, expected_result)
def test__read_data(self):
expected_result = ['GPRMC,184332.07,A,1929.459,S,02410.381,E,74.00,16.78,210410,0.0,E,A*2B',
'GPGGA,184333.07,1929.439,S,02410.387,E,1,04,2.8,100.00,M,-33.9,M,,0000*65',
'GPRMC,184444.08,A,1928.041,S,02410.809,E,74.00,16.78,210410,0.0,E,A*25',
'GPGGA,184445.08,1928.021,S,02410.814,E,1,04,2.7,100.00,M,-33.9,M,,0000*6E',
'GPGLL,1928.001,S,02410.820,E,184446.08,A,A*79',
'GPVTG,16.78,T,,M,74.00,N,137.05,K,A*36',
'GPRMC,184448.08,A,1927.962,S,02410.832,E,74.00,16.78,210410,0.0,E,A*26',
'GPGGA,184449.08,1927.942,S,02410.838,E,1,04,1.7,100.00,M,-33.9,M,,0000*6C',
'GPGLL,1927.922,S,02410.844,E,184450.08,A,A*7B',
'GPVTG,16.78,T,,M,74.00,N,137.05,K,A*36']
input_data = """$GPRMC,184332.07,A,1929.459,S,02410.381,E,74.00,16.78,210410,0.0,E,A*2B
$GPGGA,184333.07,1929.439,S,02410.387,E,1,04,2.8,100.00,M,-33.9,M,,0000*65
$GPRMC,184444.08,A,1928.041,S,02410.809,E,74.00,16.78,210410,0.0,E,A*25
$GPGGA,184445.08,1928.021,S,02410.814,E,1,04,2.7,100.00,M,-33.9,M,,0000*6E
$GPGLL,1928.001,S,02410.820,E,184446.08,A,A*79
$GPVTG,16.78,T,,M,74.00,N,137.05,K,A*36
$GPRMC,184448.08,A,1927.962,S,02410.832,E,74.00,16.78,210410,0.0,E,A*26
$GPGGA,184449.08,1927.942,S,02410.838,E,1,04,1.7,100.00,M,-33.9,M,,0000*6C
$GPGLL,1927.922,S,02410.844,E,184450.08,A,A*7B
$GPVTG,16.78,T,,M,74.00,N,137.05,K,A*36
"""
streamer = NMEAStream()
data = streamer._read(data=input_data)
data += streamer._read(data='')
self.assertEqual(data, expected_result)
def test__get_type(self):
streamer = NMEAStream()
sentence = '$GPRMC,184332.07,A,1929.459,S,02410.381,E,74.00,16.78,210410,0.0,E,A*2B'
sen_type = streamer._get_type(sentence)
self.assertTrue(isinstance(sen_type(), GPRMC))
def test_read_data_obj(self):
test_file = os.path.join(os.path.dirname(__file__), 'test_data',
'test_data_small.gps')
with open(test_file, 'r') as test_file_fd:
streamer = NMEAStream(stream_obj=test_file_fd)
next_data = streamer.get_objects()
nmea_objects = []
while next_data:
nmea_objects += next_data
next_data = streamer.get_objects()
expected_object_types = ['GPRMC', 'GPGGA', 'GPRMC', 'GPGGA', 'GPGLL',
'GPVTG', 'GPRMC', 'GPGGA', 'GPGLL', 'GPVTG']
self.assertEqual(expected_object_types[0], nmea_objects[0].sen_type)
self.assertEqual(expected_object_types[1], nmea_objects[1].sen_type)
self.assertEqual(expected_object_types[2], nmea_objects[2].sen_type)
self.assertEqual(expected_object_types[3], nmea_objects[3].sen_type)
self.assertEqual(expected_object_types[4], nmea_objects[4].sen_type)
self.assertEqual(expected_object_types[5], nmea_objects[5].sen_type)
self.assertEqual(expected_object_types[6], nmea_objects[6].sen_type)
self.assertEqual(expected_object_types[7], nmea_objects[7].sen_type)
self.assertEqual(expected_object_types[8], nmea_objects[8].sen_type)
self.assertEqual(expected_object_types[9], nmea_objects[9].sen_type)
def test_read_data_obj_raw(self):
data = """$GPRMC,184332.07,A,1929.459,S,02410.381,E,74.00,16.78,210410,0.0,E,A*2B
$GPGGA,184333.07,1929.439,S,02410.387,E,1,04,2.8,100.00,M,-33.9,M,,0000*65
$GPRMC,184444.08,A,1928.041,S,02410.809,E,74.00,16.78,210410,0.0,E,A*25
$GPGGA,184445.08,1928.021,S,02410.814,E,1,04,2.7,100.00,M,-33.9,M,,0000*6E
$GPGLL,1928.001,S,02410.820,E,184446.08,A,A*79
$GPVTG,16.78,T,,M,74.00,N,137.05,K,A*36
$GPRMC,184448.08,A,1927.962,S,02410.832,E,74.00,16.78,210410,0.0,E,A*26
$GPGGA,184449.08,1927.942,S,02410.838,E,1,04,1.7,100.00,M,-33.9,M,,0000*6C
$GPGLL,1927.922,S,02410.844,E,184450.08,A,A*7B
$GPVTG,16.78,T,,M,74.00,N,137.05,K,A*36
"""
streamer = NMEAStream()
nmea_objects = streamer.get_objects(data=data)
nmea_objects += streamer.get_objects(data='')
expected_object_types = ['GPRMC', 'GPGGA', 'GPRMC', 'GPGGA', 'GPGLL',
'GPVTG', 'GPRMC', 'GPGGA', 'GPGLL', 'GPVTG']
self.assertEqual(expected_object_types[0], nmea_objects[0].sen_type)
self.assertEqual(expected_object_types[1], nmea_objects[1].sen_type)
self.assertEqual(expected_object_types[2], nmea_objects[2].sen_type)
self.assertEqual(expected_object_types[3], nmea_objects[3].sen_type)
self.assertEqual(expected_object_types[4], nmea_objects[4].sen_type)
self.assertEqual(expected_object_types[5], nmea_objects[5].sen_type)
self.assertEqual(expected_object_types[6], nmea_objects[6].sen_type)
self.assertEqual(expected_object_types[7], nmea_objects[7].sen_type)
self.assertEqual(expected_object_types[8], nmea_objects[8].sen_type)
self.assertEqual(expected_object_types[9], nmea_objects[9].sen_type)
def test_read_data_str_raw(self):
data = """$GPRMC,184332.07,A,1929.459,S,02410.381,E,74.00,16.78,210410,0.0,E,A*2B
$GPGGA,184333.07,1929.439,S,02410.387,E,1,04,2.8,100.00,M,-33.9,M,,0000*65
$GPRMC,184444.08,A,1928.041,S,02410.809,E,74.00,16.78,210410,0.0,E,A*25
$GPGGA,184445.08,1928.021,S,02410.814,E,1,04,2.7,100.00,M,-33.9,M,,0000*6E
$GPGLL,1928.001,S,02410.820,E,184446.08,A,A*79
$GPVTG,16.78,T,,M,74.00,N,137.05,K,A*36
$GPRMC,184448.08,A,1927.962,S,02410.832,E,74.00,16.78,210410,0.0,E,A*26
$GPGGA,184449.08,1927.942,S,02410.838,E,1,04,1.7,100.00,M,-33.9,M,,0000*6C
$GPGLL,1927.922,S,02410.844,E,184450.08,A,A*7B
$GPVTG,16.78,T,,M,74.00,N,137.05,K,A*36
"""
streamer = NMEAStream()
nmea_objects = streamer.get_strings(data=data)
nmea_objects += streamer.get_strings(data='')
expected_result = ['GPRMC,184332.07,A,1929.459,S,02410.381,E,74.00,16.78,210410,0.0,E,A*2B',
'GPGGA,184333.07,1929.439,S,02410.387,E,1,04,2.8,100.00,M,-33.9,M,,0000*65',
'GPRMC,184444.08,A,1928.041,S,02410.809,E,74.00,16.78,210410,0.0,E,A*25',
'GPGGA,184445.08,1928.021,S,02410.814,E,1,04,2.7,100.00,M,-33.9,M,,0000*6E',
'GPGLL,1928.001,S,02410.820,E,184446.08,A,A*79',
'GPVTG,16.78,T,,M,74.00,N,137.05,K,A*36',
'GPRMC,184448.08,A,1927.962,S,02410.832,E,74.00,16.78,210410,0.0,E,A*26',
'GPGGA,184449.08,1927.942,S,02410.838,E,1,04,1.7,100.00,M,-33.9,M,,0000*6C',
'GPGLL,1927.922,S,02410.844,E,184450.08,A,A*7B',
'GPVTG,16.78,T,,M,74.00,N,137.05,K,A*36']
self.assertEqual(expected_result, nmea_objects)
``` |
{
"source": "johnglover/pyof",
"score": 2
} |
#### File: johnglover/pyof/setup.py
```python
from distutils.core import setup, Extension
from distutils import sysconfig
import sys
def get_platform():
"""Return the current platform if supported, or 'unsupported' if not."""
if sys.platform[:6] == "darwin":
return "darwin"
else:
return "unsupported"
# check that the current platform is supported
if get_platform() == "unsupported":
print "Error: Cannot currently build on this platform. "
print " Only Mac OS X is supported."
exit(1)
# include directories
includes = """../libs/fmodex/include/
../libs/FreeImage/include/
../libs/freetype/include/
../libs/freetype/include/freetype2/
../libs/glee/include/
../libs/poco/include/
../libs/rtAudio/include/
../libs/openFrameworks/app/
../libs/openFrameworks/communication/
../libs/openFrameworks/events/
../libs/openFrameworks/graphics/
../libs/openFrameworks/sound/
../libs/openFrameworks/utils/
../libs/openFrameworks/video/
../libs/openFrameworks/""".split()
# source files
source_files = """openframeworks/openframeworks.i
../libs/openFrameworks/app/ofAppGlutWindow.cpp
../libs/openFrameworks/app/ofAppRunner.cpp
../libs/openFrameworks/communication/ofArduino.cpp
../libs/openFrameworks/communication/ofSerial.cpp
../libs/openFrameworks/graphics/ofBitmapFont.cpp
../libs/openFrameworks/graphics/ofGraphics.cpp
../libs/openFrameworks/graphics/ofImage.cpp
../libs/openFrameworks/graphics/ofTexture.cpp
../libs/openFrameworks/graphics/ofTrueTypeFont.cpp
../libs/openFrameworks/sound/ofSoundPlayer.cpp
../libs/openFrameworks/sound/ofSoundStream.cpp
../libs/openFrameworks/utils/ofMath.cpp
../libs/openFrameworks/utils/ofUtils.cpp
../libs/openFrameworks/video/ofQtUtils.cpp
../libs/openFrameworks/video/ofVideoGrabber.cpp
../libs/openFrameworks/video/ofVideoPlayer.cpp""".split()
# addons
# addon_source_files = """../addons/ofxThread/src/ofxThread.cpp""".split()
# source_files.extend(addon_source_files)
# set library directories and add frameworks for OS X
if get_platform() == "darwin":
includes.extend("""/System/Library/Frameworks/OpenGL.framework/Headers
/System/Library/Frameworks/GLUT.framework/Headers
/System/Library/Frameworks/AGL.framework/Headers
/System/Library/Frameworks/ApplicationServices.framework/Headers
/System/Library/Frameworks/AudioToolbox.framework/Headers
/System/Library/Frameworks/Carbon.framework/Headers
/System/Library/Frameworks/CoreAudio.framework/Headers
/System/Library/Frameworks/CoreFoundation.framework/Headers
/System/Library/Frameworks/CoreServices.framework/Headers
/System/Library/Frameworks/Quicktime.framework/Headers""".split())
link_args = """-framework OpenGL
-framework GLUT
-framework AGL
-framework ApplicationServices
-framework AudioToolbox
-framework Carbon
-framework CoreAudio
-framework CoreFoundation
-framework CoreServices
-framework Quicktime
../libs/FreeImage/lib/osx/freeimage.a
../libs/freetype/lib/osx/freetype.a
../libs/glee/lib/osx/GLee.a
../libs/rtAudio/lib/osx/rtAudio.a
../libs/fmodex/lib/osx/libfmodex.dylib
../libs/poco/lib/osx/CppUnit.a
../libs/poco/lib/osx/PocoFoundation.a
../libs/poco/lib/osx/PocoNet.a
../libs/poco/lib/osx/PocoUtil.a
../libs/poco/lib/osx/PocoXML.a""".split()
# Remove any 64-bit flags from distutils sysconfig config_vars, the Quicktime
# framework is 32-bit only.
# There doesn't seem to be a nicer way to change these values.
config_vars = sysconfig.get_config_vars()
config_vars['PY_CFLAGS'] = config_vars['PY_CFLAGS'].replace("-arch x86_64 ", "")
config_vars['CFLAGS'] = config_vars['CFLAGS'].replace("-arch x86_64 ", "")
config_vars['BLDSHARED'] = config_vars['BLDSHARED'].replace("-arch x86_64 ", "")
config_vars['LDFLAGS'] = config_vars['LDFLAGS'].replace("-arch x86_64 ", "")
config_vars['LDSHARED'] = config_vars['LDSHARED'].replace("-arch x86_64 ", "")
# Build the extension
doc_lines = __doc__.split("\n")
openframeworks = Extension("openframeworks/_openframeworks",
sources = source_files,
include_dirs = includes,
extra_link_args = link_args,
swig_opts = ['-c++'])
# install setup data_files in the same folder as the module
# from distutils.command.install import INSTALL_SCHEMES
# for scheme in INSTALL_SCHEMES.values():
# scheme['data'] = scheme['platlib']
setup(name='openframeworks',
description = doc_lines[0],
long_description = "\n".join(doc_lines[2:]),
url = 'http://github.com/johnglover/ofpy',
download_url = 'http://github.com/johnglover/ofpy',
license = 'MIT',
author = '<NAME>',
author_email = '<EMAIL>',
platforms = ["Mac OS-X"],
version = '006.2',
ext_modules = [openframeworks],
packages = ['openframeworks'])
``` |
{
"source": "johngncook/red-jake",
"score": 3
} |
#### File: johngncook/red-jake/app_functions.py
```python
import pyttsx3
import datetime
import speech_recognition as sr
import wolframalpha
engine = pyttsx3.init('sapi5')
app_id = 'L73LGW-35VJW9WY6J'
client = wolframalpha.Client(app_id)
voices = engine.getProperty('voices')
engine.setProperty('voice', voices[0].id)
def speak(audio):
engine.say(audio)
engine.runAndWait()
def greet():
"""Greets user depending on time of day."""
hour = int(datetime.datetime.now().hour)
if hour >= 0 and hour < 12:
speak ("Good Morning!")
elif hour >= 12 and hour <18:
speak("Good Afternoon")
else:
speak("Good Evening!")
speak(" I am <NAME>ir. Red Jake. How may I help you today?")
def take_command():
"""Takes microphone input and returns string output."""
r = sr.Recognizer()
with sr.Microphone() as source:
print("Listening...")
r.pause_threshold = 1
r.adjust_for_ambient_noise(source, duration=1)
audio = r.listen(source)
try:
print("Recognizing....")
query = r.recognize_google(audio)
print(f"User said: {query}\n")
except Exception as e:
#print(e)
print("Say that again please...")
return "None"
return query
``` |
{
"source": "johng/obd",
"score": 2
} |
#### File: tests/integration/omnibolt.py
```python
from bit.network.meta import Unspent
from typing import Optional, Final, Any
import json
from websocket import create_connection
from omnicore_connection import OmnicoreConnection
from omnicore_types import Address
class OmniBolt:
def __init__(self, address_str: str):
self._address_str = address_str
self._websocket = None
self._account = None
self.user_id: Optional[str] = None
self.node_id: Optional[str] = None
self.wallet_address: Optional[Address] = None
def _sent_sync_request(self, message, expected_type=None):
if expected_type is None:
expected_type = message["type"]
str_message = json.dumps(message)
print(str_message)
self._websocket.send(str_message)
raw_result = self._websocket.recv()
result = json.loads(raw_result)
assert (
result["type"] == expected_type
), f"Expecting type={expected_type}, got type={result['type'], }"
print(result)
assert result["status"], f"Invalid response=[{result}]"
return result["result"]
def connect(self):
ws = create_connection(f"ws://{self._address_str}/wsregtest")
open_message = '{"type": -102004}'
ws.send(open_message)
self._websocket = ws
result = ws.recv()
print("Received '%s'" % result)
result = ws.recv()
print("Received '%s'" % result)
d = json.loads(result)
return d["result"]
def login(self, user) -> str:
message = {"type": -102001, "data": {"mnemonic": user, "is_admin": False}}
response = self._sent_sync_request(message)
self.user_id = response["userPeerId"]
self.node_id = response["nodePeerId"]
self.node_address = response["nodeAddress"]
return response
def generate_address(
self,
) -> Address:
message = {"type": -103000}
response = self._sent_sync_request(message)
return Address(
public_key=response["pub_key"],
private_key=response["wif"],
index=response["index"],
address=response["address"],
)
def connect_peer(self, peer: "OmniBolt"):
message = {"type": -102003, "data": {"remote_node_address": peer.node_address}}
return self._sent_sync_request(message)
def _open_channel(
self,
*,
peer_node: "OmniBolt",
funding_public_key: str,
funder_address_index: int,
):
message = {
"type": -100032,
"data": {
"funding_pubkey": funding_public_key,
"funder_address_index": funder_address_index,
},
"recipient_node_peer_id": peer_node.node_id,
"recipient_user_peer_id": peer_node.user_id,
}
response = self._sent_sync_request(message)
return response, self.read_message()
def auto_funding(
self,
*,
peer: "OmniBolt",
temporary_channel_id: str,
btc_amount: float,
property_id: int,
asset_amount: float,
):
message = {
"type": -100134,
"recipient_node_peer_id": peer.node_id,
"recipient_user_peer_id": peer.user_id,
"data": {
"temporary_channel_id": temporary_channel_id,
"btc_amount": btc_amount,
"property_id": property_id,
"asset_amount": asset_amount,
},
}
return (
self._sent_sync_request(message, expected_type=-100340),
self.read_message(),
self.read_message(),
self.read_message(),
self.read_message(),
self.read_message(),
self.read_message(),
)
def get_all_channels(self):
message = {
"type": -103150,
}
return self._sent_sync_request(message)["data"]
def add_invoice(self, property_id: int, amount: int):
response = self.generate_address()
message = {
"type": -100402,
"recipient_node_peer_id": self.node_id,
"recipient_user_peer_id": self.user_id,
"data": {
"h": response.public_key,
"expiry_time": "2120-12-15",
"description": "description",
"property_id": property_id,
"amount": amount,
},
}
self.h_address = response
return self._sent_sync_request(message)
def _get_routing_information(self, invoice: str, peer: "OmniBolt"):
message = {
"type": -100401,
"recipient_node_peer_id": peer.node_id,
"recipient_user_peer_id": peer.user_id,
"data": {"invoice": invoice},
}
return (
self._sent_sync_request(message, expected_type=-100040),
self.read_message(),
self.read_message(),
self.read_message(),
)
    def pay_invoice(self, invoice: str, peer: "OmniBolt"):
        return self._get_routing_information(invoice, peer), peer.read_message()
def read_message(self):
raw_reply = self._websocket.recv()
reply = json.loads(raw_reply)
assert reply["status"], f"Invalid reply=[{reply}]"
return reply["result"]
def create_commitment_transaction(
self,
peer: "OmniBolt",
channel_id: str,
amount: float,
last_temp_address_private_key: str,
curr_temp_address_pub_key: str,
curr_temp_address_index: int,
):
message = {
"type": -100351,
"recipient_node_peer_id": peer.node_id,
"recipient_user_peer_id": peer.user_id,
"data": {
"channel_id": channel_id,
"amount": amount,
"last_temp_address_private_key": last_temp_address_private_key,
"curr_temp_address_index": curr_temp_address_index,
"curr_temp_address_pub_key": curr_temp_address_pub_key,
},
}
return self._sent_sync_request(message)
def open_channel(
self,
peer: "OmniBolt",
omnicore_connection: OmnicoreConnection,
omnicore_item,
funding_btc,
channel_size: int,
):
property_id = omnicore_item["propertyid"]
funding_address = self.generate_address()
open_channel_resp, accept_message = self._open_channel(
peer_node=peer,
funding_public_key=funding_address.public_key,
funder_address_index=funding_address.index,
)
temp_channel_id = open_channel_resp["temporary_channel_id"]
omnicore_connection.send_bitcoin(funding_address.address, 1000000000)
omnicore_connection.mine_bitcoin(50, self.wallet_address.address)
grant_amount = "9999999999.00000000"
omnicore_connection.send_omnicore_token(
funding_btc,
funding_address.address,
omnicore_item,
grant_amount,
)
omnicore_connection.mine_bitcoin(200, self.wallet_address.address)
resp_funding = self.auto_funding(
temporary_channel_id=temp_channel_id,
peer=peer,
btc_amount=0.0004,
property_id=property_id,
asset_amount=channel_size,
)
print("Response funding {}".format(resp_funding))
```
#### File: tests/integration/runner.py
```python
from typing import Optional
from omnibolt import OmniBolt
from omnicore_connection import OmnicoreConnection
import os
class TestRunner:
@staticmethod
def generate_omni_bolt(address: str) -> OmniBolt:
omni_bolt = OmniBolt(address)
user = omni_bolt.connect()
omni_bolt.login(user)
omni_bolt.wallet_address = omni_bolt.generate_address()
return omni_bolt
def __init__(self):
self.omni_bolt_alice = self.generate_omni_bolt(
os.environ.get("OMNI_BOLT_ALICE")
)
self.omni_bolt_bob = self.generate_omni_bolt(os.environ.get("OMNI_BOLT_BOB"))
self.property_id: Optional[int] = None
self.channel_id: Optional[int] = None
    def setup_basic_workflow(self, channel_size: int) -> None:
omnicore_connection = OmnicoreConnection()
address_miner = omnicore_connection.generate_bitcoin_address("miner")
address_master_funder = omnicore_connection.generate_bitcoin_address(
"address_master_funder"
)
omnicore_connection.mine_bitcoin(200, address_miner)
omnicore_connection.send_bitcoin(address_master_funder, 100000)
created_omnicore_item = omnicore_connection.generate_omni_token(
address_master_funder, address_miner
)
self.omni_bolt_alice.connect_peer(
self.omni_bolt_bob,
),
grant_amount = "9999999999.00000000"
omnicore_connection.omni_sendgrant(
address_master_funder,
created_omnicore_item,
grant_amount,
)
# Send omnicore currency to Alice
omnicore_connection.mine_bitcoin(20, address_miner)
self.omni_bolt_alice.property_id = int(created_omnicore_item["propertyid"])
self.omni_bolt_bob.property_id = int(created_omnicore_item["propertyid"])
self.omni_bolt_alice.open_channel(
self.omni_bolt_bob,
omnicore_connection,
created_omnicore_item,
address_master_funder,
channel_size,
)
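# Hypothetical entry point (assumptions: OMNI_BOLT_ALICE / OMNI_BOLT_BOB point at running
# omnibolt instances, and the channel size of 1000 is an arbitrary example value):
#
#   runner = TestRunner()
#   runner.setup_basic_workflow(channel_size=1000)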
``` |
{
"source": "johngoeltz/pdf2chem",
"score": 3
} |
#### File: johngoeltz/pdf2chem/pdf2chem.py
```python
import chemdataextractor as cde
import cirpy
import time
import sys
import re
import pandas as pd
import os
import textract
from datetime import datetime
# future work - a choice for whether to query 3-letter words
# if curation_type == "Regular (ignores 3-letter words)":
# regex_number = 4
# else:
# regex_number = 3
# curation_type = "Exhaustive (includes 3-letter words)"
pdf_path = os.getcwd()
regex_number = 3
IN_COLAB = "google.colab" in sys.modules
if IN_COLAB:
pdf_method = "pdfminer"
else:
pdf_method = "pdftotext"
print("We\'ll use {} as the pdf extraction method.".format(pdf_method))
# element symbols and false positives hard-coded until stable hosting is found
element_symbols = ['h', 'he', 'li', 'be', 'b', 'c', 'n', 'o', 'f', 'ne', 'na',
'mg', 'al', 'si', 'p', 's', 'cl', 'ar', 'k', 'ca',
'sc', 'ti', 'v', 'cr', 'mn', 'fe', 'co', 'ni', 'cu',
'zn', 'ga', 'ge', 'as', 'se', 'br', 'kr', 'rb', 'sr',
'y', 'zr', 'nb', 'mo', 'tc', 'ru', 'rh', 'pd', 'ag',
'cd', 'in', 'sn', 'sb', 'te', 'i', 'xe', 'cs', 'ba',
'la', 'ce', 'pr', 'nd', 'pm', 'sm', 'eu', 'gd', 'tb',
'dy', 'ho', 'er', 'tm', 'yb', 'lu', 'hf', 'ta', 'w',
're', 'os', 'ir', 'pt', 'au', 'hg', 'tl', 'pb', 'bi',
'po', 'at', 'rn', 'fr', 'ra', 'ac', 'th', 'pa', 'u',
'np', 'pu', 'am', 'cm', 'bk', 'cf', 'es', 'fm', 'md',
'no', 'lr', 'rf', 'db', 'sg', 'bh', 'hs', 'mt', 'ds ',
'rg ', 'cn ', 'nh', 'fl', 'mc', 'lv', 'ts', 'og']
false_positives = ['reno', 'lower', 'format', 'lead', 'nci', 'cc', 'isi',
'doi', "\\'b", 'is', 'ph', 'mv', 'zone', 'based', 'on',
'final', 'kato', 'cm', 'life', 'versus', 'www', 'can',
'ate', 'mm', 'crystal', 'sem', 'an', 's1', 'force', 'may',
'any', 'lau', 'voltage', 'kc', 'mino', 'm. h.', 'set',
'selective', 'c.p.k.', 'same', 'page 10', 'm-1', 'ai',
'c1', 'm2', 'et', 'fulfill', 'dry', 'via', 'may', 'pka',
'any', 'edge', 'b.v.', 'final', 'rt', '2b', 'h.y.', 'y.k.',
'v.v.', 'w.y.', 'good', 'region', 'cycle', 'des', 'force',
'may', 'dsc', 'chcl', 'counter', 'van', 'see', 'best',
'green', 'equal', 'result', 'challenge', 'substance',
'spectrum', 'der', 'its', 'glass', 'all', 'new', 'mix',
'so', 'soc.', 'arm', 'nm', 'ran', 'enable', 'sd', 'saa',
'map', 'ac1', 'fab', 'act', 'b7', 'liu', 'check', 'dual',
'via', 'den', 'fc', 'if', 'rapid', 'san', 'van', 'control',
'see', 'harry', 'adam', 'line', 'ac-1', 'sig', 'recruit',
'bli', 'test', 'tau', 'acs', 'iap', 'box', 'campaign',
'target', 'gfp', 'new', 'cv', 'rt', 'lid', 'compound',
'selective', 'rfb', 'ment', 'est', 'mm', 'con', 'con-',
's4', 'harry', 'ip', 'lp', 'ple', 'ml', 'prone', 'pka',
'sum', 'derivative', 'ten', 'min', 'vortex', 'gradual',
'tot', 'ber', 'red', 'ing', 'para', 'phs', 'gen', 'dft',
'nals', 'enable', 'set', 'versus', 'ma', 'the', 'and',
'eo', 'cps', 'ep', 'are', 'same', 'cos', 'age', 'sem',
's4', 'cycle', 'far', 'cal', 'overall', 'net', 'et', 'ml',
's1', 'prone', 'capture', 'or', 'rise', 'but', 'diurnal',
'dry', 'may', 'of', 'off', 'dp', 'if', 'dants', 'van',
'eden', 'line', 'tx', 'top', 'va', 'per', 'ny', 'on',
'ing', 'cp', 'for', 'dc', 'air', 'nhe', 'gas', 'zonal',
'all', 'new', 'based', 'had', 'ph', 'cm3', 'pyrite', 'soc',
'ser', 'acc', 'res', 'eds', 'mp', 'pro', 'inc', 'im', 'bv',
'disodium', 'ab', 'ed', 'carboxylate', '1mm', 'nat', 'eq',
'acc', 'sci', 'mol', 'int', 'sc-s', 'scs', 'gu', 'atm',
'shi', '2az', 'abbott', 'ms', 'wang', 'pdc', 'franklin',
'bay', 'dess', 'hbd', 'retard', 'intercept', 'iii',
'acid', 'fraction', 'aldrich', 'triton', 'cda', 'cyano',
'vinyl', 'flux', 'ethyl', 'methyl', 'mit', 'trigger',
'accelerate', 'ants', 'pentyl', 'laser', 'india', 'dos',
'los', 'acetyl', 'dec', 'sheets', 'tem', 'dimethyl',
'serial', 'tag', 'tandem', 'trap', 'mic', 'exciton',
'aldehyde', 'combat', 'roi', 'probiotic', 'antiviral',
'cada', 'beam', 'austin', 'lactone', 'lumen', 'diethyl',
'optimal', 'sulfoxide', 'gm3', 'gel', 'blockade', 'omega',
'cubes', 'bin', 'alcohols', 'alcohol', 'benchmark',
'portal', 'matrix', 'apex', 'bacterial', 'cube', 'linker',
'cascade', 'optimum', 'carbonyl oxygen', 'facet', 'shield']
if regex_number == 3:
false_positives = [word for word in false_positives if not
re.search("[a-zA-Z0-9+-]{3}", word) or
re.search("[a-zA-Z0-9+-]{4}", word)]
"""# Define functions"""
def quick_curate(pdf_path, pdf_method, false_positives, regex_number):
# extract the text from the pdf
# the pdf_method should adapt to both local and hosted
# runtime compatibility
text = textract.process(pdf_path, method=pdf_method)
# queue up and reset list used to process the paper
temp_word_list = []
# strip new line and other markup from pdf mining
text = str(text).replace("\\n-", '').replace('\-\n', '')
text = str(text).replace('\-\n-', '').replace('\\n', ' ')
text = str(text).replace('\n', ' ').replace('.', '')
text = str(text).replace('*', "").replace('ISSN', '').replace('NSF', '')
text = str(text).replace('NIH', '').replace("b'", '').replace(r"\r", '')
# split by white spaces
temp_word_list = re.split("\s+", str(text))
# try to remove reference section by cutting off everything after the last
# mention of reference
ref = [i for i, w in enumerate(temp_word_list) if
w.lower().startswith('reference')]
#print(ref)
try:
temp_word_list = temp_word_list[:(ref[-1])]
except Exception as e:
pass
# reconnect any words that got hyphenated and cut off at the end
# of a column
for i, word in enumerate(temp_word_list):
        if re.search('[-]+$', word) and i + 1 < len(temp_word_list):
temp_word_list[i] = word.replace('-', '') + temp_word_list[i+1]
del(temp_word_list[i+1])
print('The initial list for {} has {} words.'.format(pdf_path,
len(temp_word_list)))
# reconstruct a text string from the cleaned list,
# as cde's NLP works on strings
cleaned_text = ''
for word in temp_word_list:
cleaned_text += word
cleaned_text += ' '
# have cde do NLP on the string and convert the results
# into a list of strings
doc = cde.Document(cleaned_text)
chemicals_all = [span for span in doc.cems]
chem_strings = [str(word).lower().replace('\n', ' ')
for word in chemicals_all]
# remove any blanks or null values
chem_strings = [word for word in chem_strings if word]
# remove anything left with a backslash in it
chem_strings = [word for word in chem_strings if not
re.search('[\\\+]', word)]
print('We\'ll attempt to resolve {} \
potential chemicals.'.format(len(chem_strings)))
# reset lists used for processing query hits and misses
smiles_list = []
already_queried = []
missed_items = []
for item in chem_strings:
# if Sn is found, it's probably tin, not S=C
if item.lower() == "sn":
smiles_list.append('SnH4')
print(item, smiles_list[-1])
continue
# keeping element symbols, such as H, C, or Na
# this may turn into an option
if item in element_symbols:
smiles_list.append(cirpy.resolve(item, 'smiles'))
print(item, smiles_list[-1])
continue
# Future work - include options for user to specify exclusion of
# 3-letter words
# adapt the regex code that leaves out short words/abbreviations
# to the user input above
# if regex_number == 4:
#
# if not re.search("[a-zA-Z0-9+-]{4}", item):
# smiles_list.append(None)
# print('Found a word that\'s a\
# likely false positive: {}'.format(item))
# missed_items.append(item)
# continue
if regex_number == 3:
if not re.search("[a-zA-Z0-9+-]{3}", item):
smiles_list.append(None)
print('Found a word that\'s a likely false \
positive: {}'.format(item))
missed_items.append(item)
continue
# save time by not querying chemicals that are in the text many times
if item in already_queried:
smiles_list.append(None)
print('We\'ve already queried this one: {}'.format(item))
# don't query the chemical if it's a known false positive
# these include author names and a few other odds and ends
elif item.strip('.').strip(',').lower() in false_positives:
smiles_list.append(None)
print('Found one known to be a false positive: {}'.format(item))
# if the item passes all the tests,
# attempt to resolve it via NIH's CIR
else:
try:
smiles_list.append(cirpy.resolve(item, 'smiles'))
print(item, smiles_list[-1])
time.sleep(0.21)
# except loop in here to account for
# internet stability issues and the like
except Exception as e:
try:
print(e)
print('Exception raised. Pausing for \
2 seconds and trying again')
time.sleep(2)
smiles_list.append(cirpy.resolve(item, 'smiles'))
print(smiles_list[-1])
except Exception as e:
try:
print(e)
print('Exception raised. Pausing for another \
2 seconds and trying again')
time.sleep(2)
smiles_list.append(cirpy.resolve(item, 'smiles'))
print(smiles_list[-1])
except Exception as e:
try:
print(e)
print('Exception raised. Pausing for one more \
stretch and trying again')
time.sleep(2)
smiles_list.append(cirpy.resolve(item, 'smiles'))
print(smiles_list[-1])
except Exception as e:
print(e)
print('It still raised an exception. Here\'s \
how far it got:')
print(smiles_list)
print(len(smiles_list))
print('This item will be added to a list \
called missed items.')
print(item)
smiles_list.append('Check')
missed_items.append(item)
already_queried.append(item)
# tidy these up into pandas dataframes and export them as csv files
chem_df = pd.DataFrame(zip(chem_strings, smiles_list),
columns=('Name', 'SMILES'))
chem_df = chem_df.dropna()
chem_df.to_csv(os.path.splitext(pdf_path)[0]+'_'+datetime.today().
strftime('%Y%m%d')+'_names_and_SMILES.csv')
if missed_items:
missed_df = pd.DataFrame(missed_items, columns=['Missed'])
missed_df = missed_df.drop_duplicates()
missed_df.to_csv(os.path.splitext(pdf_path)[0]+'_'+datetime.
today().strftime('%Y%m%d')+'_zzz_missed_items.csv')
def aggregate_csv_files(pdf_dir=None):
    # combines all results csv files in pdf_dir (defaults to the current
    # working directory) into a single csv file
    pdf_dir = pdf_dir if pdf_dir is not None else os.getcwd()
    all_chemicals = pd.concat([pd.read_csv(os.path.join(pdf_dir, filename))
                               for filename in os.listdir(pdf_dir) if
                               re.search('csv$', filename)])
    all_chemicals.to_csv(datetime.today().
                         strftime('%Y%m%d')+"combined_csv.csv",
                         index=False, encoding='utf-8-sig')
"""# Curate pdfs"""
#@title ## Curator output will appear below
def curate_folder(pdf_dir = os.getcwd()):
"""
Extract known chemicals from a folder of pdf files, and export a
.csv file of SMILESstrings, a machine-readable chemical format
for each file and a combined .csv for all the pdf files.
Extended Summary
----------------
Extract text from a pdf file. Use chemdataextractor's NLP to identify
chemical entities. Attempt to resolve each entity at NIH's CACTVS service.
Organize chemicals recognized by PubChem into a dataframe.
Export the chemical names and SMILES strings as a .csv files
Repeat for each pdf file in the folder
Parameters
----------
pdf_dir : string, optional
path to a folder of pdf files (the default is the current working
directory)
"""
pd.DataFrame(data=None, columns=('Name', 'SMILES'))
assert os.path.exists(pdf_dir), "I did not find the \
directory at, "+str(pdf_dir)
os.chdir(pdf_dir)
for filename in os.listdir(pdf_dir):
if re.search('pdf$', filename):
try:
                quick_curate(filename, pdf_method,
                             false_positives, regex_number)
except Exception as e:
print('An exception was raised for ' + filename)
print(e)
try:
aggregate_csv_files()
except Exception as e:
print(e)
"An error occurred while trying to combine the output csv files."
``` |
{
"source": "JohnGoertz/Gumbi",
"score": 3
} |
#### File: Gumbi/gumbi/plotting.py
```python
from __future__ import annotations # Necessary for self-type annotations until Python >3.10
from dataclasses import dataclass
from typing import Callable
import warnings
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from scipy.special import logit
from gumbi.aggregation import *
from gumbi.arrays import *
from gumbi.utils import round_to_n, Trigger
__all__ = ['ParrayPlotter']
@dataclass
class ParrayPlotter:
r"""Wrapper for a ``matplotlib.pyplot`` function; adjusts ticks and labels according to plotter settings.
Provides a consistent interface to matplotlib plotting functions that allows easy iteration between permutations
of plotting and tick labeling in natural, transformed, standardized space. When called on a plotting function,
a :class:`ParrayPlotter` instance passes pre-formated x and y (and z) arrays to the function as positional
arguments, along with any additional keyword arguments supplied. :class:`ParrayPlotter` then adjusts tick labels
according to its *\*_tick_scale* arguments.
Passing a ``.t`` or ``.z`` child of a parray automatically overrides the respective *_scale* argument. This is
    achieved by inspecting the variable name for a ``'_t'`` or ``'_z'`` suffix, so avoid using those suffixes in
    unrelated variable names to prevent confusion. Note that not all permutations of *\*_scale* and *\*_tick_scale* are
permitted: *_tick_scale* should generally either match the respective *_scale* argument or be ``'natural'``.
:class:`ParrayPlotter` also provides a :meth:`colorbar` method that adds a colorbar and reformats its ticks and
labels according to the *z_scale* and *z_tick_scale* attributes.
Parameters
----------
x_pa, y_pa: ParameterArray | LayeredArray | np.ndarray
X and Y arrays. If *z_pa* or *stdzr* are not supplied, x_pa or y_pa must contain a Standardizer instance.
z_pa: ParameterArray | LayeredArray | np.ndarray, optional
Z array for 2D plots. If *stdzr* is not supplied, *z_pa*, *x_pa*, or *y_pa* must contain a Standardizer instance.
stdzr: Standardizer, optional
Standardizer for converting ticks. Only optional if *z_pa*, *x_pa*, or *y_pa* contain a Standardizer instance.
x_scale, y_scale, z_scale : {'natural', 'transformed', 'standardized'}
Space in which to plot respective array. Ignored if array is not a :class:`ParameterArray`.
x_tick_scale, y_tick_scale, z_tick_scale : {'natural', 'transformed', 'standardized'}
Space in which to label ticks for respective axis. Should be 'natural' or match respective *\*_scale* argument.
Examples
--------
>>> import numpy as np
>>> import matplotlib.pyplot as plt
>>> from gumbi import Standardizer, ParrayPlotter, ParameterArray
>>> stdzr = Standardizer(x = {'μ': -5, 'σ': 0.5},
... y = {'μ': -0.3, 'σ': 0.15},
... z={'μ': 2, 'σ': 2},
... log_vars=['x', 'y'], logit_vars=['z'])
>>> x = np.arange(1, 10, 0.25)
>>> y = np.arange(1, 10, 0.25)
>>> x, y = np.meshgrid(x, y)
>>> z = np.sin(np.sqrt((x - 5) ** 2 + (y - 5) ** 2))**2*0.9+0.05
>>> xyz = ParameterArray(x=x, y=y, z=z, stdzr=stdzr)
Make a natural-space contour plot with user-specified levels
>>> pp = ParrayPlotter(xyz['x'], xyz['y'], xyz['z'])
>>> pp(plt.contour, levels=8)
Use the same :class:`ParrayPlotter` to make a different pseudocolor plot and add a colorbar:
>>> pcm = pp(plt.pcolormesh, shading='gouraud')
>>> cbar = pp.colorbar(pcm, ax=plt.gca())
Make a filled contour plot with *x* plotted in natural-space and *x* tick labels displayed in natural-space,
*y* plotted in transformed space but *y* tick lables displayed in natural-space, and *z* plotted in standardized
space with a colorbar displaying standardized-space tick labels:
>>> pp = ParrayPlotter(xyz['x'], xyz['y'].t, xyz['z'], z_scale='standardized', z_tick_scale='standardized')
>>> cs = pp(plt.contourf)
>>> cbar = pp.colorbar(cs)
"""
x: ParameterArray | LayeredArray | np.ndarray
y: UncertainParameterArray | UncertainArray | ParameterArray | LayeredArray | np.ndarray
z: UncertainParameterArray | UncertainArray | ParameterArray | LayeredArray | np.ndarray = None
stdzr: Standardizer = None
x_scale: str = 'natural'
x_tick_scale: str = 'natural'
y_scale: str = 'natural'
y_tick_scale: str = 'natural'
z_scale: str = 'natural'
z_tick_scale: str = 'natural'
def __post_init__(self):
self.update()
for arr in [self.z, self.y, self.x]:
if self.stdzr is None:
self.stdzr = getattr(arr, 'stdzr', None)
if self.stdzr is None:
raise ValueError('Standardizer must be provided if none of the arrays contain a Standardizer.')
def update(self):
self._update_x()
self._update_y()
if self.z is not None:
self._update_z()
else:
self.zlabel = None
self.z_ = None
def _update_x(self):
self.x_, self.xlabel, self.x_scale = _parse_array(self.x, self.x_scale)
def _update_y(self):
self.y_, self.ylabel, self.y_scale = _parse_array(self.y, self.y_scale)
def _update_z(self):
self.z_, self.zlabel, self.z_scale = _parse_array(self.z, self.z_scale)
def __call__(self, plotter: Callable, **kwargs):
r"""Wrapper for a ``matplotlib.pyplot`` function; adjusts ticks and labels according to plotter settings.
Parameters
----------
plotter: Callable
Plotting function to be wrapped. Must accept at least two or three positional arguments.
**kwargs
Additional keyword arguments passed to wrapped function.
Returns
-------
output
Output of wrapped function
"""
args = [arg for arg in [self.x_, self.y_, self.z_] if arg is not None]
out = plotter(*args, **kwargs)
ax = kwargs.get('ax', plt.gca())
_format_parray_plot_labels(ax, self.stdzr, self.xlabel, self.x_scale, self.x_tick_scale, self.ylabel,
self.y_scale, self.y_tick_scale)
return out
def colorbar(self, mappable=None, cax=None, ax=None, **kwargs):
"""Wrapper for ``matplotlib.pyplot.colorbar``; adjusts ticks and labels according to plotter settings."""
cbar = plt.colorbar(mappable=mappable, cax=cax, ax=ax, **kwargs)
if self.zlabel.endswith('_z') or self.zlabel.endswith('_t'):
self.zlabel = self.zlabel[:-2]
            # self.zlabel = self.zlabel.removesuffix('_z').removesuffix('_t')  # Use when Python>=3.9
_reformat_tick_labels(cbar, 'c', self.zlabel, self.z_scale, self.z_tick_scale, self.stdzr)
label = _augment_label(self.stdzr, self.zlabel, self.z_tick_scale)
cbar.set_label(label)
return cbar
def plot(self, ci=0.95, ax=None, palette=None, line_kws=None, ci_kws=None):
r"""
Parameters
----------
ci : float or None, default 0.95
Confidence interval on :math:`0<\mathtt{ci}<1`. If None, no confidence intervals will be drawn.
ax : plt.Axes, optional
Axes on which to plot. Defaults to ``plt.gca()``.
palette : str or array-like
Name of seaborn palette or list of colors (at least two) for plotting.
line_kws : dict, optional
Additional keyword arguments passed to ``plt.plot``.
ci_kws : dict, optional
Additional keyword arguments passed to :meth:``plot_ci``.
Returns
-------
ax : plt.Axes
Axes for the plot
"""
if self.z is not None:
raise NotImplementedError('Method "plot" not implemented when z_pa is present.')
line_kws = dict() if line_kws is None else line_kws
ci_kws = dict() if ci_kws is None else ci_kws
palette = sns.cubehelix_palette() if palette is None else palette
palette = sns.color_palette(palette) if type(palette) is str else palette
line_defaults = dict(lw=2, color=palette[-2], zorder=0)
ci_defaults = dict(lw=2, facecolor=palette[1], zorder=-1, alpha=0.5)
line_kws = {**line_defaults, **line_kws} # Fix once Python >= 3.9
ci_kws = {**ci_defaults, **ci_kws} # Fix once Python >= 3.9
ax = plt.gca() if ax is None else ax
ax.plot(self.x_, self.y_, **line_kws)
if ci is not None and hasattr(self.y, 'σ2'):
self.plot_ci(ci=ci, ax=ax, **ci_kws)
_format_parray_plot_labels(ax, self.stdzr, self.xlabel, self.x_scale, self.x_tick_scale, self.ylabel,
self.y_scale, self.y_tick_scale)
return ax
def plot_ci(self, ci=0.95, ci_style='fill', center='median', ax=None, **kwargs):
r"""Plots the confidence interval for an UncertainParameterArray.
Parameters
----------
ci : float or None, default 0.95
Confidence interval on :math:`0<\mathtt{ci}<1`. If None, no confidence intervals will be drawn.
ci_style : {'fill', 'band', 'errorbar', 'bar'}
Whether to plot CI using ``plt.fill_between`` (*fill* or *band*) or ``plt.errorbar`` (*errorbar* or *bar*).
center : {'median', 'mean'}
Which metric to plot as midpoint if using ``plt.errorbar``.
ax : plt.Axes, optional
Axes on which to plot. Defaults to ``plt.gca()``.
**kwargs
Additional keyword arguments passed to ``plt.fill_between`` or ``plt.errorbar``.
Returns
-------
ax : plt.Axes
Axes for the plot
"""
if self.z is not None:
raise NotImplementedError('Method "plot_ci" not supported when z_pa is present.')
if not hasattr(self.y, 'σ2'):
raise NotImplementedError('Method "plot_ci" only supported when y_pa has the "σ2" attribute.')
ax = plt.gca() if ax is None else ax
y, *_ = _parse_uparray(self.y, self.y_scale)
l = y.dist.ppf((1 - ci) / 2)
m = y.dist.ppf(0.5) if center == 'median' else y.μ
u = y.dist.ppf((1 + ci) / 2)
fill_between_styles = ['fill', 'band']
errorbar_styles = ['errorbar', 'bar']
if ci_style in fill_between_styles:
ax.fill_between(self.x_, l, u, **kwargs)
elif ci_style in errorbar_styles:
ax.errorbar(self.x_, m, m-l, u-m, **kwargs)
else:
            raise ValueError(f'ci_style must be one of {fill_between_styles + errorbar_styles}')
return ax
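# Usage sketch for the 1-D helpers above (assumptions: `x_pa` is a ParameterArray and
# `y_upa` an UncertainParameterArray sharing a Standardizer, e.g. predictions returned by
# a gumbi Regressor; neither variable is defined in this module):
#
#   pp = ParrayPlotter(x_pa, y_upa)
#   ax = pp.plot(ci=0.95)                            # line plus 95% credible band
#   pp.plot_ci(ci=0.5, ci_style='errorbar', ax=ax)   # overlay a tighter interval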
def _parse_array(array, scale) -> (np.ndarray, str, str):
if isinstance(array, (UncertainParameterArray, UncertainArray)):
array, label, scale = _parse_uparray(array, scale)
array = array.μ
elif isinstance(array, (ParameterArray, LayeredArray)):
array, label, scale = _parse_parray(array, scale)
array = array.values()
else:
array, label, scale = _parse_parray(array, scale)
return array, label, scale
def _parse_parray(pa, scale) -> (ParameterArray | LayeredArray | np.ndarray, str, str):
if isinstance(pa, ParameterArray):
if scale == 'standardized':
array = pa.z
elif scale == 'transformed':
array = pa.t
else:
array = pa
label = pa.names[0]
elif isinstance(pa, LayeredArray):
array = pa
label = pa.names[0]
if pa.names[0].endswith('_z'):
scale = 'standardized'
elif pa.names[0].endswith('_t'):
scale = 'transformed'
else:
array = pa
label = ''
return array, label, scale
def _parse_uparray(upa, scale) -> (UncertainParameterArray | UncertainArray, str, str):
if isinstance(upa, UncertainParameterArray):
if scale == 'standardized':
array = upa.z
elif scale == 'transformed':
array = upa.t
else:
array = upa
elif isinstance(upa, UncertainArray):
if upa.name.endswith('_z'):
scale = 'standardized'
elif upa.name.endswith('_t'):
scale = 'transformed'
array = upa
else:
raise TypeError('Array must be either an UncertainParameterArray or an UncertainArray.')
label = upa.name
return array, label, scale
def _format_parray_plot_labels(ax, stdzr, xlabel, x_scale, x_tick_scale, ylabel, y_scale, y_tick_scale):
if xlabel.endswith('_z') or xlabel.endswith('_t'):
xlabel = xlabel[:-2]
if ylabel.endswith('_z') or ylabel.endswith('_t'):
ylabel = ylabel[:-2]
# xlabel = xlabel.removesuffix('_z').removesuffix('_t') # Use when Python>=3.9
# ylabel = ylabel.removesuffix('_z').removesuffix('_t') # Use when Python>=3.9
_reformat_tick_labels(ax, 'x', xlabel, x_scale, x_tick_scale, stdzr)
_reformat_tick_labels(ax, 'y', ylabel, y_scale, y_tick_scale, stdzr)
label = _augment_label(stdzr, xlabel, x_tick_scale)
ax.set_xlabel(label)
label = _augment_label(stdzr, ylabel, y_tick_scale)
ax.set_ylabel(label)
def _augment_label(stdzr, label, tick_scale):
prefixes = {np.log: 'log ', logit: 'logit '}
transform = stdzr.transforms.get(label, [None])[0]
prefix = prefixes.get(transform, '') if tick_scale in ['transformed', 'standardized'] else ''
suffix = ' (standardized)' if tick_scale == 'standardized' else ''
return f'{prefix}{label}{suffix}'
def _reformat_tick_labels(ax, axis, name, current, new, stdzr, sigfigs=3):
tick_setters = {
# ('natural', 'standardized'): _n_ticks_z_labels,
# ('natural', 'transformed'): _n_ticks_t_labels,
('standardized', 'natural'): _z_ticks_n_labels,
('transformed', 'natural'): _t_ticks_n_labels,
}
if current != new:
tpl = (current, new)
if tpl not in tick_setters:
raise ValueError('Cannot convert ticks between {0} and {1}'.format(*tpl))
else:
tick_setter = tick_setters[tpl]
tick_setter(ax, axis, stdzr, name, sigfigs=sigfigs)
def _get_ticks_setter(ax, axis):
if axis == 'x':
ticks = ax.get_xticks()
set_ticks = ax.set_xticks
set_labels = ax.set_xticklabels
elif axis == 'y':
ticks = ax.get_yticks()
set_ticks = ax.set_yticks
set_labels = ax.set_yticklabels
elif axis == 'z':
ticks = ax.get_zticks()
set_ticks = ax.set_zticks
set_labels = ax.set_zticklabels
elif axis == 'c':
ticks = ax.get_ticks()
set_ticks = ax.set_ticks
set_labels = ax.set_ticklabels
def setter(*args, **kwargs):
# TODO: Find a better way to set tick labels
# Setting only labels throws a FixedLocator warning, but setting ticks first extends the plot area excessively
with warnings.catch_warnings():
warnings.simplefilter("ignore")
set_labels(*args, **kwargs)
# set_ticks(ticks)
# set_labels(*args, **kwargs)
return ticks, setter
def _get_label_setter(ax, axis):
if axis == 'x':
set_label = ax.set_xlabel
elif axis == 'y':
set_label = ax.set_ylabel
elif axis == 'z':
set_label = ax.set_zlabel
elif axis == 'c':
set_label = ax.set_label
return set_label
def _n_ticks_z_labels(ax, axis, stdzr, name, sigfigs=3):
ticks, set_ticklabels = _get_ticks_setter(ax, axis)
new_ticks = stdzr.stdz(name, ticks)
new_ticks = round_to_n(new_ticks, sigfigs)
set_ticklabels(new_ticks)
def _n_ticks_t_labels(ax, axis, stdzr, name, sigfigs=3):
ticks, set_ticklabels = _get_ticks_setter(ax, axis)
new_ticks = stdzr.transform(name, ticks)
new_ticks = round_to_n(new_ticks, sigfigs)
set_ticklabels(new_ticks)
def _z_ticks_n_labels(ax, axis, stdzr, name, sigfigs=3):
ticks, set_ticklabels = _get_ticks_setter(ax, axis)
new_ticks = stdzr.unstdz(name, ticks)
new_ticks = round_to_n(new_ticks, sigfigs)
set_ticklabels(new_ticks)
def _t_ticks_n_labels(ax, axis, stdzr, name, sigfigs=3):
ticks, set_ticklabels = _get_ticks_setter(ax, axis)
new_ticks = stdzr.untransform(name, ticks)
new_ticks = round_to_n(new_ticks, sigfigs)
set_ticklabels(new_ticks)
```
#### File: gumbi/regression/base.py
```python
import warnings
from abc import ABC, abstractmethod
import numpy as np
import pandas as pd
from scipy.interpolate import interpn
from itertools import product
from gumbi.utils.misc import assert_in, assert_is_subset
from gumbi.utils.gp_utils import get_ℓ_prior
from gumbi.aggregation import DataSet
from gumbi.arrays import *
from gumbi.arrays import ParameterArray as parray
from gumbi.arrays import UncertainParameterArray as uparray
from gumbi.arrays import MVUncertainParameterArray as mvuparray
__all__ = ['Regressor']
class Regressor(ABC):
r"""Surface learning and prediction.
A Regressor is built from a dataframe in the form of a :class:`DataSet` object. This is stored as
:attr:`tidy`. The model inputs are constructed by filtering this dataframe, extracting column values, and
converting these to numerical input coordinates. Each subclass defines at least `build_model`, `fit`, and `predict_points`
methods in addition to subclass-specific methods.
Dimensions fall into several categories:
* Filter dimensions, those with only one level, are used to subset the dataframe but are not included as explicit
inputs to the model. These are not specified explicitly, but rather any continuous or categorical dimension with only one
level is treated as a filter dimension.
* Continuous dimensions are treated as explicit coordinates and given a Radial Basis Function kernel
* Linear dimensions (which must be a subset of `continuous_dims`) have an additional linear kernel.
* Coregion dimensions imply a distinct but correlated output for each level
* If more than one output is specified, ``self.out_col`` is treated as a categorical dim.
Parameters
----------
dataset : DataSet
Data for fitting.
outputs : str or list of str, default None
Name(s) of output(s) to learn. If ``None``, uses all values from ``outputs`` attribute of *dataset*.
seed : int
Random seed
Attributes
----------
data : DataSet
Data for fitting.
outputs : list of str, optional
Name(s) of output(s) to learn.
seed : int
Random seed
continuous_dims : list of str
Columns of dataframe used as continuous dimensions
linear_dims : list of str
Subset of continuous dimensions to apply an additional linear kernel.
continuous_levels : dict
Values considered within each continuous column as ``{dim: [level1, level2]}``
continuous_coords : dict
Numerical coordinates of each continuous level within each continuous dimension as ``{dim: {level: coord}}``
categorical_dims : list of str
Columns of dataframe used as categorical dimensions
categorical_levels : dict
Values considered within each categorical column as ``{dim: [level1, level2]}``
categorical_coords : dict
Numerical coordinates of each categorical level within each categorical dimension as ``{dim: {level: coord}}``
additive : bool
Whether to treat categorical dimensions as additive or joint
filter_dims : dict
Dictionary of column-value pairs used to filter dataset before fitting
X : array
A 2D tall array of input coordinates.
y : array
A 1D vector of observations
"""
def __init__(self, dataset: DataSet, outputs=None, seed=2021):
if not isinstance(dataset, DataSet):
raise TypeError('Learner instance must be initialized with a DataSet object')
self.data = dataset
self.stdzr = dataset.stdzr
outputs = outputs if outputs is not None else dataset.outputs
self.outputs = outputs if isinstance(outputs, list) else [outputs]
self.out_col = dataset.names_column
self.seed = seed
self.continuous_dims = []
self.linear_dims = []
self.continuous_levels = {}
self.continuous_coords = {}
self.categorical_dims = []
self.categorical_levels = {}
self.categorical_coords = {}
self.additive = False
self.model_specs = {}
self.X = None
self.y = None
self.grid_vectors = None
self.grid_parray = None
self.grid_points = None
self.ticks = None
self.predictions = None
################################################################################
# Model building and fitting
################################################################################
@abstractmethod
def fit(self, *args, **kwargs):
"""Defined by subclass
See Also
--------
:meth:`GP.fit`
:meth:`GLM.fit`
"""
pass
@abstractmethod
def build_model(self, *args, **kwargs):
"""Defined by subclass
See Also
--------
:meth:`GP.build_model`
:meth:`GLM.build_model`
"""
pass
################################################################################
# Properties and convenience methods
################################################################################
def parray(self, **kwargs) -> parray:
"""Creates a parray with the current instance's stdzr attached"""
return parray(stdzr=self.stdzr, **kwargs)
def uparray(self, name: str, μ: np.ndarray, σ2: np.ndarray, **kwargs) -> uparray:
"""Creates a uparray with the current instance's stdzr attached"""
return uparray(name, μ, σ2, stdzr=self.stdzr, **kwargs)
def mvuparray(self, *uparrays, cor, **kwargs) -> mvuparray:
"""Creates a uparray with the current instance's stdzr attached"""
return mvuparray(*uparrays, cor=cor, stdzr=self.stdzr, **kwargs)
@property
def dims(self) -> list:
"""List of all dimensions under consideration"""
return self.continuous_dims + self.categorical_dims
@property
def levels(self) -> dict:
"""Dictionary of values considered within each dimension as ``{dim: [level1, level2]}``"""
return {**self.continuous_levels, **self.categorical_levels} # Fix once Python >= 3.9
@property
def coords(self) -> dict:
""" Dictionary of numerical coordinates of each level within each dimension as ``{dim: {level: coord}}``"""
return {**self.continuous_coords, **self.categorical_coords} # Fix once Python >= 3.9
################################################################################
# Preprocessing
################################################################################
def specify_model(self, outputs=None, linear_dims=None, continuous_dims=None, continuous_levels=None, continuous_coords=None,
categorical_dims=None, categorical_levels=None, additive=False):
"""Checks for consistency among dimensions and levels and formats appropriately.
Parameters
----------
outputs : str or list of str, default None
Name(s) of output(s) to learn. If ``None``, :attr:`outputs` is used.
        linear_dims : str or list of str, optional
            Subset of continuous dimensions to apply an additional linear kernel. If ``None``, no linear kernel is added.
continuous_dims : str or list of str, optional
Columns of dataframe used as continuous dimensions
continuous_levels : str, list, or dict, optional
Values considered within each continuous column as ``{dim: [level1, level2]}``
continuous_coords : list or dict, optional
Numerical coordinates of each continuous level within each continuous dimension as ``{dim: {level: coord}}``
categorical_dims : str or list of str, optional
Columns of dataframe used as categorical dimensions
categorical_levels : str, list, or dict, optional
Values considered within each categorical column as ``{dim: [level1, level2]}``
additive : bool, default False
Whether to treat categorical_dims as additive or joint (default)
Returns
-------
self : :class:`GP`
"""
# Ensure output is valid and format as list
outputs = outputs if outputs is not None else self.outputs
assert_is_subset(self.out_col, outputs, self.data.outputs)
self.outputs = outputs if isinstance(outputs, list) else [outputs]
# Ensure dimensions are valid and format as list
self.continuous_dims = self._parse_dimensions(continuous_dims)
self.linear_dims = self._parse_dimensions(linear_dims)
self.categorical_dims = self._parse_dimensions(categorical_dims)
if set(self.categorical_dims) & set(self.continuous_dims) != set():
raise ValueError('Overlapping items in categorical_dims and continuous_dims')
# Ensure levels are valid and format as dict
self.continuous_levels = self._parse_levels(self.continuous_dims, continuous_levels)
self.categorical_levels = self._parse_levels(self.categorical_dims, categorical_levels)
# Add self.out_col to the end of the categorical list
self.categorical_dims += [self.out_col]
self.categorical_levels[self.out_col] = self.outputs
# Move dims with only one level to separate list
self.filter_dims = {}
for dim in self.dims:
levels = self.levels[dim]
if len(levels) == 1:
self.filter_dims[dim] = levels
self.continuous_dims = [d for d in self.continuous_dims if d != dim]
self.categorical_dims = [d for d in self.categorical_dims if d != dim]
self.continuous_levels = {d: l for d, l in self.continuous_levels.items() if d != dim}
self.categorical_levels = {d: l for d, l in self.categorical_levels.items() if d != dim}
# Ensure coordinates are valid and format as dict-of-dicts
self.continuous_coords = self._parse_coordinates(self.continuous_dims, self.continuous_levels, continuous_coords)
self.categorical_coords = self._parse_coordinates(self.categorical_dims, self.categorical_levels, None)
# Add 'X' and 'Y' to the beginning of the continuous list
# if 'Y' not in self.continuous_dims:
# self.continuous_dims = ['Y'] + self.continuous_dims
# if 'X' not in self.continuous_dims:
# self.continuous_dims = ['X'] + self.continuous_dims
# self.continuous_levels | {dim: self.tidy.tidy[dim].unique() for dim in ['X', 'Y']} | self.continuous_levels}
# self.continuous_coords | {dim: {level: level for level in self.continuous_levels[dim]} for dim in ['X', 'Y']} | self.continuous_coords}
assert_is_subset('continuous dimensions', self.linear_dims, self.continuous_dims)
self.additive = additive
return self
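    #   Example call (a sketch; the dimension names below are placeholders for illustration,
    #   not columns guaranteed to exist in any particular DataSet):
    #
    #       regressor.specify_model(continuous_dims=['Temperature', 'pH'],
    #                               linear_dims=['Temperature'],
    #                               categorical_dims=['Enzyme'])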
def _parse_dimensions(self,
dims: None or str or list) -> list:
"""Ensure dimensions are possible and formatted as list"""
if dims is not None:
assert self.out_col not in dims
dims = dims if isinstance(dims, list) else [dims]
assert_is_subset('columns', dims, self.data.tidy.columns)
else:
dims = []
return dims
def _parse_levels(self, dims: list, levels: None or str or list or dict) -> dict:
"""Perform consistency checks between dimensions and levels and format `levels` as dict"""
if len(dims) != 0:
if levels is None:
# Use all levels of all dims
levels = {dim: list(self.data.tidy[dim].unique()) for dim in dims}
elif any(isinstance(levels, typ) for typ in [str, list]):
# If only a single dim is supplied, convert levels to dictionary
assert len(dims) == 1, 'Non-dict argument for `levels` only allowed if `len(dims)==1`'
levels = levels if isinstance(levels, list) else [levels]
levels = {dims[0]: levels}
elif isinstance(levels, dict):
# Ensure levels are specified as lists
for d, v in levels.items():
if not isinstance(v, list):
levels[d] = [v]
# Ensure each dimension specified by levels is valid
bad = [dim for dim in levels.keys() if dim not in dims]
if bad:
raise KeyError(f'Dimensions {bad} specified in *levels not found in *dims')
# Ensure each level is valid
bad = {k: v for k, vs in levels.items() for v in vs if v not in self.data.tidy[k].unique()}
if bad:
raise ValueError(f'Values specified in *levels not found in tidy: {bad}')
# Use all levels of remaining dims
levels.update({dim: list(self.data.tidy[dim].unique()) for dim in dims if dim not in levels.keys()}) # Fix once Python >= 3.9
else:
raise TypeError('`levels` must be of type str, list, or dict')
for dim in dims:
assert_is_subset(f'data[{dim}]', levels[dim], self.data.tidy[dim])
else:
levels = {}
return levels
def _parse_coordinates(self, dims: list, levels: dict, coords: None or list or dict) -> dict:
"""Check for consistency between supplied dims/levels/coords or generate coords automatically"""
if coords is not None:
if isinstance(coords, dict):
# Ensure all dim-level pairs in ``levels`` and ``coords`` match exactly
level_tuples = [(dim, level) for dim, levels_list in levels.items() for level in levels_list]
coord_tuples = [(dim, level) for dim, coord_dict in coords.items() for level in coord_dict.keys()]
assert_is_subset('coordinates', coord_tuples, level_tuples)
assert_is_subset('coordinates', level_tuples, coord_tuples)
elif isinstance(coords, list):
assert len(levels.keys()) == 1, \
'Non-dict argument for `continuous_coords` only allowed if `len(continuous_dims)==1`'
dim = dims[0]
assert len(coords) == len(levels[dim])
coords = {dim: {level: coord for level, coord in zip(levels[dim], coords)}}
else:
raise TypeError('Coordinates must be of type list or dict')
if not all(isinstance(coord, (int, float))
for coord_dict in coords.values()
for coord in coord_dict.values()):
raise TypeError('Coordinates must be numeric')
elif dims is not None and levels is not None:
coords = {dim: self._make_coordinates(dim, levels_list) for dim, levels_list in levels.items()}
else:
coords = {}
return coords
def _make_coordinates(self, dim: str, levels_list: list) -> dict:
"""Generate numerical coordinates for each level in each dim under consideration"""
df = self.data.tidy
col = df[df[dim].isin(levels_list)][dim]
if col.dtype in [np.float32, np.float64, np.int32, np.int64]:
coords = {level: level for level in levels_list}
else:
coords = {level: col.astype('category').cat.categories.to_list().index(level) for level in levels_list}
return coords
def get_filtered_data(self, standardized=False, metric='mean'):
"""The portion of the dataset under consideration
A filter is built by comparing the values in the unstandardized dataframe with those in :attr:`filter_dims`,
:attr:`categorical_levels`, and :attr:`continuous_levels`, then the filter is applied to the standardized or
unstandardized dataframe as indicated by the `standardized` input argument.
Parameters
----------
standardized : bool, default True
Whether to return a subset of the raw tidy or the centered and scaled tidy
metric : str, default 'mean'
Which summary statistic to return (must be a value in the `Metric` column)
Returns
-------
tidy : pd.DataFrame
"""
df = self.data.tidy
allowed = df.isin(self.filter_dims)[self.filter_dims.keys()].all(axis=1)
if 'Metric' in df.columns:
assert_in('Metric', metric, self.data.tidy['Metric'].unique())
allowed &= df['Metric'] == metric
for dim, levels in self.levels.items():
allowed &= df[dim].isin(levels)
return df[allowed] if not standardized else self.data.tidy.z[allowed]
def get_structured_data(self, metric='mean'):
"""Formats input data and observations as parrays
Parameters
----------
metric : str, default 'mean'
Which summary statistic to return (must be a value in the `Metric` column)
Returns
-------
X : parray
A multilayered column vector of input coordinates.
y : parray
A multilayered (1D) vector of observations
See Also
--------
:meth:`get_filtered_data`
"""
df = self.get_filtered_data(standardized=False, metric=metric)
# Ensure same number of observations for every output (only possible if something broke)
assert len(set(sum(df[self.out_col] == output) for output in self.outputs)) == 1
# Assuming all parameters observed at the same points
# Extract the model dimensions from the dataframe for one of the parameters
dims = set(self.dims) - set([self.out_col])
dim_values = {dim: df[df[self.out_col] == self.outputs[0]].replace(self.coords)[dim].values for dim in dims}
X = self.parray(**dim_values, stdzd=False)
# List of parrays for each output
outputs = {output: df[df[self.out_col] == output]['Value'].values for output in self.outputs}
y = self.parray(**outputs, stdzd=False)
return X, y
def get_shaped_data(self, metric='mean'):
"""Formats input data and observations as plain numpy arrays
Parameters
----------
metric : str, default 'mean'
Which summary statistic to return (must be a value in the `Metric` column)
Returns
-------
X : np.ndarray
A tall matrix of input coordinates with shape (n_obs, n_dims).
y : np.ndarray
A (1D) vector of observations
See Also
--------
:meth:`get_filtered_data`
"""
self.X, self.y = self.get_structured_data(metric=metric)
# Convert ParameterArray into plain numpy tall array
if self.out_col in self.dims:
ordered_outputs = {k: v for k, v in sorted(self.coords[self.out_col].items(), key=lambda item: item[1])}
y = np.hstack([self.y.z[output+'_z'].values() for output in ordered_outputs.keys()])
X = self.X[:, None] # convert to column vector
X = parray.vstack([X.add_layers(**{self.out_col: coord}) for coord in ordered_outputs.values()])
X = np.atleast_2d(np.column_stack([X[dim].z.values().squeeze() for dim in self.dims]))
else:
y = self.y.z.values().squeeze()
X = np.atleast_2d(np.column_stack([self.X[dim].z.values().squeeze() for dim in self.dims]))
return X, y
################################################################################
# Prediction
################################################################################
@abstractmethod
def predict(self, points_array, with_noise=True, **kwargs):
"""Defined by subclass.
It is not recommended to call :meth:`predict` directly, since it requires a very specific formatting for inputs,
specifically a tall array of standardized coordinates in the same order as :attr:`dims`. Rather, one of the
convenience functions :meth:`predict_points` or :meth:`predict_grid` should be used, as these have a more
intuitive input structure and format the tidy appropriately prior to calling :meth:`predict`.
See Also
--------
:meth:`GP.predict`
:meth:`GLM.predict`
Returns
-------
prediction_mean, prediction_var : list of np.ndarray
Mean and variance of predictions at each supplied points
"""
pass
def _check_has_prediction(self):
"""Does what it says on the tin"""
if self.predictions is None:
raise ValueError('No predictions found. Run self.predict_grid or related method first.')
def _parse_prediction_output(self, output):
if self.out_col in self.categorical_dims:
# Multiple parameters are possible, determine which ones to predict
if output is None:
# predict all parameters in model
output = self.categorical_levels[self.out_col]
elif isinstance(output, list):
assert_is_subset('Outputs', output, self.categorical_levels[self.out_col])
elif isinstance(output, str):
output = [output]
assert_is_subset('Outputs', output, self.categorical_levels[self.out_col])
else:
raise ValueError('"output" must be list, string, or None')
else:
# If self.out_col is not in categorical_dims, it must be in filter_dims, and only one is possible
output = self.filter_dims[self.out_col]
return output
def _prepare_points_for_prediction(self, points: ParameterArray, output):
points = np.atleast_1d(points)
assert points.ndim == 1
assert set(self.dims) - set([self.out_col]) == set(points.names), \
'All model dimensions must be present in "points" parray.'
if self.out_col in self.categorical_dims:
# Multiple parameters are possible, determine which ones to predict
# Get model coordinates for each output to be predicted
param_coords = [self.categorical_coords[self.out_col][p] for p in output]
# Convert input points to tall array and tile once for each output, adding the respective coordinate
tall_points = parray.vstack([points.add_layers(**{self.out_col: coord})[:, None] for coord in param_coords])
else:
# If self.out_col is not in categorical_dims, it must be in filter_dims, and only one is possible
# Convert input points to tall array
param_coords = None
tall_points = points[:, None]
# Combine standardized coordinates into an ordinary tall numpy array for prediction
points_array = np.hstack([tall_points[dim].z.values() for dim in self.dims])
return points_array, tall_points, param_coords
def predict_points(self, points, output=None, with_noise=True, **kwargs):
"""Make predictions at supplied points
Parameters
----------
points : ParameterArray
1-D ParameterArray vector of coordinates for prediction, must have one layer per ``self.dims``
output : str or list of str, optional
Variable for which to make predictions
with_noise : bool, default True
Whether to incorporate aleatoric uncertainty into prediction error
**kwargs
Additional keyword arguments passed to subclass-specific :meth:`predict` method
Returns
-------
prediction : UncertainParameterArray
Predictions as a `uparray`
"""
output = self._parse_prediction_output(output)
points_array, tall_points, param_coords = self._prepare_points_for_prediction(points, output=output)
# Prediction means and variance as a list of numpy vectors
pred_mean, pred_variance = self.predict(points_array, with_noise=with_noise, **kwargs)
self.predictions_X = points
# Store predictions in appropriate structured array format
if len(output) == 1:
# Predicting one output, return an UncertainParameterArray
self.predictions = self.uparray(output[0], pred_mean, pred_variance, stdzd=True)
else:
# Predicting multiple parameters, return an MVUncertainParameterArray
# First split prediction into UncertainParameterArrays
uparrays = []
for i, name in enumerate(output):
idx = (tall_points[self.out_col].values() == param_coords[i]).squeeze()
μ = pred_mean[idx]
σ2 = pred_variance[idx]
uparrays.append(self.uparray(name, μ, σ2, stdzd=True))
# Calculate the correlation matrix from the hyperparameters of the coregion kernel
W = self.MAP[f'W_{self.out_col}'][param_coords, :]
κ = self.MAP[f'κ_{self.out_col}'][param_coords]
B = W @ W.T + np.diag(κ) # covariance matrix
D = np.atleast_2d(np.sqrt(np.diag(B))) # standard deviations
cor = B / (D.T @ D) # correlation matrix
# Store predictions as MVUncertainParameterArray
self.predictions = self.mvuparray(*uparrays, cor=cor)
return self.predictions
def prepare_grid(self, limits=None, at=None, resolution=100):
"""Prepare unobserved input coordinates for specified continuous dimensions.
Parameters
----------
limits : ParameterArray
List of min/max values as a single parray with one layer for each of a subset of `continuous_dims`.
at : ParameterArray
A single parray of length 1 with one layer for each remaining `continuous_dims` by name.
ticks : dict
resolution : dict or int
Number of points along each dimension, either as a dictionary or one value applied to all dimensions
Returns
-------
"""
# Remove any previous predictions to avoid confusion
self.predictions = None
self.predictions_X = None
##
## Check inputs for consistency and completeness
##
# Ensure "at" is supplied correctly
if at is None:
at = self.parray(none=[])
elif not isinstance(at, ParameterArray):
raise TypeError('"at" must be a ParameterArray')
elif at.ndim != 0:
raise ValueError('"at" must be single point, potentially with multiple layers')
# Ensure a grid can be built
at_dims = set(at.names)
continuous_dims = set(self.continuous_dims)
limit_dims = continuous_dims-at_dims
# If there are no remaining dimensions
if limit_dims == set():
raise ValueError('At least one dimension must be non-degenerate to generate grid.')
# If no limits are supplied
if limits is None:
# Fill limits with default values
limits = self.parray(**{dim: [-2.5, +2.5] for dim in self.dims if dim in limit_dims}, stdzd=True)
else:
# Append default limits to `limits` for unspecified dimensions
if not isinstance(limits, ParameterArray):
raise TypeError('"limits" must be a ParameterArray')
remaining_dims = limit_dims-set(limits.names)
if remaining_dims:
dflt_limits = self.parray(**{dim: [-2.5, +2.5] for dim in remaining_dims}, stdzd=True)
limits = limits.add_layers(**dflt_limits.as_dict())
# Ensure all dimensions are specified without conflicts
limit_dims = set(limits.names)
if limit_dims.intersection(at_dims):
raise ValueError('Dimensions specified via "limits" and in "at" must not overlap.')
elif not continuous_dims.issubset(at_dims.union(limit_dims)-set(['none'])):
raise ValueError('Not all continuous dimensions are specified by "limits" or "at".')
# Format "resolution" as dict if necessary
if isinstance(resolution, int):
resolution = {dim: resolution for dim in self.continuous_dims}
elif not isinstance(resolution, dict):
raise TypeError('"resolution" must be a dictionary or an integer')
else:
assert_is_subset('continuous dimensions', resolution.keys(), self.continuous_dims)
##
## Build grids
##
# Store a dictionary with one 1-layer 1-D parray for the grid points along each dimension
# Note they may be different sizes
grid_vectors = {dim:
self.parray(
**{dim: np.linspace(*limits[dim].z.values(), resolution[dim])[:, None]},
stdzd=True)
for dim in limit_dims}
# Create a single n-layer n-dimensional parray for all evaluation points
grids = np.meshgrid(*[grid_vectors[dim] for dim in self.dims if dim in limit_dims])
grid_parray = self.parray(**{array.names[0]: array.values() for array in grids})
# Add values specified in "at" to all locations in grid_parray
if at.names != ['none']:
at_arrays = {dim: np.full(grid_parray.shape, value) for dim, value in at.as_dict().items()}
grid_parray = grid_parray.add_layers(**at_arrays)
# Store dimensions along which grid was formed, ensuring the same order as self.dims
self.prediction_dims = [dim for dim in self.dims if dim in limit_dims]
self.grid_vectors = grid_vectors
self.grid_parray = grid_parray
self.grid_points = self.grid_parray.ravel()
return grid_parray
def predict_grid(self, output=None, categorical_levels=None, with_noise=True, **kwargs):
"""Make predictions and reshape into grid.
If the model has :attr:`categorical_dims`, a specific level for each dimension must be specified as key-value
pairs in `categorical_levels`.
Parameters
----------
output : str or list of str, optional
Variable(s) for which to make predictions
categorical_levels : dict, optional
Level for each :attr:`categorical_dims` at which to make prediction
with_noise : bool, default True
Whether to incorporate aleatoric uncertainty into prediction error
Returns
-------
prediction : UncertainParameterArray
Predictions as a grid with len(:attr:`continuous_dims`) dimensions
"""
if self.grid_points is None:
raise ValueError('Grid must first be specified with `prepare_grid`')
points = self.grid_points
if self.categorical_dims:
points = self.append_categorical_points(points, categorical_levels=categorical_levels)
self.predict_points(points, output=output, with_noise=with_noise, **kwargs)
self.predictions = self.predictions.reshape(self.grid_parray.shape)
self.predictions_X = self.predictions_X.reshape(self.grid_parray.shape)
return self.predictions
def append_categorical_points(self, continuous_parray, categorical_levels):
"""Appends coordinates for the supplied categorical dim-level pairs to tall array of continuous coordinates.
Parameters
----------
continuous_parray : ParameterArray
Tall :class:`ParameterArray` of coordinates, one layer per continuous dimension
categorical_levels : dict
Single level for each :attr:`categorical_dims` at which to make prediction
Returns
-------
points : ParameterArray
Tall `ParameterArray` of coordinates, one layer per continuous and categorical dimension
"""
if categorical_levels is not None:
if set(categorical_levels.keys()) != (set(self.categorical_dims) - set([self.out_col])):
raise AttributeError('Must specify level for every categorical dimension')
points = continuous_parray.fill_with(**{dim: self.categorical_coords[dim][level]
for dim, level in categorical_levels.items()})
else:
points = continuous_parray
return points
################################################################################
# Proposals
################################################################################
def propose(self, target, acquisition='EI'):
"""Bayesian Optimization with Expected Improvement acquisition function"""
if self.predictions is None:
raise ValueError('No predictions to make proposal from!')
assert_in(acquisition, ['EI', 'PD'])
output = self.predictions.name
df = self.get_filtered_data(standardized=False)
df = df[df[self.out_col] == output]
observed = self.parray(**{output: df['Values']}, stdzd=False)
target = self.parray(**{output: target}, stdzd=False)
best_yet = np.min(np.sqrt(np.mean(np.square(observed.z - target.z))))
if acquisition == 'EI':
self.proposal_surface = self.predictions.z.EI(target.z, best_yet.z)
elif acquisition == 'PD':
self.proposal_surface = self.predictions.z.nlpd(target.z)
self.proposal_idx = np.argmax(self.proposal_surface)
self.proposal = self.predictions_X.ravel()[self.proposal_idx]
return self.proposal
################################################################################
# Evaluation
################################################################################
def cross_validate(self, unit=None, *, n_train=None, pct_train=None, train_only=None, warm_start=True, seed=None,
errors='natural', **MAP_kws):
"""Fits model on random subset of tidy and evaluates accuracy of predictions on remaining observations.
This method finds unique combinations of values in the columns specified by ``dims``, takes a random subset of
these for training, and evaluates the predictions made for the remaining tidy.
Notes
-----
:meth:`cross_validate` is *reproducibly random* by default. In order to evaluate different test/train subsets of
the same size, you will need to set the `seed` explicitly.
Specifying *unit* changes the interpretation of *n_train* and *pct_train*: rather than the number
or fraction of all individual observations to be included in the training set, these now represent the number
of distinct entities in the *unit* column from the wide-form dataset.
Criteria in *train_only* are enforced before grouping observations by *unit*. If *train_only* and *unit* are
both specified, but the *train_only* criteria encompass only some observations of a given entity in *unit*,
this could lead to unexpected behavior.
Similarly, if *warm_start* and *unit* are both specified, but a given entity appears in multiple categories
from any of the :attr:`categorical_dims`, this could lead to unexpected behavior. It is recommended to set
*warm_start* to `False` if this is the case.
Parameters
----------
unit : str, optional
Column from which to take unique values defining the distinct entities split into training and testing sets. This
can be useful when the data contain multiple (noisy) observations for each of several distinct entities.
n_train : int, optional
Number of training points to use. Exactly one of `n_train` and `pct_train` must be specified.
pct_train : float, optional
Percent of training points to use. Exactly one of `n_train` and `pct_train` must be specified.
train_only : dict, optional
Specifications for observations to be always included in the training set. This will select all rows of the
wide-form dataset which *exactly* match *all* criteria.
warm_start : bool, default True
Whether to include a minimum of one observation for each level in each `categorical_dim` in the training set.
seed : int, optional
Random seed
errors : {'natural', 'standardized', 'transformed'}
"Space" in which to return prediction errors
**MAP_kws
Additional keyword arguments passed to :meth:`find_MAP`.
Returns
-------
dict
Dictionary with nested dictionaries 'train' and 'test', both containing fields 'data', 'NLPDs', and 'errors'.
These fields contain the relevant subset of observations as a DataSet, an array of the negative log
posterior densities of observations given the predictions, and an array of the natural-space difference
between observations and prediction means, respectively.
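Examples
--------
Illustrative call on a fitted model (the keyword values below are placeholders, not defaults)::
    results = gp.cross_validate(pct_train=0.8, seed=2021)
    results['test']['NLPDs'].mean(), results['test']['errors']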
"""
if not (n_train is None) ^ (pct_train is None):
raise ValueError('Exactly one of "n_train" and "pct_train" must be specified')
if unit is not None:
if not isinstance(unit, str):
raise TypeError('Keyword "unit" must be a single string.')
assert_in('Keyword "errors"', errors, ['natural', 'standardized', 'transformed'])
seed = self.seed if seed is None else seed
rg = np.random.default_rng(seed)
df = self.data.wide
n_entities = len(set(df.index)) if unit is None else len(set(df.set_index(unit).index))
n_train = n_train if n_train is not None else np.floor(n_entities * pct_train).astype(int)
if n_train <= 0:
raise ValueError('Size of training set must be strictly greater than zero.')
if n_train > n_entities:
raise ValueError('Size of training set must not exceed number of observations or entities in dataset.')
# Build up a list of dataframes that make up the training set
train_list = []
if train_only is not None:
# Move items that match `train_only` criteria to training set
train_only_criteria = [df[dim] == level for dim, level in train_only.items()]
train_only_idxs = df.index[pd.concat(train_only_criteria, axis=1).all(axis=1)]  # keep only rows matching every criterion
train_only_df = df.loc[train_only_idxs] if unit is None else df.loc[train_only_idxs].set_index(unit)
n_train -= len(set(train_only_df.index))
if n_train < 0:
raise ValueError('Adding `train_only` observations exceeded specified size of training set')
train_list.append(train_only_df)
df = df.drop(index=train_only_idxs)
# Group data by columns specified as "unit"
if unit is not None:
df = df.set_index(unit)
remaining_entities = set(df.index)
if len(train_list) > 0:
# Ensure train_only didn't partially slice a unique entity
train_only_entities = set(train_list[-1].index)
if len(train_only_entities.intersection(remaining_entities)) > 0:
raise ValueError('Criteria in `train_only` partially sliced an entity specified by `unit`, which makes \
interpretation of `n_train` ambiguous.')
if n_train > len(df.index.unique()):
raise ValueError('Specified size of training set exceeds number of unique combinations found in `dims`')
if warm_start:
# Add one random item from each categorical level to the training set
if len(self.categorical_dims) > 0:
# Filter out any observations not in the specified categorical levels
level_combinations = list(product(*self.categorical_levels.values()))
cat_grps = (df
.groupby(self.categorical_dims)
.filter(lambda grp: grp.name not in level_combinations)
.groupby(self.categorical_dims))
if cat_grps.ngroups == 0:
raise ValueError(f'None of the combinations of categorical levels were found in data.\nCombinations:\n{level_combinations}')
# Randomly select one item from each group
warm_idxs = cat_grps.sample(1, random_state=seed).index
if len(set(warm_idxs)) != len(warm_idxs):
warnings.warn('Duplicate entities specified by `unit` were selected during `warm_start`. This may lead to unexpected behavior.')
n_train -= len(set(warm_idxs))
if n_train < 0:
raise ValueError('Adding `warm_start` observations exceeded specified size of training set')
train_list.append(df.loc[warm_idxs])
df = df.drop(index=warm_idxs)
# Move a random subset of the remaining items to the training set
train_idxs = rg.choice(df.index.unique(), n_train, replace=False)
for_train = df.loc[train_idxs]
train_list.append(for_train)
train_df = pd.concat(train_list).reset_index()
test_df = df.drop(train_idxs).reset_index()
categorical_dims = [dim for dim in self.categorical_dims if dim != self.out_col]
specifications = dict(outputs=self.outputs, linear_dims=self.linear_dims, continuous_dims=self.continuous_dims,
continuous_levels=self.continuous_levels, continuous_coords=self.continuous_coords,
categorical_dims=categorical_dims, categorical_levels=self.categorical_levels,
additive=self.additive)
train_specs = {**specifications, ** {
'continuous_levels': {dim: [lvl for lvl in lvls if lvl in train_df[dim].values]
for dim, lvls in self.continuous_levels.items()},
'categorical_levels': {dim: [lvl for lvl in lvls if lvl in train_df[dim].values]
for dim, lvls in self.categorical_levels.items()},
'continuous_coords': {dim: {lvl: coord for lvl, coord in coords.items() if lvl in train_df[dim].values}
for dim, coords in self.continuous_coords.items()}
}} # Fix once Python >= 3.9
test_specs = {**specifications, ** {
'continuous_levels': {dim: [lvl for lvl in lvls if lvl in test_df[dim].values]
for dim, lvls in self.continuous_levels.items()},
'categorical_levels': {dim: [lvl for lvl in lvls if lvl in test_df[dim].values]
for dim, lvls in self.categorical_levels.items()},
'continuous_coords': {dim: {lvl: coord for lvl, coord in coords.items() if lvl in test_df[dim].values}
for dim, coords in self.continuous_coords.items()}
}} # Fix once Python >= 3.9
dataset_specs = dict(outputs=self.data.outputs,
names_column=self.data.names_column,
values_column=self.data.values_column,
log_vars=self.data.log_vars,
logit_vars=self.data.logit_vars,
stdzr=self.data.stdzr)
train_ds = DataSet(train_df, **dataset_specs)
test_ds = DataSet(test_df, **dataset_specs)
# Build and fit a new object of the current class (GP, GLM, etc) with the training set
train_obj = self.__class__(train_ds, outputs=self.outputs, seed=seed)
train_obj.specify_model(**train_specs)
train_obj.filter_dims = self.filter_dims
train_obj.build_model(**self.model_specs)
train_obj.find_MAP(**MAP_kws) # TODO: make more general to allow alternative inference approaches
# Get in-sample prediction metrics
train_X, train_y = train_obj.get_structured_data()
train_predictions = train_obj.predict_points(train_X)
train_nlpd = train_predictions.nlpd(train_y.values())
train_error = {
'natural': train_y.values() - train_predictions.μ,
'transformed': train_y.t.values() - train_predictions.t.μ,
'standardized': train_y.z.values() - train_predictions.z.μ,
}[errors]
if len(test_df.index.unique()) > 0:
# If there's anything left for a testing set, build and fit a new object with the testing set
test_obj = self.__class__(test_ds, outputs=self.outputs, seed=seed)
# TODO: figure out why this was necessary and get rid of it
categorical_dims = [dim for dim in self.categorical_dims if dim != self.out_col]
test_specs['categorical_dims'] = categorical_dims
train_specs['categorical_dims'] = categorical_dims
test_obj.specify_model(**test_specs)
test_obj.filter_dims = self.filter_dims
# Get out-of-sample prediction metrics
test_X, test_y = test_obj.get_structured_data()
test_predictions = train_obj.predict_points(test_X)
test_nlpd = test_predictions.nlpd(test_y.values())
test_error = {
'natural': test_y.values() - test_predictions.μ,
'transformed': test_y.t.values() - test_predictions.t.μ,
'standardized': test_y.z.values() - test_predictions.z.μ,
}[errors]
else:
test_nlpd = np.nan
test_error = np.nan
result = {
'train': {
'data': train_ds,
'NLPDs': train_nlpd,
'errors': train_error},
'test': {
'data': test_ds,
'NLPDs': test_nlpd,
'errors': test_error}
}
return result
################################################################################
# Plotting
################################################################################
def get_conditional_prediction(self, **dim_values):
"""The conditional prediction at the given values of the specified dimensions over the remaining dimension(s).
Conditioning the prediction on specific values of `m` dimensions can be thought of as taking a "slice" along the
remaining `n` dimensions.
Performs `(m+n)`-dimensional interpolation over the entire prediction grid for each of the mean and variance
separately, then returns the interpolation evaluated at the specified values for the provided dimensions and the
original values for the remaining dimensions.
Parameters
----------
dim_values
Keyword arguments specifying value for each dimension at which to return the conditional prediction of the
remaining dimensions.
Returns
-------
conditional_grid: ParameterArray
`n`-dimensional grid with `n` parameters (layers) at which the conditional prediction is evaluated
conditional_prediction: UncertainParameterArray
`n`-dimensional grid of predictions conditional on the given values of the `m` specified dimensions
"""
self._check_has_prediction()
all_dims = self.prediction_dims
# All points along every axis (parrays)
# Note that these may not all be the same length
all_margins = {dim: vec.squeeze() for dim, vec in self.grid_vectors.items() if dim in self.prediction_dims}
# The dimensions to be "kept" are the ones not listed in kwargs
keep = set(all_dims) - set(dim_values.keys())
kept_margins = [all_margins[dim] for dim in self.prediction_dims if dim in keep]
# parray grid of original points along all "kept" dimensions
conditional_grid = self.parray(**{array.names[0]: array.values() for array in np.meshgrid(*kept_margins)})
# Add specified value for each remaining dimension at all points, then unravel
xi_parray = conditional_grid.add_layers(
**{dim: np.full(conditional_grid.shape, value) for dim, value in dim_values.items()}
).ravel()
# Stack standardized points into (ordinary) tall array, ensuring dimensions are in the right order for the model
xi_pts = np.column_stack([xi_parray[dim].z.values() for dim in self.dims if dim in xi_parray.names])
# Interpolate the mean and variance of the predictions
# Swapping the first two axes is necessary because the grids were generated with meshgrid's default "xy" indexing,
# but interpn expects "ij"-ordered (matrix-indexed) arrays
μ_arr = np.swapaxes(self.predictions.μ, 0, 1)
μi = interpn([all_margins[dim].z.values() for dim in self.dims if dim in self.prediction_dims], μ_arr, xi_pts)
σ2_arr = np.swapaxes(self.predictions.σ2, 0, 1)
σ2i = interpn([all_margins[dim].z.values() for dim in self.dims if dim in self.prediction_dims], σ2_arr, xi_pts)
conditional_prediction = self.uparray(self.predictions.name, μ=μi, σ2=σ2i).reshape(*conditional_grid.shape)
return conditional_grid.squeeze(), conditional_prediction.squeeze()
```
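For orientation, the prediction workflow defined above can be exercised roughly as sketched below. This is an illustrative, hypothetical snippet: `ds` stands for a `DataSet` such as the one built in `tests/test_regression.py` further down, and the output/dimension names ('d', 'X', 'Y') and target value follow that example rather than anything mandated by the library.
```python
from gumbi import GP

# `ds` is assumed to be an existing DataSet (see the test fixtures below for one way to build it).
gp = GP(ds, outputs='d')
gp.fit(continuous_dims=['X', 'Y'])             # specify_model + build_model + find_MAP

gp.prepare_grid(resolution=50)                 # default standardized limits of [-2.5, +2.5]
preds = gp.predict_grid()                      # UncertainParameterArray over the grid

proposal = gp.propose(target=0.5)              # Expected-Improvement proposal toward a target value

cv = gp.cross_validate(pct_train=0.8, seed=0)  # train/test split with in- and out-of-sample metrics
```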
#### File: regression/pymc3/extras.py
```python
from .GP import *
import numpy as np
import pandas as pd
import pymc3 as pm
import arviz as az
from functools import wraps
class GPC(GP):
@wraps(GP.build_model)
def build_model(self, seed=None, continuous_kernel='ExpQuad', heteroskedastic_inputs=False,
heteroskedastic_outputs=False, sparse=False, n_u=100, eps=1e-6):
if heteroskedastic_inputs:
raise NotImplementedError('The GP Classifier does not support heteroskedastic inputs.')
if heteroskedastic_outputs:
raise NotImplementedError('The GP Classifier does not support heteroskedastic outputs.')
if sparse:
raise NotImplementedError('The GP Classifier does not support sparse structure (yet).')
self.build_latent(seed=seed, continuous_kernel=continuous_kernel, eps=eps)
_, y = self.get_shaped_data('mean')
with self.model:
f = self.prior
# logit link and Bernoulli likelihood
p = pm.Deterministic("p", pm.math.invlogit(f))
_ = pm.Bernoulli("y", p=p, observed=y)
return self
@wraps(GP.draw_point_samples)
def draw_point_samples(self, points, *args, source=None, output=None, var_name='posterior_samples', additive_level='total',
increment_var=True, **kwargs):
var_name = self._recursively_append(var_name, increment_var=increment_var)
# Treat the drawn samples as a logit-transformed variable so the Standardizer maps them back to probability space
self.stdzr.logit_vars += [var_name]
return super(GPC, self).draw_point_samples(points, *args, source=source, output=output, var_name=var_name,
additive_level=additive_level, increment_var=True, **kwargs)
```
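A hypothetical usage sketch for the classifier above, mirroring the parent GP interface; the dataset `ds`, the output name 'hit', and the dimension names are placeholders, and the fitting entry points are the ones inherited from `GP`.
```python
# Illustrative only; assumes `ds` is a DataSet whose 'hit' output holds 0/1 observations.
gpc = GPC(ds, outputs='hit')
gpc.specify_model(continuous_dims=['X', 'Y'])
gpc.build_model()   # latent GP -> invlogit -> Bernoulli likelihood, as defined above
gpc.find_MAP()
```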
#### File: Gumbi/tests/test_regression.py
```python
import pytest
import pickle
import pandas as pd
import numpy as np
import pathlib as pl
import pymc3 as pm
from gumbi import GP, DataSet, Standardizer
test_dir = pl.Path(__file__).resolve().parent
test_data = test_dir / 'test_data'
example_stdzr = {
'a': {'μ': -0.762, 'σ2': 1.258**2},
'b': {'μ': -0.0368, 'σ2': 0.351**2},
'c': {'μ': -5.30, 'σ2': 0.582**2},
'd': {'μ': -0.307, 'σ2': 0.158**2},
'e': {'μ': -1.056, 'σ2': 0.398**2},
'f': {'μ': 3.34, 'σ2': 0.1501**2},
'X': {'μ': -0.282, 'σ2': 1**2},
'Y': {'μ': 4.48, 'σ2': 0.75**2},
'lg10_Z': {'μ': 5, 'σ2': 2**2},
}
log_vars = ['d', 'f', 'b', 'c', 'Y']
logit_vars = ['e', 'X']
################################################################################
# GP
################################################################################
@pytest.fixture
def example_estimates():
es = pd.read_pickle(test_data / 'test_dataset.pkl')
stdzr = Standardizer(**example_stdzr, log_vars=log_vars, logit_vars=logit_vars)
ds = DataSet.from_tidy(es, names_column='Parameter', stdzr=stdzr)
return ds
@pytest.fixture
def example_gp(example_estimates):
return GP(example_estimates, outputs='d')
# Input argument parsing
def test_gp_default_fit_parsing(example_gp):
gp = example_gp.specify_model(continuous_dims=['X', 'Y'])
assert gp.continuous_dims == ['X', 'Y']
assert gp.categorical_dims == []
def test_gp_data_parsing(example_gp):
gp = example_gp.specify_model(continuous_dims=['X', 'Y'])
X, y = gp.get_structured_data()
assert X.shape == (66,)
assert len(X.names) == 2
assert y.shape == (66,)
def test_gp_numerical_continuous_fit_parsing(example_gp):
gp = example_gp.specify_model(continuous_dims=['X', 'Y', 'lg10_Z'])
assert gp.continuous_dims == ['X', 'Y', 'lg10_Z']
for dim in gp.continuous_dims:
assert len(gp.continuous_levels[dim]) == len(gp.data.tidy[dim].unique())
assert len(gp.continuous_coords[dim].values()) == len(gp.continuous_levels[dim])
assert gp.categorical_dims == []
X, y = gp.get_structured_data()
assert X.shape == (66,)
assert len(X.names) == 3
assert y.shape == (66,)
def test_gp_categorical_continuous_fit_parsing(example_gp):
gp = example_gp.specify_model(continuous_dims=['X', 'Y', 'Name'])
assert gp.continuous_dims == ['X', 'Y', 'Name']
for dim in gp.continuous_dims:
assert len(gp.continuous_levels[dim]) == len(gp.data.tidy[dim].unique())
assert len(gp.continuous_coords[dim].values()) == len(gp.continuous_levels[dim])
assert gp.categorical_dims == []
X, y = gp.get_structured_data()
assert X.shape == (66,)
assert len(X.names) == 3
assert y.shape == (66,)
def test_gp_params_fit_parsing(example_gp):
gp = example_gp.specify_model(outputs=['d', 'c'], continuous_dims=['X', 'Y'])
assert gp.continuous_dims == ['X', 'Y']
assert gp.categorical_dims == ['Parameter']
assert gp.categorical_levels == {'Parameter': ['d', 'c']}
assert gp.categorical_coords == {'Parameter': {'d': 1, 'c': 0}}
X, y = gp.get_structured_data()
assert X.shape == (66,)
assert len(X.names) == 2
assert y.shape == (66,)
assert len(y.names) == 2
def test_gp_single_input_fit_parsing(example_gp):
gp = example_gp.specify_model(continuous_dims=['X', 'Y', 'Name'], continuous_levels={'Name': ['intense-opportunity']})
assert gp.continuous_dims == ['X', 'Y']
assert gp.filter_dims == {'Name': ['intense-opportunity'], 'Parameter': ['d']}
X, y = gp.get_structured_data()
assert X.shape == (7,)
assert len(X.names) == 2
assert y.shape == (7,)
# Model building
# def test_gp_build_model_simple(example_gp):
# gp = example_gp.specify_model(continuous_dims=['X', 'Y'])
# gp.build_model()
# assert isinstance(gp.model, pm.model.Model)
# assert isinstance(gp.gp_dict['total'], pm.gp.gp.Marginal)
# Combinatorial gp objects with various parameterizations
@pytest.fixture(params=[False, True])
def additive(request):
return request.param
@pytest.fixture(params=[{'outputs': ['d', 'c'], 'continuous_dims': ['X', 'Y']},
{'continuous_dims': ['X', 'Y'], 'categorical_dims': 'Code'},
{'continuous_dims': ['X', 'Y', 'Name']},
{'continuous_dims': ['X', 'Y', 'lg10_Z']},
{'continuous_dims': ['X', 'Y', 'Name'], 'continuous_levels': {'Name': ['intense-opportunity']}}])
def fit_inputs(request):
return request.param
@pytest.mark.slow
def test_gp_build_model(example_gp, fit_inputs, additive):
# Basically just makes sure that `build_model` runs without errors
gp = example_gp.specify_model(**fit_inputs, additive=additive)
gp.build_model()
assert isinstance(gp.model, pm.model.Model)
assert isinstance(gp.gp_dict['total'], pm.gp.gp.Marginal)
@pytest.mark.slow
def test_gp_build_model_additive(example_gp):
# Basically just makes sure that `build_model` runs without errors
gp = example_gp.specify_model(outputs=['d', 'c'], continuous_dims=['X', 'Y'], categorical_dims='lg10_Z', additive=True)
gp.build_model()
assert isinstance(gp.model, pm.model.Model)
assert all(name in gp.gp_dict.keys() for name in ['total', 'global', 'lg10_Z'])
# MAP estimation
def test_gp_fit_simple(example_gp):
# Basically just makes sure that `fit` runs without errors
gp = example_gp.fit(continuous_dims=['X', 'Y', 'lg10_Z'], continuous_levels={'lg10_Z': [8]})
assert isinstance(gp.MAP, dict)
@pytest.mark.slow
def test_gp_fit(example_gp, fit_inputs, additive):
# Basically just makes sure that `fit` runs without errors
gp = example_gp.fit(**fit_inputs, additive=additive)
assert isinstance(gp.MAP, dict)
``` |
{
"source": "johngoggs/goggs-gcp-endpointsrun",
"score": 3
} |
#### File: johngoggs/goggs-gcp-endpointsrun/app.py
```python
from flask import Flask, jsonify
app = Flask(__name__)
@app.route('/api/status', methods=['GET'])
def get_status():
return jsonify(message='Server is up!'), 200
@app.route('/api/auth/apikey', methods=['GET'])
def get_auth_apikey():
return jsonify(message='Congratulations. You have completed apikey auth.',
auth='apikey'), 200
@app.route('/api/auth/bearer', methods=['GET'])
def get_auth_sabearer():
return jsonify(message='Congratulations. You have completed service account bearer auth.',
auth='service account bearer'), 200
@app.errorhandler(400)
def bad_request_error(e):
return jsonify(code=400,
message='Bad request',
detailedMessage='{}'.format(e)), 400
@app.errorhandler(404)
def not_found_error(e):
return jsonify(code=404,
message='Path not found',
detailedMessage='{}'.format(e)), 404
@app.errorhandler(405)
def method_not_allowed_error(e):
return jsonify(code=405,
message='Method not allowed',
detailedMessage='{}'.format(e)), 405
@app.errorhandler(Exception)
def internal_server_error(e):
return jsonify(code=500,
message='Internal server error',
detailedMessage='{}'.format(e)), 500
if __name__ == '__main__':
app.run(host='127.0.0.1', port='8080')
``` |
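As a quick local sanity check, the routes above can be exercised with Flask's built-in test client; this is an illustrative snippet, not part of the deployed service.
```python
from app import app

client = app.test_client()

resp = client.get('/api/status')
print(resp.status_code, resp.get_json())   # 200 {'message': 'Server is up!'}

resp = client.get('/api/nope')
print(resp.status_code, resp.get_json())   # 404 with code/message/detailedMessage from the handler
```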
{
"source": "Johngoss725/Mixamo-To-Godot",
"score": 2
} |
#### File: Johngoss725/Mixamo-To-Godot/Mixamo_RM_Godot.py
```python
import bpy
import os
import ntpath
#Script Created By: Average Godot Enjoyer
def fixBones():
#print('Running Mixamo Armature Renaming Script.')
bpy.ops.object.mode_set(mode = 'OBJECT')
if not bpy.ops.object:
print('Please select the armature')
bpy.ops.object.transform_apply(location=True, rotation=True, scale=True)
bpy.context.object.show_in_front = True
for rig in bpy.context.selected_objects:
if rig.type == 'ARMATURE':
for mesh in rig.children:
for vg in mesh.vertex_groups:
#print(vg.name)
new_name = vg.name
new_name = new_name.replace("mixamorig:","")
#print(new_name)
rig.pose.bones[vg.name].name = new_name
vg.name = new_name
for bone in rig.pose.bones:
#print(bone.name.replace("mixamorig:",""))
bone.name = bone.name.replace("mixamorig:","")
for action in bpy.data.actions:
print(action.name)
fc = action.fcurves
for f in fc:
#print(f.data_path)
f.data_path = f.data_path.replace("mixamorig:","")
def scaleAll():
bpy.ops.object.mode_set(mode='OBJECT')
prev_context=bpy.context.area.type
bpy.ops.object.mode_set(mode='POSE')
bpy.ops.pose.select_all(action='SELECT')
bpy.context.area.type = 'GRAPH_EDITOR'
bpy.context.space_data.dopesheet.filter_text = "Location"
bpy.context.space_data.pivot_point = 'CURSOR'
bpy.context.space_data.dopesheet.use_filter_invert = False
#print(bpy.context.selected_objects)
bpy.ops.anim.channels_select_all(action='SELECT')
bpy.ops.transform.resize(value=(1, 0.01, 1), orient_type='GLOBAL',
orient_matrix=((1, 0, 0), (0, 1, 0), (0, 0, 1)),
orient_matrix_type='GLOBAL',
constraint_axis=(False, True, False),
mirror=True, use_proportional_edit=False,
proportional_edit_falloff='SMOOTH',
proportional_size=1,
use_proportional_connected=False,
use_proportional_projected=False)
def copyHips():
bpy.context.area.ui_type = 'FCURVES'
#SELECT OUR ROOT MOTION BONE
bpy.ops.pose.select_all(action='DESELECT')
bpy.context.object.pose.bones['RootMotion'].bone.select = True
# SET FRAME TO ZERO
bpy.ops.graph.cursor_set(frame=0.0, value=0.0)
#ADD NEW KEYFRAME
bpy.ops.anim.keyframe_insert_menu(type='Location')
#SELECT ONLY HIPS AND LOCATION GRAPH DATA
bpy.ops.pose.select_all(action='DESELECT')
bpy.context.object.pose.bones['Hips'].bone.select = True
bpy.context.area.ui_type = 'DOPESHEET'
bpy.context.space_data.dopesheet.filter_text = "Location"
bpy.context.area.ui_type = 'FCURVES'
#COPY THE LOCATION VALUES OF THE HIPS AND DELETE THEM
bpy.ops.graph.copy()
bpy.ops.graph.select_all(action='DESELECT')
myFcurves = bpy.context.object.animation_data.action.fcurves
# print(myFcurves)
for i in myFcurves:
if str(i.data_path)=='pose.bones["Hips"].location':
myFcurves.remove(i)
bpy.ops.pose.select_all(action='DESELECT')
bpy.context.object.pose.bones['RootMotion'].bone.select = True
bpy.ops.graph.paste()
bpy.context.area.ui_type = 'VIEW_3D'
def deleteArmeture():
bpy.ops.object.mode_set(mode='OBJECT')
bpy.ops.object.select_all(action="SELECT")
bpy.ops.object.delete(use_global=False, confirm=False)
def import_armeture(path):
bpy.ops.object.transform_apply(location=True, rotation=True, scale=True)
bpy.ops.import_scene.fbx( filepath = path)
def add_root_bone():
#we get our armeture
armeture = bpy.data.objects[0]
bpy.ops.object.mode_set(mode='EDIT')
# add root Motion
bpy.ops.armature.bone_primitive_add()
bpy.ops.object.mode_set(mode='POSE')
bpy.context.object.pose.bones["Bone"].name = "RootMotion"
bpy.ops.object.mode_set(mode='EDIT')
armeture.data.edit_bones['mixamorig:Hips'].parent = armeture.data.edit_bones['RootMotion']
bpy.ops.object.mode_set(mode='OBJECT')
def get_all_anims():
# HERE IS WHERE WE PLACE OUR FILEPATH TO THE FOLDER WITH OUR ANIMATIONS.
path = ""
files = os.listdir(path)
use_num = len(files)
counter = 0
for file in files:
use_string = path+"/"+file
import_armeture(use_string)
print("We are now importing: " + use_string)
counter += 1
print(os.path.basename(use_string))
bpy.data.actions[0].name = os.path.basename(use_string)
add_root_bone()
fixBones()
scaleAll()
copyHips()
if counter != use_num:
deleteArmeture()
else:
pass
bpy.context.area.ui_type = 'TEXT_EDITOR'
if __name__ == "__main__":
get_all_anims()
#THIS LOOPS THROUGH ALL OUR ACTIONS AND CREATES NLA STRIPS.
for action in bpy.data.actions:
bpy.ops.object.mode_set(mode='OBJECT')
bpy.context.area.type = 'DOPESHEET_EDITOR'
bpy.context.space_data.ui_mode = 'ACTION'
bpy.context.selected_objects[0].animation_data.action = action
bpy.ops.object.mode_set(mode='OBJECT')
bpy.context.area.type = 'DOPESHEET_EDITOR'
bpy.context.space_data.ui_mode = 'ACTION'
bpy.ops.action.push_down()
``` |
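A note on running the script above: it is intended to be executed from Blender's scripting workspace, with the `path` variable inside `get_all_anims()` pointed at a folder of Mixamo FBX exports. The sketch below is hypothetical (the folder and invocation style are placeholders), and only illustrates the intended call.
```python
# Hypothetical invocation from Blender's Python console, assuming this script has been
# run once from the Text Editor (so its functions are defined) and `path` inside
# get_all_anims() points at a folder of Mixamo FBX exports, e.g. "/home/user/mixamo_fbx":
get_all_anims()   # import each FBX, add RootMotion, rename bones, rescale keys, move hip motion
# The NLA push-down of each action happens in the __main__ block when the script itself is run.
```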
{
"source": "john-grando/pyExpandObjects",
"score": 3
} |
#### File: python_standard_lib/test/datetimetester.py
```python
import io
import itertools
import bisect
import copy
import decimal
import sys
import os
import pickle
import random
import re
import struct
import unittest
from array import array
from operator import lt, le, gt, ge, eq, ne, truediv, floordiv, mod
from test import support
from test.support import is_resource_enabled, ALWAYS_EQ, LARGEST, SMALLEST
import datetime as datetime_module
from datetime import MINYEAR, MAXYEAR
from datetime import timedelta
from datetime import tzinfo
from datetime import time
from datetime import timezone
from datetime import date, datetime
import time as _time
import _testcapi
# Needed by test_datetime
import _strptime
#
pickle_loads = {pickle.loads, pickle._loads}
pickle_choices = [(pickle, pickle, proto)
for proto in range(pickle.HIGHEST_PROTOCOL + 1)]
assert len(pickle_choices) == pickle.HIGHEST_PROTOCOL + 1
# An arbitrary collection of objects of non-datetime types, for testing
# mixed-type comparisons.
OTHERSTUFF = (10, 34.5, "abc", {}, [], ())
# XXX Copied from test_float.
INF = float("inf")
NAN = float("nan")
#############################################################################
# module tests
class TestModule(unittest.TestCase):
def test_constants(self):
datetime = datetime_module
self.assertEqual(datetime.MINYEAR, 1)
self.assertEqual(datetime.MAXYEAR, 9999)
def test_all(self):
"""Test that __all__ only points to valid attributes."""
all_attrs = dir(datetime_module)
for attr in datetime_module.__all__:
self.assertIn(attr, all_attrs)
def test_name_cleanup(self):
if '_Pure' in self.__class__.__name__:
self.skipTest('Only run for Fast C implementation')
datetime = datetime_module
names = set(name for name in dir(datetime)
if not name.startswith('__') and not name.endswith('__'))
allowed = set(['MAXYEAR', 'MINYEAR', 'date', 'datetime',
'datetime_CAPI', 'time', 'timedelta', 'timezone',
'tzinfo', 'sys'])
self.assertEqual(names - allowed, set([]))
def test_divide_and_round(self):
if '_Fast' in self.__class__.__name__:
self.skipTest('Only run for Pure Python implementation')
dar = datetime_module._divide_and_round
self.assertEqual(dar(-10, -3), 3)
self.assertEqual(dar(5, -2), -2)
# four cases: (2 signs of a) x (2 signs of b)
self.assertEqual(dar(7, 3), 2)
self.assertEqual(dar(-7, 3), -2)
self.assertEqual(dar(7, -3), -2)
self.assertEqual(dar(-7, -3), 2)
# ties to even - eight cases:
# (2 signs of a) x (2 signs of b) x (even / odd quotient)
self.assertEqual(dar(10, 4), 2)
self.assertEqual(dar(-10, 4), -2)
self.assertEqual(dar(10, -4), -2)
self.assertEqual(dar(-10, -4), 2)
self.assertEqual(dar(6, 4), 2)
self.assertEqual(dar(-6, 4), -2)
self.assertEqual(dar(6, -4), -2)
self.assertEqual(dar(-6, -4), 2)
#############################################################################
# tzinfo tests
class FixedOffset(tzinfo):
def __init__(self, offset, name, dstoffset=42):
if isinstance(offset, int):
offset = timedelta(minutes=offset)
if isinstance(dstoffset, int):
dstoffset = timedelta(minutes=dstoffset)
self.__offset = offset
self.__name = name
self.__dstoffset = dstoffset
def __repr__(self):
return self.__name.lower()
def utcoffset(self, dt):
return self.__offset
def tzname(self, dt):
return self.__name
def dst(self, dt):
return self.__dstoffset
class PicklableFixedOffset(FixedOffset):
def __init__(self, offset=None, name=None, dstoffset=None):
FixedOffset.__init__(self, offset, name, dstoffset)
def __getstate__(self):
return self.__dict__
class _TZInfo(tzinfo):
def utcoffset(self, datetime_module):
return random.random()
class TestTZInfo(unittest.TestCase):
def test_refcnt_crash_bug_22044(self):
tz1 = _TZInfo()
dt1 = datetime(2014, 7, 21, 11, 32, 3, 0, tz1)
with self.assertRaises(TypeError):
dt1.utcoffset()
def test_non_abstractness(self):
# In order to allow subclasses to get pickled, the C implementation
# wasn't able to get away with having __init__ raise
# NotImplementedError.
useless = tzinfo()
dt = datetime.max
self.assertRaises(NotImplementedError, useless.tzname, dt)
self.assertRaises(NotImplementedError, useless.utcoffset, dt)
self.assertRaises(NotImplementedError, useless.dst, dt)
def test_subclass_must_override(self):
class NotEnough(tzinfo):
def __init__(self, offset, name):
self.__offset = offset
self.__name = name
self.assertTrue(issubclass(NotEnough, tzinfo))
ne = NotEnough(3, "NotByALongShot")
self.assertIsInstance(ne, tzinfo)
dt = datetime.now()
self.assertRaises(NotImplementedError, ne.tzname, dt)
self.assertRaises(NotImplementedError, ne.utcoffset, dt)
self.assertRaises(NotImplementedError, ne.dst, dt)
def test_normal(self):
fo = FixedOffset(3, "Three")
self.assertIsInstance(fo, tzinfo)
for dt in datetime.now(), None:
self.assertEqual(fo.utcoffset(dt), timedelta(minutes=3))
self.assertEqual(fo.tzname(dt), "Three")
self.assertEqual(fo.dst(dt), timedelta(minutes=42))
def test_pickling_base(self):
# There's no point to pickling tzinfo objects on their own (they
# carry no data), but they need to be picklable anyway else
# concrete subclasses can't be pickled.
orig = tzinfo.__new__(tzinfo)
self.assertIs(type(orig), tzinfo)
for pickler, unpickler, proto in pickle_choices:
green = pickler.dumps(orig, proto)
derived = unpickler.loads(green)
self.assertIs(type(derived), tzinfo)
def test_pickling_subclass(self):
# Make sure we can pickle/unpickle an instance of a subclass.
offset = timedelta(minutes=-300)
for otype, args in [
(PicklableFixedOffset, (offset, 'cookie')),
(timezone, (offset,)),
(timezone, (offset, "EST"))]:
orig = otype(*args)
oname = orig.tzname(None)
self.assertIsInstance(orig, tzinfo)
self.assertIs(type(orig), otype)
self.assertEqual(orig.utcoffset(None), offset)
self.assertEqual(orig.tzname(None), oname)
for pickler, unpickler, proto in pickle_choices:
green = pickler.dumps(orig, proto)
derived = unpickler.loads(green)
self.assertIsInstance(derived, tzinfo)
self.assertIs(type(derived), otype)
self.assertEqual(derived.utcoffset(None), offset)
self.assertEqual(derived.tzname(None), oname)
def test_issue23600(self):
DSTDIFF = DSTOFFSET = timedelta(hours=1)
class UKSummerTime(tzinfo):
"""Simple time zone which pretends to always be in summer time, since
that's what shows the failure.
"""
def utcoffset(self, dt):
return DSTOFFSET
def dst(self, dt):
return DSTDIFF
def tzname(self, dt):
return 'UKSummerTime'
tz = UKSummerTime()
u = datetime(2014, 4, 26, 12, 1, tzinfo=tz)
t = tz.fromutc(u)
self.assertEqual(t - t.utcoffset(), u)
class TestTimeZone(unittest.TestCase):
def setUp(self):
self.ACDT = timezone(timedelta(hours=9.5), 'ACDT')
self.EST = timezone(-timedelta(hours=5), 'EST')
self.DT = datetime(2010, 1, 1)
def test_str(self):
for tz in [self.ACDT, self.EST, timezone.utc,
timezone.min, timezone.max]:
self.assertEqual(str(tz), tz.tzname(None))
def test_repr(self):
datetime = datetime_module
for tz in [self.ACDT, self.EST, timezone.utc,
timezone.min, timezone.max]:
# test round-trip
tzrep = repr(tz)
self.assertEqual(tz, eval(tzrep))
def test_class_members(self):
limit = timedelta(hours=23, minutes=59)
self.assertEqual(timezone.utc.utcoffset(None), ZERO)
self.assertEqual(timezone.min.utcoffset(None), -limit)
self.assertEqual(timezone.max.utcoffset(None), limit)
def test_constructor(self):
self.assertIs(timezone.utc, timezone(timedelta(0)))
self.assertIsNot(timezone.utc, timezone(timedelta(0), 'UTC'))
self.assertEqual(timezone.utc, timezone(timedelta(0), 'UTC'))
for subminute in [timedelta(microseconds=1), timedelta(seconds=1)]:
tz = timezone(subminute)
self.assertNotEqual(tz.utcoffset(None) % timedelta(minutes=1), 0)
# invalid offsets
for invalid in [timedelta(1, 1), timedelta(1)]:
self.assertRaises(ValueError, timezone, invalid)
self.assertRaises(ValueError, timezone, -invalid)
with self.assertRaises(TypeError): timezone(None)
with self.assertRaises(TypeError): timezone(42)
with self.assertRaises(TypeError): timezone(ZERO, None)
with self.assertRaises(TypeError): timezone(ZERO, 42)
with self.assertRaises(TypeError): timezone(ZERO, 'ABC', 'extra')
def test_inheritance(self):
self.assertIsInstance(timezone.utc, tzinfo)
self.assertIsInstance(self.EST, tzinfo)
def test_utcoffset(self):
dummy = self.DT
for h in [0, 1.5, 12]:
offset = h * HOUR
self.assertEqual(offset, timezone(offset).utcoffset(dummy))
self.assertEqual(-offset, timezone(-offset).utcoffset(dummy))
with self.assertRaises(TypeError): self.EST.utcoffset('')
with self.assertRaises(TypeError): self.EST.utcoffset(5)
def test_dst(self):
self.assertIsNone(timezone.utc.dst(self.DT))
with self.assertRaises(TypeError): self.EST.dst('')
with self.assertRaises(TypeError): self.EST.dst(5)
def test_tzname(self):
self.assertEqual('UTC', timezone.utc.tzname(None))
self.assertEqual('UTC', timezone(ZERO).tzname(None))
self.assertEqual('UTC-05:00', timezone(-5 * HOUR).tzname(None))
self.assertEqual('UTC+09:30', timezone(9.5 * HOUR).tzname(None))
self.assertEqual('UTC-00:01', timezone(timedelta(minutes=-1)).tzname(None))
self.assertEqual('XYZ', timezone(-5 * HOUR, 'XYZ').tzname(None))
# bpo-34482: Check that surrogates are handled properly.
self.assertEqual('\ud800', timezone(ZERO, '\ud800').tzname(None))
# Sub-minute offsets:
self.assertEqual('UTC+01:06:40', timezone(timedelta(0, 4000)).tzname(None))
self.assertEqual('UTC-01:06:40',
timezone(-timedelta(0, 4000)).tzname(None))
self.assertEqual('UTC+01:06:40.000001',
timezone(timedelta(0, 4000, 1)).tzname(None))
self.assertEqual('UTC-01:06:40.000001',
timezone(-timedelta(0, 4000, 1)).tzname(None))
with self.assertRaises(TypeError): self.EST.tzname('')
with self.assertRaises(TypeError): self.EST.tzname(5)
def test_fromutc(self):
with self.assertRaises(ValueError):
timezone.utc.fromutc(self.DT)
with self.assertRaises(TypeError):
timezone.utc.fromutc('not datetime')
for tz in [self.EST, self.ACDT, Eastern]:
utctime = self.DT.replace(tzinfo=tz)
local = tz.fromutc(utctime)
self.assertEqual(local - utctime, tz.utcoffset(local))
self.assertEqual(local,
self.DT.replace(tzinfo=timezone.utc))
def test_comparison(self):
self.assertNotEqual(timezone(ZERO), timezone(HOUR))
self.assertEqual(timezone(HOUR), timezone(HOUR))
self.assertEqual(timezone(-5 * HOUR), timezone(-5 * HOUR, 'EST'))
with self.assertRaises(TypeError): timezone(ZERO) < timezone(ZERO)
self.assertIn(timezone(ZERO), {timezone(ZERO)})
self.assertTrue(timezone(ZERO) != None)
self.assertFalse(timezone(ZERO) == None)
tz = timezone(ZERO)
self.assertTrue(tz == ALWAYS_EQ)
self.assertFalse(tz != ALWAYS_EQ)
self.assertTrue(tz < LARGEST)
self.assertFalse(tz > LARGEST)
self.assertTrue(tz <= LARGEST)
self.assertFalse(tz >= LARGEST)
self.assertFalse(tz < SMALLEST)
self.assertTrue(tz > SMALLEST)
self.assertFalse(tz <= SMALLEST)
self.assertTrue(tz >= SMALLEST)
def test_aware_datetime(self):
# test that timezone instances can be used by datetime
t = datetime(1, 1, 1)
for tz in [timezone.min, timezone.max, timezone.utc]:
self.assertEqual(tz.tzname(t),
t.replace(tzinfo=tz).tzname())
self.assertEqual(tz.utcoffset(t),
t.replace(tzinfo=tz).utcoffset())
self.assertEqual(tz.dst(t),
t.replace(tzinfo=tz).dst())
def test_pickle(self):
for tz in self.ACDT, self.EST, timezone.min, timezone.max:
for pickler, unpickler, proto in pickle_choices:
tz_copy = unpickler.loads(pickler.dumps(tz, proto))
self.assertEqual(tz_copy, tz)
tz = timezone.utc
for pickler, unpickler, proto in pickle_choices:
tz_copy = unpickler.loads(pickler.dumps(tz, proto))
self.assertIs(tz_copy, tz)
def test_copy(self):
for tz in self.ACDT, self.EST, timezone.min, timezone.max:
tz_copy = copy.copy(tz)
self.assertEqual(tz_copy, tz)
tz = timezone.utc
tz_copy = copy.copy(tz)
self.assertIs(tz_copy, tz)
def test_deepcopy(self):
for tz in self.ACDT, self.EST, timezone.min, timezone.max:
tz_copy = copy.deepcopy(tz)
self.assertEqual(tz_copy, tz)
tz = timezone.utc
tz_copy = copy.deepcopy(tz)
self.assertIs(tz_copy, tz)
def test_offset_boundaries(self):
# Test timedeltas close to the boundaries
time_deltas = [
timedelta(hours=23, minutes=59),
timedelta(hours=23, minutes=59, seconds=59),
timedelta(hours=23, minutes=59, seconds=59, microseconds=999999),
]
time_deltas.extend([-delta for delta in time_deltas])
for delta in time_deltas:
with self.subTest(test_type='good', delta=delta):
timezone(delta)
# Test timedeltas on and outside the boundaries
bad_time_deltas = [
timedelta(hours=24),
timedelta(hours=24, microseconds=1),
]
bad_time_deltas.extend([-delta for delta in bad_time_deltas])
for delta in bad_time_deltas:
with self.subTest(test_type='bad', delta=delta):
with self.assertRaises(ValueError):
timezone(delta)
def test_comparison_with_tzinfo(self):
# Constructing tzinfo objects directly should not be done by users
# and serves only to check the bug described in bpo-37915
self.assertNotEqual(timezone.utc, tzinfo())
self.assertNotEqual(timezone(timedelta(hours=1)), tzinfo())
#############################################################################
# Base class for testing a particular aspect of timedelta, time, date and
# datetime comparisons.
class HarmlessMixedComparison:
# Test that __eq__ and __ne__ don't complain for mixed-type comparisons.
# Subclasses must define 'theclass', and theclass(1, 1, 1) must be a
# legit constructor.
def test_harmless_mixed_comparison(self):
me = self.theclass(1, 1, 1)
self.assertFalse(me == ())
self.assertTrue(me != ())
self.assertFalse(() == me)
self.assertTrue(() != me)
self.assertIn(me, [1, 20, [], me])
self.assertIn([], [me, 1, 20, []])
# Comparison to objects of unsupported types should return
# NotImplemented which falls back to the right hand side's __eq__
# method. In this case, ALWAYS_EQ.__eq__ always returns True.
# ALWAYS_EQ.__ne__ always returns False.
self.assertTrue(me == ALWAYS_EQ)
self.assertFalse(me != ALWAYS_EQ)
# If the other class explicitly defines ordering
# relative to our class, it is allowed to do so
self.assertTrue(me < LARGEST)
self.assertFalse(me > LARGEST)
self.assertTrue(me <= LARGEST)
self.assertFalse(me >= LARGEST)
self.assertFalse(me < SMALLEST)
self.assertTrue(me > SMALLEST)
self.assertFalse(me <= SMALLEST)
self.assertTrue(me >= SMALLEST)
def test_harmful_mixed_comparison(self):
me = self.theclass(1, 1, 1)
self.assertRaises(TypeError, lambda: me < ())
self.assertRaises(TypeError, lambda: me <= ())
self.assertRaises(TypeError, lambda: me > ())
self.assertRaises(TypeError, lambda: me >= ())
self.assertRaises(TypeError, lambda: () < me)
self.assertRaises(TypeError, lambda: () <= me)
self.assertRaises(TypeError, lambda: () > me)
self.assertRaises(TypeError, lambda: () >= me)
#############################################################################
# timedelta tests
class TestTimeDelta(HarmlessMixedComparison, unittest.TestCase):
theclass = timedelta
def test_constructor(self):
eq = self.assertEqual
td = timedelta
# Check keyword args to constructor
eq(td(), td(weeks=0, days=0, hours=0, minutes=0, seconds=0,
milliseconds=0, microseconds=0))
eq(td(1), td(days=1))
eq(td(0, 1), td(seconds=1))
eq(td(0, 0, 1), td(microseconds=1))
eq(td(weeks=1), td(days=7))
eq(td(days=1), td(hours=24))
eq(td(hours=1), td(minutes=60))
eq(td(minutes=1), td(seconds=60))
eq(td(seconds=1), td(milliseconds=1000))
eq(td(milliseconds=1), td(microseconds=1000))
# Check float args to constructor
eq(td(weeks=1.0/7), td(days=1))
eq(td(days=1.0/24), td(hours=1))
eq(td(hours=1.0/60), td(minutes=1))
eq(td(minutes=1.0/60), td(seconds=1))
eq(td(seconds=0.001), td(milliseconds=1))
eq(td(milliseconds=0.001), td(microseconds=1))
def test_computations(self):
eq = self.assertEqual
td = timedelta
a = td(7) # One week
b = td(0, 60) # One minute
c = td(0, 0, 1000) # One millisecond
eq(a+b+c, td(7, 60, 1000))
eq(a-b, td(6, 24*3600 - 60))
eq(b.__rsub__(a), td(6, 24*3600 - 60))
eq(-a, td(-7))
eq(+a, td(7))
eq(-b, td(-1, 24*3600 - 60))
eq(-c, td(-1, 24*3600 - 1, 999000))
eq(abs(a), a)
eq(abs(-a), a)
eq(td(6, 24*3600), a)
eq(td(0, 0, 60*1000000), b)
eq(a*10, td(70))
eq(a*10, 10*a)
eq(a*10, 10*a)
eq(b*10, td(0, 600))
eq(10*b, td(0, 600))
eq(b*10, td(0, 600))
eq(c*10, td(0, 0, 10000))
eq(10*c, td(0, 0, 10000))
eq(c*10, td(0, 0, 10000))
eq(a*-1, -a)
eq(b*-2, -b-b)
eq(c*-2, -c+-c)
eq(b*(60*24), (b*60)*24)
eq(b*(60*24), (60*b)*24)
eq(c*1000, td(0, 1))
eq(1000*c, td(0, 1))
eq(a//7, td(1))
eq(b//10, td(0, 6))
eq(c//1000, td(0, 0, 1))
eq(a//10, td(0, 7*24*360))
eq(a//3600000, td(0, 0, 7*24*1000))
eq(a/0.5, td(14))
eq(b/0.5, td(0, 120))
eq(a/7, td(1))
eq(b/10, td(0, 6))
eq(c/1000, td(0, 0, 1))
eq(a/10, td(0, 7*24*360))
eq(a/3600000, td(0, 0, 7*24*1000))
# Multiplication by float
us = td(microseconds=1)
eq((3*us) * 0.5, 2*us)
eq((5*us) * 0.5, 2*us)
eq(0.5 * (3*us), 2*us)
eq(0.5 * (5*us), 2*us)
eq((-3*us) * 0.5, -2*us)
eq((-5*us) * 0.5, -2*us)
# Issue #23521
eq(td(seconds=1) * 0.123456, td(microseconds=123456))
eq(td(seconds=1) * 0.6112295, td(microseconds=611229))
# Division by int and float
eq((3*us) / 2, 2*us)
eq((5*us) / 2, 2*us)
eq((-3*us) / 2.0, -2*us)
eq((-5*us) / 2.0, -2*us)
eq((3*us) / -2, -2*us)
eq((5*us) / -2, -2*us)
eq((3*us) / -2.0, -2*us)
eq((5*us) / -2.0, -2*us)
for i in range(-10, 10):
eq((i*us/3)//us, round(i/3))
for i in range(-10, 10):
eq((i*us/-3)//us, round(i/-3))
# Issue #23521
eq(td(seconds=1) / (1 / 0.6112295), td(microseconds=611229))
# Issue #11576
eq(td(999999999, 86399, 999999) - td(999999999, 86399, 999998),
td(0, 0, 1))
eq(td(999999999, 1, 1) - td(999999999, 1, 0),
td(0, 0, 1))
def test_disallowed_computations(self):
a = timedelta(42)
# Add/sub ints or floats should be illegal
for i in 1, 1.0:
self.assertRaises(TypeError, lambda: a+i)
self.assertRaises(TypeError, lambda: a-i)
self.assertRaises(TypeError, lambda: i+a)
self.assertRaises(TypeError, lambda: i-a)
# Division of int by timedelta doesn't make sense.
# Division by zero doesn't make sense.
zero = 0
self.assertRaises(TypeError, lambda: zero // a)
self.assertRaises(ZeroDivisionError, lambda: a // zero)
self.assertRaises(ZeroDivisionError, lambda: a / zero)
self.assertRaises(ZeroDivisionError, lambda: a / 0.0)
self.assertRaises(TypeError, lambda: a / '')
@support.requires_IEEE_754
def test_disallowed_special(self):
a = timedelta(42)
self.assertRaises(ValueError, a.__mul__, NAN)
self.assertRaises(ValueError, a.__truediv__, NAN)
def test_basic_attributes(self):
days, seconds, us = 1, 7, 31
td = timedelta(days, seconds, us)
self.assertEqual(td.days, days)
self.assertEqual(td.seconds, seconds)
self.assertEqual(td.microseconds, us)
def test_total_seconds(self):
td = timedelta(days=365)
self.assertEqual(td.total_seconds(), 31536000.0)
for total_seconds in [123456.789012, -123456.789012, 0.123456, 0, 1e6]:
td = timedelta(seconds=total_seconds)
self.assertEqual(td.total_seconds(), total_seconds)
# Issue8644: Test that td.total_seconds() has the same
# accuracy as td / timedelta(seconds=1).
for ms in [-1, -2, -123]:
td = timedelta(microseconds=ms)
self.assertEqual(td.total_seconds(), td / timedelta(seconds=1))
def test_carries(self):
t1 = timedelta(days=100,
weeks=-7,
hours=-24*(100-49),
minutes=-3,
seconds=12,
microseconds=(3*60 - 12) * 1e6 + 1)
t2 = timedelta(microseconds=1)
self.assertEqual(t1, t2)
def test_hash_equality(self):
t1 = timedelta(days=100,
weeks=-7,
hours=-24*(100-49),
minutes=-3,
seconds=12,
microseconds=(3*60 - 12) * 1000000)
t2 = timedelta()
self.assertEqual(hash(t1), hash(t2))
t1 += timedelta(weeks=7)
t2 += timedelta(days=7*7)
self.assertEqual(t1, t2)
self.assertEqual(hash(t1), hash(t2))
d = {t1: 1}
d[t2] = 2
self.assertEqual(len(d), 1)
self.assertEqual(d[t1], 2)
def test_pickling(self):
args = 12, 34, 56
orig = timedelta(*args)
for pickler, unpickler, proto in pickle_choices:
green = pickler.dumps(orig, proto)
derived = unpickler.loads(green)
self.assertEqual(orig, derived)
def test_compare(self):
t1 = timedelta(2, 3, 4)
t2 = timedelta(2, 3, 4)
self.assertEqual(t1, t2)
self.assertTrue(t1 <= t2)
self.assertTrue(t1 >= t2)
self.assertFalse(t1 != t2)
self.assertFalse(t1 < t2)
self.assertFalse(t1 > t2)
for args in (3, 3, 3), (2, 4, 4), (2, 3, 5):
t2 = timedelta(*args) # this is larger than t1
self.assertTrue(t1 < t2)
self.assertTrue(t2 > t1)
self.assertTrue(t1 <= t2)
self.assertTrue(t2 >= t1)
self.assertTrue(t1 != t2)
self.assertTrue(t2 != t1)
self.assertFalse(t1 == t2)
self.assertFalse(t2 == t1)
self.assertFalse(t1 > t2)
self.assertFalse(t2 < t1)
self.assertFalse(t1 >= t2)
self.assertFalse(t2 <= t1)
for badarg in OTHERSTUFF:
self.assertEqual(t1 == badarg, False)
self.assertEqual(t1 != badarg, True)
self.assertEqual(badarg == t1, False)
self.assertEqual(badarg != t1, True)
self.assertRaises(TypeError, lambda: t1 <= badarg)
self.assertRaises(TypeError, lambda: t1 < badarg)
self.assertRaises(TypeError, lambda: t1 > badarg)
self.assertRaises(TypeError, lambda: t1 >= badarg)
self.assertRaises(TypeError, lambda: badarg <= t1)
self.assertRaises(TypeError, lambda: badarg < t1)
self.assertRaises(TypeError, lambda: badarg > t1)
self.assertRaises(TypeError, lambda: badarg >= t1)
def test_str(self):
td = timedelta
eq = self.assertEqual
eq(str(td(1)), "1 day, 0:00:00")
eq(str(td(-1)), "-1 day, 0:00:00")
eq(str(td(2)), "2 days, 0:00:00")
eq(str(td(-2)), "-2 days, 0:00:00")
eq(str(td(hours=12, minutes=58, seconds=59)), "12:58:59")
eq(str(td(hours=2, minutes=3, seconds=4)), "2:03:04")
eq(str(td(weeks=-30, hours=23, minutes=12, seconds=34)),
"-210 days, 23:12:34")
eq(str(td(milliseconds=1)), "0:00:00.001000")
eq(str(td(microseconds=3)), "0:00:00.000003")
eq(str(td(days=999999999, hours=23, minutes=59, seconds=59,
microseconds=999999)),
"999999999 days, 23:59:59.999999")
def test_repr(self):
name = 'datetime.' + self.theclass.__name__
self.assertEqual(repr(self.theclass(1)),
"%s(days=1)" % name)
self.assertEqual(repr(self.theclass(10, 2)),
"%s(days=10, seconds=2)" % name)
self.assertEqual(repr(self.theclass(-10, 2, 400000)),
"%s(days=-10, seconds=2, microseconds=400000)" % name)
self.assertEqual(repr(self.theclass(seconds=60)),
"%s(seconds=60)" % name)
self.assertEqual(repr(self.theclass()),
"%s(0)" % name)
self.assertEqual(repr(self.theclass(microseconds=100)),
"%s(microseconds=100)" % name)
self.assertEqual(repr(self.theclass(days=1, microseconds=100)),
"%s(days=1, microseconds=100)" % name)
self.assertEqual(repr(self.theclass(seconds=1, microseconds=100)),
"%s(seconds=1, microseconds=100)" % name)
def test_roundtrip(self):
for td in (timedelta(days=999999999, hours=23, minutes=59,
seconds=59, microseconds=999999),
timedelta(days=-999999999),
timedelta(days=-999999999, seconds=1),
timedelta(days=1, seconds=2, microseconds=3)):
# Verify td -> string -> td identity.
s = repr(td)
self.assertTrue(s.startswith('datetime.'))
s = s[9:]
td2 = eval(s)
self.assertEqual(td, td2)
# Verify identity via reconstructing from pieces.
td2 = timedelta(td.days, td.seconds, td.microseconds)
self.assertEqual(td, td2)
def test_resolution_info(self):
self.assertIsInstance(timedelta.min, timedelta)
self.assertIsInstance(timedelta.max, timedelta)
self.assertIsInstance(timedelta.resolution, timedelta)
self.assertTrue(timedelta.max > timedelta.min)
self.assertEqual(timedelta.min, timedelta(-999999999))
self.assertEqual(timedelta.max, timedelta(999999999, 24*3600-1, 1e6-1))
self.assertEqual(timedelta.resolution, timedelta(0, 0, 1))
def test_overflow(self):
tiny = timedelta.resolution
td = timedelta.min + tiny
td -= tiny # no problem
self.assertRaises(OverflowError, td.__sub__, tiny)
self.assertRaises(OverflowError, td.__add__, -tiny)
td = timedelta.max - tiny
td += tiny # no problem
self.assertRaises(OverflowError, td.__add__, tiny)
self.assertRaises(OverflowError, td.__sub__, -tiny)
self.assertRaises(OverflowError, lambda: -timedelta.max)
day = timedelta(1)
self.assertRaises(OverflowError, day.__mul__, 10**9)
self.assertRaises(OverflowError, day.__mul__, 1e9)
self.assertRaises(OverflowError, day.__truediv__, 1e-20)
self.assertRaises(OverflowError, day.__truediv__, 1e-10)
self.assertRaises(OverflowError, day.__truediv__, 9e-10)
@support.requires_IEEE_754
def _test_overflow_special(self):
day = timedelta(1)
self.assertRaises(OverflowError, day.__mul__, INF)
self.assertRaises(OverflowError, day.__mul__, -INF)
def test_microsecond_rounding(self):
td = timedelta
eq = self.assertEqual
# Single-field rounding.
eq(td(milliseconds=0.4/1000), td(0)) # rounds to 0
eq(td(milliseconds=-0.4/1000), td(0)) # rounds to 0
eq(td(milliseconds=0.5/1000), td(microseconds=0))
eq(td(milliseconds=-0.5/1000), td(microseconds=-0))
eq(td(milliseconds=0.6/1000), td(microseconds=1))
eq(td(milliseconds=-0.6/1000), td(microseconds=-1))
eq(td(milliseconds=1.5/1000), td(microseconds=2))
eq(td(milliseconds=-1.5/1000), td(microseconds=-2))
eq(td(seconds=0.5/10**6), td(microseconds=0))
eq(td(seconds=-0.5/10**6), td(microseconds=-0))
eq(td(seconds=1/2**7), td(microseconds=7812))
eq(td(seconds=-1/2**7), td(microseconds=-7812))
# Rounding due to contributions from more than one field.
us_per_hour = 3600e6
us_per_day = us_per_hour * 24
eq(td(days=.4/us_per_day), td(0))
eq(td(hours=.2/us_per_hour), td(0))
eq(td(days=.4/us_per_day, hours=.2/us_per_hour), td(microseconds=1))
eq(td(days=-.4/us_per_day), td(0))
eq(td(hours=-.2/us_per_hour), td(0))
eq(td(days=-.4/us_per_day, hours=-.2/us_per_hour), td(microseconds=-1))
# Test for a patch in Issue 8860
eq(td(microseconds=0.5), 0.5*td(microseconds=1.0))
eq(td(microseconds=0.5)//td.resolution, 0.5*td.resolution//td.resolution)
def test_massive_normalization(self):
td = timedelta(microseconds=-1)
self.assertEqual((td.days, td.seconds, td.microseconds),
(-1, 24*3600-1, 999999))
def test_bool(self):
self.assertTrue(timedelta(1))
self.assertTrue(timedelta(0, 1))
self.assertTrue(timedelta(0, 0, 1))
self.assertTrue(timedelta(microseconds=1))
self.assertFalse(timedelta(0))
def test_subclass_timedelta(self):
class T(timedelta):
@staticmethod
def from_td(td):
return T(td.days, td.seconds, td.microseconds)
def as_hours(self):
sum = (self.days * 24 +
self.seconds / 3600.0 +
self.microseconds / 3600e6)
return round(sum)
t1 = T(days=1)
self.assertIs(type(t1), T)
self.assertEqual(t1.as_hours(), 24)
t2 = T(days=-1, seconds=-3600)
self.assertIs(type(t2), T)
self.assertEqual(t2.as_hours(), -25)
t3 = t1 + t2
self.assertIs(type(t3), timedelta)
t4 = T.from_td(t3)
self.assertIs(type(t4), T)
self.assertEqual(t3.days, t4.days)
self.assertEqual(t3.seconds, t4.seconds)
self.assertEqual(t3.microseconds, t4.microseconds)
self.assertEqual(str(t3), str(t4))
self.assertEqual(t4.as_hours(), -1)
def test_subclass_date(self):
class DateSubclass(date):
pass
d1 = DateSubclass(2018, 1, 5)
td = timedelta(days=1)
tests = [
('add', lambda d, t: d + t, DateSubclass(2018, 1, 6)),
('radd', lambda d, t: t + d, DateSubclass(2018, 1, 6)),
('sub', lambda d, t: d - t, DateSubclass(2018, 1, 4)),
]
for name, func, expected in tests:
with self.subTest(name):
act = func(d1, td)
self.assertEqual(act, expected)
self.assertIsInstance(act, DateSubclass)
def test_subclass_datetime(self):
class DateTimeSubclass(datetime):
pass
d1 = DateTimeSubclass(2018, 1, 5, 12, 30)
td = timedelta(days=1, minutes=30)
tests = [
('add', lambda d, t: d + t, DateTimeSubclass(2018, 1, 6, 13)),
('radd', lambda d, t: t + d, DateTimeSubclass(2018, 1, 6, 13)),
('sub', lambda d, t: d - t, DateTimeSubclass(2018, 1, 4, 12)),
]
for name, func, expected in tests:
with self.subTest(name):
act = func(d1, td)
self.assertEqual(act, expected)
self.assertIsInstance(act, DateTimeSubclass)
def test_division(self):
t = timedelta(hours=1, minutes=24, seconds=19)
second = timedelta(seconds=1)
self.assertEqual(t / second, 5059.0)
self.assertEqual(t // second, 5059)
t = timedelta(minutes=2, seconds=30)
minute = timedelta(minutes=1)
self.assertEqual(t / minute, 2.5)
self.assertEqual(t // minute, 2)
zerotd = timedelta(0)
self.assertRaises(ZeroDivisionError, truediv, t, zerotd)
self.assertRaises(ZeroDivisionError, floordiv, t, zerotd)
# self.assertRaises(TypeError, truediv, t, 2)
# note: floor division of a timedelta by an integer *is*
# currently permitted.
def test_remainder(self):
t = timedelta(minutes=2, seconds=30)
minute = timedelta(minutes=1)
r = t % minute
self.assertEqual(r, timedelta(seconds=30))
t = timedelta(minutes=-2, seconds=30)
r = t % minute
self.assertEqual(r, timedelta(seconds=30))
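        # Illustrative extra check: the remainder keeps the sign of the
        # divisor because // floors, so -90 seconds // 1 minute == -2.
        self.assertEqual(t // minute, -2)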
zerotd = timedelta(0)
self.assertRaises(ZeroDivisionError, mod, t, zerotd)
self.assertRaises(TypeError, mod, t, 10)
def test_divmod(self):
t = timedelta(minutes=2, seconds=30)
minute = timedelta(minutes=1)
q, r = divmod(t, minute)
self.assertEqual(q, 2)
self.assertEqual(r, timedelta(seconds=30))
t = timedelta(minutes=-2, seconds=30)
q, r = divmod(t, minute)
self.assertEqual(q, -2)
self.assertEqual(r, timedelta(seconds=30))
zerotd = timedelta(0)
self.assertRaises(ZeroDivisionError, divmod, t, zerotd)
self.assertRaises(TypeError, divmod, t, 10)
def test_issue31293(self):
        # The interpreter shouldn't crash when a timedelta is divided or
        # multiplied by a float with a bad as_integer_ratio() method.
def get_bad_float(bad_ratio):
class BadFloat(float):
def as_integer_ratio(self):
return bad_ratio
return BadFloat()
with self.assertRaises(TypeError):
timedelta() / get_bad_float(1 << 1000)
with self.assertRaises(TypeError):
timedelta() * get_bad_float(1 << 1000)
for bad_ratio in [(), (42, ), (1, 2, 3)]:
with self.assertRaises(ValueError):
timedelta() / get_bad_float(bad_ratio)
with self.assertRaises(ValueError):
timedelta() * get_bad_float(bad_ratio)
def test_issue31752(self):
        # The interpreter shouldn't crash when divmod() returns a negative
        # remainder.
class BadInt(int):
def __mul__(self, other):
return Prod()
def __rmul__(self, other):
return Prod()
def __floordiv__(self, other):
return Prod()
def __rfloordiv__(self, other):
return Prod()
class Prod:
def __add__(self, other):
return Sum()
def __radd__(self, other):
return Sum()
class Sum(int):
def __divmod__(self, other):
return divmodresult
for divmodresult in [None, (), (0, 1, 2), (0, -1)]:
with self.subTest(divmodresult=divmodresult):
# The following examples should not crash.
try:
timedelta(microseconds=BadInt(1))
except TypeError:
pass
try:
timedelta(hours=BadInt(1))
except TypeError:
pass
try:
timedelta(weeks=BadInt(1))
except (TypeError, ValueError):
pass
try:
timedelta(1) * BadInt(1)
except (TypeError, ValueError):
pass
try:
BadInt(1) * timedelta(1)
except TypeError:
pass
try:
timedelta(1) // BadInt(1)
except TypeError:
pass
#############################################################################
# date tests
class TestDateOnly(unittest.TestCase):
# Tests here won't pass if also run on datetime objects, so don't
# subclass this to test datetimes too.
def test_delta_non_days_ignored(self):
dt = date(2000, 1, 2)
delta = timedelta(days=1, hours=2, minutes=3, seconds=4,
microseconds=5)
days = timedelta(delta.days)
self.assertEqual(days, timedelta(1))
dt2 = dt + delta
self.assertEqual(dt2, dt + days)
dt2 = delta + dt
self.assertEqual(dt2, dt + days)
dt2 = dt - delta
self.assertEqual(dt2, dt - days)
delta = -delta
days = timedelta(delta.days)
self.assertEqual(days, timedelta(-2))
dt2 = dt + delta
self.assertEqual(dt2, dt + days)
dt2 = delta + dt
self.assertEqual(dt2, dt + days)
dt2 = dt - delta
self.assertEqual(dt2, dt - days)
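        # Illustrative extra check: a timedelta of less than one day leaves
        # the date unchanged, since only whole days are applied.
        self.assertEqual(dt + timedelta(hours=23, minutes=59), dt)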
class SubclassDate(date):
sub_var = 1
class TestDate(HarmlessMixedComparison, unittest.TestCase):
# Tests here should pass for both dates and datetimes, except for a
# few tests that TestDateTime overrides.
theclass = date
def test_basic_attributes(self):
dt = self.theclass(2002, 3, 1)
self.assertEqual(dt.year, 2002)
self.assertEqual(dt.month, 3)
self.assertEqual(dt.day, 1)
def test_roundtrip(self):
for dt in (self.theclass(1, 2, 3),
self.theclass.today()):
# Verify dt -> string -> date identity.
s = repr(dt)
self.assertTrue(s.startswith('datetime.'))
s = s[9:]
dt2 = eval(s)
self.assertEqual(dt, dt2)
# Verify identity via reconstructing from pieces.
dt2 = self.theclass(dt.year, dt.month, dt.day)
self.assertEqual(dt, dt2)
def test_ordinal_conversions(self):
# Check some fixed values.
for y, m, d, n in [(1, 1, 1, 1), # calendar origin
(1, 12, 31, 365),
(2, 1, 1, 366),
# first example from "Calendrical Calculations"
(1945, 11, 12, 710347)]:
d = self.theclass(y, m, d)
self.assertEqual(n, d.toordinal())
fromord = self.theclass.fromordinal(n)
self.assertEqual(d, fromord)
if hasattr(fromord, "hour"):
# if we're checking something fancier than a date, verify
# the extra fields have been zeroed out
self.assertEqual(fromord.hour, 0)
self.assertEqual(fromord.minute, 0)
self.assertEqual(fromord.second, 0)
self.assertEqual(fromord.microsecond, 0)
# Check first and last days of year spottily across the whole
# range of years supported.
for year in range(MINYEAR, MAXYEAR+1, 7):
# Verify (year, 1, 1) -> ordinal -> y, m, d is identity.
d = self.theclass(year, 1, 1)
n = d.toordinal()
d2 = self.theclass.fromordinal(n)
self.assertEqual(d, d2)
# Verify that moving back a day gets to the end of year-1.
if year > 1:
d = self.theclass.fromordinal(n-1)
d2 = self.theclass(year-1, 12, 31)
self.assertEqual(d, d2)
self.assertEqual(d2.toordinal(), n-1)
# Test every day in a leap-year and a non-leap year.
dim = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
for year, isleap in (2000, True), (2002, False):
n = self.theclass(year, 1, 1).toordinal()
for month, maxday in zip(range(1, 13), dim):
if month == 2 and isleap:
maxday += 1
for day in range(1, maxday+1):
d = self.theclass(year, month, day)
self.assertEqual(d.toordinal(), n)
self.assertEqual(d, self.theclass.fromordinal(n))
n += 1
def test_extreme_ordinals(self):
a = self.theclass.min
a = self.theclass(a.year, a.month, a.day) # get rid of time parts
aord = a.toordinal()
b = a.fromordinal(aord)
self.assertEqual(a, b)
self.assertRaises(ValueError, lambda: a.fromordinal(aord - 1))
b = a + timedelta(days=1)
self.assertEqual(b.toordinal(), aord + 1)
self.assertEqual(b, self.theclass.fromordinal(aord + 1))
a = self.theclass.max
a = self.theclass(a.year, a.month, a.day) # get rid of time parts
aord = a.toordinal()
b = a.fromordinal(aord)
self.assertEqual(a, b)
self.assertRaises(ValueError, lambda: a.fromordinal(aord + 1))
b = a - timedelta(days=1)
self.assertEqual(b.toordinal(), aord - 1)
self.assertEqual(b, self.theclass.fromordinal(aord - 1))
def test_bad_constructor_arguments(self):
# bad years
self.theclass(MINYEAR, 1, 1) # no exception
self.theclass(MAXYEAR, 1, 1) # no exception
self.assertRaises(ValueError, self.theclass, MINYEAR-1, 1, 1)
self.assertRaises(ValueError, self.theclass, MAXYEAR+1, 1, 1)
# bad months
self.theclass(2000, 1, 1) # no exception
self.theclass(2000, 12, 1) # no exception
self.assertRaises(ValueError, self.theclass, 2000, 0, 1)
self.assertRaises(ValueError, self.theclass, 2000, 13, 1)
# bad days
self.theclass(2000, 2, 29) # no exception
self.theclass(2004, 2, 29) # no exception
self.theclass(2400, 2, 29) # no exception
self.assertRaises(ValueError, self.theclass, 2000, 2, 30)
self.assertRaises(ValueError, self.theclass, 2001, 2, 29)
self.assertRaises(ValueError, self.theclass, 2100, 2, 29)
self.assertRaises(ValueError, self.theclass, 1900, 2, 29)
self.assertRaises(ValueError, self.theclass, 2000, 1, 0)
self.assertRaises(ValueError, self.theclass, 2000, 1, 32)
def test_hash_equality(self):
d = self.theclass(2000, 12, 31)
# same thing
e = self.theclass(2000, 12, 31)
self.assertEqual(d, e)
self.assertEqual(hash(d), hash(e))
dic = {d: 1}
dic[e] = 2
self.assertEqual(len(dic), 1)
self.assertEqual(dic[d], 2)
self.assertEqual(dic[e], 2)
d = self.theclass(2001, 1, 1)
# same thing
e = self.theclass(2001, 1, 1)
self.assertEqual(d, e)
self.assertEqual(hash(d), hash(e))
dic = {d: 1}
dic[e] = 2
self.assertEqual(len(dic), 1)
self.assertEqual(dic[d], 2)
self.assertEqual(dic[e], 2)
def test_computations(self):
a = self.theclass(2002, 1, 31)
b = self.theclass(1956, 1, 31)
        c = self.theclass(2001, 2, 1)
diff = a-b
self.assertEqual(diff.days, 46*365 + len(range(1956, 2002, 4)))
self.assertEqual(diff.seconds, 0)
self.assertEqual(diff.microseconds, 0)
day = timedelta(1)
week = timedelta(7)
a = self.theclass(2002, 3, 2)
self.assertEqual(a + day, self.theclass(2002, 3, 3))
self.assertEqual(day + a, self.theclass(2002, 3, 3))
self.assertEqual(a - day, self.theclass(2002, 3, 1))
self.assertEqual(-day + a, self.theclass(2002, 3, 1))
self.assertEqual(a + week, self.theclass(2002, 3, 9))
self.assertEqual(a - week, self.theclass(2002, 2, 23))
self.assertEqual(a + 52*week, self.theclass(2003, 3, 1))
self.assertEqual(a - 52*week, self.theclass(2001, 3, 3))
self.assertEqual((a + week) - a, week)
self.assertEqual((a + day) - a, day)
self.assertEqual((a - week) - a, -week)
self.assertEqual((a - day) - a, -day)
self.assertEqual(a - (a + week), -week)
self.assertEqual(a - (a + day), -day)
self.assertEqual(a - (a - week), week)
self.assertEqual(a - (a - day), day)
self.assertEqual(c - (c - day), day)
# Add/sub ints or floats should be illegal
for i in 1, 1.0:
self.assertRaises(TypeError, lambda: a+i)
self.assertRaises(TypeError, lambda: a-i)
self.assertRaises(TypeError, lambda: i+a)
self.assertRaises(TypeError, lambda: i-a)
# delta - date is senseless.
self.assertRaises(TypeError, lambda: day - a)
# mixing date and (delta or date) via * or // is senseless
self.assertRaises(TypeError, lambda: day * a)
self.assertRaises(TypeError, lambda: a * day)
self.assertRaises(TypeError, lambda: day // a)
self.assertRaises(TypeError, lambda: a // day)
self.assertRaises(TypeError, lambda: a * a)
self.assertRaises(TypeError, lambda: a // a)
# date + date is senseless
self.assertRaises(TypeError, lambda: a + a)
def test_overflow(self):
tiny = self.theclass.resolution
for delta in [tiny, timedelta(1), timedelta(2)]:
dt = self.theclass.min + delta
dt -= delta # no problem
self.assertRaises(OverflowError, dt.__sub__, delta)
self.assertRaises(OverflowError, dt.__add__, -delta)
dt = self.theclass.max - delta
dt += delta # no problem
self.assertRaises(OverflowError, dt.__add__, delta)
self.assertRaises(OverflowError, dt.__sub__, -delta)
def test_fromtimestamp(self):
import time
# Try an arbitrary fixed value.
year, month, day = 1999, 9, 19
ts = time.mktime((year, month, day, 0, 0, 0, 0, 0, -1))
d = self.theclass.fromtimestamp(ts)
self.assertEqual(d.year, year)
self.assertEqual(d.month, month)
self.assertEqual(d.day, day)
def test_insane_fromtimestamp(self):
# It's possible that some platform maps time_t to double,
# and that this test will fail there. This test should
# exempt such platforms (provided they return reasonable
# results!).
for insane in -1e200, 1e200:
self.assertRaises(OverflowError, self.theclass.fromtimestamp,
insane)
def test_today(self):
import time
# We claim that today() is like fromtimestamp(time.time()), so
# prove it.
for dummy in range(3):
today = self.theclass.today()
ts = time.time()
todayagain = self.theclass.fromtimestamp(ts)
if today == todayagain:
break
# There are several legit reasons that could fail:
# 1. It recently became midnight, between the today() and the
# time() calls.
# 2. The platform time() has such fine resolution that we'll
# never get the same value twice.
# 3. The platform time() has poor resolution, and we just
# happened to call today() right before a resolution quantum
# boundary.
# 4. The system clock got fiddled between calls.
# In any case, wait a little while and try again.
time.sleep(0.1)
# It worked or it didn't. If it didn't, assume it's reason #2, and
# let the test pass if they're within half a second of each other.
if today != todayagain:
self.assertAlmostEqual(todayagain, today,
delta=timedelta(seconds=0.5))
def test_weekday(self):
for i in range(7):
# March 4, 2002 is a Monday
self.assertEqual(self.theclass(2002, 3, 4+i).weekday(), i)
self.assertEqual(self.theclass(2002, 3, 4+i).isoweekday(), i+1)
# January 2, 1956 is a Monday
self.assertEqual(self.theclass(1956, 1, 2+i).weekday(), i)
self.assertEqual(self.theclass(1956, 1, 2+i).isoweekday(), i+1)
def test_isocalendar(self):
# Check examples from
# http://www.phys.uu.nl/~vgent/calendar/isocalendar.htm
week_mondays = [
((2003, 12, 22), (2003, 52, 1)),
((2003, 12, 29), (2004, 1, 1)),
((2004, 1, 5), (2004, 2, 1)),
((2009, 12, 21), (2009, 52, 1)),
((2009, 12, 28), (2009, 53, 1)),
((2010, 1, 4), (2010, 1, 1)),
]
test_cases = []
for cal_date, iso_date in week_mondays:
base_date = self.theclass(*cal_date)
# Adds one test case for every day of the specified weeks
for i in range(7):
new_date = base_date + timedelta(i)
new_iso = iso_date[0:2] + (iso_date[2] + i,)
test_cases.append((new_date, new_iso))
for d, exp_iso in test_cases:
with self.subTest(d=d, comparison="tuple"):
self.assertEqual(d.isocalendar(), exp_iso)
# Check that the tuple contents are accessible by field name
with self.subTest(d=d, comparison="fields"):
t = d.isocalendar()
self.assertEqual((t.year, t.week, t.weekday), exp_iso)
def test_isocalendar_pickling(self):
"""Test that the result of datetime.isocalendar() can be pickled.
The result of a round trip should be a plain tuple.
"""
d = self.theclass(2019, 1, 1)
p = pickle.dumps(d.isocalendar())
res = pickle.loads(p)
self.assertEqual(type(res), tuple)
self.assertEqual(res, (2019, 1, 2))
def test_iso_long_years(self):
# Calculate long ISO years and compare to table from
# http://www.phys.uu.nl/~vgent/calendar/isocalendar.htm
ISO_LONG_YEARS_TABLE = """
4 32 60 88
9 37 65 93
15 43 71 99
20 48 76
26 54 82
105 133 161 189
111 139 167 195
116 144 172
122 150 178
128 156 184
201 229 257 285
207 235 263 291
212 240 268 296
218 246 274
224 252 280
303 331 359 387
308 336 364 392
314 342 370 398
320 348 376
325 353 381
"""
iso_long_years = sorted(map(int, ISO_LONG_YEARS_TABLE.split()))
L = []
for i in range(400):
d = self.theclass(2000+i, 12, 31)
d1 = self.theclass(1600+i, 12, 31)
self.assertEqual(d.isocalendar()[1:], d1.isocalendar()[1:])
if d.isocalendar()[1] == 53:
L.append(i)
self.assertEqual(L, iso_long_years)
def test_isoformat(self):
t = self.theclass(2, 3, 2)
self.assertEqual(t.isoformat(), "0002-03-02")
def test_ctime(self):
t = self.theclass(2002, 3, 2)
self.assertEqual(t.ctime(), "Sat Mar 2 00:00:00 2002")
def test_strftime(self):
t = self.theclass(2005, 3, 2)
self.assertEqual(t.strftime("m:%m d:%d y:%y"), "m:03 d:02 y:05")
self.assertEqual(t.strftime(""), "") # SF bug #761337
self.assertEqual(t.strftime('x'*1000), 'x'*1000) # SF bug #1556784
self.assertRaises(TypeError, t.strftime) # needs an arg
self.assertRaises(TypeError, t.strftime, "one", "two") # too many args
self.assertRaises(TypeError, t.strftime, 42) # arg wrong type
# test that unicode input is allowed (issue 2782)
self.assertEqual(t.strftime("%m"), "03")
# A naive object replaces %z and %Z w/ empty strings.
self.assertEqual(t.strftime("'%z' '%Z'"), "'' ''")
#make sure that invalid format specifiers are handled correctly
#self.assertRaises(ValueError, t.strftime, "%e")
#self.assertRaises(ValueError, t.strftime, "%")
#self.assertRaises(ValueError, t.strftime, "%#")
#oh well, some systems just ignore those invalid ones.
#at least, exercise them to make sure that no crashes
#are generated
for f in ["%e", "%", "%#"]:
try:
t.strftime(f)
except ValueError:
pass
# bpo-34482: Check that surrogates don't cause a crash.
try:
t.strftime('%y\ud800%m')
except UnicodeEncodeError:
pass
#check that this standard extension works
t.strftime("%f")
def test_strftime_trailing_percent(self):
# bpo-35066: Make sure trailing '%' doesn't cause datetime's strftime to
# complain. Different libcs have different handling of trailing
# percents, so we simply check datetime's strftime acts the same as
# time.strftime.
t = self.theclass(2005, 3, 2)
try:
_time.strftime('%')
except ValueError:
self.skipTest('time module does not support trailing %')
self.assertEqual(t.strftime('%'), _time.strftime('%', t.timetuple()))
self.assertEqual(
t.strftime("m:%m d:%d y:%y %"),
_time.strftime("m:03 d:02 y:05 %", t.timetuple()),
)
def test_format(self):
dt = self.theclass(2007, 9, 10)
self.assertEqual(dt.__format__(''), str(dt))
with self.assertRaisesRegex(TypeError, 'must be str, not int'):
dt.__format__(123)
# check that a derived class's __str__() gets called
class A(self.theclass):
def __str__(self):
return 'A'
a = A(2007, 9, 10)
self.assertEqual(a.__format__(''), 'A')
# check that a derived class's strftime gets called
class B(self.theclass):
def strftime(self, format_spec):
return 'B'
b = B(2007, 9, 10)
self.assertEqual(b.__format__(''), str(dt))
for fmt in ["m:%m d:%d y:%y",
"m:%m d:%d y:%y H:%H M:%M S:%S",
"%z %Z",
]:
self.assertEqual(dt.__format__(fmt), dt.strftime(fmt))
self.assertEqual(a.__format__(fmt), dt.strftime(fmt))
self.assertEqual(b.__format__(fmt), 'B')
def test_resolution_info(self):
# XXX: Should min and max respect subclassing?
if issubclass(self.theclass, datetime):
expected_class = datetime
else:
expected_class = date
self.assertIsInstance(self.theclass.min, expected_class)
self.assertIsInstance(self.theclass.max, expected_class)
self.assertIsInstance(self.theclass.resolution, timedelta)
self.assertTrue(self.theclass.max > self.theclass.min)
def test_extreme_timedelta(self):
big = self.theclass.max - self.theclass.min
# 3652058 days, 23 hours, 59 minutes, 59 seconds, 999999 microseconds
n = (big.days*24*3600 + big.seconds)*1000000 + big.microseconds
# n == 315537897599999999 ~= 2**58.13
justasbig = timedelta(0, 0, n)
self.assertEqual(big, justasbig)
self.assertEqual(self.theclass.min + big, self.theclass.max)
self.assertEqual(self.theclass.max - big, self.theclass.min)
def test_timetuple(self):
for i in range(7):
# January 2, 1956 is a Monday (0)
d = self.theclass(1956, 1, 2+i)
t = d.timetuple()
self.assertEqual(t, (1956, 1, 2+i, 0, 0, 0, i, 2+i, -1))
# February 1, 1956 is a Wednesday (2)
d = self.theclass(1956, 2, 1+i)
t = d.timetuple()
self.assertEqual(t, (1956, 2, 1+i, 0, 0, 0, (2+i)%7, 32+i, -1))
# March 1, 1956 is a Thursday (3), and is the 31+29+1 = 61st day
# of the year.
d = self.theclass(1956, 3, 1+i)
t = d.timetuple()
self.assertEqual(t, (1956, 3, 1+i, 0, 0, 0, (3+i)%7, 61+i, -1))
self.assertEqual(t.tm_year, 1956)
self.assertEqual(t.tm_mon, 3)
self.assertEqual(t.tm_mday, 1+i)
self.assertEqual(t.tm_hour, 0)
self.assertEqual(t.tm_min, 0)
self.assertEqual(t.tm_sec, 0)
self.assertEqual(t.tm_wday, (3+i)%7)
self.assertEqual(t.tm_yday, 61+i)
self.assertEqual(t.tm_isdst, -1)
def test_pickling(self):
args = 6, 7, 23
orig = self.theclass(*args)
for pickler, unpickler, proto in pickle_choices:
green = pickler.dumps(orig, proto)
derived = unpickler.loads(green)
self.assertEqual(orig, derived)
self.assertEqual(orig.__reduce__(), orig.__reduce_ex__(2))
def test_compat_unpickle(self):
tests = [
b"cdatetime\ndate\n(S'\\x07\\xdf\\x0b\\x1b'\ntR.",
b'cdatetime\ndate\n(U\x04\x07\xdf\x0b\x1btR.',
b'\x80\x02cdatetime\ndate\nU\x04\x07\xdf\x0b\x1b\x85R.',
]
args = 2015, 11, 27
expected = self.theclass(*args)
for data in tests:
for loads in pickle_loads:
derived = loads(data, encoding='latin1')
self.assertEqual(derived, expected)
def test_compare(self):
t1 = self.theclass(2, 3, 4)
t2 = self.theclass(2, 3, 4)
self.assertEqual(t1, t2)
self.assertTrue(t1 <= t2)
self.assertTrue(t1 >= t2)
self.assertFalse(t1 != t2)
self.assertFalse(t1 < t2)
self.assertFalse(t1 > t2)
for args in (3, 3, 3), (2, 4, 4), (2, 3, 5):
t2 = self.theclass(*args) # this is larger than t1
self.assertTrue(t1 < t2)
self.assertTrue(t2 > t1)
self.assertTrue(t1 <= t2)
self.assertTrue(t2 >= t1)
self.assertTrue(t1 != t2)
self.assertTrue(t2 != t1)
self.assertFalse(t1 == t2)
self.assertFalse(t2 == t1)
self.assertFalse(t1 > t2)
self.assertFalse(t2 < t1)
self.assertFalse(t1 >= t2)
self.assertFalse(t2 <= t1)
for badarg in OTHERSTUFF:
self.assertEqual(t1 == badarg, False)
self.assertEqual(t1 != badarg, True)
self.assertEqual(badarg == t1, False)
self.assertEqual(badarg != t1, True)
self.assertRaises(TypeError, lambda: t1 < badarg)
self.assertRaises(TypeError, lambda: t1 > badarg)
self.assertRaises(TypeError, lambda: t1 >= badarg)
self.assertRaises(TypeError, lambda: badarg <= t1)
self.assertRaises(TypeError, lambda: badarg < t1)
self.assertRaises(TypeError, lambda: badarg > t1)
self.assertRaises(TypeError, lambda: badarg >= t1)
def test_mixed_compare(self):
our = self.theclass(2000, 4, 5)
# Our class can be compared for equality to other classes
self.assertEqual(our == 1, False)
self.assertEqual(1 == our, False)
self.assertEqual(our != 1, True)
self.assertEqual(1 != our, True)
# But the ordering is undefined
self.assertRaises(TypeError, lambda: our < 1)
self.assertRaises(TypeError, lambda: 1 < our)
# Repeat those tests with a different class
class SomeClass:
pass
their = SomeClass()
self.assertEqual(our == their, False)
self.assertEqual(their == our, False)
self.assertEqual(our != their, True)
self.assertEqual(their != our, True)
self.assertRaises(TypeError, lambda: our < their)
self.assertRaises(TypeError, lambda: their < our)
def test_bool(self):
# All dates are considered true.
self.assertTrue(self.theclass.min)
self.assertTrue(self.theclass.max)
def test_strftime_y2k(self):
for y in (1, 49, 70, 99, 100, 999, 1000, 1970):
d = self.theclass(y, 1, 1)
# Issue 13305: For years < 1000, the value is not always
# padded to 4 digits across platforms. The C standard
# assumes year >= 1900, so it does not specify the number
# of digits.
if d.strftime("%Y") != '%04d' % y:
# Year 42 returns '42', not padded
self.assertEqual(d.strftime("%Y"), '%d' % y)
# '0042' is obtained anyway
self.assertEqual(d.strftime("%4Y"), '%04d' % y)
def test_replace(self):
cls = self.theclass
args = [1, 2, 3]
base = cls(*args)
self.assertEqual(base, base.replace())
i = 0
for name, newval in (("year", 2),
("month", 3),
("day", 4)):
newargs = args[:]
newargs[i] = newval
expected = cls(*newargs)
got = base.replace(**{name: newval})
self.assertEqual(expected, got)
i += 1
# Out of bounds.
base = cls(2000, 2, 29)
self.assertRaises(ValueError, base.replace, year=2001)
def test_subclass_replace(self):
class DateSubclass(self.theclass):
pass
dt = DateSubclass(2012, 1, 1)
self.assertIs(type(dt.replace(year=2013)), DateSubclass)
def test_subclass_date(self):
class C(self.theclass):
theAnswer = 42
def __new__(cls, *args, **kws):
temp = kws.copy()
extra = temp.pop('extra')
result = self.theclass.__new__(cls, *args, **temp)
result.extra = extra
return result
def newmeth(self, start):
return start + self.year + self.month
args = 2003, 4, 14
dt1 = self.theclass(*args)
dt2 = C(*args, **{'extra': 7})
self.assertEqual(dt2.__class__, C)
self.assertEqual(dt2.theAnswer, 42)
self.assertEqual(dt2.extra, 7)
self.assertEqual(dt1.toordinal(), dt2.toordinal())
self.assertEqual(dt2.newmeth(-7), dt1.year + dt1.month - 7)
def test_subclass_alternate_constructors(self):
# Test that alternate constructors call the constructor
class DateSubclass(self.theclass):
def __new__(cls, *args, **kwargs):
result = self.theclass.__new__(cls, *args, **kwargs)
result.extra = 7
return result
args = (2003, 4, 14)
d_ord = 731319 # Equivalent ordinal date
d_isoformat = '2003-04-14' # Equivalent isoformat()
base_d = DateSubclass(*args)
self.assertIsInstance(base_d, DateSubclass)
self.assertEqual(base_d.extra, 7)
# Timestamp depends on time zone, so we'll calculate the equivalent here
ts = datetime.combine(base_d, time(0)).timestamp()
test_cases = [
('fromordinal', (d_ord,)),
('fromtimestamp', (ts,)),
('fromisoformat', (d_isoformat,)),
]
for constr_name, constr_args in test_cases:
for base_obj in (DateSubclass, base_d):
# Test both the classmethod and method
with self.subTest(base_obj_type=type(base_obj),
constr_name=constr_name):
constr = getattr(base_obj, constr_name)
dt = constr(*constr_args)
# Test that it creates the right subclass
self.assertIsInstance(dt, DateSubclass)
# Test that it's equal to the base object
self.assertEqual(dt, base_d)
# Test that it called the constructor
self.assertEqual(dt.extra, 7)
def test_pickling_subclass_date(self):
args = 6, 7, 23
orig = SubclassDate(*args)
for pickler, unpickler, proto in pickle_choices:
green = pickler.dumps(orig, proto)
derived = unpickler.loads(green)
self.assertEqual(orig, derived)
self.assertTrue(isinstance(derived, SubclassDate))
def test_backdoor_resistance(self):
# For fast unpickling, the constructor accepts a pickle byte string.
# This is a low-overhead backdoor. A user can (by intent or
# mistake) pass a string directly, which (if it's the right length)
# will get treated like a pickle, and bypass the normal sanity
# checks in the constructor. This can create insane objects.
# The constructor doesn't want to burn the time to validate all
# fields, but does check the month field. This stops, e.g.,
# datetime.datetime('1995-03-25') from yielding an insane object.
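        # (For example, in test_compat_unpickle above the 4-byte payload
        # b'\x07\xdf\x0b\x1b' decodes as year 0x07df == 2015, month 0x0b == 11,
        # day 0x1b == 27.)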
base = b'1995-03-25'
if not issubclass(self.theclass, datetime):
base = base[:4]
for month_byte in b'9', b'\0', b'\r', b'\xff':
self.assertRaises(TypeError, self.theclass,
base[:2] + month_byte + base[3:])
if issubclass(self.theclass, datetime):
# Good bytes, but bad tzinfo:
with self.assertRaisesRegex(TypeError, '^bad tzinfo state arg$'):
self.theclass(bytes([1] * len(base)), 'EST')
for ord_byte in range(1, 13):
# This shouldn't blow up because of the month byte alone. If
# the implementation changes to do more-careful checking, it may
# blow up because other fields are insane.
self.theclass(base[:2] + bytes([ord_byte]) + base[3:])
def test_fromisoformat(self):
# Test that isoformat() is reversible
base_dates = [
(1, 1, 1),
(1000, 2, 14),
(1900, 1, 1),
(2000, 2, 29),
(2004, 11, 12),
(2004, 4, 3),
(2017, 5, 30)
]
for dt_tuple in base_dates:
dt = self.theclass(*dt_tuple)
dt_str = dt.isoformat()
with self.subTest(dt_str=dt_str):
dt_rt = self.theclass.fromisoformat(dt.isoformat())
self.assertEqual(dt, dt_rt)
def test_fromisoformat_subclass(self):
class DateSubclass(self.theclass):
pass
dt = DateSubclass(2014, 12, 14)
dt_rt = DateSubclass.fromisoformat(dt.isoformat())
self.assertIsInstance(dt_rt, DateSubclass)
def test_fromisoformat_fails(self):
# Test that fromisoformat() fails on invalid values
bad_strs = [
'', # Empty string
'\ud800', # bpo-34454: Surrogate code point
'009-03-04', # Not 10 characters
'123456789', # Not a date
'200a-12-04', # Invalid character in year
'2009-1a-04', # Invalid character in month
'2009-12-0a', # Invalid character in day
'2009-01-32', # Invalid day
'2009-02-29', # Invalid leap day
'20090228', # Valid ISO8601 output not from isoformat()
'2009\ud80002\ud80028', # Separators are surrogate codepoints
]
for bad_str in bad_strs:
with self.assertRaises(ValueError):
self.theclass.fromisoformat(bad_str)
def test_fromisoformat_fails_typeerror(self):
# Test that fromisoformat fails when passed the wrong type
import io
bad_types = [b'2009-03-01', None, io.StringIO('2009-03-01')]
for bad_type in bad_types:
with self.assertRaises(TypeError):
self.theclass.fromisoformat(bad_type)
def test_fromisocalendar(self):
# For each test case, assert that fromisocalendar is the
# inverse of the isocalendar function
dates = [
(2016, 4, 3),
(2005, 1, 2), # (2004, 53, 7)
(2008, 12, 30), # (2009, 1, 2)
(2010, 1, 2), # (2009, 53, 6)
(2009, 12, 31), # (2009, 53, 4)
(1900, 1, 1), # Unusual non-leap year (year % 100 == 0)
(1900, 12, 31),
(2000, 1, 1), # Unusual leap year (year % 400 == 0)
(2000, 12, 31),
(2004, 1, 1), # Leap year
(2004, 12, 31),
(1, 1, 1),
(9999, 12, 31),
(MINYEAR, 1, 1),
(MAXYEAR, 12, 31),
]
for datecomps in dates:
with self.subTest(datecomps=datecomps):
dobj = self.theclass(*datecomps)
isocal = dobj.isocalendar()
d_roundtrip = self.theclass.fromisocalendar(*isocal)
self.assertEqual(dobj, d_roundtrip)
def test_fromisocalendar_value_errors(self):
isocals = [
(2019, 0, 1),
(2019, -1, 1),
(2019, 54, 1),
(2019, 1, 0),
(2019, 1, -1),
(2019, 1, 8),
(2019, 53, 1),
(10000, 1, 1),
(0, 1, 1),
(9999999, 1, 1),
(2<<32, 1, 1),
(2019, 2<<32, 1),
(2019, 1, 2<<32),
]
for isocal in isocals:
with self.subTest(isocal=isocal):
with self.assertRaises(ValueError):
self.theclass.fromisocalendar(*isocal)
def test_fromisocalendar_type_errors(self):
err_txformers = [
str,
float,
lambda x: None,
]
# Take a valid base tuple and transform it to contain one argument
# with the wrong type. Repeat this for each argument, e.g.
# [("2019", 1, 1), (2019, "1", 1), (2019, 1, "1"), ...]
isocals = []
base = (2019, 1, 1)
for i in range(3):
for txformer in err_txformers:
err_val = list(base)
err_val[i] = txformer(err_val[i])
isocals.append(tuple(err_val))
for isocal in isocals:
with self.subTest(isocal=isocal):
with self.assertRaises(TypeError):
self.theclass.fromisocalendar(*isocal)
#############################################################################
# datetime tests
class SubclassDatetime(datetime):
sub_var = 1
class TestDateTime(TestDate):
theclass = datetime
def test_basic_attributes(self):
dt = self.theclass(2002, 3, 1, 12, 0)
self.assertEqual(dt.year, 2002)
self.assertEqual(dt.month, 3)
self.assertEqual(dt.day, 1)
self.assertEqual(dt.hour, 12)
self.assertEqual(dt.minute, 0)
self.assertEqual(dt.second, 0)
self.assertEqual(dt.microsecond, 0)
def test_basic_attributes_nonzero(self):
# Make sure all attributes are non-zero so bugs in
# bit-shifting access show up.
dt = self.theclass(2002, 3, 1, 12, 59, 59, 8000)
self.assertEqual(dt.year, 2002)
self.assertEqual(dt.month, 3)
self.assertEqual(dt.day, 1)
self.assertEqual(dt.hour, 12)
self.assertEqual(dt.minute, 59)
self.assertEqual(dt.second, 59)
self.assertEqual(dt.microsecond, 8000)
def test_roundtrip(self):
for dt in (self.theclass(1, 2, 3, 4, 5, 6, 7),
self.theclass.now()):
# Verify dt -> string -> datetime identity.
s = repr(dt)
self.assertTrue(s.startswith('datetime.'))
s = s[9:]
dt2 = eval(s)
self.assertEqual(dt, dt2)
# Verify identity via reconstructing from pieces.
dt2 = self.theclass(dt.year, dt.month, dt.day,
dt.hour, dt.minute, dt.second,
dt.microsecond)
self.assertEqual(dt, dt2)
def test_isoformat(self):
t = self.theclass(1, 2, 3, 4, 5, 1, 123)
self.assertEqual(t.isoformat(), "0001-02-03T04:05:01.000123")
self.assertEqual(t.isoformat('T'), "0001-02-03T04:05:01.000123")
self.assertEqual(t.isoformat(' '), "0001-02-03 04:05:01.000123")
self.assertEqual(t.isoformat('\x00'), "0001-02-03\x0004:05:01.000123")
# bpo-34482: Check that surrogates are handled properly.
self.assertEqual(t.isoformat('\ud800'),
"0001-02-03\ud80004:05:01.000123")
self.assertEqual(t.isoformat(timespec='hours'), "0001-02-03T04")
self.assertEqual(t.isoformat(timespec='minutes'), "0001-02-03T04:05")
self.assertEqual(t.isoformat(timespec='seconds'), "0001-02-03T04:05:01")
self.assertEqual(t.isoformat(timespec='milliseconds'), "0001-02-03T04:05:01.000")
self.assertEqual(t.isoformat(timespec='microseconds'), "0001-02-03T04:05:01.000123")
self.assertEqual(t.isoformat(timespec='auto'), "0001-02-03T04:05:01.000123")
self.assertEqual(t.isoformat(sep=' ', timespec='minutes'), "0001-02-03 04:05")
self.assertRaises(ValueError, t.isoformat, timespec='foo')
# bpo-34482: Check that surrogates are handled properly.
self.assertRaises(ValueError, t.isoformat, timespec='\ud800')
# str is ISO format with the separator forced to a blank.
self.assertEqual(str(t), "0001-02-03 04:05:01.000123")
t = self.theclass(1, 2, 3, 4, 5, 1, 999500, tzinfo=timezone.utc)
self.assertEqual(t.isoformat(timespec='milliseconds'), "0001-02-03T04:05:01.999+00:00")
t = self.theclass(1, 2, 3, 4, 5, 1, 999500)
self.assertEqual(t.isoformat(timespec='milliseconds'), "0001-02-03T04:05:01.999")
t = self.theclass(1, 2, 3, 4, 5, 1)
self.assertEqual(t.isoformat(timespec='auto'), "0001-02-03T04:05:01")
self.assertEqual(t.isoformat(timespec='milliseconds'), "0001-02-03T04:05:01.000")
self.assertEqual(t.isoformat(timespec='microseconds'), "0001-02-03T04:05:01.000000")
t = self.theclass(2, 3, 2)
self.assertEqual(t.isoformat(), "0002-03-02T00:00:00")
self.assertEqual(t.isoformat('T'), "0002-03-02T00:00:00")
self.assertEqual(t.isoformat(' '), "0002-03-02 00:00:00")
# str is ISO format with the separator forced to a blank.
self.assertEqual(str(t), "0002-03-02 00:00:00")
# ISO format with timezone
tz = FixedOffset(timedelta(seconds=16), 'XXX')
t = self.theclass(2, 3, 2, tzinfo=tz)
self.assertEqual(t.isoformat(), "0002-03-02T00:00:00+00:00:16")
def test_isoformat_timezone(self):
tzoffsets = [
('05:00', timedelta(hours=5)),
('02:00', timedelta(hours=2)),
('06:27', timedelta(hours=6, minutes=27)),
('12:32:30', timedelta(hours=12, minutes=32, seconds=30)),
('02:04:09.123456', timedelta(hours=2, minutes=4, seconds=9, microseconds=123456))
]
tzinfos = [
('', None),
('+00:00', timezone.utc),
('+00:00', timezone(timedelta(0))),
]
tzinfos += [
(prefix + expected, timezone(sign * td))
for expected, td in tzoffsets
for prefix, sign in [('-', -1), ('+', 1)]
]
dt_base = self.theclass(2016, 4, 1, 12, 37, 9)
exp_base = '2016-04-01T12:37:09'
for exp_tz, tzi in tzinfos:
dt = dt_base.replace(tzinfo=tzi)
exp = exp_base + exp_tz
with self.subTest(tzi=tzi):
assert dt.isoformat() == exp
def test_format(self):
dt = self.theclass(2007, 9, 10, 4, 5, 1, 123)
self.assertEqual(dt.__format__(''), str(dt))
with self.assertRaisesRegex(TypeError, 'must be str, not int'):
dt.__format__(123)
# check that a derived class's __str__() gets called
class A(self.theclass):
def __str__(self):
return 'A'
a = A(2007, 9, 10, 4, 5, 1, 123)
self.assertEqual(a.__format__(''), 'A')
# check that a derived class's strftime gets called
class B(self.theclass):
def strftime(self, format_spec):
return 'B'
b = B(2007, 9, 10, 4, 5, 1, 123)
self.assertEqual(b.__format__(''), str(dt))
for fmt in ["m:%m d:%d y:%y",
"m:%m d:%d y:%y H:%H M:%M S:%S",
"%z %Z",
]:
self.assertEqual(dt.__format__(fmt), dt.strftime(fmt))
self.assertEqual(a.__format__(fmt), dt.strftime(fmt))
self.assertEqual(b.__format__(fmt), 'B')
def test_more_ctime(self):
# Test fields that TestDate doesn't touch.
import time
t = self.theclass(2002, 3, 2, 18, 3, 5, 123)
self.assertEqual(t.ctime(), "Sat Mar 2 18:03:05 2002")
# Oops! The next line fails on Win2K under MSVC 6, so it's commented
# out. The difference is that t.ctime() produces " 2" for the day,
# but platform ctime() produces "02" for the day. According to
# C99, t.ctime() is correct here.
# self.assertEqual(t.ctime(), time.ctime(time.mktime(t.timetuple())))
# So test a case where that difference doesn't matter.
t = self.theclass(2002, 3, 22, 18, 3, 5, 123)
self.assertEqual(t.ctime(), time.ctime(time.mktime(t.timetuple())))
def test_tz_independent_comparing(self):
dt1 = self.theclass(2002, 3, 1, 9, 0, 0)
dt2 = self.theclass(2002, 3, 1, 10, 0, 0)
dt3 = self.theclass(2002, 3, 1, 9, 0, 0)
self.assertEqual(dt1, dt3)
self.assertTrue(dt2 > dt3)
        # Make sure comparison doesn't forget microseconds, and isn't done
        # via comparing a float timestamp (an IEEE double doesn't have enough
        # precision to span microsecond resolution across years 1 through 9999,
        # so comparing via timestamp would necessarily treat some distinct
        # values as equal).
dt1 = self.theclass(MAXYEAR, 12, 31, 23, 59, 59, 999998)
us = timedelta(microseconds=1)
dt2 = dt1 + us
self.assertEqual(dt2 - dt1, us)
self.assertTrue(dt1 < dt2)
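        # Illustrative extra check: the year 1-9999 span is roughly 2**58
        # microseconds (see test_extreme_timedelta), far more than the 53-bit
        # significand of an IEEE double can represent exactly.
        self.assertGreater(
            (self.theclass.max - self.theclass.min) // timedelta(microseconds=1),
            2**53)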
def test_strftime_with_bad_tzname_replace(self):
# verify ok if tzinfo.tzname().replace() returns a non-string
class MyTzInfo(FixedOffset):
def tzname(self, dt):
class MyStr(str):
def replace(self, *args):
return None
return MyStr('name')
t = self.theclass(2005, 3, 2, 0, 0, 0, 0, MyTzInfo(3, 'name'))
self.assertRaises(TypeError, t.strftime, '%Z')
def test_bad_constructor_arguments(self):
# bad years
self.theclass(MINYEAR, 1, 1) # no exception
self.theclass(MAXYEAR, 1, 1) # no exception
self.assertRaises(ValueError, self.theclass, MINYEAR-1, 1, 1)
self.assertRaises(ValueError, self.theclass, MAXYEAR+1, 1, 1)
# bad months
self.theclass(2000, 1, 1) # no exception
self.theclass(2000, 12, 1) # no exception
self.assertRaises(ValueError, self.theclass, 2000, 0, 1)
self.assertRaises(ValueError, self.theclass, 2000, 13, 1)
# bad days
self.theclass(2000, 2, 29) # no exception
self.theclass(2004, 2, 29) # no exception
self.theclass(2400, 2, 29) # no exception
self.assertRaises(ValueError, self.theclass, 2000, 2, 30)
self.assertRaises(ValueError, self.theclass, 2001, 2, 29)
self.assertRaises(ValueError, self.theclass, 2100, 2, 29)
self.assertRaises(ValueError, self.theclass, 1900, 2, 29)
self.assertRaises(ValueError, self.theclass, 2000, 1, 0)
self.assertRaises(ValueError, self.theclass, 2000, 1, 32)
# bad hours
self.theclass(2000, 1, 31, 0) # no exception
self.theclass(2000, 1, 31, 23) # no exception
self.assertRaises(ValueError, self.theclass, 2000, 1, 31, -1)
self.assertRaises(ValueError, self.theclass, 2000, 1, 31, 24)
# bad minutes
self.theclass(2000, 1, 31, 23, 0) # no exception
self.theclass(2000, 1, 31, 23, 59) # no exception
self.assertRaises(ValueError, self.theclass, 2000, 1, 31, 23, -1)
self.assertRaises(ValueError, self.theclass, 2000, 1, 31, 23, 60)
# bad seconds
self.theclass(2000, 1, 31, 23, 59, 0) # no exception
self.theclass(2000, 1, 31, 23, 59, 59) # no exception
self.assertRaises(ValueError, self.theclass, 2000, 1, 31, 23, 59, -1)
self.assertRaises(ValueError, self.theclass, 2000, 1, 31, 23, 59, 60)
# bad microseconds
self.theclass(2000, 1, 31, 23, 59, 59, 0) # no exception
self.theclass(2000, 1, 31, 23, 59, 59, 999999) # no exception
self.assertRaises(ValueError, self.theclass,
2000, 1, 31, 23, 59, 59, -1)
self.assertRaises(ValueError, self.theclass,
2000, 1, 31, 23, 59, 59,
1000000)
# bad fold
self.assertRaises(ValueError, self.theclass,
2000, 1, 31, fold=-1)
self.assertRaises(ValueError, self.theclass,
2000, 1, 31, fold=2)
# Positional fold:
self.assertRaises(TypeError, self.theclass,
2000, 1, 31, 23, 59, 59, 0, None, 1)
def test_hash_equality(self):
d = self.theclass(2000, 12, 31, 23, 30, 17)
e = self.theclass(2000, 12, 31, 23, 30, 17)
self.assertEqual(d, e)
self.assertEqual(hash(d), hash(e))
dic = {d: 1}
dic[e] = 2
self.assertEqual(len(dic), 1)
self.assertEqual(dic[d], 2)
self.assertEqual(dic[e], 2)
d = self.theclass(2001, 1, 1, 0, 5, 17)
e = self.theclass(2001, 1, 1, 0, 5, 17)
self.assertEqual(d, e)
self.assertEqual(hash(d), hash(e))
dic = {d: 1}
dic[e] = 2
self.assertEqual(len(dic), 1)
self.assertEqual(dic[d], 2)
self.assertEqual(dic[e], 2)
def test_computations(self):
a = self.theclass(2002, 1, 31)
b = self.theclass(1956, 1, 31)
diff = a-b
self.assertEqual(diff.days, 46*365 + len(range(1956, 2002, 4)))
self.assertEqual(diff.seconds, 0)
self.assertEqual(diff.microseconds, 0)
a = self.theclass(2002, 3, 2, 17, 6)
millisec = timedelta(0, 0, 1000)
hour = timedelta(0, 3600)
day = timedelta(1)
week = timedelta(7)
self.assertEqual(a + hour, self.theclass(2002, 3, 2, 18, 6))
self.assertEqual(hour + a, self.theclass(2002, 3, 2, 18, 6))
self.assertEqual(a + 10*hour, self.theclass(2002, 3, 3, 3, 6))
self.assertEqual(a - hour, self.theclass(2002, 3, 2, 16, 6))
self.assertEqual(-hour + a, self.theclass(2002, 3, 2, 16, 6))
self.assertEqual(a - hour, a + -hour)
self.assertEqual(a - 20*hour, self.theclass(2002, 3, 1, 21, 6))
self.assertEqual(a + day, self.theclass(2002, 3, 3, 17, 6))
self.assertEqual(a - day, self.theclass(2002, 3, 1, 17, 6))
self.assertEqual(a + week, self.theclass(2002, 3, 9, 17, 6))
self.assertEqual(a - week, self.theclass(2002, 2, 23, 17, 6))
self.assertEqual(a + 52*week, self.theclass(2003, 3, 1, 17, 6))
self.assertEqual(a - 52*week, self.theclass(2001, 3, 3, 17, 6))
self.assertEqual((a + week) - a, week)
self.assertEqual((a + day) - a, day)
self.assertEqual((a + hour) - a, hour)
self.assertEqual((a + millisec) - a, millisec)
self.assertEqual((a - week) - a, -week)
self.assertEqual((a - day) - a, -day)
self.assertEqual((a - hour) - a, -hour)
self.assertEqual((a - millisec) - a, -millisec)
self.assertEqual(a - (a + week), -week)
self.assertEqual(a - (a + day), -day)
self.assertEqual(a - (a + hour), -hour)
self.assertEqual(a - (a + millisec), -millisec)
self.assertEqual(a - (a - week), week)
self.assertEqual(a - (a - day), day)
self.assertEqual(a - (a - hour), hour)
self.assertEqual(a - (a - millisec), millisec)
self.assertEqual(a + (week + day + hour + millisec),
self.theclass(2002, 3, 10, 18, 6, 0, 1000))
self.assertEqual(a + (week + day + hour + millisec),
(((a + week) + day) + hour) + millisec)
self.assertEqual(a - (week + day + hour + millisec),
self.theclass(2002, 2, 22, 16, 5, 59, 999000))
self.assertEqual(a - (week + day + hour + millisec),
(((a - week) - day) - hour) - millisec)
# Add/sub ints or floats should be illegal
for i in 1, 1.0:
self.assertRaises(TypeError, lambda: a+i)
self.assertRaises(TypeError, lambda: a-i)
self.assertRaises(TypeError, lambda: i+a)
self.assertRaises(TypeError, lambda: i-a)
# delta - datetime is senseless.
self.assertRaises(TypeError, lambda: day - a)
# mixing datetime and (delta or datetime) via * or // is senseless
self.assertRaises(TypeError, lambda: day * a)
self.assertRaises(TypeError, lambda: a * day)
self.assertRaises(TypeError, lambda: day // a)
self.assertRaises(TypeError, lambda: a // day)
self.assertRaises(TypeError, lambda: a * a)
self.assertRaises(TypeError, lambda: a // a)
# datetime + datetime is senseless
self.assertRaises(TypeError, lambda: a + a)
def test_pickling(self):
args = 6, 7, 23, 20, 59, 1, 64**2
orig = self.theclass(*args)
for pickler, unpickler, proto in pickle_choices:
green = pickler.dumps(orig, proto)
derived = unpickler.loads(green)
self.assertEqual(orig, derived)
self.assertEqual(orig.__reduce__(), orig.__reduce_ex__(2))
def test_more_pickling(self):
a = self.theclass(2003, 2, 7, 16, 48, 37, 444116)
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
s = pickle.dumps(a, proto)
b = pickle.loads(s)
self.assertEqual(b.year, 2003)
self.assertEqual(b.month, 2)
self.assertEqual(b.day, 7)
def test_pickling_subclass_datetime(self):
args = 6, 7, 23, 20, 59, 1, 64**2
orig = SubclassDatetime(*args)
for pickler, unpickler, proto in pickle_choices:
green = pickler.dumps(orig, proto)
derived = unpickler.loads(green)
self.assertEqual(orig, derived)
self.assertTrue(isinstance(derived, SubclassDatetime))
def test_compat_unpickle(self):
tests = [
b'cdatetime\ndatetime\n('
b"S'\\x07\\xdf\\x0b\\x1b\\x14;\\x01\\x00\\x10\\x00'\ntR.",
b'cdatetime\ndatetime\n('
b'U\n\x07\xdf\x0b\x1b\x14;\x01\x00\x10\x00tR.',
b'\x80\x02cdatetime\ndatetime\n'
b'U\n\x07\xdf\x0b\x1b\x14;\x01\x00\x10\x00\x85R.',
]
args = 2015, 11, 27, 20, 59, 1, 64**2
expected = self.theclass(*args)
for data in tests:
for loads in pickle_loads:
derived = loads(data, encoding='latin1')
self.assertEqual(derived, expected)
def test_more_compare(self):
# The test_compare() inherited from TestDate covers the error cases.
# We just want to test lexicographic ordering on the members datetime
# has that date lacks.
args = [2000, 11, 29, 20, 58, 16, 999998]
t1 = self.theclass(*args)
t2 = self.theclass(*args)
self.assertEqual(t1, t2)
self.assertTrue(t1 <= t2)
self.assertTrue(t1 >= t2)
self.assertFalse(t1 != t2)
self.assertFalse(t1 < t2)
self.assertFalse(t1 > t2)
for i in range(len(args)):
newargs = args[:]
newargs[i] = args[i] + 1
t2 = self.theclass(*newargs) # this is larger than t1
self.assertTrue(t1 < t2)
self.assertTrue(t2 > t1)
self.assertTrue(t1 <= t2)
self.assertTrue(t2 >= t1)
self.assertTrue(t1 != t2)
self.assertTrue(t2 != t1)
self.assertFalse(t1 == t2)
self.assertFalse(t2 == t1)
self.assertFalse(t1 > t2)
self.assertFalse(t2 < t1)
self.assertFalse(t1 >= t2)
self.assertFalse(t2 <= t1)
# A helper for timestamp constructor tests.
def verify_field_equality(self, expected, got):
self.assertEqual(expected.tm_year, got.year)
self.assertEqual(expected.tm_mon, got.month)
self.assertEqual(expected.tm_mday, got.day)
self.assertEqual(expected.tm_hour, got.hour)
self.assertEqual(expected.tm_min, got.minute)
self.assertEqual(expected.tm_sec, got.second)
def test_fromtimestamp(self):
import time
ts = time.time()
expected = time.localtime(ts)
got = self.theclass.fromtimestamp(ts)
self.verify_field_equality(expected, got)
def test_utcfromtimestamp(self):
import time
ts = time.time()
expected = time.gmtime(ts)
got = self.theclass.utcfromtimestamp(ts)
self.verify_field_equality(expected, got)
# Run with US-style DST rules: DST begins 2 a.m. on second Sunday in
# March (M3.2.0) and ends 2 a.m. on first Sunday in November (M11.1.0).
@support.run_with_tz('EST+05EDT,M3.2.0,M11.1.0')
def test_timestamp_naive(self):
t = self.theclass(1970, 1, 1)
self.assertEqual(t.timestamp(), 18000.0)
t = self.theclass(1970, 1, 1, 1, 2, 3, 4)
self.assertEqual(t.timestamp(),
18000.0 + 3600 + 2*60 + 3 + 4*1e-6)
# Missing hour
t0 = self.theclass(2012, 3, 11, 2, 30)
t1 = t0.replace(fold=1)
self.assertEqual(self.theclass.fromtimestamp(t1.timestamp()),
t0 - timedelta(hours=1))
self.assertEqual(self.theclass.fromtimestamp(t0.timestamp()),
t1 + timedelta(hours=1))
# Ambiguous hour defaults to DST
t = self.theclass(2012, 11, 4, 1, 30)
self.assertEqual(self.theclass.fromtimestamp(t.timestamp()), t)
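        # (With fold=0, the default, an ambiguous wall time maps to the
        # earlier of its two UTC moments, which is the DST one in this
        # US-style transition; see PEP 495.)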
# Timestamp may raise an overflow error on some platforms
# XXX: Do we care to support the first and last year?
        for t in [self.theclass(2, 1, 1), self.theclass(9998, 12, 12)]:
try:
s = t.timestamp()
except OverflowError:
pass
else:
self.assertEqual(self.theclass.fromtimestamp(s), t)
def test_timestamp_aware(self):
t = self.theclass(1970, 1, 1, tzinfo=timezone.utc)
self.assertEqual(t.timestamp(), 0.0)
t = self.theclass(1970, 1, 1, 1, 2, 3, 4, tzinfo=timezone.utc)
self.assertEqual(t.timestamp(),
3600 + 2*60 + 3 + 4*1e-6)
t = self.theclass(1970, 1, 1, 1, 2, 3, 4,
tzinfo=timezone(timedelta(hours=-5), 'EST'))
self.assertEqual(t.timestamp(),
18000 + 3600 + 2*60 + 3 + 4*1e-6)
@support.run_with_tz('MSK-03') # Something east of Greenwich
def test_microsecond_rounding(self):
for fts in [self.theclass.fromtimestamp,
self.theclass.utcfromtimestamp]:
zero = fts(0)
self.assertEqual(zero.second, 0)
self.assertEqual(zero.microsecond, 0)
one = fts(1e-6)
try:
minus_one = fts(-1e-6)
except OSError:
# localtime(-1) and gmtime(-1) is not supported on Windows
pass
else:
self.assertEqual(minus_one.second, 59)
self.assertEqual(minus_one.microsecond, 999999)
t = fts(-1e-8)
self.assertEqual(t, zero)
t = fts(-9e-7)
self.assertEqual(t, minus_one)
t = fts(-1e-7)
self.assertEqual(t, zero)
t = fts(-1/2**7)
self.assertEqual(t.second, 59)
self.assertEqual(t.microsecond, 992188)
t = fts(1e-7)
self.assertEqual(t, zero)
t = fts(9e-7)
self.assertEqual(t, one)
t = fts(0.99999949)
self.assertEqual(t.second, 0)
self.assertEqual(t.microsecond, 999999)
t = fts(0.9999999)
self.assertEqual(t.second, 1)
self.assertEqual(t.microsecond, 0)
t = fts(1/2**7)
self.assertEqual(t.second, 0)
self.assertEqual(t.microsecond, 7812)
def test_timestamp_limits(self):
# minimum timestamp
min_dt = self.theclass.min.replace(tzinfo=timezone.utc)
min_ts = min_dt.timestamp()
try:
# date 0001-01-01 00:00:00+00:00: timestamp=-62135596800
self.assertEqual(self.theclass.fromtimestamp(min_ts, tz=timezone.utc),
min_dt)
except (OverflowError, OSError) as exc:
# the date 0001-01-01 doesn't fit into 32-bit time_t,
            # or the platform doesn't support such an old date
self.skipTest(str(exc))
# maximum timestamp: set seconds to zero to avoid rounding issues
max_dt = self.theclass.max.replace(tzinfo=timezone.utc,
second=0, microsecond=0)
max_ts = max_dt.timestamp()
# date 9999-12-31 23:59:00+00:00: timestamp 253402300740
self.assertEqual(self.theclass.fromtimestamp(max_ts, tz=timezone.utc),
max_dt)
        # offset by more than a year's worth of seconds so that the resulting
        # date falls outside datetime.datetime's supported range
delta = 3600 * 24 * 400
# too small
ts = min_ts - delta
        # converting a Python int to C time_t can raise an OverflowError,
# especially on 32-bit platforms.
with self.assertRaises((ValueError, OverflowError)):
self.theclass.fromtimestamp(ts)
with self.assertRaises((ValueError, OverflowError)):
self.theclass.utcfromtimestamp(ts)
# too big
ts = max_dt.timestamp() + delta
with self.assertRaises((ValueError, OverflowError)):
self.theclass.fromtimestamp(ts)
with self.assertRaises((ValueError, OverflowError)):
self.theclass.utcfromtimestamp(ts)
def test_insane_fromtimestamp(self):
# It's possible that some platform maps time_t to double,
# and that this test will fail there. This test should
# exempt such platforms (provided they return reasonable
# results!).
for insane in -1e200, 1e200:
self.assertRaises(OverflowError, self.theclass.fromtimestamp,
insane)
def test_insane_utcfromtimestamp(self):
# It's possible that some platform maps time_t to double,
# and that this test will fail there. This test should
# exempt such platforms (provided they return reasonable
# results!).
for insane in -1e200, 1e200:
self.assertRaises(OverflowError, self.theclass.utcfromtimestamp,
insane)
@unittest.skipIf(sys.platform == "win32", "Windows doesn't accept negative timestamps")
def test_negative_float_fromtimestamp(self):
# The result is tz-dependent; at least test that this doesn't
# fail (like it did before bug 1646728 was fixed).
self.theclass.fromtimestamp(-1.05)
@unittest.skipIf(sys.platform == "win32", "Windows doesn't accept negative timestamps")
def test_negative_float_utcfromtimestamp(self):
d = self.theclass.utcfromtimestamp(-1.05)
self.assertEqual(d, self.theclass(1969, 12, 31, 23, 59, 58, 950000))
def test_utcnow(self):
import time
# Call it a success if utcnow() and utcfromtimestamp() are within
# a second of each other.
tolerance = timedelta(seconds=1)
for dummy in range(3):
from_now = self.theclass.utcnow()
from_timestamp = self.theclass.utcfromtimestamp(time.time())
if abs(from_timestamp - from_now) <= tolerance:
break
# Else try again a few times.
self.assertLessEqual(abs(from_timestamp - from_now), tolerance)
def test_strptime(self):
string = '2004-12-01 13:02:47.197'
format = '%Y-%m-%d %H:%M:%S.%f'
expected = _strptime._strptime_datetime(self.theclass, string, format)
got = self.theclass.strptime(string, format)
self.assertEqual(expected, got)
self.assertIs(type(expected), self.theclass)
self.assertIs(type(got), self.theclass)
# bpo-34482: Check that surrogates are handled properly.
inputs = [
('2004-12-01\ud80013:02:47.197', '%Y-%m-%d\ud800%H:%M:%S.%f'),
('2004\ud80012-01 13:02:47.197', '%Y\ud800%m-%d %H:%M:%S.%f'),
('2004-12-01 13:02\ud80047.197', '%Y-%m-%d %H:%M\ud800%S.%f'),
]
for string, format in inputs:
with self.subTest(string=string, format=format):
expected = _strptime._strptime_datetime(self.theclass, string,
format)
got = self.theclass.strptime(string, format)
self.assertEqual(expected, got)
strptime = self.theclass.strptime
self.assertEqual(strptime("+0002", "%z").utcoffset(), 2 * MINUTE)
self.assertEqual(strptime("-0002", "%z").utcoffset(), -2 * MINUTE)
self.assertEqual(
strptime("-00:02:01.000003", "%z").utcoffset(),
-timedelta(minutes=2, seconds=1, microseconds=3)
)
# Only local timezone and UTC are supported
for tzseconds, tzname in ((0, 'UTC'), (0, 'GMT'),
(-_time.timezone, _time.tzname[0])):
if tzseconds < 0:
sign = '-'
seconds = -tzseconds
else:
                sign = '+'
seconds = tzseconds
hours, minutes = divmod(seconds//60, 60)
dtstr = "{}{:02d}{:02d} {}".format(sign, hours, minutes, tzname)
dt = strptime(dtstr, "%z %Z")
self.assertEqual(dt.utcoffset(), timedelta(seconds=tzseconds))
self.assertEqual(dt.tzname(), tzname)
        # Can produce an inconsistent datetime
dtstr, fmt = "+1234 UTC", "%z %Z"
dt = strptime(dtstr, fmt)
self.assertEqual(dt.utcoffset(), 12 * HOUR + 34 * MINUTE)
self.assertEqual(dt.tzname(), 'UTC')
# yet will roundtrip
self.assertEqual(dt.strftime(fmt), dtstr)
# Produce naive datetime if no %z is provided
self.assertEqual(strptime("UTC", "%Z").tzinfo, None)
with self.assertRaises(ValueError): strptime("-2400", "%z")
with self.assertRaises(ValueError): strptime("-000", "%z")
with self.assertRaises(ValueError): strptime("z", "%z")
def test_strptime_single_digit(self):
# bpo-34903: Check that single digit dates and times are allowed.
strptime = self.theclass.strptime
with self.assertRaises(ValueError):
# %y does require two digits.
newdate = strptime('01/02/3 04:05:06', '%d/%m/%y %H:%M:%S')
dt1 = self.theclass(2003, 2, 1, 4, 5, 6)
dt2 = self.theclass(2003, 1, 2, 4, 5, 6)
dt3 = self.theclass(2003, 2, 1, 0, 0, 0)
dt4 = self.theclass(2003, 1, 25, 0, 0, 0)
inputs = [
('%d', '1/02/03 4:5:6', '%d/%m/%y %H:%M:%S', dt1),
('%m', '01/2/03 4:5:6', '%d/%m/%y %H:%M:%S', dt1),
('%H', '01/02/03 4:05:06', '%d/%m/%y %H:%M:%S', dt1),
('%M', '01/02/03 04:5:06', '%d/%m/%y %H:%M:%S', dt1),
('%S', '01/02/03 04:05:6', '%d/%m/%y %H:%M:%S', dt1),
            ('%j', '2/03 04am:05:06', '%j/%y %I%p:%M:%S', dt2),
            ('%I', '02/03 4am:05:06', '%j/%y %I%p:%M:%S', dt2),
('%w', '6/04/03', '%w/%U/%y', dt3),
# %u requires a single digit.
('%W', '6/4/2003', '%u/%W/%Y', dt3),
('%V', '6/4/2003', '%u/%V/%G', dt4),
]
for reason, string, format, target in inputs:
reason = 'test single digit ' + reason
with self.subTest(reason=reason,
string=string,
format=format,
target=target):
newdate = strptime(string, format)
self.assertEqual(newdate, target, msg=reason)
def test_more_timetuple(self):
# This tests fields beyond those tested by the TestDate.test_timetuple.
t = self.theclass(2004, 12, 31, 6, 22, 33)
self.assertEqual(t.timetuple(), (2004, 12, 31, 6, 22, 33, 4, 366, -1))
self.assertEqual(t.timetuple(),
(t.year, t.month, t.day,
t.hour, t.minute, t.second,
t.weekday(),
t.toordinal() - date(t.year, 1, 1).toordinal() + 1,
-1))
tt = t.timetuple()
self.assertEqual(tt.tm_year, t.year)
self.assertEqual(tt.tm_mon, t.month)
self.assertEqual(tt.tm_mday, t.day)
self.assertEqual(tt.tm_hour, t.hour)
self.assertEqual(tt.tm_min, t.minute)
self.assertEqual(tt.tm_sec, t.second)
self.assertEqual(tt.tm_wday, t.weekday())
self.assertEqual(tt.tm_yday, t.toordinal() -
date(t.year, 1, 1).toordinal() + 1)
self.assertEqual(tt.tm_isdst, -1)
def test_more_strftime(self):
# This tests fields beyond those tested by the TestDate.test_strftime.
t = self.theclass(2004, 12, 31, 6, 22, 33, 47)
self.assertEqual(t.strftime("%m %d %y %f %S %M %H %j"),
"12 31 04 000047 33 22 06 366")
for (s, us), z in [((33, 123), "33.000123"), ((33, 0), "33"),]:
tz = timezone(-timedelta(hours=2, seconds=s, microseconds=us))
t = t.replace(tzinfo=tz)
self.assertEqual(t.strftime("%z"), "-0200" + z)
# bpo-34482: Check that surrogates don't cause a crash.
try:
t.strftime('%y\ud800%m %H\ud800%M')
except UnicodeEncodeError:
pass
def test_extract(self):
dt = self.theclass(2002, 3, 4, 18, 45, 3, 1234)
self.assertEqual(dt.date(), date(2002, 3, 4))
self.assertEqual(dt.time(), time(18, 45, 3, 1234))
def test_combine(self):
d = date(2002, 3, 4)
t = time(18, 45, 3, 1234)
expected = self.theclass(2002, 3, 4, 18, 45, 3, 1234)
combine = self.theclass.combine
dt = combine(d, t)
self.assertEqual(dt, expected)
dt = combine(time=t, date=d)
self.assertEqual(dt, expected)
self.assertEqual(d, dt.date())
self.assertEqual(t, dt.time())
self.assertEqual(dt, combine(dt.date(), dt.time()))
self.assertRaises(TypeError, combine) # need an arg
self.assertRaises(TypeError, combine, d) # need two args
self.assertRaises(TypeError, combine, t, d) # args reversed
self.assertRaises(TypeError, combine, d, t, 1) # wrong tzinfo type
self.assertRaises(TypeError, combine, d, t, 1, 2) # too many args
self.assertRaises(TypeError, combine, "date", "time") # wrong types
self.assertRaises(TypeError, combine, d, "time") # wrong type
self.assertRaises(TypeError, combine, "date", t) # wrong type
# tzinfo= argument
dt = combine(d, t, timezone.utc)
self.assertIs(dt.tzinfo, timezone.utc)
dt = combine(d, t, tzinfo=timezone.utc)
self.assertIs(dt.tzinfo, timezone.utc)
t = time()
dt = combine(dt, t)
self.assertEqual(dt.date(), d)
self.assertEqual(dt.time(), t)
def test_replace(self):
cls = self.theclass
args = [1, 2, 3, 4, 5, 6, 7]
base = cls(*args)
self.assertEqual(base, base.replace())
i = 0
for name, newval in (("year", 2),
("month", 3),
("day", 4),
("hour", 5),
("minute", 6),
("second", 7),
("microsecond", 8)):
newargs = args[:]
newargs[i] = newval
expected = cls(*newargs)
got = base.replace(**{name: newval})
self.assertEqual(expected, got)
i += 1
# Out of bounds.
base = cls(2000, 2, 29)
self.assertRaises(ValueError, base.replace, year=2001)
@support.run_with_tz('EDT4')
def test_astimezone(self):
dt = self.theclass.now()
f = FixedOffset(44, "0044")
dt_utc = dt.replace(tzinfo=timezone(timedelta(hours=-4), 'EDT'))
self.assertEqual(dt.astimezone(), dt_utc) # naive
self.assertRaises(TypeError, dt.astimezone, f, f) # too many args
self.assertRaises(TypeError, dt.astimezone, dt) # arg wrong type
dt_f = dt.replace(tzinfo=f) + timedelta(hours=4, minutes=44)
self.assertEqual(dt.astimezone(f), dt_f) # naive
self.assertEqual(dt.astimezone(tz=f), dt_f) # naive
class Bogus(tzinfo):
def utcoffset(self, dt): return None
def dst(self, dt): return timedelta(0)
bog = Bogus()
self.assertRaises(ValueError, dt.astimezone, bog) # naive
self.assertEqual(dt.replace(tzinfo=bog).astimezone(f), dt_f)
class AlsoBogus(tzinfo):
def utcoffset(self, dt): return timedelta(0)
def dst(self, dt): return None
alsobog = AlsoBogus()
self.assertRaises(ValueError, dt.astimezone, alsobog) # also naive
class Broken(tzinfo):
def utcoffset(self, dt): return 1
def dst(self, dt): return 1
broken = Broken()
dt_broken = dt.replace(tzinfo=broken)
with self.assertRaises(TypeError):
dt_broken.astimezone()
def test_subclass_datetime(self):
class C(self.theclass):
theAnswer = 42
def __new__(cls, *args, **kws):
temp = kws.copy()
extra = temp.pop('extra')
result = self.theclass.__new__(cls, *args, **temp)
result.extra = extra
return result
def newmeth(self, start):
return start + self.year + self.month + self.second
args = 2003, 4, 14, 12, 13, 41
dt1 = self.theclass(*args)
dt2 = C(*args, **{'extra': 7})
self.assertEqual(dt2.__class__, C)
self.assertEqual(dt2.theAnswer, 42)
self.assertEqual(dt2.extra, 7)
self.assertEqual(dt1.toordinal(), dt2.toordinal())
self.assertEqual(dt2.newmeth(-7), dt1.year + dt1.month +
dt1.second - 7)
def test_subclass_alternate_constructors_datetime(self):
# Test that alternate constructors call the constructor
class DateTimeSubclass(self.theclass):
def __new__(cls, *args, **kwargs):
result = self.theclass.__new__(cls, *args, **kwargs)
result.extra = 7
return result
args = (2003, 4, 14, 12, 30, 15, 123456)
d_isoformat = '2003-04-14T12:30:15.123456' # Equivalent isoformat()
utc_ts = 1050323415.123456 # UTC timestamp
base_d = DateTimeSubclass(*args)
self.assertIsInstance(base_d, DateTimeSubclass)
self.assertEqual(base_d.extra, 7)
# Timestamp depends on time zone, so we'll calculate the equivalent here
ts = base_d.timestamp()
test_cases = [
('fromtimestamp', (ts,), base_d),
# See https://bugs.python.org/issue32417
('fromtimestamp', (ts, timezone.utc),
base_d.astimezone(timezone.utc)),
('utcfromtimestamp', (utc_ts,), base_d),
('fromisoformat', (d_isoformat,), base_d),
('strptime', (d_isoformat, '%Y-%m-%dT%H:%M:%S.%f'), base_d),
('combine', (date(*args[0:3]), time(*args[3:])), base_d),
]
for constr_name, constr_args, expected in test_cases:
for base_obj in (DateTimeSubclass, base_d):
# Test both the classmethod and method
with self.subTest(base_obj_type=type(base_obj),
constr_name=constr_name):
constructor = getattr(base_obj, constr_name)
dt = constructor(*constr_args)
# Test that it creates the right subclass
self.assertIsInstance(dt, DateTimeSubclass)
# Test that it's equal to the base object
self.assertEqual(dt, expected)
# Test that it called the constructor
self.assertEqual(dt.extra, 7)
def test_subclass_now(self):
# Test that alternate constructors call the constructor
class DateTimeSubclass(self.theclass):
def __new__(cls, *args, **kwargs):
result = self.theclass.__new__(cls, *args, **kwargs)
result.extra = 7
return result
test_cases = [
('now', 'now', {}),
('utcnow', 'utcnow', {}),
('now_utc', 'now', {'tz': timezone.utc}),
('now_fixed', 'now', {'tz': timezone(timedelta(hours=-5), "EST")}),
]
for name, meth_name, kwargs in test_cases:
with self.subTest(name):
constr = getattr(DateTimeSubclass, meth_name)
dt = constr(**kwargs)
self.assertIsInstance(dt, DateTimeSubclass)
self.assertEqual(dt.extra, 7)
def test_fromisoformat_datetime(self):
# Test that isoformat() is reversible
base_dates = [
(1, 1, 1),
(1900, 1, 1),
(2004, 11, 12),
(2017, 5, 30)
]
base_times = [
(0, 0, 0, 0),
(0, 0, 0, 241000),
(0, 0, 0, 234567),
(12, 30, 45, 234567)
]
separators = [' ', 'T']
tzinfos = [None, timezone.utc,
timezone(timedelta(hours=-5)),
timezone(timedelta(hours=2))]
dts = [self.theclass(*date_tuple, *time_tuple, tzinfo=tzi)
for date_tuple in base_dates
for time_tuple in base_times
for tzi in tzinfos]
for dt in dts:
for sep in separators:
dtstr = dt.isoformat(sep=sep)
with self.subTest(dtstr=dtstr):
dt_rt = self.theclass.fromisoformat(dtstr)
self.assertEqual(dt, dt_rt)
def test_fromisoformat_timezone(self):
base_dt = self.theclass(2014, 12, 30, 12, 30, 45, 217456)
tzoffsets = [
timedelta(hours=5), timedelta(hours=2),
timedelta(hours=6, minutes=27),
timedelta(hours=12, minutes=32, seconds=30),
timedelta(hours=2, minutes=4, seconds=9, microseconds=123456)
]
tzoffsets += [-1 * td for td in tzoffsets]
tzinfos = [None, timezone.utc,
timezone(timedelta(hours=0))]
tzinfos += [timezone(td) for td in tzoffsets]
for tzi in tzinfos:
dt = base_dt.replace(tzinfo=tzi)
dtstr = dt.isoformat()
with self.subTest(tstr=dtstr):
dt_rt = self.theclass.fromisoformat(dtstr)
self.assertEqual(dt, dt_rt)
def test_fromisoformat_separators(self):
separators = [
' ', 'T', '\u007f', # 1-byte widths
'\u0080', 'ʁ', # 2-byte widths
'ᛇ', '時', # 3-byte widths
'🐍', # 4-byte widths
'\ud800', # bpo-34454: Surrogate code point
]
for sep in separators:
dt = self.theclass(2018, 1, 31, 23, 59, 47, 124789)
dtstr = dt.isoformat(sep=sep)
with self.subTest(dtstr=dtstr):
dt_rt = self.theclass.fromisoformat(dtstr)
self.assertEqual(dt, dt_rt)
def test_fromisoformat_ambiguous(self):
# Test strings like 2018-01-31+12:15 (where +12:15 is not a time zone)
separators = ['+', '-']
for sep in separators:
dt = self.theclass(2018, 1, 31, 12, 15)
dtstr = dt.isoformat(sep=sep)
with self.subTest(dtstr=dtstr):
dt_rt = self.theclass.fromisoformat(dtstr)
self.assertEqual(dt, dt_rt)
def test_fromisoformat_timespecs(self):
datetime_bases = [
(2009, 12, 4, 8, 17, 45, 123456),
(2009, 12, 4, 8, 17, 45, 0)]
tzinfos = [None, timezone.utc,
timezone(timedelta(hours=-5)),
timezone(timedelta(hours=2)),
timezone(timedelta(hours=6, minutes=27))]
timespecs = ['hours', 'minutes', 'seconds',
'milliseconds', 'microseconds']
for ip, ts in enumerate(timespecs):
for tzi in tzinfos:
for dt_tuple in datetime_bases:
if ts == 'milliseconds':
new_microseconds = 1000 * (dt_tuple[6] // 1000)
dt_tuple = dt_tuple[0:6] + (new_microseconds,)
dt = self.theclass(*(dt_tuple[0:(4 + ip)]), tzinfo=tzi)
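# (Illustration of the slicing above: for timespec='hours' only year..hour are
# passed, so the finer fields default to 0; for 'milliseconds' the microsecond
# value has already been rounded to whole milliseconds, and for 'microseconds'
# every field is passed. Either way isoformat(timespec=ts) drops nothing, so
# the fromisoformat() round trip below is exact.)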
dtstr = dt.isoformat(timespec=ts)
with self.subTest(dtstr=dtstr):
dt_rt = self.theclass.fromisoformat(dtstr)
self.assertEqual(dt, dt_rt)
def test_fromisoformat_fails_datetime(self):
# Test that fromisoformat() fails on invalid values
bad_strs = [
'', # Empty string
'\ud800', # bpo-34454: Surrogate code point
'2009.04-19T03', # Wrong first separator
'2009-04.19T03', # Wrong second separator
'2009-04-19T0a', # Invalid hours
'2009-04-19T03:1a:45', # Invalid minutes
'2009-04-19T03:15:4a', # Invalid seconds
'2009-04-19T03;15:45', # Bad first time separator
'2009-04-19T03:15;45', # Bad second time separator
'2009-04-19T03:15:4500:00', # Bad time zone separator
'2009-04-19T03:15:45.2345', # Too many digits for milliseconds
'2009-04-19T03:15:45.1234567', # Too many digits for microseconds
'2009-04-19T03:15:45.123456+24:30', # Invalid time zone offset
'2009-04-19T03:15:45.123456-24:30', # Invalid negative offset
'2009-04-10ᛇᛇᛇᛇᛇ12:15', # Too many unicode separators
'2009-04\ud80010T12:15', # Surrogate char in date
'2009-04-10T12\ud80015', # Surrogate char in time
'2009-04-19T1', # Incomplete hours
'2009-04-19T12:3', # Incomplete minutes
'2009-04-19T12:30:4', # Incomplete seconds
'2009-04-19T12:', # Ends with time separator
'2009-04-19T12:30:', # Ends with time separator
'2009-04-19T12:30:45.', # Ends with time separator
'2009-04-19T12:30:45.123456+', # Ends with time zone separator
'2009-04-19T12:30:45.123456-', # Ends with time zone separator
'2009-04-19T12:30:45.123456-05:00a', # Extra text
'2009-04-19T12:30:45.123-05:00a', # Extra text
'2009-04-19T12:30:45-05:00a', # Extra text
]
for bad_str in bad_strs:
with self.subTest(bad_str=bad_str):
with self.assertRaises(ValueError):
self.theclass.fromisoformat(bad_str)
def test_fromisoformat_fails_surrogate(self):
# Test that when fromisoformat() fails with a surrogate character as
# the separator, the error message contains the original string
dtstr = "2018-01-03\ud80001:0113"
with self.assertRaisesRegex(ValueError, re.escape(repr(dtstr))):
self.theclass.fromisoformat(dtstr)
def test_fromisoformat_utc(self):
dt_str = '2014-04-19T13:21:13+00:00'
dt = self.theclass.fromisoformat(dt_str)
self.assertIs(dt.tzinfo, timezone.utc)
def test_fromisoformat_subclass(self):
class DateTimeSubclass(self.theclass):
pass
dt = DateTimeSubclass(2014, 12, 14, 9, 30, 45, 457390,
tzinfo=timezone(timedelta(hours=10, minutes=45)))
dt_rt = DateTimeSubclass.fromisoformat(dt.isoformat())
self.assertEqual(dt, dt_rt)
self.assertIsInstance(dt_rt, DateTimeSubclass)
class TestSubclassDateTime(TestDateTime):
theclass = SubclassDatetime
# Override tests not designed for subclass
@unittest.skip('not appropriate for subclasses')
def test_roundtrip(self):
pass
class SubclassTime(time):
sub_var = 1
class TestTime(HarmlessMixedComparison, unittest.TestCase):
theclass = time
def test_basic_attributes(self):
t = self.theclass(12, 0)
self.assertEqual(t.hour, 12)
self.assertEqual(t.minute, 0)
self.assertEqual(t.second, 0)
self.assertEqual(t.microsecond, 0)
def test_basic_attributes_nonzero(self):
# Make sure all attributes are non-zero so bugs in
# bit-shifting access show up.
t = self.theclass(12, 59, 59, 8000)
self.assertEqual(t.hour, 12)
self.assertEqual(t.minute, 59)
self.assertEqual(t.second, 59)
self.assertEqual(t.microsecond, 8000)
def test_roundtrip(self):
t = self.theclass(1, 2, 3, 4)
# Verify t -> string -> time identity.
s = repr(t)
self.assertTrue(s.startswith('datetime.'))
s = s[9:]
t2 = eval(s)
self.assertEqual(t, t2)
# Verify identity via reconstructing from pieces.
t2 = self.theclass(t.hour, t.minute, t.second,
t.microsecond)
self.assertEqual(t, t2)
def test_comparing(self):
args = [1, 2, 3, 4]
t1 = self.theclass(*args)
t2 = self.theclass(*args)
self.assertEqual(t1, t2)
self.assertTrue(t1 <= t2)
self.assertTrue(t1 >= t2)
self.assertFalse(t1 != t2)
self.assertFalse(t1 < t2)
self.assertFalse(t1 > t2)
for i in range(len(args)):
newargs = args[:]
newargs[i] = args[i] + 1
t2 = self.theclass(*newargs) # this is larger than t1
self.assertTrue(t1 < t2)
self.assertTrue(t2 > t1)
self.assertTrue(t1 <= t2)
self.assertTrue(t2 >= t1)
self.assertTrue(t1 != t2)
self.assertTrue(t2 != t1)
self.assertFalse(t1 == t2)
self.assertFalse(t2 == t1)
self.assertFalse(t1 > t2)
self.assertFalse(t2 < t1)
self.assertFalse(t1 >= t2)
self.assertFalse(t2 <= t1)
for badarg in OTHERSTUFF:
self.assertEqual(t1 == badarg, False)
self.assertEqual(t1 != badarg, True)
self.assertEqual(badarg == t1, False)
self.assertEqual(badarg != t1, True)
self.assertRaises(TypeError, lambda: t1 <= badarg)
self.assertRaises(TypeError, lambda: t1 < badarg)
self.assertRaises(TypeError, lambda: t1 > badarg)
self.assertRaises(TypeError, lambda: t1 >= badarg)
self.assertRaises(TypeError, lambda: badarg <= t1)
self.assertRaises(TypeError, lambda: badarg < t1)
self.assertRaises(TypeError, lambda: badarg > t1)
self.assertRaises(TypeError, lambda: badarg >= t1)
def test_bad_constructor_arguments(self):
# bad hours
self.theclass(0, 0) # no exception
self.theclass(23, 0) # no exception
self.assertRaises(ValueError, self.theclass, -1, 0)
self.assertRaises(ValueError, self.theclass, 24, 0)
# bad minutes
self.theclass(23, 0) # no exception
self.theclass(23, 59) # no exception
self.assertRaises(ValueError, self.theclass, 23, -1)
self.assertRaises(ValueError, self.theclass, 23, 60)
# bad seconds
self.theclass(23, 59, 0) # no exception
self.theclass(23, 59, 59) # no exception
self.assertRaises(ValueError, self.theclass, 23, 59, -1)
self.assertRaises(ValueError, self.theclass, 23, 59, 60)
# bad microseconds
self.theclass(23, 59, 59, 0) # no exception
self.theclass(23, 59, 59, 999999) # no exception
self.assertRaises(ValueError, self.theclass, 23, 59, 59, -1)
self.assertRaises(ValueError, self.theclass, 23, 59, 59, 1000000)
def test_hash_equality(self):
d = self.theclass(23, 30, 17)
e = self.theclass(23, 30, 17)
self.assertEqual(d, e)
self.assertEqual(hash(d), hash(e))
dic = {d: 1}
dic[e] = 2
self.assertEqual(len(dic), 1)
self.assertEqual(dic[d], 2)
self.assertEqual(dic[e], 2)
d = self.theclass(0, 5, 17)
e = self.theclass(0, 5, 17)
self.assertEqual(d, e)
self.assertEqual(hash(d), hash(e))
dic = {d: 1}
dic[e] = 2
self.assertEqual(len(dic), 1)
self.assertEqual(dic[d], 2)
self.assertEqual(dic[e], 2)
def test_isoformat(self):
t = self.theclass(4, 5, 1, 123)
self.assertEqual(t.isoformat(), "04:05:01.000123")
self.assertEqual(t.isoformat(), str(t))
t = self.theclass()
self.assertEqual(t.isoformat(), "00:00:00")
self.assertEqual(t.isoformat(), str(t))
t = self.theclass(microsecond=1)
self.assertEqual(t.isoformat(), "00:00:00.000001")
self.assertEqual(t.isoformat(), str(t))
t = self.theclass(microsecond=10)
self.assertEqual(t.isoformat(), "00:00:00.000010")
self.assertEqual(t.isoformat(), str(t))
t = self.theclass(microsecond=100)
self.assertEqual(t.isoformat(), "00:00:00.000100")
self.assertEqual(t.isoformat(), str(t))
t = self.theclass(microsecond=1000)
self.assertEqual(t.isoformat(), "00:00:00.001000")
self.assertEqual(t.isoformat(), str(t))
t = self.theclass(microsecond=10000)
self.assertEqual(t.isoformat(), "00:00:00.010000")
self.assertEqual(t.isoformat(), str(t))
t = self.theclass(microsecond=100000)
self.assertEqual(t.isoformat(), "00:00:00.100000")
self.assertEqual(t.isoformat(), str(t))
t = self.theclass(hour=12, minute=34, second=56, microsecond=123456)
self.assertEqual(t.isoformat(timespec='hours'), "12")
self.assertEqual(t.isoformat(timespec='minutes'), "12:34")
self.assertEqual(t.isoformat(timespec='seconds'), "12:34:56")
self.assertEqual(t.isoformat(timespec='milliseconds'), "12:34:56.123")
self.assertEqual(t.isoformat(timespec='microseconds'), "12:34:56.123456")
self.assertEqual(t.isoformat(timespec='auto'), "12:34:56.123456")
self.assertRaises(ValueError, t.isoformat, timespec='monkey')
# bpo-34482: Check that surrogates are handled properly.
self.assertRaises(ValueError, t.isoformat, timespec='\ud800')
t = self.theclass(hour=12, minute=34, second=56, microsecond=999500)
self.assertEqual(t.isoformat(timespec='milliseconds'), "12:34:56.999")
t = self.theclass(hour=12, minute=34, second=56, microsecond=0)
self.assertEqual(t.isoformat(timespec='milliseconds'), "12:34:56.000")
self.assertEqual(t.isoformat(timespec='microseconds'), "12:34:56.000000")
self.assertEqual(t.isoformat(timespec='auto'), "12:34:56")
def test_isoformat_timezone(self):
tzoffsets = [
('05:00', timedelta(hours=5)),
('02:00', timedelta(hours=2)),
('06:27', timedelta(hours=6, minutes=27)),
('12:32:30', timedelta(hours=12, minutes=32, seconds=30)),
('02:04:09.123456', timedelta(hours=2, minutes=4, seconds=9, microseconds=123456))
]
tzinfos = [
('', None),
('+00:00', timezone.utc),
('+00:00', timezone(timedelta(0))),
]
tzinfos += [
(prefix + expected, timezone(sign * td))
for expected, td in tzoffsets
for prefix, sign in [('-', -1), ('+', 1)]
]
t_base = self.theclass(12, 37, 9)
exp_base = '12:37:09'
for exp_tz, tzi in tzinfos:
t = t_base.replace(tzinfo=tzi)
exp = exp_base + exp_tz
with self.subTest(tzi=tzi):
self.assertEqual(t.isoformat(), exp)
def test_1653736(self):
# verify it doesn't accept extra keyword arguments
t = self.theclass(second=1)
self.assertRaises(TypeError, t.isoformat, foo=3)
def test_strftime(self):
t = self.theclass(1, 2, 3, 4)
self.assertEqual(t.strftime('%H %M %S %f'), "01 02 03 000004")
# A naive object replaces %z and %Z with empty strings.
self.assertEqual(t.strftime("'%z' '%Z'"), "'' ''")
# bpo-34482: Check that surrogates don't cause a crash.
try:
t.strftime('%H\ud800%M')
except UnicodeEncodeError:
pass
def test_format(self):
t = self.theclass(1, 2, 3, 4)
self.assertEqual(t.__format__(''), str(t))
with self.assertRaisesRegex(TypeError, 'must be str, not int'):
t.__format__(123)
# check that a derived class's __str__() gets called
class A(self.theclass):
def __str__(self):
return 'A'
a = A(1, 2, 3, 4)
self.assertEqual(a.__format__(''), 'A')
# check that a derived class's strftime gets called
class B(self.theclass):
def strftime(self, format_spec):
return 'B'
b = B(1, 2, 3, 4)
self.assertEqual(b.__format__(''), str(t))
for fmt in ['%H %M %S',
]:
self.assertEqual(t.__format__(fmt), t.strftime(fmt))
self.assertEqual(a.__format__(fmt), t.strftime(fmt))
self.assertEqual(b.__format__(fmt), 'B')
def test_str(self):
self.assertEqual(str(self.theclass(1, 2, 3, 4)), "01:02:03.000004")
self.assertEqual(str(self.theclass(10, 2, 3, 4000)), "10:02:03.004000")
self.assertEqual(str(self.theclass(0, 2, 3, 400000)), "00:02:03.400000")
self.assertEqual(str(self.theclass(12, 2, 3, 0)), "12:02:03")
self.assertEqual(str(self.theclass(23, 15, 0, 0)), "23:15:00")
def test_repr(self):
name = 'datetime.' + self.theclass.__name__
self.assertEqual(repr(self.theclass(1, 2, 3, 4)),
"%s(1, 2, 3, 4)" % name)
self.assertEqual(repr(self.theclass(10, 2, 3, 4000)),
"%s(10, 2, 3, 4000)" % name)
self.assertEqual(repr(self.theclass(0, 2, 3, 400000)),
"%s(0, 2, 3, 400000)" % name)
self.assertEqual(repr(self.theclass(12, 2, 3, 0)),
"%s(12, 2, 3)" % name)
self.assertEqual(repr(self.theclass(23, 15, 0, 0)),
"%s(23, 15)" % name)
def test_resolution_info(self):
self.assertIsInstance(self.theclass.min, self.theclass)
self.assertIsInstance(self.theclass.max, self.theclass)
self.assertIsInstance(self.theclass.resolution, timedelta)
self.assertTrue(self.theclass.max > self.theclass.min)
def test_pickling(self):
args = 20, 59, 16, 64**2
orig = self.theclass(*args)
for pickler, unpickler, proto in pickle_choices:
green = pickler.dumps(orig, proto)
derived = unpickler.loads(green)
self.assertEqual(orig, derived)
self.assertEqual(orig.__reduce__(), orig.__reduce_ex__(2))
def test_pickling_subclass_time(self):
args = 20, 59, 16, 64**2
orig = SubclassTime(*args)
for pickler, unpickler, proto in pickle_choices:
green = pickler.dumps(orig, proto)
derived = unpickler.loads(green)
self.assertEqual(orig, derived)
self.assertTrue(isinstance(derived, SubclassTime))
def test_compat_unpickle(self):
tests = [
(b"cdatetime\ntime\n(S'\\x14;\\x10\\x00\\x10\\x00'\ntR.",
(20, 59, 16, 64**2)),
(b'cdatetime\ntime\n(U\x06\x14;\x10\x00\x10\x00tR.',
(20, 59, 16, 64**2)),
(b'\x80\x02cdatetime\ntime\nU\x06\x14;\x10\x00\x10\x00\x85R.',
(20, 59, 16, 64**2)),
(b"cdatetime\ntime\n(S'\\x14;\\x19\\x00\\x10\\x00'\ntR.",
(20, 59, 25, 64**2)),
(b'cdatetime\ntime\n(U\x06\x14;\x19\x00\x10\x00tR.',
(20, 59, 25, 64**2)),
(b'\x80\x02cdatetime\ntime\nU\x06\x14;\x19\x00\x10\x00\x85R.',
(20, 59, 25, 64**2)),
]
for i, (data, args) in enumerate(tests):
with self.subTest(i=i):
expected = self.theclass(*args)
for loads in pickle_loads:
derived = loads(data, encoding='latin1')
self.assertEqual(derived, expected)
def test_bool(self):
# time is always True.
cls = self.theclass
self.assertTrue(cls(1))
self.assertTrue(cls(0, 1))
self.assertTrue(cls(0, 0, 1))
self.assertTrue(cls(0, 0, 0, 1))
self.assertTrue(cls(0))
self.assertTrue(cls())
def test_replace(self):
cls = self.theclass
args = [1, 2, 3, 4]
base = cls(*args)
self.assertEqual(base, base.replace())
i = 0
for name, newval in (("hour", 5),
("minute", 6),
("second", 7),
("microsecond", 8)):
newargs = args[:]
newargs[i] = newval
expected = cls(*newargs)
got = base.replace(**{name: newval})
self.assertEqual(expected, got)
i += 1
# Out of bounds.
base = cls(1)
self.assertRaises(ValueError, base.replace, hour=24)
self.assertRaises(ValueError, base.replace, minute=-1)
self.assertRaises(ValueError, base.replace, second=100)
self.assertRaises(ValueError, base.replace, microsecond=1000000)
def test_subclass_replace(self):
class TimeSubclass(self.theclass):
pass
ctime = TimeSubclass(12, 30)
self.assertIs(type(ctime.replace(hour=10)), TimeSubclass)
def test_subclass_time(self):
class C(self.theclass):
theAnswer = 42
def __new__(cls, *args, **kws):
temp = kws.copy()
extra = temp.pop('extra')
result = self.theclass.__new__(cls, *args, **temp)
result.extra = extra
return result
def newmeth(self, start):
return start + self.hour + self.second
args = 4, 5, 6
dt1 = self.theclass(*args)
dt2 = C(*args, **{'extra': 7})
self.assertEqual(dt2.__class__, C)
self.assertEqual(dt2.theAnswer, 42)
self.assertEqual(dt2.extra, 7)
self.assertEqual(dt1.isoformat(), dt2.isoformat())
self.assertEqual(dt2.newmeth(-7), dt1.hour + dt1.second - 7)
def test_backdoor_resistance(self):
# see TestDate.test_backdoor_resistance().
base = '2:59.0'
for hour_byte in ' ', '9', chr(24), '\xff':
self.assertRaises(TypeError, self.theclass,
hour_byte + base[1:])
# Good bytes, but bad tzinfo:
with self.assertRaisesRegex(TypeError, '^bad tzinfo state arg$'):
self.theclass(bytes([1] * len(base)), 'EST')
# A mixin for classes with a tzinfo= argument. Subclasses must define
# theclass as a class attribute, and theclass(1, 1, 1, tzinfo=whatever)
# must be legit (which is true for time and datetime).
class TZInfoBase:
def test_argument_passing(self):
cls = self.theclass
# A datetime passes itself on, a time passes None.
class introspective(tzinfo):
def tzname(self, dt): return dt and "real" or "none"
def utcoffset(self, dt):
return timedelta(minutes = dt and 42 or -42)
dst = utcoffset
obj = cls(1, 2, 3, tzinfo=introspective())
expected = cls is time and "none" or "real"
self.assertEqual(obj.tzname(), expected)
expected = timedelta(minutes=(cls is time and -42 or 42))
self.assertEqual(obj.utcoffset(), expected)
self.assertEqual(obj.dst(), expected)
def test_bad_tzinfo_classes(self):
cls = self.theclass
self.assertRaises(TypeError, cls, 1, 1, 1, tzinfo=12)
class NiceTry(object):
def __init__(self): pass
def utcoffset(self, dt): pass
self.assertRaises(TypeError, cls, 1, 1, 1, tzinfo=NiceTry)
class BetterTry(tzinfo):
def __init__(self): pass
def utcoffset(self, dt): pass
b = BetterTry()
t = cls(1, 1, 1, tzinfo=b)
self.assertIs(t.tzinfo, b)
def test_utc_offset_out_of_bounds(self):
class Edgy(tzinfo):
def __init__(self, offset):
self.offset = timedelta(minutes=offset)
def utcoffset(self, dt):
return self.offset
cls = self.theclass
for offset, legit in ((-1440, False),
(-1439, True),
(1439, True),
(1440, False)):
if cls is time:
t = cls(1, 2, 3, tzinfo=Edgy(offset))
elif cls is datetime:
t = cls(6, 6, 6, 1, 2, 3, tzinfo=Edgy(offset))
else:
assert 0, "impossible"
if legit:
aofs = abs(offset)
h, m = divmod(aofs, 60)
tag = "%c%02d:%02d" % (offset < 0 and '-' or '+', h, m)
if isinstance(t, datetime):
t = t.timetz()
self.assertEqual(str(t), "01:02:03" + tag)
else:
self.assertRaises(ValueError, str, t)
def test_tzinfo_classes(self):
cls = self.theclass
class C1(tzinfo):
def utcoffset(self, dt): return None
def dst(self, dt): return None
def tzname(self, dt): return None
for t in (cls(1, 1, 1),
cls(1, 1, 1, tzinfo=None),
cls(1, 1, 1, tzinfo=C1())):
self.assertIsNone(t.utcoffset())
self.assertIsNone(t.dst())
self.assertIsNone(t.tzname())
class C3(tzinfo):
def utcoffset(self, dt): return timedelta(minutes=-1439)
def dst(self, dt): return timedelta(minutes=1439)
def tzname(self, dt): return "aname"
t = cls(1, 1, 1, tzinfo=C3())
self.assertEqual(t.utcoffset(), timedelta(minutes=-1439))
self.assertEqual(t.dst(), timedelta(minutes=1439))
self.assertEqual(t.tzname(), "aname")
# Wrong types.
class C4(tzinfo):
def utcoffset(self, dt): return "aname"
def dst(self, dt): return 7
def tzname(self, dt): return 0
t = cls(1, 1, 1, tzinfo=C4())
self.assertRaises(TypeError, t.utcoffset)
self.assertRaises(TypeError, t.dst)
self.assertRaises(TypeError, t.tzname)
# Offset out of range.
class C6(tzinfo):
def utcoffset(self, dt): return timedelta(hours=-24)
def dst(self, dt): return timedelta(hours=24)
t = cls(1, 1, 1, tzinfo=C6())
self.assertRaises(ValueError, t.utcoffset)
self.assertRaises(ValueError, t.dst)
# Not a whole number of seconds.
class C7(tzinfo):
def utcoffset(self, dt): return timedelta(microseconds=61)
def dst(self, dt): return timedelta(microseconds=-81)
t = cls(1, 1, 1, tzinfo=C7())
self.assertEqual(t.utcoffset(), timedelta(microseconds=61))
self.assertEqual(t.dst(), timedelta(microseconds=-81))
def test_aware_compare(self):
cls = self.theclass
# Ensure that utcoffset() gets ignored if the comparands have
# the same tzinfo member.
class OperandDependentOffset(tzinfo):
def utcoffset(self, t):
if t.minute < 10:
# d0 and d1 equal after adjustment
return timedelta(minutes=t.minute)
else:
# d2 off in the weeds
return timedelta(minutes=59)
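# Worked illustration of the offsets above: d0 (minute=3) and d1 (minute=9)
# receive offsets of 3 and 9 minutes respectively, so both map to the same UTC
# instant, while d2 (minute=11) receives a 59-minute offset and lands 48
# minutes earlier in UTC. With identical tzinfo members, however, the loop
# below ignores utcoffset() and compares wall-clock minutes directly.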
base = cls(8, 9, 10, tzinfo=OperandDependentOffset())
d0 = base.replace(minute=3)
d1 = base.replace(minute=9)
d2 = base.replace(minute=11)
for x in d0, d1, d2:
for y in d0, d1, d2:
for op in lt, le, gt, ge, eq, ne:
got = op(x, y)
expected = op(x.minute, y.minute)
self.assertEqual(got, expected)
# However, if they're different members, utcoffset is not ignored.
# Note that a time can't actually have an operand-dependent offset,
# though (and time.utcoffset() passes None to tzinfo.utcoffset()),
# so skip this test for time.
if cls is not time:
d0 = base.replace(minute=3, tzinfo=OperandDependentOffset())
d1 = base.replace(minute=9, tzinfo=OperandDependentOffset())
d2 = base.replace(minute=11, tzinfo=OperandDependentOffset())
for x in d0, d1, d2:
for y in d0, d1, d2:
got = (x > y) - (x < y)
if (x is d0 or x is d1) and (y is d0 or y is d1):
expected = 0
elif x is y is d2:
expected = 0
elif x is d2:
expected = -1
else:
assert y is d2
expected = 1
self.assertEqual(got, expected)
# Testing time objects with a non-None tzinfo.
class TestTimeTZ(TestTime, TZInfoBase, unittest.TestCase):
theclass = time
def test_empty(self):
t = self.theclass()
self.assertEqual(t.hour, 0)
self.assertEqual(t.minute, 0)
self.assertEqual(t.second, 0)
self.assertEqual(t.microsecond, 0)
self.assertIsNone(t.tzinfo)
def test_zones(self):
est = FixedOffset(-300, "EST", 1)
utc = FixedOffset(0, "UTC", -2)
met = FixedOffset(60, "MET", 3)
t1 = time( 7, 47, tzinfo=est)
t2 = time(12, 47, tzinfo=utc)
t3 = time(13, 47, tzinfo=met)
t4 = time(microsecond=40)
t5 = time(microsecond=40, tzinfo=utc)
self.assertEqual(t1.tzinfo, est)
self.assertEqual(t2.tzinfo, utc)
self.assertEqual(t3.tzinfo, met)
self.assertIsNone(t4.tzinfo)
self.assertEqual(t5.tzinfo, utc)
self.assertEqual(t1.utcoffset(), timedelta(minutes=-300))
self.assertEqual(t2.utcoffset(), timedelta(minutes=0))
self.assertEqual(t3.utcoffset(), timedelta(minutes=60))
self.assertIsNone(t4.utcoffset())
self.assertRaises(TypeError, t1.utcoffset, "no args")
self.assertEqual(t1.tzname(), "EST")
self.assertEqual(t2.tzname(), "UTC")
self.assertEqual(t3.tzname(), "MET")
self.assertIsNone(t4.tzname())
self.assertRaises(TypeError, t1.tzname, "no args")
self.assertEqual(t1.dst(), timedelta(minutes=1))
self.assertEqual(t2.dst(), timedelta(minutes=-2))
self.assertEqual(t3.dst(), timedelta(minutes=3))
self.assertIsNone(t4.dst())
self.assertRaises(TypeError, t1.dst, "no args")
self.assertEqual(hash(t1), hash(t2))
self.assertEqual(hash(t1), hash(t3))
self.assertEqual(hash(t2), hash(t3))
self.assertEqual(t1, t2)
self.assertEqual(t1, t3)
self.assertEqual(t2, t3)
self.assertNotEqual(t4, t5) # mixed tz-aware & naive
self.assertRaises(TypeError, lambda: t4 < t5) # mixed tz-aware & naive
self.assertRaises(TypeError, lambda: t5 < t4) # mixed tz-aware & naive
self.assertEqual(str(t1), "07:47:00-05:00")
self.assertEqual(str(t2), "12:47:00+00:00")
self.assertEqual(str(t3), "13:47:00+01:00")
self.assertEqual(str(t4), "00:00:00.000040")
self.assertEqual(str(t5), "00:00:00.000040+00:00")
self.assertEqual(t1.isoformat(), "07:47:00-05:00")
self.assertEqual(t2.isoformat(), "12:47:00+00:00")
self.assertEqual(t3.isoformat(), "13:47:00+01:00")
self.assertEqual(t4.isoformat(), "00:00:00.000040")
self.assertEqual(t5.isoformat(), "00:00:00.000040+00:00")
d = 'datetime.time'
self.assertEqual(repr(t1), d + "(7, 47, tzinfo=est)")
self.assertEqual(repr(t2), d + "(12, 47, tzinfo=utc)")
self.assertEqual(repr(t3), d + "(13, 47, tzinfo=met)")
self.assertEqual(repr(t4), d + "(0, 0, 0, 40)")
self.assertEqual(repr(t5), d + "(0, 0, 0, 40, tzinfo=utc)")
self.assertEqual(t1.strftime("%H:%M:%S %%Z=%Z %%z=%z"),
"07:47:00 %Z=EST %z=-0500")
self.assertEqual(t2.strftime("%H:%M:%S %Z %z"), "12:47:00 UTC +0000")
self.assertEqual(t3.strftime("%H:%M:%S %Z %z"), "13:47:00 MET +0100")
yuck = FixedOffset(-1439, "%z %Z %%z%%Z")
t1 = time(23, 59, tzinfo=yuck)
self.assertEqual(t1.strftime("%H:%M %%Z='%Z' %%z='%z'"),
"23:59 %Z='%z %Z %%z%%Z' %z='-2359'")
# Check that an invalid tzname result raises an exception.
class Badtzname(tzinfo):
tz = 42
def tzname(self, dt): return self.tz
t = time(2, 3, 4, tzinfo=Badtzname())
self.assertEqual(t.strftime("%H:%M:%S"), "02:03:04")
self.assertRaises(TypeError, t.strftime, "%Z")
# Issue #6697:
if '_Fast' in self.__class__.__name__:
Badtzname.tz = '\ud800'
self.assertRaises(ValueError, t.strftime, "%Z")
def test_hash_edge_cases(self):
# Offsets that overflow a basic time.
t1 = self.theclass(0, 1, 2, 3, tzinfo=FixedOffset(1439, ""))
t2 = self.theclass(0, 0, 2, 3, tzinfo=FixedOffset(1438, ""))
self.assertEqual(hash(t1), hash(t2))
t1 = self.theclass(23, 58, 6, 100, tzinfo=FixedOffset(-1000, ""))
t2 = self.theclass(23, 48, 6, 100, tzinfo=FixedOffset(-1010, ""))
self.assertEqual(hash(t1), hash(t2))
def test_pickling(self):
# Try one without a tzinfo.
args = 20, 59, 16, 64**2
orig = self.theclass(*args)
for pickler, unpickler, proto in pickle_choices:
green = pickler.dumps(orig, proto)
derived = unpickler.loads(green)
self.assertEqual(orig, derived)
self.assertEqual(orig.__reduce__(), orig.__reduce_ex__(2))
# Try one with a tzinfo.
tinfo = PicklableFixedOffset(-300, 'cookie')
orig = self.theclass(5, 6, 7, tzinfo=tinfo)
for pickler, unpickler, proto in pickle_choices:
green = pickler.dumps(orig, proto)
derived = unpickler.loads(green)
self.assertEqual(orig, derived)
self.assertIsInstance(derived.tzinfo, PicklableFixedOffset)
self.assertEqual(derived.utcoffset(), timedelta(minutes=-300))
self.assertEqual(derived.tzname(), 'cookie')
self.assertEqual(orig.__reduce__(), orig.__reduce_ex__(2))
def test_compat_unpickle(self):
tests = [
b"cdatetime\ntime\n(S'\\x05\\x06\\x07\\x01\\xe2@'\n"
b"ctest.datetimetester\nPicklableFixedOffset\n(tR"
b"(dS'_FixedOffset__offset'\ncdatetime\ntimedelta\n"
b"(I-1\nI68400\nI0\ntRs"
b"S'_FixedOffset__dstoffset'\nNs"
b"S'_FixedOffset__name'\nS'cookie'\nsbtR.",
b'cdatetime\ntime\n(U\x06\x05\x06\x07\x01\xe2@'
b'ctest.datetimetester\nPicklableFixedOffset\n)R'
b'}(U\x14_FixedOffset__offsetcdatetime\ntimedelta\n'
b'(J\xff\xff\xff\xffJ0\x0b\x01\x00K\x00tR'
b'U\x17_FixedOffset__dstoffsetN'
b'U\x12_FixedOffset__nameU\x06cookieubtR.',
b'\x80\x02cdatetime\ntime\nU\x06\x05\x06\x07\x01\xe2@'
b'ctest.datetimetester\nPicklableFixedOffset\n)R'
b'}(U\x14_FixedOffset__offsetcdatetime\ntimedelta\n'
b'J\xff\xff\xff\xffJ0\x0b\x01\x00K\x00\x87R'
b'U\x17_FixedOffset__dstoffsetN'
b'U\x12_FixedOffset__nameU\x06cookieub\x86R.',
]
tinfo = PicklableFixedOffset(-300, 'cookie')
expected = self.theclass(5, 6, 7, 123456, tzinfo=tinfo)
for data in tests:
for loads in pickle_loads:
derived = loads(data, encoding='latin1')
self.assertEqual(derived, expected, repr(data))
self.assertIsInstance(derived.tzinfo, PicklableFixedOffset)
self.assertEqual(derived.utcoffset(), timedelta(minutes=-300))
self.assertEqual(derived.tzname(), 'cookie')
def test_more_bool(self):
# time is always True.
cls = self.theclass
t = cls(0, tzinfo=FixedOffset(-300, ""))
self.assertTrue(t)
t = cls(5, tzinfo=FixedOffset(-300, ""))
self.assertTrue(t)
t = cls(5, tzinfo=FixedOffset(300, ""))
self.assertTrue(t)
t = cls(23, 59, tzinfo=FixedOffset(23*60 + 59, ""))
self.assertTrue(t)
def test_replace(self):
cls = self.theclass
z100 = FixedOffset(100, "+100")
zm200 = FixedOffset(timedelta(minutes=-200), "-200")
args = [1, 2, 3, 4, z100]
base = cls(*args)
self.assertEqual(base, base.replace())
i = 0
for name, newval in (("hour", 5),
("minute", 6),
("second", 7),
("microsecond", 8),
("tzinfo", zm200)):
newargs = args[:]
newargs[i] = newval
expected = cls(*newargs)
got = base.replace(**{name: newval})
self.assertEqual(expected, got)
i += 1
# Ensure we can get rid of a tzinfo.
self.assertEqual(base.tzname(), "+100")
base2 = base.replace(tzinfo=None)
self.assertIsNone(base2.tzinfo)
self.assertIsNone(base2.tzname())
# Ensure we can add one.
base3 = base2.replace(tzinfo=z100)
self.assertEqual(base, base3)
self.assertIs(base.tzinfo, base3.tzinfo)
# Out of bounds.
base = cls(1)
self.assertRaises(ValueError, base.replace, hour=24)
self.assertRaises(ValueError, base.replace, minute=-1)
self.assertRaises(ValueError, base.replace, second=100)
self.assertRaises(ValueError, base.replace, microsecond=1000000)
def test_mixed_compare(self):
t1 = self.theclass(1, 2, 3)
t2 = self.theclass(1, 2, 3)
self.assertEqual(t1, t2)
t2 = t2.replace(tzinfo=None)
self.assertEqual(t1, t2)
t2 = t2.replace(tzinfo=FixedOffset(None, ""))
self.assertEqual(t1, t2)
t2 = t2.replace(tzinfo=FixedOffset(0, ""))
self.assertNotEqual(t1, t2)
# In time w/ identical tzinfo objects, utcoffset is ignored.
class Varies(tzinfo):
def __init__(self):
self.offset = timedelta(minutes=22)
def utcoffset(self, t):
self.offset += timedelta(minutes=1)
return self.offset
v = Varies()
t1 = t2.replace(tzinfo=v)
t2 = t2.replace(tzinfo=v)
self.assertEqual(t1.utcoffset(), timedelta(minutes=23))
self.assertEqual(t2.utcoffset(), timedelta(minutes=24))
self.assertEqual(t1, t2)
# But if they're not identical, it isn't ignored.
t2 = t2.replace(tzinfo=Varies())
self.assertTrue(t1 < t2) # t1's offset counter still going up
def test_fromisoformat(self):
time_examples = [
(0, 0, 0, 0),
(23, 59, 59, 999999),
]
hh = (9, 12, 20)
mm = (5, 30)
ss = (4, 45)
usec = (0, 245000, 678901)
time_examples += list(itertools.product(hh, mm, ss, usec))
tzinfos = [None, timezone.utc,
timezone(timedelta(hours=2)),
timezone(timedelta(hours=6, minutes=27))]
for ttup in time_examples:
for tzi in tzinfos:
t = self.theclass(*ttup, tzinfo=tzi)
tstr = t.isoformat()
with self.subTest(tstr=tstr):
t_rt = self.theclass.fromisoformat(tstr)
self.assertEqual(t, t_rt)
def test_fromisoformat_timezone(self):
base_time = self.theclass(12, 30, 45, 217456)
tzoffsets = [
timedelta(hours=5), timedelta(hours=2),
timedelta(hours=6, minutes=27),
timedelta(hours=12, minutes=32, seconds=30),
timedelta(hours=2, minutes=4, seconds=9, microseconds=123456)
]
tzoffsets += [-1 * td for td in tzoffsets]
tzinfos = [None, timezone.utc,
timezone(timedelta(hours=0))]
tzinfos += [timezone(td) for td in tzoffsets]
for tzi in tzinfos:
t = base_time.replace(tzinfo=tzi)
tstr = t.isoformat()
with self.subTest(tstr=tstr):
t_rt = self.theclass.fromisoformat(tstr)
self.assertEqual(t, t_rt)
def test_fromisoformat_timespecs(self):
time_bases = [
(8, 17, 45, 123456),
(8, 17, 45, 0)
]
tzinfos = [None, timezone.utc,
timezone(timedelta(hours=-5)),
timezone(timedelta(hours=2)),
timezone(timedelta(hours=6, minutes=27))]
timespecs = ['hours', 'minutes', 'seconds',
'milliseconds', 'microseconds']
for ip, ts in enumerate(timespecs):
for tzi in tzinfos:
for t_tuple in time_bases:
if ts == 'milliseconds':
new_microseconds = 1000 * (t_tuple[-1] // 1000)
t_tuple = t_tuple[0:-1] + (new_microseconds,)
t = self.theclass(*(t_tuple[0:(1 + ip)]), tzinfo=tzi)
tstr = t.isoformat(timespec=ts)
with self.subTest(tstr=tstr):
t_rt = self.theclass.fromisoformat(tstr)
self.assertEqual(t, t_rt)
def test_fromisoformat_fails(self):
bad_strs = [
'', # Empty string
'12\ud80000', # Invalid separator - surrogate char
'12:', # Ends on a separator
'12:30:', # Ends on a separator
'12:30:15.', # Ends on a separator
'1', # Incomplete hours
'12:3', # Incomplete minutes
'12:30:1', # Incomplete seconds
'1a:30:45.334034', # Invalid character in hours
'12:a0:45.334034', # Invalid character in minutes
'12:30:a5.334034', # Invalid character in seconds
'12:30:45.1234', # Too many digits for milliseconds
'12:30:45.1234567', # Too many digits for microseconds
'12:30:45.123456+24:30', # Invalid time zone offset
'12:30:45.123456-24:30', # Invalid negative offset
'12：30：45', # Uses full-width unicode colons
'12:30:45․123456', # Uses \u2024 in place of decimal point
'12:30:45a', # Extra at end of basic time
'12:30:45.123a', # Extra at end of millisecond time
'12:30:45.123456a', # Extra at end of microsecond time
'12:30:45.123456+12:00:30a', # Extra at end of full time
]
for bad_str in bad_strs:
with self.subTest(bad_str=bad_str):
with self.assertRaises(ValueError):
self.theclass.fromisoformat(bad_str)
def test_fromisoformat_fails_typeerror(self):
# Test that fromisoformat() fails when passed the wrong type
import io
bad_types = [b'12:30:45', None, io.StringIO('12:30:45')]
for bad_type in bad_types:
with self.assertRaises(TypeError):
self.theclass.fromisoformat(bad_type)
def test_fromisoformat_subclass(self):
class TimeSubclass(self.theclass):
pass
tsc = TimeSubclass(12, 14, 45, 203745, tzinfo=timezone.utc)
tsc_rt = TimeSubclass.fromisoformat(tsc.isoformat())
self.assertEqual(tsc, tsc_rt)
self.assertIsInstance(tsc_rt, TimeSubclass)
def test_subclass_timetz(self):
class C(self.theclass):
theAnswer = 42
def __new__(cls, *args, **kws):
temp = kws.copy()
extra = temp.pop('extra')
result = self.theclass.__new__(cls, *args, **temp)
result.extra = extra
return result
def newmeth(self, start):
return start + self.hour + self.second
args = 4, 5, 6, 500, FixedOffset(-300, "EST", 1)
dt1 = self.theclass(*args)
dt2 = C(*args, **{'extra': 7})
self.assertEqual(dt2.__class__, C)
self.assertEqual(dt2.theAnswer, 42)
self.assertEqual(dt2.extra, 7)
self.assertEqual(dt1.utcoffset(), dt2.utcoffset())
self.assertEqual(dt2.newmeth(-7), dt1.hour + dt1.second - 7)
# Testing datetime objects with a non-None tzinfo.
class TestDateTimeTZ(TestDateTime, TZInfoBase, unittest.TestCase):
theclass = datetime
def test_trivial(self):
dt = self.theclass(1, 2, 3, 4, 5, 6, 7)
self.assertEqual(dt.year, 1)
self.assertEqual(dt.month, 2)
self.assertEqual(dt.day, 3)
self.assertEqual(dt.hour, 4)
self.assertEqual(dt.minute, 5)
self.assertEqual(dt.second, 6)
self.assertEqual(dt.microsecond, 7)
self.assertEqual(dt.tzinfo, None)
def test_even_more_compare(self):
# The test_compare() and test_more_compare() inherited from TestDate
# and TestDateTime covered non-tzinfo cases.
# Smallest possible after UTC adjustment.
t1 = self.theclass(1, 1, 1, tzinfo=FixedOffset(1439, ""))
# Largest possible after UTC adjustment.
t2 = self.theclass(MAXYEAR, 12, 31, 23, 59, 59, 999999,
tzinfo=FixedOffset(-1439, ""))
# Make sure those compare correctly, and w/o overflow.
self.assertTrue(t1 < t2)
self.assertTrue(t1 != t2)
self.assertTrue(t2 > t1)
self.assertEqual(t1, t1)
self.assertEqual(t2, t2)
# Equal after adjustment.
t1 = self.theclass(1, 12, 31, 23, 59, tzinfo=FixedOffset(1, ""))
t2 = self.theclass(2, 1, 1, 3, 13, tzinfo=FixedOffset(3*60+13+2, ""))
self.assertEqual(t1, t2)
# Change t1 not to subtract a minute, and t1 should be larger.
t1 = self.theclass(1, 12, 31, 23, 59, tzinfo=FixedOffset(0, ""))
self.assertTrue(t1 > t2)
# Change t1 to subtract 2 minutes, and t1 should be smaller.
t1 = self.theclass(1, 12, 31, 23, 59, tzinfo=FixedOffset(2, ""))
self.assertTrue(t1 < t2)
# Back to the original t1, but make seconds resolve it.
t1 = self.theclass(1, 12, 31, 23, 59, tzinfo=FixedOffset(1, ""),
second=1)
self.assertTrue(t1 > t2)
# Likewise, but make microseconds resolve it.
t1 = self.theclass(1, 12, 31, 23, 59, tzinfo=FixedOffset(1, ""),
microsecond=1)
self.assertTrue(t1 > t2)
# Make t2 naive and it should differ.
t2 = self.theclass.min
self.assertNotEqual(t1, t2)
self.assertEqual(t2, t2)
# and > comparison should fail
with self.assertRaises(TypeError):
t1 > t2
# It's also naive if it has tzinfo but tzinfo.utcoffset() is None.
class Naive(tzinfo):
def utcoffset(self, dt): return None
t2 = self.theclass(5, 6, 7, tzinfo=Naive())
self.assertNotEqual(t1, t2)
self.assertEqual(t2, t2)
# OTOH, it's OK to compare two of these mixing the two ways of being
# naive.
t1 = self.theclass(5, 6, 7)
self.assertEqual(t1, t2)
# Try a bogus utcoffset.
class Bogus(tzinfo):
def utcoffset(self, dt):
return timedelta(minutes=1440) # out of bounds
t1 = self.theclass(2, 2, 2, tzinfo=Bogus())
t2 = self.theclass(2, 2, 2, tzinfo=FixedOffset(0, ""))
self.assertRaises(ValueError, lambda: t1 == t2)
def test_pickling(self):
# Try one without a tzinfo.
args = 6, 7, 23, 20, 59, 1, 64**2
orig = self.theclass(*args)
for pickler, unpickler, proto in pickle_choices:
green = pickler.dumps(orig, proto)
derived = unpickler.loads(green)
self.assertEqual(orig, derived)
self.assertEqual(orig.__reduce__(), orig.__reduce_ex__(2))
# Try one with a tzinfo.
tinfo = PicklableFixedOffset(-300, 'cookie')
orig = self.theclass(*args, **{'tzinfo': tinfo})
derived = self.theclass(1, 1, 1, tzinfo=FixedOffset(0, "", 0))
for pickler, unpickler, proto in pickle_choices:
green = pickler.dumps(orig, proto)
derived = unpickler.loads(green)
self.assertEqual(orig, derived)
self.assertIsInstance(derived.tzinfo, PicklableFixedOffset)
self.assertEqual(derived.utcoffset(), timedelta(minutes=-300))
self.assertEqual(derived.tzname(), 'cookie')
self.assertEqual(orig.__reduce__(), orig.__reduce_ex__(2))
def test_compat_unpickle(self):
tests = [
b'cdatetime\ndatetime\n'
b"(S'\\x07\\xdf\\x0b\\x1b\\x14;\\x01\\x01\\xe2@'\n"
b'ctest.datetimetester\nPicklableFixedOffset\n(tR'
b"(dS'_FixedOffset__offset'\ncdatetime\ntimedelta\n"
b'(I-1\nI68400\nI0\ntRs'
b"S'_FixedOffset__dstoffset'\nNs"
b"S'_FixedOffset__name'\nS'cookie'\nsbtR.",
b'cdatetime\ndatetime\n'
b'(U\n\x07\xdf\x0b\x1b\x14;\x01\x01\xe2@'
b'ctest.datetimetester\nPicklableFixedOffset\n)R'
b'}(U\x14_FixedOffset__offsetcdatetime\ntimedelta\n'
b'(J\xff\xff\xff\xffJ0\x0b\x01\x00K\x00tR'
b'U\x17_FixedOffset__dstoffsetN'
b'U\x12_FixedOffset__nameU\x06cookieubtR.',
b'\x80\x02cdatetime\ndatetime\n'
b'U\n\x07\xdf\x0b\x1b\x14;\x01\x01\xe2@'
b'ctest.datetimetester\nPicklableFixedOffset\n)R'
b'}(U\x14_FixedOffset__offsetcdatetime\ntimedelta\n'
b'J\xff\xff\xff\xffJ0\x0b\x01\x00K\x00\x87R'
b'U\x17_FixedOffset__dstoffsetN'
b'U\x12_FixedOffset__nameU\x06cookieub\x86R.',
]
args = 2015, 11, 27, 20, 59, 1, 123456
tinfo = PicklableFixedOffset(-300, 'cookie')
expected = self.theclass(*args, **{'tzinfo': tinfo})
for data in tests:
for loads in pickle_loads:
derived = loads(data, encoding='latin1')
self.assertEqual(derived, expected)
self.assertIsInstance(derived.tzinfo, PicklableFixedOffset)
self.assertEqual(derived.utcoffset(), timedelta(minutes=-300))
self.assertEqual(derived.tzname(), 'cookie')
def test_extreme_hashes(self):
# If an attempt is made to hash these via subtracting the offset
# then hashing a datetime object, OverflowError results. The
# Python implementation used to blow up here.
t = self.theclass(1, 1, 1, tzinfo=FixedOffset(1439, ""))
hash(t)
t = self.theclass(MAXYEAR, 12, 31, 23, 59, 59, 999999,
tzinfo=FixedOffset(-1439, ""))
hash(t)
# OTOH, an OOB offset should blow up.
t = self.theclass(5, 5, 5, tzinfo=FixedOffset(-1440, ""))
self.assertRaises(ValueError, hash, t)
def test_zones(self):
est = FixedOffset(-300, "EST")
utc = FixedOffset(0, "UTC")
met = FixedOffset(60, "MET")
t1 = datetime(2002, 3, 19, 7, 47, tzinfo=est)
t2 = datetime(2002, 3, 19, 12, 47, tzinfo=utc)
t3 = datetime(2002, 3, 19, 13, 47, tzinfo=met)
self.assertEqual(t1.tzinfo, est)
self.assertEqual(t2.tzinfo, utc)
self.assertEqual(t3.tzinfo, met)
self.assertEqual(t1.utcoffset(), timedelta(minutes=-300))
self.assertEqual(t2.utcoffset(), timedelta(minutes=0))
self.assertEqual(t3.utcoffset(), timedelta(minutes=60))
self.assertEqual(t1.tzname(), "EST")
self.assertEqual(t2.tzname(), "UTC")
self.assertEqual(t3.tzname(), "MET")
self.assertEqual(hash(t1), hash(t2))
self.assertEqual(hash(t1), hash(t3))
self.assertEqual(hash(t2), hash(t3))
self.assertEqual(t1, t2)
self.assertEqual(t1, t3)
self.assertEqual(t2, t3)
self.assertEqual(str(t1), "2002-03-19 07:47:00-05:00")
self.assertEqual(str(t2), "2002-03-19 12:47:00+00:00")
self.assertEqual(str(t3), "2002-03-19 13:47:00+01:00")
d = 'datetime.datetime(2002, 3, 19, '
self.assertEqual(repr(t1), d + "7, 47, tzinfo=est)")
self.assertEqual(repr(t2), d + "12, 47, tzinfo=utc)")
self.assertEqual(repr(t3), d + "13, 47, tzinfo=met)")
def test_combine(self):
met = FixedOffset(60, "MET")
d = date(2002, 3, 4)
tz = time(18, 45, 3, 1234, tzinfo=met)
dt = datetime.combine(d, tz)
self.assertEqual(dt, datetime(2002, 3, 4, 18, 45, 3, 1234,
tzinfo=met))
def test_extract(self):
met = FixedOffset(60, "MET")
dt = self.theclass(2002, 3, 4, 18, 45, 3, 1234, tzinfo=met)
self.assertEqual(dt.date(), date(2002, 3, 4))
self.assertEqual(dt.time(), time(18, 45, 3, 1234))
self.assertEqual(dt.timetz(), time(18, 45, 3, 1234, tzinfo=met))
def test_tz_aware_arithmetic(self):
now = self.theclass.now()
tz55 = FixedOffset(-330, "west 5:30")
timeaware = now.time().replace(tzinfo=tz55)
nowaware = self.theclass.combine(now.date(), timeaware)
self.assertIs(nowaware.tzinfo, tz55)
self.assertEqual(nowaware.timetz(), timeaware)
# Can't mix aware and non-aware.
self.assertRaises(TypeError, lambda: now - nowaware)
self.assertRaises(TypeError, lambda: nowaware - now)
# And adding datetime's doesn't make sense, aware or not.
self.assertRaises(TypeError, lambda: now + nowaware)
self.assertRaises(TypeError, lambda: nowaware + now)
self.assertRaises(TypeError, lambda: nowaware + nowaware)
# Subtracting should yield 0.
self.assertEqual(now - now, timedelta(0))
self.assertEqual(nowaware - nowaware, timedelta(0))
# Adding a delta should preserve tzinfo.
delta = timedelta(weeks=1, minutes=12, microseconds=5678)
nowawareplus = nowaware + delta
self.assertIs(nowaware.tzinfo, tz55)
nowawareplus2 = delta + nowaware
self.assertIs(nowawareplus2.tzinfo, tz55)
self.assertEqual(nowawareplus, nowawareplus2)
# that - delta should be what we started with, and that - what we
# started with should be delta.
diff = nowawareplus - delta
self.assertIs(diff.tzinfo, tz55)
self.assertEqual(nowaware, diff)
self.assertRaises(TypeError, lambda: delta - nowawareplus)
self.assertEqual(nowawareplus - nowaware, delta)
# Make up a random timezone.
tzr = FixedOffset(random.randrange(-1439, 1440), "randomtimezone")
# Attach it to nowawareplus.
nowawareplus = nowawareplus.replace(tzinfo=tzr)
self.assertIs(nowawareplus.tzinfo, tzr)
# Make sure the difference takes the timezone adjustments into account.
got = nowaware - nowawareplus
# Expected: (nowaware base - nowaware offset) -
# (nowawareplus base - nowawareplus offset) =
# (nowaware base - nowawareplus base) +
# (nowawareplus offset - nowaware offset) =
# -delta + nowawareplus offset - nowaware offset
expected = nowawareplus.utcoffset() - nowaware.utcoffset() - delta
self.assertEqual(got, expected)
# Try max possible difference.
min = self.theclass(1, 1, 1, tzinfo=FixedOffset(1439, "min"))
max = self.theclass(MAXYEAR, 12, 31, 23, 59, 59, 999999,
tzinfo=FixedOffset(-1439, "max"))
maxdiff = max - min
self.assertEqual(maxdiff, self.theclass.max - self.theclass.min +
timedelta(minutes=2*1439))
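# (Illustration: the aware subtraction compares UTC-adjusted values, so the
# +1439 and -1439 minute offsets widen the naive max - min span by a further
# 2*1439 minutes, which is exactly what the assertion above checks.)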
# Different tzinfo, but the same offset
tza = timezone(HOUR, 'A')
tzb = timezone(HOUR, 'B')
delta = min.replace(tzinfo=tza) - max.replace(tzinfo=tzb)
self.assertEqual(delta, self.theclass.min - self.theclass.max)
def test_tzinfo_now(self):
meth = self.theclass.now
# Ensure it doesn't require tzinfo (i.e., that this doesn't blow up).
base = meth()
# Try with and without naming the keyword.
off42 = FixedOffset(42, "42")
another = meth(off42)
again = meth(tz=off42)
self.assertIs(another.tzinfo, again.tzinfo)
self.assertEqual(another.utcoffset(), timedelta(minutes=42))
# Bad argument with and w/o naming the keyword.
self.assertRaises(TypeError, meth, 16)
self.assertRaises(TypeError, meth, tzinfo=16)
# Bad keyword name.
self.assertRaises(TypeError, meth, tinfo=off42)
# Too many args.
self.assertRaises(TypeError, meth, off42, off42)
# We don't know which time zone we're in, and don't have a tzinfo
# class to represent it, so seeing whether a tz argument actually
# does a conversion is tricky.
utc = FixedOffset(0, "utc", 0)
for weirdtz in [FixedOffset(timedelta(hours=15, minutes=58), "weirdtz", 0),
timezone(timedelta(hours=15, minutes=58), "weirdtz"),]:
for dummy in range(3):
now = datetime.now(weirdtz)
self.assertIs(now.tzinfo, weirdtz)
utcnow = datetime.utcnow().replace(tzinfo=utc)
now2 = utcnow.astimezone(weirdtz)
if abs(now - now2) < timedelta(seconds=30):
break
# Else the code is broken, or more than 30 seconds passed between
# calls; assuming the latter, just try again.
else:
# Three strikes and we're out.
self.fail("utcnow(), now(tz), or astimezone() may be broken")
def test_tzinfo_fromtimestamp(self):
import time
meth = self.theclass.fromtimestamp
ts = time.time()
# Ensure it doesn't require tzinfo (i.e., that this doesn't blow up).
base = meth(ts)
# Try with and without naming the keyword.
off42 = FixedOffset(42, "42")
another = meth(ts, off42)
again = meth(ts, tz=off42)
self.assertIs(another.tzinfo, again.tzinfo)
self.assertEqual(another.utcoffset(), timedelta(minutes=42))
# Bad argument with and w/o naming the keyword.
self.assertRaises(TypeError, meth, ts, 16)
self.assertRaises(TypeError, meth, ts, tzinfo=16)
# Bad keyword name.
self.assertRaises(TypeError, meth, ts, tinfo=off42)
# Too many args.
self.assertRaises(TypeError, meth, ts, off42, off42)
# Too few args.
self.assertRaises(TypeError, meth)
# Try to make sure tz= actually does some conversion.
timestamp = 1000000000
utcdatetime = datetime.utcfromtimestamp(timestamp)
# In POSIX (epoch 1970), that's 2001-09-09 01:46:40 UTC, give or take.
# But on some flavor of Mac, it's nowhere near that. So we can't have
# any idea here what time that actually is, we can only test that
# relative changes match.
utcoffset = timedelta(hours=-15, minutes=39) # arbitrary, but not zero
tz = FixedOffset(utcoffset, "tz", 0)
expected = utcdatetime + utcoffset
got = datetime.fromtimestamp(timestamp, tz)
self.assertEqual(expected, got.replace(tzinfo=None))
def test_tzinfo_utcnow(self):
meth = self.theclass.utcnow
# Ensure it doesn't require tzinfo (i.e., that this doesn't blow up).
base = meth()
# Try with and without naming the keyword; for whatever reason,
# utcnow() doesn't accept a tzinfo argument.
off42 = FixedOffset(42, "42")
self.assertRaises(TypeError, meth, off42)
self.assertRaises(TypeError, meth, tzinfo=off42)
def test_tzinfo_utcfromtimestamp(self):
import time
meth = self.theclass.utcfromtimestamp
ts = time.time()
# Ensure it doesn't require tzinfo (i.e., that this doesn't blow up).
base = meth(ts)
# Try with and without naming the keyword; for whatever reason,
# utcfromtimestamp() doesn't accept a tzinfo argument.
off42 = FixedOffset(42, "42")
self.assertRaises(TypeError, meth, ts, off42)
self.assertRaises(TypeError, meth, ts, tzinfo=off42)
def test_tzinfo_timetuple(self):
# TestDateTime tested most of this. datetime adds a twist to the
# DST flag.
class DST(tzinfo):
def __init__(self, dstvalue):
if isinstance(dstvalue, int):
dstvalue = timedelta(minutes=dstvalue)
self.dstvalue = dstvalue
def dst(self, dt):
return self.dstvalue
cls = self.theclass
for dstvalue, flag in (-33, 1), (33, 1), (0, 0), (None, -1):
d = cls(1, 1, 1, 10, 20, 30, 40, tzinfo=DST(dstvalue))
t = d.timetuple()
self.assertEqual(1, t.tm_year)
self.assertEqual(1, t.tm_mon)
self.assertEqual(1, t.tm_mday)
self.assertEqual(10, t.tm_hour)
self.assertEqual(20, t.tm_min)
self.assertEqual(30, t.tm_sec)
self.assertEqual(0, t.tm_wday)
self.assertEqual(1, t.tm_yday)
self.assertEqual(flag, t.tm_isdst)
# dst() returns wrong type.
self.assertRaises(TypeError, cls(1, 1, 1, tzinfo=DST("x")).timetuple)
# dst() at the edge.
self.assertEqual(cls(1,1,1, tzinfo=DST(1439)).timetuple().tm_isdst, 1)
self.assertEqual(cls(1,1,1, tzinfo=DST(-1439)).timetuple().tm_isdst, 1)
# dst() out of range.
self.assertRaises(ValueError, cls(1,1,1, tzinfo=DST(1440)).timetuple)
self.assertRaises(ValueError, cls(1,1,1, tzinfo=DST(-1440)).timetuple)
def test_utctimetuple(self):
class DST(tzinfo):
def __init__(self, dstvalue=0):
if isinstance(dstvalue, int):
dstvalue = timedelta(minutes=dstvalue)
self.dstvalue = dstvalue
def dst(self, dt):
return self.dstvalue
cls = self.theclass
# This can't work: DST didn't implement utcoffset.
self.assertRaises(NotImplementedError,
cls(1, 1, 1, tzinfo=DST(0)).utcoffset)
class UOFS(DST):
def __init__(self, uofs, dofs=None):
DST.__init__(self, dofs)
self.uofs = timedelta(minutes=uofs)
def utcoffset(self, dt):
return self.uofs
for dstvalue in -33, 33, 0, None:
d = cls(1, 2, 3, 10, 20, 30, 40, tzinfo=UOFS(-53, dstvalue))
t = d.utctimetuple()
self.assertEqual(d.year, t.tm_year)
self.assertEqual(d.month, t.tm_mon)
self.assertEqual(d.day, t.tm_mday)
self.assertEqual(11, t.tm_hour) # 20 min + 53 min = 1 hr + 13 min
self.assertEqual(13, t.tm_min)
self.assertEqual(d.second, t.tm_sec)
self.assertEqual(d.weekday(), t.tm_wday)
self.assertEqual(d.toordinal() - date(1, 1, 1).toordinal() + 1,
t.tm_yday)
# Ensure tm_isdst is 0 regardless of what dst() says: DST
# is never in effect for a UTC time.
self.assertEqual(0, t.tm_isdst)
# For naive datetime, utctimetuple == timetuple except for isdst
d = cls(1, 2, 3, 10, 20, 30, 40)
t = d.utctimetuple()
self.assertEqual(t[:-1], d.timetuple()[:-1])
self.assertEqual(0, t.tm_isdst)
# Same if utcoffset is None
class NOFS(DST):
def utcoffset(self, dt):
return None
d = cls(1, 2, 3, 10, 20, 30, 40, tzinfo=NOFS())
t = d.utctimetuple()
self.assertEqual(t[:-1], d.timetuple()[:-1])
self.assertEqual(0, t.tm_isdst)
# Check that bad tzinfo is detected
class BOFS(DST):
def utcoffset(self, dt):
return "EST"
d = cls(1, 2, 3, 10, 20, 30, 40, tzinfo=BOFS())
self.assertRaises(TypeError, d.utctimetuple)
# Check that utctimetuple() is the same as
# astimezone(utc).timetuple()
d = cls(2010, 11, 13, 14, 15, 16, 171819)
for tz in [timezone.min, timezone.utc, timezone.max]:
dtz = d.replace(tzinfo=tz)
self.assertEqual(dtz.utctimetuple()[:-1],
dtz.astimezone(timezone.utc).timetuple()[:-1])
# At the edges, UTC adjustment can produce years out-of-range
# for a datetime object. Ensure that an OverflowError is
# raised.
tiny = cls(MINYEAR, 1, 1, 0, 0, 37, tzinfo=UOFS(1439))
# That goes back 1 minute less than a full day.
self.assertRaises(OverflowError, tiny.utctimetuple)
huge = cls(MAXYEAR, 12, 31, 23, 59, 37, 999999, tzinfo=UOFS(-1439))
# That goes forward 1 minute less than a full day.
self.assertRaises(OverflowError, huge.utctimetuple)
# More overflow cases
tiny = cls.min.replace(tzinfo=timezone(MINUTE))
self.assertRaises(OverflowError, tiny.utctimetuple)
huge = cls.max.replace(tzinfo=timezone(-MINUTE))
self.assertRaises(OverflowError, huge.utctimetuple)
def test_tzinfo_isoformat(self):
zero = FixedOffset(0, "+00:00")
plus = FixedOffset(220, "+03:40")
minus = FixedOffset(-231, "-03:51")
unknown = FixedOffset(None, "")
cls = self.theclass
datestr = '0001-02-03'
for ofs in None, zero, plus, minus, unknown:
for us in 0, 987001:
d = cls(1, 2, 3, 4, 5, 59, us, tzinfo=ofs)
timestr = '04:05:59' + (us and '.987001' or '')
ofsstr = ofs is not None and d.tzname() or ''
tailstr = timestr + ofsstr
iso = d.isoformat()
self.assertEqual(iso, datestr + 'T' + tailstr)
self.assertEqual(iso, d.isoformat('T'))
self.assertEqual(d.isoformat('k'), datestr + 'k' + tailstr)
self.assertEqual(d.isoformat('\u1234'), datestr + '\u1234' + tailstr)
self.assertEqual(str(d), datestr + ' ' + tailstr)
def test_replace(self):
cls = self.theclass
z100 = FixedOffset(100, "+100")
zm200 = FixedOffset(timedelta(minutes=-200), "-200")
args = [1, 2, 3, 4, 5, 6, 7, z100]
base = cls(*args)
self.assertEqual(base, base.replace())
i = 0
for name, newval in (("year", 2),
("month", 3),
("day", 4),
("hour", 5),
("minute", 6),
("second", 7),
("microsecond", 8),
("tzinfo", zm200)):
newargs = args[:]
newargs[i] = newval
expected = cls(*newargs)
got = base.replace(**{name: newval})
self.assertEqual(expected, got)
i += 1
# Ensure we can get rid of a tzinfo.
self.assertEqual(base.tzname(), "+100")
base2 = base.replace(tzinfo=None)
self.assertIsNone(base2.tzinfo)
self.assertIsNone(base2.tzname())
# Ensure we can add one.
base3 = base2.replace(tzinfo=z100)
self.assertEqual(base, base3)
self.assertIs(base.tzinfo, base3.tzinfo)
# Out of bounds.
base = cls(2000, 2, 29)
self.assertRaises(ValueError, base.replace, year=2001)
def test_more_astimezone(self):
# The inherited test_astimezone covered some trivial and error cases.
fnone = FixedOffset(None, "None")
f44m = FixedOffset(44, "44")
fm5h = FixedOffset(-timedelta(hours=5), "m300")
dt = self.theclass.now(tz=f44m)
self.assertIs(dt.tzinfo, f44m)
# Replacing with degenerate tzinfo raises an exception.
self.assertRaises(ValueError, dt.astimezone, fnone)
# Replacing with same tzinfo makes no change.
x = dt.astimezone(dt.tzinfo)
self.assertIs(x.tzinfo, f44m)
self.assertEqual(x.date(), dt.date())
self.assertEqual(x.time(), dt.time())
# Replacing with different tzinfo does adjust.
got = dt.astimezone(fm5h)
self.assertIs(got.tzinfo, fm5h)
self.assertEqual(got.utcoffset(), timedelta(hours=-5))
expected = dt - dt.utcoffset() # in effect, convert to UTC
expected += fm5h.utcoffset(dt) # and from there to local time
expected = expected.replace(tzinfo=fm5h) # and attach new tzinfo
self.assertEqual(got.date(), expected.date())
self.assertEqual(got.time(), expected.time())
self.assertEqual(got.timetz(), expected.timetz())
self.assertIs(got.tzinfo, expected.tzinfo)
self.assertEqual(got, expected)
@support.run_with_tz('UTC')
def test_astimezone_default_utc(self):
dt = self.theclass.now(timezone.utc)
self.assertEqual(dt.astimezone(None), dt)
self.assertEqual(dt.astimezone(), dt)
# Note that offset in TZ variable has the opposite sign to that
# produced by %z directive.
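# For example, TZ='EST+05EDT,...' places the zone 5 hours west of Greenwich
# (POSIX counts positive offsets westward), while the same wall time is
# reported by %z as "-0500"; the assertions below rely on this sign flip.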
@support.run_with_tz('EST+05EDT,M3.2.0,M11.1.0')
def test_astimezone_default_eastern(self):
dt = self.theclass(2012, 11, 4, 6, 30, tzinfo=timezone.utc)
local = dt.astimezone()
self.assertEqual(dt, local)
self.assertEqual(local.strftime("%z %Z"), "-0500 EST")
dt = self.theclass(2012, 11, 4, 5, 30, tzinfo=timezone.utc)
local = dt.astimezone()
self.assertEqual(dt, local)
self.assertEqual(local.strftime("%z %Z"), "-0400 EDT")
@support.run_with_tz('EST+05EDT,M3.2.0,M11.1.0')
def test_astimezone_default_near_fold(self):
# Issue #26616.
u = datetime(2015, 11, 1, 5, tzinfo=timezone.utc)
t = u.astimezone()
s = t.astimezone()
self.assertEqual(t.tzinfo, s.tzinfo)
def test_aware_subtract(self):
cls = self.theclass
# Ensure that utcoffset() is ignored when the operands have the
# same tzinfo member.
class OperandDependentOffset(tzinfo):
def utcoffset(self, t):
if t.minute < 10:
# d0 and d1 equal after adjustment
return timedelta(minutes=t.minute)
else:
# d2 off in the weeds
return timedelta(minutes=59)
base = cls(8, 9, 10, 11, 12, 13, 14, tzinfo=OperandDependentOffset())
d0 = base.replace(minute=3)
d1 = base.replace(minute=9)
d2 = base.replace(minute=11)
for x in d0, d1, d2:
for y in d0, d1, d2:
got = x - y
expected = timedelta(minutes=x.minute - y.minute)
self.assertEqual(got, expected)
# OTOH, if the tzinfo members are distinct, utcoffsets aren't
# ignored.
base = cls(8, 9, 10, 11, 12, 13, 14)
d0 = base.replace(minute=3, tzinfo=OperandDependentOffset())
d1 = base.replace(minute=9, tzinfo=OperandDependentOffset())
d2 = base.replace(minute=11, tzinfo=OperandDependentOffset())
for x in d0, d1, d2:
for y in d0, d1, d2:
got = x - y
if (x is d0 or x is d1) and (y is d0 or y is d1):
expected = timedelta(0)
elif x is y is d2:
expected = timedelta(0)
elif x is d2:
expected = timedelta(minutes=(11-59)-0)
else:
assert y is d2
expected = timedelta(minutes=0-(11-59))
self.assertEqual(got, expected)
def test_mixed_compare(self):
t1 = datetime(1, 2, 3, 4, 5, 6, 7)
t2 = datetime(1, 2, 3, 4, 5, 6, 7)
self.assertEqual(t1, t2)
t2 = t2.replace(tzinfo=None)
self.assertEqual(t1, t2)
t2 = t2.replace(tzinfo=FixedOffset(None, ""))
self.assertEqual(t1, t2)
t2 = t2.replace(tzinfo=FixedOffset(0, ""))
self.assertNotEqual(t1, t2)
# In datetime w/ identical tzinfo objects, utcoffset is ignored.
class Varies(tzinfo):
def __init__(self):
self.offset = timedelta(minutes=22)
def utcoffset(self, t):
self.offset += timedelta(minutes=1)
return self.offset
v = Varies()
t1 = t2.replace(tzinfo=v)
t2 = t2.replace(tzinfo=v)
self.assertEqual(t1.utcoffset(), timedelta(minutes=23))
self.assertEqual(t2.utcoffset(), timedelta(minutes=24))
self.assertEqual(t1, t2)
# But if they're not identical, it isn't ignored.
t2 = t2.replace(tzinfo=Varies())
self.assertTrue(t1 < t2) # t1's offset counter still going up
def test_subclass_datetimetz(self):
class C(self.theclass):
theAnswer = 42
def __new__(cls, *args, **kws):
temp = kws.copy()
extra = temp.pop('extra')
result = self.theclass.__new__(cls, *args, **temp)
result.extra = extra
return result
def newmeth(self, start):
return start + self.hour + self.year
args = 2002, 12, 31, 4, 5, 6, 500, FixedOffset(-300, "EST", 1)
dt1 = self.theclass(*args)
dt2 = C(*args, **{'extra': 7})
self.assertEqual(dt2.__class__, C)
self.assertEqual(dt2.theAnswer, 42)
self.assertEqual(dt2.extra, 7)
self.assertEqual(dt1.utcoffset(), dt2.utcoffset())
self.assertEqual(dt2.newmeth(-7), dt1.hour + dt1.year - 7)
# Pain to set up DST-aware tzinfo classes.
def first_sunday_on_or_after(dt):
days_to_go = 6 - dt.weekday()
if days_to_go:
dt += timedelta(days_to_go)
return dt
ZERO = timedelta(0)
MINUTE = timedelta(minutes=1)
HOUR = timedelta(hours=1)
DAY = timedelta(days=1)
# In the US, DST starts at 2am (standard time) on the first Sunday in April.
DSTSTART = datetime(1, 4, 1, 2)
# and ends at 2am (DST time; 1am standard time) on the last Sunday of Oct,
# which is the first Sunday on or after Oct 25. Because we view 1:MM as
# being standard time on that day, there is no spelling in local time of
# the last hour of DST (that's 1:MM DST, but 1:MM is taken as standard time).
DSTEND = datetime(1, 10, 25, 1)
class USTimeZone(tzinfo):
def __init__(self, hours, reprname, stdname, dstname):
self.stdoffset = timedelta(hours=hours)
self.reprname = reprname
self.stdname = stdname
self.dstname = dstname
def __repr__(self):
return self.reprname
def tzname(self, dt):
if self.dst(dt):
return self.dstname
else:
return self.stdname
def utcoffset(self, dt):
return self.stdoffset + self.dst(dt)
def dst(self, dt):
if dt is None or dt.tzinfo is None:
# An exception instead may be sensible here, in one or more of
# the cases.
return ZERO
assert dt.tzinfo is self
# Find first Sunday in April.
start = first_sunday_on_or_after(DSTSTART.replace(year=dt.year))
assert start.weekday() == 6 and start.month == 4 and start.day <= 7
# Find last Sunday in October.
end = first_sunday_on_or_after(DSTEND.replace(year=dt.year))
assert end.weekday() == 6 and end.month == 10 and end.day >= 25
# Can't compare naive to aware objects, so strip the timezone from
# dt first.
if start <= dt.replace(tzinfo=None) < end:
return HOUR
else:
return ZERO
Eastern = USTimeZone(-5, "Eastern", "EST", "EDT")
Central = USTimeZone(-6, "Central", "CST", "CDT")
Mountain = USTimeZone(-7, "Mountain", "MST", "MDT")
Pacific = USTimeZone(-8, "Pacific", "PST", "PDT")
utc_real = FixedOffset(0, "UTC", 0)
# For better test coverage, we want another flavor of UTC that's west of
# the Eastern and Pacific timezones.
utc_fake = FixedOffset(-12*60, "UTCfake", 0)
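# --- Editor's note: illustrative sketch, not part of the original test suite. ---
# The USTimeZone instances above hard-code the pre-2007 US rules: DST runs from
# 2am on the first Sunday in April to 2am (DST) on the last Sunday in October.
# For the 2002 dates used throughout the tests below this means, for example:
assert Eastern.utcoffset(datetime(2002, 7, 1, tzinfo=Eastern)) == timedelta(hours=-4)  # EDT
assert Eastern.utcoffset(datetime(2002, 1, 1, tzinfo=Eastern)) == timedelta(hours=-5)  # EST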
class TestTimezoneConversions(unittest.TestCase):
# The DST switch times for 2002, in std time.
dston = datetime(2002, 4, 7, 2)
dstoff = datetime(2002, 10, 27, 1)
theclass = datetime
# Check a time that's inside DST.
def checkinside(self, dt, tz, utc, dston, dstoff):
self.assertEqual(dt.dst(), HOUR)
# Conversion to our own timezone is always an identity.
self.assertEqual(dt.astimezone(tz), dt)
asutc = dt.astimezone(utc)
there_and_back = asutc.astimezone(tz)
# Conversion to UTC and back isn't always an identity here,
# because there are redundant spellings (in local time) of
# UTC time when DST begins: the clock jumps from 1:59:59
# to 3:00:00, and a local time of 2:MM:SS doesn't really
# make sense then. The classes above treat 2:MM:SS as
# daylight time then (it's "after 2am"), really an alias
# for 1:MM:SS standard time. The latter form is what
# conversion back from UTC produces.
if dt.date() == dston.date() and dt.hour == 2:
# We're in the redundant hour, and coming back from
# UTC gives the 1:MM:SS standard-time spelling.
self.assertEqual(there_and_back + HOUR, dt)
# Although during was considered to be in daylight
# time, there_and_back is not.
self.assertEqual(there_and_back.dst(), ZERO)
# They're the same times in UTC.
self.assertEqual(there_and_back.astimezone(utc),
dt.astimezone(utc))
else:
# We're not in the redundant hour.
self.assertEqual(dt, there_and_back)
# Because we have a redundant spelling when DST begins, there is
# (unfortunately) an hour when DST ends that can't be spelled at all in
# local time. When DST ends, the clock jumps from 1:59 back to 1:00
# again. The hour 1:MM DST has no spelling then: 1:MM is taken to be
# standard time. 1:MM DST == 0:MM EST, but 0:MM is taken to be
# daylight time. The hour 1:MM daylight == 0:MM standard can't be
# expressed in local time. Nevertheless, we want conversion back
# from UTC to mimic the local clock's "repeat an hour" behavior.
nexthour_utc = asutc + HOUR
nexthour_tz = nexthour_utc.astimezone(tz)
if dt.date() == dstoff.date() and dt.hour == 0:
# We're in the hour before the last DST hour. The last DST hour
# is ineffable. We want the conversion back to repeat 1:MM.
self.assertEqual(nexthour_tz, dt.replace(hour=1))
nexthour_utc += HOUR
nexthour_tz = nexthour_utc.astimezone(tz)
self.assertEqual(nexthour_tz, dt.replace(hour=1))
else:
self.assertEqual(nexthour_tz - dt, HOUR)
# Check a time that's outside DST.
def checkoutside(self, dt, tz, utc):
self.assertEqual(dt.dst(), ZERO)
# Conversion to our own timezone is always an identity.
self.assertEqual(dt.astimezone(tz), dt)
# Converting to UTC and back is an identity too.
asutc = dt.astimezone(utc)
there_and_back = asutc.astimezone(tz)
self.assertEqual(dt, there_and_back)
def convert_between_tz_and_utc(self, tz, utc):
dston = self.dston.replace(tzinfo=tz)
# Because 1:MM on the day DST ends is taken as being standard time,
# there is no spelling in tz for the last hour of daylight time.
# For purposes of the test, the last hour of DST is 0:MM, which is
# taken as being daylight time (and 1:MM is taken as being standard
# time).
dstoff = self.dstoff.replace(tzinfo=tz)
for delta in (timedelta(weeks=13),
DAY,
HOUR,
timedelta(minutes=1),
timedelta(microseconds=1)):
self.checkinside(dston, tz, utc, dston, dstoff)
for during in dston + delta, dstoff - delta:
self.checkinside(during, tz, utc, dston, dstoff)
self.checkoutside(dstoff, tz, utc)
for outside in dston - delta, dstoff + delta:
self.checkoutside(outside, tz, utc)
def test_easy(self):
# Despite the name of this test, the endcases are excruciating.
self.convert_between_tz_and_utc(Eastern, utc_real)
self.convert_between_tz_and_utc(Pacific, utc_real)
self.convert_between_tz_and_utc(Eastern, utc_fake)
self.convert_between_tz_and_utc(Pacific, utc_fake)
# The next is really dancing near the edge. It works because
# Pacific and Eastern are far enough apart that their "problem
# hours" don't overlap.
self.convert_between_tz_and_utc(Eastern, Pacific)
self.convert_between_tz_and_utc(Pacific, Eastern)
# OTOH, these fail! Don't enable them. The difficulty is that
# the edge case tests assume that every hour is representable in
# the "utc" class. This is always true for a fixed-offset tzinfo
        # class (like utc_real and utc_fake), but not for Eastern or Central.
# For these adjacent DST-aware time zones, the range of time offsets
# tested ends up creating hours in the one that aren't representable
# in the other. For the same reason, we would see failures in the
# Eastern vs Pacific tests too if we added 3*HOUR to the list of
# offset deltas in convert_between_tz_and_utc().
#
# self.convert_between_tz_and_utc(Eastern, Central) # can't work
# self.convert_between_tz_and_utc(Central, Eastern) # can't work
def test_tricky(self):
# 22:00 on day before daylight starts.
fourback = self.dston - timedelta(hours=4)
ninewest = FixedOffset(-9*60, "-0900", 0)
fourback = fourback.replace(tzinfo=ninewest)
# 22:00-0900 is 7:00 UTC == 2:00 EST == 3:00 DST. Since it's "after
# 2", we should get the 3 spelling.
# If we plug 22:00 the day before into Eastern, it "looks like std
# time", so its offset is returned as -5, and -5 - -9 = 4. Adding 4
# to 22:00 lands on 2:00, which makes no sense in local time (the
# local clock jumps from 1 to 3). The point here is to make sure we
# get the 3 spelling.
expected = self.dston.replace(hour=3)
got = fourback.astimezone(Eastern).replace(tzinfo=None)
self.assertEqual(expected, got)
# Similar, but map to 6:00 UTC == 1:00 EST == 2:00 DST. In that
# case we want the 1:00 spelling.
sixutc = self.dston.replace(hour=6, tzinfo=utc_real)
# Now 6:00 "looks like daylight", so the offset wrt Eastern is -4,
# and adding -4-0 == -4 gives the 2:00 spelling. We want the 1:00 EST
# spelling.
expected = self.dston.replace(hour=1)
got = sixutc.astimezone(Eastern).replace(tzinfo=None)
self.assertEqual(expected, got)
# Now on the day DST ends, we want "repeat an hour" behavior.
# UTC 4:MM 5:MM 6:MM 7:MM checking these
# EST 23:MM 0:MM 1:MM 2:MM
# EDT 0:MM 1:MM 2:MM 3:MM
# wall 0:MM 1:MM 1:MM 2:MM against these
for utc in utc_real, utc_fake:
for tz in Eastern, Pacific:
first_std_hour = self.dstoff - timedelta(hours=2) # 23:MM
# Convert that to UTC.
first_std_hour -= tz.utcoffset(None)
# Adjust for possibly fake UTC.
asutc = first_std_hour + utc.utcoffset(None)
# First UTC hour to convert; this is 4:00 when utc=utc_real &
# tz=Eastern.
asutcbase = asutc.replace(tzinfo=utc)
for tzhour in (0, 1, 1, 2):
expectedbase = self.dstoff.replace(hour=tzhour)
for minute in 0, 30, 59:
expected = expectedbase.replace(minute=minute)
asutc = asutcbase.replace(minute=minute)
astz = asutc.astimezone(tz)
self.assertEqual(astz.replace(tzinfo=None), expected)
asutcbase += HOUR
def test_bogus_dst(self):
class ok(tzinfo):
def utcoffset(self, dt): return HOUR
def dst(self, dt): return HOUR
now = self.theclass.now().replace(tzinfo=utc_real)
# Doesn't blow up.
now.astimezone(ok())
# Does blow up.
class notok(ok):
def dst(self, dt): return None
self.assertRaises(ValueError, now.astimezone, notok())
# Sometimes blow up. In the following, tzinfo.dst()
# implementation may return None or not None depending on
# whether DST is assumed to be in effect. In this situation,
# a ValueError should be raised by astimezone().
class tricky_notok(ok):
def dst(self, dt):
if dt.year == 2000:
return None
else:
return 10*HOUR
dt = self.theclass(2001, 1, 1).replace(tzinfo=utc_real)
self.assertRaises(ValueError, dt.astimezone, tricky_notok())
def test_fromutc(self):
self.assertRaises(TypeError, Eastern.fromutc) # not enough args
now = datetime.utcnow().replace(tzinfo=utc_real)
self.assertRaises(ValueError, Eastern.fromutc, now) # wrong tzinfo
now = now.replace(tzinfo=Eastern) # insert correct tzinfo
enow = Eastern.fromutc(now) # doesn't blow up
self.assertEqual(enow.tzinfo, Eastern) # has right tzinfo member
self.assertRaises(TypeError, Eastern.fromutc, now, now) # too many args
self.assertRaises(TypeError, Eastern.fromutc, date.today()) # wrong type
# Always converts UTC to standard time.
class FauxUSTimeZone(USTimeZone):
def fromutc(self, dt):
return dt + self.stdoffset
FEastern = FauxUSTimeZone(-5, "FEastern", "FEST", "FEDT")
# UTC 4:MM 5:MM 6:MM 7:MM 8:MM 9:MM
# EST 23:MM 0:MM 1:MM 2:MM 3:MM 4:MM
# EDT 0:MM 1:MM 2:MM 3:MM 4:MM 5:MM
# Check around DST start.
start = self.dston.replace(hour=4, tzinfo=Eastern)
fstart = start.replace(tzinfo=FEastern)
for wall in 23, 0, 1, 3, 4, 5:
expected = start.replace(hour=wall)
if wall == 23:
expected -= timedelta(days=1)
got = Eastern.fromutc(start)
self.assertEqual(expected, got)
expected = fstart + FEastern.stdoffset
got = FEastern.fromutc(fstart)
self.assertEqual(expected, got)
# Ensure astimezone() calls fromutc() too.
got = fstart.replace(tzinfo=utc_real).astimezone(FEastern)
self.assertEqual(expected, got)
start += HOUR
fstart += HOUR
# Check around DST end.
start = self.dstoff.replace(hour=4, tzinfo=Eastern)
fstart = start.replace(tzinfo=FEastern)
for wall in 0, 1, 1, 2, 3, 4:
expected = start.replace(hour=wall)
got = Eastern.fromutc(start)
self.assertEqual(expected, got)
expected = fstart + FEastern.stdoffset
got = FEastern.fromutc(fstart)
self.assertEqual(expected, got)
# Ensure astimezone() calls fromutc() too.
got = fstart.replace(tzinfo=utc_real).astimezone(FEastern)
self.assertEqual(expected, got)
start += HOUR
fstart += HOUR
#############################################################################
# oddballs
class Oddballs(unittest.TestCase):
def test_bug_1028306(self):
# Trying to compare a date to a datetime should act like a mixed-
# type comparison, despite that datetime is a subclass of date.
as_date = date.today()
as_datetime = datetime.combine(as_date, time())
self.assertTrue(as_date != as_datetime)
self.assertTrue(as_datetime != as_date)
self.assertFalse(as_date == as_datetime)
self.assertFalse(as_datetime == as_date)
self.assertRaises(TypeError, lambda: as_date < as_datetime)
self.assertRaises(TypeError, lambda: as_datetime < as_date)
self.assertRaises(TypeError, lambda: as_date <= as_datetime)
self.assertRaises(TypeError, lambda: as_datetime <= as_date)
self.assertRaises(TypeError, lambda: as_date > as_datetime)
self.assertRaises(TypeError, lambda: as_datetime > as_date)
self.assertRaises(TypeError, lambda: as_date >= as_datetime)
self.assertRaises(TypeError, lambda: as_datetime >= as_date)
# Nevertheless, comparison should work with the base-class (date)
# projection if use of a date method is forced.
self.assertEqual(as_date.__eq__(as_datetime), True)
different_day = (as_date.day + 1) % 20 + 1
        as_different = as_datetime.replace(day=different_day)
self.assertEqual(as_date.__eq__(as_different), False)
# And date should compare with other subclasses of date. If a
# subclass wants to stop this, it's up to the subclass to do so.
date_sc = SubclassDate(as_date.year, as_date.month, as_date.day)
self.assertEqual(as_date, date_sc)
self.assertEqual(date_sc, as_date)
# Ditto for datetimes.
datetime_sc = SubclassDatetime(as_datetime.year, as_datetime.month,
as_date.day, 0, 0, 0)
self.assertEqual(as_datetime, datetime_sc)
self.assertEqual(datetime_sc, as_datetime)
def test_extra_attributes(self):
for x in [date.today(),
time(),
datetime.utcnow(),
timedelta(),
tzinfo(),
timezone(timedelta())]:
with self.assertRaises(AttributeError):
x.abc = 1
def test_check_arg_types(self):
class Number:
def __init__(self, value):
self.value = value
def __int__(self):
return self.value
for xx in [decimal.Decimal(10),
decimal.Decimal('10.9'),
Number(10)]:
with self.assertWarns(DeprecationWarning):
self.assertEqual(datetime(10, 10, 10, 10, 10, 10, 10),
datetime(xx, xx, xx, xx, xx, xx, xx))
with self.assertRaisesRegex(TypeError, '^an integer is required '
r'\(got type str\)$'):
datetime(10, 10, '10')
f10 = Number(10.9)
with self.assertRaisesRegex(TypeError, '^__int__ returned non-int '
r'\(type float\)$'):
datetime(10, 10, f10)
class Float(float):
pass
s10 = Float(10.9)
with self.assertRaisesRegex(TypeError, '^integer argument expected, '
'got float$'):
datetime(10, 10, s10)
with self.assertRaises(TypeError):
datetime(10., 10, 10)
with self.assertRaises(TypeError):
datetime(10, 10., 10)
with self.assertRaises(TypeError):
datetime(10, 10, 10.)
with self.assertRaises(TypeError):
datetime(10, 10, 10, 10.)
with self.assertRaises(TypeError):
datetime(10, 10, 10, 10, 10.)
with self.assertRaises(TypeError):
datetime(10, 10, 10, 10, 10, 10.)
with self.assertRaises(TypeError):
datetime(10, 10, 10, 10, 10, 10, 10.)
#############################################################################
# Local Time Disambiguation
# An experimental reimplementation of fromutc that respects the "fold" flag.
class tzinfo2(tzinfo):
def fromutc(self, dt):
"datetime in UTC -> datetime in local time."
if not isinstance(dt, datetime):
raise TypeError("fromutc() requires a datetime argument")
if dt.tzinfo is not self:
raise ValueError("dt.tzinfo is not self")
# Returned value satisfies
# dt + ldt.utcoffset() = ldt
off0 = dt.replace(fold=0).utcoffset()
off1 = dt.replace(fold=1).utcoffset()
if off0 is None or off1 is None or dt.dst() is None:
raise ValueError
if off0 == off1:
ldt = dt + off0
off1 = ldt.utcoffset()
if off0 == off1:
return ldt
# Now, we discovered both possible offsets, so
# we can just try four possible solutions:
for off in [off0, off1]:
ldt = dt + off
if ldt.utcoffset() == off:
return ldt
ldt = ldt.replace(fold=1)
if ldt.utcoffset() == off:
return ldt
raise ValueError("No suitable local time found")
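# Editor's note (clarifying comment, not part of the original): the loop above
# tries four candidate local times (dt+off0 and dt+off1, each with fold=0 and
# fold=1) and returns the first one whose utcoffset() reproduces the offset
# that was added, i.e. the first candidate satisfying dt + ldt.utcoffset() == ldt.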
# Reimplementing simplified US timezones to respect the "fold" flag:
class USTimeZone2(tzinfo2):
def __init__(self, hours, reprname, stdname, dstname):
self.stdoffset = timedelta(hours=hours)
self.reprname = reprname
self.stdname = stdname
self.dstname = dstname
def __repr__(self):
return self.reprname
def tzname(self, dt):
if self.dst(dt):
return self.dstname
else:
return self.stdname
def utcoffset(self, dt):
return self.stdoffset + self.dst(dt)
def dst(self, dt):
if dt is None or dt.tzinfo is None:
# An exception instead may be sensible here, in one or more of
# the cases.
return ZERO
assert dt.tzinfo is self
# Find first Sunday in April.
start = first_sunday_on_or_after(DSTSTART.replace(year=dt.year))
assert start.weekday() == 6 and start.month == 4 and start.day <= 7
# Find last Sunday in October.
end = first_sunday_on_or_after(DSTEND.replace(year=dt.year))
assert end.weekday() == 6 and end.month == 10 and end.day >= 25
# Can't compare naive to aware objects, so strip the timezone from
# dt first.
dt = dt.replace(tzinfo=None)
if start + HOUR <= dt < end:
# DST is in effect.
return HOUR
elif end <= dt < end + HOUR:
# Fold (an ambiguous hour): use dt.fold to disambiguate.
return ZERO if dt.fold else HOUR
elif start <= dt < start + HOUR:
# Gap (a non-existent hour): reverse the fold rule.
return HOUR if dt.fold else ZERO
else:
# DST is off.
return ZERO
Eastern2 = USTimeZone2(-5, "Eastern2", "EST", "EDT")
Central2 = USTimeZone2(-6, "Central2", "CST", "CDT")
Mountain2 = USTimeZone2(-7, "Mountain2", "MST", "MDT")
Pacific2 = USTimeZone2(-8, "Pacific2", "PST", "PDT")
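# --- Editor's note: illustrative sketch, not part of the original test suite. ---
# With the fold-aware Eastern2 above, a wall time inside the 2002-10-27
# fall-back fold is disambiguated by the PEP 495 "fold" flag: fold=0 selects
# the first (daylight) occurrence, fold=1 the repeated (standard) one.
assert datetime(2002, 10, 27, 1, 30, tzinfo=Eastern2).utcoffset() == -4 * HOUR
assert datetime(2002, 10, 27, 1, 30, fold=1, tzinfo=Eastern2).utcoffset() == -5 * HOUR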
# Europe_Vilnius_1941 tzinfo implementation reproduces the following
# 1941 transition from Olson's tzdist:
#
# Zone NAME GMTOFF RULES FORMAT [UNTIL]
# ZoneEurope/Vilnius 1:00 - CET 1940 Aug 3
# 3:00 - MSK 1941 Jun 24
# 1:00 C-Eur CE%sT 1944 Aug
#
# $ zdump -v Europe/Vilnius | grep 1941
# Europe/Vilnius Mon Jun 23 20:59:59 1941 UTC = Mon Jun 23 23:59:59 1941 MSK isdst=0 gmtoff=10800
# Europe/Vilnius Mon Jun 23 21:00:00 1941 UTC = Mon Jun 23 23:00:00 1941 CEST isdst=1 gmtoff=7200
class Europe_Vilnius_1941(tzinfo):
def _utc_fold(self):
return [datetime(1941, 6, 23, 21, tzinfo=self), # Mon Jun 23 21:00:00 1941 UTC
datetime(1941, 6, 23, 22, tzinfo=self)] # Mon Jun 23 22:00:00 1941 UTC
def _loc_fold(self):
return [datetime(1941, 6, 23, 23, tzinfo=self), # Mon Jun 23 23:00:00 1941 MSK / CEST
datetime(1941, 6, 24, 0, tzinfo=self)] # Mon Jun 24 00:00:00 1941 CEST
def utcoffset(self, dt):
fold_start, fold_stop = self._loc_fold()
if dt < fold_start:
return 3 * HOUR
if dt < fold_stop:
return (2 if dt.fold else 3) * HOUR
# if dt >= fold_stop
return 2 * HOUR
def dst(self, dt):
fold_start, fold_stop = self._loc_fold()
if dt < fold_start:
return 0 * HOUR
if dt < fold_stop:
return (1 if dt.fold else 0) * HOUR
# if dt >= fold_stop
return 1 * HOUR
def tzname(self, dt):
fold_start, fold_stop = self._loc_fold()
if dt < fold_start:
return 'MSK'
if dt < fold_stop:
return ('MSK', 'CEST')[dt.fold]
# if dt >= fold_stop
return 'CEST'
def fromutc(self, dt):
assert dt.fold == 0
assert dt.tzinfo is self
if dt.year != 1941:
raise NotImplementedError
fold_start, fold_stop = self._utc_fold()
if dt < fold_start:
return dt + 3 * HOUR
if dt < fold_stop:
return (dt + 2 * HOUR).replace(fold=1)
# if dt >= fold_stop
return dt + 2 * HOUR
class TestLocalTimeDisambiguation(unittest.TestCase):
def test_vilnius_1941_fromutc(self):
Vilnius = Europe_Vilnius_1941()
gdt = datetime(1941, 6, 23, 20, 59, 59, tzinfo=timezone.utc)
ldt = gdt.astimezone(Vilnius)
self.assertEqual(ldt.strftime("%c %Z%z"),
'Mon Jun 23 23:59:59 1941 MSK+0300')
self.assertEqual(ldt.fold, 0)
self.assertFalse(ldt.dst())
gdt = datetime(1941, 6, 23, 21, tzinfo=timezone.utc)
ldt = gdt.astimezone(Vilnius)
self.assertEqual(ldt.strftime("%c %Z%z"),
'Mon Jun 23 23:00:00 1941 CEST+0200')
self.assertEqual(ldt.fold, 1)
self.assertTrue(ldt.dst())
gdt = datetime(1941, 6, 23, 22, tzinfo=timezone.utc)
ldt = gdt.astimezone(Vilnius)
self.assertEqual(ldt.strftime("%c %Z%z"),
'Tue Jun 24 00:00:00 1941 CEST+0200')
self.assertEqual(ldt.fold, 0)
self.assertTrue(ldt.dst())
def test_vilnius_1941_toutc(self):
Vilnius = Europe_Vilnius_1941()
ldt = datetime(1941, 6, 23, 22, 59, 59, tzinfo=Vilnius)
gdt = ldt.astimezone(timezone.utc)
self.assertEqual(gdt.strftime("%c %Z"),
'Mon Jun 23 19:59:59 1941 UTC')
ldt = datetime(1941, 6, 23, 23, 59, 59, tzinfo=Vilnius)
gdt = ldt.astimezone(timezone.utc)
self.assertEqual(gdt.strftime("%c %Z"),
'Mon Jun 23 20:59:59 1941 UTC')
ldt = datetime(1941, 6, 23, 23, 59, 59, tzinfo=Vilnius, fold=1)
gdt = ldt.astimezone(timezone.utc)
self.assertEqual(gdt.strftime("%c %Z"),
'Mon Jun 23 21:59:59 1941 UTC')
ldt = datetime(1941, 6, 24, 0, tzinfo=Vilnius)
gdt = ldt.astimezone(timezone.utc)
self.assertEqual(gdt.strftime("%c %Z"),
'Mon Jun 23 22:00:00 1941 UTC')
def test_constructors(self):
t = time(0, fold=1)
dt = datetime(1, 1, 1, fold=1)
self.assertEqual(t.fold, 1)
self.assertEqual(dt.fold, 1)
with self.assertRaises(TypeError):
time(0, 0, 0, 0, None, 0)
def test_member(self):
dt = datetime(1, 1, 1, fold=1)
t = dt.time()
self.assertEqual(t.fold, 1)
t = dt.timetz()
self.assertEqual(t.fold, 1)
def test_replace(self):
t = time(0)
dt = datetime(1, 1, 1)
self.assertEqual(t.replace(fold=1).fold, 1)
self.assertEqual(dt.replace(fold=1).fold, 1)
self.assertEqual(t.replace(fold=0).fold, 0)
self.assertEqual(dt.replace(fold=0).fold, 0)
# Check that replacement of other fields does not change "fold".
t = t.replace(fold=1, tzinfo=Eastern)
dt = dt.replace(fold=1, tzinfo=Eastern)
self.assertEqual(t.replace(tzinfo=None).fold, 1)
self.assertEqual(dt.replace(tzinfo=None).fold, 1)
# Out of bounds.
with self.assertRaises(ValueError):
t.replace(fold=2)
with self.assertRaises(ValueError):
dt.replace(fold=2)
# Check that fold is a keyword-only argument
with self.assertRaises(TypeError):
t.replace(1, 1, 1, None, 1)
with self.assertRaises(TypeError):
dt.replace(1, 1, 1, 1, 1, 1, 1, None, 1)
def test_comparison(self):
t = time(0)
dt = datetime(1, 1, 1)
self.assertEqual(t, t.replace(fold=1))
self.assertEqual(dt, dt.replace(fold=1))
def test_hash(self):
t = time(0)
dt = datetime(1, 1, 1)
self.assertEqual(hash(t), hash(t.replace(fold=1)))
self.assertEqual(hash(dt), hash(dt.replace(fold=1)))
@support.run_with_tz('EST+05EDT,M3.2.0,M11.1.0')
def test_fromtimestamp(self):
s = 1414906200
dt0 = datetime.fromtimestamp(s)
dt1 = datetime.fromtimestamp(s + 3600)
self.assertEqual(dt0.fold, 0)
self.assertEqual(dt1.fold, 1)
@support.run_with_tz('Australia/Lord_Howe')
def test_fromtimestamp_lord_howe(self):
tm = _time.localtime(1.4e9)
if _time.strftime('%Z%z', tm) != 'LHST+1030':
self.skipTest('Australia/Lord_Howe timezone is not supported on this platform')
# $ TZ=Australia/Lord_Howe date -r 1428158700
# Sun Apr 5 01:45:00 LHDT 2015
# $ TZ=Australia/Lord_Howe date -r 1428160500
# Sun Apr 5 01:45:00 LHST 2015
s = 1428158700
t0 = datetime.fromtimestamp(s)
t1 = datetime.fromtimestamp(s + 1800)
self.assertEqual(t0, t1)
self.assertEqual(t0.fold, 0)
self.assertEqual(t1.fold, 1)
def test_fromtimestamp_low_fold_detection(self):
# Ensure that fold detection doesn't cause an
# OSError for really low values, see bpo-29097
self.assertEqual(datetime.fromtimestamp(0).fold, 0)
@support.run_with_tz('EST+05EDT,M3.2.0,M11.1.0')
def test_timestamp(self):
dt0 = datetime(2014, 11, 2, 1, 30)
dt1 = dt0.replace(fold=1)
self.assertEqual(dt0.timestamp() + 3600,
dt1.timestamp())
@support.run_with_tz('Australia/Lord_Howe')
def test_timestamp_lord_howe(self):
tm = _time.localtime(1.4e9)
if _time.strftime('%Z%z', tm) != 'LHST+1030':
self.skipTest('Australia/Lord_Howe timezone is not supported on this platform')
t = datetime(2015, 4, 5, 1, 45)
s0 = t.replace(fold=0).timestamp()
s1 = t.replace(fold=1).timestamp()
self.assertEqual(s0 + 1800, s1)
@support.run_with_tz('EST+05EDT,M3.2.0,M11.1.0')
def test_astimezone(self):
dt0 = datetime(2014, 11, 2, 1, 30)
dt1 = dt0.replace(fold=1)
# Convert both naive instances to aware.
adt0 = dt0.astimezone()
adt1 = dt1.astimezone()
        # Check that the first instance is in DST and the second is in STD.
self.assertEqual(adt0.tzname(), 'EDT')
self.assertEqual(adt1.tzname(), 'EST')
self.assertEqual(adt0 + HOUR, adt1)
# Aware instances with fixed offset tzinfo's always have fold=0
self.assertEqual(adt0.fold, 0)
self.assertEqual(adt1.fold, 0)
def test_pickle_fold(self):
t = time(fold=1)
dt = datetime(1, 1, 1, fold=1)
for pickler, unpickler, proto in pickle_choices:
for x in [t, dt]:
s = pickler.dumps(x, proto)
y = unpickler.loads(s)
self.assertEqual(x, y)
self.assertEqual((0 if proto < 4 else x.fold), y.fold)
def test_repr(self):
t = time(fold=1)
dt = datetime(1, 1, 1, fold=1)
self.assertEqual(repr(t), 'datetime.time(0, 0, fold=1)')
self.assertEqual(repr(dt),
'datetime.datetime(1, 1, 1, 0, 0, fold=1)')
def test_dst(self):
# Let's first establish that things work in regular times.
dt_summer = datetime(2002, 10, 27, 1, tzinfo=Eastern2) - timedelta.resolution
dt_winter = datetime(2002, 10, 27, 2, tzinfo=Eastern2)
self.assertEqual(dt_summer.dst(), HOUR)
self.assertEqual(dt_winter.dst(), ZERO)
# The disambiguation flag is ignored
self.assertEqual(dt_summer.replace(fold=1).dst(), HOUR)
self.assertEqual(dt_winter.replace(fold=1).dst(), ZERO)
# Pick local time in the fold.
for minute in [0, 30, 59]:
dt = datetime(2002, 10, 27, 1, minute, tzinfo=Eastern2)
# With fold=0 (the default) it is in DST.
self.assertEqual(dt.dst(), HOUR)
# With fold=1 it is in STD.
self.assertEqual(dt.replace(fold=1).dst(), ZERO)
# Pick local time in the gap.
for minute in [0, 30, 59]:
dt = datetime(2002, 4, 7, 2, minute, tzinfo=Eastern2)
# With fold=0 (the default) it is in STD.
self.assertEqual(dt.dst(), ZERO)
# With fold=1 it is in DST.
self.assertEqual(dt.replace(fold=1).dst(), HOUR)
def test_utcoffset(self):
# Let's first establish that things work in regular times.
dt_summer = datetime(2002, 10, 27, 1, tzinfo=Eastern2) - timedelta.resolution
dt_winter = datetime(2002, 10, 27, 2, tzinfo=Eastern2)
self.assertEqual(dt_summer.utcoffset(), -4 * HOUR)
self.assertEqual(dt_winter.utcoffset(), -5 * HOUR)
# The disambiguation flag is ignored
self.assertEqual(dt_summer.replace(fold=1).utcoffset(), -4 * HOUR)
self.assertEqual(dt_winter.replace(fold=1).utcoffset(), -5 * HOUR)
def test_fromutc(self):
# Let's first establish that things work in regular times.
u_summer = datetime(2002, 10, 27, 6, tzinfo=Eastern2) - timedelta.resolution
u_winter = datetime(2002, 10, 27, 7, tzinfo=Eastern2)
t_summer = Eastern2.fromutc(u_summer)
t_winter = Eastern2.fromutc(u_winter)
self.assertEqual(t_summer, u_summer - 4 * HOUR)
self.assertEqual(t_winter, u_winter - 5 * HOUR)
self.assertEqual(t_summer.fold, 0)
self.assertEqual(t_winter.fold, 0)
# What happens in the fall-back fold?
u = datetime(2002, 10, 27, 5, 30, tzinfo=Eastern2)
t0 = Eastern2.fromutc(u)
u += HOUR
t1 = Eastern2.fromutc(u)
self.assertEqual(t0, t1)
self.assertEqual(t0.fold, 0)
self.assertEqual(t1.fold, 1)
# The tricky part is when u is in the local fold:
u = datetime(2002, 10, 27, 1, 30, tzinfo=Eastern2)
t = Eastern2.fromutc(u)
self.assertEqual((t.day, t.hour), (26, 21))
# .. or gets into the local fold after a standard time adjustment
u = datetime(2002, 10, 27, 6, 30, tzinfo=Eastern2)
t = Eastern2.fromutc(u)
self.assertEqual((t.day, t.hour), (27, 1))
# What happens in the spring-forward gap?
u = datetime(2002, 4, 7, 2, 0, tzinfo=Eastern2)
t = Eastern2.fromutc(u)
self.assertEqual((t.day, t.hour), (6, 21))
def test_mixed_compare_regular(self):
t = datetime(2000, 1, 1, tzinfo=Eastern2)
self.assertEqual(t, t.astimezone(timezone.utc))
t = datetime(2000, 6, 1, tzinfo=Eastern2)
self.assertEqual(t, t.astimezone(timezone.utc))
def test_mixed_compare_fold(self):
t_fold = datetime(2002, 10, 27, 1, 45, tzinfo=Eastern2)
t_fold_utc = t_fold.astimezone(timezone.utc)
self.assertNotEqual(t_fold, t_fold_utc)
self.assertNotEqual(t_fold_utc, t_fold)
def test_mixed_compare_gap(self):
t_gap = datetime(2002, 4, 7, 2, 45, tzinfo=Eastern2)
t_gap_utc = t_gap.astimezone(timezone.utc)
self.assertNotEqual(t_gap, t_gap_utc)
self.assertNotEqual(t_gap_utc, t_gap)
def test_hash_aware(self):
t = datetime(2000, 1, 1, tzinfo=Eastern2)
self.assertEqual(hash(t), hash(t.replace(fold=1)))
t_fold = datetime(2002, 10, 27, 1, 45, tzinfo=Eastern2)
t_gap = datetime(2002, 4, 7, 2, 45, tzinfo=Eastern2)
self.assertEqual(hash(t_fold), hash(t_fold.replace(fold=1)))
self.assertEqual(hash(t_gap), hash(t_gap.replace(fold=1)))
SEC = timedelta(0, 1)
def pairs(iterable):
a, b = itertools.tee(iterable)
next(b, None)
return zip(a, b)
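# Editor's note (illustrative, not part of the original): pairs() yields
# consecutive overlapping pairs, e.g. list(pairs([1, 2, 3])) == [(1, 2), (2, 3)].
# It is used below to walk successive (transition time, tzinfo data) entries.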
class ZoneInfo(tzinfo):
zoneroot = '/usr/share/zoneinfo'
def __init__(self, ut, ti):
"""
:param ut: array
Array of transition point timestamps
:param ti: list
A list of (offset, isdst, abbr) tuples
:return: None
"""
self.ut = ut
self.ti = ti
self.lt = self.invert(ut, ti)
@staticmethod
def invert(ut, ti):
lt = (array('q', ut), array('q', ut))
if ut:
offset = ti[0][0] // SEC
lt[0][0] += offset
lt[1][0] += offset
for i in range(1, len(ut)):
lt[0][i] += ti[i-1][0] // SEC
lt[1][i] += ti[i][0] // SEC
return lt
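    # Editor's note (clarifying comment, not part of the original): invert()
    # expresses every UTC transition instant in local time twice: lt[0][i] adds
    # the offset in force before transition i, lt[1][i] the offset after it.
    # _find_ti() below bisects over lt[dt.fold], so, roughly, fold=0 resolves an
    # ambiguous wall time with the pre-transition offset and fold=1 with the
    # post-transition one (the roles are reversed inside a spring-forward gap).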
@classmethod
def fromfile(cls, fileobj):
if fileobj.read(4).decode() != "TZif":
raise ValueError("not a zoneinfo file")
fileobj.seek(32)
counts = array('i')
counts.fromfile(fileobj, 3)
if sys.byteorder != 'big':
counts.byteswap()
ut = array('i')
ut.fromfile(fileobj, counts[0])
if sys.byteorder != 'big':
ut.byteswap()
type_indices = array('B')
type_indices.fromfile(fileobj, counts[0])
ttis = []
for i in range(counts[1]):
ttis.append(struct.unpack(">lbb", fileobj.read(6)))
abbrs = fileobj.read(counts[2])
# Convert ttis
for i, (gmtoff, isdst, abbrind) in enumerate(ttis):
abbr = abbrs[abbrind:abbrs.find(0, abbrind)].decode()
ttis[i] = (timedelta(0, gmtoff), isdst, abbr)
ti = [None] * len(ut)
for i, idx in enumerate(type_indices):
ti[i] = ttis[idx]
self = cls(ut, ti)
return self
@classmethod
def fromname(cls, name):
path = os.path.join(cls.zoneroot, name)
with open(path, 'rb') as f:
return cls.fromfile(f)
EPOCHORDINAL = date(1970, 1, 1).toordinal()
def fromutc(self, dt):
"""datetime in UTC -> datetime in local time."""
if not isinstance(dt, datetime):
raise TypeError("fromutc() requires a datetime argument")
if dt.tzinfo is not self:
raise ValueError("dt.tzinfo is not self")
timestamp = ((dt.toordinal() - self.EPOCHORDINAL) * 86400
+ dt.hour * 3600
+ dt.minute * 60
+ dt.second)
if timestamp < self.ut[1]:
tti = self.ti[0]
fold = 0
else:
idx = bisect.bisect_right(self.ut, timestamp)
assert self.ut[idx-1] <= timestamp
assert idx == len(self.ut) or timestamp < self.ut[idx]
tti_prev, tti = self.ti[idx-2:idx]
# Detect fold
shift = tti_prev[0] - tti[0]
fold = (shift > timedelta(0, timestamp - self.ut[idx-1]))
dt += tti[0]
if fold:
return dt.replace(fold=1)
else:
return dt
def _find_ti(self, dt, i):
timestamp = ((dt.toordinal() - self.EPOCHORDINAL) * 86400
+ dt.hour * 3600
+ dt.minute * 60
+ dt.second)
lt = self.lt[dt.fold]
idx = bisect.bisect_right(lt, timestamp)
return self.ti[max(0, idx - 1)][i]
def utcoffset(self, dt):
return self._find_ti(dt, 0)
def dst(self, dt):
isdst = self._find_ti(dt, 1)
# XXX: We cannot accurately determine the "save" value,
# so let's return 1h whenever DST is in effect. Since
# we don't use dst() in fromutc(), it is unlikely that
# it will be needed for anything more than bool(dst()).
        return HOUR if isdst else ZERO
def tzname(self, dt):
return self._find_ti(dt, 2)
@classmethod
def zonenames(cls, zonedir=None):
if zonedir is None:
zonedir = cls.zoneroot
zone_tab = os.path.join(zonedir, 'zone.tab')
try:
f = open(zone_tab)
except OSError:
return
with f:
for line in f:
line = line.strip()
if line and not line.startswith('#'):
yield line.split()[2]
@classmethod
def stats(cls, start_year=1):
count = gap_count = fold_count = zeros_count = 0
min_gap = min_fold = timedelta.max
max_gap = max_fold = ZERO
min_gap_datetime = max_gap_datetime = datetime.min
min_gap_zone = max_gap_zone = None
min_fold_datetime = max_fold_datetime = datetime.min
min_fold_zone = max_fold_zone = None
stats_since = datetime(start_year, 1, 1) # Starting from 1970 eliminates a lot of noise
for zonename in cls.zonenames():
count += 1
tz = cls.fromname(zonename)
for dt, shift in tz.transitions():
if dt < stats_since:
continue
if shift > ZERO:
gap_count += 1
if (shift, dt) > (max_gap, max_gap_datetime):
max_gap = shift
max_gap_zone = zonename
max_gap_datetime = dt
if (shift, datetime.max - dt) < (min_gap, datetime.max - min_gap_datetime):
min_gap = shift
min_gap_zone = zonename
min_gap_datetime = dt
elif shift < ZERO:
fold_count += 1
shift = -shift
if (shift, dt) > (max_fold, max_fold_datetime):
max_fold = shift
max_fold_zone = zonename
max_fold_datetime = dt
if (shift, datetime.max - dt) < (min_fold, datetime.max - min_fold_datetime):
min_fold = shift
min_fold_zone = zonename
min_fold_datetime = dt
else:
zeros_count += 1
trans_counts = (gap_count, fold_count, zeros_count)
print("Number of zones: %5d" % count)
print("Number of transitions: %5d = %d (gaps) + %d (folds) + %d (zeros)" %
((sum(trans_counts),) + trans_counts))
print("Min gap: %16s at %s in %s" % (min_gap, min_gap_datetime, min_gap_zone))
print("Max gap: %16s at %s in %s" % (max_gap, max_gap_datetime, max_gap_zone))
print("Min fold: %16s at %s in %s" % (min_fold, min_fold_datetime, min_fold_zone))
print("Max fold: %16s at %s in %s" % (max_fold, max_fold_datetime, max_fold_zone))
def transitions(self):
for (_, prev_ti), (t, ti) in pairs(zip(self.ut, self.ti)):
shift = ti[0] - prev_ti[0]
yield datetime.utcfromtimestamp(t), shift
def nondst_folds(self):
"""Find all folds with the same value of isdst on both sides of the transition."""
for (_, prev_ti), (t, ti) in pairs(zip(self.ut, self.ti)):
shift = ti[0] - prev_ti[0]
if shift < ZERO and ti[1] == prev_ti[1]:
yield datetime.utcfromtimestamp(t), -shift, prev_ti[2], ti[2]
@classmethod
def print_all_nondst_folds(cls, same_abbr=False, start_year=1):
count = 0
for zonename in cls.zonenames():
tz = cls.fromname(zonename)
for dt, shift, prev_abbr, abbr in tz.nondst_folds():
if dt.year < start_year or same_abbr and prev_abbr != abbr:
continue
count += 1
print("%3d) %-30s %s %10s %5s -> %s" %
(count, zonename, dt, shift, prev_abbr, abbr))
def folds(self):
for t, shift in self.transitions():
if shift < ZERO:
yield t, -shift
def gaps(self):
for t, shift in self.transitions():
if shift > ZERO:
yield t, shift
def zeros(self):
for t, shift in self.transitions():
if not shift:
yield t
class ZoneInfoTest(unittest.TestCase):
zonename = 'America/New_York'
def setUp(self):
if sys.platform == "vxworks":
self.skipTest("Skipping zoneinfo tests on VxWorks")
if sys.platform == "win32":
self.skipTest("Skipping zoneinfo tests on Windows")
try:
self.tz = ZoneInfo.fromname(self.zonename)
except FileNotFoundError as err:
self.skipTest("Skipping %s: %s" % (self.zonename, err))
def assertEquivDatetimes(self, a, b):
self.assertEqual((a.replace(tzinfo=None), a.fold, id(a.tzinfo)),
(b.replace(tzinfo=None), b.fold, id(b.tzinfo)))
def test_folds(self):
tz = self.tz
for dt, shift in tz.folds():
for x in [0 * shift, 0.5 * shift, shift - timedelta.resolution]:
udt = dt + x
ldt = tz.fromutc(udt.replace(tzinfo=tz))
self.assertEqual(ldt.fold, 1)
adt = udt.replace(tzinfo=timezone.utc).astimezone(tz)
self.assertEquivDatetimes(adt, ldt)
utcoffset = ldt.utcoffset()
self.assertEqual(ldt.replace(tzinfo=None), udt + utcoffset)
# Round trip
self.assertEquivDatetimes(ldt.astimezone(timezone.utc),
udt.replace(tzinfo=timezone.utc))
for x in [-timedelta.resolution, shift]:
udt = dt + x
udt = udt.replace(tzinfo=tz)
ldt = tz.fromutc(udt)
self.assertEqual(ldt.fold, 0)
def test_gaps(self):
tz = self.tz
for dt, shift in tz.gaps():
for x in [0 * shift, 0.5 * shift, shift - timedelta.resolution]:
udt = dt + x
udt = udt.replace(tzinfo=tz)
ldt = tz.fromutc(udt)
self.assertEqual(ldt.fold, 0)
adt = udt.replace(tzinfo=timezone.utc).astimezone(tz)
self.assertEquivDatetimes(adt, ldt)
utcoffset = ldt.utcoffset()
self.assertEqual(ldt.replace(tzinfo=None), udt.replace(tzinfo=None) + utcoffset)
# Create a local time inside the gap
ldt = tz.fromutc(dt.replace(tzinfo=tz)) - shift + x
self.assertLess(ldt.replace(fold=1).utcoffset(),
ldt.replace(fold=0).utcoffset(),
"At %s." % ldt)
for x in [-timedelta.resolution, shift]:
udt = dt + x
ldt = tz.fromutc(udt.replace(tzinfo=tz))
self.assertEqual(ldt.fold, 0)
def test_system_transitions(self):
if ('Riyadh8' in self.zonename or
# From tzdata NEWS file:
# The files solar87, solar88, and solar89 are no longer distributed.
# They were a negative experiment - that is, a demonstration that
# tz data can represent solar time only with some difficulty and error.
# Their presence in the distribution caused confusion, as Riyadh
# civil time was generally not solar time in those years.
self.zonename.startswith('right/')):
self.skipTest("Skipping %s" % self.zonename)
tz = self.tz
TZ = os.environ.get('TZ')
os.environ['TZ'] = self.zonename
try:
_time.tzset()
for udt, shift in tz.transitions():
if udt.year >= 2037:
# System support for times around the end of 32-bit time_t
# and later is flaky on many systems.
break
s0 = (udt - datetime(1970, 1, 1)) // SEC
ss = shift // SEC # shift seconds
for x in [-40 * 3600, -20*3600, -1, 0,
ss - 1, ss + 20 * 3600, ss + 40 * 3600]:
s = s0 + x
sdt = datetime.fromtimestamp(s)
tzdt = datetime.fromtimestamp(s, tz).replace(tzinfo=None)
self.assertEquivDatetimes(sdt, tzdt)
s1 = sdt.timestamp()
self.assertEqual(s, s1)
if ss > 0: # gap
# Create local time inside the gap
dt = datetime.fromtimestamp(s0) - shift / 2
ts0 = dt.timestamp()
ts1 = dt.replace(fold=1).timestamp()
self.assertEqual(ts0, s0 + ss / 2)
self.assertEqual(ts1, s0 - ss / 2)
finally:
if TZ is None:
del os.environ['TZ']
else:
os.environ['TZ'] = TZ
_time.tzset()
class ZoneInfoCompleteTest(unittest.TestSuite):
def __init__(self):
tests = []
if is_resource_enabled('tzdata'):
for name in ZoneInfo.zonenames():
Test = type('ZoneInfoTest[%s]' % name, (ZoneInfoTest,), {})
Test.zonename = name
for method in dir(Test):
if method.startswith('test_'):
tests.append(Test(method))
super().__init__(tests)
# Iran had a sub-minute UTC offset before 1946.
class IranTest(ZoneInfoTest):
zonename = 'Asia/Tehran'
class CapiTest(unittest.TestCase):
def setUp(self):
# Since the C API is not present in the _Pure tests, skip all tests
if self.__class__.__name__.endswith('Pure'):
self.skipTest('Not relevant in pure Python')
# This *must* be called, and it must be called first, so until either
# restriction is loosened, we'll call it as part of test setup
_testcapi.test_datetime_capi()
def test_utc_capi(self):
for use_macro in (True, False):
capi_utc = _testcapi.get_timezone_utc_capi(use_macro)
with self.subTest(use_macro=use_macro):
self.assertIs(capi_utc, timezone.utc)
def test_timezones_capi(self):
est_capi, est_macro, est_macro_nn = _testcapi.make_timezones_capi()
exp_named = timezone(timedelta(hours=-5), "EST")
exp_unnamed = timezone(timedelta(hours=-5))
cases = [
('est_capi', est_capi, exp_named),
('est_macro', est_macro, exp_named),
('est_macro_nn', est_macro_nn, exp_unnamed)
]
for name, tz_act, tz_exp in cases:
with self.subTest(name=name):
self.assertEqual(tz_act, tz_exp)
dt1 = datetime(2000, 2, 4, tzinfo=tz_act)
dt2 = datetime(2000, 2, 4, tzinfo=tz_exp)
self.assertEqual(dt1, dt2)
self.assertEqual(dt1.tzname(), dt2.tzname())
dt_utc = datetime(2000, 2, 4, 5, tzinfo=timezone.utc)
self.assertEqual(dt1.astimezone(timezone.utc), dt_utc)
def test_PyDateTime_DELTA_GET(self):
class TimeDeltaSubclass(timedelta):
pass
for klass in [timedelta, TimeDeltaSubclass]:
for args in [(26, 55, 99999), (26, 55, 99999)]:
d = klass(*args)
with self.subTest(cls=klass, date=args):
days, seconds, microseconds = _testcapi.PyDateTime_DELTA_GET(d)
self.assertEqual(days, d.days)
self.assertEqual(seconds, d.seconds)
self.assertEqual(microseconds, d.microseconds)
def test_PyDateTime_GET(self):
class DateSubclass(date):
pass
for klass in [date, DateSubclass]:
for args in [(2000, 1, 2), (2012, 2, 29)]:
d = klass(*args)
with self.subTest(cls=klass, date=args):
year, month, day = _testcapi.PyDateTime_GET(d)
self.assertEqual(year, d.year)
self.assertEqual(month, d.month)
self.assertEqual(day, d.day)
def test_PyDateTime_DATE_GET(self):
class DateTimeSubclass(datetime):
pass
for klass in [datetime, DateTimeSubclass]:
for args in [(1993, 8, 26, 22, 12, 55, 99999),
(1993, 8, 26, 22, 12, 55, 99999)]:
d = klass(*args)
with self.subTest(cls=klass, date=args):
hour, minute, second, microsecond = _testcapi.PyDateTime_DATE_GET(d)
self.assertEqual(hour, d.hour)
self.assertEqual(minute, d.minute)
self.assertEqual(second, d.second)
self.assertEqual(microsecond, d.microsecond)
def test_PyDateTime_TIME_GET(self):
class TimeSubclass(time):
pass
for klass in [time, TimeSubclass]:
for args in [(12, 30, 20, 10), (12, 30, 20, 10)]:
d = klass(*args)
with self.subTest(cls=klass, date=args):
hour, minute, second, microsecond = _testcapi.PyDateTime_TIME_GET(d)
self.assertEqual(hour, d.hour)
self.assertEqual(minute, d.minute)
self.assertEqual(second, d.second)
self.assertEqual(microsecond, d.microsecond)
def test_timezones_offset_zero(self):
utc0, utc1, non_utc = _testcapi.get_timezones_offset_zero()
with self.subTest(testname="utc0"):
self.assertIs(utc0, timezone.utc)
with self.subTest(testname="utc1"):
self.assertIs(utc1, timezone.utc)
with self.subTest(testname="non_utc"):
self.assertIsNot(non_utc, timezone.utc)
non_utc_exp = timezone(timedelta(hours=0), "")
self.assertEqual(non_utc, non_utc_exp)
dt1 = datetime(2000, 2, 4, tzinfo=non_utc)
dt2 = datetime(2000, 2, 4, tzinfo=non_utc_exp)
self.assertEqual(dt1, dt2)
self.assertEqual(dt1.tzname(), dt2.tzname())
def test_check_date(self):
class DateSubclass(date):
pass
d = date(2011, 1, 1)
ds = DateSubclass(2011, 1, 1)
dt = datetime(2011, 1, 1)
is_date = _testcapi.datetime_check_date
# Check the ones that should be valid
self.assertTrue(is_date(d))
self.assertTrue(is_date(dt))
self.assertTrue(is_date(ds))
self.assertTrue(is_date(d, True))
# Check that the subclasses do not match exactly
self.assertFalse(is_date(dt, True))
self.assertFalse(is_date(ds, True))
# Check that various other things are not dates at all
args = [tuple(), list(), 1, '2011-01-01',
timedelta(1), timezone.utc, time(12, 00)]
for arg in args:
for exact in (True, False):
with self.subTest(arg=arg, exact=exact):
self.assertFalse(is_date(arg, exact))
def test_check_time(self):
class TimeSubclass(time):
pass
t = time(12, 30)
ts = TimeSubclass(12, 30)
is_time = _testcapi.datetime_check_time
# Check the ones that should be valid
self.assertTrue(is_time(t))
self.assertTrue(is_time(ts))
self.assertTrue(is_time(t, True))
# Check that the subclass does not match exactly
self.assertFalse(is_time(ts, True))
# Check that various other things are not times
args = [tuple(), list(), 1, '2011-01-01',
timedelta(1), timezone.utc, date(2011, 1, 1)]
for arg in args:
for exact in (True, False):
with self.subTest(arg=arg, exact=exact):
self.assertFalse(is_time(arg, exact))
def test_check_datetime(self):
class DateTimeSubclass(datetime):
pass
dt = datetime(2011, 1, 1, 12, 30)
dts = DateTimeSubclass(2011, 1, 1, 12, 30)
is_datetime = _testcapi.datetime_check_datetime
# Check the ones that should be valid
self.assertTrue(is_datetime(dt))
self.assertTrue(is_datetime(dts))
self.assertTrue(is_datetime(dt, True))
# Check that the subclass does not match exactly
self.assertFalse(is_datetime(dts, True))
# Check that various other things are not datetimes
args = [tuple(), list(), 1, '2011-01-01',
timedelta(1), timezone.utc, date(2011, 1, 1)]
for arg in args:
for exact in (True, False):
with self.subTest(arg=arg, exact=exact):
self.assertFalse(is_datetime(arg, exact))
def test_check_delta(self):
class TimeDeltaSubclass(timedelta):
pass
td = timedelta(1)
tds = TimeDeltaSubclass(1)
is_timedelta = _testcapi.datetime_check_delta
# Check the ones that should be valid
self.assertTrue(is_timedelta(td))
self.assertTrue(is_timedelta(tds))
self.assertTrue(is_timedelta(td, True))
# Check that the subclass does not match exactly
self.assertFalse(is_timedelta(tds, True))
# Check that various other things are not timedeltas
args = [tuple(), list(), 1, '2011-01-01',
timezone.utc, date(2011, 1, 1), datetime(2011, 1, 1)]
for arg in args:
for exact in (True, False):
with self.subTest(arg=arg, exact=exact):
self.assertFalse(is_timedelta(arg, exact))
def test_check_tzinfo(self):
class TZInfoSubclass(tzinfo):
pass
tzi = tzinfo()
tzis = TZInfoSubclass()
tz = timezone(timedelta(hours=-5))
is_tzinfo = _testcapi.datetime_check_tzinfo
# Check the ones that should be valid
self.assertTrue(is_tzinfo(tzi))
self.assertTrue(is_tzinfo(tz))
self.assertTrue(is_tzinfo(tzis))
self.assertTrue(is_tzinfo(tzi, True))
# Check that the subclasses do not match exactly
self.assertFalse(is_tzinfo(tz, True))
self.assertFalse(is_tzinfo(tzis, True))
# Check that various other things are not tzinfos
args = [tuple(), list(), 1, '2011-01-01',
date(2011, 1, 1), datetime(2011, 1, 1)]
for arg in args:
for exact in (True, False):
with self.subTest(arg=arg, exact=exact):
self.assertFalse(is_tzinfo(arg, exact))
def test_date_from_date(self):
exp_date = date(1993, 8, 26)
for macro in False, True:
with self.subTest(macro=macro):
c_api_date = _testcapi.get_date_fromdate(
macro,
exp_date.year,
exp_date.month,
exp_date.day)
self.assertEqual(c_api_date, exp_date)
def test_datetime_from_dateandtime(self):
exp_date = datetime(1993, 8, 26, 22, 12, 55, 99999)
for macro in False, True:
with self.subTest(macro=macro):
c_api_date = _testcapi.get_datetime_fromdateandtime(
macro,
exp_date.year,
exp_date.month,
exp_date.day,
exp_date.hour,
exp_date.minute,
exp_date.second,
exp_date.microsecond)
self.assertEqual(c_api_date, exp_date)
def test_datetime_from_dateandtimeandfold(self):
exp_date = datetime(1993, 8, 26, 22, 12, 55, 99999)
for fold in [0, 1]:
for macro in False, True:
with self.subTest(macro=macro, fold=fold):
c_api_date = _testcapi.get_datetime_fromdateandtimeandfold(
macro,
exp_date.year,
exp_date.month,
exp_date.day,
exp_date.hour,
exp_date.minute,
exp_date.second,
exp_date.microsecond,
exp_date.fold)
self.assertEqual(c_api_date, exp_date)
self.assertEqual(c_api_date.fold, exp_date.fold)
def test_time_from_time(self):
exp_time = time(22, 12, 55, 99999)
for macro in False, True:
with self.subTest(macro=macro):
c_api_time = _testcapi.get_time_fromtime(
macro,
exp_time.hour,
exp_time.minute,
exp_time.second,
exp_time.microsecond)
self.assertEqual(c_api_time, exp_time)
def test_time_from_timeandfold(self):
exp_time = time(22, 12, 55, 99999)
for fold in [0, 1]:
for macro in False, True:
with self.subTest(macro=macro, fold=fold):
c_api_time = _testcapi.get_time_fromtimeandfold(
macro,
exp_time.hour,
exp_time.minute,
exp_time.second,
exp_time.microsecond,
exp_time.fold)
self.assertEqual(c_api_time, exp_time)
self.assertEqual(c_api_time.fold, exp_time.fold)
def test_delta_from_dsu(self):
exp_delta = timedelta(26, 55, 99999)
for macro in False, True:
with self.subTest(macro=macro):
c_api_delta = _testcapi.get_delta_fromdsu(
macro,
exp_delta.days,
exp_delta.seconds,
exp_delta.microseconds)
self.assertEqual(c_api_delta, exp_delta)
def test_date_from_timestamp(self):
ts = datetime(1995, 4, 12).timestamp()
for macro in False, True:
with self.subTest(macro=macro):
d = _testcapi.get_date_fromtimestamp(int(ts), macro)
self.assertEqual(d, date(1995, 4, 12))
def test_datetime_from_timestamp(self):
cases = [
((1995, 4, 12), None, False),
((1995, 4, 12), None, True),
((1995, 4, 12), timezone(timedelta(hours=1)), True),
((1995, 4, 12, 14, 30), None, False),
((1995, 4, 12, 14, 30), None, True),
((1995, 4, 12, 14, 30), timezone(timedelta(hours=1)), True),
]
from_timestamp = _testcapi.get_datetime_fromtimestamp
for case in cases:
for macro in False, True:
with self.subTest(case=case, macro=macro):
dtup, tzinfo, usetz = case
dt_orig = datetime(*dtup, tzinfo=tzinfo)
ts = int(dt_orig.timestamp())
dt_rt = from_timestamp(ts, tzinfo, usetz, macro)
self.assertEqual(dt_orig, dt_rt)
def load_tests(loader, standard_tests, pattern):
standard_tests.addTest(ZoneInfoCompleteTest())
return standard_tests
if __name__ == "__main__":
unittest.main()
```
#### File: test/test_asyncio/functional.py
```python
import asyncio
import asyncio.events
import contextlib
import os
import pprint
import select
import socket
import tempfile
import threading
from test import support
class FunctionalTestCaseMixin:
def new_loop(self):
return asyncio.new_event_loop()
def run_loop_briefly(self, *, delay=0.01):
self.loop.run_until_complete(asyncio.sleep(delay))
def loop_exception_handler(self, loop, context):
self.__unhandled_exceptions.append(context)
self.loop.default_exception_handler(context)
def setUp(self):
self.loop = self.new_loop()
asyncio.set_event_loop(None)
self.loop.set_exception_handler(self.loop_exception_handler)
self.__unhandled_exceptions = []
# Disable `_get_running_loop`.
self._old_get_running_loop = asyncio.events._get_running_loop
asyncio.events._get_running_loop = lambda: None
def tearDown(self):
try:
self.loop.close()
if self.__unhandled_exceptions:
print('Unexpected calls to loop.call_exception_handler():')
pprint.pprint(self.__unhandled_exceptions)
self.fail('unexpected calls to loop.call_exception_handler()')
finally:
asyncio.events._get_running_loop = self._old_get_running_loop
asyncio.set_event_loop(None)
self.loop = None
def tcp_server(self, server_prog, *,
family=socket.AF_INET,
addr=None,
timeout=support.LOOPBACK_TIMEOUT,
backlog=1,
max_clients=10):
if addr is None:
if hasattr(socket, 'AF_UNIX') and family == socket.AF_UNIX:
with tempfile.NamedTemporaryFile() as tmp:
addr = tmp.name
else:
addr = ('127.0.0.1', 0)
sock = socket.create_server(addr, family=family, backlog=backlog)
if timeout is None:
raise RuntimeError('timeout is required')
if timeout <= 0:
raise RuntimeError('only blocking sockets are supported')
sock.settimeout(timeout)
return TestThreadedServer(
self, sock, server_prog, timeout, max_clients)
def tcp_client(self, client_prog,
family=socket.AF_INET,
timeout=support.LOOPBACK_TIMEOUT):
sock = socket.socket(family, socket.SOCK_STREAM)
if timeout is None:
raise RuntimeError('timeout is required')
if timeout <= 0:
raise RuntimeError('only blocking sockets are supported')
sock.settimeout(timeout)
return TestThreadedClient(
self, sock, client_prog, timeout)
def unix_server(self, *args, **kwargs):
if not hasattr(socket, 'AF_UNIX'):
raise NotImplementedError
return self.tcp_server(*args, family=socket.AF_UNIX, **kwargs)
def unix_client(self, *args, **kwargs):
if not hasattr(socket, 'AF_UNIX'):
raise NotImplementedError
return self.tcp_client(*args, family=socket.AF_UNIX, **kwargs)
@contextlib.contextmanager
def unix_sock_name(self):
with tempfile.TemporaryDirectory() as td:
fn = os.path.join(td, 'sock')
try:
yield fn
finally:
try:
os.unlink(fn)
except OSError:
pass
def _abort_socket_test(self, ex):
try:
self.loop.stop()
finally:
self.fail(ex)
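# --- Editor's note: illustrative sketch, not part of the original helpers. ---
# A test built on FunctionalTestCaseMixin typically passes small "prog"
# callables that receive a TestSocketWrapper (defined below), for example:
#
#     def echo_upper(sock):
#         data = sock.recv_all(4)
#         sock.sendall(data.upper())
#
#     with self.tcp_server(echo_upper) as srv:
#         client_sock = socket.create_connection(srv.addr)
#         ...
#
# tcp_client()/unix_server()/unix_client() follow the same pattern.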
##############################################################################
# Socket Testing Utilities
##############################################################################
class TestSocketWrapper:
def __init__(self, sock):
self.__sock = sock
def recv_all(self, n):
buf = b''
while len(buf) < n:
data = self.recv(n - len(buf))
if data == b'':
raise ConnectionAbortedError
buf += data
return buf
def start_tls(self, ssl_context, *,
server_side=False,
server_hostname=None):
ssl_sock = ssl_context.wrap_socket(
self.__sock, server_side=server_side,
server_hostname=server_hostname,
do_handshake_on_connect=False)
try:
ssl_sock.do_handshake()
except:
ssl_sock.close()
raise
finally:
self.__sock.close()
self.__sock = ssl_sock
def __getattr__(self, name):
return getattr(self.__sock, name)
def __repr__(self):
return '<{} {!r}>'.format(type(self).__name__, self.__sock)
class SocketThread(threading.Thread):
def stop(self):
self._active = False
self.join()
def __enter__(self):
self.start()
return self
def __exit__(self, *exc):
self.stop()
class TestThreadedClient(SocketThread):
def __init__(self, test, sock, prog, timeout):
threading.Thread.__init__(self, None, None, 'test-client')
self.daemon = True
self._timeout = timeout
self._sock = sock
self._active = True
self._prog = prog
self._test = test
def run(self):
try:
self._prog(TestSocketWrapper(self._sock))
except Exception as ex:
self._test._abort_socket_test(ex)
class TestThreadedServer(SocketThread):
def __init__(self, test, sock, prog, timeout, max_clients):
threading.Thread.__init__(self, None, None, 'test-server')
self.daemon = True
self._clients = 0
self._finished_clients = 0
self._max_clients = max_clients
self._timeout = timeout
self._sock = sock
self._active = True
self._prog = prog
self._s1, self._s2 = socket.socketpair()
self._s1.setblocking(False)
self._test = test
def stop(self):
try:
if self._s2 and self._s2.fileno() != -1:
try:
self._s2.send(b'stop')
except OSError:
pass
finally:
super().stop()
def run(self):
try:
with self._sock:
self._sock.setblocking(False)
self._run()
finally:
self._s1.close()
self._s2.close()
def _run(self):
while self._active:
if self._clients >= self._max_clients:
return
r, w, x = select.select(
[self._sock, self._s1], [], [], self._timeout)
if self._s1 in r:
return
if self._sock in r:
try:
conn, addr = self._sock.accept()
except BlockingIOError:
continue
except socket.timeout:
if not self._active:
return
else:
raise
else:
self._clients += 1
conn.settimeout(self._timeout)
try:
with conn:
self._handle_client(conn)
except Exception as ex:
self._active = False
try:
raise
finally:
self._test._abort_socket_test(ex)
def _handle_client(self, sock):
self._prog(TestSocketWrapper(sock))
@property
def addr(self):
return self._sock.getsockname()
```
#### File: python_standard_lib/test/test_pyexpat.py
```python
from io import BytesIO
import os
import platform
import sys
import sysconfig
import unittest
import traceback
from xml.parsers import expat
from xml.parsers.expat import errors
from test.support import sortdict
class SetAttributeTest(unittest.TestCase):
def setUp(self):
self.parser = expat.ParserCreate(namespace_separator='!')
def test_buffer_text(self):
self.assertIs(self.parser.buffer_text, False)
for x in 0, 1, 2, 0:
self.parser.buffer_text = x
self.assertIs(self.parser.buffer_text, bool(x))
def test_namespace_prefixes(self):
self.assertIs(self.parser.namespace_prefixes, False)
for x in 0, 1, 2, 0:
self.parser.namespace_prefixes = x
self.assertIs(self.parser.namespace_prefixes, bool(x))
def test_ordered_attributes(self):
self.assertIs(self.parser.ordered_attributes, False)
for x in 0, 1, 2, 0:
self.parser.ordered_attributes = x
self.assertIs(self.parser.ordered_attributes, bool(x))
def test_specified_attributes(self):
self.assertIs(self.parser.specified_attributes, False)
for x in 0, 1, 2, 0:
self.parser.specified_attributes = x
self.assertIs(self.parser.specified_attributes, bool(x))
def test_invalid_attributes(self):
with self.assertRaises(AttributeError):
self.parser.returns_unicode = 1
with self.assertRaises(AttributeError):
self.parser.returns_unicode
# Issue #25019
self.assertRaises(TypeError, setattr, self.parser, range(0xF), 0)
self.assertRaises(TypeError, self.parser.__setattr__, range(0xF), 0)
self.assertRaises(TypeError, getattr, self.parser, range(0xF))
data = b'''\
<?xml version="1.0" encoding="iso-8859-1" standalone="no"?>
<?xml-stylesheet href="stylesheet.css"?>
<!-- comment data -->
<!DOCTYPE quotations SYSTEM "quotations.dtd" [
<!ELEMENT root ANY>
<!ATTLIST root attr1 CDATA #REQUIRED attr2 CDATA #IMPLIED>
<!NOTATION notation SYSTEM "notation.jpeg">
<!ENTITY acirc "&#226;">
<!ENTITY external_entity SYSTEM "entity.file">
<!ENTITY unparsed_entity SYSTEM "entity.file" NDATA notation>
%unparsed_entity;
]>
<root attr1="value1" attr2="value2ὀ">
<myns:subelement xmlns:myns="http://www.python.org/namespace">
Contents of subelements
</myns:subelement>
<sub2><![CDATA[contents of CDATA section]]></sub2>
&external_entity;
&skipped_entity;
\xb5
</root>
'''
# Produce UTF-8 output
class ParseTest(unittest.TestCase):
class Outputter:
def __init__(self):
self.out = []
def StartElementHandler(self, name, attrs):
self.out.append('Start element: ' + repr(name) + ' ' +
sortdict(attrs))
def EndElementHandler(self, name):
self.out.append('End element: ' + repr(name))
def CharacterDataHandler(self, data):
data = data.strip()
if data:
self.out.append('Character data: ' + repr(data))
def ProcessingInstructionHandler(self, target, data):
self.out.append('PI: ' + repr(target) + ' ' + repr(data))
def StartNamespaceDeclHandler(self, prefix, uri):
self.out.append('NS decl: ' + repr(prefix) + ' ' + repr(uri))
def EndNamespaceDeclHandler(self, prefix):
self.out.append('End of NS decl: ' + repr(prefix))
def StartCdataSectionHandler(self):
self.out.append('Start of CDATA section')
def EndCdataSectionHandler(self):
self.out.append('End of CDATA section')
def CommentHandler(self, text):
self.out.append('Comment: ' + repr(text))
def NotationDeclHandler(self, *args):
name, base, sysid, pubid = args
self.out.append('Notation declared: %s' %(args,))
def UnparsedEntityDeclHandler(self, *args):
entityName, base, systemId, publicId, notationName = args
self.out.append('Unparsed entity decl: %s' %(args,))
def NotStandaloneHandler(self):
self.out.append('Not standalone')
return 1
def ExternalEntityRefHandler(self, *args):
context, base, sysId, pubId = args
self.out.append('External entity ref: %s' %(args[1:],))
return 1
def StartDoctypeDeclHandler(self, *args):
self.out.append(('Start doctype', args))
return 1
def EndDoctypeDeclHandler(self):
self.out.append("End doctype")
return 1
def EntityDeclHandler(self, *args):
self.out.append(('Entity declaration', args))
return 1
def XmlDeclHandler(self, *args):
self.out.append(('XML declaration', args))
return 1
def ElementDeclHandler(self, *args):
self.out.append(('Element declaration', args))
return 1
def AttlistDeclHandler(self, *args):
self.out.append(('Attribute list declaration', args))
return 1
def SkippedEntityHandler(self, *args):
self.out.append(("Skipped entity", args))
return 1
def DefaultHandler(self, userData):
pass
def DefaultHandlerExpand(self, userData):
pass
handler_names = [
'StartElementHandler', 'EndElementHandler', 'CharacterDataHandler',
'ProcessingInstructionHandler', 'UnparsedEntityDeclHandler',
'NotationDeclHandler', 'StartNamespaceDeclHandler',
'EndNamespaceDeclHandler', 'CommentHandler',
'StartCdataSectionHandler', 'EndCdataSectionHandler', 'DefaultHandler',
'DefaultHandlerExpand', 'NotStandaloneHandler',
'ExternalEntityRefHandler', 'StartDoctypeDeclHandler',
'EndDoctypeDeclHandler', 'EntityDeclHandler', 'XmlDeclHandler',
'ElementDeclHandler', 'AttlistDeclHandler', 'SkippedEntityHandler',
]
def _hookup_callbacks(self, parser, handler):
"""
Set each of the callbacks defined on handler and named in
self.handler_names on the given parser.
"""
for name in self.handler_names:
setattr(parser, name, getattr(handler, name))
def _verify_parse_output(self, operations):
expected_operations = [
('XML declaration', ('1.0', 'iso-8859-1', 0)),
'PI: \'xml-stylesheet\' \'href="stylesheet.css"\'',
"Comment: ' comment data '",
"Not standalone",
("Start doctype", ('quotations', 'quotations.dtd', None, 1)),
('Element declaration', ('root', (2, 0, None, ()))),
('Attribute list declaration', ('root', 'attr1', 'CDATA', None,
1)),
('Attribute list declaration', ('root', 'attr2', 'CDATA', None,
0)),
"Notation declared: ('notation', None, 'notation.jpeg', None)",
('Entity declaration', ('acirc', 0, '\xe2', None, None, None, None)),
('Entity declaration', ('external_entity', 0, None, None,
'entity.file', None, None)),
"Unparsed entity decl: ('unparsed_entity', None, 'entity.file', None, 'notation')",
"Not standalone",
"End doctype",
"Start element: 'root' {'attr1': 'value1', 'attr2': 'value2\u1f40'}",
"NS decl: 'myns' 'http://www.python.org/namespace'",
"Start element: 'http://www.python.org/namespace!subelement' {}",
"Character data: 'Contents of subelements'",
"End element: 'http://www.python.org/namespace!subelement'",
"End of NS decl: 'myns'",
"Start element: 'sub2' {}",
'Start of CDATA section',
"Character data: 'contents of CDATA section'",
'End of CDATA section',
"End element: 'sub2'",
"External entity ref: (None, 'entity.file', None)",
('Skipped entity', ('skipped_entity', 0)),
"Character data: '\xb5'",
"End element: 'root'",
]
for operation, expected_operation in zip(operations, expected_operations):
self.assertEqual(operation, expected_operation)
def test_parse_bytes(self):
out = self.Outputter()
parser = expat.ParserCreate(namespace_separator='!')
self._hookup_callbacks(parser, out)
parser.Parse(data, True)
operations = out.out
self._verify_parse_output(operations)
# Issue #6697.
self.assertRaises(AttributeError, getattr, parser, '\uD800')
def test_parse_str(self):
out = self.Outputter()
parser = expat.ParserCreate(namespace_separator='!')
self._hookup_callbacks(parser, out)
parser.Parse(data.decode('iso-8859-1'), True)
operations = out.out
self._verify_parse_output(operations)
def test_parse_file(self):
# Try parsing a file
out = self.Outputter()
parser = expat.ParserCreate(namespace_separator='!')
self._hookup_callbacks(parser, out)
file = BytesIO(data)
parser.ParseFile(file)
operations = out.out
self._verify_parse_output(operations)
def test_parse_again(self):
parser = expat.ParserCreate()
file = BytesIO(data)
parser.ParseFile(file)
# Issue 6676: ensure a meaningful exception is raised when attempting
# to parse more than one XML document per xmlparser instance,
# a limitation of the Expat library.
with self.assertRaises(expat.error) as cm:
parser.ParseFile(file)
self.assertEqual(expat.ErrorString(cm.exception.code),
expat.errors.XML_ERROR_FINISHED)
class NamespaceSeparatorTest(unittest.TestCase):
def test_legal(self):
# Tests that make sure we get errors when the namespace_separator value
# is illegal, and that we don't for good values:
expat.ParserCreate()
expat.ParserCreate(namespace_separator=None)
expat.ParserCreate(namespace_separator=' ')
def test_illegal(self):
try:
expat.ParserCreate(namespace_separator=42)
self.fail()
except TypeError as e:
self.assertEqual(str(e),
"ParserCreate() argument 'namespace_separator' must be str or None, not int")
try:
expat.ParserCreate(namespace_separator='too long')
self.fail()
except ValueError as e:
self.assertEqual(str(e),
'namespace_separator must be at most one character, omitted, or None')
def test_zero_length(self):
# ParserCreate() needs to accept a namespace_separator of zero length
# to satisfy the requirements of RDF applications that are required
# to simply glue together the namespace URI and the localname. Though
# considered a wart of the RDF specifications, it needs to be supported.
#
# See XML-SIG mailing list thread starting with
# http://mail.python.org/pipermail/xml-sig/2001-April/005202.html
#
expat.ParserCreate(namespace_separator='') # too short
class InterningTest(unittest.TestCase):
def test(self):
# Test the interning machinery.
p = expat.ParserCreate()
L = []
def collector(name, *args):
L.append(name)
p.StartElementHandler = collector
p.EndElementHandler = collector
p.Parse(b"<e> <e/> <e></e> </e>", True)
tag = L[0]
self.assertEqual(len(L), 6)
for entry in L:
# L should have the same string repeated over and over.
self.assertTrue(tag is entry)
def test_issue9402(self):
# create an ExternalEntityParserCreate with buffer text
class ExternalOutputter:
def __init__(self, parser):
self.parser = parser
self.parser_result = None
def ExternalEntityRefHandler(self, context, base, sysId, pubId):
external_parser = self.parser.ExternalEntityParserCreate("")
self.parser_result = external_parser.Parse(b"", True)
return 1
parser = expat.ParserCreate(namespace_separator='!')
parser.buffer_text = 1
out = ExternalOutputter(parser)
parser.ExternalEntityRefHandler = out.ExternalEntityRefHandler
parser.Parse(data, True)
self.assertEqual(out.parser_result, 1)
class BufferTextTest(unittest.TestCase):
def setUp(self):
self.stuff = []
self.parser = expat.ParserCreate()
self.parser.buffer_text = 1
self.parser.CharacterDataHandler = self.CharacterDataHandler
def check(self, expected, label):
self.assertEqual(self.stuff, expected,
"%s\nstuff = %r\nexpected = %r"
% (label, self.stuff, map(str, expected)))
def CharacterDataHandler(self, text):
self.stuff.append(text)
def StartElementHandler(self, name, attrs):
self.stuff.append("<%s>" % name)
bt = attrs.get("buffer-text")
if bt == "yes":
self.parser.buffer_text = 1
elif bt == "no":
self.parser.buffer_text = 0
def EndElementHandler(self, name):
self.stuff.append("</%s>" % name)
def CommentHandler(self, data):
self.stuff.append("<!--%s-->" % data)
def setHandlers(self, handlers=[]):
for name in handlers:
setattr(self.parser, name, getattr(self, name))
def test_default_to_disabled(self):
parser = expat.ParserCreate()
self.assertFalse(parser.buffer_text)
def test_buffering_enabled(self):
# Make sure buffering is turned on
self.assertTrue(self.parser.buffer_text)
self.parser.Parse(b"<a>1<b/>2<c/>3</a>", True)
self.assertEqual(self.stuff, ['123'],
"buffered text not properly collapsed")
def test1(self):
# XXX This test exposes more detail of Expat's text chunking than we
# XXX like, but it tests what we need to concisely.
self.setHandlers(["StartElementHandler"])
self.parser.Parse(b"<a>1<b buffer-text='no'/>2\n3<c buffer-text='yes'/>4\n5</a>", True)
self.assertEqual(self.stuff,
["<a>", "1", "<b>", "2", "\n", "3", "<c>", "4\n5"],
"buffering control not reacting as expected")
def test2(self):
self.parser.Parse(b"<a>1<b/><2><c/> \n 3</a>", True)
self.assertEqual(self.stuff, ["1<2> \n 3"],
"buffered text not properly collapsed")
def test3(self):
self.setHandlers(["StartElementHandler"])
self.parser.Parse(b"<a>1<b/>2<c/>3</a>", True)
self.assertEqual(self.stuff, ["<a>", "1", "<b>", "2", "<c>", "3"],
"buffered text not properly split")
def test4(self):
self.setHandlers(["StartElementHandler", "EndElementHandler"])
self.parser.CharacterDataHandler = None
self.parser.Parse(b"<a>1<b/>2<c/>3</a>", True)
self.assertEqual(self.stuff,
["<a>", "<b>", "</b>", "<c>", "</c>", "</a>"])
def test5(self):
self.setHandlers(["StartElementHandler", "EndElementHandler"])
self.parser.Parse(b"<a>1<b></b>2<c/>3</a>", True)
self.assertEqual(self.stuff,
["<a>", "1", "<b>", "</b>", "2", "<c>", "</c>", "3", "</a>"])
def test6(self):
self.setHandlers(["CommentHandler", "EndElementHandler",
"StartElementHandler"])
self.parser.Parse(b"<a>1<b/>2<c></c>345</a> ", True)
self.assertEqual(self.stuff,
["<a>", "1", "<b>", "</b>", "2", "<c>", "</c>", "345", "</a>"],
"buffered text not properly split")
def test7(self):
self.setHandlers(["CommentHandler", "EndElementHandler",
"StartElementHandler"])
self.parser.Parse(b"<a>1<b/>2<c></c>3<!--abc-->4<!--def-->5</a> ", True)
self.assertEqual(self.stuff,
["<a>", "1", "<b>", "</b>", "2", "<c>", "</c>", "3",
"<!--abc-->", "4", "<!--def-->", "5", "</a>"],
"buffered text not properly split")
# Test handling of exception from callback:
class HandlerExceptionTest(unittest.TestCase):
def StartElementHandler(self, name, attrs):
raise RuntimeError(name)
def check_traceback_entry(self, entry, filename, funcname):
self.assertEqual(os.path.basename(entry[0]), filename)
self.assertEqual(entry[2], funcname)
def test_exception(self):
parser = expat.ParserCreate()
parser.StartElementHandler = self.StartElementHandler
try:
parser.Parse(b"<a><b><c/></b></a>", True)
self.fail()
except RuntimeError as e:
self.assertEqual(e.args[0], 'a',
"Expected RuntimeError for element 'a', but" + \
" found %r" % e.args[0])
# Check that the traceback contains the relevant line in pyexpat.c
entries = traceback.extract_tb(e.__traceback__)
self.assertEqual(len(entries), 3)
self.check_traceback_entry(entries[0],
"test_pyexpat.py", "test_exception")
self.check_traceback_entry(entries[1],
"pyexpat.c", "StartElement")
self.check_traceback_entry(entries[2],
"test_pyexpat.py", "StartElementHandler")
if sysconfig.is_python_build() and not (sys.platform == 'win32' and platform.machine() == 'ARM'):
self.assertIn('call_with_frame("StartElement"', entries[1][3])
# Test Current* members:
class PositionTest(unittest.TestCase):
def StartElementHandler(self, name, attrs):
self.check_pos('s')
def EndElementHandler(self, name):
self.check_pos('e')
def check_pos(self, event):
pos = (event,
self.parser.CurrentByteIndex,
self.parser.CurrentLineNumber,
self.parser.CurrentColumnNumber)
self.assertTrue(self.upto < len(self.expected_list),
'too many parser events')
expected = self.expected_list[self.upto]
self.assertEqual(pos, expected,
'Expected position %s, got position %s' %(pos, expected))
self.upto += 1
def test(self):
self.parser = expat.ParserCreate()
self.parser.StartElementHandler = self.StartElementHandler
self.parser.EndElementHandler = self.EndElementHandler
self.upto = 0
self.expected_list = [('s', 0, 1, 0), ('s', 5, 2, 1), ('s', 11, 3, 2),
('e', 15, 3, 6), ('e', 17, 4, 1), ('e', 22, 5, 0)]
xml = b'<a>\n <b>\n <c/>\n </b>\n</a>'
self.parser.Parse(xml, True)
class sf1296433Test(unittest.TestCase):
def test_parse_only_xml_data(self):
# http://python.org/sf/1296433
#
xml = "<?xml version='1.0' encoding='iso8859'?><s>%s</s>" % ('a' * 1025)
# this one doesn't crash
#xml = "<?xml version='1.0'?><s>%s</s>" % ('a' * 10000)
class SpecificException(Exception):
pass
def handler(text):
raise SpecificException
parser = expat.ParserCreate()
parser.CharacterDataHandler = handler
self.assertRaises(Exception, parser.Parse, xml.encode('iso8859'))
class ChardataBufferTest(unittest.TestCase):
"""
test setting of chardata buffer size
"""
def test_1025_bytes(self):
self.assertEqual(self.small_buffer_test(1025), 2)
def test_1000_bytes(self):
self.assertEqual(self.small_buffer_test(1000), 1)
def test_wrong_size(self):
parser = expat.ParserCreate()
parser.buffer_text = 1
with self.assertRaises(ValueError):
parser.buffer_size = -1
with self.assertRaises(ValueError):
parser.buffer_size = 0
with self.assertRaises((ValueError, OverflowError)):
parser.buffer_size = sys.maxsize + 1
with self.assertRaises(TypeError):
parser.buffer_size = 512.0
def test_unchanged_size(self):
xml1 = b"<?xml version='1.0' encoding='iso8859'?><s>" + b'a' * 512
xml2 = b'a'*512 + b'</s>'
parser = expat.ParserCreate()
parser.CharacterDataHandler = self.counting_handler
parser.buffer_size = 512
parser.buffer_text = 1
# Feed 512 bytes of character data: the handler should be called
# once.
self.n = 0
parser.Parse(xml1)
self.assertEqual(self.n, 1)
# Reassign to buffer_size, but assign the same size.
parser.buffer_size = parser.buffer_size
self.assertEqual(self.n, 1)
# Try parsing rest of the document
parser.Parse(xml2)
self.assertEqual(self.n, 2)
def test_disabling_buffer(self):
xml1 = b"<?xml version='1.0' encoding='iso8859'?><a>" + b'a' * 512
xml2 = b'b' * 1024
xml3 = b'c' * 1024 + b'</a>';
parser = expat.ParserCreate()
parser.CharacterDataHandler = self.counting_handler
parser.buffer_text = 1
parser.buffer_size = 1024
self.assertEqual(parser.buffer_size, 1024)
# Parse one chunk of XML
self.n = 0
parser.Parse(xml1, False)
self.assertEqual(parser.buffer_size, 1024)
self.assertEqual(self.n, 1)
# Turn off buffering and parse the next chunk.
parser.buffer_text = 0
self.assertFalse(parser.buffer_text)
self.assertEqual(parser.buffer_size, 1024)
for i in range(10):
parser.Parse(xml2, False)
self.assertEqual(self.n, 11)
parser.buffer_text = 1
self.assertTrue(parser.buffer_text)
self.assertEqual(parser.buffer_size, 1024)
parser.Parse(xml3, True)
self.assertEqual(self.n, 12)
def counting_handler(self, text):
self.n += 1
def small_buffer_test(self, buffer_len):
xml = b"<?xml version='1.0' encoding='iso8859'?><s>" + b'a' * buffer_len + b'</s>'
parser = expat.ParserCreate()
parser.CharacterDataHandler = self.counting_handler
parser.buffer_size = 1024
parser.buffer_text = 1
self.n = 0
parser.Parse(xml)
return self.n
def test_change_size_1(self):
xml1 = b"<?xml version='1.0' encoding='iso8859'?><a><s>" + b'a' * 1024
xml2 = b'aaa</s><s>' + b'a' * 1025 + b'</s></a>'
parser = expat.ParserCreate()
parser.CharacterDataHandler = self.counting_handler
parser.buffer_text = 1
parser.buffer_size = 1024
self.assertEqual(parser.buffer_size, 1024)
self.n = 0
parser.Parse(xml1, False)
parser.buffer_size *= 2
self.assertEqual(parser.buffer_size, 2048)
parser.Parse(xml2, True)
self.assertEqual(self.n, 2)
def test_change_size_2(self):
xml1 = b"<?xml version='1.0' encoding='iso8859'?><a>a<s>" + b'a' * 1023
xml2 = b'aaa</s><s>' + b'a' * 1025 + b'</s></a>'
parser = expat.ParserCreate()
parser.CharacterDataHandler = self.counting_handler
parser.buffer_text = 1
parser.buffer_size = 2048
self.assertEqual(parser.buffer_size, 2048)
self.n=0
parser.Parse(xml1, False)
parser.buffer_size = parser.buffer_size // 2
self.assertEqual(parser.buffer_size, 1024)
parser.Parse(xml2, True)
self.assertEqual(self.n, 4)
class MalformedInputTest(unittest.TestCase):
def test1(self):
xml = b"\0\r\n"
parser = expat.ParserCreate()
try:
parser.Parse(xml, True)
self.fail()
except expat.ExpatError as e:
self.assertEqual(str(e), 'unclosed token: line 2, column 0')
def test2(self):
# \xc2\x85 is UTF-8 encoded U+0085 (NEXT LINE)
xml = b"<?xml version\xc2\x85='1.0'?>\r\n"
parser = expat.ParserCreate()
err_pattern = r'XML declaration not well-formed: line 1, column \d+'
with self.assertRaisesRegex(expat.ExpatError, err_pattern):
parser.Parse(xml, True)
class ErrorMessageTest(unittest.TestCase):
def test_codes(self):
# verify mapping of errors.codes and errors.messages
self.assertEqual(errors.XML_ERROR_SYNTAX,
errors.messages[errors.codes[errors.XML_ERROR_SYNTAX]])
def test_expaterror(self):
xml = b'<'
parser = expat.ParserCreate()
try:
parser.Parse(xml, True)
self.fail()
except expat.ExpatError as e:
self.assertEqual(e.code,
errors.codes[errors.XML_ERROR_UNCLOSED_TOKEN])
class ForeignDTDTests(unittest.TestCase):
"""
Tests for the UseForeignDTD method of expat parser objects.
"""
def test_use_foreign_dtd(self):
"""
If UseForeignDTD is passed True and a document without an external
entity reference is parsed, ExternalEntityRefHandler is first called
with None for the public and system ids.
"""
handler_call_args = []
def resolve_entity(context, base, system_id, public_id):
handler_call_args.append((public_id, system_id))
return 1
parser = expat.ParserCreate()
parser.UseForeignDTD(True)
parser.SetParamEntityParsing(expat.XML_PARAM_ENTITY_PARSING_ALWAYS)
parser.ExternalEntityRefHandler = resolve_entity
parser.Parse(b"<?xml version='1.0'?><element/>")
self.assertEqual(handler_call_args, [(None, None)])
# test UseForeignDTD() is equal to UseForeignDTD(True)
handler_call_args[:] = []
parser = expat.ParserCreate()
parser.UseForeignDTD()
parser.SetParamEntityParsing(expat.XML_PARAM_ENTITY_PARSING_ALWAYS)
parser.ExternalEntityRefHandler = resolve_entity
parser.Parse(b"<?xml version='1.0'?><element/>")
self.assertEqual(handler_call_args, [(None, None)])
def test_ignore_use_foreign_dtd(self):
"""
If UseForeignDTD is passed True and a document with an external
entity reference is parsed, ExternalEntityRefHandler is called with
the public and system ids from the document.
"""
handler_call_args = []
def resolve_entity(context, base, system_id, public_id):
handler_call_args.append((public_id, system_id))
return 1
parser = expat.ParserCreate()
parser.UseForeignDTD(True)
parser.SetParamEntityParsing(expat.XML_PARAM_ENTITY_PARSING_ALWAYS)
parser.ExternalEntityRefHandler = resolve_entity
parser.Parse(
b"<?xml version='1.0'?><!DOCTYPE foo PUBLIC 'bar' 'baz'><element/>")
self.assertEqual(handler_call_args, [("bar", "baz")])
if __name__ == "__main__":
unittest.main()
```
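Note: the suite above drives the `xml.parsers.expat` callback API directly. As a quick orientation, here is a minimal, hedged sketch (not part of the dumped file) of the pattern the tests exercise: create a parser, attach handler callables, and feed it bytes with `Parse(data, isfinal)`. With `buffer_text` enabled and no element handlers installed, adjacent text nodes are coalesced into a single callback, which is what `BufferTextTest.test_buffering_enabled` asserts.
```python
# Minimal sketch using only the standard library; not part of test_pyexpat.py.
from xml.parsers.expat import ParserCreate

def collect_text(xml_bytes):
    chunks = []
    parser = ParserCreate()
    parser.buffer_text = 1                       # coalesce adjacent character data
    parser.CharacterDataHandler = chunks.append  # called once per buffered chunk
    parser.Parse(xml_bytes, True)                # True marks the final chunk
    return chunks

print(collect_text(b"<a>1<b/>2<c/>3</a>"))       # -> ['123']
```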
#### File: test_c_analyzer/test_variables/test_find.py
```python
import unittest
from .. import tool_imports_for_tests
with tool_imports_for_tests():
from c_analyzer.variables import info
from c_analyzer.variables.find import (
vars_from_binary,
)
class _Base(unittest.TestCase):
maxDiff = None
@property
def calls(self):
try:
return self._calls
except AttributeError:
self._calls = []
return self._calls
class VarsFromBinaryTests(_Base):
_return_iter_vars = ()
_return_get_symbol_resolver = None
def setUp(self):
super().setUp()
self.kwargs = dict(
_iter_vars=self._iter_vars,
_get_symbol_resolver=self._get_symbol_resolver,
)
def _iter_vars(self, binfile, resolve, handle_id):
self.calls.append(('_iter_vars', (binfile, resolve, handle_id)))
return [(v, v.id) for v in self._return_iter_vars]
def _get_symbol_resolver(self, known=None, dirnames=(), *,
handle_var,
filenames=None,
check_filename=None,
perfilecache=None,
):
self.calls.append(('_get_symbol_resolver',
(known, dirnames, handle_var, filenames,
check_filename, perfilecache)))
return self._return_get_symbol_resolver
def test_typical(self):
resolver = self._return_get_symbol_resolver = object()
variables = self._return_iter_vars = [
info.Variable.from_parts('dir1/spam.c', None, 'var1', 'int'),
info.Variable.from_parts('dir1/spam.c', None, 'var2', 'static int'),
info.Variable.from_parts('dir1/spam.c', None, 'var3', 'char *'),
info.Variable.from_parts('dir1/spam.c', 'func2', 'var4', 'const char *'),
info.Variable.from_parts('dir1/eggs.c', None, 'var1', 'static int'),
info.Variable.from_parts('dir1/eggs.c', 'func1', 'var2', 'static char *'),
]
known = object()
filenames = object()
found = list(vars_from_binary('python',
known=known,
filenames=filenames,
**self.kwargs))
self.assertEqual(found, [
info.Variable.from_parts('dir1/spam.c', None, 'var1', 'int'),
info.Variable.from_parts('dir1/spam.c', None, 'var2', 'static int'),
info.Variable.from_parts('dir1/spam.c', None, 'var3', 'char *'),
info.Variable.from_parts('dir1/spam.c', 'func2', 'var4', 'const char *'),
info.Variable.from_parts('dir1/eggs.c', None, 'var1', 'static int'),
info.Variable.from_parts('dir1/eggs.c', 'func1', 'var2', 'static char *'),
])
self.assertEqual(self.calls, [
('_get_symbol_resolver', (filenames, known, info.Variable.from_id, None, None, {})),
('_iter_vars', ('python', resolver, None)),
])
# self._return_iter_symbols = [
# s_info.Symbol(('dir1/spam.c', None, 'var1'), 'variable', False),
# s_info.Symbol(('dir1/spam.c', None, 'var2'), 'variable', False),
# s_info.Symbol(('dir1/spam.c', None, 'func1'), 'function', False),
# s_info.Symbol(('dir1/spam.c', None, 'func2'), 'function', True),
# s_info.Symbol(('dir1/spam.c', None, 'var3'), 'variable', False),
# s_info.Symbol(('dir1/spam.c', 'func2', 'var4'), 'variable', False),
# s_info.Symbol(('dir1/ham.c', None, 'var1'), 'variable', True),
# s_info.Symbol(('dir1/eggs.c', None, 'var1'), 'variable', False),
# s_info.Symbol(('dir1/eggs.c', None, 'xyz'), 'other', False),
# s_info.Symbol(('dir1/eggs.c', '???', 'var2'), 'variable', False),
# s_info.Symbol(('???', None, 'var_x'), 'variable', False),
# s_info.Symbol(('???', '???', 'var_y'), 'variable', False),
# s_info.Symbol((None, None, '???'), 'other', False),
# ]
# known = object()
#
# vars_from_binary('python', knownvars=known, **this.kwargs)
# found = list(globals_from_symbols(['dir1'], self.iter_symbols))
#
# self.assertEqual(found, [
# info.Variable.from_parts('dir1/spam.c', None, 'var1', '???'),
# info.Variable.from_parts('dir1/spam.c', None, 'var2', '???'),
# info.Variable.from_parts('dir1/spam.c', None, 'var3', '???'),
# info.Variable.from_parts('dir1/spam.c', 'func2', 'var4', '???'),
# info.Variable.from_parts('dir1/eggs.c', None, 'var1', '???'),
# ])
# self.assertEqual(self.calls, [
# ('iter_symbols', (['dir1'],)),
# ])
#
# def test_no_symbols(self):
# self._return_iter_symbols = []
#
# found = list(globals_from_symbols(['dir1'], self.iter_symbols))
#
# self.assertEqual(found, [])
# self.assertEqual(self.calls, [
# ('iter_symbols', (['dir1'],)),
# ])
# XXX need functional test
```
#### File: test/test_tools/test_pathfix.py
```python
import os
import subprocess
import sys
import unittest
from test import support
from test.test_tools import scriptsdir, skip_if_missing
# need Tools/script/ directory: skip if run on Python installed on the system
skip_if_missing()
class TestPathfixFunctional(unittest.TestCase):
script = os.path.join(scriptsdir, 'pathfix.py')
def setUp(self):
self.addCleanup(support.unlink, support.TESTFN)
def pathfix(self, shebang, pathfix_flags, exitcode=0, stdout='', stderr='',
directory=''):
if directory:
# bpo-38347: Test filename should contain lowercase, uppercase,
# "-", "_" and digits.
filename = os.path.join(directory, 'script-A_1.py')
pathfix_arg = directory
else:
filename = support.TESTFN
pathfix_arg = filename
with open(filename, 'w', encoding='utf8') as f:
f.write(f'{shebang}\n' + 'print("Hello world")\n')
encoding = sys.getfilesystemencoding()
proc = subprocess.run(
[sys.executable, self.script,
*pathfix_flags, '-n', pathfix_arg],
env={**os.environ, 'PYTHONIOENCODING': encoding},
capture_output=True)
if stdout == '' and proc.returncode == 0:
stdout = f'{filename}: updating\n'
self.assertEqual(proc.returncode, exitcode, proc)
self.assertEqual(proc.stdout.decode(encoding), stdout.replace('\n', os.linesep), proc)
self.assertEqual(proc.stderr.decode(encoding), stderr.replace('\n', os.linesep), proc)
with open(filename, 'r', encoding='utf8') as f:
output = f.read()
lines = output.split('\n')
self.assertEqual(lines[1:], ['print("Hello world")', ''])
new_shebang = lines[0]
if proc.returncode != 0:
self.assertEqual(shebang, new_shebang)
return new_shebang
def test_recursive(self):
tmpdir = support.TESTFN + '.d'
self.addCleanup(support.rmtree, tmpdir)
os.mkdir(tmpdir)
expected_stderr = f"recursedown('{os.path.basename(tmpdir)}')\n"
self.assertEqual(
self.pathfix(
'#! /usr/bin/env python',
['-i', '/usr/bin/python3'],
directory=tmpdir,
stderr=expected_stderr),
'#! /usr/bin/python3')
def test_pathfix(self):
self.assertEqual(
self.pathfix(
'#! /usr/bin/env python',
['-i', '/usr/bin/python3']),
'#! /usr/bin/python3')
self.assertEqual(
self.pathfix(
'#! /usr/bin/env python -R',
['-i', '/usr/bin/python3']),
'#! /usr/bin/python3')
def test_pathfix_keeping_flags(self):
self.assertEqual(
self.pathfix(
'#! /usr/bin/env python -R',
['-i', '/usr/bin/python3', '-k']),
'#! /usr/bin/python3 -R')
self.assertEqual(
self.pathfix(
'#! /usr/bin/env python',
['-i', '/usr/bin/python3', '-k']),
'#! /usr/bin/python3')
def test_pathfix_adding_flag(self):
self.assertEqual(
self.pathfix(
'#! /usr/bin/env python',
['-i', '/usr/bin/python3', '-a', 's']),
'#! /usr/bin/python3 -s')
self.assertEqual(
self.pathfix(
'#! /usr/bin/env python -S',
['-i', '/usr/bin/python3', '-a', 's']),
'#! /usr/bin/python3 -s')
self.assertEqual(
self.pathfix(
'#! /usr/bin/env python -V',
['-i', '/usr/bin/python3', '-a', 'v', '-k']),
'#! /usr/bin/python3 -vV')
self.assertEqual(
self.pathfix(
'#! /usr/bin/env python',
['-i', '/usr/bin/python3', '-a', 'Rs']),
'#! /usr/bin/python3 -Rs')
self.assertEqual(
self.pathfix(
'#! /usr/bin/env python -W default',
['-i', '/usr/bin/python3', '-a', 's', '-k']),
'#! /usr/bin/python3 -sW default')
def test_pathfix_adding_errors(self):
self.pathfix(
'#! /usr/bin/env python -E',
['-i', '/usr/bin/python3', '-a', 'W default', '-k'],
exitcode=2,
stderr="-a option doesn't support whitespaces")
if __name__ == '__main__':
unittest.main()
```
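For context, the tests above wrap `pathfix.py` in `subprocess`; a hedged sketch of the equivalent direct call is shown below. The script path and target file name are illustrative assumptions, and only flags already exercised by the tests appear: `-i` sets the new interpreter, `-k` keeps existing shebang flags, `-a` adds one, `-n` suppresses backup files.
```python
# Hedged sketch; paths are assumptions, flags mirror the tests above.
import subprocess
import sys

subprocess.run(
    [sys.executable, 'Tools/scripts/pathfix.py',
     '-i', '/usr/bin/python3',   # rewrite the shebang interpreter
     '-k',                       # keep any flags already on the shebang line
     '-n',                       # do not write backup files
     'some_script.py'],          # file (or directory) to fix up
    check=True)
```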
#### File: test_zoneinfo/data/update_test_data.py
```python
from __future__ import annotations
import base64
import functools
import json
import lzma
import pathlib
import textwrap
import typing
import zoneinfo
KEYS = [
"Africa/Abidjan",
"Africa/Casablanca",
"America/Los_Angeles",
"America/Santiago",
"Asia/Tokyo",
"Australia/Sydney",
"Europe/Dublin",
"Europe/Lisbon",
"Europe/London",
"Pacific/Kiritimati",
"UTC",
]
TEST_DATA_LOC = pathlib.Path(__file__).parent
@functools.lru_cache(maxsize=None)
def get_zoneinfo_path() -> pathlib.Path:
"""Get the first zoneinfo directory on TZPATH containing the "UTC" zone."""
key = "UTC"
for path in map(pathlib.Path, zoneinfo.TZPATH):
if (path / key).exists():
return path
else:
raise OSError("Cannot find time zone data.")
def get_zoneinfo_metadata() -> typing.Dict[str, str]:
path = get_zoneinfo_path()
tzdata_zi = path / "tzdata.zi"
if not tzdata_zi.exists():
# tzdata.zi is necessary to get the version information
raise OSError("Time zone data does not include tzdata.zi.")
with open(tzdata_zi, "r") as f:
version_line = next(f)
_, version = version_line.strip().rsplit(" ", 1)
if (
not version[0:4].isdigit()
or len(version) < 5
or not version[4:].isalpha()
):
raise ValueError(
"Version string should be YYYYx, "
+ "where YYYY is the year and x is a letter; "
+ f"found: {version}"
)
return {"version": version}
def get_zoneinfo(key: str) -> bytes:
path = get_zoneinfo_path()
with open(path / key, "rb") as f:
return f.read()
def encode_compressed(data: bytes) -> typing.List[str]:
compressed_zone = lzma.compress(data)
raw = base64.b85encode(compressed_zone)
raw_data_str = raw.decode("utf-8")
data_str = textwrap.wrap(raw_data_str, width=70)
return data_str
def load_compressed_keys() -> typing.Dict[str, typing.List[str]]:
output = {key: encode_compressed(get_zoneinfo(key)) for key in KEYS}
return output
def update_test_data(fname: str = "zoneinfo_data.json") -> None:
TEST_DATA_LOC.mkdir(exist_ok=True, parents=True)
# Annotation required: https://github.com/python/mypy/issues/8772
json_kwargs: typing.Dict[str, typing.Any] = dict(
indent=2, sort_keys=True,
)
compressed_keys = load_compressed_keys()
metadata = get_zoneinfo_metadata()
output = {
"metadata": metadata,
"data": compressed_keys,
}
with open(TEST_DATA_LOC / fname, "w") as f:
json.dump(output, f, **json_kwargs)
if __name__ == "__main__":
update_test_data()
```
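The JSON written by `update_test_data()` stores each zone as LZMA-compressed bytes, base85-encoded and wrapped into short text lines. A hedged companion sketch of the inverse transform follows; the file name and key are assumptions for illustration.
```python
# Hedged sketch of reversing encode_compressed(); not part of the file above.
import base64
import json
import lzma
import pathlib

def decode_compressed(lines):
    raw = "".join(lines)                      # undo the textwrap.wrap() splitting
    return lzma.decompress(base64.b85decode(raw))

payload = json.loads(pathlib.Path("zoneinfo_data.json").read_text())
utc_tzif = decode_compressed(payload["data"]["UTC"])
print(utc_tzif[:4])                           # TZif data begins with b'TZif'
```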
#### File: pyExpandObjects/src/main.py
```python
import argparse
import os
import pathlib
import re
from hvac_template import HVACTemplate
from epjson_handler import EPJSON
import logging
import json
def get_property(prop):
"""
Get property value from __init__.py file in src directory
:param prop: Property name
:return: Return value for a given property
"""
try:
result = re.search(
r'{}\s*=\s*[\'"]([^\'"]*)[\'"]'.format(prop),
open(os.path.join(os.path.dirname(__file__), '__init__.py')).read())
output = result.group(1)
except AttributeError:
output = '{} could not be found'.format(prop)
return output
def build_parser(): # pragma: no cover
"""
Build argument parser.
"""
parser = argparse.ArgumentParser(
prog='pyExpandObjects',
description='Automated process that expands HVACTemplate objects into regular EnergyPlus objects.')
parser.add_argument(
'--no-schema',
'-ns',
action='store_true',
help='Skip schema validations')
parser.add_argument(
"--file",
'-f',
nargs='?',
help='Path of epJSON file to convert'
)
parser.add_argument(
'--output_directory',
'-o',
nargs='?',
help='Specify output directory. If not provided, then input file directory is used'
)
parser.add_argument(
'--no_backup',
'-nb',
action='store_true',
help='Prevent backup files from being written'
)
parser.add_argument(
'--logger_level',
'-l',
nargs='?',
choices=['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'],
default='WARNING',
help='Specify logger level.'
)
parser.add_argument(
'--version',
'-v',
action='store_true',
help='Display version information')
parser.add_argument(
'--write_logs',
'-wl',
action='store_true',
help='Write logs to file')
return parser
def output_preprocessor_message_formatter(output_stream):
messages = {}
for line in output_stream.split('\n'):
counter = len(messages.keys()) + 1
messages.update({
'Output:PreprocessorMessage {}'.format(str(counter)): {
'preprocessor_name': 'pyExpandObjects'
}
})
if line.startswith('Error:'):
messages['Output:PreprocessorMessage {}'.format(str(counter))]['error_severity'] = 'Severe'
elif line.startswith('Warning:'):
messages['Output:PreprocessorMessage {}'.format(str(counter))]['error_severity'] = 'Warning'
else:
messages['Output:PreprocessorMessage {}'.format(str(counter))]['error_severity'] = 'Information'
words = line.split()
word_groups = [words[i:i + 10] for i in range(0, len(words), 10)]
word_group_counter = 1
for wg in word_groups:
messages['Output:PreprocessorMessage {}'.format(str(counter))][
'message_line_{}'.format(word_group_counter)] = ' '.join(wg)
word_group_counter += 1
if not messages['Output:PreprocessorMessage {}'.format(str(counter))].get('message_line_1'):
messages.pop('Output:PreprocessorMessage {}'.format(str(counter)))
return messages
def main(args=None):
if hasattr(args, 'version') and args.version:
version = get_property('__version__')
print('pyExpandObjects Version: {}'.format(version))
return
# set the arg defaults for testing when Namespace is used
if not hasattr(args, 'logger_level'):
args.logger_level = 'WARNING'
if not hasattr(args, 'no_backup'):
args.no_backup = False
if not hasattr(args, 'no_schema'):
args.no_schema = False
if getattr(args, 'write_logs', None):
logger_name = 'expand_objects_logger'
else:
logger_name = 'console_only_logger'
hvt = HVACTemplate(
no_schema=args.no_schema,
logger_level=args.logger_level,
logger_name=logger_name)
if isinstance(args.file, str):
file_suffix_check = args.file.endswith('.epJSON')
elif isinstance(args.file, (pathlib.PosixPath, pathlib.WindowsPath)):
file_suffix_check = args.file.suffix == '.epJSON'
else:
hvt.logger.error('Error: Invalid input file reference')
return
# get or set output directory
if hasattr(args, 'output_directory') and args.output_directory:
if not os.path.exists(args.output_directory):
hvt.logger.error('Specified output directory %s does not exist. '
'Files will be written to default directory %s.',
args.output_directory,
os.path.dirname(os.path.abspath(args.file)))
output_directory = os.path.dirname(os.path.abspath(args.file))
else:
output_directory = args.output_directory
else:
output_directory = os.path.dirname(os.path.abspath(args.file))
# start blank dictionary for processing
output = {
'epJSON': {},
'Output:PreprocessorMessage': ''}
if file_suffix_check:
# write output and keep list of written files
output_file_dictionary = {}
# create file names and raise error if modified name is the same as the base name
input_file_name = os.path.basename(args.file)
expanded_file_name = input_file_name.replace('.epJSON', '_expanded.epJSON')
hvac_templates_file_name = input_file_name.replace('.epJSON', '_hvac_templates.epJSON') \
if not args.no_backup else None
base_file_name = input_file_name.replace('.epJSON', '_base.epJSON') \
if not args.no_backup else None
# check that file names are not the same as the original
if input_file_name in [expanded_file_name, hvac_templates_file_name, base_file_name]:
hvt.logger.error('Error: file could not be renamed') # pragma: no cover - unlikely to be hit
return
if os.path.exists(args.file):
hvt.logger.info('Processing %s', args.file)
# QA skipped since any unanticipated condition should still get caught and returned to user.
try:
output.update(hvt.run(input_epjson=args.file))
output_file_dictionary['expanded'] = os.path.join(output_directory, str(expanded_file_name))
except: # noqa: E722
output.update({'Output:PreprocessorMessage': hvt.stream.getvalue()})
if output.get('epJSON'):
# verify expanded epJSON is valid if schema validation is turned on.
if not args.no_schema:
ej = EPJSON(
no_schema=False,
logger_level=args.logger_level,
logger_name=logger_name)
try:
ej.epjson_process(epjson_ref=output['epJSON'])
except: # noqa: E722
output['Output:PreprocessorMessage'] = '\n'.join([
output['Output:PreprocessorMessage'],
'Error: Output epJSON schema validation failed. See output files for details.\n',
ej.stream.getvalue()])
if not args.no_backup and output.get('epJSON_hvac_templates'):
with open(os.path.join(output_directory, hvac_templates_file_name), 'w') as hvac_template_file:
json.dump(output['epJSON_hvac_templates'], hvac_template_file, indent=4, sort_keys=True)
output_file_dictionary['hvac_templates'] = \
os.path.join(output_directory, str(hvac_templates_file_name))
if not args.no_backup and output.get('epJSON_base'):
with open(os.path.join(output_directory, base_file_name), 'w') as base_file:
json.dump(output['epJSON_base'], base_file, indent=4, sort_keys=True)
output_file_dictionary['base'] = os.path.join(output_directory, str(base_file_name))
if output_file_dictionary and output['epJSON']:
output_file_dictionary['expanded'] = os.path.join(output_directory, str(expanded_file_name))
hvt.logger.info('Output files written %s', output_file_dictionary)
else:
output['Output:PreprocessorMessage'] = '\n'.join([
output['Output:PreprocessorMessage'],
'Error: No expanded epJSON object created, check Output:Preprocessor object at {} for details'
.format(os.path.join(output_directory, str(expanded_file_name)))])
hvt.logger.error('Error: No expanded epJSON object created, check '
'Output:Preprocessor object at %s for details',
os.path.join(output_directory, str(expanded_file_name)))
output['output_files'] = output_file_dictionary
else:
            output['Output:PreprocessorMessage'] = '\n'.join([
output['Output:PreprocessorMessage'],
'Error: File does not exist: {}. File not processed'.format(args.file)])
hvt.logger.error('Error: File does not exist: %s. File not processed', args.file)
output['epJSON']['Output:PreprocessorMessage'] = \
output_preprocessor_message_formatter(output['Output:PreprocessorMessage'])
# Write out epJSON file.
with open(os.path.join(output_directory, expanded_file_name), 'w') as expanded_file:
json.dump(output['epJSON'], expanded_file, indent=4, sort_keys=True)
# write out successful file creation to base preprocessor object
if output_file_dictionary and output['epJSON']:
output['Output:PreprocessorMessage'] = '\n'.join([
output['Output:PreprocessorMessage'],
'Output files written: {}'.format(output_file_dictionary)])
else:
        output['Output:PreprocessorMessage'] = '\n'.join([
output['Output:PreprocessorMessage'],
'Error: Bad file extension for {}. File not processed'.format(args.file)])
hvt.logger.error('Error: Bad file extension for %s. File not processed', args.file)
return output
if __name__ == "__main__":
epJSON_parser = build_parser()
epJSON_args, unknown_args = epJSON_parser.parse_known_args()
# If unknown arguments are passed, and no file specified, then put the arguments
# in the file namespace.
if not epJSON_args.file and unknown_args:
epJSON_args.file = unknown_args[0]
main(epJSON_args)
logging.shutdown()
```
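Because `main()` only reads attributes off the parsed namespace, the expansion can be driven programmatically as well as from the command line (roughly equivalent to `python src/main.py --no-schema in.epJSON`). A hedged sketch, assuming the module is importable as `src.main` and an `in.epJSON` input exists:
```python
# Hedged usage sketch; the import path and file names are assumptions.
from argparse import Namespace
from src.main import main

args = Namespace(
    file='in.epJSON',            # must end in .epJSON to pass the suffix check
    no_schema=True,              # skip schema validation, as --no-schema would
    no_backup=False,
    output_directory=None,       # falls back to the input file's directory
    logger_level='WARNING',
    write_logs=False,
    version=False)

output = main(args)
print(output['output_files'])    # e.g. {'expanded': '.../in_expanded.epJSON', ...}
```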
#### File: pyExpandObjects/tests/build_test_output.py
```python
import os
import pandas as pd
base_project_path = os.path.dirname(
os.path.dirname(
os.path.abspath(__file__)
)
)
def make_table(df):
html_tables = {}
df[['DocSection', 'DocText']] = df["DocText"].str.rsplit(":", 1, expand=True)
for section, sub_df in df.groupby(['DocSection']):
sub_df.drop(['DocSection'], axis=1, inplace=True)
html_text = ''
html_text += '<h4>' + section.replace(":", " : ") + "</h4>"
sub_df.rename(columns={
"DocText": "Test",
"TimeStamp": "Last Check",
"FunctionStatus": "Status"
}, inplace=True)
html_text += sub_df.to_html(index=False) + '\n'
html_tables[section.strip()] = html_text
return html_tables
def main():
df = pd.read_csv(
os.path.join(base_project_path, "logs", "test.log"),
header=None,
names=["TimeStamp", "DocText", "FileName", "FunctionName", "FunctionStatus"])
# Get latest function return
df = df\
.sort_values(['TimeStamp'], ascending=True)\
.groupby(['DocText', 'FileName', 'FunctionName'])\
.last()\
.reset_index()
html_text = make_table(df)
sections = sorted([i for i in html_text.keys()])
# Push the general section to the top
if "General" in sections:
sections.remove("General")
sections.insert(0, "General")
# create html file and save to docs static folder.
# write out all testing sections
with open(os.path.join(base_project_path, "docs", "_static", "testing_output.html"), 'w') as f:
f.write("""
<!DOCTYPE html>
<html>
<head>
<title>Unittest Results</title>
""")
with open(os.path.join(base_project_path, "docs", "_static", "testing_base.html"), 'r') as f2:
notes_data = f2.read()
f.write(notes_data)
for section in sections:
if not section.startswith('Simulation'):
f.write(html_text[section])
f.write("""
</head>
<body>
""")
with open(os.path.join(base_project_path, "docs", "_static", "simulation_output.html"), 'w') as f:
# write out all full simulation sections
f.write("""
<!DOCTYPE html>
<html>
<head>
<title>Simulation Test Results</title>
""")
with open(os.path.join(base_project_path, "docs", "_static", "testing_base.html"), 'r') as f2:
notes_data = f2.read()
f.write(notes_data)
for section in sections:
if section.startswith('Simulation'):
f.write(html_text[section])
f.write("""
</head>
<body>
""")
return
if __name__ == "__main__":
main()
```
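The key step in `main()` above is the "latest row wins" reduction over the test log. A hedged, self-contained illustration of that sort/groupby/`last()` idiom on synthetic data:
```python
# Hedged mini-example with synthetic rows; mirrors the reduction in main().
import pandas as pd

log = pd.DataFrame({
    "TimeStamp": ["2021-01-01 10:00", "2021-01-02 10:00"],
    "DocText": ["General:doc build", "General:doc build"],
    "FileName": ["test_x.py", "test_x.py"],
    "FunctionName": ["test_a", "test_a"],
    "FunctionStatus": ["Fail", "Pass"],
})

latest = (log
          .sort_values(["TimeStamp"], ascending=True)
          .groupby(["DocText", "FileName", "FunctionName"])
          .last()
          .reset_index())

print(latest["FunctionStatus"].tolist())   # ['Pass'] -- only the newest status remains
```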
#### File: simulations/plant_equipment/test_plant_equipment_tower_object_reference.py
```python
from pathlib import Path
from tests.simulations import BaseSimulationTest
from src.epjson_handler import EPJSON
test_dir = Path(__file__).parent.parent.parent
mixed_water_objects = {
'HVACTemplate:Plant:Boiler:ObjectReference': {
'Second Boiler Connections': {
"boiler_object_type": "Boiler:HotWater",
"boiler_name": "Heat Pump Loop Boiler",
"priority": 1,
'template_plant_loop_type': 'MixedWater'
}
},
'Curve:Quadratic': {
"Heat Pump Loop Boiler Efficiency Curve": {
"coefficient1_constant": 0.97,
"coefficient2_x": 0.0633,
"coefficient3_x_2": -0.0333,
"maximum_value_of_x": 1.0,
"minimum_value_of_x": 0.0
}
},
"Boiler:HotWater": {
"Heat Pump Loop Boiler": {
"boiler_flow_mode": "ConstantFlow",
"boiler_water_inlet_node_name": "Heat Pump Loop Boiler HW Inlet",
"boiler_water_outlet_node_name": "Heat Pump Loop Boiler HW Outlet",
"design_water_flow_rate": "Autosize",
"efficiency_curve_temperature_evaluation_variable": "LeavingBoiler",
"fuel_type": "NaturalGas",
"maximum_part_load_ratio": 1.1,
"minimum_part_load_ratio": 0.0,
"nominal_capacity": "Autosize",
"nominal_thermal_efficiency": 0.8,
"normalized_boiler_efficiency_curve_name": "Heat Pump Loop Boiler Efficiency Curve",
"optimum_part_load_ratio": 1,
"parasitic_electric_load": 0,
"sizing_factor": 1,
"water_outlet_upper_temperature_limit": 100
}
},
"HVACTemplate:Plant:MixedWaterLoop": {
"Heat Pump Water Loop": {
"demand_side_bypass_pipe": "Yes",
"fluid_type": "Water",
"high_temperature_design_setpoint": 33,
"load_distribution_scheme": "SequentialLoad",
"loop_design_delta_temperature": 5.6,
"low_temperature_design_setpoint": 20,
"operation_scheme_type": "Default",
"pump_control_type": "Intermittent",
"supply_side_bypass_pipe": "Yes",
"water_pump_configuration": "ConstantFlow",
"water_pump_rated_head": 179352,
"water_pump_type": "SinglePump"
}
},
"HVACTemplate:Plant:Tower": {
"Heat Pump Loop Tower": {
"free_convection_capacity": "Autosize",
"high_speed_fan_power": "Autosize",
"high_speed_nominal_capacity": "Autosize",
"low_speed_fan_power": "Autosize",
"low_speed_nominal_capacity": "Autosize",
"sizing_factor": 1,
"template_plant_loop_type": "MixedWater",
"tower_type": "TwoSpeed"
}
},
"HVACTemplate:System:UnitarySystem": {
"Sys 4 Heat Pump WaterSource SnglSpd": {
"control_type": "Load",
"control_zone_or_thermostat_location_name": "SPACE4-1",
"cooling_coil_type": "SingleSpeedDXWaterCooled",
"cooling_design_supply_air_temperature": 12.8,
"cooling_supply_air_flow_rate": "Autosize",
"dehumidification_control_type": "None",
"dehumidification_relative_humidity_setpoint": 60,
"dx_cooling_coil_gross_rated_cop": 3,
"dx_cooling_coil_gross_rated_sensible_heat_ratio": "Autosize",
"dx_cooling_coil_gross_rated_total_capacity": "Autosize",
"economizer_lockout": "LockoutWithCompressor",
"economizer_maximum_limit_dry_bulb_temperature": 20,
"economizer_type": "DifferentialDryBulb",
"gas_heating_coil_efficiency": 0.8,
"heat_pump_defrost_maximum_outdoor_dry_bulb_temperature": 5,
"heat_pump_heating_coil_gross_rated_cop": 2.75,
"heat_pump_heating_minimum_outdoor_dry_bulb_temperature": -8,
"heat_recovery_frost_control_type": "None",
"heat_recovery_heat_exchanger_type": "Plate",
"heat_recovery_type": "None",
"heating_coil_gross_rated_capacity": "Autosize",
"heating_coil_type": "SingleSpeedDXHeatPumpWaterSource",
"heating_design_supply_air_temperature": 50,
"heating_supply_air_flow_rate": "Autosize",
"humidifier_rated_capacity": 1e-06,
"humidifier_rated_electric_power": "Autosize",
"humidifier_relative_humidity_setpoint": 30,
"humidifier_type": "None",
"latent_heat_recovery_effectiveness": 0.65,
"maximum_outdoor_air_flow_rate": "Autosize",
"minimum_outdoor_air_flow_rate": "Autosize",
"minimum_outdoor_air_schedule_name": "Min OA Sched",
"no_load_supply_air_flow_rate": "Autosize",
"number_of_speeds_for_cooling": 1,
"number_of_speeds_or_stages_for_heating": 1,
"return_fan": "No",
"return_fan_delta_pressure": 300,
"return_fan_motor_efficiency": 0.9,
"return_fan_motor_in_air_stream_fraction": 1,
"return_fan_total_efficiency": 0.7,
"sensible_heat_recovery_effectiveness": 0.7,
"sizing_option": "NonCoincident",
"supplemental_gas_heating_or_reheat_coil_efficiency": 0.8,
"supplemental_heating_or_reheat_coil_capacity": "Autosize",
"supplemental_heating_or_reheat_coil_maximum_outdoor_dry_bulb_temperature": 21,
"supplemental_heating_or_reheat_coil_type": "Electric",
"supply_fan_delta_pressure": 600,
"supply_fan_motor_efficiency": 0.9,
"supply_fan_motor_in_air_stream_fraction": 1,
"supply_fan_operating_mode_schedule_name": "FanAvailSched",
"supply_fan_placement": "BlowThrough",
"supply_fan_total_efficiency": 0.7
}
},
"HVACTemplate:Zone:Unitary": {
"HVACTemplate:Zone:Unitary 4": {
"baseboard_heating_capacity": "Autosize",
"baseboard_heating_type": "None",
"outdoor_air_flow_rate_per_person": 0.00944,
"outdoor_air_flow_rate_per_zone": 0.0,
"outdoor_air_flow_rate_per_zone_floor_area": 0.0,
"outdoor_air_method": "Flow/Person",
"supply_air_maximum_flow_rate": "Autosize",
"template_thermostat_name": "All Zones",
"template_unitary_system_name": "Sys 4 Heat Pump WaterSource SnglSpd",
"zone_cooling_design_supply_air_temperature_input_method": "SystemSupplyAirTemperature",
"zone_heating_design_supply_air_temperature_input_method": "SystemSupplyAirTemperature",
"zone_name": "SPACE4-1"
}
}
}
class TestSimulationsPlantEquipmentTowerObjectReference(BaseSimulationTest):
def setUp(self):
self.ej = EPJSON()
base_idf_file_path = test_dir.joinpath('..', 'simulation', 'ExampleFiles',
'HVACTemplate-5ZoneVAVWaterCooled-ObjectReference.idf')
base_copy_file_path = self._copy_to_test_directory(base_idf_file_path)
# read in base file, then edit inputs for alternate tests
self.base_epjson = self.get_epjson_object_from_idf_file(base_copy_file_path)
return
def teardown(self):
return
@BaseSimulationTest._test_logger(doc_text="Simulation:PlantEquipment:Tower:ObjectReference:test_minimum_inputs")
def test_minimum_inputs(self):
# todo_eo: legacy fails with IDD message without 'priority', but it is not a required field
self.base_epjson['HVACTemplate:Plant:Tower:ObjectReference'].pop('Main Tower Connection')
self.ej.merge_epjson(
super_dictionary=self.base_epjson,
object_dictionary={
'HVACTemplate:Plant:Tower:ObjectReference': {
'Main Tower Connection': {
'cooling_tower_name': 'Main Tower',
'priority': 1
}
}
}
)
base_file_path = self.create_idf_file_from_epjson(epjson=self.base_epjson, file_name='base_pre_input.epJSON')
self.perform_full_comparison(base_idf_file_path=base_file_path)
return
@BaseSimulationTest._test_logger(doc_text="Simulation:PlantEquipment:Tower:ObjectReference:priority")
def test_priority(self):
        # todo_eo: discuss with team that priority requires a string and not an integer.
self.base_epjson['HVACTemplate:Plant:Tower:ObjectReference']['Main Tower Connection']['priority'] = 2
self.ej.merge_epjson(
super_dictionary=self.base_epjson,
object_dictionary={
"HVACTemplate:Plant:Tower:ObjectReference": {
"Second Tower Connection": {
"cooling_tower_name": "Second Tower",
"cooling_tower_object_type": "CoolingTower:SingleSpeed",
"priority": 1
}
},
"CoolingTower:SingleSpeed": {
"Second Tower": {
"blowdown_calculation_mode": "ConcentrationRatio",
"blowdown_concentration_ratio": 3,
"capacity_control": "FanCycling",
"design_air_flow_rate": "Autosize",
"design_fan_power": "Autosize",
"design_u_factor_times_area_value": "Autosize",
"design_water_flow_rate": "Autosize",
"drift_loss_percent": 0.008,
"evaporation_loss_mode": "SaturatedExit",
"free_convection_regime_air_flow_rate": "Autocalculate",
"free_convection_regime_u_factor_times_area_value": "Autocalculate",
"outdoor_air_inlet_node_name": "Second Tower Cooling Tower Outdoor Air Inlet Node",
"performance_input_method": "UFactorTimesAreaAndDesignWaterFlowRate",
"sizing_factor": 1.0,
"water_inlet_node_name": "Second Tower CndW Inlet",
"water_outlet_node_name": "Second Tower CndW Outlet"
}
},
"OutdoorAir:Node": {
"Second Tower Cooling Tower Outdoor Air Inlet Node": {
"height_above_ground": -1
}
}
}
)
base_file_path = self.create_idf_file_from_epjson(epjson=self.base_epjson, file_name='base_pre_input.epJSON')
self.perform_full_comparison(base_idf_file_path=base_file_path)
epjson_output = self.ej._get_json_file(test_dir.joinpath('..', 'simulation', 'test', 'test_input_epjson.epJSON'))
self.assertEqual(
'Second Tower',
epjson_output['CondenserEquipmentList']['Condenser Water Loop All Equipment']['equipment'][0][
'equipment_name'])
return
@BaseSimulationTest._test_logger(doc_text="Simulation:PlantEquipment:Tower:ObjectReference:"
"template_plant_loop_type")
def test_template_plant_loop_type(self):
        # todo_eo: legacy fails if template_plant_loop_type not explicitly set
self.base_epjson['HVACTemplate:Plant:Boiler:ObjectReference']['Main Boiler Connection']['priority'] = 2
self.base_epjson['HVACTemplate:Plant:Boiler:ObjectReference']['Main Boiler Connection'][
'template_plant_loop_type'] = 'HotWater'
self.base_epjson['HVACTemplate:Plant:Tower:ObjectReference']['Main Tower Connection'][
'template_plant_loop_type'] = 'ChilledWater'
self.base_epjson['HVACTemplate:Zone:VAV'].pop('HVACTemplate:Zone:VAV 4')
self.ej.merge_epjson(
super_dictionary=self.base_epjson,
object_dictionary=mixed_water_objects)
base_file_path = self.create_idf_file_from_epjson(epjson=self.base_epjson, file_name='base_pre_input.epJSON')
self.perform_full_comparison(base_idf_file_path=base_file_path)
epjson_output = self.ej._get_json_file(test_dir.joinpath('..', 'simulation', 'test', 'test_input_epjson.epJSON'))
self.assertEqual(
'Heat Pump Loop Tower',
epjson_output['PlantEquipmentList']['Heat Pump Water Loop Cooling All Equipment']['equipment'][0][
'equipment_name']
)
return
```
#### File: simulations/system/test_system_unitary_system.py
```python
from pathlib import Path
from tests.simulations import BaseSimulationTest
from src.epjson_handler import EPJSON
test_dir = Path(__file__).parent.parent.parent
schedule_objects = {
"Schedule:Compact": {
"Always0.8": {
"data": [
{
"field": "Through: 12/31"
},
{
"field": "For: AllDays"
},
{
"field": "Until: 24:00"
},
{
"field": 0.8
}
],
"schedule_type_limits_name": "Any Number"
},
"Always62": {
"data": [
{
"field": "Through: 12/31"
},
{
"field": "For: AllDays"
},
{
"field": "Until: 24:00"
},
{
"field": 62.0
}
],
"schedule_type_limits_name": "Any Number"
}
}
}
class TestSimulationsSystemUnitarySystem(BaseSimulationTest):
def setUp(self):
self.ej = EPJSON()
base_idf_file_path = test_dir.joinpath('..', 'simulation', 'ExampleFiles',
'HVACTemplate-5ZoneUnitarySystem.idf')
base_copy_file_path = self._copy_to_test_directory(base_idf_file_path)
# read in base file, then edit inputs for alternate tests
self.base_epjson = self.get_epjson_object_from_idf_file(base_copy_file_path)
        # todo_eo: errors output in legacy unless non-default system is set to single speed dx
self.base_epjson['HVACTemplate:System:UnitarySystem']['Sys 2 Furnace DX Cool MultiSpd'][
'cooling_coil_type'] = 'SingleSpeedDX'
self.base_epjson.pop('Output:Variable')
return
def teardown(self):
return
@BaseSimulationTest._test_logger(doc_text="Simulation:System:Unitary:minimum_inputs")
def test_minimum_inputs(self):
self.base_epjson['HVACTemplate:Zone:Unitary']['HVACTemplate:Zone:Unitary 1'][
'zone_cooling_design_supply_air_temperature_input_method'] = 'SupplyAirTemperature'
self.base_epjson['HVACTemplate:Zone:Unitary']['HVACTemplate:Zone:Unitary 1'][
'zone_heating_design_supply_air_temperature_input_method'] = 'SupplyAirTemperature'
self.base_epjson['HVACTemplate:System:UnitarySystem'].pop('Sys 1 Furnace DX Cool SnglSpd')
self.ej.merge_epjson(
super_dictionary=self.base_epjson,
object_dictionary={
'HVACTemplate:System:UnitarySystem': {
'Sys 1 Furnace DX Cool SnglSpd': {
'control_zone_or_thermostat_location_name': 'SPACE1-1',
'supplemental_heating_or_reheat_coil_type': 'Electric'
}
}
}
)
base_file_path = self.create_idf_file_from_epjson(epjson=self.base_epjson, file_name='base_pre_input.epJSON')
self.perform_full_comparison(base_idf_file_path=base_file_path)
return
@BaseSimulationTest._test_logger(doc_text="Simulation:System:UnitarySystem:system_availability_schedule_name")
def test_system_availability_schedule_name(self):
self.base_epjson['HVACTemplate:System:UnitarySystem']['Sys 1 Furnace DX Cool SnglSpd'][
'system_availability_schedule_name'] = 'OCCUPY-1'
self.base_epjson['HVACTemplate:System:UnitarySystem']['Sys 1 Furnace DX Cool SnglSpd'][
'night_cycle_control'] = 'CycleOnAny'
base_file_path = self.create_idf_file_from_epjson(epjson=self.base_epjson, file_name='base_pre_input.epJSON')
self.perform_full_comparison(base_idf_file_path=base_file_path)
epjson_output = self.ej._get_json_file(test_dir.joinpath(
'..', 'simulation', 'test', 'test_input_epjson.epJSON'))
self.assertEqual(
'OCCUPY-1',
epjson_output['Fan:OnOff']['Sys 1 Furnace DX Cool SnglSpd Supply Fan']['availability_schedule_name'])
self.assertEqual(
'OCCUPY-1',
epjson_output['AvailabilityManager:NightCycle']['Sys 1 Furnace DX Cool SnglSpd Availability'][
'fan_schedule_name'])
return
@BaseSimulationTest._test_logger(doc_text="Simulation:System:UnitarySystem:"
"control_type_load")
def test_control_type_load(self):
self.base_epjson['HVACTemplate:System:UnitarySystem']['Sys 1 Furnace DX Cool SnglSpd'][
'control_type'] = 'Load'
base_file_path = self.create_idf_file_from_epjson(epjson=self.base_epjson, file_name='base_pre_input.epJSON')
self.perform_full_comparison(base_idf_file_path=base_file_path)
epjson_output = self.ej._get_json_file(test_dir.joinpath(
'..', 'simulation', 'test', 'test_input_epjson.epJSON'))
self.assertEqual(
'Load',
epjson_output['AirLoopHVAC:UnitarySystem']['Sys 1 Furnace DX Cool SnglSpd Unitary System'][
'control_type'])
return
@BaseSimulationTest._test_logger(doc_text="Simulation:System:UnitarySystem:"
"control_type_set_point")
def test_control_type_set_point(self):
        # todo_eo: supply fan operating mode schedule must be constant or
        # else an error is issued, which happens in legacy
self.base_epjson['HVACTemplate:System:UnitarySystem']['Sys 1 Furnace DX Cool SnglSpd'][
'control_type'] = 'SetPoint'
base_file_path = self.create_idf_file_from_epjson(epjson=self.base_epjson, file_name='base_pre_input.epJSON')
self.perform_full_comparison(base_idf_file_path=base_file_path)
epjson_output = self.ej._get_json_file(test_dir.joinpath(
'..', 'simulation', 'test', 'test_input_epjson.epJSON'))
self.assertEqual(
'SetPoint',
epjson_output['AirLoopHVAC:UnitarySystem']['Sys 1 Furnace DX Cool SnglSpd Unitary System'][
'control_type'])
return
@BaseSimulationTest._test_logger(doc_text="Simulation:System:UnitarySystem:cooling_supplY_air_flow_rate")
def test_cooling_supply_air_flow_rate(self):
self.base_epjson['HVACTemplate:System:UnitarySystem']['Sys 1 Furnace DX Cool SnglSpd'][
'cooling_supply_air_flow_rate'] = 1.01
base_file_path = self.create_idf_file_from_epjson(epjson=self.base_epjson, file_name='base_pre_input.epJSON')
self.perform_full_comparison(base_idf_file_path=base_file_path)
epjson_output = self.ej._get_json_file(test_dir.joinpath(
'..', 'simulation', 'test', 'test_input_epjson.epJSON'))
self.assertEqual(
1.01,
epjson_output['Sizing:System']['Sys 1 Furnace DX Cool SnglSpd Sizing System'][
'cooling_supply_air_flow_rate'])
self.assertEqual(
1.01,
epjson_output['AirLoopHVAC:UnitarySystem']['Sys 1 Furnace DX Cool SnglSpd Unitary System'][
'cooling_supply_air_flow_rate'])
return
@BaseSimulationTest._test_logger(doc_text="Simulation:System:UnitarySystem:heating_supply_air_flow_rate")
def test_heating_supply_air_flow_rate(self):
self.base_epjson['HVACTemplate:System:UnitarySystem']['Sys 1 Furnace DX Cool SnglSpd'][
'heating_supply_air_flow_rate'] = 1.01
base_file_path = self.create_idf_file_from_epjson(epjson=self.base_epjson, file_name='base_pre_input.epJSON')
self.perform_full_comparison(base_idf_file_path=base_file_path)
epjson_output = self.ej._get_json_file(test_dir.joinpath(
'..', 'simulation', 'test', 'test_input_epjson.epJSON'))
self.assertEqual(
1.01,
epjson_output['Sizing:System']['Sys 1 Furnace DX Cool SnglSpd Sizing System'][
'heating_supply_air_flow_rate'])
self.assertEqual(
1.01,
epjson_output['AirLoopHVAC:UnitarySystem']['Sys 1 Furnace DX Cool SnglSpd Unitary System'][
'heating_supply_air_flow_rate'])
return
@BaseSimulationTest._test_logger(doc_text="Simulation:System:UnitarySystem:no_load_supply_air_flow_rate")
def test_no_load_supply_air_flow_rate(self):
self.base_epjson['HVACTemplate:System:UnitarySystem']['Sys 1 Furnace DX Cool SnglSpd'][
'no_load_supply_air_flow_rate'] = 1.01
base_file_path = self.create_idf_file_from_epjson(epjson=self.base_epjson, file_name='base_pre_input.epJSON')
self.perform_full_comparison(base_idf_file_path=base_file_path)
epjson_output = self.ej._get_json_file(test_dir.joinpath(
'..', 'simulation', 'test', 'test_input_epjson.epJSON'))
self.assertEqual(
1.01,
epjson_output['AirLoopHVAC:UnitarySystem']['Sys 1 Furnace DX Cool SnglSpd Unitary System'][
'no_load_supply_air_flow_rate'])
return
@BaseSimulationTest._test_logger(doc_text="Simulation:System:UnitarySystem:"
"supply_fan_operating_mode_schedule_name")
def test_supply_fan_operating_mode_schedule_name(self):
self.base_epjson['HVACTemplate:System:UnitarySystem']['Sys 1 Furnace DX Cool SnglSpd'][
'supply_fan_operating_mode_schedule_name'] = 'OCCUPY-1'
base_file_path = self.create_idf_file_from_epjson(epjson=self.base_epjson, file_name='base_pre_input.epJSON')
self.perform_full_comparison(base_idf_file_path=base_file_path)
epjson_output = self.ej._get_json_file(test_dir.joinpath(
'..', 'simulation', 'test', 'test_input_epjson.epJSON'))
self.assertEqual(
'OCCUPY-1',
epjson_output['AirLoopHVAC:UnitarySystem']['Sys 1 Furnace DX Cool SnglSpd Unitary System'][
'supply_air_fan_operating_mode_schedule_name'])
return
@BaseSimulationTest._test_logger(doc_text="Simulation:System:UnitarySystem:"
"supply_fan_placement_blow_through")
def test_supply_fan_placement_blow_through(self):
self.base_epjson['HVACTemplate:System:UnitarySystem']['Sys 1 Furnace DX Cool SnglSpd'][
'supply_fan_placement'] = 'BlowThrough'
base_file_path = self.create_idf_file_from_epjson(epjson=self.base_epjson, file_name='base_pre_input.epJSON')
self.perform_full_comparison(base_idf_file_path=base_file_path)
epjson_output = self.ej._get_json_file(test_dir.joinpath(
'..', 'simulation', 'test', 'test_input_epjson.epJSON'))
self.assertEqual(
'BlowThrough',
epjson_output['AirLoopHVAC:UnitarySystem']['Sys 1 Furnace DX Cool SnglSpd Unitary System'][
'fan_placement'])
return
@BaseSimulationTest._test_logger(doc_text="Simulation:System:UnitarySystem:"
"supply_fan_placement_draw_through")
def test_supply_fan_placement_draw_through(self):
self.base_epjson['HVACTemplate:System:UnitarySystem']['Sys 1 Furnace DX Cool SnglSpd'][
'supply_fan_placement'] = 'DrawThrough'
base_file_path = self.create_idf_file_from_epjson(epjson=self.base_epjson, file_name='base_pre_input.epJSON')
self.perform_full_comparison(base_idf_file_path=base_file_path)
epjson_output = self.ej._get_json_file(test_dir.joinpath(
'..', 'simulation', 'test', 'test_input_epjson.epJSON'))
self.assertEqual(
'DrawThrough',
epjson_output['AirLoopHVAC:UnitarySystem']['Sys 1 Furnace DX Cool SnglSpd Unitary System'][
'fan_placement'])
return
@BaseSimulationTest._test_logger(doc_text="Simulation:System:UnitarySystem:supply_fan_total_efficiency")
def test_supply_fan_total_efficiency(self):
self.base_epjson['HVACTemplate:System:UnitarySystem']['Sys 1 Furnace DX Cool SnglSpd'][
'supply_fan_total_efficiency'] = 0.65
base_file_path = self.create_idf_file_from_epjson(epjson=self.base_epjson, file_name='base_pre_input.epJSON')
self.perform_full_comparison(base_idf_file_path=base_file_path)
epjson_output = self.ej._get_json_file(test_dir.joinpath(
'..', 'simulation', 'test', 'test_input_epjson.epJSON'))
self.assertEqual(
0.65,
epjson_output['Fan:OnOff']['Sys 1 Furnace DX Cool SnglSpd Supply Fan']['fan_total_efficiency'])
return
@BaseSimulationTest._test_logger(doc_text="Simulation:System:UnitarySystem:supply_fan_delta_pressure")
def test_supply_fan_delta_pressure(self):
self.base_epjson['HVACTemplate:System:UnitarySystem']['Sys 1 Furnace DX Cool SnglSpd'][
'supply_fan_delta_pressure'] = 500
base_file_path = self.create_idf_file_from_epjson(epjson=self.base_epjson, file_name='base_pre_input.epJSON')
self.perform_full_comparison(base_idf_file_path=base_file_path)
epjson_output = self.ej._get_json_file(test_dir.joinpath(
'..', 'simulation', 'test', 'test_input_epjson.epJSON'))
self.assertEqual(
500,
epjson_output['Fan:OnOff']['Sys 1 Furnace DX Cool SnglSpd Supply Fan']['pressure_rise'])
return
@BaseSimulationTest._test_logger(doc_text="Simulation:System:UnitarySystem:supply_fan_motor_efficiency")
def test_supply_fan_motor_efficiency(self):
self.base_epjson['HVACTemplate:System:UnitarySystem']['Sys 1 Furnace DX Cool SnglSpd'][
'supply_fan_motor_efficiency'] = 0.8
base_file_path = self.create_idf_file_from_epjson(epjson=self.base_epjson, file_name='base_pre_input.epJSON')
self.perform_full_comparison(base_idf_file_path=base_file_path)
epjson_output = self.ej._get_json_file(test_dir.joinpath(
'..', 'simulation', 'test', 'test_input_epjson.epJSON'))
self.assertEqual(
0.8,
epjson_output['Fan:OnOff']['Sys 1 Furnace DX Cool SnglSpd Supply Fan']['motor_efficiency'])
return
@BaseSimulationTest._test_logger(doc_text="Simulation:System:UnitarySystem:"
"supply_fan_motor_in_air_stream_fraction")
def test_supply_fan_motor_in_air_stream_fraction(self):
self.base_epjson['HVACTemplate:System:UnitarySystem']['Sys 1 Furnace DX Cool SnglSpd'][
'supply_fan_motor_in_air_stream_fraction'] = 0.9
base_file_path = self.create_idf_file_from_epjson(epjson=self.base_epjson, file_name='base_pre_input.epJSON')
self.perform_full_comparison(base_idf_file_path=base_file_path)
epjson_output = self.ej._get_json_file(test_dir.joinpath(
'..', 'simulation', 'test', 'test_input_epjson.epJSON'))
self.assertEqual(
0.9,
epjson_output['Fan:OnOff']['Sys 1 Furnace DX Cool SnglSpd Supply Fan']['motor_in_airstream_fraction'])
return
@BaseSimulationTest._test_logger(doc_text="Simulation:System:UnitarySystem:"
"cooling_coil_type_single_speed_dx")
def test_cooling_coil_type_single_speed_dx(self):
self.base_epjson['HVACTemplate:System:UnitarySystem']['Sys 1 Furnace DX Cool SnglSpd'][
'cooling_coil_type'] = 'SingleSpeedDX'
base_file_path = self.create_idf_file_from_epjson(epjson=self.base_epjson, file_name='base_pre_input.epJSON')
self.perform_full_comparison(base_idf_file_path=base_file_path)
epjson_output = self.ej._get_json_file(test_dir.joinpath(
'..', 'simulation', 'test', 'test_input_epjson.epJSON'))
self.assertIsNotNone(
epjson_output['Coil:Cooling:DX:SingleSpeed'].get('Sys 1 Furnace DX Cool SnglSpd Cooling Coil'))
return
@BaseSimulationTest._test_logger(doc_text="Simulation:System:UnitarySystem:"
"cooling_coil_type_two_speed_dx")
def test_cooling_coil_type_two_speed_dx(self):
self.base_epjson['HVACTemplate:System:UnitarySystem']['Sys 1 Furnace DX Cool SnglSpd'][
'cooling_coil_type'] = 'TwoSpeedDX'
base_file_path = self.create_idf_file_from_epjson(epjson=self.base_epjson, file_name='base_pre_input.epJSON')
self.perform_full_comparison(base_idf_file_path=base_file_path)
epjson_output = self.ej._get_json_file(test_dir.joinpath(
'..', 'simulation', 'test', 'test_input_epjson.epJSON'))
self.assertIsNotNone(
epjson_output['Coil:Cooling:DX:TwoSpeed'].get('Sys 1 Furnace DX Cool SnglSpd Cooling Coil'))
return
@BaseSimulationTest._test_logger(doc_text="Simulation:System:UnitarySystem:"
"cooling_coil_type_multi_speed_dx")
def test_cooling_coil_type_multi_speed_dx(self):
# todo_eo: EO fails with input error on value type
# [Coil:Cooling:DX:MultiSpeed][Sys 1 Furnace DX Cool SnglSpd Cooling Coil]
# [speed_2_evaporative_condenser_effectiveness] - Value type "string" for input
# "Dimensionless" not permitted by 'type' constraint.
# <root>[Coil:Cooling:DX:MultiSpeed][Sys 1 Furnace DX Cool SnglSpd Cooling Coil]
# [speed_2_gross_rated_sensible_heat_ratio] - Value type "string" for input
# "Sys 1 Furnace DX Cool SnglSpd Cool Coil Cap-FT" not permitted by 'type' constraint.
self.base_epjson['HVACTemplate:System:UnitarySystem']['Sys 1 Furnace DX Cool SnglSpd'][
'cooling_coil_type'] = 'MultiSpeedDX'
# proper speed input must be set to pass schema validation
self.base_epjson['HVACTemplate:System:UnitarySystem']['Sys 1 Furnace DX Cool SnglSpd'][
'number_of_speeds_for_cooling'] = 2
base_file_path = self.create_idf_file_from_epjson(epjson=self.base_epjson, file_name='base_pre_input.epJSON')
self.perform_full_comparison(base_idf_file_path=base_file_path)
epjson_output = self.ej._get_json_file(test_dir.joinpath(
'..', 'simulation', 'test', 'test_input_epjson.epJSON'))
self.assertIsNotNone(
epjson_output['Coil:Cooling:DX:MultiSpeed'].get('Sys 1 Furnace DX Cool SnglSpd Cooling Coil'))
return
@BaseSimulationTest._test_logger(doc_text="Simulation:System:UnitarySystem:"
"cooling_coil_type_two_stage_dx")
def test_cooling_coil_type_two_stage_dx(self):
# todo_eo: humidity control enabled on this option, which is not consistent with other systems
self.base_epjson['HVACTemplate:System:UnitarySystem']['Sys 1 Furnace DX Cool SnglSpd'][
'cooling_coil_type'] = 'TwoStageDX'
base_file_path = self.create_idf_file_from_epjson(epjson=self.base_epjson, file_name='base_pre_input.epJSON')
self.perform_full_comparison(base_idf_file_path=base_file_path)
epjson_output = self.ej._get_json_file(test_dir.joinpath(
'..', 'simulation', 'test', 'test_input_epjson.epJSON'))
self.assertIsNotNone(
epjson_output['Coil:Cooling:DX:TwoStageWithHumidityControlMode']
.get('Sys 1 Furnace DX Cool SnglSpd Cooling Coil'))
return
@BaseSimulationTest._test_logger(doc_text="Simulation:System:UnitarySystem:"
"cooling_coil_type_two_stage_humidity_control_dx")
def test_cooling_coil_type_two_stage_humidity_control_dx(self):
self.base_epjson['HVACTemplate:System:UnitarySystem']['Sys 1 Furnace DX Cool SnglSpd'][
'cooling_coil_type'] = 'TwoStageHumidityControlDX'
base_file_path = self.create_idf_file_from_epjson(epjson=self.base_epjson, file_name='base_pre_input.epJSON')
self.perform_full_comparison(base_idf_file_path=base_file_path)
epjson_output = self.ej._get_json_file(test_dir.joinpath(
'..', 'simulation', 'test', 'test_input_epjson.epJSON'))
self.assertIsNotNone(
epjson_output['Coil:Cooling:DX:TwoStageWithHumidityControlMode']
.get('Sys 1 Furnace DX Cool SnglSpd Cooling Coil'))
return
@BaseSimulationTest._test_logger(doc_text="Simulation:System:UnitarySystem:"
"cooling_coil_type_heat_exchanger_assisted_dx")
def test_cooling_coil_type_heat_exchanger_assisted_dx(self):
self.base_epjson['HVACTemplate:System:UnitarySystem']['Sys 1 Furnace DX Cool SnglSpd'][
'cooling_coil_type'] = 'HeatExchangerAssistedDX'
base_file_path = self.create_idf_file_from_epjson(epjson=self.base_epjson, file_name='base_pre_input.epJSON')
self.perform_full_comparison(base_idf_file_path=base_file_path)
epjson_output = self.ej._get_json_file(test_dir.joinpath(
'..', 'simulation', 'test', 'test_input_epjson.epJSON'))
self.assertIsNotNone(
epjson_output['CoilSystem:Cooling:DX:HeatExchangerAssisted']
.get('Sys 1 Furnace DX Cool SnglSpd Heat Exchanger Assisted Cooling Coil'))
self.assertIsNotNone(
epjson_output['HeatExchanger:AirToAir:SensibleAndLatent']
.get('Sys 1 Furnace DX Cool SnglSpd Cooling Coil Heat Exchanger'))
return
@BaseSimulationTest._test_logger(doc_text="Simulation:System:UnitarySystem:"
"cooling_coil_type_single_speed_dx_water_cooled")
def test_cooling_coil_type_single_speed_dx_water_cooled(self):
self.base_epjson['HVACTemplate:System:UnitarySystem']['Sys 1 Furnace DX Cool SnglSpd'][
'cooling_coil_type'] = 'SingleSpeedDXWaterCooled'
base_file_path = self.create_idf_file_from_epjson(epjson=self.base_epjson, file_name='base_pre_input.epJSON')
self.perform_full_comparison(base_idf_file_path=base_file_path)
epjson_output = self.ej._get_json_file(test_dir.joinpath(
'..', 'simulation', 'test', 'test_input_epjson.epJSON'))
self.assertIsNotNone(
epjson_output['Coil:Cooling:WaterToAirHeatPump:EquationFit']
.get('Sys 1 Furnace DX Cool SnglSpd Cooling Coil'))
return
@BaseSimulationTest._test_logger(doc_text="Simulation:System:UnitarySystem:"
"cooling_coil_type_chilled_water")
def test_cooling_coil_type_chilled_water(self):
self.base_epjson['HVACTemplate:System:UnitarySystem']['Sys 1 Furnace DX Cool SnglSpd'][
'cooling_coil_type'] = 'ChilledWater'
base_file_path = self.create_idf_file_from_epjson(epjson=self.base_epjson, file_name='base_pre_input.epJSON')
self.perform_full_comparison(base_idf_file_path=base_file_path)
epjson_output = self.ej._get_json_file(test_dir.joinpath(
'..', 'simulation', 'test', 'test_input_epjson.epJSON'))
self.assertIsNotNone(
epjson_output['Coil:Cooling:Water']
.get('Sys 1 Furnace DX Cool SnglSpd Cooling Coil'))
return
@BaseSimulationTest._test_logger(doc_text="Simulation:System:UnitarySystem:"
"cooling_coil_type_chilled_water_detailed_flat_model")
def test_cooling_coil_type_chilled_water_detailed_flat_model(self):
# todo_eo: EO and pyEO fail with same error. ** Severe ** Coil:Cooling:Water:DetailedGeometry:
# "SYS 1 FURNACE DX COOL SNGLSPD COOLING COIL"
# ** ~~~ ** Coil Minimum Airflow Area must be greater than 0. Coil area = 0.000000
self.base_epjson['HVACTemplate:System:UnitarySystem']['Sys 1 Furnace DX Cool SnglSpd'][
'cooling_coil_type'] = 'ChilledWaterDetailedFlatModel'
base_file_path = self.create_idf_file_from_epjson(epjson=self.base_epjson, file_name='base_pre_input.epJSON')
self.perform_full_comparison(base_idf_file_path=base_file_path)
epjson_output = self.ej._get_json_file(test_dir.joinpath(
'..', 'simulation', 'test', 'test_input_epjson.epJSON'))
# self.assertIsNotNone(
# epjson_output['Coil:Cooling:Water']
# .get('Sys 1 Furnace DX Cool SnglSpd Cooling Coil'))
return
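@BaseSimulationTest._test_logger(doc_text="Simulation:System:UnitarySystem:"
"cooling_coil_type_heat_exchanger_assisted_chilled_water")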
def test_cooling_coil_type_heat_exchanger_assisted_chilled_water(self):
self.base_epjson['HVACTemplate:System:UnitarySystem']['Sys 1 Furnace DX Cool SnglSpd'][
'cooling_coil_type'] = 'HeatExchangerAssistedChilledWater'
base_file_path = self.create_idf_file_from_epjson(epjson=self.base_epjson, file_name='base_pre_input.epJSON')
self.perform_full_comparison(base_idf_file_path=base_file_path)
epjson_output = self.ej._get_json_file(test_dir.joinpath(
'..', 'simulation', 'test', 'test_input_epjson.epJSON'))
self.assertIsNotNone(
epjson_output['Coil:Cooling:Water']
.get('Sys 1 Furnace DX Cool SnglSpd Cooling Coil'))
self.assertIsNotNone(
epjson_output['HeatExchanger:AirToAir:SensibleAndLatent']
.get('Sys 1 Furnace DX Cool SnglSpd Cooling Coil Heat Exchanger'))
return
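@BaseSimulationTest._test_logger(doc_text="Simulation:System:UnitarySystem:"
"cooling_coil_type_none")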
def test_cooling_coil_type_none(self):
self.base_epjson['HVACTemplate:System:UnitarySystem']['Sys 1 Furnace DX Cool SnglSpd'][
'cooling_coil_type'] = 'None'
base_file_path = self.create_idf_file_from_epjson(epjson=self.base_epjson, file_name='base_pre_input.epJSON')
self.perform_full_comparison(base_idf_file_path=base_file_path)
epjson_output = self.ej._get_json_file(test_dir.joinpath(
'..', 'simulation', 'test', 'test_input_epjson.epJSON'))
self.assertIsNone(
epjson_output['Coil:Cooling:DX:SingleSpeed']
.get('Sys 1 Furnace DX Cool SnglSpd Cooling Coil'))
return
@BaseSimulationTest._test_logger(doc_text="Simulation:System:UnitarySystem:"
"cooling_coil_availability_schedule_name")
def test_cooling_coil_availability_schedule_name(self):
self.base_epjson['HVACTemplate:System:UnitarySystem']['Sys 1 Furnace DX Cool SnglSpd'][
'cooling_coil_availability_schedule_name'] = 'OCCUPY-1'
base_file_path = self.create_idf_file_from_epjson(epjson=self.base_epjson, file_name='base_pre_input.epJSON')
self.perform_full_comparison(base_idf_file_path=base_file_path)
epjson_output = self.ej._get_json_file(test_dir.joinpath(
'..', 'simulation', 'test', 'test_input_epjson.epJSON'))
self.assertEqual(
'OCCUPY-1',
epjson_output['Coil:Cooling:DX:SingleSpeed']['Sys 1 Furnace DX Cool SnglSpd Cooling Coil'][
'availability_schedule_name'])
return
@BaseSimulationTest._test_logger(doc_text="Simulation:System:UnitarySystem:"
"cooling_design_supply_air_temperature")
def test_cooling_design_supply_air_temperature(self):
# todo_eo: why is the SetpointManager:SingleZone:Cooling object not affected by this input
self.base_epjson['HVACTemplate:System:UnitarySystem']['Sys 1 Furnace DX Cool SnglSpd'][
'cooling_design_supply_air_temperature'] = 12.9
base_file_path = self.create_idf_file_from_epjson(epjson=self.base_epjson, file_name='base_pre_input.epJSON')
self.perform_full_comparison(base_idf_file_path=base_file_path)
epjson_output = self.ej._get_json_file(test_dir.joinpath(
'..', 'simulation', 'test', 'test_input_epjson.epJSON'))
self.assertEqual(
12.9,
epjson_output['Sizing:System']['Sys 1 Furnace DX Cool SnglSpd Sizing System'][
'central_cooling_design_supply_air_temperature'])
self.assertEqual(
12.9,
epjson_output['SetpointManager:SingleZone:Cooling'][
'Sys 1 Furnace DX Cool SnglSpd Cooling Supply Air Temp Manager']['minimum_supply_air_temperature'])
return
@BaseSimulationTest._test_logger(doc_text="Simulation:System:UnitarySystem:"
"dx_cooling_coil_gross_rated_total_capacity")
def test_dx_cooling_coil_gross_rated_total_capacity(self):
# todo_eo: It appears that SHR and cooling capacity should be required for sizing but EO doesn't enforce it.
self.base_epjson['HVACTemplate:System:UnitarySystem']['Sys 1 Furnace DX Cool SnglSpd'][
'dx_cooling_coil_gross_rated_sensible_heat_ratio'] = 0.66
self.base_epjson['HVACTemplate:System:UnitarySystem']['Sys 1 Furnace DX Cool SnglSpd'][
'dx_cooling_coil_gross_rated_total_capacity'] = 2000
base_file_path = self.create_idf_file_from_epjson(epjson=self.base_epjson, file_name='base_pre_input.epJSON')
self.perform_full_comparison(base_idf_file_path=base_file_path)
epjson_output = self.ej._get_json_file(test_dir.joinpath(
'..', 'simulation', 'test', 'test_input_epjson.epJSON'))
self.assertEqual(
2000,
epjson_output['Coil:Cooling:DX:SingleSpeed']['Sys 1 Furnace DX Cool SnglSpd Cooling Coil'][
'gross_rated_total_cooling_capacity'])
return
@BaseSimulationTest._test_logger(doc_text="Simulation:System:UnitarySystem:"
"dx_cooling_coil_gross_rated_cop")
def test_dx_cooling_coil_gross_rated_cop(self):
self.base_epjson['HVACTemplate:System:UnitarySystem']['Sys 1 Furnace DX Cool SnglSpd'][
'dx_cooling_coil_gross_rated_cop'] = 2.77
base_file_path = self.create_idf_file_from_epjson(epjson=self.base_epjson, file_name='base_pre_input.epJSON')
self.perform_full_comparison(base_idf_file_path=base_file_path)
epjson_output = self.ej._get_json_file(test_dir.joinpath(
'..', 'simulation', 'test', 'test_input_epjson.epJSON'))
self.assertEqual(
2.77,
epjson_output['Coil:Cooling:DX:SingleSpeed']['Sys 1 Furnace DX Cool SnglSpd Cooling Coil'][
'gross_rated_cooling_cop'])
return
@BaseSimulationTest._test_logger(doc_text="Simulation:System:UnitarySystem:"
"heating_coil_type_gas")
def test_heating_coil_type_gas(self):
self.base_epjson['HVACTemplate:System:UnitarySystem']['Sys 1 Furnace DX Cool SnglSpd'][
'heating_coil_type'] = 'Gas'
base_file_path = self.create_idf_file_from_epjson(epjson=self.base_epjson, file_name='base_pre_input.epJSON')
self.perform_full_comparison(base_idf_file_path=base_file_path)
epjson_output = self.ej._get_json_file(test_dir.joinpath(
'..', 'simulation', 'test', 'test_input_epjson.epJSON'))
self.assertIsNotNone(
epjson_output['Coil:Heating:Fuel'].get('Sys 1 Furnace DX Cool SnglSpd Heating Coil'))
return
@BaseSimulationTest._test_logger(doc_text="Simulation:System:UnitarySystem:"
"heating_coil_type_electric")
def test_heating_coil_type_electric(self):
self.base_epjson['HVACTemplate:System:UnitarySystem']['Sys 1 Furnace DX Cool SnglSpd'][
'heating_coil_type'] = 'Electric'
base_file_path = self.create_idf_file_from_epjson(epjson=self.base_epjson, file_name='base_pre_input.epJSON')
self.perform_full_comparison(base_idf_file_path=base_file_path)
epjson_output = self.ej._get_json_file(test_dir.joinpath(
'..', 'simulation', 'test', 'test_input_epjson.epJSON'))
self.assertIsNotNone(
epjson_output['Coil:Heating:Electric'].get('Sys 1 Furnace DX Cool SnglSpd Heating Coil'))
return
@BaseSimulationTest._test_logger(doc_text="Simulation:System:UnitarySystem:"
"heating_coil_type_hot_water")
def test_heating_coil_type_hot_water(self):
self.base_epjson['HVACTemplate:System:UnitarySystem']['Sys 1 Furnace DX Cool SnglSpd'][
'heating_coil_type'] = 'HotWater'
base_file_path = self.create_idf_file_from_epjson(epjson=self.base_epjson, file_name='base_pre_input.epJSON')
self.perform_full_comparison(base_idf_file_path=base_file_path)
epjson_output = self.ej._get_json_file(test_dir.joinpath(
'..', 'simulation', 'test', 'test_input_epjson.epJSON'))
self.assertIsNotNone(
epjson_output['Coil:Heating:Water'].get('Sys 1 Furnace DX Cool SnglSpd Heating Coil'))
return
@BaseSimulationTest._test_logger(doc_text="Simulation:System:UnitarySystem:"
"heating_coil_type_single_speed_dx_heat_pump_air_source")
def test_heating_coil_type_single_speed_dx_heat_pump_air_source(self):
self.base_epjson['HVACTemplate:System:UnitarySystem']['Sys 1 Furnace DX Cool SnglSpd'][
'heating_coil_type'] = 'SingleSpeedDXHeatPumpAirSource'
base_file_path = self.create_idf_file_from_epjson(epjson=self.base_epjson, file_name='base_pre_input.epJSON')
self.perform_full_comparison(base_idf_file_path=base_file_path)
epjson_output = self.ej._get_json_file(test_dir.joinpath(
'..', 'simulation', 'test', 'test_input_epjson.epJSON'))
self.assertIsNotNone(
epjson_output['Coil:Heating:DX:SingleSpeed'].get('Sys 1 Furnace DX Cool SnglSpd Heating Coil'))
return
@BaseSimulationTest._test_logger(doc_text="Simulation:System:UnitarySystem:"
"heating_coil_type_multi_speed_dx_heat_pump_air_source")
def test_heating_coil_type_multi_speed_dx_heat_pump_air_source(self):
# todo_eo: EO fails so making identical template objects is difficult. Check in with team
self.base_epjson['HVACTemplate:System:UnitarySystem']['Sys 1 Furnace DX Cool SnglSpd'][
'heating_coil_type'] = 'MultiSpeedDXHeatPumpAirSource'
self.base_epjson['HVACTemplate:System:UnitarySystem']['Sys 1 Furnace DX Cool SnglSpd'][
'number_of_speeds_or_stages_for_heating'] = 2
base_file_path = self.create_idf_file_from_epjson(epjson=self.base_epjson, file_name='base_pre_input.epJSON')
self.perform_full_comparison(base_idf_file_path=base_file_path)
epjson_output = self.ej._get_json_file(test_dir.joinpath(
'..', 'simulation', 'test', 'test_input_epjson.epJSON'))
self.assertIsNotNone(
epjson_output['Coil:Heating:DX:MultiSpeed'].get('Sys 1 Furnace DX Cool SnglSpd Heating Coil'))
return
@BaseSimulationTest._test_logger(doc_text="Simulation:System:UnitarySystem:"
"heating_coil_type_single_speed_dx_heat_pump_water_source")
def test_heating_coil_type_single_speed_dx_heat_pump_water_source(self):
self.base_epjson['HVACTemplate:System:UnitarySystem']['Sys 1 Furnace DX Cool SnglSpd'][
'heating_coil_type'] = 'SingleSpeedDXHeatPumpWaterSource'
base_file_path = self.create_idf_file_from_epjson(epjson=self.base_epjson, file_name='base_pre_input.epJSON')
self.perform_full_comparison(base_idf_file_path=base_file_path)
epjson_output = self.ej._get_json_file(test_dir.joinpath(
'..', 'simulation', 'test', 'test_input_epjson.epJSON'))
self.assertIsNotNone(
epjson_output['Coil:Heating:WaterToAirHeatPump:EquationFit']
.get('Sys 1 Furnace DX Cool SnglSpd Heating Coil'))
return
@BaseSimulationTest._test_logger(doc_text="Simulation:System:UnitarySystem:"
"heating_coil_type_multi_stage_electric")
def test_heating_coil_type_multi_stage_electric(self):
# todo_eo: EO fails and unable to make comparison for template objects
# ** Severe ** <root>[Coil:Heating:Gas:MultiStage][Sys 1 Furnace DX Cool SnglSpd Heating Coil]
# [stage_2_gas_burner_efficiency] - Value type "string" for input "Curve:Cubic" not permitted by
# 'type' constraint. More similar errors.
self.base_epjson['HVACTemplate:System:UnitarySystem']['Sys 1 Furnace DX Cool SnglSpd'][
'number_of_speeds_or_stages_for_heating'] = 2
self.base_epjson['HVACTemplate:System:UnitarySystem']['Sys 1 Furnace DX Cool SnglSpd'][
'heating_coil_type'] = 'MultiStageElectric'
base_file_path = self.create_idf_file_from_epjson(epjson=self.base_epjson, file_name='base_pre_input.epJSON')
self.perform_full_comparison(base_idf_file_path=base_file_path)
epjson_output = self.ej._get_json_file(test_dir.joinpath(
'..', 'simulation', 'test', 'test_input_epjson.epJSON'))
self.assertIsNotNone(
epjson_output['Coil:Heating:Electric:MultiStage']
.get('Sys 1 Furnace DX Cool SnglSpd Heating Coil'))
return
@BaseSimulationTest._test_logger(doc_text="Simulation:System:UnitarySystem:"
"heating_coil_type_multi_stage_gas")
def test_heating_coil_type_multi_stage_gas(self):
# todo_eo: EO fails and unable to make comparison for template objects
# [Controller:OutdoorAir][VAV Sys 1 OA Controller][economizer_control_type] - "None" - Failed to match
# against any enum values
self.base_epjson['HVACTemplate:System:UnitarySystem']['Sys 1 Furnace DX Cool SnglSpd'][
'number_of_speeds_or_stages_for_heating'] = 2
self.base_epjson['HVACTemplate:System:UnitarySystem']['Sys 1 Furnace DX Cool SnglSpd'][
'heating_coil_type'] = 'MultiStageGas'
base_file_path = self.create_idf_file_from_epjson(epjson=self.base_epjson, file_name='base_pre_input.epJSON')
self.perform_full_comparison(base_idf_file_path=base_file_path)
epjson_output = self.ej._get_json_file(test_dir.joinpath(
'..', 'simulation', 'test', 'test_input_epjson.epJSON'))
self.assertIsNotNone(
epjson_output['Coil:Heating:Gas:MultiStage']
.get('Sys 1 Furnace DX Cool SnglSpd Heating Coil'))
return
@BaseSimulationTest._test_logger(doc_text="Simulation:System:UnitarySystem:"
"heating_coil_type_none")
def test_heating_coil_type_none(self):
self.base_epjson['HVACTemplate:System:UnitarySystem']['Sys 1 Furnace DX Cool SnglSpd'][
'heating_coil_type'] = 'None'
base_file_path = self.create_idf_file_from_epjson(epjson=self.base_epjson, file_name='base_pre_input.epJSON')
self.perform_full_comparison(base_idf_file_path=base_file_path)
epjson_output = self.ej._get_json_file(test_dir.joinpath(
'..', 'simulation', 'test', 'test_input_epjson.epJSON'))
self.assertIsNone(
epjson_output.get('Coil:Heating:Fuel'))
return
@BaseSimulationTest._test_logger(doc_text="Simulation:System:UnitarySystem:"
"cooling_coil_type_none_heating_coil_type_none")
def test_cooling_coil_type_none_heating_coil_type_none(self):
self.base_epjson['HVACTemplate:System:UnitarySystem']['Sys 1 Furnace DX Cool SnglSpd'][
'cooling_coil_type'] = 'None'
self.base_epjson['HVACTemplate:System:UnitarySystem']['Sys 1 Furnace DX Cool SnglSpd'][
'heating_coil_type'] = 'None'
base_file_path = self.create_idf_file_from_epjson(epjson=self.base_epjson, file_name='base_pre_input.epJSON')
self.perform_full_comparison(base_idf_file_path=base_file_path)
epjson_output = self.ej._get_json_file(test_dir.joinpath(
'..', 'simulation', 'test', 'test_input_epjson.epJSON'))
self.assertIsNone(
epjson_output.get('Coil:Heating:Fuel'))
return
@BaseSimulationTest._test_logger(doc_text="Simulation:System:UnitarySystem:"
"cooling_coil_type_none_heating_coil_type_none_with_supplemental")
def test_cooling_coil_type_none_heating_coil_type_none_with_supplemental(self):
self.base_epjson['HVACTemplate:System:UnitarySystem']['Sys 1 Furnace DX Cool SnglSpd'][
'cooling_coil_type'] = 'None'
self.base_epjson['HVACTemplate:System:UnitarySystem']['Sys 1 Furnace DX Cool SnglSpd'][
'heating_coil_type'] = 'None'
self.base_epjson['HVACTemplate:System:UnitarySystem']['Sys 1 Furnace DX Cool SnglSpd'][
'supplemental_heating_or_reheat_coil_type'] = 'Electric'
base_file_path = self.create_idf_file_from_epjson(epjson=self.base_epjson, file_name='base_pre_input.epJSON')
self.perform_full_comparison(base_idf_file_path=base_file_path)
epjson_output = self.ej._get_json_file(test_dir.joinpath(
'..', 'simulation', 'test', 'test_input_epjson.epJSON'))
self.assertIsNone(
epjson_output.get('Coil:Heating:Fuel'))
return
@BaseSimulationTest._test_logger(doc_text="Simulation:System:UnitarySystem:"
"heating_coil_availability_schedule_name")
def test_heating_coil_availability_schedule_name(self):
self.base_epjson['HVACTemplate:System:UnitarySystem']['Sys 1 Furnace DX Cool SnglSpd'][
'heating_coil_availability_schedule_name'] = 'OCCUPY-1'
base_file_path = self.create_idf_file_from_epjson(epjson=self.base_epjson, file_name='base_pre_input.epJSON')
self.perform_full_comparison(base_idf_file_path=base_file_path)
epjson_output = self.ej._get_json_file(test_dir.joinpath(
'..', 'simulation', 'test', 'test_input_epjson.epJSON'))
self.assertEqual(
'OCCUPY-1',
epjson_output['Coil:Heating:Fuel'][
'Sys 1 Furnace DX Cool SnglSpd Heating Coil']['availability_schedule_name'])
return
@BaseSimulationTest._test_logger(doc_text="Simulation:System:UnitarySystem:"
"heating_design_supply_air_temperature")
def test_heating_design_supply_air_temperature(self):
# todo_eo: why is the SetpointManager:SingleZone:Cooling object not affected by this input
self.base_epjson['HVACTemplate:System:UnitarySystem']['Sys 1 Furnace DX Cool SnglSpd'][
'heating_design_supply_air_temperature'] = 48
base_file_path = self.create_idf_file_from_epjson(epjson=self.base_epjson, file_name='base_pre_input.epJSON')
self.perform_full_comparison(base_idf_file_path=base_file_path)
epjson_output = self.ej._get_json_file(test_dir.joinpath(
'..', 'simulation', 'test', 'test_input_epjson.epJSON'))
self.assertEqual(
48,
epjson_output['Sizing:System']['Sys 1 Furnace DX Cool SnglSpd Sizing System'][
'central_heating_design_supply_air_temperature'])
self.assertEqual(
48,
epjson_output['SetpointManager:SingleZone:Cooling'][
'Sys 1 Furnace DX Cool SnglSpd Cooling Supply Air Temp Manager']['maximum_supply_air_temperature'])
return
@BaseSimulationTest._test_logger(doc_text="Simulation:System:UnitarySystem:"
"heating_coil_gross_rated_capacity_hot_water")
def test_heating_coil_gross_rated_capacity_hot_water(self):
self.base_epjson['HVACTemplate:System:UnitarySystem']['Sys 1 Furnace DX Cool SnglSpd'][
'heating_coil_type'] = 'HotWater'
self.base_epjson['HVACTemplate:System:UnitarySystem']['Sys 1 Furnace DX Cool SnglSpd'][
'heating_coil_gross_rated_capacity'] = 2000
base_file_path = self.create_idf_file_from_epjson(epjson=self.base_epjson, file_name='base_pre_input.epJSON')
self.perform_full_comparison(base_idf_file_path=base_file_path)
epjson_output = self.ej._get_json_file(test_dir.joinpath(
'..', 'simulation', 'test', 'test_input_epjson.epJSON'))
self.assertEqual(
2000,
epjson_output['Coil:Heating:Water']['Sys 1 Furnace DX Cool SnglSpd Heating Coil']['rated_capacity'])
return
@BaseSimulationTest._test_logger(doc_text="Simulation:System:UnitarySystem:"
"heating_coil_gross_rated_capacity_gas")
def test_heating_coil_gross_rated_capacity_gas(self):
self.base_epjson['HVACTemplate:System:UnitarySystem']['Sys 1 Furnace DX Cool SnglSpd'][
'heating_coil_type'] = 'Gas'
self.base_epjson['HVACTemplate:System:UnitarySystem']['Sys 1 Furnace DX Cool SnglSpd'][
'heating_coil_gross_rated_capacity'] = 2000
base_file_path = self.create_idf_file_from_epjson(epjson=self.base_epjson, file_name='base_pre_input.epJSON')
self.perform_full_comparison(base_idf_file_path=base_file_path)
epjson_output = self.ej._get_json_file(test_dir.joinpath(
'..', 'simulation', 'test', 'test_input_epjson.epJSON'))
self.assertEqual(
2000,
epjson_output['Coil:Heating:Fuel']['Sys 1 Furnace DX Cool SnglSpd Heating Coil']['nominal_capacity'])
return
@BaseSimulationTest._test_logger(doc_text="Simulation:System:UnitarySystem:"
"heating_coil_gross_rated_capacity_electric")
def test_heating_coil_gross_rated_capacity_electric(self):
self.base_epjson['HVACTemplate:System:UnitarySystem']['Sys 1 Furnace DX Cool SnglSpd'][
'heating_coil_type'] = 'Electric'
self.base_epjson['HVACTemplate:System:UnitarySystem']['Sys 1 Furnace DX Cool SnglSpd'][
'heating_coil_gross_rated_capacity'] = 2000
base_file_path = self.create_idf_file_from_epjson(epjson=self.base_epjson, file_name='base_pre_input.epJSON')
self.perform_full_comparison(base_idf_file_path=base_file_path)
epjson_output = self.ej._get_json_file(test_dir.joinpath(
'..', 'simulation', 'test', 'test_input_epjson.epJSON'))
self.assertEqual(
2000,
epjson_output['Coil:Heating:Electric']['Sys 1 Furnace DX Cool SnglSpd Heating Coil']['nominal_capacity'])
return
@BaseSimulationTest._test_logger(doc_text="Simulation:System:UnitarySystem:"
"gas_heating_coil_efficiency")
def test_gas_heating_coil_efficiency(self):
self.base_epjson['HVACTemplate:System:UnitarySystem']['Sys 1 Furnace DX Cool SnglSpd'][
'gas_heating_coil_efficiency'] = 0.77
base_file_path = self.create_idf_file_from_epjson(epjson=self.base_epjson, file_name='base_pre_input.epJSON')
self.perform_full_comparison(base_idf_file_path=base_file_path)
epjson_output = self.ej._get_json_file(test_dir.joinpath(
'..', 'simulation', 'test', 'test_input_epjson.epJSON'))
self.assertEqual(
0.77,
epjson_output['Coil:Heating:Fuel']['Sys 1 Furnace DX Cool SnglSpd Heating Coil']['burner_efficiency'])
return
@BaseSimulationTest._test_logger(doc_text="Simulation:System:UnitarySystem:"
"gas_heating_coil_parasitic_electric_load")
def test_gas_heating_coil_parasitic_electric_load(self):
self.base_epjson['HVACTemplate:System:UnitarySystem']['Sys 1 Furnace DX Cool SnglSpd'][
'gas_heating_coil_parasitic_electric_load'] = 1
base_file_path = self.create_idf_file_from_epjson(epjson=self.base_epjson, file_name='base_pre_input.epJSON')
self.perform_full_comparison(base_idf_file_path=base_file_path)
epjson_output = self.ej._get_json_file(test_dir.joinpath(
'..', 'simulation', 'test', 'test_input_epjson.epJSON'))
self.assertEqual(
1,
epjson_output['Coil:Heating:Fuel']['Sys 1 Furnace DX Cool SnglSpd Heating Coil']['parasitic_electric_load'])
return
@BaseSimulationTest._test_logger(doc_text="Simulation:System:UnitarySystem:"
"heat_pump_heating_coil_gross_rated_cop")
def test_heat_pump_heating_coil_gross_rated_cop(self):
self.base_epjson['HVACTemplate:System:UnitarySystem']['Sys 1 Furnace DX Cool SnglSpd'][
'heating_coil_type'] = 'SingleSpeedDXHeatPumpAirSource'
self.base_epjson['HVACTemplate:System:UnitarySystem']['Sys 1 Furnace DX Cool SnglSpd'][
'heat_pump_heating_coil_gross_rated_cop'] = 2.9
base_file_path = self.create_idf_file_from_epjson(epjson=self.base_epjson, file_name='base_pre_input.epJSON')
self.perform_full_comparison(base_idf_file_path=base_file_path)
epjson_output = self.ej._get_json_file(test_dir.joinpath(
'..', 'simulation', 'test', 'test_input_epjson.epJSON'))
self.assertEqual(
2.9,
epjson_output['Coil:Heating:DX:SingleSpeed']['Sys 1 Furnace DX Cool SnglSpd Heating Coil'][
'gross_rated_heating_cop'])
return
@BaseSimulationTest._test_logger(doc_text="Simulation:System:UnitarySystem:"
"heat_pump_outdoor_dry_bulb_temperatures")
def test_heat_pump_outdoor_dry_bulb_temperatures(self):
self.base_epjson['HVACTemplate:System:UnitarySystem']['Sys 1 Furnace DX Cool SnglSpd'][
'heating_coil_type'] = 'SingleSpeedDXHeatPumpAirSource'
self.base_epjson['HVACTemplate:System:UnitarySystem']['Sys 1 Furnace DX Cool SnglSpd'][
'heat_pump_heating_minimum_outdoor_dry_bulb_temperature'] = -7
self.base_epjson['HVACTemplate:System:UnitarySystem']['Sys 1 Furnace DX Cool SnglSpd'][
'heat_pump_defrost_maximum_outdoor_dry_bulb_temperature'] = 2
base_file_path = self.create_idf_file_from_epjson(epjson=self.base_epjson, file_name='base_pre_input.epJSON')
self.perform_full_comparison(base_idf_file_path=base_file_path)
epjson_output = self.ej._get_json_file(test_dir.joinpath(
'..', 'simulation', 'test', 'test_input_epjson.epJSON'))
self.assertEqual(
-7,
epjson_output['Coil:Heating:DX:SingleSpeed']['Sys 1 Furnace DX Cool SnglSpd Heating Coil'][
'minimum_outdoor_dry_bulb_temperature_for_compressor_operation'])
self.assertEqual(
2,
epjson_output['Coil:Heating:DX:SingleSpeed']['Sys 1 Furnace DX Cool SnglSpd Heating Coil'][
'maximum_outdoor_dry_bulb_temperature_for_defrost_operation'])
return
@BaseSimulationTest._test_logger(doc_text="Simulation:System:UnitarySystem:"
"heat_pump_defrost_strategy_reverse_cycle")
def test_heat_pump_defrost_strategy_reverse_cycle(self):
self.base_epjson['HVACTemplate:System:UnitarySystem']['Sys 1 Furnace DX Cool SnglSpd'][
'heating_coil_type'] = 'SingleSpeedDXHeatPumpAirSource'
self.base_epjson['HVACTemplate:System:UnitarySystem']['Sys 1 Furnace DX Cool SnglSpd'][
'heat_pump_defrost_strategy'] = 'ReverseCycle'
base_file_path = self.create_idf_file_from_epjson(epjson=self.base_epjson, file_name='base_pre_input.epJSON')
self.perform_full_comparison(base_idf_file_path=base_file_path)
epjson_output = self.ej._get_json_file(test_dir.joinpath(
'..', 'simulation', 'test', 'test_input_epjson.epJSON'))
self.assertEqual(
'ReverseCycle',
epjson_output['Coil:Heating:DX:SingleSpeed']['Sys 1 Furnace DX Cool SnglSpd Heating Coil'][
'defrost_strategy'])
return
@BaseSimulationTest._test_logger(doc_text="Simulation:System:UnitarySystem:"
"heat_pump_defrost_strategy_resistive")
def test_heat_pump_defrost_strategy_resistive(self):
self.base_epjson['HVACTemplate:System:UnitarySystem']['Sys 1 Furnace DX Cool SnglSpd'][
'heating_coil_type'] = 'SingleSpeedDXHeatPumpAirSource'
self.base_epjson['HVACTemplate:System:UnitarySystem']['Sys 1 Furnace DX Cool SnglSpd'][
'heat_pump_defrost_strategy'] = 'Resistive'
base_file_path = self.create_idf_file_from_epjson(epjson=self.base_epjson, file_name='base_pre_input.epJSON')
self.perform_full_comparison(base_idf_file_path=base_file_path)
epjson_output = self.ej._get_json_file(test_dir.joinpath(
'..', 'simulation', 'test', 'test_input_epjson.epJSON'))
self.assertEqual(
'Resistive',
epjson_output['Coil:Heating:DX:SingleSpeed']['Sys 1 Furnace DX Cool SnglSpd Heating Coil'][
'defrost_strategy'])
return
@BaseSimulationTest._test_logger(doc_text="Simulation:System:UnitarySystem:"
"heat_pump_defrost_control_timed")
def test_heat_pump_defrost_control_timed(self):
self.base_epjson['HVACTemplate:System:UnitarySystem']['Sys 1 Furnace DX Cool SnglSpd'][
'heating_coil_type'] = 'SingleSpeedDXHeatPumpAirSource'
self.base_epjson['HVACTemplate:System:UnitarySystem']['Sys 1 Furnace DX Cool SnglSpd'][
'heat_pump_defrost_control'] = 'Timed'
self.base_epjson['HVACTemplate:System:UnitarySystem']['Sys 1 Furnace DX Cool SnglSpd'][
'heat_pump_defrost_time_period_fraction'] = 0.06
base_file_path = self.create_idf_file_from_epjson(epjson=self.base_epjson, file_name='base_pre_input.epJSON')
self.perform_full_comparison(base_idf_file_path=base_file_path)
epjson_output = self.ej._get_json_file(test_dir.joinpath(
'..', 'simulation', 'test', 'test_input_epjson.epJSON'))
self.assertEqual(
'Timed',
epjson_output['Coil:Heating:DX:SingleSpeed']['Sys 1 Furnace DX Cool SnglSpd Heating Coil'][
'defrost_control'])
self.assertEqual(
0.06,
epjson_output['Coil:Heating:DX:SingleSpeed']['Sys 1 Furnace DX Cool SnglSpd Heating Coil'][
'defrost_time_period_fraction'])
return
@BaseSimulationTest._test_logger(doc_text="Simulation:System:UnitarySystem:"
"heat_pump_defrost_control_on_demand")
def test_heat_pump_defrost_control_on_demand(self):
self.base_epjson['HVACTemplate:System:UnitarySystem']['Sys 1 Furnace DX Cool SnglSpd'][
'heating_coil_type'] = 'SingleSpeedDXHeatPumpAirSource'
self.base_epjson['HVACTemplate:System:UnitarySystem']['Sys 1 Furnace DX Cool SnglSpd'][
'heat_pump_defrost_control'] = 'OnDemand'
base_file_path = self.create_idf_file_from_epjson(epjson=self.base_epjson, file_name='base_pre_input.epJSON')
self.perform_full_comparison(base_idf_file_path=base_file_path)
epjson_output = self.ej._get_json_file(test_dir.joinpath(
'..', 'simulation', 'test', 'test_input_epjson.epJSON'))
self.assertEqual(
'OnDemand',
epjson_output['Coil:Heating:DX:SingleSpeed']['Sys 1 Furnace DX Cool SnglSpd Heating Coil'][
'defrost_control'])
return
@BaseSimulationTest._test_logger(doc_text="Simulation:System:UnitarySystem:"
"supplemental_heating_or_reheat_coil_type_none")
def test_supplemental_heating_or_reheat_coil_type_none(self):
self.base_epjson['HVACTemplate:System:UnitarySystem']['Sys 1 Furnace DX Cool SnglSpd'][
'supplemental_heating_or_reheat_coil_type'] = 'None'
base_file_path = self.create_idf_file_from_epjson(epjson=self.base_epjson, file_name='base_pre_input.epJSON')
self.perform_full_comparison(base_idf_file_path=base_file_path)
epjson_output = self.ej._get_json_file(test_dir.joinpath(
'..', 'simulation', 'test', 'test_input_epjson.epJSON'))
self.assertIsNone(epjson_output['Coil:Heating:Electric'].get('Sys 1 Furnace DX Cool SnglSpd Supp Heating Coil'))
return
@BaseSimulationTest._test_logger(doc_text="Simulation:System:UnitarySystem:"
"supplemental_heating_or_reheat_coil_type_electric")
def test_supplemental_heating_or_reheat_coil_type_electric(self):
self.base_epjson['HVACTemplate:System:UnitarySystem']['Sys 1 Furnace DX Cool SnglSpd'][
'supplemental_heating_or_reheat_coil_type'] = 'Electric'
base_file_path = self.create_idf_file_from_epjson(epjson=self.base_epjson, file_name='base_pre_input.epJSON')
self.perform_full_comparison(base_idf_file_path=base_file_path)
epjson_output = self.ej._get_json_file(test_dir.joinpath(
'..', 'simulation', 'test', 'test_input_epjson.epJSON'))
self.assertIsNotNone(
epjson_output['Coil:Heating:Electric'].get('Sys 1 Furnace DX Cool SnglSpd Supp Heating Coil'))
return
@BaseSimulationTest._test_logger(doc_text="Simulation:System:UnitarySystem:"
"supplemental_heating_or_reheat_coil_type_gas")
def test_supplemental_heating_or_reheat_coil_type_gas(self):
self.base_epjson['HVACTemplate:System:UnitarySystem']['Sys 1 Furnace DX Cool SnglSpd'][
'supplemental_heating_or_reheat_coil_type'] = 'Gas'
base_file_path = self.create_idf_file_from_epjson(epjson=self.base_epjson, file_name='base_pre_input.epJSON')
self.perform_full_comparison(base_idf_file_path=base_file_path)
epjson_output = self.ej._get_json_file(test_dir.joinpath(
'..', 'simulation', 'test', 'test_input_epjson.epJSON'))
self.assertIsNotNone(epjson_output['Coil:Heating:Fuel'].get('Sys 1 Furnace DX Cool SnglSpd Supp Heating Coil'))
return
@BaseSimulationTest._test_logger(doc_text="Simulation:System:UnitarySystem:"
"supplemental_heating_or_reheat_coil_type_hot_water")
def test_supplemental_heating_or_reheat_coil_type_hot_water(self):
self.base_epjson['HVACTemplate:System:UnitarySystem']['Sys 1 Furnace DX Cool SnglSpd'][
'supplemental_heating_or_reheat_coil_type'] = 'HotWater'
base_file_path = self.create_idf_file_from_epjson(epjson=self.base_epjson, file_name='base_pre_input.epJSON')
self.perform_full_comparison(base_idf_file_path=base_file_path)
epjson_output = self.ej._get_json_file(test_dir.joinpath(
'..', 'simulation', 'test', 'test_input_epjson.epJSON'))
self.assertIsNotNone(epjson_output['Coil:Heating:Water'].get('Sys 1 Furnace DX Cool SnglSpd Supp Heating Coil'))
return
@BaseSimulationTest._test_logger(doc_text="Simulation:System:UnitarySystem:"
"supplemental_heating_or_reheat_coil_type_desuperheater")
def test_supplemental_heating_or_reheat_coil_type_desuperheater(self):
self.base_epjson['HVACTemplate:System:UnitarySystem']['Sys 1 Furnace DX Cool SnglSpd'][
'supplemental_heating_or_reheat_coil_type'] = 'DesuperHeater'
base_file_path = self.create_idf_file_from_epjson(epjson=self.base_epjson, file_name='base_pre_input.epJSON')
self.perform_full_comparison(base_idf_file_path=base_file_path)
epjson_output = self.ej._get_json_file(test_dir.joinpath(
'..', 'simulation', 'test', 'test_input_epjson.epJSON'))
self.assertIsNotNone(
epjson_output['Coil:Heating:Desuperheater'].get('Sys 1 Furnace DX Cool SnglSpd Supp Heating Coil'))
return
@BaseSimulationTest._test_logger(doc_text="Simulation:System:UnitarySystem:"
"supplemental_heating_or_reheat_coil_availability_schedule_name")
def test_supplemental_heating_or_reheat_coil_availability_schedule_name(self):
self.base_epjson['HVACTemplate:System:UnitarySystem']['Sys 1 Furnace DX Cool SnglSpd'][
'supplemental_heating_or_reheat_coil_type'] = 'Electric'
self.base_epjson['HVACTemplate:System:UnitarySystem']['Sys 1 Furnace DX Cool SnglSpd'][
'supplemental_heating_or_reheat_coil_availability_schedule_name'] = 'OCCUPY-1'
base_file_path = self.create_idf_file_from_epjson(epjson=self.base_epjson, file_name='base_pre_input.epJSON')
self.perform_full_comparison(base_idf_file_path=base_file_path)
epjson_output = self.ej._get_json_file(test_dir.joinpath(
'..', 'simulation', 'test', 'test_input_epjson.epJSON'))
self.assertEqual(
'OCCUPY-1',
epjson_output['Coil:Heating:Electric']['Sys 1 Furnace DX Cool SnglSpd Supp Heating Coil'][
'availability_schedule_name'])
return
@BaseSimulationTest._test_logger(doc_text="Simulation:System:UnitarySystem:"
"supplemental_heating_or_reheat_coil_capacity")
def test_supplemental_heating_or_reheat_coil_capacity(self):
self.base_epjson['HVACTemplate:System:UnitarySystem']['Sys 1 Furnace DX Cool SnglSpd'][
'supplemental_heating_or_reheat_coil_type'] = 'HotWater'
self.base_epjson['HVACTemplate:System:UnitarySystem']['Sys 1 Furnace DX Cool SnglSpd'][
'supplemental_heating_or_reheat_coil_capacity'] = 2000
base_file_path = self.create_idf_file_from_epjson(epjson=self.base_epjson, file_name='base_pre_input.epJSON')
self.perform_full_comparison(base_idf_file_path=base_file_path)
epjson_output = self.ej._get_json_file(test_dir.joinpath(
'..', 'simulation', 'test', 'test_input_epjson.epJSON'))
self.assertEqual(
2000,
epjson_output['Coil:Heating:Water']['Sys 1 Furnace DX Cool SnglSpd Supp Heating Coil'][
'rated_capacity'])
return
@BaseSimulationTest._test_logger(doc_text="Simulation:System:UnitarySystem:"
"supplemental_heating_or_reheat_coil_maximum_outdoor_dry"
"_bulb_temperature")
def test_supplemental_heating_or_reheat_coil_maximum_outdoor_dry_bulb_temperature(self):
self.base_epjson['HVACTemplate:System:UnitarySystem']['Sys 1 Furnace DX Cool SnglSpd'][
'supplemental_heating_or_reheat_coil_type'] = 'Electric'
self.base_epjson['HVACTemplate:System:UnitarySystem']['Sys 1 Furnace DX Cool SnglSpd'][
'supplemental_heating_or_reheat_coil_maximum_outdoor_dry_bulb_temperature'] = 19
base_file_path = self.create_idf_file_from_epjson(epjson=self.base_epjson, file_name='base_pre_input.epJSON')
self.perform_full_comparison(base_idf_file_path=base_file_path)
epjson_output = self.ej._get_json_file(test_dir.joinpath(
'..', 'simulation', 'test', 'test_input_epjson.epJSON'))
self.assertEqual(
19,
epjson_output['AirLoopHVAC:UnitarySystem']['Sys 1 Furnace DX Cool SnglSpd Unitary System'][
'maximum_outdoor_dry_bulb_temperature_for_supplemental_heater_operation'])
return
@BaseSimulationTest._test_logger(doc_text="Simulation:System:UnitarySystem:"
"supplemental_gas_heating_or_reheat_coil_inputs")
def test_supplemental_gas_heating_or_reheat_coil_inputs(self):
self.base_epjson['HVACTemplate:System:UnitarySystem']['Sys 1 Furnace DX Cool SnglSpd'][
'supplemental_heating_or_reheat_coil_type'] = 'Gas'
self.base_epjson['HVACTemplate:System:UnitarySystem']['Sys 1 Furnace DX Cool SnglSpd'][
'supplemental_gas_heating_or_reheat_coil_efficiency'] = 0.77
self.base_epjson['HVACTemplate:System:UnitarySystem']['Sys 1 Furnace DX Cool SnglSpd'][
'supplemental_gas_heating_or_reheat_coil_parasitic_electric_load'] = 1
base_file_path = self.create_idf_file_from_epjson(epjson=self.base_epjson, file_name='base_pre_input.epJSON')
self.perform_full_comparison(base_idf_file_path=base_file_path)
epjson_output = self.ej._get_json_file(test_dir.joinpath(
'..', 'simulation', 'test', 'test_input_epjson.epJSON'))
self.assertEqual(
0.77,
epjson_output['Coil:Heating:Fuel']['Sys 1 Furnace DX Cool SnglSpd Supp Heating Coil']['burner_efficiency'])
self.assertEqual(
1,
epjson_output['Coil:Heating:Fuel']['Sys 1 Furnace DX Cool SnglSpd Supp Heating Coil'][
'parasitic_electric_load'])
return
@BaseSimulationTest._test_logger(doc_text="Simulation:System:UnitarySystem:"
"outdoor_air_flow_rates")
def test_outdoor_air_flow_rates(self):
self.base_epjson['HVACTemplate:System:UnitarySystem']['Sys 1 Furnace DX Cool SnglSpd'][
'maximum_outdoor_air_flow_rate'] = 0.66
self.base_epjson['HVACTemplate:System:UnitarySystem']['Sys 1 Furnace DX Cool SnglSpd'][
'minimum_outdoor_air_flow_rate'] = 0.1
base_file_path = self.create_idf_file_from_epjson(epjson=self.base_epjson, file_name='base_pre_input.epJSON')
self.perform_full_comparison(base_idf_file_path=base_file_path)
epjson_output = self.ej._get_json_file(test_dir.joinpath(
'..', 'simulation', 'test', 'test_input_epjson.epJSON'))
self.assertEqual(
0.66,
epjson_output['Controller:OutdoorAir']['Sys 1 Furnace DX Cool SnglSpd OA Controller'][
'maximum_outdoor_air_flow_rate'])
self.assertEqual(
0.1,
epjson_output['Controller:OutdoorAir']['Sys 1 Furnace DX Cool SnglSpd OA Controller'][
'minimum_outdoor_air_flow_rate'])
return
@BaseSimulationTest._test_logger(doc_text="Simulation:System:UnitarySystem:minimum_outdoor_air_schedule_name")
def test_minimum_outdoor_air_schedule_name(self):
self.ej.merge_epjson(
super_dictionary=self.base_epjson,
object_dictionary=schedule_objects)
self.base_epjson['HVACTemplate:System:UnitarySystem']['Sys 1 Furnace DX Cool SnglSpd'][
'minimum_outdoor_air_schedule_name'] = 'Always0.8'
base_file_path = self.create_idf_file_from_epjson(epjson=self.base_epjson, file_name='base_pre_input.epJSON')
self.perform_full_comparison(base_idf_file_path=base_file_path)
epjson_output = self.ej._get_json_file(test_dir.joinpath(
'..', 'simulation', 'test', 'test_input_epjson.epJSON'))
self.assertEqual(
'Always0.8',
epjson_output['Controller:OutdoorAir']['Sys 1 Furnace DX Cool SnglSpd OA Controller'][
'minimum_outdoor_air_schedule_name'])
return
@BaseSimulationTest._test_logger(doc_text="Simulation:System:UnitarySystem:economizer_type_no_economizer")
def test_economizer_type_no_economizer(self):
self.base_epjson['HVACTemplate:System:UnitarySystem']['Sys 1 Furnace DX Cool SnglSpd'][
'economizer_type'] = 'NoEconomizer'
base_file_path = self.create_idf_file_from_epjson(epjson=self.base_epjson, file_name='base_pre_input.epJSON')
self.perform_full_comparison(base_idf_file_path=base_file_path)
epjson_output = self.ej._get_json_file(test_dir.joinpath(
'..', 'simulation', 'test', 'test_input_epjson.epJSON'))
self.assertEqual(
'NoEconomizer',
epjson_output['Controller:OutdoorAir']['Sys 1 Furnace DX Cool SnglSpd OA Controller'][
'economizer_control_type'])
return
@BaseSimulationTest._test_logger(doc_text="Simulation:System:UnitarySystem:"
"economizer_type_fixed_dry_bulb")
def test_economizer_type_fixed_dry_bulb(self):
self.base_epjson['HVACTemplate:System:UnitarySystem']['Sys 1 Furnace DX Cool SnglSpd'][
'economizer_type'] = 'FixedDryBulb'
base_file_path = self.create_idf_file_from_epjson(epjson=self.base_epjson, file_name='base_pre_input.epJSON')
self.perform_full_comparison(base_idf_file_path=base_file_path)
epjson_output = self.ej._get_json_file(test_dir.joinpath(
'..', 'simulation', 'test', 'test_input_epjson.epJSON'))
self.assertEqual(
'FixedDryBulb',
epjson_output['Controller:OutdoorAir']['Sys 1 Furnace DX Cool SnglSpd OA Controller'][
'economizer_control_type'])
return
@BaseSimulationTest._test_logger(doc_text="Simulation:System:UnitarySystem:"
"economizer_type_fixed_enthalpy")
def test_economizer_type_fixed_enthalpy(self):
self.base_epjson['HVACTemplate:System:UnitarySystem']['Sys 1 Furnace DX Cool SnglSpd'][
'economizer_type'] = 'FixedEnthalpy'
base_file_path = self.create_idf_file_from_epjson(epjson=self.base_epjson, file_name='base_pre_input.epJSON')
self.perform_full_comparison(base_idf_file_path=base_file_path)
epjson_output = self.ej._get_json_file(test_dir.joinpath(
'..', 'simulation', 'test', 'test_input_epjson.epJSON'))
self.assertEqual(
'FixedEnthalpy',
epjson_output['Controller:OutdoorAir']['Sys 1 Furnace DX Cool SnglSpd OA Controller'][
'economizer_control_type'])
return
@BaseSimulationTest._test_logger(doc_text="Simulation:System:UnitarySystem:"
"economizer_type_differential_dry_bulb")
def test_economizer_type_differential_dry_bulb(self):
self.base_epjson['HVACTemplate:System:UnitarySystem']['Sys 1 Furnace DX Cool SnglSpd'][
'economizer_type'] = 'DifferentialDryBulb'
base_file_path = self.create_idf_file_from_epjson(epjson=self.base_epjson, file_name='base_pre_input.epJSON')
self.perform_full_comparison(base_idf_file_path=base_file_path)
epjson_output = self.ej._get_json_file(test_dir.joinpath(
'..', 'simulation', 'test', 'test_input_epjson.epJSON'))
self.assertEqual(
'DifferentialDryBulb',
epjson_output['Controller:OutdoorAir']['Sys 1 Furnace DX Cool SnglSpd OA Controller'][
'economizer_control_type'])
return
@BaseSimulationTest._test_logger(doc_text="Simulation:System:UnitarySystem:"
"economizer_type_differential_enthalpy")
def test_economizer_type_differential_enthalpy(self):
self.base_epjson['HVACTemplate:System:UnitarySystem']['Sys 1 Furnace DX Cool SnglSpd'][
'economizer_type'] = 'DifferentialEnthalpy'
base_file_path = self.create_idf_file_from_epjson(epjson=self.base_epjson, file_name='base_pre_input.epJSON')
self.perform_full_comparison(base_idf_file_path=base_file_path)
epjson_output = self.ej._get_json_file(test_dir.joinpath(
'..', 'simulation', 'test', 'test_input_epjson.epJSON'))
self.assertEqual(
'DifferentialEnthalpy',
epjson_output['Controller:OutdoorAir']['Sys 1 Furnace DX Cool SnglSpd OA Controller'][
'economizer_control_type'])
return
@BaseSimulationTest._test_logger(doc_text="Simulation:System:UnitarySystem:"
"economizer_type_fixed_dew_point_and_dry_bulb")
def test_economizer_type_fixed_dew_point_and_dry_bulb(self):
self.base_epjson['HVACTemplate:System:UnitarySystem']['Sys 1 Furnace DX Cool SnglSpd'][
'economizer_type'] = 'FixedDewPointAndDryBulb'
base_file_path = self.create_idf_file_from_epjson(epjson=self.base_epjson, file_name='base_pre_input.epJSON')
self.perform_full_comparison(base_idf_file_path=base_file_path)
epjson_output = self.ej._get_json_file(test_dir.joinpath(
'..', 'simulation', 'test', 'test_input_epjson.epJSON'))
self.assertEqual(
'FixedDewPointAndDryBulb',
epjson_output['Controller:OutdoorAir']['Sys 1 Furnace DX Cool SnglSpd OA Controller'][
'economizer_control_type'])
return
@BaseSimulationTest._test_logger(doc_text="Simulation:System:UnitarySystem:"
"economizer_type_electronic_enthalpy")
def test_economizer_type_electronic_enthalpy(self):
self.base_epjson['HVACTemplate:System:UnitarySystem']['Sys 1 Furnace DX Cool SnglSpd'][
'economizer_type'] = 'ElectronicEnthalpy'
base_file_path = self.create_idf_file_from_epjson(epjson=self.base_epjson, file_name='base_pre_input.epJSON')
self.perform_full_comparison(base_idf_file_path=base_file_path)
epjson_output = self.ej._get_json_file(test_dir.joinpath(
'..', 'simulation', 'test', 'test_input_epjson.epJSON'))
self.assertEqual(
'ElectronicEnthalpy',
epjson_output['Controller:OutdoorAir']['Sys 1 Furnace DX Cool SnglSpd OA Controller'][
'economizer_control_type'])
return
@BaseSimulationTest._test_logger(doc_text="Simulation:System:UnitarySystem:"
"economizer_type_differential_dry_bulb_and_enthalpy")
def test_economizer_type_differential_dry_bulb_and_enthalpy(self):
self.base_epjson['HVACTemplate:System:UnitarySystem']['Sys 1 Furnace DX Cool SnglSpd'][
'economizer_type'] = 'DifferentialDryBulbAndEnthalpy'
base_file_path = self.create_idf_file_from_epjson(epjson=self.base_epjson, file_name='base_pre_input.epJSON')
self.perform_full_comparison(base_idf_file_path=base_file_path)
epjson_output = self.ej._get_json_file(test_dir.joinpath(
'..', 'simulation', 'test', 'test_input_epjson.epJSON'))
self.assertEqual(
'DifferentialDryBulbAndEnthalpy',
epjson_output['Controller:OutdoorAir']['Sys 1 Furnace DX Cool SnglSpd OA Controller'][
'economizer_control_type'])
return
@BaseSimulationTest._test_logger(doc_text="Simulation:System:UnitarySystem:"
"economizer_lockout_no_lockout")
def test_economizer_lockout_no_lockout(self):
self.base_epjson['HVACTemplate:System:UnitarySystem']['Sys 1 Furnace DX Cool SnglSpd'][
'economizer_lockout'] = 'NoLockout'
base_file_path = self.create_idf_file_from_epjson(epjson=self.base_epjson, file_name='base_pre_input.epJSON')
self.perform_full_comparison(base_idf_file_path=base_file_path)
epjson_output = self.ej._get_json_file(test_dir.joinpath(
'..', 'simulation', 'test', 'test_input_epjson.epJSON'))
self.assertEqual(
'NoLockout',
epjson_output['Controller:OutdoorAir']['Sys 1 Furnace DX Cool SnglSpd OA Controller'][
'lockout_type'])
return
@BaseSimulationTest._test_logger(doc_text="Simulation:System:UnitarySystem:"
"economizer_lockout_lockout_with_heating")
def test_economizer_lockout_lockout_with_heating(self):
self.base_epjson['HVACTemplate:System:UnitarySystem']['Sys 1 Furnace DX Cool SnglSpd'][
'economizer_lockout'] = 'LockoutWithHeating'
base_file_path = self.create_idf_file_from_epjson(epjson=self.base_epjson, file_name='base_pre_input.epJSON')
self.perform_full_comparison(base_idf_file_path=base_file_path)
epjson_output = self.ej._get_json_file(test_dir.joinpath(
'..', 'simulation', 'test', 'test_input_epjson.epJSON'))
self.assertEqual(
'LockoutWithHeating',
epjson_output['Controller:OutdoorAir']['Sys 1 Furnace DX Cool SnglSpd OA Controller'][
'lockout_type'])
return
@BaseSimulationTest._test_logger(doc_text="Simulation:System:UnitarySystem:"
"economizer_lockout_lockout_with_heating")
def test_economizer_lockout_lockout_with_compressor(self):
self.base_epjson['HVACTemplate:System:UnitarySystem']['Sys 1 Furnace DX Cool SnglSpd'][
'economizer_lockout'] = 'LockoutWithCompressor'
base_file_path = self.create_idf_file_from_epjson(epjson=self.base_epjson, file_name='base_pre_input.epJSON')
self.perform_full_comparison(base_idf_file_path=base_file_path)
epjson_output = self.ej._get_json_file(test_dir.joinpath(
'..', 'simulation', 'test', 'test_input_epjson.epJSON'))
self.assertEqual(
'LockoutWithCompressor',
epjson_output['Controller:OutdoorAir']['Sys 1 Furnace DX Cool SnglSpd OA Controller'][
'lockout_type'])
return
@BaseSimulationTest._test_logger(doc_text="Simulation:System:UnitarySystem:economizer_temperature_limits")
def test_economizer_temperature_limits(self):
self.base_epjson['HVACTemplate:System:UnitarySystem']['Sys 1 Furnace DX Cool SnglSpd'][
'economizer_type'] = 'FixedDryBulb'
self.base_epjson['HVACTemplate:System:UnitarySystem']['Sys 1 Furnace DX Cool SnglSpd'][
'economizer_maximum_limit_dry_bulb_temperature'] = 18
self.base_epjson['HVACTemplate:System:UnitarySystem']['Sys 1 Furnace DX Cool SnglSpd'][
'economizer_minimum_limit_dry_bulb_temperature'] = 5
base_file_path = self.create_idf_file_from_epjson(epjson=self.base_epjson, file_name='base_pre_input.epJSON')
self.perform_full_comparison(base_idf_file_path=base_file_path)
epjson_output = self.ej._get_json_file(test_dir.joinpath(
'..', 'simulation', 'test', 'test_input_epjson.epJSON'))
self.assertEqual(
18,
epjson_output['Controller:OutdoorAir']['Sys 1 Furnace DX Cool SnglSpd OA Controller'][
'economizer_maximum_limit_dry_bulb_temperature'])
self.assertEqual(
5,
epjson_output['Controller:OutdoorAir']['Sys 1 Furnace DX Cool SnglSpd OA Controller'][
'economizer_minimum_limit_dry_bulb_temperature'])
return
@BaseSimulationTest._test_logger(doc_text="Simulation:System:UnitarySystem:economizer_upper_enthalpy_limit")
def test_economizer_maximum_limit_enthalpy(self):
self.base_epjson['HVACTemplate:System:UnitarySystem']['Sys 1 Furnace DX Cool SnglSpd'][
'economizer_maximum_limit_enthalpy'] = 100
base_file_path = self.create_idf_file_from_epjson(epjson=self.base_epjson, file_name='base_pre_input.epJSON')
self.perform_full_comparison(base_idf_file_path=base_file_path)
epjson_output = self.ej._get_json_file(test_dir.joinpath(
'..', 'simulation', 'test', 'test_input_epjson.epJSON'))
self.assertEqual(
100,
epjson_output['Controller:OutdoorAir']['Sys 1 Furnace DX Cool SnglSpd OA Controller'][
'economizer_maximum_limit_enthalpy'])
return
@BaseSimulationTest._test_logger(doc_text="Simulation:System:UnitarySystem:"
"economizer_maximum_limit_dewpoint_temperature")
def test_economizer_maximum_limit_dewpoint_temperature(self):
# todo_eo: Notes say that limit is applied regardless of what economizer type is applied. However, EO only
# applies the value when certain economizer is selected. Figure out what is preferred method.
self.base_epjson['HVACTemplate:System:UnitarySystem']['Sys 1 Furnace DX Cool SnglSpd'][
'economizer_type'] = 'FixedDewPointAndDryBulb'
self.base_epjson['HVACTemplate:System:UnitarySystem']['Sys 1 Furnace DX Cool SnglSpd'][
'economizer_maximum_limit_dewpoint_temperature'] = 20
base_file_path = self.create_idf_file_from_epjson(epjson=self.base_epjson, file_name='base_pre_input.epJSON')
self.perform_full_comparison(base_idf_file_path=base_file_path)
epjson_output = self.ej._get_json_file(test_dir.joinpath(
'..', 'simulation', 'test', 'test_input_epjson.epJSON'))
self.assertEqual(
20,
epjson_output['Controller:OutdoorAir']['Sys 1 Furnace DX Cool SnglSpd OA Controller'][
'economizer_maximum_limit_dewpoint_temperature'])
return
@BaseSimulationTest._test_logger(doc_text="Simulation:System:UnitarySystem:supply_plenum_name")
def test_supply_plenum_name(self):
self.base_epjson['HVACTemplate:System:UnitarySystem']['Sys 1 Furnace DX Cool SnglSpd'][
'supply_plenum_name'] = 'PLENUM-1'
base_file_path = self.create_idf_file_from_epjson(epjson=self.base_epjson, file_name='base_pre_input.epJSON')
self.perform_full_comparison(base_idf_file_path=base_file_path)
epjson_output = self.ej._get_json_file(test_dir.joinpath(
'..', 'simulation', 'test', 'test_input_epjson.epJSON'))
self.assertEqual(
'PLENUM-1',
epjson_output['AirLoopHVAC:SupplyPlenum']['Sys 1 Furnace DX Cool SnglSpd Supply Plenum']['zone_name'])
return
@BaseSimulationTest._test_logger(doc_text="Simulation:System:UnitarySystem:return_plenum_name")
def test_return_plenum_name(self):
self.base_epjson['HVACTemplate:System:UnitarySystem']['Sys 1 Furnace DX Cool SnglSpd'][
'return_plenum_name'] = 'PLENUM-1'
base_file_path = self.create_idf_file_from_epjson(epjson=self.base_epjson, file_name='base_pre_input.epJSON')
self.perform_full_comparison(base_idf_file_path=base_file_path)
epjson_output = self.ej._get_json_file(test_dir.joinpath(
'..', 'simulation', 'test', 'test_input_epjson.epJSON'))
self.assertEqual(
'PLENUM-1',
epjson_output['AirLoopHVAC:ReturnPlenum']['Sys 1 Furnace DX Cool SnglSpd Return Plenum']['zone_name'])
return
@BaseSimulationTest._test_logger(doc_text="Simulation:System:UnitarySystem:heat_recovery_sensible")
def test_heat_recovery_sensible(self):
self.base_epjson['HVACTemplate:System:UnitarySystem']['Sys 1 Furnace DX Cool SnglSpd'][
'heat_recovery_type'] = 'Sensible'
base_file_path = self.create_idf_file_from_epjson(epjson=self.base_epjson, file_name='base_pre_input.epJSON')
self.perform_full_comparison(base_idf_file_path=base_file_path)
epjson_output = self.ej._get_json_file(test_dir.joinpath(
'..', 'simulation', 'test', 'test_input_epjson.epJSON'))
self.assertIsNotNone(epjson_output.get('HeatExchanger:AirToAir:SensibleAndLatent'))
return
@BaseSimulationTest._test_logger(doc_text="Simulation:System:UnitarySystem:heat_recovery_enthalpy")
def test_heat_recovery_enthalpy(self):
self.base_epjson['HVACTemplate:System:UnitarySystem']['Sys 1 Furnace DX Cool SnglSpd'][
'heat_recovery_type'] = 'Enthalpy'
base_file_path = self.create_idf_file_from_epjson(epjson=self.base_epjson, file_name='base_pre_input.epJSON')
self.perform_full_comparison(base_idf_file_path=base_file_path)
epjson_output = self.ej._get_json_file(test_dir.joinpath(
'..', 'simulation', 'test', 'test_input_epjson.epJSON'))
self.assertIsNotNone(epjson_output.get('HeatExchanger:AirToAir:SensibleAndLatent'))
return
@BaseSimulationTest._test_logger(doc_text="Simulation:System:UnitarySystem:heat_recovery_effectiveness_sensible")
def test_heat_recovery_effectiveness_sensible(self):
self.base_epjson['HVACTemplate:System:UnitarySystem']['Sys 1 Furnace DX Cool SnglSpd'][
'heat_recovery_type'] = 'Sensible'
self.base_epjson['HVACTemplate:System:UnitarySystem']['Sys 1 Furnace DX Cool SnglSpd'][
'sensible_heat_recovery_effectiveness'] = 0.72
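        # 0.72 is the rated effectiveness at 100% air flow; the legacy HVACTemplate expansion
        # appears to set the 75% flow effectiveness 0.05 higher, hence the 0.77 checks below.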
base_file_path = self.create_idf_file_from_epjson(epjson=self.base_epjson, file_name='base_pre_input.epJSON')
self.perform_full_comparison(base_idf_file_path=base_file_path)
epjson_output = self.ej._get_json_file(test_dir.joinpath('..', 'simulation', 'test', 'test_input_epjson.epJSON'))
self.assertIsNotNone(epjson_output.get('HeatExchanger:AirToAir:SensibleAndLatent'))
self.assertEqual(
0.77,
epjson_output['HeatExchanger:AirToAir:SensibleAndLatent']['Sys 1 Furnace DX Cool SnglSpd Heat Recovery'][
'sensible_effectiveness_at_75_cooling_air_flow'])
self.assertEqual(
0.77,
epjson_output['HeatExchanger:AirToAir:SensibleAndLatent']['Sys 1 Furnace DX Cool SnglSpd Heat Recovery'][
'sensible_effectiveness_at_75_heating_air_flow'])
self.assertEqual(
0.72,
epjson_output['HeatExchanger:AirToAir:SensibleAndLatent']['Sys 1 Furnace DX Cool SnglSpd Heat Recovery'][
'sensible_effectiveness_at_100_cooling_air_flow'])
self.assertEqual(
0.72,
epjson_output['HeatExchanger:AirToAir:SensibleAndLatent']['Sys 1 Furnace DX Cool SnglSpd Heat Recovery'][
'sensible_effectiveness_at_100_heating_air_flow'])
return
@BaseSimulationTest._test_logger(doc_text="Simulation:System:UnitarySystem:heat_recovery_effectiveness_enthalpy")
def test_heat_recovery_effectiveness_enthalpy(self):
self.base_epjson['HVACTemplate:System:UnitarySystem']['Sys 1 Furnace DX Cool SnglSpd'][
'heat_recovery_type'] = 'Enthalpy'
self.base_epjson['HVACTemplate:System:UnitarySystem']['Sys 1 Furnace DX Cool SnglSpd'][
'sensible_heat_recovery_effectiveness'] = 0.72
self.base_epjson['HVACTemplate:System:UnitarySystem']['Sys 1 Furnace DX Cool SnglSpd'][
'latent_heat_recovery_effectiveness'] = 0.61
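        # as in the sensible test above, the 75% flow values appear to be the rated inputs
        # plus 0.05 (0.72 -> 0.77 sensible, 0.61 -> 0.66 latent).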
base_file_path = self.create_idf_file_from_epjson(epjson=self.base_epjson, file_name='base_pre_input.epJSON')
self.perform_full_comparison(base_idf_file_path=base_file_path)
epjson_output = self.ej._get_json_file(test_dir.joinpath('..', 'simulation', 'test', 'test_input_epjson.epJSON'))
self.assertIsNotNone(epjson_output.get('HeatExchanger:AirToAir:SensibleAndLatent'))
self.assertEqual(
0.77,
epjson_output['HeatExchanger:AirToAir:SensibleAndLatent']['Sys 1 Furnace DX Cool SnglSpd Heat Recovery'][
'sensible_effectiveness_at_75_cooling_air_flow'])
self.assertEqual(
0.77,
epjson_output['HeatExchanger:AirToAir:SensibleAndLatent']['Sys 1 Furnace DX Cool SnglSpd Heat Recovery'][
'sensible_effectiveness_at_75_heating_air_flow'])
self.assertEqual(
0.72,
epjson_output['HeatExchanger:AirToAir:SensibleAndLatent']['Sys 1 Furnace DX Cool SnglSpd Heat Recovery'][
'sensible_effectiveness_at_100_cooling_air_flow'])
self.assertEqual(
0.72,
epjson_output['HeatExchanger:AirToAir:SensibleAndLatent']['Sys 1 Furnace DX Cool SnglSpd Heat Recovery'][
'sensible_effectiveness_at_100_heating_air_flow'])
self.assertEqual(
0.61,
epjson_output['HeatExchanger:AirToAir:SensibleAndLatent']['Sys 1 Furnace DX Cool SnglSpd Heat Recovery'][
'latent_effectiveness_at_100_cooling_air_flow'])
self.assertEqual(
0.61,
epjson_output['HeatExchanger:AirToAir:SensibleAndLatent']['Sys 1 Furnace DX Cool SnglSpd Heat Recovery'][
'latent_effectiveness_at_100_heating_air_flow'])
self.assertEqual(
0.66,
epjson_output['HeatExchanger:AirToAir:SensibleAndLatent']['Sys 1 Furnace DX Cool SnglSpd Heat Recovery'][
'latent_effectiveness_at_75_cooling_air_flow'])
self.assertEqual(
0.66,
epjson_output['HeatExchanger:AirToAir:SensibleAndLatent']['Sys 1 Furnace DX Cool SnglSpd Heat Recovery'][
'latent_effectiveness_at_75_heating_air_flow'])
return
@BaseSimulationTest._test_logger(doc_text="Simulation:System:UnitarySystem:dehumidification_control_type_none")
def test_dehumidification_control_type_none(self):
self.base_epjson['HVACTemplate:System:UnitarySystem']['Sys 1 Furnace DX Cool SnglSpd'][
'dehumidification_control_type'] = 'None'
base_file_path = self.create_idf_file_from_epjson(epjson=self.base_epjson, file_name='base_pre_input.epJSON')
self.perform_full_comparison(base_idf_file_path=base_file_path)
epjson_output = self.ej._get_json_file(test_dir.joinpath(
'..', 'simulation', 'test', 'test_input_epjson.epJSON'))
return
@BaseSimulationTest._test_logger(doc_text="Simulation:System:UnitarySystem:"
"dehumidification_control_type_cool_reheat")
def test_dehumidification_control_type_cool_reheat(self):
self.base_epjson['HVACTemplate:System:UnitarySystem']['Sys 1 Furnace DX Cool SnglSpd'][
'dehumidification_control_type'] = 'CoolReheat'
self.base_epjson['HVACTemplate:System:UnitarySystem']['Sys 1 Furnace DX Cool SnglSpd'][
'dehumidification_relative_humidity_setpoint'] = 62
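        # a numeric setpoint appears to be expanded into a constant schedule named
        # 'HVACTemplate-Always<value>', which is what the assertion below checks.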
base_file_path = self.create_idf_file_from_epjson(epjson=self.base_epjson, file_name='base_pre_input.epJSON')
self.perform_full_comparison(base_idf_file_path=base_file_path)
epjson_output = self.ej._get_json_file(test_dir.joinpath(
'..', 'simulation', 'test', 'test_input_epjson.epJSON'))
self.assertEqual(
'HVACTemplate-Always62.0',
epjson_output['ZoneControl:Humidistat']['Sys 1 Furnace DX Cool SnglSpd Dehumidification Humidistat'][
'dehumidifying_relative_humidity_setpoint_schedule_name'])
return
@BaseSimulationTest._test_logger(doc_text="Simulation:System:UnitarySystem:"
"dehumidification_control_type_multimode")
def test_dehumidification_control_type_multimode(self):
self.base_epjson['HVACTemplate:System:UnitarySystem']['Sys 1 Furnace DX Cool SnglSpd'][
'dehumidification_control_type'] = 'Multimode'
self.base_epjson['HVACTemplate:System:UnitarySystem']['Sys 1 Furnace DX Cool SnglSpd'][
'dehumidification_relative_humidity_setpoint'] = 62
base_file_path = self.create_idf_file_from_epjson(epjson=self.base_epjson, file_name='base_pre_input.epJSON')
self.perform_full_comparison(base_idf_file_path=base_file_path)
epjson_output = self.ej._get_json_file(test_dir.joinpath(
'..', 'simulation', 'test', 'test_input_epjson.epJSON'))
self.assertEqual(
'HVACTemplate-Always62.0',
epjson_output['ZoneControl:Humidistat']['Sys 1 Furnace DX Cool SnglSpd Dehumidification Humidistat'][
'dehumidifying_relative_humidity_setpoint_schedule_name'])
return
@BaseSimulationTest._test_logger(doc_text="Simulation:System:UnitarySystem:"
"dehumidification_relative_humidity_setpoint_schedule_name")
def test_dehumidification_relative_humidity_setpoint_schedule_name(self):
self.ej.merge_epjson(
super_dictionary=self.base_epjson,
object_dictionary=schedule_objects)
self.base_epjson['HVACTemplate:System:UnitarySystem']['Sys 1 Furnace DX Cool SnglSpd'][
'dehumidification_control_type'] = 'Multimode'
self.base_epjson['HVACTemplate:System:UnitarySystem']['Sys 1 Furnace DX Cool SnglSpd'][
'dehumidification_relative_humidity_setpoint_schedule_name'] = 'Always62'
base_file_path = self.create_idf_file_from_epjson(epjson=self.base_epjson, file_name='base_pre_input.epJSON')
self.perform_full_comparison(base_idf_file_path=base_file_path)
epjson_output = self.ej._get_json_file(test_dir.joinpath(
'..', 'simulation', 'test', 'test_input_epjson.epJSON'))
self.assertEqual(
'Always62',
epjson_output['ZoneControl:Humidistat']['Sys 1 Furnace DX Cool SnglSpd Dehumidification Humidistat'][
'dehumidifying_relative_humidity_setpoint_schedule_name'])
return
@BaseSimulationTest._test_logger(doc_text="Simulation:System:UnitarySystem:humidifier_type")
def test_humidifier_type(self):
self.base_epjson['HVACTemplate:System:UnitarySystem']['Sys 1 Furnace DX Cool SnglSpd'][
'humidifier_type'] = 'ElectricSteam'
self.base_epjson['HVACTemplate:System:UnitarySystem']['Sys 1 Furnace DX Cool SnglSpd'][
'humidifier_control_zone_name'] = 'SPACE1-1'
self.base_epjson['HVACTemplate:System:UnitarySystem']['Sys 1 Furnace DX Cool SnglSpd'][
'humidifier_relative_humidity_setpoint'] = 29
base_file_path = self.create_idf_file_from_epjson(epjson=self.base_epjson, file_name='base_pre_input.epJSON')
self.perform_full_comparison(base_idf_file_path=base_file_path)
epjson_output = self.ej._get_json_file(test_dir.joinpath(
'..', 'simulation', 'test', 'test_input_epjson.epJSON'))
self.assertIsNotNone(
epjson_output['Humidifier:Steam:Electric'].get('Sys 1 Furnace DX Cool SnglSpd Humidifier'))
self.assertIsNotNone(
epjson_output['SetpointManager:SingleZone:Humidity:Minimum']
.get('Sys 1 Furnace DX Cool SnglSpd Humidification Setpoint Manager'))
self.assertEqual(
'HVACTemplate-Always29.0',
epjson_output['ZoneControl:Humidistat']['Sys 1 Furnace DX Cool SnglSpd Humidification Humidistat'][
'humidifying_relative_humidity_setpoint_schedule_name'])
return
@BaseSimulationTest._test_logger(doc_text="Simulation:System:UnitarySystem:humidifier_inputs")
def test_humidifier_inputs(self):
self.base_epjson['HVACTemplate:System:UnitarySystem']['Sys 1 Furnace DX Cool SnglSpd'][
'humidifier_type'] = 'ElectricSteam'
self.base_epjson['HVACTemplate:System:UnitarySystem']['Sys 1 Furnace DX Cool SnglSpd'][
'humidifier_control_zone_name'] = 'SPACE1-1'
self.base_epjson['HVACTemplate:System:UnitarySystem']['Sys 1 Furnace DX Cool SnglSpd'][
'humidifier_relative_humidity_setpoint'] = 29
self.base_epjson['HVACTemplate:System:UnitarySystem']['Sys 1 Furnace DX Cool SnglSpd'][
'humidifier_availability_schedule_name'] = 'OCCUPY-1'
self.base_epjson['HVACTemplate:System:UnitarySystem']['Sys 1 Furnace DX Cool SnglSpd'][
'humidifier_rated_capacity'] = 1
self.base_epjson['HVACTemplate:System:UnitarySystem']['Sys 1 Furnace DX Cool SnglSpd'][
'humidifier_rated_electric_power'] = 1000
base_file_path = self.create_idf_file_from_epjson(epjson=self.base_epjson, file_name='base_pre_input.epJSON')
self.perform_full_comparison(base_idf_file_path=base_file_path)
epjson_output = self.ej._get_json_file(test_dir.joinpath(
'..', 'simulation', 'test', 'test_input_epjson.epJSON'))
self.assertIsNotNone(epjson_output['Humidifier:Steam:Electric'].get('Sys 1 Furnace DX Cool SnglSpd Humidifier'))
self.assertEqual(
'OCCUPY-1',
epjson_output['Humidifier:Steam:Electric'][
'Sys 1 Furnace DX Cool SnglSpd Humidifier']['availability_schedule_name'])
self.assertEqual(
1,
epjson_output['Humidifier:Steam:Electric'][
'Sys 1 Furnace DX Cool SnglSpd Humidifier']['rated_capacity'])
self.assertEqual(
1000,
epjson_output['Humidifier:Steam:Electric'][
'Sys 1 Furnace DX Cool SnglSpd Humidifier']['rated_power'])
self.assertEqual(
'HVACTemplate-Always29.0',
epjson_output['ZoneControl:Humidistat']['Sys 1 Furnace DX Cool SnglSpd Humidification Humidistat'][
'humidifying_relative_humidity_setpoint_schedule_name'])
return
@BaseSimulationTest._test_logger(doc_text="Simulation:System:UnitarySystem:sizing_option_non_coincident")
def test_sizing_option_non_coincident(self):
self.base_epjson['HVACTemplate:System:UnitarySystem']['Sys 1 Furnace DX Cool SnglSpd'][
'sizing_option'] = 'NonCoincident'
base_file_path = self.create_idf_file_from_epjson(epjson=self.base_epjson, file_name='base_pre_input.epJSON')
self.perform_full_comparison(base_idf_file_path=base_file_path)
epjson_output = self.ej._get_json_file(test_dir.joinpath(
'..', 'simulation', 'test', 'test_input_epjson.epJSON'))
self.assertEqual(
'NonCoincident',
epjson_output['Sizing:System']['Sys 1 Furnace DX Cool SnglSpd Sizing System']['type_of_zone_sum_to_use']
)
return
@BaseSimulationTest._test_logger(doc_text="Simulation:System:UnitarySystem:sizing_option_non_coincident")
def test_sizing_option_coincident(self):
self.base_epjson['HVACTemplate:System:UnitarySystem']['Sys 1 Furnace DX Cool SnglSpd'][
'sizing_option'] = 'Coincident'
base_file_path = self.create_idf_file_from_epjson(epjson=self.base_epjson, file_name='base_pre_input.epJSON')
self.perform_full_comparison(base_idf_file_path=base_file_path)
epjson_output = self.ej._get_json_file(test_dir.joinpath(
'..', 'simulation', 'test', 'test_input_epjson.epJSON'))
self.assertEqual(
'Coincident',
epjson_output['Sizing:System']['Sys 1 Furnace DX Cool SnglSpd Sizing System']['type_of_zone_sum_to_use']
)
return
@BaseSimulationTest._test_logger(doc_text="Simulation:System:UnitarySystem:return_fan_no")
def test_return_fan_no(self):
self.base_epjson['HVACTemplate:System:UnitarySystem']['Sys 1 Furnace DX Cool SnglSpd'][
'return_fan'] = 'No'
base_file_path = self.create_idf_file_from_epjson(epjson=self.base_epjson, file_name='base_pre_input.epJSON')
self.perform_full_comparison(base_idf_file_path=base_file_path)
epjson_output = self.ej._get_json_file(test_dir.joinpath(
'..', 'simulation', 'test', 'test_input_epjson.epJSON'))
self.assertIsNone(epjson_output.get('Fan:ConstantVolume'))
return
@BaseSimulationTest._test_logger(doc_text="Simulation:System:UnitarySystem:return_fan_yes")
def test_return_fan_yes(self):
self.base_epjson['HVACTemplate:System:UnitarySystem']['Sys 1 Furnace DX Cool SnglSpd'][
'return_fan'] = 'Yes'
base_file_path = self.create_idf_file_from_epjson(epjson=self.base_epjson, file_name='base_pre_input.epJSON')
self.perform_full_comparison(base_idf_file_path=base_file_path)
epjson_output = self.ej._get_json_file(test_dir.joinpath(
'..', 'simulation', 'test', 'test_input_epjson.epJSON'))
self.assertIsNotNone(epjson_output.get('Fan:ConstantVolume'))
return
@BaseSimulationTest._test_logger(doc_text="Simulation:System:UnitarySystem:return_fan_inputs")
def test_return_fan_inputs(self):
self.base_epjson['HVACTemplate:System:UnitarySystem']['Sys 1 Furnace DX Cool SnglSpd'][
'return_fan'] = 'Yes'
self.base_epjson['HVACTemplate:System:UnitarySystem']['Sys 1 Furnace DX Cool SnglSpd'][
'return_fan_total_efficiency'] = 0.72
self.base_epjson['HVACTemplate:System:UnitarySystem']['Sys 1 Furnace DX Cool SnglSpd'][
'return_fan_delta_pressure'] = 295
self.base_epjson['HVACTemplate:System:UnitarySystem']['Sys 1 Furnace DX Cool SnglSpd'][
'return_fan_motor_efficiency'] = 0.85
self.base_epjson['HVACTemplate:System:UnitarySystem']['Sys 1 Furnace DX Cool SnglSpd'][
'return_fan_motor_in_air_stream_fraction'] = 0.9
base_file_path = self.create_idf_file_from_epjson(epjson=self.base_epjson, file_name='base_pre_input.epJSON')
self.perform_full_comparison(base_idf_file_path=base_file_path)
epjson_output = self.ej._get_json_file(test_dir.joinpath(
'..', 'simulation', 'test', 'test_input_epjson.epJSON'))
self.assertEqual(
0.72,
epjson_output['Fan:ConstantVolume']['Sys 1 Furnace DX Cool SnglSpd Return Fan']['fan_total_efficiency'])
self.assertEqual(
295,
epjson_output['Fan:ConstantVolume']['Sys 1 Furnace DX Cool SnglSpd Return Fan']['pressure_rise'])
self.assertEqual(
0.85,
epjson_output['Fan:ConstantVolume']['Sys 1 Furnace DX Cool SnglSpd Return Fan']['motor_efficiency'])
self.assertEqual(
0.9,
epjson_output['Fan:ConstantVolume']['Sys 1 Furnace DX Cool SnglSpd Return Fan']['motor_in_airstream_fraction'])
return
```
#### File: simulations/thermostat/test_thermostat.py
```python
from pathlib import Path
import tempfile
from argparse import Namespace
import json
from tests.simulations import BaseSimulationTest
from src.epjson_handler import EPJSON
from src.main import main
test_dir = Path(__file__).parent.parent.parent
class TestSimulationsThermostat(BaseSimulationTest):
def setUp(self):
self.ej = EPJSON()
base_idf_file_path = test_dir.joinpath('..', 'simulation', 'ExampleFiles', 'HVACTemplate-5ZoneVAVWaterCooled.idf')
base_copy_file_path = self._copy_to_test_directory(base_idf_file_path)
# read in base file, then edit inputs for alternate tests
self.base_epjson = self.get_epjson_object_from_idf_file(base_copy_file_path)
self.base_epjson.pop('Output:Variable')
return
def teardown(self):
return
@BaseSimulationTest._test_logger(doc_text="Simulation:Thermostat:test_blank")
def test_blank(self):
self.base_epjson['HVACTemplate:Thermostat'].pop('All Zones')
self.base_epjson['HVACTemplate:Thermostat']['All Zones'] = {}
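        # replacing the thermostat template with an empty object should surface a preprocessor
        # warning, which the regex assertion below checks for.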
with tempfile.NamedTemporaryFile(suffix='.epJSON', mode='w') as temp_file:
json.dump(
self.base_epjson,
temp_file)
temp_file.seek(0)
output = main(
Namespace(
file=temp_file.name,
no_schema=False
)
)
self.assertRegex(output['Output:PreprocessorMessage'], r'In HVACTemplate:Thermostat')
return
```
#### File: simulations/zone/test_zone_baseboard_heat.py
```python
from pathlib import Path
from tests.simulations import BaseSimulationTest
from src.epjson_handler import EPJSON
test_dir = Path(__file__).parent.parent.parent
doas_objects = {
"HVACTemplate:System:DedicatedOutdoorAir": {
"DOAS": {
"air_outlet_type": "DirectIntoZone",
"cooling_coil_design_setpoint": 12.8,
"cooling_coil_reset_outdoor_dry_bulb_high": 23.3,
"cooling_coil_reset_outdoor_dry_bulb_low": 15.6,
"cooling_coil_setpoint_at_outdoor_dry_bulb_high": 12.8,
"cooling_coil_setpoint_at_outdoor_dry_bulb_low": 15.6,
"cooling_coil_setpoint_control_type": "FixedSetpoint",
"cooling_coil_type": "TwoStageHumidityControlDX",
"dehumidification_control_type": "Multimode",
"dehumidification_setpoint": 0.00924,
"dx_cooling_coil_gross_rated_cop": 3,
"dx_cooling_coil_gross_rated_sensible_heat_ratio": "Autosize",
"dx_cooling_coil_gross_rated_total_capacity": "Autosize",
"gas_heating_coil_efficiency": 0.8,
"heat_recovery_frost_control_type": "None",
"heat_recovery_heat_exchanger_type": "Plate",
"heat_recovery_latent_effectiveness": 0.65,
"heat_recovery_sensible_effectiveness": 0.7,
"heat_recovery_type": "None",
"heating_coil_design_setpoint": 12.2,
"heating_coil_reset_outdoor_dry_bulb_high": 12.2,
"heating_coil_reset_outdoor_dry_bulb_low": 7.8,
"heating_coil_setpoint_at_outdoor_dry_bulb_high": 12.2,
"heating_coil_setpoint_at_outdoor_dry_bulb_low": 15,
"heating_coil_setpoint_control_type": "FixedSetpoint",
"heating_coil_type": "Gas",
"humidifier_constant_setpoint": 0.003,
"humidifier_rated_capacity": 1e-06,
"humidifier_rated_electric_power": 2690,
"humidifier_type": "None",
"supply_fan_delta_pressure": 1000,
"supply_fan_flow_rate": "Autosize",
"supply_fan_motor_efficiency": 0.9,
"supply_fan_motor_in_air_stream_fraction": 1,
"supply_fan_placement": "BlowThrough",
"supply_fan_total_efficiency": 0.7,
"system_availability_schedule_name": "OCCUPY-1"
}
}
}
design_specification_objects = {
"DesignSpecification:OutdoorAir": {
"SPACE1-1 SZ DSOA Custom Object": {
"outdoor_air_flow_per_zone": 0.01,
"outdoor_air_method": "Flow/Zone"
}
},
"DesignSpecification:ZoneAirDistribution": {
"SPACE1-1 SZ DSZAD Custom Object": {}
}
}
class TestSimulationsZoneBaseboardHeat(BaseSimulationTest):
def setUp(self):
self.ej = EPJSON()
base_idf_file_path = test_dir.joinpath('..', 'simulation', 'ExampleFiles', 'HVACTemplate-5ZoneBaseboardHeat.idf')
base_copy_file_path = self._copy_to_test_directory(base_idf_file_path)
# read in base file, then edit inputs for alternate tests
self.base_epjson = self.get_epjson_object_from_idf_file(base_copy_file_path)
self.base_epjson.pop('Output:Variable')
return
def teardown(self):
return
@BaseSimulationTest._test_logger(doc_text="Simulation:Zone:BaseboardHeat:minimum_inputs")
def test_minimum_inputs(self):
self.base_epjson['HVACTemplate:Zone:BaseboardHeat'].pop('HVACTemplate:Zone:BaseboardHeat 1')
self.ej.merge_epjson(
super_dictionary=self.base_epjson,
object_dictionary={
'HVACTemplate:Zone:BaseboardHeat': {
'HVACTemplate:Zone:BaseboardHeat 1': {}
}
}
)
base_file_path = self.create_idf_file_from_epjson(epjson=self.base_epjson, file_name='base_pre_input.epJSON')
self.perform_full_comparison(base_idf_file_path=base_file_path)
epjson_output = self.ej._get_json_file(test_dir.joinpath('..', 'simulation', 'test', 'test_input_epjson.epJSON'))
return
@BaseSimulationTest._test_logger(doc_text="Simulation:Zone:BaseboardHeat:zone_heating_sizing_factor")
def test_zone_heating_sizing_factor(self):
self.base_epjson['HVACTemplate:Zone:BaseboardHeat']['HVACTemplate:Zone:BaseboardHeat 1']['zone_heating_sizing_factor'] = 1.2
base_file_path = self.create_idf_file_from_epjson(epjson=self.base_epjson, file_name='base_pre_input.epJSON')
self.perform_full_comparison(base_idf_file_path=base_file_path)
epjson_output = self.ej._get_json_file(test_dir.joinpath('..', 'simulation', 'test', 'test_input_epjson.epJSON'))
self.assertEqual(
1.2,
epjson_output['Sizing:Zone']['SPACE1-1 Sizing Zone']['zone_heating_sizing_factor'])
return
@BaseSimulationTest._test_logger(doc_text="Simulation:Zone:BaseboardHeat:baseboard_heating_type_electric")
def test_heating_type_electric(self):
self.base_epjson['HVACTemplate:Zone:BaseboardHeat']['HVACTemplate:Zone:BaseboardHeat 1']['baseboard_heating_type'] = 'Electric'
base_file_path = self.create_idf_file_from_epjson(epjson=self.base_epjson, file_name='base_pre_input.epJSON')
self.perform_full_comparison(base_idf_file_path=base_file_path)
epjson_output = self.ej._get_json_file(test_dir.joinpath('..', 'simulation', 'test', 'test_input_epjson.epJSON'))
self.assertEqual(
'SPACE1-1 Baseboard Heat',
list(epjson_output['ZoneHVAC:Baseboard:Convective:Electric'].keys())[0])
return
@BaseSimulationTest._test_logger(doc_text="Simulation:Zone:BaseboardHeat:baseboard_heating_availability_schedule_name")
def test_heating_availability_schedule_name(self):
self.base_epjson['HVACTemplate:Zone:BaseboardHeat']['HVACTemplate:Zone:BaseboardHeat 1'][
'baseboard_heating_availability_schedule_name'] = 'OCCUPY-1'
base_file_path = self.create_idf_file_from_epjson(epjson=self.base_epjson, file_name='base_pre_input.epJSON')
self.perform_full_comparison(base_idf_file_path=base_file_path)
epjson_output = self.ej._get_json_file(test_dir.joinpath('..', 'simulation', 'test', 'test_input_epjson.epJSON'))
self.assertEqual(
'OCCUPY-1',
epjson_output['ZoneHVAC:Baseboard:RadiantConvective:Water']['SPACE1-1 Baseboard Heat']['availability_schedule_name'])
return
@BaseSimulationTest._test_logger(doc_text="Simulation:Zone:BaseboardHeat:dedicated_outdoor_air_system_name")
def test_dedicated_outdoor_air_system_name(self):
self.ej.merge_epjson(
super_dictionary=self.base_epjson,
object_dictionary=doas_objects)
self.base_epjson['HVACTemplate:Zone:BaseboardHeat']['HVACTemplate:Zone:BaseboardHeat 1'][
'dedicated_outdoor_air_system_name'] = 'DOAS'
base_file_path = self.create_idf_file_from_epjson(epjson=self.base_epjson, file_name='base_pre_input.epJSON')
self.perform_full_comparison(base_idf_file_path=base_file_path)
return
@BaseSimulationTest._test_logger(doc_text="Simulation:Zone:BaseboardHeat:outdoor_air_method_flow_per_person")
def test_outdoor_air_method_flow_per_person(self):
# DOAS must be specified for OA options
self.ej.merge_epjson(
super_dictionary=self.base_epjson,
object_dictionary=doas_objects)
self.base_epjson['HVACTemplate:Zone:BaseboardHeat']['HVACTemplate:Zone:BaseboardHeat 1'][
'dedicated_outdoor_air_system_name'] = 'DOAS'
self.base_epjson['HVACTemplate:Zone:BaseboardHeat']['HVACTemplate:Zone:BaseboardHeat 1'][
'outdoor_air_method'] = 'Flow/Person'
self.base_epjson['HVACTemplate:Zone:BaseboardHeat']['HVACTemplate:Zone:BaseboardHeat 1'][
'outdoor_air_flow_rate_per_person'] = 0.01
base_file_path = self.create_idf_file_from_epjson(epjson=self.base_epjson, file_name='base_pre_input.epJSON')
self.perform_full_comparison(base_idf_file_path=base_file_path)
epjson_output = self.ej._get_json_file(test_dir.joinpath('..', 'simulation', 'test', 'test_input_epjson.epJSON'))
self.assertEqual(
'Flow/Person',
epjson_output['DesignSpecification:OutdoorAir']['SPACE1-1 SZ DSOA']['outdoor_air_method'])
self.assertEqual(
0.01,
epjson_output['DesignSpecification:OutdoorAir']['SPACE1-1 SZ DSOA']['outdoor_air_flow_per_person'])
return
@BaseSimulationTest._test_logger(doc_text="Simulation:Zone:BaseboardHeat:outdoor_air_method_flow_per_area")
def test_outdoor_air_method_flow_per_area(self):
# DOAS must be specified for OA options
self.ej.merge_epjson(
super_dictionary=self.base_epjson,
object_dictionary=doas_objects)
self.base_epjson['HVACTemplate:Zone:BaseboardHeat']['HVACTemplate:Zone:BaseboardHeat 1'][
'dedicated_outdoor_air_system_name'] = 'DOAS'
self.base_epjson['HVACTemplate:Zone:BaseboardHeat']['HVACTemplate:Zone:BaseboardHeat 1'][
'outdoor_air_method'] = 'Flow/Area'
self.base_epjson['HVACTemplate:Zone:BaseboardHeat']['HVACTemplate:Zone:BaseboardHeat 1'][
'outdoor_air_flow_rate_per_zone_floor_area'] = 0.0014
base_file_path = self.create_idf_file_from_epjson(epjson=self.base_epjson, file_name='base_pre_input.epJSON')
self.perform_full_comparison(base_idf_file_path=base_file_path)
epjson_output = self.ej._get_json_file(test_dir.joinpath('..', 'simulation', 'test', 'test_input_epjson.epJSON'))
self.assertEqual(
'Flow/Area',
epjson_output['DesignSpecification:OutdoorAir']['SPACE1-1 SZ DSOA']['outdoor_air_method'])
self.assertEqual(
0.0014,
epjson_output['DesignSpecification:OutdoorAir']['SPACE1-1 SZ DSOA']['outdoor_air_flow_per_zone_floor_area'])
return
@BaseSimulationTest._test_logger(doc_text="Simulation:Zone:BaseboardHeat:outdoor_air_method_flow_per_zone")
def test_outdoor_air_method_flow_per_zone(self):
# DOAS must be specified for OA options
self.ej.merge_epjson(
super_dictionary=self.base_epjson,
object_dictionary=doas_objects)
self.base_epjson['HVACTemplate:Zone:BaseboardHeat']['HVACTemplate:Zone:BaseboardHeat 1'][
'dedicated_outdoor_air_system_name'] = 'DOAS'
self.base_epjson['HVACTemplate:Zone:BaseboardHeat']['HVACTemplate:Zone:BaseboardHeat 1'][
'outdoor_air_method'] = 'Flow/Zone'
self.base_epjson['HVACTemplate:Zone:BaseboardHeat']['HVACTemplate:Zone:BaseboardHeat 1'][
'outdoor_air_flow_rate_per_zone'] = 0.01
base_file_path = self.create_idf_file_from_epjson(epjson=self.base_epjson, file_name='base_pre_input.epJSON')
self.perform_full_comparison(base_idf_file_path=base_file_path)
epjson_output = self.ej._get_json_file(test_dir.joinpath('..', 'simulation', 'test', 'test_input_epjson.epJSON'))
self.assertEqual(
'Flow/Zone',
epjson_output['DesignSpecification:OutdoorAir']['SPACE1-1 SZ DSOA']['outdoor_air_method'])
self.assertEqual(
0.01,
epjson_output['DesignSpecification:OutdoorAir']['SPACE1-1 SZ DSOA']['outdoor_air_flow_per_zone'])
return
# todo_eo: outdoor_air_method Sum and Maximum not tested since they just use the verified inputs. Discuss how to
# make a note of this in the testing logs
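    # For reference, a 'Sum' or 'Maximum' test would mirror the Flow/Zone test above with
    # 'outdoor_air_method' set to 'Sum' (or 'Maximum') and that value asserted on
    # 'SPACE1-1 SZ DSOA', assuming the method string passes through unchanged as noted above.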
@BaseSimulationTest._test_logger(doc_text="Simulation:Zone:BaseboardHeat:outdoor_air_method_detailed_specification")
def test_outdoor_air_method_detailed_specification(self):
# DOAS must be specified for OA options
self.ej.merge_epjson(
super_dictionary=self.base_epjson,
object_dictionary=dict(**doas_objects, **design_specification_objects))
self.base_epjson['HVACTemplate:Zone:BaseboardHeat']['HVACTemplate:Zone:BaseboardHeat 1'][
'dedicated_outdoor_air_system_name'] = 'DOAS'
self.base_epjson['HVACTemplate:Zone:BaseboardHeat']['HVACTemplate:Zone:BaseboardHeat 1'][
'outdoor_air_method'] = 'DetailedSpecification'
self.base_epjson['HVACTemplate:Zone:BaseboardHeat']['HVACTemplate:Zone:BaseboardHeat 1'][
'design_specification_outdoor_air_object_name'] = 'SPACE1-1 SZ DSOA Custom Object'
self.base_epjson['HVACTemplate:Zone:BaseboardHeat']['HVACTemplate:Zone:BaseboardHeat 1'][
'design_specification_zone_air_distribution_object_name'] = 'SPACE1-1 SZ DSZAD Custom Object'
base_file_path = self.create_idf_file_from_epjson(epjson=self.base_epjson, file_name='base_pre_input.epJSON')
self.perform_full_comparison(base_idf_file_path=base_file_path)
epjson_output = self.ej._get_json_file(test_dir.joinpath('..', 'simulation', 'test', 'test_input_epjson.epJSON'))
self.assertIsNotNone(epjson_output['DesignSpecification:OutdoorAir'].get('SPACE1-1 SZ DSOA Custom Object'))
self.assertIsNotNone(epjson_output['DesignSpecification:ZoneAirDistribution'].get('SPACE1-1 SZ DSZAD Custom Object'))
return
```
#### File: simulations/zone/test_zone_vrf.py
```python
from pathlib import Path
from tests.simulations import BaseSimulationTest
from src.epjson_handler import EPJSON
test_dir = Path(__file__).parent.parent.parent
design_specification_objects = {
"DesignSpecification:OutdoorAir": {
"SPACE1-1 SZ DSOA Custom Object": {
"outdoor_air_flow_per_zone": 0.01,
"outdoor_air_method": "Flow/Zone"
}
},
"DesignSpecification:ZoneAirDistribution": {
"SPACE1-1 SZ DSZAD Custom Object": {}
}
}
hot_water_loop_objects = {
"HVACTemplate:Plant:Boiler": {
"Main HW Boiler": {
"boiler_type": "HotWaterBoiler",
"capacity": "Autosize",
"efficiency": 0.8,
"fuel_type": "NaturalGas",
"priority": "1",
"template_plant_loop_type": "HotWater"
}
},
"HVACTemplate:Plant:HotWaterLoop": {
"Hot Water Loop": {
"hot_water_design_setpoint": 82,
"hot_water_plant_operation_scheme_type": "Default",
"hot_water_pump_configuration": "ConstantFlow",
"hot_water_pump_rated_head": 179352,
"hot_water_reset_outdoor_dry_bulb_high": 10,
"hot_water_reset_outdoor_dry_bulb_low": -6.7,
"hot_water_setpoint_at_outdoor_dry_bulb_high": 65.6,
"hot_water_setpoint_at_outdoor_dry_bulb_low": 82.2,
"hot_water_setpoint_reset_type": "OutdoorAirTemperatureReset",
"pump_control_type": "Intermittent"
}
}
}
doas_objects = {
"HVACTemplate:System:DedicatedOutdoorAir": {
"DOAS": {
"air_outlet_type": "DirectIntoZone",
"cooling_coil_design_setpoint": 12.8,
"cooling_coil_reset_outdoor_dry_bulb_high": 23.3,
"cooling_coil_reset_outdoor_dry_bulb_low": 15.6,
"cooling_coil_setpoint_at_outdoor_dry_bulb_high": 12.8,
"cooling_coil_setpoint_at_outdoor_dry_bulb_low": 15.6,
"cooling_coil_setpoint_control_type": "FixedSetpoint",
"cooling_coil_type": "TwoStageHumidityControlDX",
"dehumidification_control_type": "Multimode",
"dehumidification_setpoint": 0.00924,
"dx_cooling_coil_gross_rated_cop": 3,
"dx_cooling_coil_gross_rated_sensible_heat_ratio": "Autosize",
"dx_cooling_coil_gross_rated_total_capacity": "Autosize",
"gas_heating_coil_efficiency": 0.8,
"heat_recovery_frost_control_type": "None",
"heat_recovery_heat_exchanger_type": "Plate",
"heat_recovery_latent_effectiveness": 0.65,
"heat_recovery_sensible_effectiveness": 0.7,
"heat_recovery_type": "None",
"heating_coil_design_setpoint": 12.2,
"heating_coil_reset_outdoor_dry_bulb_high": 12.2,
"heating_coil_reset_outdoor_dry_bulb_low": 7.8,
"heating_coil_setpoint_at_outdoor_dry_bulb_high": 12.2,
"heating_coil_setpoint_at_outdoor_dry_bulb_low": 15,
"heating_coil_setpoint_control_type": "FixedSetpoint",
"heating_coil_type": "Gas",
"humidifier_constant_setpoint": 0.003,
"humidifier_rated_capacity": 1e-06,
"humidifier_rated_electric_power": 2690,
"humidifier_type": "None",
"supply_fan_delta_pressure": 1000,
"supply_fan_flow_rate": "Autosize",
"supply_fan_motor_efficiency": 0.9,
"supply_fan_motor_in_air_stream_fraction": 1,
"supply_fan_placement": "BlowThrough",
"supply_fan_total_efficiency": 0.7,
"system_availability_schedule_name": "OCCUPY-1"
}
}
}
class TestSimulationsZoneVRF(BaseSimulationTest):
def setUp(self):
self.ej = EPJSON()
base_idf_file_path = test_dir.joinpath('..', 'simulation', 'ExampleFiles', 'HVACTemplate-5ZoneVRF.idf')
base_copy_file_path = self._copy_to_test_directory(base_idf_file_path)
# read in base file, then edit inputs for alternate tests
self.base_epjson = self.get_epjson_object_from_idf_file(base_copy_file_path)
self.base_epjson.pop('Output:Variable')
return
def teardown(self):
return
@BaseSimulationTest._test_logger(doc_text="Simulation:Zone:VRF:test_minimum_inputs")
def test_minimum_inputs(self):
# todo_eo: legacy fails on conversion from idf to epjson because
# Missing required property 'gross_rated_sensible_heat_ratio'.
# \r\n<root>[Coil:Cooling:DX:VariableRefrigerantFlow][SPACE1-1 VRF Cooling Coil] - Missing required property
# 'gross_rated_total_cooling_capacity'.\r\n<root>[Coil:Heating:DX:VariableRefrigerantFlow][SPACE1-1 VRF
# Heating Coil] - Missing required property 'gross_rated_heating_capacity'.\r\nErrors occurred when
# validating input file
self.base_epjson['HVACTemplate:Zone:VRF'].pop('HVACTemplate:Zone:VRF 1')
self.ej.merge_epjson(
super_dictionary=self.base_epjson,
object_dictionary={
'HVACTemplate:Zone:VRF': {
'HVACTemplate:Zone:VRF 1': {
"template_thermostat_name": "All Zones",
"template_vrf_system_name": "VRF Sys 1 Water Source",
"zone_name": "SPACE1-1"
}
}
}
)
base_file_path = self.create_idf_file_from_epjson(epjson=self.base_epjson, file_name='base_pre_input.epJSON')
self.perform_full_comparison(base_idf_file_path=base_file_path)
return
@BaseSimulationTest._test_logger(doc_text="Simulation:Zone:VRF:test_minimum_inputs_doas")
def test_minimum_inputs_doas(self):
# todo_eo: legacy fails on conversion from idf to epjson because
# Missing required property 'gross_rated_sensible_heat_ratio'.
# \r\n<root>[Coil:Cooling:DX:VariableRefrigerantFlow][SPACE1-1 VRF Cooling Coil] - Missing required property
# 'gross_rated_total_cooling_capacity'.\r\n<root>[Coil:Heating:DX:VariableRefrigerantFlow][SPACE1-1 VRF
# Heating Coil] - Missing required property 'gross_rated_heating_capacity'.\r\nErrors occurred when
# validating input file
self.base_epjson['HVACTemplate:Zone:VRF'].pop('HVACTemplate:Zone:VRF 1')
self.ej.merge_epjson(
super_dictionary=self.base_epjson,
object_dictionary={
'HVACTemplate:Zone:VRF': {
'HVACTemplate:Zone:VRF 1': {
"dedicated_outdoor_air_system_name": "DOAS",
"template_thermostat_name": "All Zones",
"zone_name": "SPACE1-1"
}
},
'HVACTemplate:System:DedicatedOutdoorAir': {'DOAS': {
'cooling_coil_type': 'TwoSpeedDX',
'heating_coil_type': 'Electric'
}}
}
)
base_file_path = self.create_idf_file_from_epjson(epjson=self.base_epjson, file_name='base_pre_input.epJSON')
self.perform_full_comparison(base_idf_file_path=base_file_path)
return
@BaseSimulationTest._test_logger(doc_text="Simulation:Zone:VRF:zone_heating_sizing_factor")
def test_zone_heating_sizing_factor(self):
self.base_epjson['HVACTemplate:Zone:VRF']['HVACTemplate:Zone:VRF 1']['zone_heating_sizing_factor'] = 1.2
base_file_path = self.create_idf_file_from_epjson(epjson=self.base_epjson, file_name='base_pre_input.epJSON')
self.perform_full_comparison(base_idf_file_path=base_file_path)
epjson_output = self.ej._get_json_file(test_dir.joinpath('..', 'simulation', 'test', 'test_input_epjson.epJSON'))
self.assertEqual(
1.2,
epjson_output['Sizing:Zone']['SPACE1-1 Sizing Zone']['zone_heating_sizing_factor'])
return
@BaseSimulationTest._test_logger(doc_text="Simulation:Zone:VRF:zone_heating_sizing_factor")
def test_zone_cooling_sizing_factor(self):
self.base_epjson['HVACTemplate:Zone:VRF']['HVACTemplate:Zone:VRF 1']['zone_cooling_sizing_factor'] = 1.2
base_file_path = self.create_idf_file_from_epjson(epjson=self.base_epjson, file_name='base_pre_input.epJSON')
self.perform_full_comparison(base_idf_file_path=base_file_path)
epjson_output = self.ej._get_json_file(test_dir.joinpath('..', 'simulation', 'test', 'test_input_epjson.epJSON'))
self.assertEqual(
1.2,
epjson_output['Sizing:Zone']['SPACE1-1 Sizing Zone']['zone_cooling_sizing_factor'])
return
@BaseSimulationTest._test_logger(doc_text="Simulation:Zone:VRF:rated_total_heating_capacity_sizing_ratio")
def test_rated_total_heating_capacity_sizing_ratio(self):
self.base_epjson['HVACTemplate:Zone:VRF']['HVACTemplate:Zone:VRF 1'][
'rated_total_heating_capacity_sizing_ratio'] = 1.2
base_file_path = self.create_idf_file_from_epjson(epjson=self.base_epjson, file_name='base_pre_input.epJSON')
self.perform_full_comparison(base_idf_file_path=base_file_path)
epjson_output = self.ej._get_json_file(test_dir.joinpath('..', 'simulation', 'test', 'test_input_epjson.epJSON'))
self.assertEqual(
1.2,
epjson_output['ZoneHVAC:TerminalUnit:VariableRefrigerantFlow']['SPACE1-1 VRF Terminal Unit']['rated_heating_capacity_sizing_ratio'])
return
@BaseSimulationTest._test_logger(doc_text="Simulation:Zone:VRF:cooling_supply_air_flow_rate")
def test_cooling_supply_air_flow_rate(self):
self.base_epjson['HVACTemplate:Zone:VRF']['HVACTemplate:Zone:VRF 1'][
'cooling_supply_air_flow_rate'] = 0.1
base_file_path = self.create_idf_file_from_epjson(epjson=self.base_epjson, file_name='base_pre_input.epJSON')
self.perform_full_comparison(base_idf_file_path=base_file_path)
epjson_output = self.ej._get_json_file(test_dir.joinpath('..', 'simulation', 'test', 'test_input_epjson.epJSON'))
self.assertEqual(
0.1,
epjson_output['Sizing:Zone']['SPACE1-1 Sizing Zone']['cooling_design_air_flow_rate'])
self.assertEqual(
'Flow/Zone',
epjson_output['Sizing:Zone']['SPACE1-1 Sizing Zone']['cooling_design_air_flow_method'])
return
@BaseSimulationTest._test_logger(doc_text="Simulation:Zone:VRF:no_cooling_supply_air_flow_rate")
def test_no_cooling_supply_air_flow_rate(self):
self.base_epjson['HVACTemplate:Zone:VRF']['HVACTemplate:Zone:VRF 1'][
'no_cooling_supply_air_flow_rate'] = 0.1
base_file_path = self.create_idf_file_from_epjson(epjson=self.base_epjson, file_name='base_pre_input.epJSON')
self.perform_full_comparison(base_idf_file_path=base_file_path)
epjson_output = self.ej._get_json_file(test_dir.joinpath('..', 'simulation', 'test', 'test_input_epjson.epJSON'))
self.assertEqual(
0.1,
epjson_output['ZoneHVAC:TerminalUnit:VariableRefrigerantFlow']['SPACE1-1 VRF Terminal Unit']['no_cooling_supply_air_flow_rate'])
return
@BaseSimulationTest._test_logger(doc_text="Simulation:Zone:VRF:heating_supply_air_flow_rate")
def test_heating_supply_air_flow_rate(self):
self.base_epjson['HVACTemplate:Zone:VRF']['HVACTemplate:Zone:VRF 1'][
'heating_supply_air_flow_rate'] = 0.1
base_file_path = self.create_idf_file_from_epjson(epjson=self.base_epjson, file_name='base_pre_input.epJSON')
self.perform_full_comparison(base_idf_file_path=base_file_path)
epjson_output = self.ej._get_json_file(test_dir.joinpath('..', 'simulation', 'test', 'test_input_epjson.epJSON'))
self.assertEqual(
0.1,
epjson_output['Sizing:Zone']['SPACE1-1 Sizing Zone']['heating_design_air_flow_rate'])
self.assertEqual(
'Flow/Zone',
epjson_output['Sizing:Zone']['SPACE1-1 Sizing Zone']['heating_design_air_flow_method'])
return
@BaseSimulationTest._test_logger(doc_text="Simulation:Zone:VRF:no_heating_supply_air_flow_rate")
def test_no_heating_supply_air_flow_rate(self):
self.base_epjson['HVACTemplate:Zone:VRF']['HVACTemplate:Zone:VRF 1'][
'no_heating_supply_air_flow_rate'] = 0.1
base_file_path = self.create_idf_file_from_epjson(epjson=self.base_epjson, file_name='base_pre_input.epJSON')
self.perform_full_comparison(base_idf_file_path=base_file_path)
epjson_output = self.ej._get_json_file(test_dir.joinpath('..', 'simulation', 'test', 'test_input_epjson.epJSON'))
self.assertEqual(
0.1,
epjson_output['ZoneHVAC:TerminalUnit:VariableRefrigerantFlow']['SPACE1-1 VRF Terminal Unit']['no_heating_supply_air_flow_rate'])
return
@BaseSimulationTest._test_logger(doc_text="Simulation:Zone:VRF:cooling_outdoor_air_flow_rate")
def test_cooling_outdoor_air_flow_rate(self):
# todo_eo: does not appear that zonehvac object gets value applied
self.base_epjson['HVACTemplate:Zone:VRF']['HVACTemplate:Zone:VRF 1'][
'cooling_outdoor_air_flow_rate'] = 0.1
base_file_path = self.create_idf_file_from_epjson(epjson=self.base_epjson, file_name='base_pre_input.epJSON')
self.perform_full_comparison(base_idf_file_path=base_file_path)
epjson_output = self.ej._get_json_file(test_dir.joinpath('..', 'simulation', 'test', 'test_input_epjson.epJSON'))
self.assertEqual(
0.1,
epjson_output['ZoneHVAC:TerminalUnit:VariableRefrigerantFlow']['SPACE1-1 VRF Terminal Unit'][
'cooling_outdoor_air_flow_rate'])
return
@BaseSimulationTest._test_logger(doc_text="Simulation:Zone:VRF:heating_outdoor_air_flow_rate")
def test_heating_outdoor_air_flow_rate(self):
# todo_eo: ZoneHVAC:TerminalUnit:VariableRefrigerantFlow heating outdoor air flow rate not set in legacy.
self.base_epjson['HVACTemplate:Zone:VRF']['HVACTemplate:Zone:VRF 1'][
'heating_outdoor_air_flow_rate'] = 0.1
base_file_path = self.create_idf_file_from_epjson(epjson=self.base_epjson, file_name='base_pre_input.epJSON')
self.perform_full_comparison(base_idf_file_path=base_file_path)
epjson_output = self.ej._get_json_file(test_dir.joinpath('..', 'simulation', 'test', 'test_input_epjson.epJSON'))
self.assertEqual(
0.1,
epjson_output['ZoneHVAC:TerminalUnit:VariableRefrigerantFlow']['SPACE1-1 VRF Terminal Unit'][
'heating_outdoor_air_flow_rate'])
return
@BaseSimulationTest._test_logger(doc_text="Simulation:Zone:VRF:no_load_outdoor_air_flow_rate")
def test_no_load_outdoor_air_flow_rate(self):
# todo_eo: value is not mapping to anything in legacy
self.base_epjson['HVACTemplate:Zone:VRF']['HVACTemplate:Zone:VRF 1'][
'no_load_outdoor_air_flow_rate'] = 0.1
base_file_path = self.create_idf_file_from_epjson(epjson=self.base_epjson, file_name='base_pre_input.epJSON')
self.perform_full_comparison(base_idf_file_path=base_file_path)
epjson_output = self.ej._get_json_file(test_dir.joinpath('..', 'simulation', 'test', 'test_input_epjson.epJSON'))
self.assertEqual(
0.1,
epjson_output['ZoneHVAC:TerminalUnit:VariableRefrigerantFlow']['SPACE1-1 VRF Terminal Unit'][
'no_load_outdoor_air_flow_rate'])
return
@BaseSimulationTest._test_logger(doc_text="Simulation:Zone:VRF:outdoor_air_method_flow_per_person")
def test_outdoor_air_method_flow_per_person(self):
self.base_epjson['HVACTemplate:Zone:VRF']['HVACTemplate:Zone:VRF 1'][
'outdoor_air_method'] = 'Flow/Person'
self.base_epjson['HVACTemplate:Zone:VRF']['HVACTemplate:Zone:VRF 1'][
'outdoor_air_flow_rate_per_person'] = 0.01
base_file_path = self.create_idf_file_from_epjson(epjson=self.base_epjson, file_name='base_pre_input.epJSON')
self.perform_full_comparison(base_idf_file_path=base_file_path)
epjson_output = self.ej._get_json_file(test_dir.joinpath('..', 'simulation', 'test', 'test_input_epjson.epJSON'))
self.assertEqual(
'Flow/Person',
epjson_output['DesignSpecification:OutdoorAir']['SPACE1-1 SZ DSOA']['outdoor_air_method'])
self.assertEqual(
0.01,
epjson_output['DesignSpecification:OutdoorAir']['SPACE1-1 SZ DSOA']['outdoor_air_flow_per_person'])
return
@BaseSimulationTest._test_logger(doc_text="Simulation:Zone:VRF:outdoor_air_method_flow_per_area")
def test_outdoor_air_method_flow_per_area(self):
self.base_epjson['HVACTemplate:Zone:VRF']['HVACTemplate:Zone:VRF 1'][
'outdoor_air_method'] = 'Flow/Area'
self.base_epjson['HVACTemplate:Zone:VRF']['HVACTemplate:Zone:VRF 1'][
'outdoor_air_flow_rate_per_zone_floor_area'] = 0.0014
base_file_path = self.create_idf_file_from_epjson(epjson=self.base_epjson, file_name='base_pre_input.epJSON')
self.perform_full_comparison(base_idf_file_path=base_file_path)
epjson_output = self.ej._get_json_file(test_dir.joinpath('..', 'simulation', 'test', 'test_input_epjson.epJSON'))
self.assertEqual(
'Flow/Area',
epjson_output['DesignSpecification:OutdoorAir']['SPACE1-1 SZ DSOA']['outdoor_air_method'])
self.assertEqual(
0.0014,
epjson_output['DesignSpecification:OutdoorAir']['SPACE1-1 SZ DSOA']['outdoor_air_flow_per_zone_floor_area'])
return
@BaseSimulationTest._test_logger(doc_text="Simulation:Zone:VRF:outdoor_air_method_flow_per_zone")
def test_outdoor_air_method_flow_per_zone(self):
self.base_epjson['HVACTemplate:Zone:VRF']['HVACTemplate:Zone:VRF 1'][
'outdoor_air_method'] = 'Flow/Zone'
self.base_epjson['HVACTemplate:Zone:VRF']['HVACTemplate:Zone:VRF 1'][
'outdoor_air_flow_rate_per_zone'] = 0.01
base_file_path = self.create_idf_file_from_epjson(epjson=self.base_epjson, file_name='base_pre_input.epJSON')
self.perform_full_comparison(base_idf_file_path=base_file_path)
epjson_output = self.ej._get_json_file(test_dir.joinpath('..', 'simulation', 'test', 'test_input_epjson.epJSON'))
self.assertEqual(
'Flow/Zone',
epjson_output['DesignSpecification:OutdoorAir']['SPACE1-1 SZ DSOA']['outdoor_air_method'])
self.assertEqual(
0.01,
epjson_output['DesignSpecification:OutdoorAir']['SPACE1-1 SZ DSOA']['outdoor_air_flow_per_zone'])
return
@BaseSimulationTest._test_logger(doc_text="Simulation:Zone:VRF:outdoor_air_method_detailed_specification")
def test_outdoor_air_method_detailed_specification(self):
self.ej.merge_epjson(
super_dictionary=self.base_epjson,
object_dictionary=design_specification_objects)
self.base_epjson['HVACTemplate:Zone:VRF']['HVACTemplate:Zone:VRF 1'][
'outdoor_air_method'] = 'DetailedSpecification'
self.base_epjson['HVACTemplate:Zone:VRF']['HVACTemplate:Zone:VRF 1'][
'design_specification_outdoor_air_object_name'] = 'SPACE1-1 SZ DSOA Custom Object'
self.base_epjson['HVACTemplate:Zone:VRF']['HVACTemplate:Zone:VRF 1'][
'design_specification_zone_air_distribution_object_name'] = 'SPACE1-1 SZ DSZAD Custom Object'
base_file_path = self.create_idf_file_from_epjson(epjson=self.base_epjson, file_name='base_pre_input.epJSON')
self.perform_full_comparison(base_idf_file_path=base_file_path)
epjson_output = self.ej._get_json_file(test_dir.joinpath('..', 'simulation', 'test', 'test_input_epjson.epJSON'))
self.assertIsNotNone(epjson_output['DesignSpecification:OutdoorAir'].get('SPACE1-1 SZ DSOA Custom Object'))
self.assertIsNotNone(epjson_output['DesignSpecification:ZoneAirDistribution'].get('SPACE1-1 SZ DSZAD Custom Object'))
return
@BaseSimulationTest._test_logger(doc_text="Simulation:Zone:VRF:system_availability_schedule_name")
def test_system_availability_schedule_name(self):
self.base_epjson['HVACTemplate:Zone:VRF']['HVACTemplate:Zone:VRF 1'][
'system_availability_schedule_name'] = 'OCCUPY-1'
base_file_path = self.create_idf_file_from_epjson(epjson=self.base_epjson, file_name='base_pre_input.epJSON')
self.perform_full_comparison(base_idf_file_path=base_file_path)
epjson_output = self.ej._get_json_file(test_dir.joinpath('..', 'simulation', 'test', 'test_input_epjson.epJSON'))
self.assertEqual(
'OCCUPY-1',
epjson_output['ZoneHVAC:TerminalUnit:VariableRefrigerantFlow']['SPACE1-1 VRF Terminal Unit']['terminal_unit_availability_schedule'])
return
@BaseSimulationTest._test_logger(doc_text="Simulation:Zone:VRF:supply_fan_operating_mode_schedule_name")
def test_supply_fan_operating_mode_schedule_name(self):
# todo_eo: schedule does not appear to be mapped to the ZoneHVAC:equipmentconnections even though the same
# name is used. discuss with team if this should be removed for this template only. Note, generally removing
# this transition will cause similar tests for pthp and ptac to fail
self.base_epjson['HVACTemplate:Zone:VRF']['HVACTemplate:Zone:VRF 1'][
'supply_fan_operating_mode_schedule_name'] = 'OCCUPY-1'
base_file_path = self.create_idf_file_from_epjson(epjson=self.base_epjson, file_name='base_pre_input.epJSON')
self.perform_full_comparison(base_idf_file_path=base_file_path)
epjson_output = self.ej._get_json_file(test_dir.joinpath('..', 'simulation', 'test', 'test_input_epjson.epJSON'))
self.assertEqual(
'OCCUPY-1',
epjson_output['ZoneHVAC:TerminalUnit:VariableRefrigerantFlow']['SPACE1-1 VRF Terminal Unit'][
'supply_air_fan_operating_mode_schedule_name'])
return
@BaseSimulationTest._test_logger(doc_text="Simulation:Zone:VRF:supply_air_fan_placement")
def test_supply_air_fan_placement_blow_through(self):
self.base_epjson['HVACTemplate:Zone:VRF']['HVACTemplate:Zone:VRF 1'][
'supply_air_fan_placement'] = 'BlowThrough'
base_file_path = self.create_idf_file_from_epjson(epjson=self.base_epjson, file_name='base_pre_input.epJSON')
self.perform_full_comparison(base_idf_file_path=base_file_path)
epjson_output = self.ej._get_json_file(test_dir.joinpath('..', 'simulation', 'test', 'test_input_epjson.epJSON'))
self.assertEqual(
'BlowThrough',
epjson_output['ZoneHVAC:TerminalUnit:VariableRefrigerantFlow']['SPACE1-1 VRF Terminal Unit'][
'supply_air_fan_placement'])
return
@BaseSimulationTest._test_logger(doc_text="Simulation:Zone:VRF:supply_air_fan_placement")
def test_supply_air_fan_placement_draw_through(self):
self.base_epjson['HVACTemplate:Zone:VRF']['HVACTemplate:Zone:VRF 1'][
'supply_air_fan_placement'] = 'DrawThrough'
base_file_path = self.create_idf_file_from_epjson(epjson=self.base_epjson, file_name='base_pre_input.epJSON')
self.perform_full_comparison(base_idf_file_path=base_file_path)
epjson_output = self.ej._get_json_file(test_dir.joinpath('..', 'simulation', 'test', 'test_input_epjson.epJSON'))
self.assertEqual(
'DrawThrough',
epjson_output['ZoneHVAC:TerminalUnit:VariableRefrigerantFlow']['SPACE1-1 VRF Terminal Unit'][
'supply_air_fan_placement'])
return
@BaseSimulationTest._test_logger(doc_text="Simulation:Zone:VRF:supply_fan_total_efficiency")
def test_supply_fan_total_efficiency(self):
self.base_epjson['HVACTemplate:Zone:VRF']['HVACTemplate:Zone:VRF 1'][
'supply_fan_total_efficiency'] = 0.65
base_file_path = self.create_idf_file_from_epjson(epjson=self.base_epjson, file_name='base_pre_input.epJSON')
self.perform_full_comparison(base_idf_file_path=base_file_path)
epjson_output = self.ej._get_json_file(test_dir.joinpath('..', 'simulation', 'test', 'test_input_epjson.epJSON'))
self.assertEqual(
0.65,
epjson_output['Fan:OnOff']['SPACE1-1 Supply Fan']['fan_total_efficiency'])
return
@BaseSimulationTest._test_logger(doc_text="Simulation:Zone:VRF:supply_fan_delta_pressure")
def test_supply_fan_delta_pressure(self):
self.base_epjson['HVACTemplate:Zone:VRF']['HVACTemplate:Zone:VRF 1'][
'supply_fan_delta_pressure'] = 65
base_file_path = self.create_idf_file_from_epjson(epjson=self.base_epjson, file_name='base_pre_input.epJSON')
self.perform_full_comparison(base_idf_file_path=base_file_path)
epjson_output = self.ej._get_json_file(test_dir.joinpath('..', 'simulation', 'test', 'test_input_epjson.epJSON'))
self.assertEqual(
65,
epjson_output['Fan:OnOff']['SPACE1-1 Supply Fan']['pressure_rise'])
return
@BaseSimulationTest._test_logger(doc_text="Simulation:Zone:VRF:supply_fan_motor_efficiency")
def test_supply_fan_motor_efficiency(self):
self.base_epjson['HVACTemplate:Zone:VRF']['HVACTemplate:Zone:VRF 1'][
'supply_fan_motor_efficiency'] = 0.85
base_file_path = self.create_idf_file_from_epjson(epjson=self.base_epjson, file_name='base_pre_input.epJSON')
self.perform_full_comparison(base_idf_file_path=base_file_path)
epjson_output = self.ej._get_json_file(test_dir.joinpath('..', 'simulation', 'test', 'test_input_epjson.epJSON'))
self.assertEqual(
0.85,
epjson_output['Fan:OnOff']['SPACE1-1 Supply Fan']['motor_efficiency'])
return
@BaseSimulationTest._test_logger(doc_text="Simulation:Zone:VRF:cooling_coil_type_variable_refrigerant_flow_dx")
def test_cooling_coil_type_variable_refrigerant_flow_dx(self):
self.base_epjson['HVACTemplate:Zone:VRF']['HVACTemplate:Zone:VRF 1'][
'cooling_coil_type'] = 'VariableRefrigerantFlowDX'
base_file_path = self.create_idf_file_from_epjson(epjson=self.base_epjson, file_name='base_pre_input.epJSON')
self.perform_full_comparison(base_idf_file_path=base_file_path)
epjson_output = self.ej._get_json_file(test_dir.joinpath('..', 'simulation', 'test', 'test_input_epjson.epJSON'))
self.assertEqual(
'Coil:Cooling:DX:VariableRefrigerantFlow',
epjson_output['ZoneHVAC:TerminalUnit:VariableRefrigerantFlow']['SPACE1-1 VRF Terminal Unit'][
'cooling_coil_object_type'])
return
@BaseSimulationTest._test_logger(doc_text="Simulation:Zone:VRF:cooling_coil_type_variable_refrigerant_flow_dx")
def test_cooling_coil_type_none(self):
self.base_epjson['HVACTemplate:Zone:VRF']['HVACTemplate:Zone:VRF 1'][
'cooling_coil_type'] = 'None'
base_file_path = self.create_idf_file_from_epjson(epjson=self.base_epjson, file_name='base_pre_input.epJSON')
self.perform_full_comparison(base_idf_file_path=base_file_path)
epjson_output = self.ej._get_json_file(test_dir.joinpath('..', 'simulation', 'test', 'test_input_epjson.epJSON'))
self.assertIsNone(
epjson_output['ZoneHVAC:TerminalUnit:VariableRefrigerantFlow']['SPACE1-1 VRF Terminal Unit'].get(
'cooling_coil_object_type'))
return
@BaseSimulationTest._test_logger(doc_text="Simulation:Zone:VRF:cooling_coil_availability_schedule_name")
def test_cooling_coil_availability_schedule_name(self):
self.base_epjson['HVACTemplate:Zone:VRF']['HVACTemplate:Zone:VRF 1'][
'cooling_coil_availability_schedule_name'] = 'OCCUPY-1'
base_file_path = self.create_idf_file_from_epjson(epjson=self.base_epjson, file_name='base_pre_input.epJSON')
self.perform_full_comparison(base_idf_file_path=base_file_path)
epjson_output = self.ej._get_json_file(test_dir.joinpath('..', 'simulation', 'test', 'test_input_epjson.epJSON'))
self.assertEqual(
'OCCUPY-1',
epjson_output['Coil:Cooling:DX:VariableRefrigerantFlow']['SPACE1-1 VRF Cooling Coil'][
'availability_schedule_name'])
return
@BaseSimulationTest._test_logger(doc_text="Simulation:Zone:VRF:cooling_coil_gross_rated_total_capacity")
def test_cooling_coil_gross_rated_total_capacity(self):
self.base_epjson['HVACTemplate:Zone:VRF']['HVACTemplate:Zone:VRF 1'][
'cooling_coil_gross_rated_total_capacity'] = 100
base_file_path = self.create_idf_file_from_epjson(epjson=self.base_epjson, file_name='base_pre_input.epJSON')
self.perform_full_comparison(base_idf_file_path=base_file_path)
epjson_output = self.ej._get_json_file(test_dir.joinpath('..', 'simulation', 'test', 'test_input_epjson.epJSON'))
self.assertEqual(
100,
epjson_output['Coil:Cooling:DX:VariableRefrigerantFlow']['SPACE1-1 VRF Cooling Coil'][
'gross_rated_total_cooling_capacity'])
return
@BaseSimulationTest._test_logger(doc_text="Simulation:Zone:VRF:cooling_coil_gross_rated_sensible_heat_ratio")
def test_cooling_coil_gross_rated_sensible_heat_ratio(self):
self.base_epjson['HVACTemplate:Zone:VRF']['HVACTemplate:Zone:VRF 1'][
'cooling_coil_gross_rated_sensible_heat_ratio'] = 0.65
base_file_path = self.create_idf_file_from_epjson(epjson=self.base_epjson, file_name='base_pre_input.epJSON')
self.perform_full_comparison(base_idf_file_path=base_file_path)
epjson_output = self.ej._get_json_file(test_dir.joinpath('..', 'simulation', 'test', 'test_input_epjson.epJSON'))
self.assertEqual(
0.65,
epjson_output['Coil:Cooling:DX:VariableRefrigerantFlow']['SPACE1-1 VRF Cooling Coil'][
'gross_rated_sensible_heat_ratio'])
return
@BaseSimulationTest._test_logger(doc_text="Simulation:Zone:VRF:heat_pump_heating_coil_type_variable_refrigerant_flow_dx")
def test_heat_pump_heating_coil_type_variable_refrigerant_flow_dx(self):
self.base_epjson['HVACTemplate:Zone:VRF']['HVACTemplate:Zone:VRF 1'][
'heat_pump_heating_coil_type'] = 'VariableRefrigerantFlowDX'
base_file_path = self.create_idf_file_from_epjson(epjson=self.base_epjson, file_name='base_pre_input.epJSON')
self.perform_full_comparison(base_idf_file_path=base_file_path)
epjson_output = self.ej._get_json_file(test_dir.joinpath('..', 'simulation', 'test', 'test_input_epjson.epJSON'))
self.assertEqual(
'Coil:Heating:DX:VariableRefrigerantFlow',
epjson_output['ZoneHVAC:TerminalUnit:VariableRefrigerantFlow']['SPACE1-1 VRF Terminal Unit'][
'heating_coil_object_type'])
return
@BaseSimulationTest._test_logger(doc_text="Simulation:Zone:VRF:heat_pump_heating_coil_type_variable_refrigerant_flow_dx")
def test_heat_pump_heating_coil_type_none(self):
self.base_epjson['HVACTemplate:Zone:VRF']['HVACTemplate:Zone:VRF 1'][
'heat_pump_heating_coil_type'] = 'None'
base_file_path = self.create_idf_file_from_epjson(epjson=self.base_epjson, file_name='base_pre_input.epJSON')
self.perform_full_comparison(base_idf_file_path=base_file_path)
epjson_output = self.ej._get_json_file(test_dir.joinpath('..', 'simulation', 'test', 'test_input_epjson.epJSON'))
self.assertIsNone(
epjson_output['ZoneHVAC:TerminalUnit:VariableRefrigerantFlow']['SPACE1-1 VRF Terminal Unit'].get(
'heating_coil_object_type'))
return
@BaseSimulationTest._test_logger(doc_text="Simulation:Zone:VRF:heat_pump_heating_coil_availability_schedule_name")
def test_heat_pump_heating_coil_availability_schedule_name(self):
self.base_epjson['HVACTemplate:Zone:VRF']['HVACTemplate:Zone:VRF 1'][
'heat_pump_heating_coil_availability_schedule_name'] = 'OCCUPY-1'
base_file_path = self.create_idf_file_from_epjson(epjson=self.base_epjson, file_name='base_pre_input.epJSON')
self.perform_full_comparison(base_idf_file_path=base_file_path)
epjson_output = self.ej._get_json_file(test_dir.joinpath('..', 'simulation', 'test', 'test_input_epjson.epJSON'))
self.assertEqual(
'OCCUPY-1',
epjson_output['Coil:Heating:DX:VariableRefrigerantFlow']['SPACE1-1 VRF Heating Coil'][
'availability_schedule'])
return
@BaseSimulationTest._test_logger(doc_text="Simulation:Zone:VRF:heat_pump_heating_coil_gross_rated_total_capacity")
def test_heat_pump_heating_coil_gross_rated_capacity(self):
self.base_epjson['HVACTemplate:Zone:VRF']['HVACTemplate:Zone:VRF 1'][
'heat_pump_heating_coil_gross_rated_capacity'] = 100
base_file_path = self.create_idf_file_from_epjson(epjson=self.base_epjson, file_name='base_pre_input.epJSON')
self.perform_full_comparison(base_idf_file_path=base_file_path)
epjson_output = self.ej._get_json_file(test_dir.joinpath('..', 'simulation', 'test', 'test_input_epjson.epJSON'))
self.assertEqual(
100,
epjson_output['Coil:Heating:DX:VariableRefrigerantFlow']['SPACE1-1 VRF Heating Coil'][
'gross_rated_heating_capacity'])
return
@BaseSimulationTest._test_logger(doc_text="Simulation:Zone:VRF:dedicated_outdoor_air_system_name")
def test_dedicated_outdoor_air_system_name(self):
self.ej.merge_epjson(
super_dictionary=self.base_epjson,
object_dictionary=doas_objects)
self.base_epjson['HVACTemplate:Zone:VRF']['HVACTemplate:Zone:VRF 1'][
'dedicated_outdoor_air_system_name'] = 'DOAS'
base_file_path = self.create_idf_file_from_epjson(epjson=self.base_epjson, file_name='base_pre_input.epJSON')
self.perform_full_comparison(base_idf_file_path=base_file_path)
epjson_output = self.ej._get_json_file(test_dir.joinpath('..', 'simulation', 'test', 'test_input_epjson.epJSON'))
self.assertIsNotNone(epjson_output['AirLoopHVAC'].get('DOAS'))
return
@BaseSimulationTest._test_logger(doc_text="Simulation:Zone:VRF:"
"zone_cooling_design_supply_air_temperature_input_method_"
"supply_air_temperature")
def test_zone_cooling_design_supply_air_temperature_input_method_supply_air_temperature(self):
self.base_epjson['HVACTemplate:Zone:VRF']['HVACTemplate:Zone:VRF 1'][
'zone_cooling_design_supply_air_temperature'] = 13.0
self.base_epjson['HVACTemplate:Zone:VRF']['HVACTemplate:Zone:VRF 1'][
'zone_cooling_design_supply_air_temperature_input_method'] = "SupplyAirTemperature"
base_file_path = self.create_idf_file_from_epjson(epjson=self.base_epjson, file_name='base_pre_input.epJSON')
self.perform_full_comparison(base_idf_file_path=base_file_path)
epjson_output = self.ej._get_json_file(test_dir.joinpath('..', 'simulation', 'test', 'test_input_epjson.epJSON'))
self.assertEqual(
13.0,
epjson_output['Sizing:Zone']['SPACE1-1 Sizing Zone']['zone_cooling_design_supply_air_temperature'])
return
@BaseSimulationTest._test_logger(doc_text="Simulation:Zone:VRF:"
"zone_cooling_design_supply_air_temperature_input_method_"
"temperature_difference")
def test_zone_cooling_design_supply_air_temperature_input_method_temperature_difference(self):
self.base_epjson['HVACTemplate:Zone:VRF']['HVACTemplate:Zone:VRF 1'][
'zone_cooling_design_supply_air_temperature_difference'] = 11.5
self.base_epjson['HVACTemplate:Zone:VRF']['HVACTemplate:Zone:VRF 1'][
'zone_cooling_design_supply_air_temperature_input_method'] = "TemperatureDifference"
base_file_path = self.create_idf_file_from_epjson(epjson=self.base_epjson, file_name='base_pre_input.epJSON')
self.perform_full_comparison(base_idf_file_path=base_file_path)
epjson_output = self.ej._get_json_file(test_dir.joinpath('..', 'simulation', 'test', 'test_input_epjson.epJSON'))
self.assertEqual(
11.5,
epjson_output['Sizing:Zone']['SPACE1-1 Sizing Zone']['zone_cooling_design_supply_air_temperature_difference'])
return
@BaseSimulationTest._test_logger(doc_text="Simulation:Zone:VRF:"
"zone_heating_design_supply_air_temperature_input_method_"
"supply_air_temperature")
def test_zone_heating_design_supply_air_temperature_input_method_supply_air_temperature(self):
self.base_epjson['HVACTemplate:Zone:VRF']['HVACTemplate:Zone:VRF 1'][
'zone_heating_design_supply_air_temperature'] = 51
self.base_epjson['HVACTemplate:Zone:VRF']['HVACTemplate:Zone:VRF 1'][
'zone_heating_design_supply_air_temperature_input_method'] = "SupplyAirTemperature"
base_file_path = self.create_idf_file_from_epjson(epjson=self.base_epjson, file_name='base_pre_input.epJSON')
self.perform_full_comparison(base_idf_file_path=base_file_path)
epjson_output = self.ej._get_json_file(test_dir.joinpath('..', 'simulation', 'test', 'test_input_epjson.epJSON'))
self.assertEqual(
51,
epjson_output['Sizing:Zone']['SPACE1-1 Sizing Zone']['zone_heating_design_supply_air_temperature'])
return
@BaseSimulationTest._test_logger(doc_text="Simulation:Zone:VRF:"
"zone_heating_design_supply_air_temperature_input_method_"
"temperature_difference")
def test_zone_heating_design_supply_air_temperature_input_method_temperature_difference(self):
self.base_epjson['HVACTemplate:Zone:VRF']['HVACTemplate:Zone:VRF 1'][
'zone_heating_design_supply_air_temperature_difference'] = 31
self.base_epjson['HVACTemplate:Zone:VRF']['HVACTemplate:Zone:VRF 1'][
'zone_heating_design_supply_air_temperature_input_method'] = "TemperatureDifference"
base_file_path = self.create_idf_file_from_epjson(epjson=self.base_epjson, file_name='base_pre_input.epJSON')
self.perform_full_comparison(base_idf_file_path=base_file_path)
epjson_output = self.ej._get_json_file(test_dir.joinpath('..', 'simulation', 'test', 'test_input_epjson.epJSON'))
self.assertEqual(
31,
epjson_output['Sizing:Zone']['SPACE1-1 Sizing Zone']['zone_heating_design_supply_air_temperature_difference'])
return
@BaseSimulationTest._test_logger(doc_text="Simulation:Zone:VRF:baseboard_heating_type_hot_water")
def test_baseboard_heating_type_hot_water(self):
# todo_eo: Legacy fails when an HVACTemplate:Plant:HotWaterLoop and HVACTemplate:Plant:Boiler are
# included in the same file as an HVACTemplate:Plant:MixedWaterLoop and an existing HVACTemplate:Plant:Boiler.
# The PlantEquipmentList for the MixedWaterLoop includes the HW boiler.
# Explicitly setting template_plant_loop_type in both boilers fixes this in legacy.
self.ej.merge_epjson(
super_dictionary=self.base_epjson,
object_dictionary=hot_water_loop_objects)
self.base_epjson['HVACTemplate:Plant:Boiler']['Main Boiler'][
'template_plant_loop_type'] = 'MixedWater'
self.base_epjson['HVACTemplate:Zone:VRF']['HVACTemplate:Zone:VRF 1'][
'baseboard_heating_type'] = 'HotWater'
base_file_path = self.create_idf_file_from_epjson(epjson=self.base_epjson, file_name='base_pre_input.epJSON')
self.perform_full_comparison(base_idf_file_path=base_file_path)
epjson_output = self.ej._get_json_file(test_dir.joinpath('..', 'simulation', 'test', 'test_input_epjson.epJSON'))
self.assertIsNotNone(epjson_output['ZoneHVAC:Baseboard:RadiantConvective:Water'].get('SPACE1-1 Baseboard Heat'))
return
@BaseSimulationTest._test_logger(doc_text="Simulation:Zone:VRF:baseboard_heating_type_electric")
def test_baseboard_heating_type_electric(self):
self.base_epjson['HVACTemplate:Zone:VRF']['HVACTemplate:Zone:VRF 1'][
'baseboard_heating_type'] = 'Electric'
base_file_path = self.create_idf_file_from_epjson(epjson=self.base_epjson, file_name='base_pre_input.epJSON')
self.perform_full_comparison(base_idf_file_path=base_file_path)
epjson_output = self.ej._get_json_file(test_dir.joinpath('..', 'simulation', 'test', 'test_input_epjson.epJSON'))
self.assertIsNotNone(epjson_output['ZoneHVAC:Baseboard:Convective:Electric'].get('SPACE1-1 Baseboard Heat'))
return
@BaseSimulationTest._test_logger(doc_text="Simulation:Zone:VRF:baseboard_heating_availability_schedule_name")
def test_baseboard_heating_availability_schedule_name(self):
# todo_eo: Legacy fails when an HVACTemplate:Plant:HotWaterLoop and HVACTemplate:Plant:Boiler are
# included in the same file as an HVACTemplate:Plant:MixedWaterLoop and an existing HVACTemplate:Plant:Boiler.
# The PlantEquipmentList for the MixedWaterLoop includes the HW boiler.
# Explicitly setting template_plant_loop_type in both boilers fixes this in legacy.
self.ej.merge_epjson(
super_dictionary=self.base_epjson,
object_dictionary=hot_water_loop_objects)
self.base_epjson['HVACTemplate:Plant:Boiler']['Main Boiler'][
'template_plant_loop_type'] = 'MixedWater'
self.base_epjson['HVACTemplate:Zone:VRF']['HVACTemplate:Zone:VRF 1'][
'baseboard_heating_type'] = 'HotWater'
self.base_epjson['HVACTemplate:Zone:VRF']['HVACTemplate:Zone:VRF 1'][
'baseboard_heating_availability_schedule_name'] = 'OCCUPY-1'
self.base_epjson['HVACTemplate:Zone:VRF']['HVACTemplate:Zone:VRF 2'][
'baseboard_heating_type'] = 'Electric'
self.base_epjson['HVACTemplate:Zone:VRF']['HVACTemplate:Zone:VRF 2'][
'baseboard_heating_availability_schedule_name'] = 'OCCUPY-1'
base_file_path = self.create_idf_file_from_epjson(epjson=self.base_epjson, file_name='base_pre_input.epJSON')
self.perform_full_comparison(base_idf_file_path=base_file_path)
epjson_output = self.ej._get_json_file(test_dir.joinpath('..', 'simulation', 'test', 'test_input_epjson.epJSON'))
self.assertEqual(
'OCCUPY-1',
epjson_output['ZoneHVAC:Baseboard:RadiantConvective:Water']['SPACE1-1 Baseboard Heat']['availability_schedule_name'])
self.assertEqual(
'OCCUPY-1',
epjson_output['ZoneHVAC:Baseboard:Convective:Electric']['SPACE2-1 Baseboard Heat']['availability_schedule_name'])
return
@BaseSimulationTest._test_logger(doc_text="Simulation:Zone:VRF:baseboard_heating_capacity")
def test_baseboard_heating_capacity(self):
# todo_eo: Legacy fails when an HVACTemplate:Plant:HotWaterLoop and HVACTemplate:Plant:Boiler are
# included in the same file as an HVACTemplate:Plant:MixedWaterLoop and an existing HVACTemplate:Plant:Boiler.
# The PlantEquipmentList for the MixedWaterLoop includes the HW boiler.
# Explicitly setting template_plant_loop_type in both boilers fixes this in legacy.
self.ej.merge_epjson(
super_dictionary=self.base_epjson,
object_dictionary=hot_water_loop_objects)
self.base_epjson['HVACTemplate:Plant:Boiler']['Main Boiler'][
'template_plant_loop_type'] = 'MixedWater'
self.base_epjson['HVACTemplate:Zone:VRF']['HVACTemplate:Zone:VRF 1'][
'baseboard_heating_type'] = 'HotWater'
self.base_epjson['HVACTemplate:Zone:VRF']['HVACTemplate:Zone:VRF 1'][
'baseboard_heating_capacity'] = 200
self.base_epjson['HVACTemplate:Zone:VRF']['HVACTemplate:Zone:VRF 2'][
'baseboard_heating_type'] = 'Electric'
self.base_epjson['HVACTemplate:Zone:VRF']['HVACTemplate:Zone:VRF 2'][
'baseboard_heating_capacity'] = 200
base_file_path = self.create_idf_file_from_epjson(epjson=self.base_epjson, file_name='base_pre_input.epJSON')
self.perform_full_comparison(base_idf_file_path=base_file_path)
epjson_output = self.ej._get_json_file(test_dir.joinpath('..', 'simulation', 'test', 'test_input_epjson.epJSON'))
self.assertEqual(
200,
epjson_output['ZoneHVAC:Baseboard:RadiantConvective:Water']['SPACE1-1 Baseboard Heat']['heating_design_capacity'])
self.assertEqual(
200,
epjson_output['ZoneHVAC:Baseboard:Convective:Electric']['SPACE2-1 Baseboard Heat']['heating_design_capacity'])
return
```
#### File: pyExpandObjects/tests/test_epjson.py
```python
from pathlib import Path
import unittest
import tempfile
from . import BaseTest
from src.epjson_handler import EPJSON
# must import exceptions directly from test code
from src.epjson_handler import UniqueNameException, PyExpandObjectsTypeError, \
PyExpandObjectsFileNotFoundError, PyExpandObjectsSchemaError, InvalidEpJSONException
minimum_objects_d = {
"Building": {
"Test Building": {}
},
"GlobalGeometryRules": {
"GlobalGeometryRules 1": {
"coordinate_system": "Relative",
"starting_vertex_position": "UpperLeftCorner",
"vertex_entry_direction": "Counterclockwise"
}
}
}
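# minimum_objects_d above supplies the minimal epJSON content (Building and GlobalGeometryRules);
# adding a Version object to it is enough for the schema-validation tests below to report valid input.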
test_dir = Path(__file__).parent
class TestEPJSONHandler(BaseTest, unittest.TestCase):
def setUp(self):
self.epjson_handler = EPJSON()
self.epjson_handler_no_schema = EPJSON(no_schema=True)
self.epjson_handler.logger.setLevel('ERROR')
return
def test_merge_bad_objects(self):
dict_1 = {
"Zone": {
"SPACE2-1": {
"ceiling_height": 2.438400269,
"direction_of_relative_north": 0,
"multiplier": 1,
"type": 1,
"volume": 239.247360229,
"x_origin": 0,
"y_origin": 0,
"z_origin": 0
}
}
}
dict_2 = {"Zone": ""}
with self.assertRaises(PyExpandObjectsTypeError):
self.epjson_handler.merge_epjson(
super_dictionary=dict_1,
object_dictionary=dict_2)
return
def test_merge_same_object_type(self):
dict_1 = {
"Zone": {
"SPACE1-1": {
"ceiling_height": 2.438400269,
"direction_of_relative_north": 0,
"multiplier": 1,
"type": 1,
"volume": 103.311355591,
"x_origin": 0,
"y_origin": 0,
"z_origin": 0
}
}
}
dict_2 = {
"Zone": {
"SPACE2-1": {
"ceiling_height": 2.438400269,
"direction_of_relative_north": 0,
"multiplier": 1,
"type": 1,
"volume": 239.247360229,
"x_origin": 0,
"y_origin": 0,
"z_origin": 0
}
}
}
self.epjson_handler.merge_epjson(
super_dictionary=dict_1,
object_dictionary=dict_2,
)
self.assertIn('SPACE1-1', dict_1['Zone'].keys())
self.assertIn('SPACE2-1', dict_1['Zone'].keys())
return
def test_merge_two_objects(self):
dict_1 = {
"Zone": {
"SPACE1-1": {
"ceiling_height": 2.438400269,
"direction_of_relative_north": 0,
"multiplier": 1,
"type": 1,
"volume": 103.311355591,
"x_origin": 0,
"y_origin": 0,
"z_origin": 0
}
}
}
dict_2 = {
"ZoneInfiltration:DesignFlowRate": {
"SPACE1-1 Infil 1": {
"constant_term_coefficient": 0,
"design_flow_rate": 0.0167,
"design_flow_rate_calculation_method": "Flow/Zone",
"schedule_name": "INFIL-SCH",
"temperature_term_coefficient": 0,
"velocity_squared_term_coefficient": 0,
"velocity_term_coefficient": 0.2237,
"zone_or_zonelist_name": "SPACE1-1"
}
}
}
self.epjson_handler.merge_epjson(
super_dictionary=dict_1,
object_dictionary=dict_2,
)
self.assertIn('Zone', dict_1.keys())
self.assertGreater(len(dict_1['Zone']['SPACE1-1'].keys()), 0)
self.assertIn('ZoneInfiltration:DesignFlowRate', dict_1.keys())
self.assertGreater(len(dict_1['ZoneInfiltration:DesignFlowRate']['SPACE1-1 Infil 1'].keys()), 0)
return
def test_merge_duplicate_name(self):
dict_1 = {
"Zone": {
"SPACE1-1": {
"ceiling_height": 2.438400269,
"direction_of_relative_north": 0,
"multiplier": 1,
"type": 1,
"volume": 103.311355591,
"x_origin": 0,
"y_origin": 0,
"z_origin": 0
}
}
}
dict_2 = {
"Zone": {
"SPACE1-1": {
"ceiling_height": 2.438400269,
"direction_of_relative_north": 0,
"multiplier": 1,
"type": 1,
"volume": 103.311355591,
"x_origin": 0,
"y_origin": 0,
"z_origin": 0
}
}
}
with self.assertRaises(UniqueNameException):
self.epjson_handler.merge_epjson(
super_dictionary=dict_1,
object_dictionary=dict_2,
unique_name_override=False)
return
def test_merge_duplicate_name_skip_object(self):
dict_1 = {
"Zone": {
"SPACE1-1": {
"ceiling_height": 2.438400269,
"direction_of_relative_north": 0,
"multiplier": 1,
"type": 1,
"volume": 103.311355591,
"x_origin": 0,
"y_origin": 0,
"z_origin": 0
}
}
}
dict_2 = {
"Zone": {
"SPACE1-1": {
"ceiling_height": 2.438400269,
"direction_of_relative_north": 0,
"multiplier": 1,
"type": 1,
"volume": 103.311355591,
"x_origin": 0,
"y_origin": 0,
"z_origin": 0
}
}
}
self.epjson_handler.merge_epjson(
super_dictionary=dict_1,
object_dictionary=dict_2,
unique_name_override=False,
unique_name_fail=False)
self.assertEqual(1, len(dict_1["Zone"].keys()))
return
def test_merge_duplicate_name_skip_schedule_compact_always(self):
dict_1 = {
"Zone": {
"SPACE1-1": {
"ceiling_height": 2.438400269,
"direction_of_relative_north": 0,
"multiplier": 1,
"type": 1,
"volume": 103.311355591,
"x_origin": 0,
"y_origin": 0,
"z_origin": 0
}
},
'Schedule:Compact': {
"HVACTemplate-AlwaysTEST": {
'field_1': 'val_1'
}
}
}
dict_2 = {
"Zone": {
"SPACE2-1": {
"ceiling_height": 2.438400269,
"direction_of_relative_north": 0,
"multiplier": 1,
"type": 1,
"volume": 103.311355591,
"x_origin": 0,
"y_origin": 0,
"z_origin": 0
}
},
'Schedule:Compact': {
"HVACTemplate-AlwaysTEST": {
'field_2': 'val_2'
}
}
}
self.epjson_handler.merge_epjson(
super_dictionary=dict_1,
object_dictionary=dict_2,
unique_name_override=False)
self.assertEqual('val_1', dict_1['Schedule:Compact']['HVACTemplate-AlwaysTEST']['field_1'])
return
def test_unpack_epjson(self):
outputs = self.epjson_handler.epjson_genexp({
"Zone": {
"SPACE1-1": {
"ceiling_height": 2.438400269,
"direction_of_relative_north": 0,
"multiplier": 1,
"type": 1,
"volume": 103.311355591,
"x_origin": 0,
"y_origin": 0,
"z_origin": 0
},
"SPACE2-1": {
"ceiling_height": 2.438400269,
"direction_of_relative_north": 0,
"multiplier": 1,
"type": 1,
"volume": 103.311355591,
"x_origin": 0,
"y_origin": 0,
"z_origin": 0
}
}
})
key_check = True
for output in outputs:
(object_type, object_structure), = output.items()
(name, _), = object_structure.items()
if name not in ['SPACE1-1', 'SPACE2-1']:
key_check = False
self.assertTrue(key_check)
return
def test_purge_epjson(self):
dict_1 = {
"Zone": {
"SPACE1-1": {
"ceiling_height": 2.438400269,
"direction_of_relative_north": 0,
"multiplier": 1,
"type": 1,
"volume": 103.311355591,
"x_origin": 0,
"y_origin": 0,
"z_origin": 0
},
"SPACE2-1": {
"ceiling_height": 2.438400269,
"direction_of_relative_north": 0,
"multiplier": 1,
"type": 1,
"volume": 103.311355591,
"x_origin": 0,
"y_origin": 0,
"z_origin": 0
}
},
"ThermostatSetpoint:DualSetpoint": {
"All Zones Dual SP Control": {
"cooling_setpoint_temperature_schedule_name": "Clg-SetP-Sch",
"heating_setpoint_temperature_schedule_name": "Htg-SetP-Sch"
}
}
}
output = self.epjson_handler.purge_epjson(
epjson=dict_1,
purge_dictionary={
"Zone": ["SPACE1-1", ]
}
)
self.assertEqual(1, len(output['Zone'].keys()))
self.assertTrue("All Zones Dual SP Control" == list(output['ThermostatSetpoint:DualSetpoint'].keys())[0])
output = self.epjson_handler.purge_epjson(
epjson=dict_1,
purge_dictionary={
"Zone": '.*'
}
)
self.assertTrue("All Zones Dual SP Control" == list(output['ThermostatSetpoint:DualSetpoint'].keys())[0])
with self.assertRaises(KeyError):
output['Zone']
return
def test_epjson_count_summary(self):
dict_1 = {
"Zone": {
"SPACE1-1": {
"ceiling_height": 2.438400269,
"direction_of_relative_north": 0,
"multiplier": 1,
"type": 1,
"volume": 103.311355591,
"x_origin": 0,
"y_origin": 0,
"z_origin": 0
},
"SPACE2-1": {
"ceiling_height": 2.438400269,
"direction_of_relative_north": 0,
"multiplier": 1,
"type": 1,
"volume": 103.311355591,
"x_origin": 0,
"y_origin": 0,
"z_origin": 0
}
},
"ThermostatSetpoint:DualSetpoint": {
"All Zones Dual SP Control": {
"cooling_setpoint_temperature_schedule_name": "Clg-SetP-Sch",
"heating_setpoint_temperature_schedule_name": "Htg-SetP-Sch"
}
}
}
output = self.epjson_handler.summarize_epjson(dict_1)
self.assertEqual(2, output['Zone'])
self.assertEqual(1, output['ThermostatSetpoint:DualSetpoint'])
return
def test_default_schema_is_valid(self):
self.epjson_handler._load_schema()
assert self.epjson_handler.schema_is_valid
return
def test_blank_schema_is_not_valid(self):
with self.assertRaises(PyExpandObjectsSchemaError):
self.epjson_handler._validate_schema({"properties": {"id": "asdf"}})
return
def test_good_object_is_valid(self):
self.epjson_handler.epjson_process(
epjson_ref={
**minimum_objects_d,
"Version": {
"Version 1": {
"version_identifier": "9.4"
}
}
}
)
self.assertTrue(self.epjson_handler.input_epjson_is_valid)
return
def test_good_file_is_verified(self):
self.epjson_handler.epjson_process(
epjson_ref=str(
test_dir / '..' / 'simulation' / 'ExampleFiles' / 'HVACTemplate-5ZoneVAVWaterCooledExpanded.epJSON')
)
self.assertTrue(self.epjson_handler.input_epjson_is_valid)
return
def test_no_schema_returns_json(self):
self.epjson_handler_no_schema._load_epjson({
**minimum_objects_d,
"Version": {
"Version 1": {
"version_identifier": "9.4"
}
}
})
self.assertIsNone(self.epjson_handler_no_schema.schema)
self.assertIsNone(self.epjson_handler_no_schema.schema_is_valid)
self.assertIs(True, self.epjson_handler_no_schema.input_epjson_is_valid)
self.assertEqual(len(self.epjson_handler_no_schema.input_epjson.keys()), 3)
return
def test_bad_file_path_object_returns_error(self):
with self.assertRaisesRegex(PyExpandObjectsFileNotFoundError, 'input is not a string'):
self.epjson_handler._get_json_file(
json_location={})
return
def test_bad_file_path_returns_error(self):
with self.assertRaisesRegex(PyExpandObjectsFileNotFoundError, 'file does not exist'):
self.epjson_handler._get_json_file(
json_location='bad/file/path.epJSON')
return
def test_bad_file_returns_error(self):
with self.assertRaisesRegex(PyExpandObjectsTypeError, 'not a valid json'):
with tempfile.NamedTemporaryFile(suffix='.epJSON', mode='w') as temp_file:
temp_file.write('bad file data')
temp_file.seek(0)
self.epjson_handler._get_json_file(
json_location=temp_file.name)
return
def test_get_epjson_objects_object_type(self):
dict_1 = {
"Zone": {
"SPACE1-1": {
"ceiling_height": 2.438400269,
"direction_of_relative_north": 0,
"multiplier": 1,
"type": 1,
"volume": 103.311355591,
"x_origin": 0,
"y_origin": 0,
"z_origin": 0
},
"SPACE2-1": {
"ceiling_height": 2.438400269,
"direction_of_relative_north": 0,
"multiplier": 1,
"type": 1,
"volume": 103.311355591,
"x_origin": 0,
"y_origin": 0,
"z_origin": 0
}
},
"ThermostatSetpoint:DualSetpoint": {
"All Zones Dual SP Control": {
"cooling_setpoint_temperature_schedule_name": "Clg-SetP-Sch",
"heating_setpoint_temperature_schedule_name": "Htg-SetP-Sch"
}
}
}
output = self.epjson_handler.get_epjson_objects(epjson=dict_1, object_type_regexp='^Z.*')
self.assertEqual({'Zone': 2}, self.epjson_handler.summarize_epjson(output))
return
def test_get_epjson_object_name_reference(self):
dict_1 = {
"Zone": {
"SPACE1-1": {
"ceiling_height": 2.438400269,
"direction_of_relative_north": 0,
"multiplier": 1,
"type": 1,
"volume": 103.311355591,
"x_origin": 0,
"y_origin": 0,
"z_origin": 0
},
"SPACE2-1": {
"ceiling_height": 2.438400269,
"direction_of_relative_north": 0,
"multiplier": 1,
"type": 1,
"volume": 103.311355591,
"x_origin": 0,
"y_origin": 0,
"z_origin": 0
}
},
"ThermostatSetpoint:DualSetpoint": {
"All Zones Dual SP Control": {
"cooling_setpoint_temperature_schedule_name": "Clg-SetP-Sch",
"heating_setpoint_temperature_schedule_name": "Htg-SetP-Sch"
}
}
}
output = self.epjson_handler.get_epjson_objects(
epjson=dict_1,
object_type_regexp='^Z.*',
object_name_regexp='^Space2.*')
self.assertEqual({'Zone': 1}, self.epjson_handler.summarize_epjson(output))
return
def test_reject_get_epjson_object_bad_input(self):
dict_1 = []
with self.assertRaisesRegex(InvalidEpJSONException, 'Invalid epJSON'):
self.epjson_handler.get_epjson_objects(
epjson=dict_1,
object_type_regexp='^Z.*',
object_name_regexp='^Space2.*')
return
```
#### File: pyExpandObjects/tests/test_expand_objects_yaml.py
```python
import unittest
from unittest.mock import MagicMock
import copy
from src.expand_objects import ExpandObjects, ExpandZone, ExpandSystem
from src.expand_objects import PyExpandObjectsTypeError, PyExpandObjectsYamlStructureException, \
PyExpandObjectsYamlError, PyExpandObjectsException
from . import BaseTest
mock_zone_template = {
'HVACTemplate:Zone:VAV': {
'template_name': {
'template_field': 'template_test_value',
'template_field2': 'test_pre_mapped_value',
'reheat_coil_type': 'HotWater',
'zone_name': 'test zone'
}
}
}
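# The mock OptionTree below mirrors the structure the expansion code consumes: BaseObjects holds
# objects that are always created, Transitions copy template field values onto object fields, Mappings
# translate a template value into extra object fields, and TemplateObjects branches on a template field
# value (reheat_coil_type here), as exercised by the tests in this file.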
mock_zone_option_tree = {
'OptionTree': {
'Zone': {
'VAV': {
'BaseObjects': {
"Objects": [
{
'ZoneHVAC:AirDistributionUnit': {
"name": "{} ATU",
"field_name": "field_value"
}
}
],
'Transitions': [
{
"ZoneHVAC:AirDistributionUnit": {
"template_field": "object_test_field",
"template_field2": "object_test_field2"
}
}
],
'Mappings': [
{
'ZoneHVAC:AirDistributionUnit': {
"template_field2": {
"test_pre_mapped_value": {
"test_map_field": "test_mapped_value"
}
}
}
}
]
},
'TemplateObjects': {
'reheat_coil_type': {
"HotWater": {
'Objects': [
[
{
'AirTerminal:SingleDuct:VAV:Reheat': {
'name': '{} VAV Reheat',
'maximum_air_flow_rate': 'Autosize',
}
}
],
{
"Branch": {
"name": "{} HW Branch"
}
}
],
'Transitions': [
{
"AirTerminal:.*": {
"template_field": "object_test_field"
}
}
]
}
}
}
}
}
}
}
mock_system_template = {
'HVACTemplate:System:VAV': {
'template_name': {
'template_field': 'template_test_value',
'cooling_coil_type': 'ChilledWater'
}
}
}
mock_system_option_tree = {
'OptionTree': {
'HVACTemplate': {
'System': {
'VAV': {
'BuildPath': {
'BaseObjects': {
'Objects': [
{
'OutdoorAir:Mixer': {
'Fields': {
'name': '{} OA Mixing Box',
'mixed_air_node_name': '{} Mixed Air Outlet',
'outdoor_air_stream_node_name': '{} Outside Air Inlet',
'relief_air_stream_node_name': '{} Relief Air Outlet',
'return_air_stream_node_name': '{} Air Loop Inlet'
},
'Connectors': {
'AirLoop': {
'Inlet': 'outdoor_air_stream_node_name',
'Outlet': 'mixed_air_node_name'
}
}
}
},
{
'Fan:VariableVolume': {
'Fields': {
'name': '{} Supply Fan',
'air_inlet_node_name': '{} Supply Fan Inlet',
'air_outlet_node_name': '{} Supply Fan Outlet'
},
'Connectors': {
'AirLoop': {
'Inlet': 'air_inlet_node_name',
'Outlet': 'air_outlet_node_name'
}
}
}
}
],
'Transitions': [
{
"Fan:.*": {
"template_field": "object_test_field"
}
}
]
},
'Actions': [
{
'cooling_coil_type': {
'ChilledWater': {
'ObjectReference': 'OutdoorAir:Mixer',
'Location': 'After',
'ActionType': 'Insert',
'Objects': [
{
'Coil:Cooling:Water': {
'Fields': {
'name': '{} Cooling Coil',
'air_inlet_node_name': '{} Cooling Coil Inlet',
'air_outlet_node_name': '{} Cooling Coil Outlet',
'water_inlet_node_name': '{} Cooling Coil Chw Inlet',
'water_outlet_node_name': '{} Cooling Coil Chw Outlet'
},
'Connectors': {
'AirLoop': {
'Inlet': 'air_inlet_node_name',
'Outlet': 'air_outlet_node_name'
}
}
}
}
]
}
}
}
]
}
}
}
}
}
}
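# mock_system_option_tree above adds a BuildPath section: BaseObjects lists ordered "super objects"
# (Fields plus AirLoop Connectors) and Actions conditionally insert further super objects relative to an
# ObjectReference (a chilled-water coil after the OutdoorAir:Mixer), which the build path tests verify.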
mock_build_path = [
{
'OutdoorAir:Mixer': {
'Fields': {
'name': '{} OA Mixing Box',
'mixed_air_node_name': '{} Mixed Air Outlet',
'outdoor_air_stream_node_name': '{} Outside Air Inlet',
'relief_air_stream_node_name': '{} Relief Air Outlet',
'return_air_stream_node_name': '{} Air Loop Inlet'
},
'Connectors': {
'AirLoop': {
'Inlet': 'outdoor_air_stream_node_name',
'Outlet': 'mixed_air_node_name'
}
}
}
},
{
'Fan:VariableVolume': {
'Fields': {
'name': '{} Supply Fan',
'air_inlet_node_name': '{} Supply Fan Inlet',
'air_outlet_node_name': '{} Supply Fan Outlet'
},
'Connectors': {
'AirLoop': {
'Inlet': 'air_inlet_node_name',
'Outlet': 'air_outlet_node_name'
}
}
}
}
]
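# mock_build_path above is the stand-alone two-element build path (OutdoorAir:Mixer then
# Fan:VariableVolume) reused by the _apply_build_path_action tests for insert/replace/remove actions.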
class TestExpandObjectsYaml(BaseTest, unittest.TestCase):
"""
General handling and processing of YAML instructions
"""
def setUp(self):
return
def teardown(self):
return
def test_get_option_tree_from_yaml(self):
eo = ExpandObjects(
template=mock_zone_template,
expansion_structure=mock_zone_option_tree)
structure_hierarchy = ['OptionTree', 'Zone', 'VAV']
output = eo._get_option_tree(structure_hierarchy=structure_hierarchy)
key_check = True
try:
output['BaseObjects']
except KeyError:
key_check = False
self.assertTrue(key_check)
# test without OptionTree
structure_hierarchy = ['Zone', 'VAV']
output = eo._get_option_tree(structure_hierarchy=structure_hierarchy)
key_check = True
try:
output['BaseObjects']
except KeyError:
key_check = False
self.assertTrue(key_check)
return
def test_reject_bad_yaml(self):
bad_yaml_string = "bad brackets: ]["
with self.assertRaisesRegex(PyExpandObjectsYamlError, 'Problem loading'):
ExpandObjects(
template=mock_zone_template,
expansion_structure=bad_yaml_string)
return
def test_reject_bad_structure_format(self):
with self.assertRaisesRegex(PyExpandObjectsTypeError, '.*is not a file path or dictionary.*'):
ExpandObjects(
template=mock_zone_template,
expansion_structure=[])
return
def test_reject_bad_option_tree_request(self):
eo = ExpandObjects(
template=mock_zone_template,
expansion_structure=mock_zone_option_tree)
structure_hierarchy = 'BadString'
with self.assertRaisesRegex(PyExpandObjectsTypeError, 'Call to YAML object'):
eo._get_option_tree(structure_hierarchy=structure_hierarchy)
return
def test_template_object_with_none_option_creates_object(self):
eo = ExpandObjects(
template=mock_zone_template,
expansion_structure={
'OptionTree': {
'HVACTemplate': {
'Zone': {
'VAV': {
'BaseObjects': {
'Objects': {}
},
'TemplateObjects': {
'SomeNonPresentField': {
'None': {
'Objects': [
{
'Object:1': {
'name': 'object_name',
'template_test_field': 'template_test_value'
}
}
]
}
}
}
}
}
}
}
}
)
eo._create_objects()
self.assertEqual('template_test_value', eo.epjson['Object:1']['object_name']['template_test_field'])
return
def test_option_tree_leaf(self):
eo = ExpandObjects(
template=mock_zone_template,
expansion_structure=mock_zone_option_tree)
structure_hierarchy = ['OptionTree', 'Zone', 'VAV']
option_tree = eo._get_option_tree(structure_hierarchy=structure_hierarchy)
option_tree_leaf = eo._get_option_tree_leaf(option_tree=option_tree, leaf_path=['BaseObjects', ])
key_check = True
for k in option_tree_leaf.keys():
if k not in ['Objects', 'Transitions', 'Mappings']:
key_check = False
self.assertTrue(key_check)
return
def test_reject_option_tree_leaf_bad_structure(self):
option_tree = {'mock': 'object'}
eo = ExpandObjects()
eo.template_type = 'test type'
eo.template_name = 'test name'
eo.get_structure = MagicMock()
eo.get_structure.return_value = {'BadKey': []}
with self.assertRaisesRegex(PyExpandObjectsYamlStructureException, 'Invalid or missing Objects'):
eo._get_option_tree_leaf(option_tree=option_tree, leaf_path=['BaseObjects', ])
return
def test_option_tree_leaf_multiple_mappings(self):
eo = ExpandObjects()
eo.supply_fan_part_load_power_coefficients = 'InletVaneDampers'
option_tree = {
'BaseObjects': {
'Objects': [
{
'Fan:VariableVolume': {
'name': 'test_name'
}
}
],
'Mappings': [
{
'Fan:.*': {
'supply_fan_part_load_power_coefficients': {
'InletVaneDampers': {
'fan_power_coefficient_1': 0.35071223,
'fan_power_coefficient_2': 0.30850535,
'fan_power_coefficient_3': -0.54137364,
'fan_power_coefficient_4': 0.87198823,
'fan_power_coefficient_5': 0
}
}
}
}
]
}
}
option_tree_leaf = eo._get_option_tree_leaf(option_tree=option_tree, leaf_path=['BaseObjects', ])
object_list = eo._apply_transitions(option_tree_leaf=option_tree_leaf)
self.assertEqual(0.35071223, object_list[0]['Fan:VariableVolume']['fan_power_coefficient_1'])
return
def test_option_tree_leaf_without_transitions(self):
# remove Transitions for this test
bad_mock_zone_option_tree = copy.deepcopy(mock_zone_option_tree)
bad_mock_zone_option_tree['OptionTree']['Zone']['VAV']['BaseObjects'].pop('Transitions')
eo = ExpandObjects(
template=mock_zone_template,
expansion_structure=bad_mock_zone_option_tree)
structure_hierarchy = ['OptionTree', 'Zone', 'VAV']
option_tree = eo._get_option_tree(structure_hierarchy=structure_hierarchy)
option_tree_leaf = eo._get_option_tree_leaf(option_tree=option_tree, leaf_path=['BaseObjects', ])
self.assertIsNone(option_tree_leaf['Transitions'])
return
def test_apply_transitions(self):
eo = ExpandObjects(
template=mock_zone_template,
expansion_structure=mock_zone_option_tree)
structure_hierarchy = ['OptionTree', 'Zone', 'VAV']
option_tree = eo._get_option_tree(structure_hierarchy=structure_hierarchy)
option_tree_leaf = eo._get_option_tree_leaf(option_tree=option_tree, leaf_path=['BaseObjects', ])
transitioned_option_tree_leaf = eo._apply_transitions(option_tree_leaf)
self.assertEqual(
'template_test_value',
transitioned_option_tree_leaf[0]['ZoneHVAC:AirDistributionUnit']['object_test_field'])
return
def test_apply_transitions_bad_transition_structure(self):
option_tree_leaf = {
'Objects': {'test': 'val'},
'BadKey': {'test': 'val'},
'Transitions': [{'BadKey': {'bad_obj_ref': 'bad_obj_structure'}}]}
eo = ExpandObjects()
eo.template_type = 'test type'
eo.template_name = 'test name'
with self.assertRaisesRegex(PyExpandObjectsYamlStructureException, 'OptionTree leaf is incorrectly formatted'):
eo._apply_transitions(option_tree_leaf=option_tree_leaf)
return
def test_apply_transitions_bad_mapping_structure(self):
option_tree_leaf = {
'Objects': {'test': 'val'},
'BadKey': {'test': 'val'},
'Mappings': [{'BadKey': {'bad_obj_ref': 'bad_obj_structure'}}]}
eo = ExpandObjects()
eo.template_type = 'test type'
eo.template_name = 'test name'
with self.assertRaisesRegex(PyExpandObjectsYamlStructureException, 'OptionTree leaf is incorrectly formatted'):
eo._apply_transitions(option_tree_leaf=option_tree_leaf)
return
def test_apply_transitions_and_map(self):
eo = ExpandObjects(
template=mock_zone_template,
expansion_structure=mock_zone_option_tree)
structure_hierarchy = ['OptionTree', 'Zone', 'VAV']
option_tree = eo._get_option_tree(structure_hierarchy=structure_hierarchy)
option_tree_leaf = eo._get_option_tree_leaf(option_tree=option_tree, leaf_path=['BaseObjects', ])
transitioned_option_tree_leaf = eo._apply_transitions(option_tree_leaf)
self.assertEqual(
'test_mapped_value',
transitioned_option_tree_leaf[0]['ZoneHVAC:AirDistributionUnit']['test_map_field'])
return
def test_yaml_list_to_dictionary_regular_object(self):
dict_1 = {
"Object:1": {
"name": "test_name",
"field_1": "val_1"
}
}
eo = ExpandZone(template=mock_zone_template)
output = eo.yaml_list_to_epjson_dictionaries([dict_1, ])
self.assertEqual('val_1', output['Object:1']['test_name']['field_1'])
return
def test_yaml_list_to_dictionary_super_object(self):
dict_1 = {
"Object:1": {
"Fields": {
"name": "test_name",
"field_1": "val_1"
},
"Connectors": {}
}
}
eo = ExpandZone(template=mock_zone_template)
output = eo.yaml_list_to_epjson_dictionaries([dict_1, ])
self.assertEqual('val_1', output['Object:1']['test_name']['field_1'])
return
# Commented out because the error statement was getting overused
# def test_warning_on_bad_apply_transitions(self):
# # make a bad template reference
# bad_mock_zone_option_tree = mock_zone_option_tree
# bad_mock_zone_option_tree['OptionTree']['Zone']['VAV']['BaseObjects']['Transitions'] = [
# {
# "ZoneHVAC:AirDistributionUnit": {
# "template_bad_field": "object_test_field"
# }
# }
# ]
# eo = ExpandObjects(
# template=mock_zone_template,
# expansion_structure=bad_mock_zone_option_tree)
# structure_hierarchy = ['OptionTree', 'Zone', 'VAV']
# option_tree = eo._get_option_tree(structure_hierarchy=structure_hierarchy)
# option_tree_leaf = eo._get_option_tree_leaf(option_tree=option_tree, leaf_path=['BaseObjects', ])
# eo._apply_transitions(option_tree_leaf)
# # Logger (Parent class of ExpandObjects) keeps logs in self.stream
# self.assertIn(
# 'A template value was attempted to be applied',
# eo.stream.getvalue()
# )
# return
def test_error_on_bad_object(self):
# make a bad template reference
# check missing 'name' field
bad_mock_zone_option_tree = copy.deepcopy(mock_zone_option_tree)
bad_mock_zone_option_tree['OptionTree']['Zone']['VAV']['BaseObjects']['Objects'] = [
{
'ZoneHVAC:AirDistributionUnit': {
"bad_name": "{} ATU"
}
}
]
eo = ExpandObjects(
template=mock_zone_template,
expansion_structure=bad_mock_zone_option_tree)
structure_hierarchy = ['OptionTree', 'Zone', 'VAV']
option_tree = eo._get_option_tree(structure_hierarchy=structure_hierarchy)
option_tree_leaf = eo._get_option_tree_leaf(option_tree=option_tree, leaf_path=['BaseObjects', ])
object_list = eo._apply_transitions(option_tree_leaf)
with self.assertRaises(PyExpandObjectsYamlStructureException):
eo.yaml_list_to_epjson_dictionaries(object_list)
# more than one object in a dictionary
bad_mock_zone_option_tree = copy.deepcopy(mock_zone_option_tree)
bad_mock_zone_option_tree['OptionTree']['Zone']['VAV']['BaseObjects']['Objects'] = [
{
'ZoneHVAC:AirDistributionUnit': {
"name": "{} ATU"
},
'ZoneHVAC:AirDistributionUnit2': {
"name": "{} ATU"
}
}
]
eo = ExpandObjects(
template=mock_zone_template,
expansion_structure=bad_mock_zone_option_tree)
structure_hierarchy = ['OptionTree', 'Zone', 'VAV']
option_tree = eo._get_option_tree(structure_hierarchy=structure_hierarchy)
option_tree_leaf = eo._get_option_tree_leaf(option_tree=option_tree, leaf_path=['BaseObjects', ])
object_list = eo._apply_transitions(option_tree_leaf)
with self.assertRaisesRegex(PyExpandObjectsYamlStructureException, 'YAML object is incorrectly formatted'):
eo.yaml_list_to_epjson_dictionaries(object_list)
return
def test_retrieve_objects_from_option_tree(self):
eo = ExpandZone(template=mock_zone_template)
structure_hierarchy = ['OptionTree', 'HVACTemplate', 'Zone', 'VAV']
template_objects = eo._get_option_tree_objects(structure_hierarchy=structure_hierarchy)
self.assertEqual(
eo.summarize_epjson(template_objects),
{'AirTerminal:SingleDuct:VAV:Reheat': 1,
'Branch': 1,
'Coil:Heating:Water': 1,
'DesignSpecification:OutdoorAir': 1,
'DesignSpecification:ZoneAirDistribution': 1,
'Sizing:Zone': 1,
'ZoneHVAC:AirDistributionUnit': 1,
'ZoneHVAC:EquipmentConnections': 1,
'ZoneHVAC:EquipmentList': 1}
)
return
def test_get_option_tree_no_match(self):
structure_hierarchy = ['mock', 'object']
eo = ExpandObjects()
eo.template_type = 'test type'
eo.template_name = 'test name'
eo._get_option_tree = MagicMock()
eo._get_option_tree.return_value = {'TemplateObjects': {'field_name': {'field_value': 'object_field'}}}
with self.assertRaisesRegex(PyExpandObjectsYamlStructureException, 'template option was not applied for '
'template field'):
eo._get_option_tree_objects(structure_hierarchy=structure_hierarchy)
return
def test_get_option_tree_bad_value(self):
structure_hierarchy = ['mock', 'object']
eo = ExpandObjects()
eo.template_type = 'test type'
eo.template_name = 'test name'
eo._get_option_tree = MagicMock()
eo._get_option_tree.return_value = {'TemplateObjects': ['test', ]}
with self.assertRaisesRegex(PyExpandObjectsYamlStructureException, 'TemplateObjects section for system'):
eo._get_option_tree_objects(structure_hierarchy=structure_hierarchy)
return
def test_complex_inputs_simple(self):
test_d = {
"Object:1": {
"name_1": {
"field_1": "value_1"
}
}
}
eo = ExpandZone(template=mock_zone_template)
output = eo._resolve_complex_input(
epjson=test_d,
field_name="field_1",
input_value="{} test_val"
)
self.assertEqual('test zone test_val', [i for i in output][0]["value"])
# number test
output = eo._resolve_complex_input(
epjson=test_d,
field_name="field_1",
input_value=3
)
self.assertEqual(3, [i for i in output][0]["value"])
return
def test_complex_inputs_class_attribute_reference_string(self):
eo = ExpandZone(template=mock_zone_template)
eo.test_val = "test_string"
output = eo._resolve_complex_input(
epjson={},
field_name="field_1",
input_value="{test_val}"
)
self.assertEqual('test_string', [o for o in output][0]['value'])
return
def test_reject_complex_inputs_class_attribute_reference_bad_string(self):
eo = ExpandZone(template=mock_zone_template)
eo.bad_test_val = "test_string"
output = eo._resolve_complex_input(
epjson={},
field_name="field_1",
input_value="{test_val}"
)
self.assertIsNone([o for o in output][0]['value'])
return
def test_skip_none_resolve_objects_attribute_reference_bad_string(self):
eo = ExpandZone(template=mock_zone_template)
eo.test_val = "test_string"
output = eo.resolve_objects(epjson={
"Object:1":
{
"object_1_name": {
"field_1": '{bad_test_val}',
"field_2": '{test_val}'
}
}
})
self.assertEqual(1, len(output['Object:1']['object_1_name'].keys()))
self.assertEqual('test_string', output['Object:1']['object_1_name']['field_2'])
return
def test_complex_inputs_class_attribute_reference_float(self):
eo = ExpandZone(template=mock_zone_template)
eo.test_val = "1.0"
output = eo._resolve_complex_input(
epjson={},
field_name="field_1",
input_value="{test_val}"
)
self.assertTrue(isinstance([o for o in output][0]['value'], float))
return
def test_complex_inputs_class_attribute_reference_int(self):
eo = ExpandZone(template=mock_zone_template)
eo.test_val = "1"
output = eo._resolve_complex_input(
epjson={},
field_name="field_1",
input_value="{test_val}"
)
self.assertTrue(isinstance([o for o in output][0]['value'], int))
return
def test_complex_inputs_dictionary(self):
test_d = {
"Object:1": {
"name_1": {
"field_1": "value_1"
}
}
}
eo = ExpandZone(template=mock_zone_template)
# field value check
output = eo._resolve_complex_input(
epjson=test_d,
field_name="field_1",
input_value={
"Object:1": "field_1"
}
)
tmp_d = {}
for o in output:
tmp_d[o['field']] = o['value']
self.assertEqual('field_1', list(tmp_d.keys())[0])
self.assertEqual('value_1', tmp_d['field_1'])
# dictionary key check
output = eo._resolve_complex_input(
epjson=test_d,
field_name="field_1",
input_value={
"Object:1": "self"
}
)
tmp_d = {}
for o in output:
tmp_d[o['field']] = o['value']
self.assertEqual('Object:1', tmp_d['field_1'])
# name check
output = eo._resolve_complex_input(
epjson=test_d,
field_name="field_1",
input_value={
"Object:1": "key"
}
)
tmp_d = {}
for o in output:
tmp_d[o['field']] = o['value']
self.assertEqual('name_1', tmp_d['field_1'])
return
def test_complex_inputs_recursion_dictionary(self):
test_d = {
"Object:1": {
"name_1": {
"field_1": "value_1"
}
},
"Object:2": {
"name_1": {
"field_1": {
"Object:1": "field_1"
}
}
}
}
eo = ExpandZone(template=mock_zone_template)
# field value check
output = eo._resolve_complex_input(
epjson=test_d,
field_name="field_test",
input_value={
"Object:2": "field_1"
}
)
tmp_d = {}
for o in output:
tmp_d[o['field']] = o['value']
self.assertEqual('value_1', tmp_d['field_test'])
return
def test_complex_inputs_recursion_limit(self):
test_d = {
"Object:1": {
"name_1": {
"field_1": {
"Object:2": "field_1"
}
}
},
"Object:2": {
"name_1": {
"field_1": {
"Object:1": "field_1"
}
}
}
}
eo = ExpandObjects(
template=mock_zone_template,
expansion_structure=mock_zone_option_tree)
with self.assertRaisesRegex(PyExpandObjectsYamlStructureException, 'Maximum Recursion limit exceeded when '
'resolving'):
output = eo._resolve_complex_input(
epjson=test_d,
field_name="field_test",
input_value={
"Object:2": "field_1"
}
)
tmp_d = {}
for o in output:
tmp_d[o['field']] = o['value']
return
def test_complex_inputs_list(self):
test_d = {
"Object:2": {
"name_1": {
"field_1": "value_1"
}
}
}
eo = ExpandZone(template=mock_zone_template)
# field value check
output = eo._resolve_complex_input(
epjson=test_d,
field_name="field_test",
input_value=[
{"field_sub_test": {"Object:2": "field_1"}}
]
)
tmp_d = {}
for o in output:
tmp_d[o['field']] = o['value']
self.assertEqual('value_1', tmp_d['field_test'][0]['field_sub_test'])
return
def test_complex_inputs_list_recursion(self):
test_d = {
"Object:1": {
"name_1": {
"field_1": "value_1"
}
},
"Object:2": {
"name_1": {
"field_1": {
"Object:1": "field_1"
}
}
}
}
eo = ExpandZone(template=mock_zone_template)
# field value check
output = eo._resolve_complex_input(
epjson=test_d,
field_name="field_test",
input_value=[
{"field_sub_test": {"Object:2": "field_1"}}
]
)
tmp_d = {}
for o in output:
tmp_d[o['field']] = o['value']
self.assertEqual('value_1', tmp_d['field_test'][0]['field_sub_test'])
return
def test_resolve_complex_inputs_object(self):
test_d = {
"Object:1": {
"name_1": {
"field_1": "value_1"
}
},
"Object:2": {
"name_1": {
"field_1": {
"Object:1": "field_1"
}
}
}
}
eo = ExpandZone(template=mock_zone_template)
eo.resolve_objects(epjson=test_d)
self.assertEqual('value_1', test_d['Object:2']['name_1']['field_1'])
return
def test_resolve_complex_inputs_object_with_template_reference(self):
test_d = {
"Object:1": {
"name_1": {
"field_1": 'test {template_field}'
}
}
}
eo = ExpandZone(template=mock_zone_template)
eo.resolve_objects(epjson=test_d)
self.assertEqual('test template_test_value', test_d['Object:1']['name_1']['field_1'])
return
def test_complex_nested_test(self):
test_d = {
'AirTerminal:SingleDuct:VAV:Reheat': {
'SPACE1-1 VAV Reheat': {
'air_inlet_node_name': '{} Zone Equip Inlet',
'air_outlet_node_name': '{} Supply Inlet',
'damper_air_outlet_node_name': '{} Damper Outlet',
'damper_heating_action': 'Reverse',
'maximum_air_flow_rate': 'Autosize',
'maximum_hot_water_or_steam_flow_rate': 'Autosize',
'reheat_coil_name': '{} Reheat Coil',
'reheat_coil_object_type': 'Coil:Heating:Water',
'zone_minimum_air_flow_input_method': 'Constant'}},
'Branch': {
'SPACE1-1 HW Reheat Branch': {
'components': [
{
'component_inlet_node_name': {'Coil:Heating:Water': 'water_inlet_node_name'},
'component_name': {'Coil:Heating:Water': 'key'},
'component_object_type': {'Coil:Heating:Water': 'self'},
'component_outlet_node_name': {'Coil:Heating:Water': 'water_outlet_node_name'}
}
]
}
},
'Coil:Heating:Water': {
'SPACE1-1 Reheat Coil': {
'air_inlet_node_name': '{} Damper Outlet',
'air_outlet_node_name': '{} Supply Inlet',
'maximum_water_flow_rate': 'Autosize',
'performance_input_method': 'UFactorTimesAreaAndDesignWaterFlowRate',
'rated_capacity': 'Autosize',
'rated_inlet_air_temperature': 16.6,
'rated_inlet_water_temperature': 82.2,
'rated_outlet_air_temperature': 32.2,
'rated_outlet_water_temperature': 71.1,
'rated_ratio_for_air_and_water_convection': 0.5,
'u_factor_times_area_value': 'Autosize',
'water_inlet_node_name': '{} Heating Coil Hw Inlet',
'water_outlet_node_name': '{} Heating Coil Hw Outlet'}},
'ZoneHVAC:AirDistributionUnit': {
'SPACE1-1 ATU': {
'air_distribution_unit_outlet_node_name':
{'^AirTerminal:.*': 'air_outlet_node_name'},
'air_terminal_name':
{'^AirTerminal:.*': 'key'},
'air_terminal_object_type':
{'^AirTerminal:.*': 'self'}
}
}
}
eo = ExpandZone(
template=mock_zone_template)
eo.resolve_objects(epjson=test_d)
# Check that no string remains unformatted. The * and ^ are the common regex special characters.
self.assertNotIn('{}', eo.epjson)
self.assertNotIn('^', eo.epjson)
self.assertNotIn('*', eo.epjson)
return
def test_complex_inputs_bad_reference_object(self):
eo = ExpandObjects()
eo.template_type = 'test type'
eo.template_name = 'test name'
with self.assertRaisesRegex(PyExpandObjectsYamlStructureException, 'Complex input reference is invalid'):
output = eo._resolve_complex_input(
epjson={},
field_name="field_1",
input_value={
"Bad Reference 1": 'val',
"Bad Reference 2": 'val'
}
)
print([i for i in output])
return
def test_complex_inputs_bad_build_path_reference_object(self):
eo = ExpandObjects()
eo.template_type = 'test type'
eo.template_name = 'test name'
with self.assertRaisesRegex(PyExpandObjectsYamlStructureException, 'Build Path complex input was specified '
'with no build path'):
output = eo._resolve_complex_input(
epjson={},
field_name="field_1",
input_value={
'BuildPathReference': {}
}
)
print([i for i in output])
return
def test_complex_inputs_bad_build_path_reference_object_instructions(self):
eo = ExpandObjects()
eo.template_type = 'test type'
eo.template_name = 'test name'
with self.assertRaisesRegex(PyExpandObjectsYamlStructureException, 'Object field could not be resolved'):
output = eo._resolve_complex_input(
epjson={},
field_name="field_1",
input_value={
'BuildPathReference': {}
},
build_path=['test', ]
)
print([i for i in output])
return
def test_complex_inputs_bad_build_path_reference_maximum_recursion(self):
eo = ExpandObjects()
eo.template_type = 'test type'
eo.template_name = 'test name'
eo.build_path = [
{
"Object1": {
"Fields": {
'field1': {
'BuildPathReference': {
'Location': 1,
'ValueLocation': 'Inlet'
}
}
},
'Connectors': {
'AirLoop': {
'Inlet': 'field1'
}
}
}
},
{
"Object2": {
"Fields": {
'field1': {
'BuildPathReference': {
'Location': 0,
'ValueLocation': 'Inlet'
}
}
},
'Connectors': {
'AirLoop': {
'Inlet': 'field1'
}
}
}
}
]
with self.assertRaisesRegex(PyExpandObjectsYamlStructureException, 'Object field could not be resolved'):
output = eo._resolve_complex_input(
epjson={},
field_name="field_1",
input_value={
'BuildPathReference': {
'Location': 0,
'ValueLocation': 'Inlet'
}
}
)
print([i for i in output])
return
def test_complex_inputs_from_build_path_no_location(self):
build_path = [
{
'Object1': {
"Fields": {
'field1': 'val1',
'field2': 'val1'
},
'Connectors': {
'AirLoop': {
'Inlet': 'field1',
'Outlet': 'field2'
}
}
}
}
]
lookup_instructions = {}
eo = ExpandObjects()
eo.template_type = 'test type'
eo.template_name = 'test name'
with self.assertRaisesRegex(PyExpandObjectsYamlStructureException, 'Build Path Location or ValueLocation '
'reference is invalid'):
eo._resolve_complex_input_from_build_path(build_path=build_path, lookup_instructions=lookup_instructions)
return
def test_complex_inputs_from_build_path_bad_location(self):
build_path = [
{
'Object1': {
"Fields": {
'field1': 'val1',
'field2': 'val1'
},
'Connectors': {
'AirLoop': {
'Inlet': 'field1',
'Outlet': 'field2'
}
}
}
}
]
lookup_instructions = {
"Location": 5,
'ValueLocation': 'Inlet'
}
eo = ExpandObjects()
eo.template_type = 'test type'
eo.template_name = 'test name'
with self.assertRaisesRegex(PyExpandObjectsYamlStructureException, 'Invalid build path or lookup instructions'):
eo._resolve_complex_input_from_build_path(build_path=build_path, lookup_instructions=lookup_instructions)
return
def test_complex_inputs_from_build_path_bad_occurrence(self):
build_path = [
{
'Object1': {
"Fields": {
'field1': 'val1',
'field2': 'val1'
},
'Connectors': {
'AirLoop': {
'Inlet': 'field1',
'Outlet': 'field2'
}
}
}
}
]
lookup_instructions = {
'Location': 'Object1',
'ValueLocation': 'Inlet',
'Occurrence': 'BadVal'}
eo = ExpandObjects()
eo.template_type = 'test type'
eo.template_name = 'test name'
with self.assertRaisesRegex(PyExpandObjectsYamlStructureException, 'Occurrence key in complex reference is '
'not an integer'):
eo._resolve_complex_input_from_build_path(build_path=build_path, lookup_instructions=lookup_instructions)
return
def test_complex_inputs_from_build_path_occurrences_not_reached(self):
build_path = [
{
'Object1': {
"Fields": {
'field1': 'val1',
'field2': 'val1'
},
'Connectors': {
'AirLoop': {
'Inlet': 'field1',
'Outlet': 'field2'
}
}
}
}
]
lookup_instructions = {
'Location': 'Object1',
'ValueLocation': 'Inlet',
'Occurrence': 2}
eo = ExpandObjects()
eo.template_type = 'test type'
eo.template_name = 'test name'
eo._resolve_complex_input_from_build_path(build_path=build_path, lookup_instructions=lookup_instructions)
output = eo.stream.getvalue()
self.assertRegex(output, 'The number of occurrence matches')
return
def test_complex_inputs_from_build_path_bad_valuelocation(self):
build_path = [
{
'Object1': {
"Fields": {
'field1': 'val1',
'field2': 'val1'
},
'Connectors': {
'AirLoop': {
'Inlet': 'field1',
'Outlet': 'field2'
}
}
}
}
]
lookup_instructions = {
'Location': 'Object1',
'ValueLocation': 'Bad'}
eo = ExpandObjects()
eo.template_type = 'test type'
eo.template_name = 'test name'
with self.assertRaisesRegex(PyExpandObjectsYamlStructureException, 'Invalid complex input for Build Path Lookup'):
eo._resolve_complex_input_from_build_path(build_path=build_path, lookup_instructions=lookup_instructions)
return
def test_complex_inputs_from_build_path_by_reference(self):
build_path = [
{
'Object1': {
"Fields": {
'field1': 'val1',
'field2': 'val1'
},
'Connectors': {
'AirLoop': {
'Inlet': 'field1',
'Outlet': 'field2'
}
}
}
}
]
lookup_instructions = {
'Location': 'Object1',
'ValueLocation': 'Inlet'}
eo = ExpandObjects()
eo.template_type = 'test type'
eo.template_name = 'test name'
output = eo._resolve_complex_input_from_build_path(build_path=build_path, lookup_instructions=lookup_instructions)
self.assertEqual(output, 'val1')
return
def test_complex_inputs_from_build_path_by_location(self):
build_path = [
{
'Object1': {
"Fields": {
'field1': 'val1',
'field2': 'val1'
},
'Connectors': {
'AirLoop': {
'Inlet': 'field1',
'Outlet': 'field2'
}
}
}
}
]
lookup_instructions = {
'Location': 0,
'ValueLocation': 'Inlet'}
eo = ExpandObjects()
eo.template_type = 'test type'
eo.template_name = 'test name'
output = eo._resolve_complex_input_from_build_path(build_path=build_path, lookup_instructions=lookup_instructions)
self.assertEqual(output, 'val1')
return
def test_field_with_zero_value_processing(self):
eo = ExpandObjects()
output = eo.yaml_list_to_epjson_dictionaries([{
'Object:1': {
'name': 'test_name',
'field': 0
}
}])
output = eo.resolve_objects(epjson=output)
self.assertEqual(0, output['Object:1']['test_name']['field'])
return
def test_build_path_action_non_super_object_processed_and_saved_to_epjson(self):
build_path = mock_build_path
# Note ObjectReference is not needed
action_instruction = {
'Location': 1,
'Occurrence': 1,
'ActionType': 'Insert',
'Objects': [
{
"test_object_type": {
"Fields": {
'name': 'test_object_name',
'test_field': 'test_value',
'test_field_3': 'test_value_3'
},
"Connectors": {'AirLoop': {"Inlet": 'test_field', "Outlet": "test_field_3"}}
}
},
{
'non_super_object': {
'name': 'test_non_super_object',
'test_non_super_field': 'test_non_super_val'
}
}
]
}
eo = ExpandObjects()
eo._apply_build_path_action(build_path=build_path, action_instructions=action_instruction)
self.assertEqual(
'test_non_super_val', eo.epjson['non_super_object']['test_non_super_object']['test_non_super_field'])
return
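# As this test illustrates, the 'Objects' list in a build path action may mix super objects
# (dictionaries with 'Fields' and 'Connectors' that remain in the build path) with plain epJSON
# objects such as 'non_super_object', which appear to be written directly to the class epjson
# dictionary rather than kept in the returned path.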
def test_build_path_connections(self):
# Note ObjectReference is not needed
eo = ExpandSystem(template=mock_system_template)
object_list = eo._process_build_path(
option_tree=mock_system_option_tree['OptionTree']['HVACTemplate']['System']['VAV']['BuildPath'])
self.assertEqual(
'{} Cooling Coil Outlet',
object_list[-1]['Fan:VariableVolume']['air_inlet_node_name']
)
return
def test_build_path_bad_build_path(self):
# Note ObjectReference is not needed
eo = ExpandSystem(template=mock_system_template)
eo._get_option_tree_leaf = MagicMock()
eo._get_option_tree_leaf.return_value = {}
eo._apply_transitions = MagicMock()
eo._apply_transitions.return_value = {}
with self.assertRaisesRegex(PyExpandObjectsYamlStructureException, 'Build Path action is incorrectly formatted'):
eo._process_build_path(
option_tree={'Actions': [{'bad': 'structure'}]})
return
def test_build_path_action_never_applied(self):
# Note ObjectReference is not needed
eo = ExpandSystem(template=mock_system_template)
eo._get_option_tree_leaf = MagicMock()
eo._get_option_tree_leaf.return_value = {}
eo._apply_transitions = MagicMock()
eo._apply_transitions.return_value = {}
with self.assertRaisesRegex(PyExpandObjectsYamlStructureException, 'A build path action was not applied '
'for template field'):
eo._process_build_path(
option_tree={
'Actions': [
{
'template_field': {
'Location': 'Coil:Cooling.*',
'ActionType': 'Insert',
'Objects': []
}
}
]
}
)
return
def test_build_path_action_insert_by_location(self):
build_path = mock_build_path
# Note ObjectReference is not needed
action_instruction = {
'Location': 1,
'Occurrence': 1,
'ActionType': 'Insert',
'Objects': [
{
"test_object_type": {
"Fields": {
'name': 'test_object_name',
'test_field': 'test_value',
'test_field_3': 'test_value_3'
},
"Connectors": {'AirLoop': {"Inlet": 'test_field', "Outlet": "test_field_3"}}
}
}
]
}
eo = ExpandObjects()
build_path = eo._apply_build_path_action(build_path=build_path, action_instructions=action_instruction)
self.assertEqual('test_object_type', list(build_path[1].keys())[0])
return
def test_build_path_action_replace_by_location(self):
build_path = mock_build_path
# Note ObjectReference is not needed
action_instruction = {
'Location': 1,
'ActionType': 'Replace',
'Objects': [
{
"test_object_type": {
"Fields": {
'name': 'test_object_name',
'test_field': 'test_value',
'test_field_3': 'test_value_3'
},
"Connectors": {'AirLoop': {"Inlet": 'test_field', "Outlet": "test_field_3"}}
}
}
]
}
eo = ExpandObjects()
build_path = eo._apply_build_path_action(build_path=build_path, action_instructions=action_instruction)
self.assertEqual('test_object_type', list(build_path[1].keys())[0])
self.assertEqual(2, len(build_path))
return
def test_build_path_action_remove_by_location(self):
build_path = mock_build_path
# Note ObjectReference is not needed
action_instruction = {
'Location': 1,
'ActionType': 'Remove'
}
eo = ExpandObjects()
build_path = eo._apply_build_path_action(build_path=build_path, action_instructions=action_instruction)
self.assertEqual('OutdoorAir:Mixer', list(build_path[0].keys())[0])
self.assertEqual(1, len(build_path))
return
def test_build_path_action_insert_by_object_reference(self):
build_path = mock_build_path
action_instruction = {
'ObjectReference': 'OutdoorAir:Mixer',
'Location': 'After',
'Occurrence': 1,
'ActionType': 'Insert',
'Objects': [
{
"test_object_type": {
"Fields": {
'name': 'test_object_name',
'test_field': 'test_value',
'test_field_3': 'test_value_3'
},
"Connectors": {'AirLoop': {"Inlet": 'test_field', "Outlet": "test_field_3"}}
}
}
]
}
eo = ExpandObjects()
build_path = eo._apply_build_path_action(build_path=build_path, action_instructions=action_instruction)
self.assertEqual('test_object_type', list(build_path[1].keys())[0])
return
def test_build_path_action_remove_by_object_reference(self):
build_path = mock_build_path
action_instruction = {
'ObjectReference': 'OutdoorAir:Mixer',
'ActionType': 'Remove'
}
eo = ExpandObjects()
build_path = eo._apply_build_path_action(build_path=build_path, action_instructions=action_instruction)
self.assertEqual('Fan:VariableVolume', list(build_path[0].keys())[0])
return
def test_build_path_action_replace_by_object_reference(self):
build_path = mock_build_path
action_instruction = {
'ObjectReference': 'OutdoorAir:Mixer',
'ActionType': 'Replace',
'Objects': [
{
"test_object_type": {
"Fields": {
'name': 'test_object_name',
'test_field': 'test_value',
'test_field_3': 'test_value_3'
},
"Connectors": {'AirLoop': {"Inlet": 'test_field', "Outlet": "test_field_3"}}
}
}
]
}
eo = ExpandObjects()
build_path = eo._apply_build_path_action(build_path=build_path, action_instructions=action_instruction)
self.assertEqual('test_object_type', list(build_path[0].keys())[0])
return
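# Summary of the action instruction formats exercised above: 'ActionType' is one of Insert,
# Replace or Remove, and the target is given either as an integer 'Location' index into the build
# path, or as an 'ObjectReference' regex optionally combined with 'Location': 'Before'/'After' and
# an 'Occurrence' count selecting which match is used, e.g.
# {'ObjectReference': 'OutdoorAir:Mixer', 'Location': 'After', 'Occurrence': 1,
# 'ActionType': 'Insert', 'Objects': [...]}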
def test_reject_build_path_action_with_bad_occurrence(self):
build_path = mock_build_path
# Note ObjectReference is not needed
action_instruction = {
'Location': 1,
'Occurrence': 'bad',
'ActionType': 'Insert',
'Objects': [
{
"test_object_type": {
"Fields": {
'name': 'test_object_name',
'test_field': 'test_value',
'test_field_3': 'test_value_3'
},
"Connectors": {'AirLoop': {"Inlet": 'test_field', "Outlet": "test_field_3"}}
}
}
]
}
eo = ExpandObjects()
with self.assertRaisesRegex(PyExpandObjectsYamlStructureException, 'must be a non-negative integer'):
eo._apply_build_path_action(build_path=build_path, action_instructions=action_instruction)
action_instruction = {
'Location': 1,
'Occurrence': -1,
'ActionType': 'Insert',
'Objects': [
{
"test_object_type": {
"Fields": {
'name': 'test_object_name',
'test_field': 'test_value',
'test_field_3': 'test_value_3'
},
"Connectors": {'AirLoop': {"Inlet": 'test_field', "Outlet": "test_field_3"}}
}
}
]
}
with self.assertRaisesRegex(PyExpandObjectsYamlStructureException, 'must be a non-negative integer'):
eo._apply_build_path_action(build_path=build_path, action_instructions=action_instruction)
return
def test_reject_build_path_action_with_bad_action(self):
build_path = mock_build_path
# Note ObjectReference is not needed
action_instruction = {
'Location': 1,
'Occurrence': 0,
'ActionType': 'bad',
'Objects': [
{
"test_object_type": {
"Fields": {
'name': 'test_object_name',
'test_field': 'test_value',
'test_field_3': 'test_value_3'
},
"Connectors": {'AirLoop': {"Inlet": 'test_field', "Outlet": "test_field_3"}}
}
}
]
}
eo = ExpandObjects()
with self.assertRaisesRegex(PyExpandObjectsYamlStructureException, 'Invalid action type'):
eo._apply_build_path_action(build_path=build_path, action_instructions=action_instruction)
return
def test_reject_build_path_action_with_bad_location_text(self):
build_path = mock_build_path
# Note ObjectReference is not needed
action_instruction = {
'Location': 'bad',
'Occurrence': 0,
'ActionType': 'Insert',
'Objects': [
{
"test_object_type": {
"Fields": {
'name': 'test_object_name',
'test_field': 'test_value',
'test_field_3': 'test_value_3'
},
"Connectors": {'AirLoop': {"Inlet": 'test_field', "Outlet": "test_field_3"}}
}
}
]
}
eo = ExpandObjects()
with self.assertRaisesRegex(PyExpandObjectsYamlStructureException, 'Build path action insert reference value '):
eo._apply_build_path_action(build_path=build_path, action_instructions=action_instruction)
return
def test_reject_build_path_action_with_occurrence_too_high(self):
build_path = mock_build_path
# Note ObjectReference is not needed
action_instruction = {
'ObjectReference': 'OutdoorAir:Mixer',
'Location': 'Before',
'Occurrence': 10,
'ActionType': 'Insert',
'Objects': [
{
"test_object_type": {
"Fields": {
'name': 'test_object_name',
'test_field': 'test_value',
'test_field_3': 'test_value_3'
},
"Connectors": {'AirLoop': {"Inlet": 'test_field', "Outlet": "test_field_3"}}
}
}
]
}
eo = ExpandObjects()
with self.assertRaisesRegex(PyExpandObjectsYamlStructureException, 'The number of occurrence matches'):
eo._apply_build_path_action(build_path=build_path, action_instructions=action_instruction)
return
def test_reject_build_path_action_missing_keys(self):
build_path = mock_build_path
# Note ObjectReference is not needed
action_instruction = {
'ObjectReference': 'OutdoorAir:Mixer',
'Location': 'Before',
'Occurrence': 10,
'Objects': [
{
"test_object_type": {
"Fields": {
'name': 'test_object_name',
'test_field': 'test_value',
'test_field_3': 'test_value_3'
},
"Connectors": {'AirLoop': {"Inlet": 'test_field', "Outlet": "test_field_3"}}
}
}
]
}
eo = ExpandObjects()
with self.assertRaisesRegex(
PyExpandObjectsYamlStructureException, 'Build Path action is missing required instructions'):
eo._apply_build_path_action(build_path=build_path, action_instructions=action_instruction)
return
def test_insert_on_build_path_from_option_tree(self):
test_system_option_tree = copy.deepcopy(mock_system_option_tree)
test_system_option_tree['OptionTree']['HVACTemplate']['System']['VAV']['BuildPath']['Actions'] = [
{
'template_field': {
'template_test_value': {
'Location': 1,
'Occurrence': 1,
'ActionType': 'Insert',
'Objects': [
{
"test_object_type": {
"Fields": {
'name': 'test_object_name',
'test_field': 'test_value',
'test_field_3': 'test_value_3'
},
"Connectors": {'AirLoop': {"Inlet": 'test_field', "Outlet": "test_field_3"}}
}
}
]
}
}
},
{
'template_field': {
'template_test_value': {
'Location': 2,
'Occurrence': 1,
'ActionType': 'Insert',
'Objects': [
{
"test_object_type_2": {
"Fields": {
'name': 'test_object_name_2',
'test_field_2': 'test_value_2',
'test_field_4': 'test_value_4'
},
"Connectors": {'AirLoop': {"Inlet": 'test_field_2', "Outlet": "test_field_4"}}
}
}
]
}
}
}
]
eo = ExpandObjects(
template=mock_system_template,
expansion_structure=test_system_option_tree)
structure_hierarchy = ['OptionTree', 'HVACTemplate', 'System', 'VAV']
option_tree = eo._get_option_tree(structure_hierarchy=structure_hierarchy)
build_path = eo._process_build_path(option_tree=option_tree['BuildPath'])
self.assertEqual('test_object_type', list(build_path[1].keys())[0])
self.assertEqual('test_object_type_2', list(build_path[2].keys())[0])
return
def test_insert_on_build_path_from_option_tree_with_none_value(self):
test_system_option_tree = copy.deepcopy(mock_system_option_tree)
test_system_option_tree['OptionTree']['HVACTemplate']['System']['VAV']['BuildPath']['Actions'] = [
{
'non_present_template_field': {
'None': {
'Location': 1,
'Occurrence': 1,
'ActionType': 'Insert',
'Objects': [
{
"test_object_type": {
"Fields": {
'name': 'test_object_name',
'test_field': 'test_value',
'test_field_3': 'test_value_3'
},
"Connectors": {'AirLoop': {"Inlet": 'test_field', "Outlet": "test_field_3"}}
}
}
]
}
}
}
]
eo = ExpandObjects(
template=mock_system_template,
expansion_structure=test_system_option_tree)
structure_hierarchy = ['OptionTree', 'HVACTemplate', 'System', 'VAV']
option_tree = eo._get_option_tree(structure_hierarchy=structure_hierarchy)
build_path = eo._process_build_path(option_tree=option_tree['BuildPath'])
self.assertEqual('test_object_type', list(build_path[1].keys())[0])
return
def test_replace_on_build_path_from_option_tree(self):
test_system_option_tree = copy.deepcopy(mock_system_option_tree)
test_system_option_tree['OptionTree']['HVACTemplate']['System']['VAV']['BuildPath']['Actions'] = [
{
'template_field': {
'template_test_value': {
'Location': 1,
'Occurrence': 1,
'ActionType': 'Insert',
'Objects': [
{
"test_object_type": {
"Fields": {
'name': 'test_object_name',
'test_field': 'test_value',
'test_field_3': 'test_value_3'
},
"Connectors": {'AirLoop': {"Inlet": 'test_field', "Outlet": "test_field_3"}}
}
}
]
}
}
},
{
'template_field': {
'template_test_value': {
'Location': 2,
'Occurrence': 1,
'ActionType': 'Replace',
'Objects': [
{
"test_object_type_2": {
"Fields": {
'name': 'test_object_name_2',
'test_field_2': 'test_value_2',
'test_field_4': 'test_value_4'
},
"Connectors": {'AirLoop': {"Inlet": 'test_field_2', "Outlet": "test_field_4"}}
}
}
]
}
}
}
]
eo = ExpandObjects(
template=mock_system_template,
expansion_structure=test_system_option_tree)
structure_hierarchy = ['OptionTree', 'HVACTemplate', 'System', 'VAV']
option_tree = eo._get_option_tree(structure_hierarchy=structure_hierarchy)
build_path = eo._process_build_path(option_tree=option_tree['BuildPath'])
self.assertEqual('test_object_type', list(build_path[1].keys())[0])
self.assertEqual('test_object_type_2', list(build_path[2].keys())[0])
return
def test_remove_on_build_path_from_option_tree(self):
test_system_option_tree = copy.deepcopy(mock_system_option_tree)
test_system_option_tree['OptionTree']['HVACTemplate']['System']['VAV']['BuildPath']['Actions'] = [
{
'template_field': {
'template_test_value': {
'Location': 0,
'ActionType': 'Remove',
}
}
}
]
eo = ExpandObjects(
template=mock_system_template,
expansion_structure=test_system_option_tree)
structure_hierarchy = ['OptionTree', 'HVACTemplate', 'System', 'VAV']
option_tree = eo._get_option_tree(structure_hierarchy=structure_hierarchy)
build_path = eo._process_build_path(option_tree=option_tree['BuildPath'])
self.assertEqual('Fan:VariableVolume', list(build_path[0].keys())[0])
return
def test_complex_actions_on_build_path_from_option_tree(self):
test_system_option_tree = copy.deepcopy(mock_system_option_tree)
test_system_option_tree['OptionTree']['HVACTemplate']['System']['VAV']['BuildPath']['Actions'] = [
{
'template_field': {
'template_test_value': {
'Location': 0,
'Occurrence': 1,
'ActionType': 'Replace',
'Objects': [
{
"test_object_type": {
"Fields": {
'name': 'test_object_name',
'test_field': 'test_value',
'test_field_3': 'test_value_3'
},
"Connectors": {'AirLoop': {"Inlet": 'test_field', "Outlet": "test_field_3"}}
}
}
]
}
}
},
{
'template_field': {
'template_test_value': {
'Location': 0,
'ActionType': 'Remove',
}
}
}
]
eo = ExpandObjects(
template=mock_system_template,
expansion_structure=test_system_option_tree)
structure_hierarchy = ['OptionTree', 'HVACTemplate', 'System', 'VAV']
option_tree = eo._get_option_tree(structure_hierarchy=structure_hierarchy)
build_path = eo._process_build_path(option_tree=option_tree['BuildPath'])
self.assertEqual(1, len(build_path))
return
def test_convert_build_path(self):
build_path = [
{
"Object:1": {
"Fields": {
"field_1": "value_1",
"field_2": "value_2"
},
"Connectors": {
"AirLoop": {
"Inlet": "field_1",
"Outlet": "field_2"
}
}
}
},
{
"Object:2": {
"Fields": {
"field_3": "value_3",
"field_4": "value_4"
},
"Connectors": {
"AirLoop": {
"Inlet": "field_3",
"Outlet": "field_4"
}
}
}
}
]
eo = ExpandObjects()
output = eo._connect_and_convert_build_path_to_object_list(build_path=build_path)
self.assertEqual("value_2", output[1]["Object:2"]["field_3"])
return
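# The chaining rule demonstrated here: when a build path is converted to an object list, each
# super object's Inlet connector field is overwritten with the preceding object's Outlet field
# value, which is why Object:2's 'field_3' (its Inlet) ends up equal to 'value_2' (Object:1's
# Outlet value).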
def test_convert_build_path_no_build_path(self):
build_path = []
eo = ExpandObjects()
with self.assertRaisesRegex(PyExpandObjectsException, 'Build path was not provided nor was it'):
eo._connect_and_convert_build_path_to_object_list(build_path=build_path)
return
def test_convert_build_path_no_connectors(self):
build_path = [
{
"Object:1": {
"Fields": {
"field_1": "value_1",
"field_2": "value_2"
}
}
}
]
eo = ExpandObjects()
with self.assertRaisesRegex(PyExpandObjectsYamlStructureException, 'Referenced super object is missing Connectors key'):
eo._connect_and_convert_build_path_to_object_list(build_path=build_path)
return
def test_convert_build_path_bad_connectors(self):
build_path = [
{
"Object:1": {
"Fields": {
"field_1": "value_1",
"field_2": "value_2"
},
"Connectors": {
"AirLoop": {
"Inlet": "field_4",
"Outlet": "field_5"
}
}
}
}
]
eo = ExpandObjects()
with self.assertRaisesRegex(PyExpandObjectsYamlStructureException, 'There is a Field/Connector mismatch'):
eo._connect_and_convert_build_path_to_object_list(build_path=build_path)
return
def test_retrieve_build_path_objects_from_option_tree(self):
eo = ExpandSystem(template=mock_system_template)
test_system_option_tree = copy.deepcopy(mock_system_option_tree)
test_system_option_tree['OptionTree']['HVACTemplate']['System']['VAV']['BuildPath']['Actions'] = [
{
'template_field': {
'template_test_value': {
'Location': 1,
'Occurrence': 1,
'ActionType': 'Insert',
'Objects': [
{
"test_object_type": {
"Fields": {
'name': 'test_object_name',
'test_field': 'test_value',
'test_field_3': 'test_value_3'
},
"Connectors": {'AirLoop': {"Inlet": 'test_field', "Outlet": "test_field_3"}}
}
}
]
}
}
},
]
eo._get_option_tree = MagicMock()
eo._get_option_tree.return_value = test_system_option_tree['OptionTree']['HVACTemplate']['System']['VAV']
output = eo._get_option_tree_objects(structure_hierarchy=['not', 'important', 'because', 'mocked'])
self.assertEqual(
eo.summarize_epjson(output),
{'OutdoorAir:Mixer': 1, 'test_object_type': 1, 'Fan:VariableVolume': 1}
)
return
def test_complex_inputs_build_path(self):
es = ExpandSystem(template=mock_system_template)
output = es._resolve_complex_input(
epjson={},
build_path=mock_build_path,
field_name="field_1",
input_value={
"BuildPathReference": {
"Location": 1,
"ValueLocation": "Outlet"
}
}
)
self.assertEqual('template_name Supply Fan Outlet', [i for i in output][0]['value'])
return
def test_complex_inputs_build_path_reference_by_object_type(self):
es = ExpandSystem(template=mock_system_template)
es.build_path = [
{
'Fan:VariableVolume': {
'Fields': {
'name': '{} Return Fan',
'air_inlet_node_name': '{} Return Fan Inlet',
'air_outlet_node_name': '{} Return Fan Outlet'},
'Connectors': {
'AirLoop': {
'Inlet': 'air_inlet_node_name',
'Outlet': 'air_outlet_node_name'}}}},
{
'OutdoorAir:Mixer': {
'Fields': {
'name': '{} OA Mixing Box',
'mixed_air_node_name': '{} Mixed Air Outlet',
'outdoor_air_stream_node_name': '{} Outside Air Inlet',
'relief_air_stream_node_name': '{} Relief Air Outlet',
'return_air_stream_node_name': '{} Air Loop Inlet'},
'Connectors': {
'AirLoop': {
'Inlet': 'return_air_stream_node_name',
'Outlet': 'mixed_air_node_name'}}}},
{
'Fan:VariableVolume': {
'Fields': {
'name': '{} Supply Fan',
'air_inlet_node_name': '{} Supply Fan Inlet',
'air_outlet_node_name': '{} Supply Fan Outlet'},
'Connectors': {
'AirLoop': {
'Inlet': 'air_inlet_node_name',
'Outlet': 'air_outlet_node_name'}}}}]
es.unique_name = 'template_name'
output = es._resolve_complex_input(
epjson={},
field_name="field_1",
input_value={
"BuildPathReference": {
"Location": 'OutdoorAir:M.*',
"ValueLocation": "Outlet"
}
}
)
self.assertEqual('template_name Mixed Air Outlet', [i for i in output][0]['value'])
return
def test_complex_inputs_build_path_reference_by_object_type_occurence(self):
es = ExpandSystem(template={'template_type': {'template_name': {}}})
es.build_path = [
{
'Fan:VariableVolume': {
'Fields': {
'name': '{} Return Fan',
'air_inlet_node_name': '{} Return Fan Inlet',
'air_outlet_node_name': '{} Return Fan Outlet'},
'Connectors': {
'AirLoop': {
'Inlet': 'air_inlet_node_name',
'Outlet': 'air_outlet_node_name'}}}},
{
'OutdoorAir:Mixer': {
'Fields': {
'name': '{} OA Mixing Box',
'mixed_air_node_name': '{} Mixed Air Outlet',
'outdoor_air_stream_node_name': '{} Outside Air Inlet',
'relief_air_stream_node_name': '{} Relief Air Outlet',
'return_air_stream_node_name': '{} Air Loop Inlet'},
'Connectors': {
'AirLoop': {
'Inlet': 'return_air_stream_node_name',
'Outlet': 'mixed_air_node_name'}}}},
{
'Fan:VariableVolume': {
'Fields': {
'name': '{} Supply Fan',
'air_inlet_node_name': '{} Supply Fan Inlet',
'air_outlet_node_name': '{} Supply Fan Outlet'},
'Connectors': {
'AirLoop': {
'Inlet': 'air_inlet_node_name',
'Outlet': 'air_outlet_node_name'}}}}
]
es.unique_name = 'TEST SYSTEM'
output = es._resolve_complex_input(
epjson={},
field_name="field_1",
input_value={
"BuildPathReference": {
"Location": 'Fan:.*',
'Occurrence': 2,
"ValueLocation": "Outlet"
}
}
)
self.assertEqual(
'TEST SYSTEM Supply Fan Outlet',
[o for o in output][0]['value']
)
output = es._resolve_complex_input(
epjson={},
field_name="field_1",
input_value={
"BuildPathReference": {
"Location": 'Fan:.*',
'Occurrence': -1,
"ValueLocation": "Outlet"
}
}
)
self.assertEqual(
'TEST SYSTEM Supply Fan Outlet',
[o for o in output][0]['value']
)
output = es._resolve_complex_input(
epjson={},
field_name="field_1",
input_value={
"BuildPathReference": {
"Location": 'Fan:.*',
'Occurrence': 1,
"ValueLocation": "Outlet"
}
}
)
self.assertEqual(
'TEST SYSTEM Return Fan Outlet',
[o for o in output][0]['value']
)
return
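# Occurrence semantics shown above for BuildPathReference lookups with a regex 'Location':
# 'Occurrence': 1 resolves to the first matching super object (the Return Fan), 2 to the second
# (the Supply Fan), and -1 to the last match in the build path (also the Supply Fan here).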
def test_reject_complex_inputs_build_path_reference_by_object_type_bad_occurence(self):
es = ExpandSystem(template={'template_type': {'template_name': {}}})
es.build_path = [
{
'Fan:VariableVolume': {
'Fields': {
'name': '{} Return Fan',
'air_inlet_node_name': '{} Return Fan Inlet',
'air_outlet_node_name': '{} Return Fan Outlet'},
'Connectors': {
'AirLoop': {
'Inlet': 'air_inlet_node_name',
'Outlet': 'air_outlet_node_name'}}}}]
es.unique_name = 'TEST SYSTEM'
output = es._resolve_complex_input(
epjson={},
field_name="field_1",
input_value={
"BuildPathReference": {
"Location": 'Fan:.*',
'Occurrence': 'BadValue',
"ValueLocation": "Outlet"
}
}
)
with self.assertRaises(PyExpandObjectsYamlStructureException):
print([o for o in output])
return
def test_complex_inputs_build_path_class_attribute(self):
es = ExpandSystem(template=mock_system_template)
es.expansion_structure = copy.deepcopy(mock_system_option_tree)
es._create_objects()
output = es._resolve_complex_input(
epjson={},
field_name="field_1",
input_value={
"BuildPathReference": {
"Location": -1,
"ValueLocation": "Outlet"
}
}
)
self.assertEqual('template_name Supply Fan Outlet', [i for i in output][0]['value'])
return
def test_complex_inputs_build_path_class_attribute_get_object(self):
es = ExpandSystem(template=mock_system_template)
es.expansion_structure = copy.deepcopy(mock_system_option_tree)
es._create_objects()
output = es._resolve_complex_input(
epjson={},
field_name="field_1",
input_value={
"BuildPathReference": {
"Location": -1,
"ValueLocation": "self"
}
}
)
self.assertEqual('Fan:VariableVolume', [i for i in output][0]['value'])
return
def test_complex_inputs_build_path_class_attribute_get_name(self):
es = ExpandSystem(template=mock_system_template)
es.expansion_structure = copy.deepcopy(mock_system_option_tree)
es._create_objects()
output = es._resolve_complex_input(
epjson={},
field_name="field_1",
input_value={
"BuildPathReference": {
"Location": -1,
"ValueLocation": "key"
}
}
)
self.assertEqual('template_name Supply Fan', [i for i in output][0]['value'])
return
def test_separate_objects_build_path(self):
eo = ExpandObjects()
eo.epjson = {}
object_list = [
{
"Object:Type1": {
"Fields": {
"name": "ObjectName1",
"field_1": "val_1",
"field_2": "val_2"
},
"Connectors": {
"AirLooop": {
"Inlet": "field_1",
"Outlet": "field_2"
}
}
}
},
{
"Object:Type2": {
"name": "ObjectName2",
"field_3": "val_3",
"field_4": "val_4"
}
}
]
output = eo._parse_build_path(object_list=object_list)
self.assertEqual('Object:Type1', list(output[0].keys())[0])
self.assertEqual('val_3', eo.epjson['Object:Type2']['ObjectName2']['field_3'])
return
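# _parse_build_path, as used here, splits a mixed object list: entries carrying 'Fields' and
# 'Connectors' (super objects) are returned as the build path, while plain epJSON entries such as
# 'Object:Type2' are written straight into eo.epjson.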
def test_reject_complex_inputs_build_path_reference_no_build_path(self):
es = ExpandSystem(template=mock_system_template)
es.build_path = {}
output = es._resolve_complex_input(
epjson={},
field_name="field_1",
input_value={
"BuildPathReference": {
"Location": -1,
"ValueLocation": "Outlet"
}
}
)
with self.assertRaisesRegex(PyExpandObjectsYamlStructureException, 'Build Path complex input was specified'):
self.assertEqual('template_name Supply Fan Outlet', [i for i in output][0]['value'])
return
def test_reject_complex_inputs_build_path_missing_inputs(self):
es = ExpandSystem(template=mock_system_template)
output = es._resolve_complex_input(
epjson={},
build_path=mock_build_path,
field_name="field_1",
input_value={
"BuildPathReference": {
"ValueLocation": "Outlet"
}
}
)
with self.assertRaisesRegex(PyExpandObjectsYamlStructureException, 'Object field could not be'):
self.assertEqual('template_name Supply Fan Outlet', [i for i in output][0]['value'])
output = es._resolve_complex_input(
epjson={},
build_path=mock_build_path,
field_name="field_1",
input_value={
"BuildPathReference": {
"Location": -1
}
}
)
with self.assertRaisesRegex(PyExpandObjectsYamlStructureException, 'Object field could not be'):
self.assertEqual('template_name Supply Fan Outlet', [i for i in output][0]['value'])
return
def test_reject_complex_inputs_build_path_bad_location(self):
es = ExpandSystem(template=mock_system_template)
output = es._resolve_complex_input(
epjson={},
build_path=mock_build_path,
field_name="field_1",
input_value={
"BuildPathReference": {
"ValueLocation": "Outlet",
"Location": 10
}
}
)
with self.assertRaisesRegex(PyExpandObjectsYamlStructureException, 'Object field could not be'):
self.assertEqual('template_name Supply Fan Outlet', [i for i in output][0]['value'])
return
def test_reject_complex_inputs_build_path_bad_value(self):
es = ExpandSystem(template=mock_system_template)
output = es._resolve_complex_input(
epjson={},
build_path=mock_build_path,
field_name="field_1",
input_value={
"BuildPathReference": {
"ValueLocation": "Bad_value",
"Location": -1
}
}
)
with self.assertRaisesRegex(PyExpandObjectsYamlStructureException, 'Object field could not be'):
self.assertEqual('template_name Supply Fan Outlet', [i for i in output][0]['value'])
return
def test_complex_inputs_compact_schedule(self):
eo = ExpandObjects()
output = eo.resolve_objects(epjson={
'Schedule:Compact': {
"HVACTemplate-Always12.8": {
'structure': 'Objects:Common:Objects:Schedule:Compact:ALWAYS_VAL',
'insert_values': [12.8, ]
}
}
})
self.assertEqual(
12.8,
output['Schedule:Compact']['HVACTemplate-Always12.8']['data'][-1]['field'])
return
def test_complex_inputs_compact_schedule_full(self):
eo = ExpandObjects()
output = eo.resolve_objects(epjson={
'Schedule:Compact': {
"HVACTemplate-Always12.8": {
'schedule_type_limits_name': 'Any Number',
'data': [
{'field': 'Through 12/31'},
{'field': 'For AllDays'},
{'field': 'Until 24:00'},
{'field': 12.8}
]
}
}
})
self.assertEqual(
12.8,
output['Schedule:Compact']['HVACTemplate-Always12.8']['data'][-1]['field'])
return
def test_complex_inputs_create_schedule_from_transition(self):
es = ExpandSystem(template={
'HVACTemplate:System:VAV': {
'template_name': {
'template_field': 'template_test_value',
'cooling_coil_design_setpoint': 12.8
}
}
})
es.expansion_structure = {
'Objects': {
'Common': {
'Objects': {
'Schedule': {
'Compact': {
'ALWAYS_VAL': {
'name': 'HVACTemplate-Always{}',
'schedule_type_limits_name': 'Any Number',
'data': [
{'field': 'Through 12/31'},
{'field': 'For AllDays'},
{'field': 'Until 24:00'},
{'field': '{}'}
]
}
}
}
}
}
},
'OptionTree': {
'HVACTemplate': {
'System': {
'VAV': {
'BaseObjects': {
'Objects': [
{
'SetpointManager:Scheduled': {
'name': '{} Cooling Supply Air Temp Manager',
'control_variable': 'Temperature'
}
}
],
'Transitions': [
{
"SetpointManager:Scheduled": {
"cooling_coil_design_setpoint": {
'schedule_name': 'HVACTemplate-Always{cooling_coil_design_setpoint}'
}
}
}
]
}
}
}
}
}
}
es._create_objects()
es.epjson = es.resolve_objects(epjson=es.epjson)
self.assertEqual(
12.8,
es.epjson['Schedule:Compact']['HVACTemplate-Always12.8']['data'][-1]['field'])
return
def test_zonehvac_equipmentlist(self):
ez = ExpandZone(template={
'HVACTemplate:Zone:FanCoil': {
'zone_template_name': {
"cooling_coil_type": "ChilledWater",
"heating_coil_type": "HotWater",
'zone_name': 'test_zone'}}})
ez._create_objects()
self.assertEqual(
'test_zone Fan Coil',
ez.epjson['ZoneHVAC:EquipmentList']['test_zone Equipment']['equipment'][0]['zone_equipment_name'])
return
def test_zonehvac_equipmentlist_baseboard(self):
ez = ExpandZone(template={
'HVACTemplate:Zone:FanCoil': {
'zone_template_name': {
"cooling_coil_type": "ChilledWater",
"heating_coil_type": "HotWater",
'zone_name': 'test_zone',
'baseboard_heating_type': 'HotWater'}}})
ez._create_objects()
self.assertEqual(
'test_zone Baseboard Heat',
ez.epjson['ZoneHVAC:EquipmentList']['test_zone Equipment']['equipment'][1]['zone_equipment_name'])
return
def test_zonehvac_equipmentlist_doas(self):
ez = ExpandZone(template={
'HVACTemplate:Zone:FanCoil': {
'zone_template_name': {
"cooling_coil_type": "ChilledWater",
"heating_coil_type": "HotWater",
'zone_name': 'test_zone',
'dedicated_outdoor_air_system_name': 'DOAS'}}})
ez._create_objects()
self.assertEqual(
'test_zone DOAS ATU',
ez.epjson['ZoneHVAC:EquipmentList']['test_zone Equipment']['equipment'][0]['zone_equipment_name'])
return
def test_zonehvac_equipmentlist_doas_baseboard(self):
ez = ExpandZone(template={
'HVACTemplate:Zone:FanCoil': {
'zone_template_name': {
"cooling_coil_type": "ChilledWater",
"heating_coil_type": "HotWater",
'zone_name': 'test_zone',
'baseboard_heating_type': 'HotWater',
'dedicated_outdoor_air_system_name': 'DOAS'}}})
ez._create_objects()
self.assertEqual(
'test_zone DOAS ATU',
ez.epjson['ZoneHVAC:EquipmentList']['test_zone Equipment']['equipment'][0]['zone_equipment_name'])
self.assertEqual(
'test_zone Baseboard Heat',
ez.epjson['ZoneHVAC:EquipmentList']['test_zone Equipment']['equipment'][-1]['zone_equipment_name'])
return
``` |
{
"source": "johngrantuk/PiUpDue",
"score": 4
} |
#### File: PiUpDue/piupdue/ArduinoFlashSerial.py
```python
import time, binascii, sys
import ArduinoFlashHardValues, ArduinoFlashXmodem
def ReadWord(SerialPort, WriteAddress, Log):
""" Writes to address and flips the received bits and stores in a 32 bit variable. """
Log.Log("ReadWord(), Writing: " + WriteAddress)
SerialPort.write(WriteAddress)
data = ReadSerial(SerialPort, Log)
value = (ord(data[3]) << 24 | ord(data[2]) << 16 | ord(data[1]) << 8 | ord(data[0]) << 0) # ord() gives value of byte. In the case where data = {1,2,3,4}, value = 00000100 00000011 00000010 00000001, ie 4,3,2,1
Log.Log("Value: " + str(value) + ", " + hex(value))
return value
def ReadSerialByte(ser, Log):
""" Reads one byte of data from serial port. """
toBeRead = ser.inWaiting()
if toBeRead > 0:
data = ser.read(1)
Log.Log("Read data: " + data + ", " + binascii.hexlify(data))
return data
else:
#Log.Log("No Data To Be Read")
return ""
def ReadSerial(ser, Log):
""" Reads any data in serial port. """
time.sleep(2)
while 1:
toBeRead = ser.inWaiting()
Log.Log("ReadSerial(), " + str(toBeRead) + " bytes in buffer.")
if toBeRead > 0:
data = ser.read(toBeRead)
hexData = ":".join("{:02x}".format(ord(c)) for c in data) # Just for display purposes.
Log.Log("Read data: " + data + "\nIn hex: " + hexData)
return data
else:
Log.Log("No Data To Be Read")
break
def WriteWord(SerialPort, Addr, Value, Log):
""" Converts addr and value into message of format: W{addr},{value}# WXXXXXXXX,XXXXXXXX# and writes to serial port."""
addr = '{0:08X}'.format(Addr)
value = '{0:08X}'.format(Value)
output = "W" + addr + "," + value + '#' # W20001020,20010000#
Log.Log("Writing Word: " + output)
if ArduinoFlashHardValues.LiveWrite:
SerialPort.write(output)
def Write(SerialPort, Log, Addr, Data, DataSize, IsNativePort):
"""
Converts addr and data size into message of format: S{addr},{size}# SXXXXXXXX,XXXXXXXX# and writes to serial port.
Then transfers the data with WriteBinary (native port) or WriteXmodem.
"""
addr = '{0:08X}'.format(Addr)
size = '{0:08X}'.format(DataSize)
output = "S" + addr + "," + size + '#'
Log.Log("Writing: " + output)
if ArduinoFlashHardValues.LiveWrite:
SerialPort.write(output)
if IsNativePort:
ArduinoFlashXmodem.WriteBinary(SerialPort, Log, Data, len(Data))
else:
ArduinoFlashXmodem.WriteXmodem(SerialPort, Log, Data, DataSize)
def Go(SerialPort, Log, Addr):
""" Converts addr into message of format: G{addr}# GXXXXXXXX# and writes to serial port."""
addr = '{0:08X}'.format(Addr)
output = "G" + addr + "#" #snprintf((char*) cmd, sizeof(cmd), "G%08X#", addr); G20001020#
Log.Log("Serial.Go(): " + output)
if ArduinoFlashHardValues.LiveWrite:
SerialPort.write(output)
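# The command strings built in this module follow an ASCII monitor protocol (assumed to be the
# SAM-BA style used by the Arduino Due bootloader, based on the formats above):
# 'W{addr},{value}#' writes a 32-bit word, 'S{addr},{size}#' announces a data transfer that
# Write() then completes over raw binary or Xmodem, and 'G{addr}#' jumps to the given address,
# e.g. Go(SerialPort, Log, 0x20001020) sends 'G20001020#'.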
```
#### File: PiUpDue/piupdue/ArduinoFlashXmodem.py
```python
import ctypes, time
import ArduinoFlashHardValues, ArduinoFlashSerial
BLK_SIZE = 128
MAX_RETRIES = 20
code = [0x09, 0x48, 0x0a, 0x49, 0x0a, 0x4a, 0x02, 0xe0, 0x08, 0xc9, 0x08, 0xc0, 0x01, 0x3a, 0x00, 0x2a,0xfa, 0xd1, 0x04, 0x48, 0x00, 0x28, 0x01, 0xd1, 0x01, 0x48, 0x85, 0x46, 0x70, 0x47, 0xc0, 0x46,0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,0x00, 0x00, 0x00, 0x00]
def crc16Calc(Data, StartElement, Len):
""" Calculates 2 Byte CRC """
crc16Table = [0x0000,0x1021,0x2042,0x3063,0x4084,0x50a5,0x60c6,0x70e7,\
0x8108,0x9129,0xa14a,0xb16b,0xc18c,0xd1ad,0xe1ce,0xf1ef,\
0x1231,0x0210,0x3273,0x2252,0x52b5,0x4294,0x72f7,0x62d6,\
0x9339,0x8318,0xb37b,0xa35a,0xd3bd,0xc39c,0xf3ff,0xe3de,\
0x2462,0x3443,0x0420,0x1401,0x64e6,0x74c7,0x44a4,0x5485,\
0xa56a,0xb54b,0x8528,0x9509,0xe5ee,0xf5cf,0xc5ac,0xd58d,\
0x3653,0x2672,0x1611,0x0630,0x76d7,0x66f6,0x5695,0x46b4,\
0xb75b,0xa77a,0x9719,0x8738,0xf7df,0xe7fe,0xd79d,0xc7bc,\
0x48c4,0x58e5,0x6886,0x78a7,0x0840,0x1861,0x2802,0x3823,\
0xc9cc,0xd9ed,0xe98e,0xf9af,0x8948,0x9969,0xa90a,0xb92b,\
0x5af5,0x4ad4,0x7ab7,0x6a96,0x1a71,0x0a50,0x3a33,0x2a12,\
0xdbfd,0xcbdc,0xfbbf,0xeb9e,0x9b79,0x8b58,0xbb3b,0xab1a,\
0x6ca6,0x7c87,0x4ce4,0x5cc5,0x2c22,0x3c03,0x0c60,0x1c41,\
0xedae,0xfd8f,0xcdec,0xddcd,0xad2a,0xbd0b,0x8d68,0x9d49,\
0x7e97,0x6eb6,0x5ed5,0x4ef4,0x3e13,0x2e32,0x1e51,0x0e70,\
0xff9f,0xefbe,0xdfdd,0xcffc,0xbf1b,0xaf3a,0x9f59,0x8f78,\
0x9188,0x81a9,0xb1ca,0xa1eb,0xd10c,0xc12d,0xf14e,0xe16f,\
0x1080,0x00a1,0x30c2,0x20e3,0x5004,0x4025,0x7046,0x6067,\
0x83b9,0x9398,0xa3fb,0xb3da,0xc33d,0xd31c,0xe37f,0xf35e,\
0x02b1,0x1290,0x22f3,0x32d2,0x4235,0x5214,0x6277,0x7256,\
0xb5ea,0xa5cb,0x95a8,0x8589,0xf56e,0xe54f,0xd52c,0xc50d,\
0x34e2,0x24c3,0x14a0,0x0481,0x7466,0x6447,0x5424,0x4405,\
0xa7db,0xb7fa,0x8799,0x97b8,0xe75f,0xf77e,0xc71d,0xd73c,\
0x26d3,0x36f2,0x0691,0x16b0,0x6657,0x7676,0x4615,0x5634,\
0xd94c,0xc96d,0xf90e,0xe92f,0x99c8,0x89e9,0xb98a,0xa9ab,\
0x5844,0x4865,0x7806,0x6827,0x18c0,0x08e1,0x3882,0x28a3,\
0xcb7d,0xdb5c,0xeb3f,0xfb1e,0x8bf9,0x9bd8,0xabbb,0xbb9a,\
0x4a75,0x5a54,0x6a37,0x7a16,0x0af1,0x1ad0,0x2ab3,0x3a92,\
0xfd2e,0xed0f,0xdd6c,0xcd4d,0xbdaa,0xad8b,0x9de8,0x8dc9,\
0x7c26,0x6c07,0x5c64,0x4c45,0x3ca2,0x2c83,0x1ce0,0x0cc1,\
0xef1f,0xff3e,0xcf5d,0xdf7c,0xaf9b,0xbfba,0x8fd9,0x9ff8,\
0x6e17,0x7e36,0x4e55,0x5e74,0x2e93,0x3eb2,0x0ed1,0x1ef0]
crc16 = ctypes.c_uint16(0).value
for x in range(StartElement, Len + StartElement):
crc16 = (crc16 << 8) ^ ctypes.c_uint16(crc16Table[((crc16 >> 8) ^ ctypes.c_uint8(Data[x]).value) & 0xff]).value
crc16 = ctypes.c_uint16(crc16).value
return crc16
def Crc16Add(Data):
""" Calculates two byte CRC and adds to end of Data array."""
crc16 = ctypes.c_uint16(crc16Calc(Data, 3, BLK_SIZE)).value
Data[BLK_SIZE + 3] = (crc16 >> 8) & 0xff
Data[BLK_SIZE + 4] = crc16 & 0xff
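# Resulting Xmodem-CRC block layout (BLK_SIZE + 5 = 133 bytes), as assembled in WriteXmodem() below:
# blk[0] = 0x01 (SOH), blk[1] = block number, blk[2] = ~block number,
# blk[3:131] = 128 data bytes (zero padded), blk[131:133] = CRC16 with the high byte first.
# Note that crc16Calc(Data, StartElement=3, Len=BLK_SIZE) checksums only the 128-byte payload,
# not the 3-byte header.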
def WriteBinary(SerialPort, Log, Data, Size):
""" Writes all data to serial port. _port->write(buffer, size) != size"""
Log.Log("WriteBinary()...")
Log.Log("Writing bytes: " + str(Size))
blkStr = ""
for e in Data:
blkStr = blkStr + chr(e)
if ArduinoFlashHardValues.LiveWrite:
bytesWritten = SerialPort.write(blkStr)
Log.Log("Bytes written: " + str(bytesWritten))
if bytesWritten != Size:
raise Exception("WriteBinary() Exception.")
def WriteXmodem(SerialPort, log, Data, Size):
""" Main implementation of Xmodem(). See Samba.cpp LN392 """
log.Log("\n\nWriteXmodem():")
blkNum = 1 # Block numbers haven't been implemented yet.
blk = [ctypes.c_uint8(0).value] * (BLK_SIZE + 5) # Initial declaration of blk array.
for retries in range(0, MAX_RETRIES):
log.Log("Waiting for start char, C.")
if ArduinoFlashHardValues.LiveWrite:
receivedMsg = ArduinoFlashSerial.ReadSerialByte(SerialPort, log)
else:
receivedMsg = 'C'
if receivedMsg == 'C':
break
if retries == MAX_RETRIES - 1:
raise Exception("WriteXmodem() Waited Max For Start Char.")
codeNo = 0
bytesWritten = 0
while Size > 0:
blk[0] = ctypes.c_uint8(0x01).value # First three bytes are pre-filled.
blk[1] = ctypes.c_uint8(blkNum & 0xff).value
blk[2] = ctypes.c_uint8(~(blkNum & 0xff)).value
log.Log("len(Data): " + str(len(Data)) + ", min: " + str(min(Size, BLK_SIZE)))
log.Log("!. Code No: " + str(codeNo))
if codeNo < len(Data):
for x in range(3, min(len(Data), BLK_SIZE) + 3): # Fills blk array (starting from 3rd element) with Data bytes.
blk[x] = Data[codeNo]
codeNo = codeNo + 1
log.Log("2. Code No: " + str(codeNo))
if Size < BLK_SIZE: # Fills any unwritten elements with 0 BLK_SIZE=128.
log.Log("Filling in 0s.")
for x in range(3 + len(Data), BLK_SIZE):
blk[x] = 0x00
Crc16Add(blk) # Calculates two byte CRC and adds to end of blk.
i = 0 # For display purpose.
log.Log("Converting Xmodem CRC Code into string for serial transfer.")
blkStr = ""
for e in blk:
blkStr = blkStr + chr(e)
line = 0
i = 0 # For display purpose.
dataStr = "Writing Data: \n0. "
for e in blk:
dataStr = dataStr + hex(e) + " "
if (i + 1)%16 == 0:
line += 1
dataStr = dataStr + "\n" + str(line) + ". "
i += 1
log.Log(dataStr)
for retries in range(0, MAX_RETRIES):
log.Log("WriteXmodem(), writing Xmodem CRC Code data...")
if ArduinoFlashHardValues.LiveWrite:
bytesWritten = SerialPort.write(blkStr)
# Write to the SAM-BA
log.Log("WriteXmodem(), xModem CRC Code data written. " + str(bytesWritten))
log.Log("WriteXmodem(), checking for ACK...")
time.sleep(1)
if ArduinoFlashHardValues.LiveWrite:
receivedMsg = ArduinoFlashSerial.ReadSerialByte(SerialPort, log)
else:
receivedMsg = '\x06'
if receivedMsg == '\x06':
break
time.sleep(3)
if retries == MAX_RETRIES - 1:
raise Exception("WriteXmodem() Waited While Writing Blk.")
log.Log("Size: " + str(Size))
Size = Size - BLK_SIZE
blkNum = blkNum + 1
log.Log("Size: " + str(Size))
for retries in range(0, MAX_RETRIES):
log.Log("ACK receive, writing EOT...")
SerialPort.write('\x04') # 0x04 = EOT
log.Log("Waiting for ACK.")
time.sleep(1)
if ArduinoFlashHardValues.LiveWrite:
receivedMsg = ArduinoFlashSerial.ReadSerialByte(SerialPort, log)
else:
receivedMsg = '\x06'
if receivedMsg == '\x06':
log.Log("ACK received.")
log.Log("WriteXmodem() Complete.")
break
time.sleep(3)
if retries == MAX_RETRIES - 1:
raise Exception("WriteXmodem() Waited While Writing EOT.")
log.Log("WriteXmodem() Complete.\n")
``` |
{
"source": "johngrasty/salt",
"score": 2
} |
#### File: unit/modules/git_test.py
```python
from __future__ import absolute_import
# Import Salt Testing Libs
from salttesting import TestCase
from salttesting.helpers import ensure_in_syspath
ensure_in_syspath('../../')
# Import Salt Libs
from salt.modules import git
class GitTestCase(TestCase):
'''
TestCase for salt.modules.git module
'''
def test_http_basic_authentication(self):
'''
Test that HTTP Basic auth works as intended.
'''
# ((user, pass), expected) tuples
test_inputs = [
((None, None), 'https://example.com'),
(('user', None), 'https://[email protected]'),
(('user', 'pass'), 'https://user:[email protected]'),
]
for (user, password), expected in test_inputs:
kwargs = {
'url': 'https://example.com',
'https_user': user,
'https_pass': password,
}
result = git._add_http_basic_auth(**kwargs)
self.assertEqual(result, expected)
def test_https_user_and_pw_is_confidential(self):
sensitive_outputs = (
'https://[email protected]',
'https://user:[email protected]',
)
sanitized = 'https://<redacted>@example.com'
for sensitive_output in sensitive_outputs:
result = git._remove_sensitive_data(sensitive_output)
self.assertEqual(result, sanitized)
def test_git_ssh_user_is_not_treated_as_sensitive(self):
not_sensitive_outputs = (
'ssh://[email protected]',
)
for not_sensitive_output in not_sensitive_outputs:
result = git._remove_sensitive_data(not_sensitive_output)
self.assertEqual(result, not_sensitive_output)
if __name__ == '__main__':
from integration import run_tests
run_tests(GitTestCase, needs_daemon=False)
``` |
{
"source": "JohnGrey0/bestbuy-item-tracker",
"score": 3
} |
#### File: JohnGrey0/bestbuy-item-tracker/scraper.py
```python
import requests
import time
from bs4 import BeautifulSoup
from datetime import datetime
from helpers import get_useragent
class Scraper():
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def get_page_html(self, url):
headers = {
"User-Agent": get_useragent()}
page = requests.get(url, headers=headers)
if page.status_code == 200:
return page.content, page.status_code
return None, page.status_code
def check_item_in_stock(self, page_html, tag, data):
soup = BeautifulSoup(page_html, 'html.parser')
out_of_stock_divs = soup.findAll(tag, data)
return len(out_of_stock_divs) != 0
def get_page_attributes(self, page_html, tag, data, index):
soup = BeautifulSoup(page_html, 'html.parser')
attribute = soup.findAll(tag, data)
return attribute[0].contents[index]
def get_item_html(self, url):
status_code = 0
page_html = None
counter = 0
max_retries = 24
sleep = 300
while status_code != 200 and page_html is None and counter < max_retries:
counter += 1
try:
print("{} - Scraper - Checking products for availability attempt #{}...".format(str(datetime.now()), counter))
page_html, status_code = self.get_page_html(url)
except Exception as e:
time.sleep(sleep)
return page_html
``` |
{
"source": "johngriebel/nbahub",
"score": 3
} |
#### File: nbahub/nbahub/excel_handler.py
```python
from openpyxl import Workbook
from openpyxl.styles import Font, PatternFill
from openpyxl.styles.borders import Border, Side
from nba_stats_api.constants import (GENERAL_STAT_TYPES,
PLAYTYPE_COLUMNS,
ALL_PLAY_TYPES,
VIDEO_MEASURES,
VIDEO_ENDPOINT)
class ExcelGenerator:
def __init__(self, player_stats, season):
self.workbook = Workbook()
self.worksheet = self.workbook.active
self.current_column = None
self.current_row = None
self.video_row = 31
self.video_column = 17
self.player_stats = player_stats
self.season = season
self.header_font = Font(name="Calibri",
size=14,
bold="True")
self.header_fill = PatternFill(start_color="ff8080",
end_color="ff8080",
fill_type="solid")
self.row_fill = PatternFill(start_color="b3d9ff",
end_color="b3d9ff",
fill_type="solid")
self.all_borders = Border(left=Side(style='thin'),
right=Side(style='thin'),
top=Side(style='thin'),
bottom=Side(style='thin'))
def _set_current_cell_value(self, value, row=None, column=None, **kwargs):
self.worksheet.cell(row=row or self.current_row,
column=column or self.current_column,
value=value)
for attr in kwargs:
setattr(self.worksheet.cell(row=row or self.current_row, column=column or self.current_column),
attr, kwargs.get(attr))
def _general_stats(self):
for stat_type in GENERAL_STAT_TYPES:
self._set_current_cell_value(value=stat_type,
font=self.header_font)
self.current_row += 1
if self.player_stats.get(stat_type) is not None:
for header_column in self.player_stats[stat_type]:
self._set_current_cell_value(value=header_column,
font=self.header_font,
fill=self.header_fill,
border=self.all_borders)
value = self.player_stats[stat_type][header_column]
self._set_current_cell_value(value=value,
row=self.current_row + 1,
fill=self.row_fill,
border=self.all_borders)
self.current_column += 1
self.current_row += 3
self.current_column = 1
self.current_row += 2
def _play_types(self):
self._set_current_cell_value(value="Play Type",
font=self.header_font,
fill=self.header_fill,
border=self.all_borders)
self.current_column += 1
for column in PLAYTYPE_COLUMNS:
self._set_current_cell_value(value=column,
font=self.header_font,
fill=self.header_fill,
border=self.all_borders)
self.current_column += 1
self.current_row += 1
self.current_column = 1
self.video_row = self.current_row
for play_type in ALL_PLAY_TYPES:
self._set_current_cell_value(value=play_type,
font=self.header_font,
fill=self.header_fill,
border=self.all_borders)
self.current_column += 1
for column in self.player_stats.get(play_type, []):
self._set_current_cell_value(value=self.player_stats[play_type][column],
fill=self.row_fill,
border=self.all_borders)
self.current_column += 1
self.video_column = self.current_column
self.current_column = 1
self.current_row += 1
self.video_column += 1
def _video(self):
self.current_row = self.video_row
self.current_column = self.video_column
self._set_current_cell_value(value="Video (NBA.com archive)",
font=self.header_font,
fill=self.header_fill)
self.current_row += 1
for measure in VIDEO_MEASURES:
url = VIDEO_ENDPOINT.format(player_id=self.player_stats['BasicInfo']['PLAYER_ID'],
measure=measure, season=self.season,
season_type="Regular+Season",
)
link_background_fill = PatternFill(start_color="1aff8c",
end_color="1aff8c",
fill_type="solid")
self._set_current_cell_value(value=VIDEO_MEASURES[measure],
fill=link_background_fill)
self.worksheet.cell(row=self.current_row,
column=self.current_column).hyperlink = url
self.current_row += 1
def generate_workbook(self, auto_save=True, path="outputs"):
player_name = self.player_stats['BasicInfo']['PLAYER_NAME']
print(f"Creating Excel Spreadsheet for {player_name}. Autosave: {auto_save}")
self.current_row = 1
self.current_column = 1
header_value = (self.player_stats['BasicInfo']['PLAYER_NAME'] +
" {season} stats and video hub".format(season=self.season))
self._set_current_cell_value(value=header_value,
font=self.header_font)
self.current_row = 4
self._general_stats()
self._play_types()
self._video()
if auto_save:
self.workbook.save(f"{path}/{player_name}.xlsx")
```
#### File: nbahub/nbahub/nbahub_cli.py
```python
import json
import click
from nba_stats_api.utils import update_all_player_stats, DecimalEncoder
from nbahub.excel_handler import ExcelGenerator
@click.group()
def cli():
pass
@cli.command()
@click.option("--season", default="2016-17", help="The season you're interested in. Must be specified in "
"YYYY-YY format. Currently defaults to 2016-17")
@click.option("--format", type=click.Choice(["excel", "json"]))
@click.option("--output", default="outputs", help="The directory you would like the output to be saved in."
" This can be specified as either a full or local path.")
def update_all(season, format, output):
print(f"Updating all player statistics for {season}")
# PLAYER_ID - > {'PerGame': {<stats here>}, 'Totals': <stats>, etc}
player_stats_dict = update_all_player_stats(season)
for player in player_stats_dict:
this_player_stats = player_stats_dict[player]
player_name = this_player_stats['BasicInfo']['PLAYER_NAME']
if format == "json":
with open(f"{output}/{player_name}.json", "w") as stat_file:
stat_file.write(json.dumps(player_stats_dict[player],
cls=DecimalEncoder))
stat_file.close()
elif format == "excel":
excel_generator = ExcelGenerator(this_player_stats, season=season)
excel_generator.generate_workbook(path=output)
def main():
cli()
``` |
{
"source": "JohnGriffiths/ConWhAt",
"score": 2
} |
#### File: conwhat/utils/readers.py
```python
import os,sys,yaml,h5py
import numpy as np,networkx as nx, pandas as pd
import nibabel as nib, nilearn as nl
from nibabel.affines import apply_affine
from dipy.io import Dpy
import indexed_gzip as igzip
# Atlas base dir
abd = os.path.split(__file__)[0] + '/../data'
def load_vol_file_mappings(atlas_name=None,atlas_dir=None):
print 'loading file mapping'
if atlas_dir == None: atlas_dir = os.path.join(abd,atlas_name)
mappings = pd.read_csv(atlas_dir + '/mappings.txt', sep=',')
return mappings,atlas_dir
def load_vol_bboxes(atlas_name=None,atlas_dir=None):
print 'loading vol bbox'
if atlas_dir == None: atlas_dir = os.path.join(abd,atlas_name)
bbox = pd.read_csv(atlas_dir + '/bounding_boxes.txt', sep=',')
return bbox
def load_connectivity(atlas_name=None,atlas_dir=None,weights_name='weights'):
print 'loading connectivity'
if not atlas_dir: atlas_dir = os.path.join(abd,atlas_name)
# Mandatory files
ws_file = '%s/%s.txt' %(atlas_dir,weights_name)
rls_file = '%s/region_labels.txt' % atlas_dir
ws = np.loadtxt(ws_file)
rls = [l[:-1] for l in open(rls_file, 'r').readlines()]
# Optional files
tls_file = '%s/tract_lengths.txt' % atlas_dir
rxyzs_file = '%s/region_xyzs.txt' % atlas_dir
rnii_file = '%s/region_masks.nii.gz' % atlas_dir
hs_file = '%s/hemispheres.txt' % atlas_dir
ctx_file = '%s/cortex.txt' % atlas_dir
rmfslh_file = '%s/region_mapping_fsav_lh.txt' % atlas_dir
rmfsrh_file = '%s/region_mapping_fsav_rh.txt' % atlas_dir
tls,rxyzs,rnii,ctx,hs,rmfslh,rmfsrh = None,None,None,None,None,None,None
if os.path.isfile(tls_file): tls = np.loadtxt(tls_file)
if os.path.isfile(rxyzs_file): rxyzs = np.loadtxt(rxyzs_file)
if os.path.isfile(rnii_file): rnii = nib.load(rnii_file)
if os.path.isfile(hs_file): hs = np.loadtxt(hs_file)
if os.path.isfile(ctx_file): ctx = np.loadtxt(ctx_file)
if os.path.isfile(rmfslh_file): rmfslh =np.loadtxt(rmfslh_file)
if os.path.isfile(rmfsrh_file): rmfsrh = np.loadtxt(rmfsrh_file)
return ws,rls,tls,rxyzs,rnii,ctx,hs,rmfslh,rmfsrh
def make_nx_graph(vfms,bboxes,weights,region_labels,hemis,cortex):
G = nx.Graph()
# add node info
for node_it,node in enumerate(region_labels):
rl = region_labels[node_it]
hemi = hemis[node_it]
ctx = cortex[node_it]
G.add_node(node_it, **{'region_label': rl,
'hemisphere': hemi,
'cortex': ctx})
# add edge info
for idx in vfms.index:
vfm = vfms.ix[idx]
name = vfm['name']
# Allow either '33-55' or '33_to_55' naming conventions
if '_to_' in name:
roi1,roi2 = name.split('_to_')
else:
roi1,roi2 = name.split('-')
roi1 = int(roi1); roi2 = int(roi2)
ad = vfm.to_dict()
ad.update(bboxes.ix[idx])
ad['idx'] = idx
ad['weight'] = weights[roi1,roi2]
n1,n2 = G.node[roi1],G.node[roi2]
# (ibid...)
if '_to_' in name:
fullname = n1['region_label'] + '_to_' + n2['region_label']
else:
fullname = n1['region_label'] + '-' + n2['region_label']
ad['fullname'] = fullname
G.add_edge(roi1,roi2,**ad)
return G
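# Rough usage sketch for the volumetric atlas loaders above (atlas name is hypothetical):
# vfms, atlas_dir = load_vol_file_mappings(atlas_name='some_atlas')
# bboxes = load_vol_bboxes(atlas_name='some_atlas')
# ws, rls, tls, rxyzs, rnii, ctx, hs, _, _ = load_connectivity(atlas_name='some_atlas')
# G = make_nx_graph(vfms, bboxes, ws, rls, hs, ctx)
# Each edge of G then carries the file mapping row, its bounding box, its index and the
# corresponding connection weight.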
def load_stream_file_mappings(atlas_name=None,atlas_dir=None):
print 'loading streamline file mappings'
if not atlas_dir: atlas_dir = os.path.join(abd,atlas_name)
F = h5py.File(atlas_dir + '/mappings.h5', 'r')
KVs = {k: v.value.astype(int) for k,v in F.items()}
F.close()
mappings = pd.DataFrame(np.array(KVs.values()),
index=KVs.keys())
mappings.columns = ['idxlist']
mappings.index.names = ['name']
mappings = mappings.reset_index()
return mappings,atlas_dir
def load_stream_file_mappings_multifile(atlas_name=None,atlas_dir=None):
print 'loading mult-file streamline file mappings'
if not atlas_dir: atlas_dir = os.path.join(abd,atlas_name)
# Difference from above is that the keys are now (sub,cnxn), rather than cnxn
F = h5py.File(atlas_dir + '/mappings_multifile.h5', 'r')
KVs = {k: v.value for k,v in F.items()}
F.close()
mappings = pd.DataFrame(np.array(KVs.values()),
index=KVs.keys())
mappings.columns = ['idxlist']
mappings.index.names = ['sub','name']
mappings = mappings.reset_index()
return mappings,atlas_dir
# (this is identical to load vox bboxes. Remove both
# and replace with single func?)
def load_stream_bboxes(atlas_name=None,atlas_dir=None):
print 'loading stream bbox'
if not atlas_dir: atlas_dir = os.path.join(abd,atlas_name)
bbox = pd.read_csv(atlas_dir + '/bounding_boxes.txt', sep=',',
names=['xmin', 'xmax', 'ymin', 'ymax', 'zmin', 'zmax'])
return bbox
def make_streams_nx_graph(sfms,bboxes,weights,region_labels,hemis,cortex):
# I THINK THIS CAN BE THE SAME FUNC FOR BOTH \
# VOLUMETRIC AND STREAMLINETRIC
# ...just writing one for streamlines first to get clear...
G = nx.Graph()
# add node info
for node_it,node in enumerate(region_labels):
rl = region_labels[node_it]
hemi = hemis[node_it]
ctx = cortex[node_it]
G.add_node(node_it, **{'region_label': rl,
'hemisphere': hemi,
'cortex': ctx})
# add edge info
for idx in sfms.index:
sfm = sfms.ix[idx]
name = sfm['name']
# Allow either '33-55' or '33_to_55' naming conventions
if '_to_' in name:
roi1,roi2 = name.split('_to_')
else:
roi1,roi2 = name.split('-')
roi1 = int(roi1); roi2 = int(roi2)
ad = sfm.to_dict()
ad.update(bboxes.ix[idx])
ad['idx'] = idx
ad['weight'] = weights[roi1,roi2]
n1,n2 = G.node[roi1],G.node[roi2]
# (ibid...)
if '_to_' in name:
fullname = n1['region_label'] + '_to_' + n2['region_label']
else:
fullname = n1['region_label'] + '-' + n2['region_label']
ad['fullname'] = fullname
G.add_edge(roi1,roi2,**ad)
return G
def igzip4dnii(fname,inds3d,
inds0d='all',inds1d='all',inds2d='all',
atlas_name=None,
atlas_dir=None):
# If atlas dir given, file is assumed to be in there
if atlas_dir:
fname = '%s/%s' %(atlas_dir,fname)
else:
# If atlas dir not given but atlas name is given, assumes path is relative
# to local conwhat atlas dir
if atlas_name:
fname = '%s/%s/%s' %(abd,atlas_name,fname)
# Here we are using 4MB spacing between
# seek points, and using a larger read
# buffer (than the default size of 16KB).
fobj = igzip.IndexedGzipFile(
filename=fname,#'big_image.nii.gz',
spacing=4194304,
readbuf_size=131072)
# Create a nibabel image using
# the existing file handle.
fmap = nib.Nifti1Image.make_file_map()
fmap['image'].fileobj = fobj
image = nib.Nifti1Image.from_file_map(fmap)
if inds3d == 'N/A' or np.isnan(inds3d):
dims0,dims1,dims2 = image.shape
dat = np.squeeze(image.dataobj[:,:,:])
else:
# Use the image ArrayProxy to access the
# data - the index will automatically be
# built as data is accessed.
dims0,dims1,dims2,dims3 = image.shape
#if inds0d == 'all': inds0d = range(dims0)
#if inds1d == 'all': inds1d = range(dims1)
#if inds2d == 'all': inds2d = range(dims2)
dat = np.squeeze(image.dataobj[:,:,:,int(inds3d)])
#dat = np.squeeze(image.dataobj[inds0d,inds1d,inds2d,int(inds3d)])
#if type(inds3d) == int: # len(inds3d) == 1:
# dat = np.squeeze(image.dataobj[inds0d,inds1d,inds2d,int(inds3d)])
#else:
# dat = np.array([(image.dataobj[inds0d,inds1d,inds2d,int(i3)]) for i3 in inds3d])
# dat = dat.reshape([dims[1],dims[2],dims[3],dims[0]])
return dat
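# Usage sketch (file name and index are hypothetical): igzip4dnii pulls a single 3D volume out of
# a large gzipped 4D NIfTI without decompressing the whole file, by seeking with indexed_gzip:
# vol = igzip4dnii('vol_cnxn_maps.nii.gz', inds3d=12, atlas_name='some_atlas')
# 'inds3d' selects the index along the 4th dimension; passing 'N/A' (or NaN) returns the full
# 3D image instead.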
def dpy_to_trk(dpy_file,ref,outfile,inds='all'):
if os.path.isfile(ref):
ref_img = nib.load(ref)
else:
ref_img = ref
# Make trackvis header
hdr = nib.trackvis.empty_header()
hdr['voxel_size'] = ref_img.get_header().get_zooms()
hdr['dim'] = ref_img.shape
hdr['voxel_order'] = "LAS"#"RAS"
hdr['vox_to_ras'] = ref_img.affine
zooms = ref_img.header.get_zooms()
# Load streamlines
D = Dpy(dpy_file, 'r')
if inds == 'all':
dpy_streams = D.read_tracks()
else:
dpy_streams = D.read_tracksi(inds)
D.close()
# Convert to trackvis space + format
dpy_streams = [apply_affine(hdr['vox_to_ras'], s*zooms) for s in dpy_streams]
trk_streams = [(s,None,None) for s in dpy_streams]
nib.trackvis.write(outfile,trk_streams,hdr)
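# Usage sketch (file names hypothetical): convert a subset of streamlines stored in .dpy format to
# a TrackVis .trk file aligned to a reference image:
# dpy_to_trk('streams.dpy', 'ref_FA.nii.gz', 'streams.trk', inds=[0, 5, 42])
# or pass inds='all' to export every streamline in the file.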
``` |
{
"source": "JohnGriffiths/dipy",
"score": 3
} |
#### File: dipy/align/__init__.py
```python
import numpy as np
floating = np.float32
class Bunch(object):
def __init__(self, **kwds):
r"""A 'bunch' of values (a replacement of Enum)
This is a temporary replacement of Enum, which is not available
on all versions of Python 2
"""
self.__dict__.update(kwds)
VerbosityLevels = Bunch(NONE=0, STATUS=1, DIAGNOSE=2, DEBUG=3)
r""" VerbosityLevels
This enum defines the four levels of verbosity we use in the align
module.
NONE : do not print anything
STATUS : print information about the current status of the algorithm
DIAGNOSE : print high level information of the components involved in the
registration that can be used to detect a failing component.
DEBUG : print as much information as possible to isolate the cause of a bug.
"""
```
#### File: core/tests/test_optimize.py
```python
import numpy as np
import scipy.sparse as sps
import numpy.testing as npt
from dipy.core.optimize import Optimizer, SCIPY_LESS_0_12, sparse_nnls, spdot
import dipy.core.optimize as opt
def func(x):
return x[0]**2 + x[1]**2 + x[2]**2
def func2(x):
return x[0]**2 + 0.5 * x[1]**2 + 0.2 * x[2]**2 + 0.2 * x[3]**2
@npt.dec.skipif(SCIPY_LESS_0_12)
def test_optimize_new_scipy():
opt = Optimizer(fun=func, x0=np.array([1., 1., 1.]), method='Powell')
npt.assert_array_almost_equal(opt.xopt, np.array([0, 0, 0]))
npt.assert_almost_equal(opt.fopt, 0)
opt = Optimizer(fun=func, x0=np.array([1., 1., 1.]), method='L-BFGS-B',
options={'maxcor': 10, 'ftol': 1e-7,
'gtol': 1e-5, 'eps': 1e-8})
npt.assert_array_almost_equal(opt.xopt, np.array([0, 0, 0]))
npt.assert_almost_equal(opt.fopt, 0)
npt.assert_equal(opt.evolution, None)
npt.assert_equal(opt.evolution, None)
opt = Optimizer(fun=func, x0=np.array([1., 1., 1.]), method='L-BFGS-B',
options={'maxcor': 10, 'ftol': 1e-7,
'gtol': 1e-5, 'eps': 1e-8},
evolution=False)
npt.assert_array_almost_equal(opt.xopt, np.array([0, 0, 0]))
npt.assert_almost_equal(opt.fopt, 0)
opt.print_summary()
opt = Optimizer(fun=func2, x0=np.array([1., 1., 1., 5.]),
method='L-BFGS-B',
options={'maxcor': 10, 'ftol': 1e-7,
'gtol': 1e-5, 'eps': 1e-8},
evolution=True)
npt.assert_equal(opt.evolution.shape, (opt.nit, 4))
opt = Optimizer(fun=func2, x0=np.array([1., 1., 1., 5.]),
method='Powell',
options={'xtol': 1e-6, 'ftol': 1e-6, 'maxiter': 1e6},
evolution=True)
npt.assert_array_almost_equal(opt.xopt, np.array([0, 0, 0, 0.]))
@npt.dec.skipif(not SCIPY_LESS_0_12)
def test_optimize_old_scipy():
opt = Optimizer(fun=func, x0=np.array([1., 1., 1.]),
method='L-BFGS-B',
options={'maxcor': 10, 'ftol': 1e-7,
'gtol': 1e-5, 'eps': 1e-8})
npt.assert_array_almost_equal(opt.xopt, np.array([0, 0, 0]))
npt.assert_almost_equal(opt.fopt, 0)
opt = Optimizer(fun=func2, x0=np.array([1., 1., 1., 5.]),
method='Powell',
options={'xtol': 1e-6, 'ftol': 1e-6, 'maxiter': 1e6},
evolution=True)
npt.assert_array_almost_equal(opt.xopt, np.array([0, 0, 0, 0.]))
opt = Optimizer(fun=func, x0=np.array([1., 1., 1.]),
method='L-BFGS-B',
options={'maxcor': 10, 'eps': 1e-8})
npt.assert_array_almost_equal(opt.xopt, np.array([0, 0, 0]))
npt.assert_almost_equal(opt.fopt, 0)
opt = Optimizer(fun=func, x0=np.array([1., 1., 1.]),
method='L-BFGS-B',
options=None)
npt.assert_array_almost_equal(opt.xopt, np.array([0, 0, 0]))
npt.assert_almost_equal(opt.fopt, 0)
opt = Optimizer(fun=func2, x0=np.array([1., 1., 1., 5.]),
method='L-BFGS-B',
options={'gtol': 1e-7, 'ftol': 1e-7, 'maxiter': 10000})
npt.assert_array_almost_equal(opt.xopt, np.array([0, 0, 0, 0.]), 4)
npt.assert_almost_equal(opt.fopt, 0)
opt = Optimizer(fun=func2, x0=np.array([1., 1., 1., 5.]),
method='Powell',
options={'maxiter': 1e6},
evolution=True)
npt.assert_array_almost_equal(opt.xopt, np.array([0, 0, 0, 0.]))
opt = Optimizer(fun=func2, x0=np.array([1., 1., 1., 5.]),
method='Powell',
options={'maxiter': 1e6},
evolution=True)
npt.assert_array_almost_equal(opt.xopt, np.array([0, 0, 0, 0.]))
def test_sklearn_linear_solver():
class SillySolver(opt.SKLearnLinearSolver):
def fit(self, X, y):
self.coef_ = np.ones(X.shape[-1])
MySillySolver = SillySolver()
n_samples = 100
n_features = 20
y = np.random.rand(n_samples)
X = np.ones((n_samples, n_features))
MySillySolver.fit(X, y)
npt.assert_equal(MySillySolver.coef_, np.ones(n_features))
npt.assert_equal(MySillySolver.predict(X), np.ones(n_samples) * 20)
def test_nonnegativeleastsquares():
n = 100
X = np.eye(n)
beta = np.random.rand(n)
y = np.dot(X, beta)
my_nnls = opt.NonNegativeLeastSquares()
my_nnls.fit(X, y)
npt.assert_equal(my_nnls.coef_, beta)
npt.assert_equal(my_nnls.predict(X), y)
def test_spdot():
n = 100
m = 20
k = 10
A = np.random.randn(n, m)
B = np.random.randn(m, k)
A_sparse = sps.csr_matrix(A)
B_sparse = sps.csr_matrix(B)
dense_dot = np.dot(A, B)
# Try all the different variations:
npt.assert_array_almost_equal(dense_dot,
spdot(A_sparse, B_sparse).todense())
npt.assert_array_almost_equal(dense_dot, spdot(A, B_sparse))
npt.assert_array_almost_equal(dense_dot, spdot(A_sparse, B))
def test_sparse_nnls():
# Set up the regression:
beta = np.random.rand(10)
X = np.random.randn(1000, 10)
y = np.dot(X, beta)
beta_hat = sparse_nnls(y, X)
beta_hat_sparse = sparse_nnls(y, sps.csr_matrix(X))
# We should be able to get back the right answer for this simple case
npt.assert_array_almost_equal(beta, beta_hat, decimal=1)
npt.assert_array_almost_equal(beta, beta_hat_sparse, decimal=1)
if __name__ == '__main__':
npt.run_module_suite()
```
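A brief sketch of the `Optimizer` interface exercised by the tests above, on a toy quadratic; the attribute names (`xopt`, `fopt`) and the Powell options follow the assertions in the test file.

```python
import numpy as np
from dipy.core.optimize import Optimizer

def quadratic(x):
    # Simple bowl with its minimum at the origin
    return x[0] ** 2 + 2 * x[1] ** 2

opt = Optimizer(fun=quadratic, x0=np.array([3.0, -2.0]), method='Powell',
                options={'xtol': 1e-8, 'ftol': 1e-8, 'maxiter': 10000})
print(opt.xopt)        # close to [0, 0]
print(opt.fopt)        # close to 0
opt.print_summary()    # brief report, as used in the tests
```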
#### File: dipy/dipy/__init__.py
```python
import sys
if sys.version[0:3] < '2.6':
raise ImportError('Dipy needs Python version 2.6 or above')
from .info import __version__
# Test callable
from numpy.testing import Tester
test = Tester().test
bench = Tester().bench
del Tester
# Plumb in version etc info stuff
from .pkg_info import get_pkg_info as _get_pkg_info
def get_info():
from os.path import dirname
return _get_pkg_info(dirname(__file__))
del sys
```
#### File: dipy/io/pickles.py
```python
from ..utils.six.moves import cPickle
def save_pickle(fname,dix):
''' Save `dix` to `fname` as pickle
Parameters
------------
fname : str
filename to save object e.g. a dictionary
dix : str
dictionary or other object
Examples
----------
>>> import os
>>> from tempfile import mkstemp
>>> fd, fname = mkstemp() # make temporary file (opened, attached to fh)
>>> d={0:{'d':1}}
>>> save_pickle(fname, d)
>>> d2=load_pickle(fname)
We remove the temporary file we created for neatness
>>> os.close(fd) # the file is still open, we need to close the fh
>>> os.remove(fname)
See also
----------
dipy.io.pickles.load_pickle
'''
out=open(fname,'wb')
cPickle.dump(dix,out,protocol=cPickle.HIGHEST_PROTOCOL)
out.close()
def load_pickle(fname):
''' Load object from pickle file `fname`
Parameters
------------
fname : str
filename to load dict or other python object
Returns
---------
dix : object
dictionary or other object
Examples
----------
dipy.io.pickles.save_pickle
'''
inp=open(fname,'rb')
dix = cPickle.load(inp)
inp.close()
return dix
```
#### File: io/tests/test_bvectxt.py
```python
import numpy as np
from numpy.testing import assert_array_equal
from nose.tools import assert_raises
from dipy.io.bvectxt import orientation_from_string, reorient_vectors, \
orientation_to_string, reorient_vectors
def test_orientation_from_to_string():
ras = np.array(((0,1), (1,1), (2,1)))
lps = np.array(((0,-1), (1,-1), (2,1)))
asl = np.array(((1,1), (2,1), (0,-1)))
assert_array_equal(orientation_from_string('ras'), ras)
assert_array_equal(orientation_from_string('lps'), lps)
assert_array_equal(orientation_from_string('asl'), asl)
assert_raises(ValueError, orientation_from_string, 'aasl')
assert orientation_to_string(ras) == 'ras'
assert orientation_to_string(lps) == 'lps'
assert orientation_to_string(asl) == 'asl'
def test_reorient_vectors():
bvec = np.arange(12).reshape((3,4))
assert_array_equal(reorient_vectors(bvec, 'ras', 'ras'), bvec)
assert_array_equal(reorient_vectors(bvec, 'ras', 'lpi'), -bvec)
result = bvec[[1,2,0]]
assert_array_equal(reorient_vectors(bvec, 'ras', 'asr'), result)
bvec = result
result = bvec[[1,0,2]]*[[-1],[1],[-1]]
assert_array_equal(reorient_vectors(bvec, 'asr', 'ial'), result)
result = bvec[[1,0,2]]*[[-1],[1],[1]]
assert_array_equal(reorient_vectors(bvec, 'asr', 'iar'), result)
assert_raises(ValueError, reorient_vectors, bvec, 'ras', 'ra')
def test_reorient_vectors():
bvec = np.arange(12).reshape((3,4))
assert_array_equal(reorient_vectors(bvec, 'ras', 'ras'), bvec)
assert_array_equal(reorient_vectors(bvec, 'ras', 'lpi'), -bvec)
result = bvec[[1,2,0]]
assert_array_equal(reorient_vectors(bvec, 'ras', 'asr'), result)
bvec = result
result = bvec[[1,0,2]]*[[-1],[1],[-1]]
assert_array_equal(reorient_vectors(bvec, 'asr', 'ial'), result)
result = bvec[[1,0,2]]*[[-1],[1],[1]]
assert_array_equal(reorient_vectors(bvec, 'asr', 'iar'), result)
assert_raises(ValueError, reorient_vectors, bvec, 'ras', 'ra')
bvec = np.arange(12).reshape((3,4))
bvec = bvec.T
assert_array_equal(reorient_vectors(bvec, 'ras', 'ras', axis=1), bvec)
assert_array_equal(reorient_vectors(bvec, 'ras', 'lpi', axis=1), -bvec)
result = bvec[:, [1,2,0]]
assert_array_equal(reorient_vectors(bvec, 'ras', 'asr', axis=1), result)
bvec = result
result = bvec[:, [1,0,2]]*[-1, 1, -1]
assert_array_equal(reorient_vectors(bvec, 'asr', 'ial', axis=1), result)
result = bvec[:, [1,0,2]]*[-1, 1, 1]
assert_array_equal(reorient_vectors(bvec, 'asr', 'iar', axis=1), result)
```
#### File: io/tests/test_io_gradients.py
```python
from __future__ import division, print_function, absolute_import
import os.path as osp
import tempfile
import numpy as np
import numpy.testing as npt
from dipy.data import get_data
from dipy.io.gradients import read_bvals_bvecs
from dipy.core.gradients import gradient_table
def test_read_bvals_bvecs():
fimg, fbvals, fbvecs = get_data('small_101D')
bvals, bvecs = read_bvals_bvecs(fbvals, fbvecs)
gt = gradient_table(bvals, bvecs)
npt.assert_array_equal(bvals, gt.bvals)
npt.assert_array_equal(bvecs, gt.bvecs)
# None should also work as an input:
bvals_none, bvecs_none = read_bvals_bvecs(None, fbvecs)
npt.assert_array_equal(bvecs_none, gt.bvecs)
bvals_none, bvecs_none = read_bvals_bvecs(fbvals, None)
npt.assert_array_equal(bvals_none, gt.bvals)
# Test for error raising with unknown file formats:
nan_fbvecs = osp.splitext(fbvecs)[0] + '.nan' # Nonsense extension
npt.assert_raises(ValueError, read_bvals_bvecs, fbvals, nan_fbvecs)
# Test for error raising with incorrect file-contents:
# These bvecs only have two rows/columns:
new_bvecs1 = bvecs[:, :2]
# Make a temporary file
bv_file1 = tempfile.NamedTemporaryFile(mode='wt')
# And fill it with these 2-columned bvecs:
for x in range(new_bvecs1.shape[0]):
bv_file1.file.write('%s %s\n' %
(new_bvecs1[x][0], new_bvecs1[x][1]))
bv_file1.close()
npt.assert_raises(IOError, read_bvals_bvecs, fbvals, bv_file1.name)
# These bvecs are saved as one long array:
new_bvecs2 = np.ravel(bvecs)
bv_file2 = tempfile.NamedTemporaryFile()
np.save(bv_file2, new_bvecs2)
bv_file2.close()
npt.assert_raises(IOError, read_bvals_bvecs, fbvals, bv_file2.name)
# There are less bvecs than bvals:
new_bvecs3 = bvecs[:-1, :]
bv_file3 = tempfile.NamedTemporaryFile()
np.save(bv_file3, new_bvecs3)
bv_file3.close()
npt.assert_raises(IOError, read_bvals_bvecs, fbvals, bv_file3.name)
# You entered the bvecs on both sides:
npt.assert_raises(IOError, read_bvals_bvecs, fbvecs, fbvecs)
if __name__ == '__main__':
from numpy.testing import run_module_suite
run_module_suite()
```
#### File: dipy/reconst/cache.py
```python
from dipy.core.onetime import auto_attr
class Cache(object):
"""Cache values based on a key object (such as a sphere or gradient table).
Notes
-----
This class is meant to be used as a mix-in::
class MyModel(Model, Cache):
pass
class MyModelFit(Fit):
pass
Inside a method on the fit, typical usage would be::
def odf(sphere):
M = self.model.cache_get('odf_basis_matrix', key=sphere)
if M is None:
M = self._compute_basis_matrix(sphere)
self.model.cache_set('odf_basis_matrix', key=sphere, value=M)
"""
# We use this method instead of __init__ to construct the cache, so
# that the class can be used as a mixin, without having to worry about
# calling the super-class constructor
@auto_attr
def _cache(self):
return {}
def cache_set(self, tag, key, value):
"""Store a value in the cache.
Parameters
----------
tag : str
Description of the cached value.
key : object
Key object used to look up the cached value.
value : object
Value stored in the cache for each unique combination
of ``(tag, key)``.
Examples
--------
>>> def compute_expensive_matrix(parameters):
... # Imagine the following computation is very expensive
... return (p**2 for p in parameters)
>>> c = Cache()
>>> parameters = (1, 2, 3)
>>> X1 = compute_expensive_matrix(parameters)
>>> c.cache_set('expensive_matrix', parameters, X1)
>>> X2 = c.cache_get('expensive_matrix', parameters)
>>> X1 is X2
True
"""
self._cache[(tag, key)] = value
def cache_get(self, tag, key, default=None):
"""Retrieve a value from the cache.
Parameters
----------
tag : str
Description of the cached value.
key : object
Key object used to look up the cached value.
default : object
Value to be returned if no cached entry is found.
Returns
-------
v : object
Value from the cache associated with ``(tag, key)``. Returns
`default` if no cached entry is found.
"""
return self._cache.get((tag, key), default)
def cache_clear(self):
"""Clear the cache.
"""
self._cache = {}
```
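A minimal sketch of using `Cache` as a mix-in, following the pattern in the class docstring; `ToyModel` and `design_matrix` are hypothetical names used only for illustration.

```python
import numpy as np
from dipy.reconst.cache import Cache

class ToyModel(Cache):
    """Minimal model mixing in Cache to memoize a per-key matrix."""
    def design_matrix(self, key):
        M = self.cache_get('design_matrix', key=key)
        if M is None:
            # Pretend this construction is expensive
            M = np.outer(key, key)
            self.cache_set('design_matrix', key=key, value=M)
        return M

model = ToyModel()
key = (1.0, 2.0, 3.0)          # any hashable key object works
M1 = model.design_matrix(key)
M2 = model.design_matrix(key)  # second call is served from the cache
assert M1 is M2
```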
#### File: dipy/reconst/dsi.py
```python
import numpy as np
from scipy.ndimage import map_coordinates
from scipy.fftpack import fftn, fftshift, ifftshift
from dipy.reconst.odf import OdfModel, OdfFit, gfa
from dipy.reconst.cache import Cache
from dipy.reconst.multi_voxel import multi_voxel_fit
from dipy.reconst.recspeed import local_maxima, remove_similar_vertices
class DiffusionSpectrumModel(OdfModel, Cache):
def __init__(self,
gtab,
qgrid_size=17,
r_start=2.1,
r_end=6.,
r_step=0.2,
filter_width=32,
normalize_peaks=False):
r""" Diffusion Spectrum Imaging
The theoretical idea underlying this method is that the diffusion
propagator $P(\mathbf{r})$ (probability density function of the average
spin displacements) can be estimated by applying 3D FFT to the signal
values $S(\mathbf{q})$
..math::
:nowrap:
\begin{eqnarray}
P(\mathbf{r}) & = & S_{0}^{-1}\int S(\mathbf{q})\exp(-i2\pi\mathbf{q}\cdot\mathbf{r})d\mathbf{q}
\end{eqnarray}
where $\mathbf{r}$ is the displacement vector and $\mathbf{q}$ is the
wavevector which corresponds to different gradient directions. The ODFs are
calculated with the method proposed by Wedeen et al. [1]_, which we
implement here.
The main assumption for this model is fast gradient switching and that
the acquisition gradients will sit on a keyhole Cartesian grid in
q_space [3]_.
Parameters
----------
gtab : GradientTable,
Gradient directions and bvalues container class
qgrid_size : int,
has to be an odd number. Sets the size of the q_space grid.
For example if qgrid_size is 17 then the shape of the grid will be
``(17, 17, 17)``.
r_start : float,
ODF is sampled radially in the PDF. This parameter sets where the
sampling should start.
r_end : float,
Radial endpoint of ODF sampling
r_step : float,
Step size of the ODF sampling from r_start to r_end
filter_width : float,
Strength of the hanning filter
References
----------
.. [1] <NAME>. al, "Mapping Complex Tissue Architecture With
Diffusion Spectrum Magnetic Resonance Imaging", MRM 2005.
.. [2] <NAME>. al, "Deconvolution in Diffusion
Spectrum Imaging", Neuroimage, 2010.
.. [3] <NAME>, "Towards an accurate brain tractography", PhD
thesis, University of Cambridge, 2012.
Examples
--------
In this example where we provide the data, a gradient table
and a reconstruction sphere, we calculate generalized FA for the first
voxel in the data with the reconstruction performed using DSI.
>>> from dipy.data import dsi_voxels, get_sphere
>>> data, gtab = dsi_voxels()
>>> sphere = get_sphere('symmetric724')
>>> from dipy.reconst.dsi import DiffusionSpectrumModel
>>> ds = DiffusionSpectrumModel(gtab)
>>> dsfit = ds.fit(data)
>>> from dipy.reconst.odf import gfa
>>> np.round(gfa(dsfit.odf(sphere))[0, 0, 0], 2)
0.11
Notes
------
A. Keep in mind that DSI expects gradients on both hemispheres. If your
gradients span only one hemisphere you need to duplicate the data and
project them to the other hemisphere before calling this class. The
function dipy.reconst.dsi.half_to_full_qspace can be used for this
purpose.
B. If you increase the size of the grid (parameter qgrid_size) you will
most likely also need to update the r_* parameters. This is because
the added zero padding from the increase of qgrid_size also introduces
a scaling of the PDF.
C. We assume that only one b0 volume is provided in the data.
See Also
--------
dipy.reconst.gqi.GeneralizedQSampling
"""
self.bvals = gtab.bvals
self.bvecs = gtab.bvecs
self.normalize_peaks = normalize_peaks
# 3d volume for Sq
if qgrid_size % 2 == 0:
raise ValueError('qgrid_size needs to be an odd integer')
self.qgrid_size = qgrid_size
# necessary shifting for centering
self.origin = self.qgrid_size // 2
# hanning filter width
self.filter = hanning_filter(gtab, filter_width)
# odf sampling radius
self.qradius = np.arange(r_start, r_end, r_step)
self.qradiusn = len(self.qradius)
# create qspace grid
self.qgrid = create_qspace(gtab, self.origin)
b0 = np.min(self.bvals)
self.dn = (self.bvals > b0).sum()
self.gtab = gtab
@multi_voxel_fit
def fit(self, data):
return DiffusionSpectrumFit(self, data)
class DiffusionSpectrumFit(OdfFit):
def __init__(self, model, data):
""" Calculates PDF and ODF and other properties for a single voxel
Parameters
----------
model : object,
DiffusionSpectrumModel
data : 1d ndarray,
signal values
"""
self.model = model
self.data = data
self.qgrid_sz = self.model.qgrid_size
self.dn = self.model.dn
self._gfa = None
self.npeaks = 5
self._peak_values = None
self._peak_indices = None
def pdf(self, normalized=True):
""" Applies the 3D FFT in the q-space grid to generate
the diffusion propagator
"""
values = self.data * self.model.filter
# create the signal volume
Sq = np.zeros((self.qgrid_sz, self.qgrid_sz, self.qgrid_sz))
# fill q-space
for i in range(len(values)):
qx, qy, qz = self.model.qgrid[i]
Sq[qx, qy, qz] += values[i]
# apply fourier transform
Pr = fftshift(np.real(fftn(ifftshift(Sq),
3 * (self.qgrid_sz, ))))
# clipping negative values to 0 (ringing artefact)
Pr = np.clip(Pr, 0, Pr.max())
# normalize the propagator to obtain a pdf
if normalized:
Pr /= Pr.sum()
return Pr
def rtop_signal(self, filtering=True):
""" Calculates the return to origin probability (rtop) from the signal
rtop equals the sum of all signal values
Parameters
----------
filtering : boolean
default true, perform the hanning filtering
Returns
-------
rtop : float
the return to origin probability
"""
if filtering:
values = self.data * self.model.filter
else:
values = self.data
rtop = values.sum()
return rtop
def rtop_pdf(self, normalized=True):
r""" Calculates the return to origin probability from the propagator, which is
the propagator evaluated at zero (see Descoteaux et al. [1]_, Tuch [2]_, Wu et al. [3]_)
rtop = P(0)
Parameters
----------
normalized : boolean
default true, normalize the propagator by its sum in order to obtain a pdf
Returns
-------
rtop : float
the return to origin probability
References
----------
.. [1] Descoteaux M. et. al, "Multiple q-shell diffusion propagator
imaging", Medical Image Analysis, vol 15, No. 4, p. 603-621, 2011.
.. [2] <NAME>., "Diffusion MRI of Complex Tissue Structure",
PhD Thesis, 2002.
.. [3] <NAME>. et. al, "Computation of Diffusion Function Measures
in q -Space Using Magnetic Resonance Hybrid Diffusion Imaging",
IEEE TRANSACTIONS ON MEDICAL IMAGING, vol. 27, No. 6, p. 858-865, 2008
"""
Pr = self.pdf(normalized=normalized)
center = self.qgrid_sz // 2
rtop = Pr[center, center, center]
return rtop
def msd_discrete(self, normalized=True):
r""" Calculates the mean squared displacement on the discrete propagator
..math::
:nowrap:
\begin{equation}
MSD:{DSI}=\int_{-\infty}^{\infty}\int_{-\infty}^{\infty}\int_{-\infty}^{\infty} P(\hat{\mathbf{r}}) \cdot \hat{\mathbf{r}}^{2} \ dr_x \ dr_y \ dr_z
\end{equation}
where $\hat{\mathbf{r}}$ is a point in the 3D Propagator space (see Wu et. al [1]_).
Parameters
----------
normalized : boolean
default true, normalize the propagator by its sum in order to obtain a pdf
Returns
-------
msd : float
the mean square displacement
References
----------
.. [1] <NAME>. et. al, "Hybrid diffusion imaging", NeuroImage, vol 36,
p. 617-629, 2007.
"""
Pr = self.pdf(normalized=normalized)
# create the r squared 3D matrix
gridsize = self.qgrid_sz
center = gridsize // 2
a = np.arange(gridsize) - center
x = np.tile(a, (gridsize, gridsize, 1))
y = np.tile(a.reshape(gridsize, 1), (gridsize, 1, gridsize))
z = np.tile(a.reshape(gridsize, 1, 1), (1, gridsize, gridsize))
r2 = x ** 2 + y ** 2 + z ** 2
msd = np.sum(Pr * r2) / float((gridsize ** 3))
return msd
def odf(self, sphere):
r""" Calculates the real discrete odf for a given discrete sphere
..math::
:nowrap:
\begin{equation}
\psi_{DSI}(\hat{\mathbf{u}})=\int_{0}^{\infty}P(r\hat{\mathbf{u}})r^{2}dr
\end{equation}
where $\hat{\mathbf{u}}$ is the unit vector which corresponds to a
sphere point.
"""
interp_coords = self.model.cache_get('interp_coords',
key=sphere)
if interp_coords is None:
interp_coords = pdf_interp_coords(sphere,
self.model.qradius,
self.model.origin)
self.model.cache_set('interp_coords', sphere, interp_coords)
Pr = self.pdf()
# calculate the orientation distribution function
return pdf_odf(Pr, self.model.qradius, interp_coords)
def create_qspace(gtab, origin):
""" create the 3D grid which holds the signal values (q-space)
Parameters
----------
gtab : GradientTable
origin : (3,) ndarray
center of the qspace
Returns
-------
qgrid : ndarray
qspace coordinates
"""
# create the q-table from bvecs and bvals
qtable = create_qtable(gtab)
# center and index in qspace volume
qgrid = qtable + origin
return qgrid.astype('i8')
def create_qtable(gtab):
""" create a normalized version of gradients
"""
bv = gtab.bvals
bmin = np.sort(bv)[1]
bv = np.sqrt(bv / bmin)
qtable = np.vstack((bv, bv, bv)).T * gtab.bvecs
return np.floor(qtable + .5)
def hanning_filter(gtab, filter_width):
""" create a hanning window
The signal is premultiplied by a Hanning window before
Fourier transform in order to ensure a smooth attenuation
of the signal at high q values.
Parameters
----------
gtab : GradientTable
filter_width : int
Returns
-------
filter : (N,) ndarray
where N is the number of non-b0 gradient directions
"""
qtable = create_qtable(gtab)
# calculate r - hanning filter free parameter
r = np.sqrt(qtable[:, 0] ** 2 + qtable[:, 1] ** 2 + qtable[:, 2] ** 2)
# setting hanning filter width and hanning
return .5 * np.cos(2 * np.pi * r / filter_width)
def pdf_interp_coords(sphere, rradius, origin):
""" Precompute coordinates for ODF calculation from the PDF
Parameters
----------
sphere : object,
Sphere
rradius : array, shape (N,)
line interpolation points
origin : array, shape (3,)
center of the grid
"""
interp_coords = rradius * sphere.vertices[np.newaxis].T
origin = np.reshape(origin, [-1, 1, 1])
interp_coords = origin + interp_coords
return interp_coords
def pdf_odf(Pr, rradius, interp_coords):
r""" Calculates the real ODF from the diffusion propagator(PDF) Pr
Parameters
----------
Pr : array, shape (X, X, X)
probability density function
rradius : array, shape (N,)
interpolation range on the radius
interp_coords : array, shape (3, M, N)
coordinates in the pdf for interpolating the odf
"""
PrIs = map_coordinates(Pr, interp_coords, order=1)
odf = (PrIs * rradius ** 2).sum(-1)
return odf
def half_to_full_qspace(data, gtab):
""" Half to full Cartesian grid mapping
Useful when dMRI data are provided in one qspace hemisphere as
DiffusionSpectrum expects data to be in full qspace.
Parameters
----------
data : array, shape (X, Y, Z, W)
where (X, Y, Z) volume size and W number of gradient directions
gtab : GradientTable
container for b-values and b-vectors (gradient directions)
Returns
-------
new_data : array, shape (X, Y, Z, 2 * W -1)
new_gtab : GradientTable
Notes
-----
We assume here that only one b0 is provided with the initial data. If that
is not the case then you will need to write your own preparation function
before providing the gradients and the data to the DiffusionSpectrumModel
class.
"""
bvals = gtab.bvals
bvecs = gtab.bvecs
bvals = np.append(bvals, bvals[1:])
bvecs = np.append(bvecs, - bvecs[1:], axis=0)
data = np.append(data, data[..., 1:], axis=-1)
gtab.bvals = bvals.copy()
gtab.bvecs = bvecs.copy()
return data, gtab
def project_hemisph_bvecs(gtab):
""" Project any near identical bvecs to the other hemisphere
Parameters
----------
gtab : object,
GradientTable
Notes
-------
Useful only when working with some types of dsi data.
"""
bvals = gtab.bvals
bvecs = gtab.bvecs
bvs = bvals[1:]
bvcs = bvecs[1:]
b = bvs[:, None] * bvcs
bb = np.zeros((len(bvs), len(bvs)))
pairs = []
for (i, vec) in enumerate(b):
for (j, vec2) in enumerate(b):
bb[i, j] = np.sqrt(np.sum((vec - vec2) ** 2))
I = np.argsort(bb[i])
for j in I:
if j != i:
break
if (j, i) in pairs:
pass
else:
pairs.append((i, j))
bvecs2 = bvecs.copy()
for (i, j) in pairs:
bvecs2[1 + j] = - bvecs2[1 + j]
return bvecs2, pairs
class DiffusionSpectrumDeconvModel(DiffusionSpectrumModel):
def __init__(self, gtab, qgrid_size=35, r_start=4.1, r_end=13.,
r_step=0.4, filter_width=np.inf, normalize_peaks=False):
r""" Diffusion Spectrum Deconvolution
The idea is to remove the convolution on the DSI propagator that is
caused by the truncation of the q-space in the DSI sampling.
..math::
:nowrap:
\begin{eqnarray*}
P_{dsi}(\mathbf{r}) & = & S_{0}^{-1}\iiint\limits_{\| \mathbf{q} \| \le \mathbf{q_{max}}} S(\mathbf{q})\exp(-i2\pi\mathbf{q}\cdot\mathbf{r})d\mathbf{q} \\
& = & S_{0}^{-1}\iiint\limits_{\mathbf{q}} \left( S(\mathbf{q}) \cdot M(\mathbf{q}) \right) \exp(-i2\pi\mathbf{q}\cdot\mathbf{r})d\mathbf{q} \\
& = & P(\mathbf{r}) \otimes \left( S_{0}^{-1}\iiint\limits_{\mathbf{q}} M(\mathbf{q}) \exp(-i2\pi\mathbf{q}\cdot\mathbf{r})d\mathbf{q} \right) \\
\end{eqnarray*}
where $\mathbf{r}$ is the displacement vector and $\mathbf{q}$ is the
wavevector which corresponds to different gradient directions,
$M(\mathbf{q})$ is a mask corresponding to your q-space sampling and
$\otimes$ is the convolution operator [1]_.
Parameters
----------
gtab : GradientTable,
Gradient directions and bvalues container class
qgrid_size : int,
has to be an odd number. Sets the size of the q_space grid.
For example if qgrid_size is 35 then the shape of the grid will be
``(35, 35, 35)``.
r_start : float,
ODF is sampled radially in the PDF. This parameter sets where the
sampling should start.
r_end : float,
Radial endpoint of ODF sampling
r_step : float,
Step size of the ODF sampling from r_start to r_end
filter_width : float,
Strength of the hanning filter
References
----------
.. [1] <NAME>. al, "Deconvolution in Diffusion
Spectrum Imaging", Neuroimage, 2010.
.. [2] <NAME>. al, "Acceleration of Iterative Image
Restoration Algorithms", Applied Optics, vol. 36, No. 8, p. 1766-1775,
1997.
"""
DiffusionSpectrumModel.__init__(self, gtab, qgrid_size,
r_start, r_end, r_step,
filter_width,
normalize_peaks)
@multi_voxel_fit
def fit(self, data):
return DiffusionSpectrumDeconvFit(self, data)
class DiffusionSpectrumDeconvFit(DiffusionSpectrumFit):
def pdf(self):
""" Applies the 3D FFT in the q-space grid to generate
the DSI diffusion propagator, remove the background noise with a
hard threshold and then deconvolve the propagator with the
Lucy-Richardson deconvolution algorithm
"""
values = self.data
# create the signal volume
Sq = np.zeros((self.qgrid_sz, self.qgrid_sz, self.qgrid_sz))
# fill q-space
for i in range(len(values)):
qx, qy, qz = self.model.qgrid[i]
Sq[qx, qy, qz] += values[i]
# get deconvolution PSF
DSID_PSF = self.model.cache_get('deconv_psf', key=self.model.gtab)
if DSID_PSF is None:
DSID_PSF = gen_PSF(self.model.qgrid, self.qgrid_sz,
self.qgrid_sz, self.qgrid_sz)
self.model.cache_set('deconv_psf', self.model.gtab, DSID_PSF)
# apply fourier transform
Pr = fftshift(np.abs(np.real(fftn(ifftshift(Sq),
3 * (self.qgrid_sz, )))))
# threshold propagator
Pr = threshold_propagator(Pr)
# apply LR deconvolution
Pr = LR_deconv(Pr, DSID_PSF, 5, 2)
return Pr
def threshold_propagator(P, estimated_snr=15.):
"""
Applies hard threshold on the propagator to remove background noise for the
deconvolution.
"""
P_thresholded = P.copy()
threshold = P_thresholded.max() / float(estimated_snr)
P_thresholded[P_thresholded < threshold] = 0
return P_thresholded / P_thresholded.sum()
def gen_PSF(qgrid_sampling, siz_x, siz_y, siz_z):
"""
Generate a PSF for DSI Deconvolution by taking the ifft of the binary
q-space sampling mask and truncating it to keep only the center.
"""
Sq = np.zeros((siz_x, siz_y, siz_z))
# fill q-space
for i in range(qgrid_sampling.shape[0]):
qx, qy, qz = qgrid_sampling[i]
Sq[qx, qy, qz] = 1
return Sq * np.real(np.fft.fftshift(np.fft.ifftn(np.fft.ifftshift(Sq))))
def LR_deconv(prop, psf, numit=5, acc_factor=1):
r"""
Perform Lucy-Richardson deconvolution algorithm on a 3D array.
Parameters
----------
prop : 3-D ndarray of dtype float
The 3D volume to be deconvolved
psf : 3-D ndarray of dtype float
The filter that will be used for the deconvolution.
numit : int
Number of Lucy-Richardson iterations to perform.
acc_factor : float
Exponential acceleration factor as in [1]_.
References
----------
.. [1] <NAME>. al, "Acceleration of Iterative Image
Restoration Algorithms", Applied Optics, vol. 36, No. 8, p. 1766-1775,
1997.
"""
eps = 1e-16
# Create the otf of the same size as prop
otf = np.zeros_like(prop)
# prop.ndim==3
otf[otf.shape[0] // 2 - psf.shape[0] // 2:otf.shape[0] // 2 +
psf.shape[0] // 2 + 1, otf.shape[1] // 2 - psf.shape[1] // 2:
otf.shape[1] // 2 + psf.shape[1] // 2 + 1, otf.shape[2] // 2 -
psf.shape[2] // 2:otf.shape[2] // 2 + psf.shape[2] // 2 + 1] = psf
otf = np.real(np.fft.fftn(np.fft.ifftshift(otf)))
# Enforce Positivity
prop = np.clip(prop, 0, np.inf)
prop_deconv = prop.copy()
for it in range(numit):
# Blur the estimate
reBlurred = np.real(np.fft.ifftn(otf * np.fft.fftn(prop_deconv)))
reBlurred[reBlurred < eps] = eps
# Update the estimate
prop_deconv = prop_deconv * (np.real(np.fft.ifftn(otf *
np.fft.fftn((prop / reBlurred) + eps)))) ** acc_factor
# Enforce positivity
prop_deconv = np.clip(prop_deconv, 0, np.inf)
return prop_deconv / prop_deconv.sum()
if __name__ == '__main__':
pass
```
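A hedged sketch, mirroring the `dsi_voxels` example in the class docstring, of how the single-voxel scalar measures defined above (return-to-origin probability and mean squared displacement) might be queried. It assumes the sample data shipped with dipy is available and that fitting a 1D signal returns a single-voxel fit.

```python
from dipy.data import dsi_voxels
from dipy.reconst.dsi import DiffusionSpectrumModel

data, gtab = dsi_voxels()
dsmodel = DiffusionSpectrumModel(gtab)

# Fit a single voxel (1D signal) and query the propagator summaries
fit = dsmodel.fit(data[0, 0, 0])
print(fit.rtop_signal())   # return-to-origin probability from the signal
print(fit.rtop_pdf())      # return-to-origin probability from the propagator
print(fit.msd_discrete())  # mean squared displacement
```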
#### File: dipy/reconst/mapmri.py
```python
import numpy as np
from dipy.reconst.multi_voxel import multi_voxel_fit
from dipy.reconst.base import ReconstModel, ReconstFit
from scipy.special import hermite, gamma
from scipy.misc import factorial, factorial2
import dipy.reconst.dti as dti
from warnings import warn
from dipy.core.gradients import gradient_table
from ..utils.optpkg import optional_package
cvxopt, have_cvxopt, _ = optional_package("cvxopt")
class MapmriModel(ReconstModel):
r"""Mean Apparent Propagator MRI (MAPMRI) [1]_ of the diffusion signal.
The main idea is to model the diffusion signal as a linear combination of
the continuous functions presented in [2]_ but extending it in three
dimensions.
The main difference with the SHORE proposed in [3]_ is that MAPMRI 3D
extension is provided using a set of three basis functions for the radial
part, one for the signal along x, one for y and one for z, while [3]_
uses one basis function to model the radial part and real Spherical
Harmonics to model the angular part.
From the MAPMRI coefficients it is possible to use the analytical formulae
to estimate the ODF.
References
----------
.. [1] <NAME>. et. al, "Mean apparent propagator (MAP) MRI: A novel
diffusion imaging method for mapping tissue microstructure",
NeuroImage, 2013.
.. [2] <NAME>. et. al, "Simple harmonic oscillator based reconstruction
and estimation for one-dimensional q-space magnetic resonance
1D-SHORE)", eapoc Intl Soc Mag Reson Med, vol. 16, p. 35., 2008.
.. [3] <NAME>. et. al, "Continuous diffusion signal, EAP and ODF
estimation via Compressive Sensing in diffusion MRI", Medical
Image Analysis, 2013.
"""
def __init__(self,
gtab,
radial_order=4,
lambd=1e-16,
eap_cons=False,
anisotropic_scaling=True,
eigenvalue_threshold=1e-04,
bmax_threshold=2000):
r""" Analytical and continuous modeling of the diffusion signal with
respect to the MAPMRI basis [1]_.
The main idea is to model the diffusion signal as a linear combination of
the continuous functions presented in [2]_ but extending it in three
dimensions.
The main difference with the SHORE proposed in [3]_ is that MAPMRI 3D
extension is provided using a set of three basis functions for the radial
part, one for the signal along x, one for y and one for z, while [3]_
uses one basis function to model the radial part and real Spherical
Harmonics to model the angular part.
From the MAPMRI coefficients it is possible to use the analytical formulae
to estimate the ODF.
Parameters
----------
gtab : GradientTable,
gradient directions and bvalues container class
radial_order : unsigned int,
an even integer that represents the order of the basis
lambd : float,
radial regularisation constant
eap_cons : bool,
Constrain the propagator to be positive.
anisotropic_scaling : bool,
If false, force the basis function to be identical in the three
dimensions (SHORE like).
eigenvalue_threshold : float,
set the minimum of the tensor eigenvalues in order to avoid
stability problem
bmax_threshold : float,
set the maximum b-value for the tensor estimation
References
----------
.. [1] Ozarslan E. et. al, "Mean apparent propagator (MAP) MRI: A novel
diffusion imaging method for mapping tissue microstructure",
NeuroImage, 2013.
.. [2] Ozarslan E. et. al, "Simple harmonic oscillator based reconstruction
and estimation for one-dimensional q-space magnetic resonance
1D-SHORE)", eapoc Intl Soc Mag Reson Med, vol. 16, p. 35., 2008.
.. [3] Ozarslan E. et. al, "Simple harmonic oscillator based reconstruction
and estimation for three-dimensional q-space mri", ISMRM 2009.
Examples
--------
In this example, where the data, gradient table and sphere tessellation
used for reconstruction are provided, we model the diffusion signal
with respect to the MAPMRI model and compute the analytical ODF.
>>> from dipy.core.gradients import gradient_table
>>> from dipy.data import dsi_voxels, get_sphere
>>> data, gtab = dsi_voxels()
>>> sphere = get_sphere('symmetric724')
>>> from dipy.sims.voxel import SticksAndBall
>>> data, golden_directions = SticksAndBall(gtab, d=0.0015, S0=1, angles=[(0, 0), (90, 0)], fractions=[50, 50], snr=None)
>>> from dipy.reconst.mapmri import MapmriModel
>>> radial_order = 4
>>> map_model = MapmriModel(gtab, radial_order=radial_order)
>>> mapfit = map_model.fit(data)
>>> odf= mapfit.odf(sphere)
"""
self.bvals = gtab.bvals
self.bvecs = gtab.bvecs
self.gtab = gtab
self.radial_order = radial_order
self.lambd = lambd
self.eap_cons = eap_cons
if self.eap_cons:
if not have_cvxopt:
raise ValueError(
'CVXOPT package needed to enforce constraints')
import cvxopt.solvers
self.anisotropic_scaling = anisotropic_scaling
if (gtab.big_delta is None) or (gtab.small_delta is None):
self.tau = 1 / (4 * np.pi ** 2)
else:
self.tau = gtab.big_delta - gtab.small_delta / 3.0
self.eigenvalue_threshold = eigenvalue_threshold
self.ind = self.gtab.bvals <= bmax_threshold
gtab_dti = gradient_table(
self.gtab.bvals[self.ind], self.gtab.bvecs[self.ind, :])
self.tenmodel = dti.TensorModel(gtab_dti)
self.ind_mat = mapmri_index_matrix(self.radial_order)
self.Bm = b_mat(self.ind_mat)
@multi_voxel_fit
def fit(self, data):
tenfit = self.tenmodel.fit(data[self.ind])
evals = tenfit.evals
R = tenfit.evecs
evals = np.clip(evals, self.eigenvalue_threshold, evals.max())
if self.anisotropic_scaling:
mu = np.sqrt(evals * 2 * self.tau)
else:
mumean = np.sqrt(evals.mean() * 2 * self.tau)
mu = np.array([mumean, mumean, mumean])
qvals = np.sqrt(self.gtab.bvals / self.tau) / (2 * np.pi)
qvecs = np.dot(self.gtab.bvecs, R)
q = qvecs * qvals[:, None]
M = mapmri_phi_matrix(self.radial_order, mu, q.T)
# This is a simple empirical regularization, to be replaced
I = np.diag(self.ind_mat.sum(1) ** 2)
if self.eap_cons:
if not have_cvxopt:
raise ValueError(
'CVXOPT package needed to enforce constraints')
w_s = "The implementation of MAPMRI depends on CVXOPT "
w_s += " (http://cvxopt.org/). This software is licensed "
w_s += "under the GPL (see: http://cvxopt.org/copyright.html) "
w_s += " and you may be subject to this license when using MAPMRI."
warn(w_s)
import cvxopt.solvers
rmax = 2 * np.sqrt(10 * evals.max() * self.tau)
r_index, r_grad = create_rspace(11, rmax)
K = mapmri_psi_matrix(
self.radial_order, mu, r_grad[0:len(r_grad) // 2, :])
Q = cvxopt.matrix(np.dot(M.T, M) + self.lambd * I)
p = cvxopt.matrix(-1 * np.dot(M.T, data))
G = cvxopt.matrix(-1 * K)
h = cvxopt.matrix(np.zeros((K.shape[0])), (K.shape[0], 1))
cvxopt.solvers.options['show_progress'] = False
sol = cvxopt.solvers.qp(Q, p, G, h)
if sol['status'] != 'optimal':
warn('Optimization did not find a solution')
coef = np.array(sol['x'])[:, 0]
else:
pseudoInv = np.dot(
np.linalg.inv(np.dot(M.T, M) + self.lambd * I), M.T)
coef = np.dot(pseudoInv, data)
E0 = 0
for i in range(self.ind_mat.shape[0]):
E0 = E0 + coef[i] * self.Bm[i]
coef = coef / E0
return MapmriFit(self, coef, mu, R, self.ind_mat)
class MapmriFit(ReconstFit):
def __init__(self, model, mapmri_coef, mu, R, ind_mat):
""" Calculates diffusion properties for a single voxel
Parameters
----------
model : object,
AnalyticalModel
mapmri_coef : 1d ndarray,
mapmri coefficients
mu : array, shape (3,)
scale parameters vector for x, y and z
R : array, shape (3,3)
rotation matrix
ind_mat : array, shape (N,3)
indices of the basis for x, y and z
"""
self.model = model
self._mapmri_coef = mapmri_coef
self.gtab = model.gtab
self.radial_order = model.radial_order
self.mu = mu
self.R = R
self.ind_mat = ind_mat
@property
def mapmri_mu(self):
"""The MAPMRI scale factors
"""
return self.mu
@property
def mapmri_R(self):
"""The MAPMRI rotation matrix
"""
return self.R
@property
def mapmri_coeff(self):
"""The MAPMRI coefficients
"""
return self._mapmri_coef
def odf(self, sphere, s=0):
r""" Calculates the analytical Orientation Distribution Function (ODF)
from the signal [1]_ Eq. 32.
Parameters
----------
s : unsigned int
radial moment of the ODF
References
----------
.. [1] <NAME>. et. al, "Mean apparent propagator (MAP) MRI: A novel
diffusion imaging method for mapping tissue microstructure",
NeuroImage, 2013.
"""
v_ = sphere.vertices
v = np.dot(v_, self.R)
I_s = mapmri_odf_matrix(self.radial_order, self.mu, s, v)
odf = np.dot(I_s, self._mapmri_coef)
return odf
def rtpp(self):
r""" Calculates the analytical return to the plane probability (RTPP)
[1]_.
References
----------
.. [1] <NAME>. et. al, "Mean apparent propagator (MAP) MRI: A novel
diffusion imaging method for mapping tissue microstructure",
NeuroImage, 2013.
"""
Bm = self.model.Bm
rtpp = 0
const = 1 / (np.sqrt(2 * np.pi) * self.mu[0])
for i in range(self.ind_mat.shape[0]):
if Bm[i] > 0.0:
rtpp += (-1.0) ** (self.ind_mat[i, 0] /
2.0) * self._mapmri_coef[i] * Bm[i]
return const * rtpp
def rtap(self):
r""" Calculates the analytical return to the axis probability (RTAP)
[1]_.
References
----------
.. [1] <NAME>. et. al, "Mean apparent propagator (MAP) MRI: A novel
diffusion imaging method for mapping tissue microstructure",
NeuroImage, 2013.
"""
Bm = self.model.Bm
rtap = 0
const = 1 / (2 * np.pi * self.mu[1] * self.mu[2])
for i in range(self.ind_mat.shape[0]):
if Bm[i] > 0.0:
rtap += (-1.0) ** (
(self.ind_mat[i, 1] + self.ind_mat[i, 2]) / 2.0) * self._mapmri_coef[i] * Bm[i]
return const * rtap
def rtop(self):
r""" Calculates the analytical return to the origin probability (RTOP)
[1]_.
References
----------
.. [1] <NAME>. al, "Mean apparent propagator (MAP) MRI: A novel
diffusion imaging method for mapping tissue microstructure",
NeuroImage, 2013.
"""
Bm = self.model.Bm
rtop = 0
const = 1 / \
np.sqrt(
8 * np.pi ** 3 * (self.mu[0] ** 2 * self.mu[1] ** 2 * self.mu[2] ** 2))
for i in range(self.ind_mat.shape[0]):
if Bm[i] > 0.0:
rtop += (-1.0) ** ((self.ind_mat[i, 0] + self.ind_mat[i, 1] + self.ind_mat[
i, 2]) / 2.0) * self._mapmri_coef[i] * Bm[i]
return const * rtop
def predict(self, gtab, S0=1.0):
"""
Predict a signal for this MapmriModel class instance given a gradient
table.
Parameters
----------
gtab : GradientTable,
gradient directions and bvalues container class
S0 : float or ndarray
The non diffusion-weighted signal in every voxel, or across all
voxels. Default: 1
"""
if (gtab.big_delta is None) or (gtab.small_delta is None):
tau = 1 / (4 * np.pi ** 2)
else:
tau = gtab.big_delta - gtab.small_delta / 3.0
qvals = np.sqrt(gtab.bvals / tau) / (2 * np.pi)
qvecs = np.dot(gtab.bvecs, self.R)
q = qvecs * qvals[:, None]
s_mat = mapmri_phi_matrix(self.radial_order, self.mu, q.T)
S_reconst = S0 * np.dot(s_mat, self._mapmri_coef)
return S_reconst
def mapmri_index_matrix(radial_order):
r""" Calculates the indices for the MAPMRI [1]_ basis in x, y and z.
Parameters
----------
radial_order : unsigned int
radial order of MAPMRI basis
Returns
-------
index_matrix : array, shape (N,3)
ordering of the basis in x, y, z
References
----------
.. [1] <NAME>. et. al, "Mean apparent propagator (MAP) MRI: A novel
diffusion imaging method for mapping tissue microstructure",
NeuroImage, 2013.
"""
index_matrix = []
for n in range(0, radial_order + 1, 2):
for i in range(0, n + 1):
for j in range(0, n - i + 1):
index_matrix.append([n - i - j, j, i])
return np.array(index_matrix)
def b_mat(ind_mat):
r""" Calculates the B coefficients from [1]_ Eq. 27.
Parameters
----------
index_matrix : array, shape (N,3)
ordering of the basis in x, y, z
Returns
-------
B : array, shape (N,)
B coefficients for the basis
References
----------
.. [1] <NAME>. et. al, "Mean apparent propagator (MAP) MRI: A novel
diffusion imaging method for mapping tissue microstructure",
NeuroImage, 2013.
"""
B = np.zeros(ind_mat.shape[0])
for i in range(ind_mat.shape[0]):
n1, n2, n3 = ind_mat[i]
K = int(not(n1 % 2) and not(n2 % 2) and not(n3 % 2))
B[i] = K * np.sqrt(factorial(n1) * factorial(n2) * factorial(n3)
) / (factorial2(n1) * factorial2(n2) * factorial2(n3))
return B
def mapmri_phi_1d(n, q, mu):
r""" One dimensional MAPMRI basis function from [1]_ Eq. 4.
Parameters
-------
n : unsigned int
order of the basis
q : array, shape (N,)
points in the q-space in which evaluate the basis
mu : float
scale factor of the basis
References
----------
.. [1] <NAME>. et. al, "Mean apparent propagator (MAP) MRI: A novel
diffusion imaging method for mapping tissue microstructure",
NeuroImage, 2013.
"""
qn = 2 * np.pi * mu * q
H = hermite(n)(qn)
i = np.complex(0, 1)
f = factorial(n)
k = i ** (-n) / np.sqrt(2 ** (n) * f)
phi = k * np.exp(- qn ** 2 / 2) * H
return phi
def mapmri_phi_3d(n, q, mu):
r""" Three dimensional MAPMRI basis function from [1]_ Eq. 23.
Parameters
----------
n : array, shape (3,)
order of the basis function for x, y, z
q : array, shape (N,3)
points in the q-space in which evaluate the basis
mu : array, shape (3,)
scale factors of the basis for x, y, z
References
----------
.. [1] <NAME>. et. al, "Mean apparent propagator (MAP) MRI: A novel
diffusion imaging method for mapping tissue microstructure",
NeuroImage, 2013.
"""
n1, n2, n3 = n
qx, qy, qz = q
mux, muy, muz = mu
phi = mapmri_phi_1d
return np.real(phi(n1, qx, mux) * phi(n2, qy, muy) * phi(n3, qz, muz))
def mapmri_phi_matrix(radial_order, mu, q_gradients):
r"""Compute the MAPMRI phi matrix for the signal [1]_
Parameters
----------
radial_order : unsigned int,
an even integer that represents the order of the basis
mu : array, shape (3,)
scale factors of the basis for x, y, z
q_gradients : array, shape (N,3)
points in the q-space in which evaluate the basis
References
----------
.. [1] <NAME>. et. al, "Mean apparent propagator (MAP) MRI: A novel
diffusion imaging method for mapping tissue microstructure",
NeuroImage, 2013.
"""
ind_mat = mapmri_index_matrix(radial_order)
n_elem = ind_mat.shape[0]
n_qgrad = q_gradients.shape[1]
M = np.zeros((n_qgrad, n_elem))
for j in range(n_elem):
M[:, j] = mapmri_phi_3d(ind_mat[j], q_gradients, mu)
return M
def mapmri_psi_1d(n, x, mu):
r""" One dimensional MAPMRI propagator basis function from [1]_ Eq. 10.
Parameters
----------
n : unsigned int
order of the basis
x : array, shape (N,)
points in the r-space in which evaluate the basis
mu : float
scale factor of the basis
References
----------
.. [1] <NAME>. et. al, "Mean apparent propagator (MAP) MRI: A novel
diffusion imaging method for mapping tissue microstructure",
NeuroImage, 2013.
"""
H = hermite(n)(x / mu)
f = factorial(n)
k = 1 / (np.sqrt(2 ** (n + 1) * np.pi * f) * mu)
psi = k * np.exp(- x ** 2 / (2 * mu ** 2)) * H
return psi
def mapmri_psi_3d(n, r, mu):
r""" Three dimensional MAPMRI propagator basis function from [1]_ Eq. 22.
Parameters
----------
n : array, shape (3,)
order of the basis function for x, y, z
r : array, shape (N,3)
points in the r-space in which evaluate the basis
mu : array, shape (3,)
scale factors of the basis for x, y, z
References
----------
.. [1] <NAME>. et. al, "Mean apparent propagator (MAP) MRI: A novel
diffusion imaging method for mapping tissue microstructure",
NeuroImage, 2013.
"""
n1, n2, n3 = n
x, y, z = r.T
mux, muy, muz = mu
psi = mapmri_psi_1d
return psi(n1, x, mux) * psi(n2, y, muy) * psi(n3, z, muz)
def mapmri_psi_matrix(radial_order, mu, rgrad):
r"""Compute the MAPMRI psi matrix for the propagator [1]_
Parameters
----------
radial_order : unsigned int,
an even integer that represents the order of the basis
mu : array, shape (3,)
scale factors of the basis for x, y, z
rgrad : array, shape (N,3)
points in the r-space in which evaluate the EAP
References
----------
.. [1] <NAME>. et. al, "Mean apparent propagator (MAP) MRI: A novel
diffusion imaging method for mapping tissue microstructure",
NeuroImage, 2013.
"""
ind_mat = mapmri_index_matrix(radial_order)
n_elem = ind_mat.shape[0]
n_rgrad = rgrad.shape[0]
K = np.zeros((n_rgrad, n_elem))
for j in range(n_elem):
K[:, j] = mapmri_psi_3d(ind_mat[j], rgrad, mu)
return K
def mapmri_odf_matrix(radial_order, mu, s, vertices):
r"""Compute the MAPMRI ODF matrix [1]_ Eq. 33.
Parameters
----------
radial_order : unsigned int,
an even integer that represents the order of the basis
mu : array, shape (3,)
scale factors of the basis for x, y, z
s : unsigned int
radial moment of the ODF
vertices : array, shape (N,3)
points of the sphere shell in the r-space in which evaluate the ODF
References
----------
.. [1] <NAME>. al, "Mean apparent propagator (MAP) MRI: A novel
diffusion imaging method for mapping tissue microstructure",
NeuroImage, 2013.
"""
ind_mat = mapmri_index_matrix(radial_order)
n_vert = vertices.shape[0]
n_elem = ind_mat.shape[0]
odf_mat = np.zeros((n_vert, n_elem))
mux, muy, muz = mu
# Eq, 35a
rho = 1.0 / np.sqrt((vertices[:, 0] / mux) ** 2 +
(vertices[:, 1] / muy) ** 2 + (vertices[:, 2] / muz) ** 2)
# Eq, 35b
alpha = 2 * rho * (vertices[:, 0] / mux)
# Eq, 35c
beta = 2 * rho * (vertices[:, 1] / muy)
# Eq, 35d
gamma = 2 * rho * (vertices[:, 2] / muz)
const = rho ** (3 + s) / np.sqrt(2 ** (2 - s) * np.pi **
3 * (mux ** 2 * muy ** 2 * muz ** 2))
for j in range(n_elem):
n1, n2, n3 = ind_mat[j]
f = np.sqrt(factorial(n1) * factorial(n2) * factorial(n3))
odf_mat[:, j] = const * f * \
_odf_cfunc(n1, n2, n3, alpha, beta, gamma, s)
return odf_mat
def _odf_cfunc(n1, n2, n3, a, b, g, s):
r"""Compute the MAPMRI ODF function from [1]_ Eq. 34.
References
----------
.. [1] <NAME>. al, "Mean apparent propagator (MAP) MRI: A novel
diffusion imaging method for mapping tissue microstructure",
NeuroImage, 2013.
"""
f = factorial
f2 = factorial2
sumc = 0
for i in range(0, n1 + 1, 2):
for j in range(0, n2 + 1, 2):
for k in range(0, n3 + 1, 2):
nn = n1 + n2 + n3 - i - j - k
gam = (-1) ** ((i + j + k) / 2.0) * gamma((3 + s + nn) / 2.0)
num1 = a ** (n1 - i)
num2 = b ** (n2 - j)
num3 = g ** (n3 - k)
num = gam * num1 * num2 * num3
denom = f(n1 - i) * f(n2 - j) * f(
n3 - k) * f2(i) * f2(j) * f2(k)
sumc += num / denom
return sumc
def mapmri_EAP(r_list, radial_order, coeff, mu, R):
r""" Evaluate the MAPMRI propagator in a set of points of the r-space.
Parameters
----------
r_list : array, shape (N,3)
points of the r-space in which evaluate the EAP
radial_order : unsigned int,
an even integer that represents the order of the basis
coeff : array, shape (N,)
the MAPMRI coefficients
mu : array, shape (3,)
scale factors of the basis for x, y, z
R : array, shape (3,3)
MAPMRI rotation matrix
"""
r_list = np.dot(r_list, R)
ind_mat = mapmri_index_matrix(radial_order)
n_elem = ind_mat.shape[0]
n_rgrad = r_list.shape[0]
data_out = np.zeros(n_rgrad)
for j in range(n_elem):
data_out[:] += coeff[j] * mapmri_psi_3d(ind_mat[j], r_list, mu)
return data_out
def create_rspace(gridsize, radius_max):
""" Create the real space table, that contains the points in which
to compute the pdf.
Parameters
----------
gridsize : unsigned int
dimension of the propagator grid
radius_max : float
maximal radius in which compute the propagator
Returns
-------
vecs : array, shape (N,3)
positions of the pdf points in a 3D matrix
tab : array, shape (N,3)
real space points in which calculates the pdf
"""
radius = gridsize // 2
vecs = []
for i in range(-radius, radius + 1):
for j in range(-radius, radius + 1):
for k in range(-radius, radius + 1):
vecs.append([i, j, k])
vecs = np.array(vecs, dtype=np.float32)
tab = vecs / radius
tab = tab * radius_max
vecs = vecs + radius
return vecs, tab
```
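Following the simulated-signal example in the `MapmriModel` docstring, a hedged sketch of how the scalar indices defined on `MapmriFit` above (rtop, rtap, rtpp) might be computed; the acquisition scheme and simulation parameters are taken from that docstring and are assumptions here.

```python
from dipy.data import dsi_voxels
from dipy.sims.voxel import SticksAndBall
from dipy.reconst.mapmri import MapmriModel

_, gtab = dsi_voxels()
signal, _ = SticksAndBall(gtab, d=0.0015, S0=1,
                          angles=[(0, 0), (90, 0)],
                          fractions=[50, 50], snr=None)

map_model = MapmriModel(gtab, radial_order=4)
mapfit = map_model.fit(signal)
print(mapfit.rtop())   # return-to-origin probability
print(mapfit.rtap())   # return-to-axis probability
print(mapfit.rtpp())   # return-to-plane probability
```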
#### File: tracking/tests/test_markov.py
```python
from __future__ import division, print_function, absolute_import
import numpy as np
from dipy.tracking import utils
from dipy.reconst.interpolate import NearestNeighborInterpolator
from dipy.tracking.markov import (BoundaryStepper, _closest_peak,
FixedSizeStepper, MarkovIntegrator,
markov_streamline, OutsideImage,
ClosestDirectionTracker,
ProbabilisticOdfWeightedTracker)
from dipy.core.sphere import HemiSphere, unit_octahedron
from numpy.testing import (assert_array_almost_equal, assert_array_equal,
assert_equal, assert_, assert_raises)
def test_BoundaryStepper():
os = 1
bi = BoundaryStepper(overstep=os)
loc = np.array([.5, .5, .5])
step = np.array([1, 1, 1]) / np.sqrt(3)
assert_array_almost_equal(bi(loc, step), os * step + [1, 1, 1])
assert_array_almost_equal(bi(loc, -step), -os * step)
os = 2
bi = BoundaryStepper((2, 3, 4), overstep=2)
assert_array_almost_equal(bi(loc, step), os * step + [2, 2, 2])
assert_array_almost_equal(bi(loc, -step), -os * step)
loc = np.array([7.5, 7.5, 7.5])
assert_array_almost_equal(bi(loc, step), os * step + [8, 8, 8])
assert_array_almost_equal(bi(loc, -step), [6, 6, 6] - os * step)
def test_FixedSizeStepper():
fsi = FixedSizeStepper(step_size=2.)
loc = np.array([2, 3, 12])
step = np.array([3, 2, 4]) / np.sqrt(3)
assert_array_almost_equal(fsi(loc, step), loc + 2. * step)
assert_array_almost_equal(fsi(loc, -step), loc - 2. * step)
def test_markov_streamline():
east = np.array([1, 0, 0])
class MoveEastWest(object):
def get_direction(self, location, prev_step):
if np.any(location < 0):
raise OutsideImage
elif np.any(location > 10.):
return None
if np.dot(prev_step, east) >= 0:
return east
else:
return -east
seed = np.array([5.2, 0, 0])
first_step = east
dir_getter = MoveEastWest()
stepper = FixedSizeStepper(.5)
# The streamline terminates when it goes past (10, 0, 0). (10.2, 0, 0)
# should be the last point in the streamline
streamline = markov_streamline(dir_getter.get_direction, stepper,
seed, first_step, 100)
expected = np.zeros((11, 3))
expected[:, 0] = np.linspace(5.2, 10.2, 11)
assert_array_almost_equal(streamline, expected)
# OutsideImage gets raised when the streamline points become negative
# the streamline should end, and the negative points should not be part
# of the streamline
first_step = -east
streamline = markov_streamline(dir_getter.get_direction, stepper,
seed, first_step, 100)
expected = np.zeros((11, 3))
expected[:, 0] = np.linspace(5.2, 0.2, 11)
assert_array_almost_equal(streamline, expected)
def test_MarkovIntegrator():
class KeepGoing(MarkovIntegrator):
def _next_step(self, location, prev_step):
if prev_step is None:
return np.array([[1., 0, 0],
[0, 1., 0],
[0, 0., 1]])
if not self._mask[location]:
return None
else:
return prev_step
data = np.ones((10, 10, 10, 65))
data_interp = NearestNeighborInterpolator(data, (1, 1, 1))
seeds = [np.array([5.2, 5.2, 5.2])]
stepper = FixedSizeStepper(.5)
mask = np.ones((10, 10, 10), 'bool')
gen = KeepGoing(model=None, interpolator=data_interp, mask=mask,
take_step=stepper, angle_limit=0., seeds=seeds)
streamlines = list(gen)
assert_equal(len(streamlines), 3)
expected = np.zeros((20, 3))
for i in range(3):
expected[:] = 5.2
expected[:, i] = np.arange(.2, 10, .5)
assert_array_almost_equal(streamlines[i], expected)
# Track only the first (largest) peak for each seed
gen = KeepGoing(model=None, interpolator=data_interp, mask=mask,
take_step=stepper, angle_limit=0., seeds=seeds,
max_cross=1)
streamlines = list(gen)
assert_equal(len(streamlines), 1)
expected = np.zeros((20, 3))
expected[:] = 5.2
expected[:, 0] = np.arange(.2, 10, .5)
assert_array_almost_equal(streamlines[0], expected)
mask = np.ones((20, 20, 20), 'bool')
gen = KeepGoing(model=None, interpolator=data_interp, mask=mask,
take_step=stepper, angle_limit=0., seeds=seeds,
max_cross=1, mask_voxel_size=(.5, .5, .5))
streamlines = list(gen)
assert_equal(len(streamlines), 1)
assert_array_almost_equal(streamlines[0], expected)
# Test tracking with affine
affine = np.eye(4)
affine[:3, :] = np.random.random((3, 4)) - .5
seeds = [np.dot(affine[:3, :3], seeds[0] - .5) + affine[:3, 3]]
sl_affine = KeepGoing(model=None, interpolator=data_interp, mask=mask,
take_step=stepper, angle_limit=0., seeds=seeds,
max_cross=1, mask_voxel_size=(.5, .5, .5), affine=affine)
default = np.eye(4)
default[:3, 3] = .5
sl_default = list(utils.move_streamlines(sl_affine, default, affine))
assert_equal(len(sl_default), 1)
assert_array_almost_equal(sl_default[0], expected)
def test_closest_peak():
peak_values = np.array([1, .9, .8, .7, .6, .2, .1])
peak_points = np.array([[1., 0., 0.],
[0., .9, .1],
[0., 1., 0.],
[.9, .1, 0.],
[0., 0., 1.],
[1., 1., 0.],
[0., 1., 1.]])
norms = np.sqrt((peak_points * peak_points).sum(-1))
peak_points = peak_points / norms[:, None]
prev = np.array([1, -.9, 0])
prev = prev / np.sqrt(np.dot(prev, prev))
cp = _closest_peak(peak_points, prev, 0.)
assert_array_equal(cp, peak_points[0])
cp = _closest_peak(peak_points, -prev, 0.)
assert_array_equal(cp, -peak_points[0])
def test_ClosestDirectionTracker():
class MyModel(object):
def fit(self, data):
return MyFit()
class MyFit(object):
pass
class MyDirectionFinder(object):
directions = np.array([[1., 0, 0],
[0, 1., 0],
[0, 0., 1]])
def __call__(self, fit):
return self.directions
data = np.ones((10, 10, 10, 65))
data_interp = NearestNeighborInterpolator(data, (1, 1, 1))
mask = np.ones((10, 10, 10), 'bool')
mask[0, 0, 0] = False
cdt = ClosestDirectionTracker(model=MyModel(), interpolator=data_interp,
mask=mask, take_step=None,
angle_limit=90., seeds=None)
# We're going to use a silly set of directions for the test
cdt._get_directions = MyDirectionFinder()
prev_step = np.array([[.9, .1, .1],
[.1, .9, .1],
[.1, .1, .9]])
prev_step /= np.sqrt((prev_step * prev_step).sum(-1))[:, None]
a, b, c = prev_step
assert_array_equal(cdt._next_step([1., 1., 1.], a), [1, 0, 0])
assert_array_equal(cdt._next_step([1., 1., 1.], b), [0, 1, 0])
assert_array_equal(cdt._next_step([1., 1., 1.], c), [0, 0, 1])
# Assert raises outside image
assert_raises(OutsideImage, cdt._next_step, [-1., 1., 1.], c)
# Returns None when mask is False
assert_equal(cdt._next_step([0, 0, 0], c), None)
# Test Angle limit
cdt = ClosestDirectionTracker(model=MyModel(), interpolator=data_interp,
mask=mask, take_step=None,
angle_limit=45, seeds=None)
# We're going to use a silly set of directions for the test
cdt._get_directions = MyDirectionFinder()
sq3 = np.sqrt(3)
a = np.array([sq3 / 2, 1. / 2, 0])
b = np.array([1. / 2, sq3 / 2, 0])
c = np.array([1, 1, 1]) / sq3
assert_array_equal(cdt._next_step([1., 1., 1.], a), [1, 0, 0])
assert_array_equal(cdt._next_step([1., 1., 1.], b), [0, 1, 0])
assert_array_equal(cdt._next_step([1., 1., 1.], c), None)
def test_ProbabilisticOdfWeightedTracker():
sphere = HemiSphere.from_sphere(unit_octahedron)
# A simple image with three possible configurations, a vertical tract,
# a horizontal tract and a crossing
odf_list = [np.array([0., 0., 0.]),
np.array([1., 0., 0.]),
np.array([0., 1., 0.]),
np.array([1., 1., 0.]),
]
simple_image = np.array([[0, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0],
[0, 3, 2, 2, 2, 0],
[0, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0],
])
# Make the image 4d
simple_image = simple_image[..., None, None]
# Simple model and fit for this image
class MyModel():
def fit(self, data):
return MyFit(data)
class MyFit(object):
def __init__(self, n):
self.n = n
def odf(self, sphere):
return odf_list[self.n]
seeds = [np.array([1.5, 1.5, .5])] * 30
model = MyModel()
mask = np.ones([5, 6, 1], dtype="bool")
stepper = FixedSizeStepper(1.)
interpolator = NearestNeighborInterpolator(simple_image, (1, 1, 1))
# These are the only two possible paths through the simple_image
pwt = ProbabilisticOdfWeightedTracker(model, interpolator, mask,
stepper, 90, seeds, sphere)
expected = [np.array([[0.5, 1.5, 0.5],
[1.5, 1.5, 0.5],
[2.5, 1.5, 0.5],
[2.5, 2.5, 0.5],
[2.5, 3.5, 0.5],
[2.5, 4.5, 0.5],
[2.5, 5.5, 0.5]]),
np.array([[0.5, 1.5, 0.5],
[1.5, 1.5, 0.5],
[2.5, 1.5, 0.5],
[3.5, 1.5, 0.5],
[4.5, 1.5, 0.5]])
]
def allclose(x, y):
return x.shape == y.shape and np.allclose(x, y)
path = [False, False]
for streamline in pwt:
if allclose(streamline, expected[0]):
path[0] = True
elif allclose(streamline, expected[1]):
path[1] = True
else:
raise AssertionError()
assert_(all(path))
# The first path is not possible if 90 degree turns are excluded
pwt = ProbabilisticOdfWeightedTracker(model, interpolator, mask,
stepper, 80, seeds, sphere)
for streamline in pwt:
assert_(np.allclose(streamline, expected[1]))
```
#### File: dipy/utils/arrfuncs.py
```python
import sys
from distutils.version import LooseVersion
import numpy as np
from nibabel.volumeutils import endian_codes, native_code, swapped_code
NUMPY_LESS_1_8 = LooseVersion(np.version.short_version) < '1.8'
def as_native_array(arr):
""" Return `arr` as native byteordered array
If arr is already native byte ordered, return unchanged. If it is opposite
endian, then make a native byte ordered copy and return that
Parameters
----------
arr : ndarray
Returns
-------
native_arr : ndarray
If `arr` was native order, this is just `arr`. Otherwise it's a new
array such that ``np.all(native_arr == arr)``, with native byte
ordering.
"""
if endian_codes[arr.dtype.byteorder] == native_code:
return arr
return arr.byteswap().newbyteorder()
def pinv(a, rcond=1e-15):
"""Vectorized version of `numpy.linalg.pinv`
If numpy version is less than 1.8, it falls back to iterating over
`np.linalg.pinv` since there isn't a vectorized version of `np.linalg.svd`
available.
Parameters
----------
a : array_like (..., M, N)
Matrix to be pseudo-inverted.
rcond : float
Cutoff for small singular values.
Returns
-------
B : ndarray (..., N, M)
The pseudo-inverse of `a`.
Raises
------
LinAlgError
If the SVD computation does not converge.
See Also
--------
np.linalg.pinv
"""
a = np.asarray(a)
if NUMPY_LESS_1_8:
if a.ndim <= 2:
# properly handle the case of a single 2D array
return np.linalg.pinv(a, rcond)
shape = a.shape[:-2]
a = a.reshape(-1, a.shape[-2], a.shape[-1])
result = np.empty((a.shape[0], a.shape[2], a.shape[1]))
for i, item in enumerate(a):
result[i] = np.linalg.pinv(item, rcond)
return result.reshape(shape + (a.shape[2], a.shape[1]))
else:
swap = np.arange(a.ndim)
swap[[-2, -1]] = swap[[-1, -2]]
u, s, v = np.linalg.svd(a, full_matrices=False)
cutoff = np.maximum.reduce(s, axis=-1, keepdims=True) * rcond
mask = s > cutoff
s[mask] = 1. / s[mask]
s[~mask] = 0
return np.einsum('...ij,...jk',
np.transpose(v, swap) * s[..., None, :],
np.transpose(u, swap))
def eigh(a, UPLO='L'):
"""Iterate over `np.linalg.eigh` if it doesn't support vectorized operation
Parameters
----------
a : array_like (..., M, M)
Hermitian/Symmetric matrices whose eigenvalues and
eigenvectors are to be computed.
UPLO : {'L', 'U'}, optional
Specifies whether the calculation is done with the lower triangular
part of `a` ('L', default) or the upper triangular part ('U').
Returns
-------
w : ndarray (..., M)
The eigenvalues in ascending order, each repeated according to
its multiplicity.
v : ndarray (..., M, M)
The column ``v[..., :, i]`` is the normalized eigenvector corresponding
to the eigenvalue ``w[..., i]``.
Raises
------
LinAlgError
If the eigenvalue computation does not converge.
See Also
--------
np.linalg.eigh
"""
a = np.asarray(a)
if a.ndim > 2 and NUMPY_LESS_1_8:
shape = a.shape[:-2]
a = a.reshape(-1, a.shape[-2], a.shape[-1])
evals = np.empty((a.shape[0], a.shape[1]))
evecs = np.empty((a.shape[0], a.shape[1], a.shape[1]))
for i, item in enumerate(a):
evals[i], evecs[i] = np.linalg.eigh(item, UPLO)
return (evals.reshape(shape + (a.shape[1], )),
evecs.reshape(shape + (a.shape[1], a.shape[1])))
return np.linalg.eigh(a, UPLO)
```
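A minimal usage sketch for the two helpers above (not part of the original file), assuming dipy is importable; shapes follow the docstrings, with matrices stacked along the leading dimensions.
```python
import numpy as np
from dipy.utils.arrfuncs import pinv, eigh

rng = np.random.RandomState(42)
stack = rng.rand(10, 4, 3)                 # ten 4x3 matrices
pinvs = pinv(stack)                        # vectorized pseudo-inverse, shape (10, 3, 4)
assert np.allclose(pinvs[0], np.linalg.pinv(stack[0]))

sym = stack @ stack.transpose(0, 2, 1)     # ten symmetric 4x4 matrices
vals, vecs = eigh(sym)                     # per-matrix eigendecomposition
assert vals.shape == (10, 4) and vecs.shape == (10, 4, 4)
```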
#### File: viz/tests/test_regtools.py
```python
import numpy as np
from dipy.viz import regtools
import numpy.testing as npt
from dipy.align.metrics import SSDMetric
from dipy.align.imwarp import SymmetricDiffeomorphicRegistration
# Conditional import machinery for matplotlib
from dipy.utils.optpkg import optional_package
_, have_matplotlib, _ = optional_package('matplotlib')
@npt.dec.skipif(not have_matplotlib)
def test_plot_2d_diffeomorphic_map():
# Test the regtools plotting interface (lightly).
mv_shape = (11, 12)
moving = np.random.rand(*mv_shape)
st_shape = (13, 14)
static = np.random.rand(*st_shape)
dim = static.ndim
metric = SSDMetric(dim)
level_iters = [200, 100, 50, 25]
sdr = SymmetricDiffeomorphicRegistration(metric,
level_iters,
inv_iter=50)
mapping = sdr.optimize(static, moving)
# Smoke testing of plots
ff = regtools.plot_2d_diffeomorphic_map(mapping, 10)
    # Default shape is static shape, moving shape
npt.assert_equal(ff[0].shape, st_shape)
npt.assert_equal(ff[1].shape, mv_shape)
# Can specify shape
ff = regtools.plot_2d_diffeomorphic_map(mapping,
                                            delta=10,
direct_grid_shape=(7, 8),
inverse_grid_shape=(9, 10))
npt.assert_equal(ff[0].shape, (7, 8))
npt.assert_equal(ff[1].shape, (9, 10))
```
#### File: scratch/very_scratch/ellipse.py
```python
import sympy
import numpy as np
import scipy as sc
from numpy.random import random_sample as random
def random_uniform_in_disc():
# returns a tuple which is uniform in the disc
theta = 2*np.pi*random()
r2 = random()
r = np.sqrt(r2)
return np.array((r*np.sin(theta),r*np.cos(theta)))
def random_uniform_in_ellipse(a=1,b=1):
x = a*random_uniform_in_disc()[0]
y = b*np.sqrt(1-(x/a)**2)*(1-2*random())
return np.array((x,y))
import matplotlib.pyplot as plt
fig = plt.figure()
ax = fig.add_subplot(111)
sample = np.array([random_uniform_in_ellipse(a=2,b=1) for i in np.arange(10000)])
ax.scatter(*sample.T)
plt.show()
```
#### File: dipy/tools/pack_examples.py
```python
import os
from os.path import join as pjoin
import sys
import shutil
import tarfile
import dipy
__doc__ = __doc__ % sys.argv[0]
EG_BUILT_SDIR = 'examples_built'
dpv = 'dipy-' + dipy.__version__
archive_name = dpv + '-doc-examples.tar.gz'
try:
out_root = sys.argv[1]
except IndexError:
    print(__doc__)
sys.exit(1)
try:
os.mkdir(out_root)
except OSError:
pass
try:
doc_dir = sys.argv[2]
except IndexError:
doc_dir = os.getcwd()
archive_fname = os.path.join(out_root, archive_name)
eg_built_dir = pjoin(doc_dir, EG_BUILT_SDIR)
eg_out_base = pjoin(out_root, dpv, 'doc')
eg_out_dir = pjoin(eg_out_base, EG_BUILT_SDIR)
if os.path.isdir(eg_out_dir):
shutil.rmtree(eg_out_dir)
def ignorandi(src, names):
return [name for name in names if name == 'README' or name == '.gitignore']
shutil.copytree(eg_built_dir, eg_out_dir, ignore=ignorandi)
os.chdir(out_root)
tar = tarfile.open(archive_fname, 'w|gz')
tar.add(dpv)
tar.close()
shutil.rmtree(pjoin(out_root, dpv))
print("Written " + archive_fname)
``` |
{
"source": "JohnGriffiths/mne-hcp",
"score": 2
} |
#### File: file_mapping/tests/test_file_mapping.py
```python
from hcp.io.file_mapping import get_file_paths
from hcp.io.file_mapping.file_mapping import run_map
import hcp.tests.config as tconf
from nose.tools import assert_raises, assert_equal
def test_basic_file_mapping():
"""Test construction of file paths and names"""
assert_raises(ValueError, get_file_paths,
subject=tconf.subject, data_type='sushi',
output='raw', run_index=0, hcp_path=tconf.hcp_path)
assert_raises(ValueError, get_file_paths,
subject=tconf.subject, data_type='rest',
output='kimchi', run_index=0,
hcp_path=tconf.hcp_path)
for run_index in range(3):
for output in tconf.hcp_outputs:
for data_type in tconf.hcp_data_types:
# check too many runs
if run_index >= len(run_map[data_type]):
assert_raises(
ValueError, get_file_paths,
subject=tconf.subject, data_type=data_type,
output=output, run_index=run_index,
hcp_path=tconf.hcp_path)
# check no event related outputs
elif (data_type in ('rest', 'noise_subject',
'noise_empty_room') and
output in ('trial_info', 'evoked')):
assert_raises(
ValueError, get_file_paths,
subject=tconf.subject, data_type=data_type,
output=output, run_index=run_index,
hcp_path=tconf.hcp_path)
# check no preprocessing
elif (data_type in ('noise_subject',
'noise_empty_room') and output in
('epochs', 'evoked', 'ica', 'annot')):
assert_raises(
ValueError, get_file_paths,
subject=tconf.subject, data_type=data_type,
output=output, run_index=run_index,
hcp_path=tconf.hcp_path)
else:
file_names = get_file_paths(
subject=tconf.subject, data_type=data_type,
output=output, run_index=run_index,
hcp_path=tconf.hcp_path)
if output == 'raw':
assert_equal(
sum('config' in fn for fn in file_names), 1)
assert_equal(
sum('c,rfDC' in fn for fn in file_names), 1)
```
#### File: mne-hcp/hcp/preprocessing.py
```python
import numpy as np
import mne
from mne.io import set_bipolar_reference
from mne.io.bti.bti import (
_convert_coil_trans, _coil_trans_to_loc, _get_bti_dev_t,
_loc_to_coil_trans)
from mne.transforms import Transform
from mne.utils import logger
from .io import read_info
from .io.read import _hcp_pick_info
from .io.read import _data_labels
def set_eog_ecg_channels(raw):
"""Set the HCP ECG and EOG channels
.. note::
Operates in place.
Parameters
----------
raw : instance of Raw
the hcp raw data.
"""
for kind in ['ECG', 'VEOG', 'HEOG']:
set_bipolar_reference(
raw, anode=kind + '-', cathode=kind + '+', ch_name=kind,
copy=False)
raw.set_channel_types({'ECG': 'ecg', 'VEOG': 'eog', 'HEOG': 'eog'})
def apply_ica_hcp(raw, ica_mat, exclude):
"""Apply the HCP ICA.
.. note::
Operates in place and data must be loaded.
Parameters
----------
raw : instance of Raw
the hcp raw data.
ica_mat : numpy structured array
The hcp ICA solution
exclude : array-like
the components to be excluded.
"""
if not raw.preload:
raise RuntimeError('raw data must be loaded, use raw.load_data()')
ch_names = ica_mat['topolabel'].tolist().tolist()
picks = mne.pick_channels(raw.info['ch_names'], include=ch_names)
assert ch_names == [raw.ch_names[p] for p in picks]
unmixing_matrix = np.array(ica_mat['unmixing'].tolist())
n_components, n_channels = unmixing_matrix.shape
mixing = np.array(ica_mat['topo'].tolist())
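    # Projection that removes the excluded components: reconstruct their
    # contribution (topo @ unmixing restricted to `exclude`) and subtract it
    # from the identity.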
proj_mat = (np.eye(n_channels) - np.dot(
mixing[:, exclude], unmixing_matrix[exclude]))
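    # Scale to fT (the unit the HCP ICA solution appears to be expressed in),
    # apply the projection to the selected channels, then scale back.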
raw._data *= 1e15
raw._data[picks] = np.dot(proj_mat, raw._data[picks])
raw._data /= 1e15
def apply_ref_correction(raw, decim_fit=100):
"""Regress out MEG ref channels
    Fits a linear model from the MEG reference channels to each MEG
    sensor, predicts the MEG data from the references, and keeps the
    residual after subtracting the predictions.
.. note::
Operates in place.
.. note::
Can be memory demanding. To alleviate this problem the model can be fit
on decimated data. This is legitimate because the linear model does
not have any representation of time, only the distributions
matter.
Parameters
----------
raw : instance of Raw
The BTi/4D raw data.
decim_fit : int
The decimation factor used for fitting the model.
Defaults to 100.
"""
from sklearn.linear_model import LinearRegression
meg_picks = mne.pick_types(raw.info, ref_meg=False, meg=True)
ref_picks = mne.pick_types(raw.info, ref_meg=True, meg=False)
if len(ref_picks) == 0:
raise ValueError('Could not find meg ref channels.')
estimator = LinearRegression(normalize=True) # ref MAG + GRAD
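    # Fit the spatial regression on every `decim_fit`-th sample only, then
    # predict the reference contribution for all samples and subtract it below.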
Y_pred = estimator.fit(
raw[ref_picks][0][:, ::decim_fit].T,
raw[meg_picks][0][:, ::decim_fit].T).predict(
raw[ref_picks][0].T)
raw._data[meg_picks] -= Y_pred.T
def map_ch_coords_to_mne(inst):
"""Transform sensors to MNE coordinates
.. note::
operates in place
.. warning::
For several reasons we do not use the MNE coordinates for the inverse
modeling. This however won't always play nicely with visualization.
Parameters
----------
inst : MNE data containers
Raw, Epochs, Evoked.
"""
bti_dev_t = Transform('ctf_meg', 'meg', _get_bti_dev_t())
dev_ctf_t = inst.info['dev_ctf_t']
for ch in inst.info['chs']:
loc = ch['loc'][:]
if loc is not None:
logger.debug('converting %s' % ch['ch_name'])
t = _loc_to_coil_trans(loc)
t = _convert_coil_trans(t, dev_ctf_t, bti_dev_t)
loc = _coil_trans_to_loc(t)
ch['loc'] = loc
def interpolate_missing(inst, subject, data_type, hcp_path,
run_index=0, mode='fast'):
"""Interpolate all MEG channels that are missing
.. warning::
This function may require some memory.
Parameters
----------
inst : MNE data containers
Raw, Epochs, Evoked.
subject : str, file_map
The subject
data_type : str
The kind of data to read. The following options are supported:
'rest'
'task_motor'
'task_story_math'
'task_working_memory'
'noise_empty_room'
'noise_subject'
run_index : int
The run index. For the first run, use 0, for the second, use 1.
Also see HCP documentation for the number of runs for a given data
type.
hcp_path : str
The HCP directory, defaults to op.curdir.
mode : str
Either `'accurate'` or `'fast'`, determines the quality of the
Legendre polynomial expansion used for interpolation of MEG
channels.
Returns
-------
out : MNE data containers
Raw, Epochs, Evoked but with missing channels interpolated.
"""
try:
info = read_info(
subject=subject, data_type=data_type, hcp_path=hcp_path,
            run_index=run_index)
except (ValueError, IOError):
raise ValueError(
            'could not find config to complete info. '
'reading only channel positions without '
'transforms.')
# full BTI MEG channels
bti_meg_channel_names = ['A%i' % ii for ii in range(1, 249, 1)]
# figure out which channels are missing
bti_meg_channel_missing_names = [
ch for ch in bti_meg_channel_names if ch not in inst.ch_names]
# get meg picks
picks_meg = mne.pick_types(inst.info, meg=True, ref_meg=False)
# some non-contiguous block in the middle so let's try to invert
picks_other = [ii for ii in range(len(inst.ch_names)) if ii not in
picks_meg]
other_chans = [inst.ch_names[po] for po in picks_other]
# compute new n channels
n_channels = (len(picks_meg) +
len(bti_meg_channel_missing_names) +
len(other_chans))
# restrict info to final channels
# ! info read from config file is not sorted like inst.info
# ! therefore picking order matters, but we don't know it.
# ! so far we will rely on the consistent layout for raw files
final_names = [ch for ch in _data_labels if ch in bti_meg_channel_names or
ch in other_chans]
info = _hcp_pick_info(info, final_names)
assert len(info['ch_names']) == n_channels
existing_channels_index = [ii for ii, ch in enumerate(info['ch_names']) if
ch in inst.ch_names]
info['sfreq'] = inst.info['sfreq']
# compute shape of data to be added
is_raw = isinstance(inst, (mne.io.Raw,
mne.io.RawArray,
mne.io.bti.bti.RawBTi))
is_epochs = isinstance(inst, mne.BaseEpochs)
is_evoked = isinstance(inst, (mne.Evoked, mne.EvokedArray))
if is_raw:
shape = (n_channels,
(inst.last_samp - inst.first_samp) + 1)
data = inst._data
elif is_epochs:
shape = (n_channels, len(inst.events), len(inst.times))
data = np.transpose(inst.get_data(), (1, 0, 2))
elif is_evoked:
shape = (n_channels, len(inst.times))
data = inst.data
else:
raise ValueError('instance must be Raw, Epochs '
'or Evoked')
out_data = np.empty(shape, dtype=data.dtype)
out_data[existing_channels_index] = data
if is_raw:
out = mne.io.RawArray(out_data, info)
if inst.annotations is not None:
out.annotations = inst.annotations
elif is_epochs:
out = mne.EpochsArray(data=np.transpose(out_data, (1, 0, 2)),
info=info, events=inst.events,
tmin=inst.times.min(), event_id=inst.event_id)
elif is_evoked:
out = mne.EvokedArray(
data=out_data, info=info, tmin=inst.times.min(),
comment=inst.comment, nave=inst.nave, kind=inst.kind)
else:
raise ValueError('instance must be Raw, Epochs '
'or Evoked')
# set "bad" channels and interpolate.
out.info['bads'] = bti_meg_channel_missing_names
out.interpolate_bads(mode=mode)
return out
```
#### File: mne-hcp/tutorials/plot_reference_correction.py
```python
import os.path as op
import numpy as np
import matplotlib.pyplot as plt
from scipy.signal import welch
import mne
import hcp
from hcp.preprocessing import apply_ref_correction
###############################################################################
# We first set parameters
storage_dir = op.join(op.expanduser('~'), 'mne-hcp-data')
hcp_path = op.join(storage_dir, 'HCP')
subject = '105923'
data_type = 'rest'
run_index = 0
###############################################################################
# Then we define a spectral plotter for convenience
def plot_psd(X, label, Fs, NFFT, color=None):
freqs, psd = welch(X, fs=Fs, window='hanning', nperseg=NFFT,
noverlap=int(NFFT * 0.8))
    mask = freqs > 0
    freqs = freqs[mask]
    psd = psd[mask]
plt.plot(np.log10(freqs), 10 * np.log10(psd.ravel()), label=label,
color=color)
###############################################################################
# Now we read in the data
#
# Then we plot the power spectrum of the MEG and reference channels,
# apply the reference correction and add the resulting cleaned MEG channels
# to our comparison.
raw = hcp.read_raw(subject=subject, hcp_path=hcp_path,
run_index=run_index, data_type=data_type)
raw.load_data()
# get meg and ref channels
meg_picks = mne.pick_types(raw.info, meg=True, ref_meg=False)
ref_picks = mne.pick_types(raw.info, ref_meg=True, meg=False)
# put single channel aside for comparison later
chan1 = raw[meg_picks[0]][0]
# add some plotting parameter
decim_fit = 100  # we learn a purely spatial model, so we don't need all samples
decim_show = 10 # we can make plotting faster
n_fft = 2 ** 15 # let's use long windows to see low frequencies
# we put aside the time series for later plotting
x_meg = raw[meg_picks][0][:, ::decim_show].mean(0)
x_meg_ref = raw[ref_picks][0][:, ::decim_show].mean(0)
###############################################################################
# Now we apply the ref correction (in place).
apply_ref_correction(raw)
###############################################################################
# That was the easiest part! Let's now plot everything.
plt.figure(figsize=(9, 6))
plot_psd(x_meg, Fs=raw.info['sfreq'], NFFT=n_fft, label='MEG', color='black')
plot_psd(x_meg_ref, Fs=raw.info['sfreq'], NFFT=n_fft, label='MEG-REF',
color='red')
plot_psd(raw[meg_picks][0][:, ::decim_show].mean(0), Fs=raw.info['sfreq'],
NFFT=n_fft, label='MEG-corrected', color='orange')
plt.legend()
plt.xticks(np.log10([0.1, 1, 10, 100]), [0.1, 1, 10, 100])
plt.xlim(np.log10([0.1, 300]))
plt.xlabel('log10(frequency) [Hz]')
plt.ylabel('Power Spectral Density [dB]')
plt.grid()
plt.show()
###############################################################################
# We can see that the ref correction removes low frequencies which is expected
###############################################################################
# By comparing single channel time series we can also see the detrending effect
chan1c = raw[meg_picks[0]][0]
ch_name = raw.ch_names[meg_picks[0]]
plt.figure()
plt.plot(raw.times, chan1.ravel() * 1e15, label='%s before' % ch_name,
color='black')
plt.plot(raw.times, chan1c.ravel() * 1e15, label='%s after' % ch_name,
color='orange')
plt.xlim(raw.times[[0, -1]])
plt.legend(loc='upper left')
plt.ylabel('Magnetometer [fT]')
plt.xlabel('Time [seconds]')
plt.grid()
plt.show()
``` |
{
"source": "John-G-Thomas/blockdata",
"score": 4
} |
#### File: blockdata/lambdadata_John_T/Operations.py
```python
def add(x, y):
return x + y
# This function subtracts two numbers
def subtract(x, y):
return x - y
# This function multiplies two numbers
def multiply(x, y):
return x * y
# This function divides two numbers
def divide(x, y):
return x / y
class calculator(float):
def __init__(self, num1=None, num2=None):
super().__init__()
assert isinstance(num1, float)
        self.num1 = num1
assert isinstance(num2, float)
self.num2 = num2
print("Select operation.")
print("1.Add")
print("2.Subtract")
print("3.Multiply")
print("4.Divide")
while True:
# Take input from the user
choice = input("Enter choice(1/2/3/4): ")
# Check if choice is one of the four options
if choice in ('1', '2', '3', '4'):
num1 = float(input("Enter first number: "))
num2 = float(input("Enter second number: "))
if choice == '1':
print(num1, "+", num2, "=", add(num1, num2))
elif choice == '2':
print(num1, "-", num2, "=", subtract(num1, num2))
elif choice == '3':
print(num1, "*", num2, "=", multiply(num1, num2))
elif choice == '4':
print(num1, "/", num2, "=", divide(num1, num2))
break
else:
print("Invalid Input")
``` |
{
"source": "John-G-Thomas/Daily-Warm-Ups",
"score": 3
} |
#### File: sprint-2/sql_and_databases/basic.py
```python
from pathlib import Path
import sqlite3
from typing import List
DB_FILEPATH = "Daily-Warm-Ups/unit-3/sprint-2/pyproject.toml"
# Defining this function outside the scope of the class ensures that
# we can use it in the initializer, but if we import the helper class
# into another file it won't crowd the namespace.
def _valid_db_path(pattern: str):
"""Raise an exception in the event of invalid database."""
class DbHelper(object):
"""Helper class for interacting with SQL databases."""
def __init__(self, path_to_database: str, context: str) -> None:
f"""
:param path_to_database: Database path.
:param context: The type of database to use one of ``_valid_contexts``
"""
def _connect(self):
"""Establish a connection to the database."""
def execute(self, statement: str):
"""Execute a single query."""
def query(self, query: str):
"""Return data from the database."""
def commit(self):
"""Commit changes to the database."""
def close(self):
"""Close the connection."""
# # Stretch: Experiment with context management.
# def __enter__(self):
# """Entering the context statement."""
#
# def __exit__(self, ext_type, exc_value, traceback):
``` |
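A minimal sketch of how the stubbed helper could be filled in for the SQLite case; the class name and behaviour below are assumptions for illustration, not part of the warm-up solution.
```python
import sqlite3


class SqliteDbHelper:
    """Hypothetical SQLite-only variant of the DbHelper stub above."""

    def __init__(self, path_to_database: str) -> None:
        self.path = path_to_database
        self.connection = sqlite3.connect(self.path)
        self.cursor = self.connection.cursor()

    def execute(self, statement: str):
        """Execute a single statement without returning rows."""
        self.cursor.execute(statement)

    def query(self, query: str):
        """Return all rows produced by a query."""
        return self.cursor.execute(query).fetchall()

    def commit(self):
        """Commit pending changes to the database."""
        self.connection.commit()

    def close(self):
        """Close the connection."""
        self.connection.close()
```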
{
"source": "John-G-Thomas/DS-Unit-3-Sprint-2-SQL-and-Databases",
"score": 4
} |
#### File: DS-Unit-3-Sprint-2-SQL-and-Databases/module1-introduction-to-sql/rpg_db_notes.py
```python
import sqlite3
"""
Create a connection
"""
conn = sqlite3.connect('rpg_db.sqlite3')
"""
Select statement
"""
curs = conn.cursor()
query = 'SELECT * FROM armory_item;'
curs.execute(query)
results = curs.fetchall()
"""
rpg_example.py
"""
import sqlite3
def connect_to_db(db_name='rpg_db.sqlite3'):
return sqlite3.connect(db_name)
def execute_query(cursor, query):
cursor.execute(query)
return cursor.fetchall()
GET_CHARACTERS = """
SELECT *
FROM charactercreator_character;
"""
if __name__ == '__main__':
conn = connect_to_db()
curs = conn.cursor()
results = execute_query(curs, GET_CHARACTERS)
print(results)
"""
Query log:
"""
SELECT * FROM charactercreator_character;
SELECT COUNT(*) FROM charactercreator_character;
SELECT COUNT(DISTINCT name) FROM charactercreator_character;
"""
Selecting columns:
"""
SELECT character_id, name FROM charactercreator_character;
"""
Limiting rows:
"""
SELECT character_id, name FROM charactercreator_character LIMIT 10;
"""
Filtering rows with conditions:
"""
SELECT character_id, name
FROM charactercreator_character
WHERE character_id > 50;
SELECT character_id, name
FROM charactercreator_character
WHERE character_id > 50
AND character_id < 55;
"""
Equivalent:
"""
SELECT character_id, name
FROM charactercreator_character
WHERE character_id BETWEEN 51 AND 54;
General theme - often more than one way to do it!
"""
Let's figure out what the duplicate character names are
"""
SELECT name, COUNT(*)
FROM charactercreator_character
GROUP BY name;
"""
Our first business query!
"""
"""
Let's figure out what the duplicate character names are
SELECT - how we choose which columns to get
WHERE - how we set conditions on the rows to be returned
LIMIT - when we only want a certain number of rows
ORDER - when we want to sort the output
JOIN - when we need data from multiple tables combined
"""
"""
A first join
"""
SELECT * FROM charactercreator_character
INNER JOIN charactercreator_fighter
ON character_id = character_ptr_id
WHERE character_id = 1;
"""
Non-inner joins introduce missing values!
"""
SELECT character_id, name, rage FROM charactercreator_character
LEFT JOIN charactercreator_fighter
ON character_id = character_ptr_id;
"""
Explicit inner join:
"""
SELECT character_id, name, rage FROM charactercreator_character
INNER JOIN charactercreator_fighter
ON character_id = character_ptr_id
WHERE character_id BETWEEN 40 and 50;
"""
Equivalent implicit join:
"""
SELECT character_id, name, rage
FROM charactercreator_character, charactercreator_fighter
WHERE character_id = character_ptr_id
AND character_id BETWEEN 40 and 50;
"""
Queries result in tables that can be queried! (Silly example but can be
useful)
"""
SELECT * FROM
(SELECT * FROM charactercreator_character);
"""
Sometimes you need to join >2 tables...
This is where I particularly like implicit joins
"""
SELECT cc.character_id, cc.name, ai.item_id, ai.name
FROM charactercreator_character AS cc,
armory_item AS ai,
charactercreator_character_inventory AS cci
WHERE cc.character_id = cci.character_id
AND ai.item_id = cci.item_id;
"""
Use a subquery to make a temp table to query from
"""
SELECT character_id, COUNT(DISTINCT item_id) FROM
(SELECT cc.character_id, cc.name AS character_name, ai.item_id, ai.name AS
item_name
FROM charactercreator_character AS cc,
armory_item AS ai,
charactercreator_character_inventory AS cci
WHERE cc.character_id = cci.character_id
AND ai.item_id = cci.item_id)
GROUP BY 1 ORDER BY 2 DESC;
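"""
GROUP BY 1 and ORDER BY 2 refer to output columns by position: group by the
first selected column (character_id) and order by the second (the distinct
item count), largest first.
"""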
```
#### File: DS-Unit-3-Sprint-2-SQL-and-Databases/module4-acid-and-database-scalability-tradeoffs/lecture.py
```python
import itertools
import operator
from collections.abc import Iterable, Callable
from math import prod as product
def transform_reduce(lhs: Iterable, rhs: Iterable,
transformer: Callable, reducer: Callable):
""" Transform Reduce
Pairwise transform and then reduction across all results.
DocTests:
>>> transform_reduce(range(1, 6), range(1, 6), operator.mul, sum)
55
>>> transform_reduce(range(1, 6), range(1, 6), operator.add, product)
3840
@param lhs: Left Iterator
@param rhs: Right Iterator
@param transformer: Binary Functor F(x, y) -> Value
@param reducer: Reduction Functor F(Iterable) -> Value
@return: Reduced Value
"""
return reducer(itertools.starmap(transformer, zip(lhs, rhs)))
def inner_product(lhs: Iterable, rhs: Iterable):
""" Inner Product
Performs pairwise multiplication across the iterables,
then returns the sum of the products.
DocTests:
>>> inner_product(range(1, 6), range(1, 6))
55
>>> inner_product(range(11), range(11))
385
@param lhs: Left Iterator
@param rhs: Right Iterator
@return: Sum of the products.
"""
return transform_reduce(lhs, rhs, operator.mul, sum)
``` |
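A small illustration (not from the original lecture file) of reusing `transform_reduce` for another pairwise reduction; it assumes the two functions defined above are in scope.
```python
# Squared Euclidean distance: pairwise (x - y) ** 2, then sum.
def squared_distance(lhs, rhs):
    return transform_reduce(lhs, rhs, lambda x, y: (x - y) ** 2, sum)


assert squared_distance([1, 2, 3], [4, 6, 3]) == 25
# inner_product doubles as a dot product for plain Python sequences.
assert inner_product([1, 2, 3], [4, 5, 6]) == 32
```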
{
"source": "johngtrs/krux",
"score": 2
} |
#### File: krux/src/login.py
```python
import lcd
from embit.networks import NETWORKS
from embit.wordlists.bip39 import WORDLIST
import urtypes
from page import Page
from menu import Menu, MENU_CONTINUE, MENU_EXIT
from input import BUTTON_ENTER, BUTTON_PAGE
from qr import FORMAT_UR
from key import Key, pick_final_word
from wallet import Wallet
TEST_PHRASE_DIGITS = '11111'
TEST_PHRASE_LETTERS = 'aaaaa'
FINAL_WORD_DIGITS = '99999'
FINAL_WORD_LETTERS = 'zzzzz'
class Login(Page):
"""Represents the login page of the app"""
def __init__(self, ctx):
menu = [
(( 'Load Mnemonic' ), self.load_key),
(( 'About' ), self.about),
(( 'Shutdown' ), self.shutdown),
]
if ctx.debugging():
menu.append((( 'DEBUG' ), lambda: MENU_CONTINUE))
Page.__init__(self, ctx, Menu(ctx, menu))
def load_key(self):
"""Handler for the 'load mnemonic' menu item"""
submenu = Menu(self.ctx, [
(( 'Via QR Code' ), self.load_key_from_qr_code),
(( 'Via Text' ), self.load_key_from_text),
(( 'Via Numbers' ), self.load_key_from_digits),
(( 'Via Bits' ), self.load_key_from_bits),
(( 'Back' ), lambda: MENU_EXIT)
])
index, status = submenu.run_loop()
if index == len(submenu.menu)-1:
return MENU_CONTINUE
return status
def _load_key_from_words(self, words):
self.display_mnemonic(words)
self.ctx.display.draw_hcentered_text(( 'Continue?' ), offset_y=220)
btn = self.ctx.input.wait_for_button()
if btn == BUTTON_ENTER:
submenu = Menu(self.ctx, [
(( 'Single-key' ), lambda: MENU_EXIT),
(( 'Multisig' ), lambda: MENU_EXIT)
])
index, _ = submenu.run_loop()
multisig = index == 1
self.ctx.display.flash_text(( 'Loading..' ))
self.ctx.wallet = Wallet(Key(' '.join(words), multisig, network=NETWORKS[self.ctx.net]))
return MENU_EXIT
return MENU_CONTINUE
def load_key_from_qr_code(self):
"""Handler for the 'via qr code' menu item"""
data, qr_format = self.capture_qr_code()
if data is None:
self.ctx.display.flash_text(( 'Failed to load\nmnemonic' ), lcd.RED)
return MENU_CONTINUE
words = []
if qr_format == FORMAT_UR:
try:
words = urtypes.crypto.BIP39.from_cbor(data.cbor).words
except:
words = urtypes.Bytes.from_cbor(data.cbor).data.decode().split(' ')
else:
if ' ' in data:
words = data.split(' ')
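            # Compact numeric format: each word is stored as a zero-padded
            # 4-digit index into the BIP-39 wordlist (12 words -> 48 digits,
            # 24 words -> 96 digits).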
elif len(data) == 48 or len(data) == 96:
for i in range(0, len(data), 4):
words.append(WORDLIST[int(data[i:i+4])])
if not words or (len(words) != 12 and len(words) != 24):
self.ctx.display.flash_text(( 'Invalid mnemonic\nlength' ), lcd.RED)
return MENU_CONTINUE
return self._load_key_from_words(words)
def load_key_from_text(self):
"""Handler for the 'via text' menu item"""
words = []
self.ctx.display.draw_hcentered_text(( 'Enter each\nword of your\nBIP-39 mnemonic.' ))
self.ctx.display.draw_hcentered_text(( 'Proceed?' ), offset_y=200)
btn = self.ctx.input.wait_for_button()
if btn == BUTTON_ENTER:
for i in range(24):
if i == 12:
self.ctx.display.clear()
self.ctx.display.draw_centered_text(( 'Done?' ))
btn = self.ctx.input.wait_for_button()
if btn == BUTTON_ENTER:
break
word = ''
while True:
word = self.capture_letters_from_keypad(( 'Word %d' ) % (i+1))
if word in WORDLIST:
break
# If the first 'word' is the TEST_PHRASE_LETTERS sentinel,
# we're testing and just want the test words
if i == 0 and word == TEST_PHRASE_LETTERS:
break
# If the last 'word' is the FINAL_WORD_LETTERS sentinel,
# pick a random final word that is a valid checksum
if (i in (11, 23)) and word == FINAL_WORD_LETTERS:
break
if word == TEST_PHRASE_LETTERS:
words = [WORDLIST[0] if n + 1 < 12 else WORDLIST[1879] for n in range(12)]
break
if word == FINAL_WORD_LETTERS:
word = pick_final_word(words)
self.ctx.display.clear()
self.ctx.display.draw_centered_text(word)
self.ctx.input.wait_for_button()
words.append(word)
return self._load_key_from_words(words)
return MENU_CONTINUE
def load_key_from_digits(self):
"""Handler for the 'via numbers' menu item"""
words = []
self.ctx.display.draw_hcentered_text(
( 'Enter each\nword of your\nBIP-39 mnemonic\nas a number from\n1 to 2048.' )
)
self.ctx.display.draw_hcentered_text(( 'Proceed?' ), offset_y=200)
btn = self.ctx.input.wait_for_button()
if btn == BUTTON_ENTER:
for i in range(24):
if i == 12:
self.ctx.display.clear()
self.ctx.display.draw_centered_text(( 'Done?' ))
btn = self.ctx.input.wait_for_button()
if btn == BUTTON_ENTER:
break
digits = ''
while True:
digits = self.capture_digits_from_numpad(( 'Word %d' ) % (i+1))
if int(digits) >= 1 and int(digits) <= 2048:
break
# If the first 'word' is the TEST_PHRASE_DIGITS sentinel,
# we're testing and just want the test words
if i == 0 and digits == TEST_PHRASE_DIGITS:
break
# If the last 'word' is the FINAL_WORD_DIGITS sentinel,
# pick a random final word that is a valid checksum
if (i in (11, 23)) and digits == FINAL_WORD_DIGITS:
break
if digits == TEST_PHRASE_DIGITS:
words = [WORDLIST[0] if n + 1 < 12 else WORDLIST[1879] for n in range(12)]
break
word = ''
if digits == FINAL_WORD_DIGITS:
word = pick_final_word(words)
else:
word = WORDLIST[int(digits)-1]
self.ctx.display.clear()
self.ctx.display.draw_centered_text(word)
self.ctx.input.wait_for_button()
words.append(word)
return self._load_key_from_words(words)
return MENU_CONTINUE
def load_key_from_bits(self):
"""Handler for the 'via bits' menu item"""
words = []
self.ctx.display.draw_hcentered_text(
( 'Enter each\nword of your\nBIP-39 mnemonic\nas a series of\nbinary digits.' )
)
self.ctx.display.draw_hcentered_text(( 'Proceed?' ), offset_y=200)
btn = self.ctx.input.wait_for_button()
if btn == BUTTON_ENTER:
for i in range(24):
if i == 12:
self.ctx.display.clear()
self.ctx.display.draw_centered_text(( 'Done?' ))
btn = self.ctx.input.wait_for_button()
if btn == BUTTON_ENTER:
break
bits = ''
while True:
bits = self.capture_bits_from_numpad(( 'Word %d' ) % (i+1))
if len(bits) == 11:
break
word = WORDLIST[int('0b' + bits, 0)]
self.ctx.display.clear()
self.ctx.display.draw_centered_text(word)
self.ctx.input.wait_for_button()
words.append(word)
return self._load_key_from_words(words)
return MENU_CONTINUE
def about(self):
"""Handler for the 'about' menu item"""
networks = ['main', 'test']
while True:
self.ctx.display.clear()
self.ctx.display.draw_centered_text(
( 'Krux\n\n\nVersion\n%s\n\nNetwork\n%snet' ) % (self.ctx.version, self.ctx.net)
)
btn = self.ctx.input.wait_for_button()
if btn == BUTTON_PAGE:
for i, network in enumerate(networks):
if self.ctx.net == network:
self.ctx.net = networks[(i + 1) % len(networks)]
break
elif btn == BUTTON_ENTER:
break
return MENU_CONTINUE
``` |
{
"source": "john-guerra/leyesSenadoColombia",
"score": 3
} |
#### File: john-guerra/leyesSenadoColombia/scraper_senado.py
```python
from scrapy.shell import inspect_response
from scrapy.spiders import CrawlSpider
from scrapy.http import Request, FormRequest
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import Rule
import scrapy
class APHCrawler(scrapy.Spider):
name = 'senado_crawler'
allowed_domains = ['leyes.senado.gov.co']
# login_page = 'http://imagelibrary.aph.org/aphb/'
start_urls = ['http://leyes.senado.gov.co/proyectos/index.php/proyectos-ley/cuatrenio-2018-2022/2018-2019']
page = 1
rate = 1
def __init__(self):
scrapy.Spider.__init__(self)
self.download_delay = 1/float(self.rate)
def parse(self, response):
print("Parse!!!")
for year in response.css(".mega-nav.level2 a"):
full_url = response.urljoin(year.css("::attr(href)").extract()[0])
print("parse_year", full_url)
req = Request(full_url, callback=self.parse_year)
req.meta['period'] = year.css("::attr(title)").extract()[0].strip()
yield req
def parse_year(self, response):
print ("Parse year**")
# Parse images
for href in response.css("table h3 a"):
full_url = response.urljoin(href.css("::attr(href)").extract()[0])
# print("Parse_law", full_url)
req = Request(full_url, callback=self.parse_law)
req.meta["period"] = response.meta["period"]
yield req
# Parse pagination
for href in response.css(".pagination li a"):
#Skip broken links
if (len(href.css("::attr(href)").extract())==0): continue
next_url = response.urljoin(href.css("::attr(href)").extract()[0])
print("Parse pagination", next_url)
reqNext = Request(next_url, callback=self.parse_year)
reqNext.meta["period"] = response.meta["period"]
yield reqNext
def parse_law(self, response):
self.logger.info('Parse Law %s', response.url)
periodo = response.meta["period"]
extractOrEmpty = lambda x: x[0].strip() if (x and len(x)>0) else ""
res = {
"url": response.url,
"Periodo": periodo,
"N-senado": extractOrEmpty(response.css("#t3-content > div.database-article.item-page > table > tbody > tr:nth-child(1) > th > dd > div > p:nth-child(1)::text").extract()),
"Titulo": extractOrEmpty(response.css("#t3-content > div.database-article.item-page > table > tbody > tr:nth-child(1) > th > dd > div > p:nth-child(2) > big::text").extract())
}
for row in response.css(".block table tr"):
tds = row.css("td")
# Ignore empty attribs
if (len(tds) != 2):
continue
attr = extractOrEmpty(tds[0].css("::text").extract())
val = extractOrEmpty(tds[1].css("::text").extract())
res[attr] = val
yield res
``` |
{
"source": "johngulliver/InnerEye-DeepLearning",
"score": 2
} |
#### File: InnerEye/ML/runner.py
```python
import logging
import os
import sys
import uuid
from pathlib import Path
from typing import Optional, Tuple
# Suppress all errors here because the imports after code cause loads of warnings. We can't specifically suppress
# individual warnings only.
# flake8: noqa
# Workaround for an issue with how AzureML and Pytorch Lightning interact: When spawning additional processes for DDP,
# the working directory is not correctly picked up in sys.path
print(f"Starting InnerEye runner at {sys.argv[0]}")
innereye_root = Path(__file__).absolute().parent.parent.parent
if (innereye_root / "InnerEye").is_dir():
innereye_root_str = str(innereye_root)
if innereye_root_str not in sys.path:
print(f"Adding InnerEye folder to sys.path: {innereye_root_str}")
sys.path.insert(0, innereye_root_str)
from InnerEye.Common import fixed_paths
fixed_paths.add_submodules_to_path()
from azureml._base_sdk_common import user_agent
from azureml.core import Run, ScriptRunConfig
from health.azure.himl import AzureRunInfo, submit_to_azure_if_needed
from health.azure.azure_util import create_run_recovery_id, merge_conda_files, to_azure_friendly_string
import matplotlib
from InnerEye.Azure.tensorboard_monitor import AMLTensorBoardMonitorConfig, monitor
from InnerEye.Azure import azure_util
from InnerEye.Azure.azure_config import AzureConfig, ParserResult, SourceConfig
from InnerEye.Azure.azure_runner import (DEFAULT_DOCKER_BASE_IMAGE, create_dataset_configs, create_experiment_name,
create_runner_parser,
get_git_tags,
parse_args_and_add_yaml_variables,
parse_arguments, additional_run_tags,
set_environment_variables_for_multi_node)
from InnerEye.Azure.azure_util import (RUN_CONTEXT, RUN_RECOVERY_ID_KEY_NAME, get_all_environment_files,
is_offline_run_context)
from InnerEye.Azure.run_pytest import download_pytest_result, run_pytest
from InnerEye.Common.common_util import (FULL_METRICS_DATAFRAME_FILE, METRICS_AGGREGATES_FILE,
disable_logging_to_file, is_linux, logging_to_stdout)
from InnerEye.Common.generic_parsing import GenericConfig
from InnerEye.ML.common import DATASET_CSV_FILE_NAME
from InnerEye.ML.deep_learning_config import DeepLearningConfig
from InnerEye.ML.lightning_base import InnerEyeContainer
from InnerEye.ML.model_config_base import ModelConfigBase
from InnerEye.ML.model_training import is_global_rank_zero, is_local_rank_zero
from InnerEye.ML.run_ml import MLRunner, ModelDeploymentHookSignature, PostCrossValidationHookSignature
from InnerEye.ML.utils.config_loader import ModelConfigLoader
from InnerEye.ML.lightning_container import LightningContainer
# We change the current working directory before starting the actual training. However, this throws off starting
# the child training threads because sys.argv[0] is a relative path when running in AzureML. Turn that into an absolute
# path.
runner_path = Path(sys.argv[0])
if not runner_path.is_absolute():
sys.argv[0] = str(runner_path.absolute())
def initialize_rpdb() -> None:
"""
On Linux only, import and initialize rpdb, to enable remote debugging if necessary.
"""
# rpdb signal trapping does not work on Windows, as there is no SIGTRAP:
if not is_linux():
return
import rpdb
rpdb_port = 4444
rpdb.handle_trap(port=rpdb_port)
# For some reason, os.getpid() does not return the ID of what appears to be the currently running process.
logging.info("rpdb is handling traps. To debug: identify the main runner.py process, then as root: "
f"kill -TRAP <process_id>; nc 127.0.0.1 {rpdb_port}")
def package_setup_and_hacks() -> None:
"""
Set up the Python packages where needed. In particular, reduce the logging level for some of the used
libraries, which are particularly talkative in DEBUG mode. Usually when running in DEBUG mode, we want
diagnostics about the model building itself, but not for the underlying libraries.
It also adds workarounds for known issues in some packages.
"""
# Numba code generation is extremely talkative in DEBUG mode, disable that.
logging.getLogger('numba').setLevel(logging.WARNING)
# Matplotlib is also very talkative in DEBUG mode, filling half of the log file in a PR build.
logging.getLogger('matplotlib').setLevel(logging.INFO)
# Urllib3 prints out connection information for each call to write metrics, etc
logging.getLogger('urllib3').setLevel(logging.INFO)
logging.getLogger('msrest').setLevel(logging.INFO)
# AzureML prints too many details about logging metrics
logging.getLogger('azureml').setLevel(logging.INFO)
# Jupyter notebook report generation
logging.getLogger('papermill').setLevel(logging.INFO)
logging.getLogger('nbconvert').setLevel(logging.INFO)
# This is working around a spurious error message thrown by MKL, see
# https://github.com/pytorch/pytorch/issues/37377
os.environ['MKL_THREADING_LAYER'] = 'GNU'
# Workaround for issues with matplotlib on some X servers, see
# https://stackoverflow.com/questions/45993879/matplot-lib-fatal-io-error-25-inappropriate-ioctl-for-device-on-x
# -server-loc
matplotlib.use('Agg')
class Runner:
"""
This class contains the high-level logic to start a training run: choose a model configuration by name,
submit to AzureML if needed, or otherwise start the actual training and test loop.
:param project_root: The root folder that contains all of the source code that should be executed.
:param yaml_config_file: The path to the YAML file that contains values to supply into sys.argv.
:param post_cross_validation_hook: A function to call after waiting for completion of cross validation runs.
The function is called with the model configuration and the path to the downloaded and merged metrics files.
:param model_deployment_hook: an optional function for deploying a model in an application-specific way.
If present, it should take a model config (SegmentationModelBase), an AzureConfig, and an AzureML
Model as arguments, and return an optional Path and a further object of any type.
:param command_line_args: command-line arguments to use; if None, use sys.argv.
"""
def __init__(self,
project_root: Path,
yaml_config_file: Path,
post_cross_validation_hook: Optional[PostCrossValidationHookSignature] = None,
model_deployment_hook: Optional[ModelDeploymentHookSignature] = None):
self.project_root = project_root
self.yaml_config_file = yaml_config_file
self.post_cross_validation_hook = post_cross_validation_hook
self.model_deployment_hook = model_deployment_hook
# model_config and azure_config are placeholders for now, and are set properly when command line args are
# parsed.
self.model_config: Optional[DeepLearningConfig] = None
self.azure_config: AzureConfig = AzureConfig()
self.lightning_container: LightningContainer = None # type: ignore
def parse_and_load_model(self) -> ParserResult:
"""
Parses the command line arguments, and creates configuration objects for the model itself, and for the
Azure-related parameters. Sets self.azure_config and self.model_config to their proper values. Returns the
parser output from parsing the model commandline arguments.
If no "model" argument is provided on the commandline, self.model_config will be set to None, and the return
value is None.
"""
# Create a parser that will understand only the args we need for an AzureConfig
parser1 = create_runner_parser()
parser_result = parse_args_and_add_yaml_variables(parser1,
yaml_config_file=self.yaml_config_file,
project_root=self.project_root,
fail_on_unknown_args=False)
azure_config = AzureConfig(**parser_result.args)
azure_config.project_root = self.project_root
self.azure_config = azure_config
self.model_config = None
if not azure_config.model:
raise ValueError("Parameter 'model' needs to be set to tell InnerEye which model to run.")
model_config_loader: ModelConfigLoader = ModelConfigLoader(**parser_result.args)
# Create the model as per the "model" commandline option. This can return either a built-in config
# of type DeepLearningConfig, or a LightningContainer.
config_or_container = model_config_loader.create_model_config_from_name(model_name=azure_config.model)
def parse_overrides_and_apply(c: object, previous_parser_result: ParserResult) -> ParserResult:
assert isinstance(c, GenericConfig)
parser = type(c).create_argparser()
# For each parser, feed in the unknown settings from the previous parser. All commandline args should
# be consumed by name, hence fail if there is something that is still unknown.
parser_result = parse_arguments(parser,
settings_from_yaml=previous_parser_result.unknown_settings_from_yaml,
args=previous_parser_result.unknown,
fail_on_unknown_args=True)
# Apply the overrides and validate. Overrides can come from either YAML settings or the commandline.
c.apply_overrides(parser_result.known_settings_from_yaml)
c.apply_overrides(parser_result.overrides)
c.validate()
return parser_result
# Now create a parser that understands overrides at model/container level.
parser_result = parse_overrides_and_apply(config_or_container, parser_result)
if isinstance(config_or_container, LightningContainer):
self.lightning_container = config_or_container
elif isinstance(config_or_container, ModelConfigBase):
# Built-in InnerEye models use a fake container
self.model_config = config_or_container
self.lightning_container = InnerEyeContainer(config_or_container)
else:
raise ValueError(f"Don't know how to handle a loaded configuration of type {type(config_or_container)}")
if azure_config.extra_code_directory:
exist = "exists" if Path(azure_config.extra_code_directory).exists() else "does not exist"
logging.info(f"extra_code_directory is {azure_config.extra_code_directory}, which {exist}")
else:
logging.info("extra_code_directory is unset")
return parser_result
def run(self) -> Tuple[Optional[DeepLearningConfig], AzureRunInfo]:
"""
The main entry point for training and testing models from the commandline. This chooses a model to train
via a commandline argument, runs training or testing, and writes all required info to disk and logs.
:return: If submitting to AzureML, returns the model configuration that was used for training,
including commandline overrides applied (if any).
"""
# Usually, when we set logging to DEBUG, we want diagnostics about the model
# build itself, but not the tons of debug information that AzureML submissions create.
logging_to_stdout(logging.INFO if is_local_rank_zero() else "ERROR")
initialize_rpdb()
user_agent.append(azure_util.INNEREYE_SDK_NAME, azure_util.INNEREYE_SDK_VERSION)
self.parse_and_load_model()
if self.lightning_container.perform_cross_validation:
# force hyperdrive usage if performing cross validation
self.azure_config.hyperdrive = True
azure_run_info = self.submit_to_azureml_if_needed()
self.run_in_situ(azure_run_info)
if self.model_config is None:
return self.lightning_container, azure_run_info
return self.model_config, azure_run_info
def submit_to_azureml_if_needed(self) -> AzureRunInfo:
"""
Submit a job to AzureML, returning the resulting Run object, or exiting if we were asked to wait for
completion and the Run did not succeed.
"""
if self.azure_config.azureml and isinstance(self.model_config, DeepLearningConfig) \
and not self.lightning_container.azure_dataset_id:
raise ValueError("When running an InnerEye built-in model in AzureML, the 'azure_dataset_id' "
"property must be set.")
source_config = SourceConfig(
root_folder=self.project_root,
entry_script=Path(sys.argv[0]).resolve(),
script_params=sys.argv[1:],
conda_dependencies_files=get_all_environment_files(self.project_root),
hyperdrive_config_func=(self.model_config.get_hyperdrive_config if self.model_config
else self.lightning_container.get_hyperdrive_config),
# For large jobs, upload of results can time out because of large checkpoint files. Default is 600
upload_timeout_seconds=86400
)
# Reduce the size of the snapshot by adding unused folders to amlignore. The Test* subfolders are only needed
# when running pytest.
ignored_folders = []
if not self.azure_config.pytest_mark:
ignored_folders.extend(["Tests", "TestsOutsidePackage"])
if not self.lightning_container.regression_test_folder:
ignored_folders.append("RegressionTestResults")
input_datasets = create_dataset_configs(self.azure_config,
all_azure_dataset_ids=self.lightning_container.all_azure_dataset_ids(),
all_dataset_mountpoints=self.lightning_container.all_dataset_mountpoints())
def after_submission_hook(azure_run: Run) -> None:
"""
A function that will be called right after job submission.
"""
# Add an extra tag that depends on the run that was actually submitted. This is used for later filtering
# run in cross validation analysis
recovery_id = create_run_recovery_id(azure_run)
azure_run.tag(RUN_RECOVERY_ID_KEY_NAME, recovery_id)
print("If this run fails, re-start runner.py and supply these additional arguments: "
f"--run_recovery_id={recovery_id}")
if self.azure_config.tensorboard:
print("Starting TensorBoard now because you specified --tensorboard")
monitor(monitor_config=AMLTensorBoardMonitorConfig(run_ids=[azure_run.id]),
azure_config=self.azure_config)
else:
print(f"To monitor this run locally using TensorBoard, run the script: "
f"InnerEye/Azure/tensorboard_monitor.py --run_ids={azure_run.id}")
if self.azure_config.wait_for_completion:
# We want the job output to be visible on the console, but the program should not exit if the
# job fails because we need to download the pytest result file.
azure_run.wait_for_completion(show_output=True, raise_on_error=False)
if self.azure_config.pytest_mark and self.azure_config.wait_for_completion:
# The AzureML job can optionally run pytest. Attempt to download it to the current directory.
# A build step will pick up that file and publish it to Azure DevOps.
# If pytest_mark is set, this file must exist.
logging.info("Downloading pytest result file.")
download_pytest_result(azure_run)
hyperdrive_config = None
if self.azure_config.hyperdrive:
hyperdrive_config = self.lightning_container.get_hyperdrive_config(ScriptRunConfig(source_directory=""))
# Create a temporary file for the merged conda file, that will be removed after submission of the job.
temp_conda: Optional[Path] = None
try:
if len(source_config.conda_dependencies_files) > 1:
temp_conda = source_config.root_folder / f"temp_environment-{uuid.uuid4().hex[:8]}.yml"
# Merge the project-specific dependencies with the packages that InnerEye itself needs. This should not
# be necessary if the innereye package is installed. It is necessary when working with an outer project
# and InnerEye as a git submodule and submitting jobs from the local machine.
# In case of version conflicts, the package version in the outer project is given priority.
merge_conda_files(source_config.conda_dependencies_files, temp_conda)
# Calls like `self.azure_config.get_workspace()` will fail if we have no AzureML credentials set up, and so
# we should only attempt them if we intend to elevate this to AzureML
if self.azure_config.azureml:
if not self.azure_config.cluster:
raise ValueError("self.azure_config.cluster not set, but we need a compute_cluster_name to submit"
"the script to run in AzureML")
azure_run_info = submit_to_azure_if_needed(
entry_script=source_config.entry_script,
snapshot_root_directory=source_config.root_folder,
script_params=source_config.script_params,
conda_environment_file=temp_conda or source_config.conda_dependencies_files[0],
aml_workspace=self.azure_config.get_workspace(),
compute_cluster_name=self.azure_config.cluster,
environment_variables=source_config.environment_variables,
default_datastore=self.azure_config.azureml_datastore,
experiment_name=to_azure_friendly_string(create_experiment_name(self.azure_config)),
max_run_duration=self.azure_config.max_run_duration,
input_datasets=input_datasets,
num_nodes=self.azure_config.num_nodes,
wait_for_completion=False,
ignored_folders=ignored_folders,
pip_extra_index_url=self.azure_config.pip_extra_index_url,
submit_to_azureml=self.azure_config.azureml,
docker_base_image=DEFAULT_DOCKER_BASE_IMAGE,
docker_shm_size=self.azure_config.docker_shm_size,
tags=additional_run_tags(
azure_config=self.azure_config,
commandline_args=" ".join(source_config.script_params)),
after_submission=after_submission_hook,
hyperdrive_config=hyperdrive_config)
else:
# compute_cluster_name is a required parameter in early versions of the HI-ML package
azure_run_info = submit_to_azure_if_needed(
input_datasets=input_datasets,
submit_to_azureml=False,
compute_cluster_name="")
finally:
if temp_conda:
temp_conda.unlink()
# submit_to_azure_if_needed calls sys.exit after submitting to AzureML. We only reach this when running
# the script locally or in AzureML.
return azure_run_info
def print_git_tags(self) -> None:
"""
When running in AzureML, print all the tags that contain information about the git repository status,
for answering the question "which code version was used" from a log file only.
"""
git_tags = get_git_tags(self.azure_config)
if is_offline_run_context(RUN_CONTEXT):
# When running on a VM outside AzureML, we can read git information from the current repository
tags_to_print = git_tags
else:
# When running in AzureML, the git repo information is not necessarily passed in, but we copy the git
# information into run tags after submitting the job, and can read it out here.
# Only print out those tags that were created from git-related information
tags_to_print = {key: value for key, value in RUN_CONTEXT.get_tags().items() if key in git_tags}
logging.info("Git repository information:")
for key, value in tags_to_print.items():
logging.info(f" {key:20}: {value}")
def run_in_situ(self, azure_run_info: AzureRunInfo) -> None:
"""
Actually run the AzureML job; this method will typically run on an Azure VM.
:param azure_run_info: Contains all information about the present run in AzureML, in particular where the
datasets are mounted.
"""
# Only set the logging level now. Usually, when we set logging to DEBUG, we want diagnostics about the model
# build itself, but not the tons of debug information that AzureML submissions create.
# Suppress the logging from all processes but the one for GPU 0 on each node, to make log files more readable
logging_to_stdout(self.azure_config.log_level if is_local_rank_zero() else "ERROR")
package_setup_and_hacks()
if is_global_rank_zero():
self.print_git_tags()
# For the PR build in AzureML, we can either pytest, or the training of the simple PR model. Running both
# only works when using DDP_spawn, but that has as a side-effect that it messes up memory consumption of the
# large models.
if self.azure_config.pytest_mark:
outputs_folder = Path.cwd() / fixed_paths.DEFAULT_AML_UPLOAD_DIR
pytest_passed, results_file_path = run_pytest(self.azure_config.pytest_mark, outputs_folder)
if not pytest_passed:
# Terminate if pytest has failed. This makes the smoke test in
# PR builds fail if pytest fails.
pytest_failures = f"Not all PyTest tests passed. See {results_file_path}"
raise ValueError(pytest_failures)
else:
# Set environment variables for multi-node training if needed. This function will terminate early
# if it detects that it is not in a multi-node environment.
set_environment_variables_for_multi_node()
ml_runner = self.create_ml_runner()
ml_runner.setup(azure_run_info)
ml_runner.start_logging_to_file()
try:
ml_runner.run()
finally:
disable_logging_to_file()
def create_ml_runner(self) -> MLRunner:
"""
Create and return an ML runner using the attributes of this Runner object.
"""
return MLRunner(
model_config=self.model_config,
container=self.lightning_container,
azure_config=self.azure_config,
project_root=self.project_root,
post_cross_validation_hook=self.post_cross_validation_hook,
model_deployment_hook=self.model_deployment_hook)
def default_post_cross_validation_hook(config: ModelConfigBase, root_folder: Path) -> None:
"""
A function to run after cross validation results have been aggregated, before they are uploaded to AzureML.
:param config: The configuration of the model that should be trained.
:param root_folder: The folder with all aggregated and per-split files.
"""
print(f"Analyzing cross validation results for model {config.model_name}")
print(f"Expecting all cross validation result files in folder {root_folder}")
for (file, description) in [
(DATASET_CSV_FILE_NAME, "Dataset"),
(METRICS_AGGREGATES_FILE, "Metrics aggregated at epoch level"),
(FULL_METRICS_DATAFRAME_FILE, "Metrics at subject level")
]:
full_file = root_folder / file
print(f"{description} (exists={full_file.exists()}): {full_file}")
def run(project_root: Path,
yaml_config_file: Path,
post_cross_validation_hook: Optional[PostCrossValidationHookSignature] = None,
model_deployment_hook: Optional[ModelDeploymentHookSignature] = None) -> \
Tuple[Optional[DeepLearningConfig], Optional[Run]]:
"""
The main entry point for training and testing models from the commandline. This chooses a model to train
via a commandline argument, runs training or testing, and writes all required info to disk and logs.
:return: If submitting to AzureML, returns the model configuration that was used for training,
including commandline overrides applied (if any). For details on the arguments, see the constructor of Runner.
"""
runner = Runner(project_root, yaml_config_file, post_cross_validation_hook, model_deployment_hook)
return runner.run()
def main() -> None:
run(project_root=fixed_paths.repository_root_directory(),
yaml_config_file=fixed_paths.SETTINGS_YAML_FILE,
post_cross_validation_hook=default_post_cross_validation_hook)
if __name__ == '__main__':
main()
```
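A hedged sketch of invoking the runner, mirroring `main()` above; the command-line model name is a placeholder, not a real configuration.
```python
# From the command line (model name is a placeholder):
#   python InnerEye/ML/runner.py --model=MyModelConfig
# Or programmatically, mirroring main() above:
from InnerEye.Common import fixed_paths
from InnerEye.ML.runner import run

config, run_info = run(
    project_root=fixed_paths.repository_root_directory(),
    yaml_config_file=fixed_paths.SETTINGS_YAML_FILE)
```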
#### File: Tests/ML/test_download_upload.py
```python
import logging
import shutil
from pathlib import Path
from typing import Any, List, Optional
from unittest import mock
import pytest
from InnerEye.Azure.azure_config import AzureConfig
from InnerEye.Common import fixed_paths
from InnerEye.Common.common_util import OTHER_RUNS_SUBDIR_NAME, logging_section, logging_to_stdout
from InnerEye.Common.fixed_paths_for_tests import full_ml_test_data_path
from InnerEye.Common.output_directories import OutputFolderForTests
from InnerEye.ML.common import DATASET_CSV_FILE_NAME
from InnerEye.ML.deep_learning_config import DeepLearningConfig
from InnerEye.ML.lightning_container import LightningContainer
from InnerEye.ML.model_config_base import ModelConfigBase
from InnerEye.ML.run_ml import MLRunner
from InnerEye.ML.utils.run_recovery import RunRecovery
from Tests.AfterTraining.test_after_training import FALLBACK_ENSEMBLE_RUN, FALLBACK_SINGLE_RUN, get_most_recent_run
from Tests.ML.configs.DummyModel import DummyModel
from Tests.ML.configs.lightning_test_containers import DummyContainerWithDatasets
from Tests.ML.util import get_default_azure_config
from health.azure.himl import AzureRunInfo
logging_to_stdout(logging.DEBUG)
@pytest.fixture
def runner_config() -> AzureConfig:
"""
Gets an Azure config that masks out the storage account for datasets, to avoid accidental overwriting.
:return:
"""
config = get_default_azure_config()
config.model = ""
config.train = False
return config
def check_single_checkpoint(downloaded_checkpoints: List[Path]) -> None:
assert len(downloaded_checkpoints) == 1
assert downloaded_checkpoints[0].is_file()
@pytest.mark.after_training_single_run
def test_download_recovery_single_run(test_output_dirs: OutputFolderForTests,
runner_config: AzureConfig) -> None:
output_dir = test_output_dirs.root_dir
config = ModelConfigBase(should_validate=False)
config.set_output_to(output_dir)
run = get_most_recent_run(fallback_run_id_for_local_execution=FALLBACK_SINGLE_RUN)
run_recovery = RunRecovery.download_all_checkpoints_from_run(config, run)
# This fails if there is no recovery checkpoint
check_single_checkpoint(run_recovery.get_recovery_checkpoint_paths())
check_single_checkpoint(run_recovery.get_best_checkpoint_paths())
@pytest.mark.after_training_ensemble_run
def test_download_best_checkpoints_ensemble_run(test_output_dirs: OutputFolderForTests,
runner_config: AzureConfig) -> None:
output_dir = test_output_dirs.root_dir
config = ModelConfigBase(should_validate=False)
config.set_output_to(output_dir)
run = get_most_recent_run(fallback_run_id_for_local_execution=FALLBACK_ENSEMBLE_RUN)
run_recovery = RunRecovery.download_best_checkpoints_from_child_runs(config, run)
other_runs_folder = config.checkpoint_folder / OTHER_RUNS_SUBDIR_NAME
assert other_runs_folder.is_dir()
for child in ["0", "1"]:
assert (other_runs_folder / child).is_dir(), "Child run folder does not exist"
for checkpoint in run_recovery.get_best_checkpoint_paths():
assert checkpoint.is_file(), f"File {checkpoint} does not exist"
def test_download_azureml_dataset(test_output_dirs: OutputFolderForTests) -> None:
dataset_name = "test-dataset"
config = DummyModel()
config.local_dataset = None
config.azure_dataset_id = ""
azure_config = get_default_azure_config()
runner = MLRunner(config, azure_config=azure_config)
    # If the model has neither local_dataset nor azure_dataset_id, mount_or_download_dataset should fail.
    # This mounting call must happen before any other operations on the container, because even the model
    # creation may need access to the dataset.
with pytest.raises(ValueError) as ex:
runner.setup()
assert ex.value.args[0] == "Expecting that a dataset is available here."
runner.project_root = test_output_dirs.root_dir
# Pointing the model to a dataset folder that does not exist should raise an Exception
fake_folder = runner.project_root / "foo"
runner.container.local_dataset = fake_folder
with pytest.raises(FileNotFoundError):
runner.download_or_use_existing_dataset(runner.container.azure_dataset_id, runner.container.local_dataset)
# If the local dataset folder exists, mount_or_download_dataset should not do anything.
fake_folder.mkdir()
local_dataset = runner.download_or_use_existing_dataset(runner.container.azure_dataset_id,
runner.container.local_dataset)
assert local_dataset == fake_folder
# Pointing the model to a dataset in Azure should trigger a download
runner.container.local_dataset = None
runner.container.azure_dataset_id = dataset_name
with logging_section("Starting download"):
result_path = runner.download_or_use_existing_dataset(runner.container.azure_dataset_id,
runner.container.local_dataset)
    # Download goes into <project_root> / "datasets" / "test-dataset"
expected_path = runner.project_root / fixed_paths.DATASETS_DIR_NAME / dataset_name
assert result_path == expected_path
assert result_path.is_dir()
dataset_csv = Path(result_path) / DATASET_CSV_FILE_NAME
assert dataset_csv.is_file()
# Check that each individual file in the dataset is present
for folder in [1, 10]:
sub_folder = result_path / str(folder)
        assert sub_folder.is_dir()
for file in ["esophagus", "heart", "lung_l", "lung_r", "spinalcord"]:
f = (sub_folder / file).with_suffix(".nii.gz")
assert f.is_file()
def _test_mount_for_lightning_container(test_output_dirs: OutputFolderForTests,
is_offline_run: bool,
local_dataset: Optional[Path],
azure_dataset: str,
is_lightning_model: bool) -> LightningContainer:
config: Optional[DeepLearningConfig] = None
container: Optional[LightningContainer] = None
if is_lightning_model:
container = DummyContainerWithDatasets()
container.azure_dataset_id = azure_dataset
container.local_dataset = local_dataset
else:
config = DummyModel()
config.azure_dataset_id = azure_dataset
config.local_dataset = local_dataset
# The legacy InnerEye models require an existing dataset_csv file present in the dataset folder. Create that.
download_path = test_output_dirs.root_dir / "downloaded"
mount_path = test_output_dirs.root_dir / "mounted"
if not is_lightning_model:
train_and_test_data = "train_and_test_data"
for path in [download_path, mount_path, test_output_dirs.root_dir]:
# If destination folder exists, delete content to ensure consistency and avoid 'FileExistsError'
if (path / train_and_test_data).is_dir():
shutil.rmtree(path / train_and_test_data)
# Creates directory structure and copy data
shutil.copytree(full_ml_test_data_path(train_and_test_data), path / train_and_test_data)
# Copy 'data.csv' file
shutil.copy(full_ml_test_data_path(DATASET_CSV_FILE_NAME), path / DATASET_CSV_FILE_NAME)
with mock.patch("InnerEye.ML.run_ml.MLRunner.is_offline_run", is_offline_run):
with mock.patch("InnerEye.ML.run_ml.download_dataset", return_value=download_path):
runner = MLRunner(config, container=container,
azure_config=None, project_root=test_output_dirs.root_dir)
path_from_aml: List[Optional[Path]] = [None] if is_offline_run else [mount_path]
runner.setup(azure_run_info=AzureRunInfo(input_datasets=path_from_aml,
output_datasets=[],
run=None,
is_running_in_azure=False,
output_folder=Path(),
logs_folder=Path()
))
return runner.container
@pytest.mark.parametrize(("is_lightning_model", "expected_error"),
[
# A built-in InnerEye model must have either local dataset or azure dataset provided.
(False, "Expecting that a dataset is available here."),
# ... but this is OK for Lightning container models. A Lightning container could simply
# download its data from the web before training.
(True, "")
])
def test_mount_failing_offline_runs(test_output_dirs: OutputFolderForTests,
is_lightning_model: bool,
expected_error: str) -> None:
"""
    Test cases where MLRunner.mount_or_download_dataset raises an exception when running outside AzureML.
"""
def run() -> Any:
return _test_mount_for_lightning_container(test_output_dirs=test_output_dirs,
is_offline_run=True,
local_dataset=None,
azure_dataset="",
is_lightning_model=is_lightning_model)
if expected_error:
with pytest.raises(ValueError) as ex:
run()
assert expected_error in str(ex)
else:
assert run().local_dataset is None
def test_mount_in_azureml1(test_output_dirs: OutputFolderForTests) -> None:
"""
Test cases when MLRunner.mount_or_download_dataset runs inside AzureML.
"""
container = _test_mount_for_lightning_container(test_output_dirs=test_output_dirs,
is_offline_run=False,
local_dataset=None,
azure_dataset="foo",
is_lightning_model=False)
assert "mounted" in str(container.local_dataset)
def test_mount_in_azureml2(test_output_dirs: OutputFolderForTests) -> None:
"""
Test cases when MLRunner.mount_or_download_dataset runs inside AzureML.
"""
container = _test_mount_for_lightning_container(test_output_dirs=test_output_dirs,
is_offline_run=False,
local_dataset=None,
azure_dataset="",
is_lightning_model=True)
assert container.local_dataset is None
def test_mount_or_download(test_output_dirs: OutputFolderForTests) -> None:
"""
    Tests the different combinations of local and Azure datasets, with InnerEye built-in and container models.
"""
root = test_output_dirs.root_dir
for is_lightning_model in [True, False]:
# With runs outside of AzureML, an AML dataset should get downloaded.
container = _test_mount_for_lightning_container(test_output_dirs=test_output_dirs,
is_offline_run=True,
local_dataset=None,
azure_dataset="foo",
is_lightning_model=is_lightning_model)
assert "downloaded" in str(container.local_dataset)
# For all InnerEye built-in models, the paths from container level need to be copied down to legacy config
# level.
if not is_lightning_model:
assert container.config.local_dataset == container.local_dataset
# With runs in AzureML, an AML dataset should get mounted.
container = _test_mount_for_lightning_container(test_output_dirs=test_output_dirs,
is_offline_run=False,
local_dataset=None,
azure_dataset="foo",
is_lightning_model=is_lightning_model)
assert "mounted" in str(container.local_dataset)
if not is_lightning_model:
assert container.config.local_dataset == container.local_dataset
container = _test_mount_for_lightning_container(test_output_dirs=test_output_dirs,
is_offline_run=True,
local_dataset=root,
azure_dataset="",
is_lightning_model=is_lightning_model)
assert container.local_dataset == root
if not is_lightning_model:
assert container.config.local_dataset == container.local_dataset
``` |
{
"source": "JohnGuoy/iMATools",
"score": 3
} |
#### File: JohnGuoy/iMATools/mrv.py
```python
import sys
import os
import os.path
import shutil
import argparse
import textwrap
import re
import configparser
import hashlib
import sqlite3
import pickle
import portion
import matplotlib
import matplotlib.pyplot as plt
import tqdm
DEBUG = False
CHROMOSOMES_CpG_RANGES = {}
DATA_FILE = ""
DATA_FILE_ROW_COUNT = 0
DATA_FILE_SHA256SUM = ""
OUT_PUTDIR = "."
PREPROCESS_DIR = ""
def parse_args(args):
prompt = """usage: python mrv.py <--data-file DATAFILE> {<--chromosome CHROMOSOME> <--cpg-range RANGE \
[RANGE ...]> | <--to-visualize-file TOVISUALIZEFILE>} [--output-dir OUTPUTDIR]
| --version
| --help
mrv is a visualization tool used to visualize whether the CpG site of a certain read data obtained by long-read \
sequencing is methylated.
optional arguments:
-h, --help show this help message and exit
--data-file DATAFILE file path, specify the path of a text file containing long-read sequencing reads data
--chromosome CHROMOSOME
text, specify chromosome name
--output-dir OUTPUTDIR
directory path, specify the path of the output directory default: current directory
--version, -V show mrv version
--cpg-range RANGE [RANGE ...]
text, specify the range of CpG sites. The syntax format of RANGE is: [start,end].
You can give several RANGEs at once, such as
--cpg-range [1085,1200] [1220,1280] [2300,2395]
--to-visualize-file TOVISUALIZEFILE
file path, specify the path of a text file. You can put the names of multiple
chromosomes and their CpG site intervals to be visualized in this text file, and
hand them to mrv to calculate and output multiple visual files. An example of
TOVISUALIZEFILE:
[Y]
5431,9587
15680,17506
12003,12210
80,3327
[KI270580.1]
1154,1669
756,1321
800,1154
examples:
$ python mrv.py --data-file /home/someone/data.txt --chromosome Y --cpg-range [10802025,10861195]
$ python mrv.py --data-file /home/someone/data.txt --chromosome Y --cpg-range [1085,1200] [1220,1280] [2300,2395]
$ python mrv.py --data-file /home/someone/data.txt --to-visualize-file /home/someone/chromosomes_CpGs.txt"""
if "mrv.py" in sys.argv:
sys.argv.remove("mrv.py")
if len(sys.argv) == 0:
# print("error: you did not specify any parameters.\n")
print(prompt)
sys.exit(1)
prog = "mrv.py"
usage = """python mrv.py <--data-file DATAFILE> {<--chromosome CHROMOSOME> <--cpg-range RANGE [RANGE ...]> | \
<--to-visualize-file TOVISUALIZEFILE>} [--output-dir OUTPUTDIR]
| --version
| --help"""
description = """mrv is a visualization tool used to visualize whether the CpG site of a certain read data \
obtained by long-read sequencing is methylated."""
epilog = """examples:
$ python mrv.py --data-file /home/someone/data.txt --chromosome Y --cpg-range [10802025,10861195]
$ python mrv.py --data-file /home/someone/data.txt --chromosome Y --cpg-range [1085,1200] [1220,1280] [2300,2395]
$ python mrv.py --data-file /home/someone/data.txt --to-visualize-file /home/someone/chromosomes_CpGs.txt"""
parser = argparse.ArgumentParser(prog=prog,
usage=usage,
description=description,
epilog=epilog,
formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument("--data-file",
dest="datafile",
required=True,
type=str,
help="file path, specify the path of a text file containing long-read sequencing reads data")
parser.add_argument("--chromosome",
dest="chromosome",
type=str,
help="text, specify chromosome name")
parser.add_argument("--output-dir",
dest="outputdir",
default=".",
type=str,
help="directory path, specify the path of the output directory default: current directory")
parser.add_argument("--version",
"-V",
dest="version",
action="version",
version="%(prog)s 1.0.0",
help="show mrv version")
exclusive_group = parser.add_mutually_exclusive_group()
exclusive_group.add_argument("--cpg-range",
dest="range",
action="extend",
nargs="+",
type=str,
help=textwrap.dedent("""\
text, specify the range of CpG sites. The syntax format of RANGE is: [start,end].
You can give several RANGEs at once, such as
--cpg-range [1085,1200] [1220,1280] [2300,2395]"""))
exclusive_group.add_argument("--to-visualize-file",
dest="tovisualizefile",
type=str,
help=textwrap.dedent("""\
file path, specify the path of a text file. You can put the names of multiple
chromosomes and their CpG site intervals to be visualized in this text file, and
hand them to mrv to calculate and output multiple visual files. An example of
TOVISUALIZEFILE:
[Y]
5431,9587
15680,17506
12003,12210
80,3327
[KI270580.1]
1154,1669
756,1321
800,1154"""))
parsed_args = parser.parse_args(args)
if "--to-visualize-file" in args:
parsed_args.chromosome = None
else:
if "--chromosome" not in args and "--cpg-range" not in args:
print("""usage: python mrv.py <--data-file DATAFILE> {<--chromosome CHROMOSOME> <--cpg-range RANGE \
[RANGE ...]> | <--to-visualize-file TOVISUALIZEFILE>} [--output-dir OUTPUTDIR]
| --version
| --help
mrv.py: error: the following arguments are required: --chromosome and --cpg-range""")
return False
elif "--chromosome" not in args:
print("""usage: python mrv.py <--data-file DATAFILE> {<--chromosome CHROMOSOME> <--cpg-range RANGE \
[RANGE ...]> | <--to-visualize-file TOVISUALIZEFILE>} [--output-dir OUTPUTDIR]
| --version
| --help
mrv.py: error: the following arguments are required: --chromosome""")
return False
elif "--cpg-range" not in args:
print("""usage: python mrv.py <--data-file DATAFILE> {<--chromosome CHROMOSOME> <--cpg-range RANGE \
[RANGE ...]> | <--to-visualize-file TOVISUALIZEFILE>} [--output-dir OUTPUTDIR]
| --version
| --help
mrv.py: error: the following arguments are required: --cpg-range""")
return False
if DEBUG:
print("------DEBUG: parsed_args------")
print(parsed_args)
print("------parsed_args :DEBUG------")
if not os.path.exists(parsed_args.datafile):
print("""usage: python mrv.py <--data-file DATAFILE> {<--chromosome CHROMOSOME> <--cpg-range RANGE \
[RANGE ...]> | <--to-visualize-file TOVISUALIZEFILE>} [--output-dir OUTPUTDIR]
| --version
| --help
mrv.py: error: %s does not exist""" % parsed_args.datafile)
return False
elif not os.path.isfile(parsed_args.datafile):
print("""usage: python mrv.py <--data-file DATAFILE> {<--chromosome CHROMOSOME> <--cpg-range RANGE \
[RANGE ...]> | <--to-visualize-file TOVISUALIZEFILE>} [--output-dir OUTPUTDIR]
| --version
| --help
mrv.py: error: %s is not a file""" % parsed_args.datafile)
return False
global DATA_FILE
DATA_FILE = parsed_args.datafile
def add_to_chromosomes_cpg_ranges(ch, ra):
global CHROMOSOMES_CpG_RANGES
CHROMOSOMES_CpG_RANGES[ch] = ra
if parsed_args.range is not None:
ranges = []
rc = re.compile(r"^\[\d+,\d+\]$")
for range_str in parsed_args.range:
matched_obj = rc.match(range_str)
if matched_obj is not None:
li_str = matched_obj.group()
li = eval(li_str)
if li[0] == li[1]:
print("""usage: python mrv.py <--data-file DATAFILE> {<--chromosome CHROMOSOME> <--cpg-range \
RANGE [RANGE ...]> | <--to-visualize-file TOVISUALIZEFILE>} [--output-dir OUTPUTDIR]
| --version
| --help
mrv.py: error: %s is not a valid RANGE, the left and right endpoints of an interval should not be equal.""" %
range_str)
return False
elif li[0] > li[1]:
print("""usage: python mrv.py <--data-file DATAFILE> {<--chromosome CHROMOSOME> <--cpg-range \
RANGE [RANGE ...]> | <--to-visualize-file TOVISUALIZEFILE>} [--output-dir OUTPUTDIR]
| --version
| --help
mrv.py: error: %s is not a valid RANGE, the left endpoint of the interval should be less than the right endpoint.""" %
range_str)
return False
ranges.append(li)
else:
print("""usage: python mrv.py <--data-file DATAFILE> {<--chromosome CHROMOSOME> <--cpg-range RANGE \
[RANGE ...]> | <--to-visualize-file TOVISUALIZEFILE>} [--output-dir OUTPUTDIR]
| --version
| --help
mrv.py: error: %s is not a valid RANGE, the correct format of RANGE is such as [5431,9587]. Notice: there is no \
space between [ and ], and the numbers should be integer.""" % range_str)
return False
add_to_chromosomes_cpg_ranges(parsed_args.chromosome, ranges)
if parsed_args.tovisualizefile is not None:
if not os.path.exists(parsed_args.tovisualizefile):
print("""usage: python mrv.py <--data-file DATAFILE> {<--chromosome CHROMOSOME> <--cpg-range RANGE \
[RANGE ...]> | <--to-visualize-file TOVISUALIZEFILE>} [--output-dir OUTPUTDIR]
| --version
| --help
mrv.py: error: %s does not exist""" % parsed_args.tovisualizefile)
return False
elif not os.path.isfile(parsed_args.tovisualizefile):
print("""usage: python mrv.py <--data-file DATAFILE> {<--chromosome CHROMOSOME> <--cpg-range RANGE \
[RANGE ...]> | <--to-visualize-file TOVISUALIZEFILE>} [--output-dir OUTPUTDIR]
| --version
| --help
mrv.py: error: %s is not a file""" % parsed_args.tovisualizefile)
return False
tovisualizefile_example = textwrap.dedent("""\
[Y]
5431,9587
15680,17506
12003,12210
80,3327
[KI270580.1]
1154,1669
756,1321
800,1154""")
config = configparser.ConfigParser(delimiters=",")
try:
config.read(parsed_args.tovisualizefile, encoding="UTF-8")
except configparser.ParsingError as e:
print("""usage: python mrv.py <--data-file DATAFILE> {<--chromosome CHROMOSOME> <--cpg-range RANGE \
[RANGE ...]> | <--to-visualize-file TOVISUALIZEFILE>} [--output-dir OUTPUTDIR]
| --version
| --help
mrv.py: error: %s syntax error. Here is a correct syntax example of TOVISUALIZEFILE:\n%s""" %
(parsed_args.tovisualizefile, tovisualizefile_example))
return False
chromosomes = config.sections()
if len(chromosomes) == 0:
print("""usage: python mrv.py <--data-file DATAFILE> {<--chromosome CHROMOSOME> <--cpg-range RANGE \
[RANGE ...]> | <--to-visualize-file TOVISUALIZEFILE>} [--output-dir OUTPUTDIR]
| --version
| --help
mrv.py: error: %s syntax error: %s is an empty file. Here is a correct syntax example of TOVISUALIZEFILE:\n%s""" %
(parsed_args.tovisualizefile, parsed_args.tovisualizefile, tovisualizefile_example))
return False
if DEBUG:
print("------DEBUG: chromosomes------")
print(chromosomes)
print("------chromosomes :DEBUG------")
for chromosome in chromosomes:
ranges = []
ranges_list = config.items(chromosome)
for element in ranges_list:
try:
e1 = int(element[0])
e2 = int(element[1])
except ValueError as e:
print("""usage: python mrv.py <--data-file DATAFILE> {<--chromosome CHROMOSOME> <--cpg-range \
RANGE [RANGE ...]> | <--to-visualize-file TOVISUALIZEFILE>} [--output-dir OUTPUTDIR]
| --version
| --help
mrv.py: error: %s syntax error. The left and right endpoints of the interval should be integers. Here is \
a correct syntax example of TOVISUALIZEFILE:\n%s""" % (parsed_args.tovisualizefile, tovisualizefile_example))
return False
if e1 < e2:
li = [e1, e2]
elif e1 > e2:
li = [e2, e1]
else:
print("""usage: python mrv.py <--data-file DATAFILE> {<--chromosome CHROMOSOME> <--cpg-range \
RANGE [RANGE ...]> | <--to-visualize-file TOVISUALIZEFILE>} [--output-dir OUTPUTDIR]
| --version
| --help
mrv.py: error: %s syntax error. The left and right endpoints of the interval should not be equal. Here is \
a correct syntax example of TOVISUALIZEFILE:\n%s""" % (parsed_args.tovisualizefile, tovisualizefile_example))
return False
ranges.append(li)
if len(ranges_list) == 0:
continue
add_to_chromosomes_cpg_ranges(chromosome, ranges)
global OUT_PUTDIR
OUT_PUTDIR = parsed_args.outputdir
return True
def get_file_row_count(file_path):
with open(file_path, 'r', encoding="UTF-8") as f:
count = 0
for i in f:
count += 1
return count
def get_file_sha256sum(file_path):
with open(file_path, "rb") as f:
sha256sum = hashlib.new("sha256", b"")
while True:
data = f.read(64 * 1024)
if not data:
break
sha256sum.update(data)
return sha256sum.hexdigest()
def is_preprocessed(file_path, dir_path="."):
mrv_output_dir_path = "%s/%s/" % (dir_path.rstrip("/"), "mrv_output")
global DATA_FILE_ROW_COUNT
DATA_FILE_ROW_COUNT = get_file_row_count(file_path)
global DATA_FILE_SHA256SUM
DATA_FILE_SHA256SUM = get_file_sha256sum(file_path)
if DEBUG:
print("------DEBUG: DATA_FILE_SHA256SUM------")
print(DATA_FILE_SHA256SUM)
print("------DATA_FILE_SHA256SUM :DEBUG------")
global PREPROCESS_DIR
PREPROCESS_DIR = "%s/%s/" % (mrv_output_dir_path.rstrip("/"), DATA_FILE_SHA256SUM)
if not os.path.exists(PREPROCESS_DIR):
return False
if DATA_FILE_SHA256SUM not in os.listdir(mrv_output_dir_path):
return False
else:
meta_data_file_path = "%s/%s/%s" % (PREPROCESS_DIR.rstrip("/"), "data", "meta_data")
if not os.path.exists(meta_data_file_path):
return False
meta_data_file = open(meta_data_file_path, 'rb')
count = pickle.load(meta_data_file)
meta_data_file.close()
data_file_row_count = DATA_FILE_ROW_COUNT
if count == data_file_row_count:
return True
else:
return False
def create_output_directory(file_path, dir_path="."):
if dir_path != ".":
if not os.path.exists(dir_path):
try:
os.makedirs(dir_path)
except OSError as e:
print("""usage: python mrv.py <--data-file DATAFILE> {<--chromosome CHROMOSOME> <--cpg-range RANGE \
[RANGE ...]> | <--to-visualize-file TOVISUALIZEFILE>} [--output-dir OUTPUTDIR]
| --version
| --help
mrv.py: error: can not create directory %s""" % dir_path)
return False
mrv_output_dir_path = "%s/%s/" % (dir_path.rstrip("/"), "mrv_output")
if "mrv_output" not in os.listdir(dir_path):
try:
os.makedirs(mrv_output_dir_path)
except OSError as e:
print("""usage: python mrv.py <--data-file DATAFILE> {<--chromosome CHROMOSOME> <--cpg-range RANGE \
[RANGE ...]> | <--to-visualize-file TOVISUALIZEFILE>} [--output-dir OUTPUTDIR]
| --version
| --help
mrv.py: error: can not create directory %s""" % mrv_output_dir_path)
return False
sha256sum = DATA_FILE_SHA256SUM
global PREPROCESS_DIR
PREPROCESS_DIR = "%s/%s/" % (mrv_output_dir_path.rstrip("/"), sha256sum)
if sha256sum not in os.listdir(mrv_output_dir_path):
os.makedirs(PREPROCESS_DIR)
os.makedirs("%s/%s/" % (PREPROCESS_DIR.rstrip("/"), "data/"))
os.makedirs("%s/%s/" % (PREPROCESS_DIR.rstrip("/"), "visualization/"))
else:
shutil.rmtree(PREPROCESS_DIR)
os.makedirs(PREPROCESS_DIR)
os.makedirs("%s/%s/" % (PREPROCESS_DIR.rstrip("/"), "data/"))
os.makedirs("%s/%s/" % (PREPROCESS_DIR.rstrip("/"), "visualization/"))
return True
def preprocess_file(file_path):
if DEBUG:
print("------DEBUG: PREPROCESS_DIR------")
print(PREPROCESS_DIR)
print("------PREPROCESS_DIR :DEBUG------")
with open(file_path, 'r', encoding="UTF-8") as f:
chromosomes = []
sqlite3_conns = {}
count = 0
# progress_bar = 0
pbar = tqdm.tqdm(total=DATA_FILE_ROW_COUNT, desc='Processing', unit="rows", colour="GREEN")
for each_row in f:
row = each_row.strip()
row_split = row.split('\t')
if len(row_split) < 6:
print("Data format error! At least 6 columns of data, with each column separated by the tab key.")
pbar.close()
return False
count += 1
if count == 1:
continue
chromosome = row_split[0]
if chromosome not in chromosomes:
chromosomes.append(chromosome)
db = "%s/%s/%s.db" % (PREPROCESS_DIR.rstrip("/"), "data", chromosome)
conn = sqlite3.connect(db)
if DEBUG:
print("------DEBUG: SQLite db------")
print("creating %s..." % db)
print("------SQLite db :DEBUG------")
sqlite3_conns[chromosome] = conn
cur = conn.cursor()
cur.execute("""
CREATE TABLE '%s'(
start int,
read_name nvarchar(40),
is_methylated boolean DEFAULT 0
);""" % chromosome)
if DEBUG:
print("------DEBUG: SQLite table------")
print("creating table %s..." % chromosome)
print("------SQLite table :DEBUG------")
is_methylated = 0
if not row_split[5].startswith('-'):
is_methylated = 1
start = row_split[2]
read_name = row_split[4]
cur.execute("insert into '%s'(start,read_name,is_methylated) values(%s,'%s',%s);" %
(chromosome, start, read_name, is_methylated))
conn.commit()
else:
is_methylated = 0
if not row_split[5].startswith('-'):
is_methylated = 1
start = row_split[2]
read_name = row_split[4]
sqlite3_conns[chromosome].execute(
"insert into '%s'(start,read_name,is_methylated) values(%s,'%s',%s);" % (
chromosome, start, read_name, is_methylated))
sqlite3_conns[chromosome].commit()
pbar.update(1)
pbar.close()
if DEBUG:
print("------DEBUG: count------")
print("processed %s rows totally..." % count)
print("------count :DEBUG------")
meta_data_file_path = "%s/%s/%s" % (PREPROCESS_DIR.rstrip("/"), "data", "meta_data")
meta_data_file = open(meta_data_file_path, 'wb')
pickle.dump(count, meta_data_file)
meta_data_file.close()
for key in sqlite3_conns.keys():
chromosome = key
conn = sqlite3_conns[key]
conn.commit()
if DEBUG:
print("------DEBUG: SQLite index------")
print("creating index of %s table..." % chromosome)
print("------SQLite index :DEBUG------")
conn.execute("CREATE INDEX start_index ON '%s'(start);" % chromosome)
conn.commit()
conn.execute("CREATE INDEX read_name_index ON '%s'(read_name);" % chromosome)
conn.commit()
conn.close()
return True
def preprocess_chromosomes_cpg_ranges(chromosomes_cpg_ranges):
def preprocess_chromosome_cpg_ranges(chromosome, cpg_ranges):
if DEBUG:
print("------DEBUG: chromosome and cpg_ranges------")
print("%s:%s" % (chromosome, cpg_ranges))
print("------chromosome and cpg_ranges :DEBUG------")
old_intervals = portion.empty()
for e in cpg_ranges:
interval = portion.closed(e[0], e[1])
old_intervals = old_intervals.union(interval)
global CHROMOSOMES_CpG_RANGES
new_ranges = []
for e in portion.to_data(old_intervals):
new_ranges.append([e[1], e[2]])
if len(new_ranges) == 0:
CHROMOSOMES_CpG_RANGES[chromosome] = None
print("There is no CpGs' information of chromosome %s in given ranges." % chromosome)
else:
CHROMOSOMES_CpG_RANGES[chromosome] = new_ranges
global CHROMOSOMES_CpG_RANGES
for key in CHROMOSOMES_CpG_RANGES.keys():
dbs_dir = "%s/%s/" % (PREPROCESS_DIR.rstrip("/"), "data")
if (key + ".db") not in os.listdir(dbs_dir):
CHROMOSOMES_CpG_RANGES[key] = None
print("The data of chromosome %s you specified does not exist in the data file." % key)
continue
preprocess_chromosome_cpg_ranges(key, CHROMOSOMES_CpG_RANGES[key])
if DEBUG:
print("------DEBUG: new CHROMOSOMES_CpG_RANGES------")
print(CHROMOSOMES_CpG_RANGES)
print("------new CHROMOSOMES_CpG_RANGES :DEBUG------")
return True
def visualize(chromosomes_cpg_ranges):
def visualize_one(chromosome, cpg_ranges):
print("For chromosome %s and the given ranges:" % chromosome)
read_names = []
cpg_positions = []
db = "%s/%s/%s.db" % (PREPROCESS_DIR.rstrip("/"), "data", chromosome)
if not os.path.exists(db):
print("the data of chromosome %s you specified does not exist in the data file." % chromosome)
return False
conn = sqlite3.connect(db)
cur = conn.cursor()
cpg_ranges_len = len(cpg_ranges)
if cpg_ranges_len == 0:
print("there is no CpGs' information of chromosome %s in given ranges." % chromosome)
return False
if cpg_ranges_len == 1:
interval = cpg_ranges[0]
dql = "SELECT DISTINCT read_name FROM (SELECT DISTINCT read_name,start FROM '%s' WHERE start BETWEEN %s\
AND %s ORDER BY start);" % (chromosome, interval[0], interval[1])
cur.execute(dql)
for row in cur:
read_names.append(row[0])
elif cpg_ranges_len <= 300:
dql = "SELECT DISTINCT read_name FROM "
count = 1
for interval in cpg_ranges:
if count == 1:
dql += "(SELECT DISTINCT read_name,start FROM '%s' WHERE start BETWEEN %s AND %s" % (chromosome,
interval[0],
interval[1])
elif count == cpg_ranges_len:
dql += " UNION SELECT DISTINCT read_name,start FROM '%s' WHERE start BETWEEN %s AND %s\
ORDER BY start);" % (chromosome, interval[0], interval[1])
else:
dql += " UNION SELECT DISTINCT read_name,start FROM '%s' WHERE start BETWEEN %s AND %s" % (
chromosome, interval[0], interval[1])
count += 1
cur.execute(dql)
for row in cur:
read_names.append(row[0])
else:
try:
cur.execute("DROP TABLE read_names;")
except sqlite3.OperationalError:
pass
cur.execute("CREATE TABLE read_names(start int, read_name nvarchar(40));")
conn.commit()
if DEBUG:
print("------DEBUG: SQLite table------")
print("creating temporary table read_names...")
print("------SQLite table :DEBUG------")
dql = "SELECT DISTINCT read_name FROM "
count = 1
times300 = 1
for interval in cpg_ranges:
if count == 1:
dql += "(SELECT DISTINCT read_name,start FROM '%s' WHERE start BETWEEN %s AND %s" % (chromosome,
interval[0],
interval[1])
elif count == cpg_ranges_len:
dql += " UNION SELECT DISTINCT read_name,start FROM '%s' WHERE start BETWEEN %s AND %s\
ORDER BY start);" % (chromosome, interval[0], interval[1])
cur.execute(dql)
for row in cur:
cur_temp = conn.cursor()
cur_temp.execute("insert into read_names(read_name) values('%s');" % row[0])
conn.commit()
break
else:
dql += " UNION SELECT DISTINCT read_name,start FROM '%s' WHERE start BETWEEN %s AND %s" % (
chromosome, interval[0], interval[1])
if count >= 300 * times300 - 1:
dql += " UNION SELECT DISTINCT read_name,start FROM '%s' WHERE start BETWEEN %s AND %s\
ORDER BY start);" % (chromosome, interval[0], interval[1])
cur.execute(dql)
for row in cur:
dql_temp = "SELECT start FROM '%s' WHERE read_name='%s' ORDER BY start LIMIT 1;" % (
chromosome, row[0])
cur_temp = conn.cursor()
cur_temp.execute(dql_temp)
for row_temp in cur_temp:
the_first_CpG_position = row_temp[0]
cur_temp.execute("insert into read_names(start, read_name) values(%d,'%s');" % (
the_first_CpG_position, row[0]))
conn.commit()
times300 += 1
dql = "SELECT DISTINCT read_name FROM (SELECT DISTINCT read_name,start FROM '%s' WHERE 1=0 " % chromosome
count += 1
cur.execute("CREATE INDEX read_names_table_read_name_index ON read_names (read_name);")
cur.execute("CREATE INDEX read_names_table_start_index ON read_names (start);")
conn.commit()
dql = "SELECT DISTINCT read_name FROM read_names ORDER BY start;"
cur.execute(dql)
for row in cur:
read_names.append(row[0])
if DEBUG:
print("------DEBUG: read_names------")
# print(read_names)
print("length of read_names is %d" % len(read_names))
print("------read_names :DEBUG------")
read_names_len = len(read_names)
if read_names_len == 0:
print("there is no CpGs' information of chromosome %s in given ranges." % chromosome)
return False
where_clause_part = ""
count = 1
for r in cpg_ranges:
if count == 1:
where_clause_part += " start between %s and %s " % (r[0], r[1])
else:
where_clause_part += "or start between %s and %s " % (r[0], r[1])
count += 1
if read_names_len == 1:
read_name = read_names[0]
dql = "SELECT DISTINCT start FROM '%s' WHERE read_name='%s' and %s ORDER BY start;" % (
chromosome, read_name, where_clause_part)
cur.execute(dql)
for row in cur:
cpg_positions.append(row[0])
elif read_names_len <= 300:
dql = ""
count = 1
for read_name in read_names:
if count == 1:
dql += "SELECT DISTINCT start FROM '%s' WHERE read_name='%s' and %s " % (
chromosome, read_name, where_clause_part)
elif count == read_names_len:
dql += " UNION SELECT DISTINCT start FROM '%s' WHERE read_name='%s' and %s ORDER BY start;" % (
chromosome, read_name, where_clause_part)
else:
dql += " UNION SELECT DISTINCT start FROM '%s' WHERE read_name='%s' and %s " % (
chromosome, read_name, where_clause_part)
count += 1
cur.execute(dql)
for row in cur:
cpg_positions.append(row[0])
else:
try:
cur.execute("DROP TABLE cpg_positions;")
except sqlite3.OperationalError:
pass
cur.execute("CREATE TABLE cpg_positions(start int);")
conn.commit()
dql = ""
count = 1
times300 = 1
for read_name in read_names:
if count == 1:
dql += "SELECT DISTINCT start FROM '%s' WHERE read_name='%s' and %s " % (
chromosome, read_name, where_clause_part)
elif count == read_names_len:
dql += " UNION SELECT DISTINCT start FROM '%s' WHERE read_name='%s' and %s ORDER BY start;" % (
chromosome, read_name, where_clause_part)
for row in cur:
cur_temp = conn.cursor()
cur_temp.execute("insert into cpg_positions(start) values(%d);" % row[0])
conn.commit()
break
else:
dql += " UNION SELECT DISTINCT start FROM '%s' WHERE read_name='%s' and %s " % (
chromosome, read_name, where_clause_part)
if count >= 300 * times300 - 1:
dql += " UNION SELECT DISTINCT start FROM '%s' WHERE read_name='%s' and %s ORDER BY start;" % (
chromosome, read_name, where_clause_part)
cur.execute(dql)
for row in cur:
cur_temp = conn.cursor()
cur_temp.execute("insert into cpg_positions(start) values(%d);" % row[0])
conn.commit()
times300 += 1
dql = "SELECT DISTINCT start FROM '%s' where 1=0 " % chromosome
count += 1
cur.execute("CREATE INDEX cpg_positions_table_start_index ON cpg_positions (start);")
conn.commit()
dql = "SELECT DISTINCT start FROM cpg_positions ORDER BY start;"
cur.execute(dql)
for row in cur:
cpg_positions.append(row[0])
if DEBUG:
print("------DEBUG: cpg_positions------")
print("length of cpg_positions is %d" % len(cpg_positions))
print("------cpg_positions :DEBUG------")
matrix = [[-1] * len(cpg_positions) for _ in range(len(read_names))]
for i in range(len(read_names)):
the_first_CpG_position = 0
dql = "SELECT start FROM '%s' WHERE read_name='%s' and %s ORDER BY start LIMIT 1;" % (
chromosome, read_names[i], where_clause_part)
cur.execute(dql)
for row in cur:
the_first_CpG_position = row[0]
the_first_CpG_position_index = cpg_positions.index(the_first_CpG_position)
dql = "SELECT DISTINCT start, is_methylated FROM '%s' WHERE read_name='%s' and %s ORDER BY start;" % (
chromosome, read_names[i], where_clause_part)
cur.execute(dql)
j = the_first_CpG_position_index
for row in cur:
if j >= len(cpg_positions):
if DEBUG:
print("------Error:j >= len(cpg_positions)------")
print("read_name=%s, row[0]=%d, j=%d" % (read_names[i], row[0], j))
print("------j >= len(cpg_positions) :Error------")
return False
continue
if row[0] != cpg_positions[j]:
pass
else:
if row[1] == 1:
matrix[i][j] = 1
else:
matrix[i][j] = 0
j += 1
conn.close()
indexes = []
for i in range(len(matrix)):
if matrix[i].count(1) == 1 and matrix[i].count(1) + matrix[i].count(-1) == len(matrix[i]) or \
matrix[i].count(0) == 1 and matrix[i].count(0) + matrix[i].count(-1) == len(matrix[i]):
indexes.append(i)
read_names_to_remove = [read_names[i] for i in indexes]
rows_to_remove = [matrix[i] for i in indexes]
for obj in read_names_to_remove:
read_names.remove(obj)
for obj in rows_to_remove:
matrix.remove(obj)
finished_flag = 0
for j in reversed(list(range(len(cpg_positions)))):
del_flag = 1
for i in range(len(read_names)):
if matrix[i][j] != -1:
del_flag = 0
finished_flag = 1
break
if finished_flag:
break
for i in range(len(read_names)):
if del_flag == 1:
matrix[i].pop()
cpg_positions.pop()
if finished_flag == 0:
print("there is no CpGs' information of chromosome %s in given ranges." % chromosome)
return False
visualization_txt_file_path = "%s/%s/%s_%s_%s_visualization.txt" % (
PREPROCESS_DIR.rstrip("/"), "visualization", chromosome, cpg_positions[0], cpg_positions[-1])
with open(visualization_txt_file_path, 'w', encoding="UTF-8") as f:
blank = ""
for i in range(len(read_names[0])):
blank += " "
f.write(blank)
f.write('\t')
for cpg in cpg_positions:
f.write(str(cpg))
f.write('\t')
f.write(os.linesep)
for r in range(len(read_names)):
f.write(read_names[r])
f.write('\t')
i = 0
for c in matrix[r]:
for j in range(len(str(cpg_positions[i])) - 1):
f.write(' ')
if c == -1:
f.write(' ')
elif c == 0:
f.write('0')
else:
f.write('1')
f.write('\t')
i += 1
f.write(os.linesep)
print("the visualization txt file is at %s" % visualization_txt_file_path)
import matplotlib.ticker as ticker
matplotlib.use('svg')
if len(cpg_positions) * 2 <= len(read_names):
fig, ax = plt.subplots(figsize=(len(cpg_positions) * 4 * 0.6, len(read_names) * 0.6),
constrained_layout=True)
else:
fig, ax = plt.subplots(figsize=(len(cpg_positions) * 2.5 * 0.6, len(read_names) * 0.6),
constrained_layout=True)
plt.ylim(0, len(read_names) + 1)
plt.xlim(0, len(cpg_positions) + 1)
xticks = range(1, len(cpg_positions) + 1)
xlabels = cpg_positions
ax.set_xticks(xticks)
ax.set_xticklabels(labels=xlabels, rotation=30, ha="center")
yticks = range(1, len(read_names) + 1)
ylabels = read_names
ax.set_yticks(yticks)
ax.set_yticklabels(labels=ylabels)
ax.xaxis.set_major_locator(ticker.MultipleLocator(1))
ax.yaxis.set_major_locator(ticker.MultipleLocator(1))
def draw_a_read(i):
start_index = 0
end_index = -1
flag = 0
for j in range(len(cpg_positions)):
if matrix[i][j] != -1 and flag == 0:
start_index = j
flag = 1
continue
if matrix[i][j] == -1 and flag == 1:
k = j
while k < len(cpg_positions) - 1 and matrix[i][k] == -1:
k = k + 1
if not k < len(cpg_positions) - 1:
end_index = j - 1
break
else:
continue
if j == len(cpg_positions) - 1 and flag == 1:
end_index = j
x = list(range(start_index, end_index + 1))
markers0 = []
markers1 = []
for j in x:
if matrix[i][j] == -1:
pass
elif matrix[i][j] == 0:
markers0.append(j)
else:
markers1.append(j)
x = [_ + 1 for _ in x]
markers0 = [_ + 1 for _ in markers0]
markers1 = [_ + 1 for _ in markers1]
y = [i + 1 for _ in range(len(x))]
ax.plot(x, y, linestyle='-', color="black", linewidth=2)
for maker in markers0:
ax.plot(maker, y[0], marker='o', markersize=8, markerfacecolor='white', markeredgecolor='black')
for maker in markers1:
ax.plot(maker, y[0], marker='o', markersize=8, markerfacecolor='black', markeredgecolor='black')
for i in range(len(read_names)):
draw_a_read(i)
ax.set_title("CpG ranges [%s, %s] of %s Chromosome" % (cpg_positions[0], cpg_positions[-1], chromosome),
fontsize=12)
ax.set_xlabel("CpG sites", fontsize=12)
ax.set_ylabel("reads", fontsize=12)
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
visualization_svg_file_path = "%s/%s/%s_%s_%s_visualization.svg" % (
PREPROCESS_DIR.rstrip("/"), "visualization", chromosome, cpg_positions[0], cpg_positions[-1])
fig.savefig(visualization_svg_file_path, dpi=600, format='svg')
print("the visualization svg file is at %s" % visualization_svg_file_path)
global CHROMOSOMES_CpG_RANGES
for key in CHROMOSOMES_CpG_RANGES.keys():
if CHROMOSOMES_CpG_RANGES[key] is None:
continue
visualize_one(key, CHROMOSOMES_CpG_RANGES[key])
return True
def main():
if sys.version_info.major < 3:
print("You current Python version is %d.%d, the mrv require Python 3, Python 3.6+ is better." %
(sys.version_info.major, sys.version_info.minor))
sys.exit(1)
# print("Parsing options...")
if not parse_args(sys.argv):
print("Error parsing command line options.")
sys.exit(1)
print("Parsing command line options...")
print("Done.")
if DEBUG:
print("------DEBUG: global variables------")
print("DATA_FILE:")
print(DATA_FILE)
print("CHROMOSOMES_CpG_RANGES:")
print(CHROMOSOMES_CpG_RANGES)
print("OUT_PUTDIR:")
print(OUT_PUTDIR)
print("------global variables :DEBUG------")
if not is_preprocessed(DATA_FILE, OUT_PUTDIR):
print("\nCreating output directory...")
if not create_output_directory(DATA_FILE, OUT_PUTDIR):
sys.exit(1)
print("Done.")
print("\nPreprocessing %s..." % DATA_FILE)
if not preprocess_file(DATA_FILE):
sys.exit(1)
print("Done.")
print("\nPreprocessing Chromosomes and their CpG ranges...")
if not preprocess_chromosomes_cpg_ranges(CHROMOSOMES_CpG_RANGES):
sys.exit(1)
print("Done.")
print("\nVisualizing...")
if not visualize(CHROMOSOMES_CpG_RANGES):
sys.exit(1)
print("\nAll have done.")
sys.exit(0)
if __name__ == "__main__":
main()
``` |
{
"source": "JohnGWebDev/coinloggr",
"score": 2
} |
#### File: coinloggr/ai_coin_identifier/views.py
```python
from django.shortcuts import render
# Create your views here.
def ai_coin_identifier_home_page(request):
return render(request, 'ai_coin_identifier/index.html')
```
#### File: coinloggr/main/utilities.py
```python
import datetime
from django.core.validators import MaxValueValidator
def get_year():
return datetime.date.today().year
def coin_year_max_value(value):
return MaxValueValidator(get_year())(value)
def auto_slug_function(instance):
c = instance.composition
d = instance.denomination
g = instance.grade
y = str(instance.year)
if c in d or d in c:
return g + " " + y + " " + d
else:
return g + " " + y + " " + c + " " + d
```
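A quick illustrative sketch of how `auto_slug_function` composes a slug. The `Coin` namedtuple below is a hypothetical stand-in for the real model instance (it only carries the attributes the function reads) and is not part of the repository:
```python
from collections import namedtuple
# Assumes the function is importable, e.g. from main.utilities import auto_slug_function
# (per the file header above).

# Hypothetical stand-in for the model instance; only the attributes read by
# auto_slug_function are provided.
Coin = namedtuple("Coin", ["composition", "denomination", "grade", "year"])

# The composition is already contained in the denomination, so it is not repeated.
auto_slug_function(Coin("Copper", "Copper Cent", "MS-63", 1909))
# -> 'MS-63 1909 Copper Cent'

# Otherwise the slug joins grade, year, composition and denomination.
auto_slug_function(Coin("Silver", "Dime", "VF-20", 1942))
# -> 'VF-20 1942 Silver Dime'
```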
#### File: main/views/base.py
```python
from django.shortcuts import render
from django.views.generic.edit import CreateView
from main import models
# Create your views here.
# Temporary landing page allows users to sign up for pre-release updates
class LandingPage(CreateView):
model = models.PreReleaseSubscriber
fields = '__all__'
template_name = 'main/index.html'
success_url = '/thank-you/'
# A page to thank users for signing up
def thank_you(request):
return render(request, 'main/thank_you.html')
# def dashboard(request):
# return render(request, 'main/dashboard.html')
# def newsfeed(request):
# return render(request, 'main/newsfeed.html')
# def explore(request):
# return render(request, 'main/explore.html')
``` |
{
"source": "JohnH01/training",
"score": 2
} |
#### File: training/accounts/views.py
```python
from django.shortcuts import render, HttpResponse
# Create your views here.
def home(request):
data = {
'first_name': request.user.first_name,
'last_name': request.user.last_name,
'username': request.user.username
}
return render(request, 'accounts/home.html', data)
``` |
{
"source": "johnh2o2/kdsphere",
"score": 3
} |
#### File: kdsphere/kdsphere/kdsphere.py
```python
import numpy as np
from scipy.spatial import cKDTree, KDTree
from .utils import spherical_to_cartesian
class KDSphere(object):
"""KD Tree for Spherical Data, built on scipy's cKDTree
Parameters
----------
data : array_like, shape (N, 2)
(lon, lat) pairs measured in radians
**kwargs :
Additional arguments are passed to cKDTree
"""
def __init__(self, data, **kwargs):
self.data = np.asarray(data)
self.data3d = spherical_to_cartesian(self.data)
self.kdtree_ = cKDTree(self.data3d, **kwargs)
def query(self, data, k=1, eps=0, **kwargs):
"""Query for k-nearest neighbors
Parameters
----------
data : array_like, shape (N, 2)
(lon, lat) pairs measured in radians
k : integer
The number of nearest neighbors to return.
eps : non-negative float
Return approximate nearest neighbors; the k-th returned value
is guaranteed to be no further than (1+eps) times the
distance to the real k-th nearest neighbor.
Returns
-------
d : array_like, float, shape=(N, k)
The distances to the nearest neighbors
i : array_like, int, shape=(N, k)
The indices of the neighbors
"""
data_3d, r = spherical_to_cartesian(data, return_radius=True)
dist_3d, ind = self.kdtree_.query(data_3d, k=k, eps=eps, **kwargs)
        # Convert the 3D chord length back to great-circle (angular) distance in radians.
        dist_2d = 2 * np.arcsin(dist_3d * 0.5 / r)
return dist_2d, ind
def query_ball_tree(self, data, r, **kwargs):
""" Query for matches within ``r`` radians
Parameters
----------
        data : KDSphere, KDTree, cKDTree, or array_like, shape (N, 2)
            Either (lon, lat) pairs measured in radians, or an already-built
            KDSphere (or KDTree) instance to match against
r : float
Search radius (radians)
**kwargs:
Additional arguments passed to scipy.spatial.cKDTree.query_ball_tree
Returns
-------
matches: list of lists
For each element ``self.data[i]`` of this tree, ``matches[i]`` is
a list of the indices of its neighbors in ``data``
"""
other_kdtree = None
if isinstance(data, (cKDTree, KDTree)):
other_kdtree = data
elif isinstance(data, KDSphere):
other_kdtree = data.kdtree_
else:
other_kdtree = KDSphere(data).kdtree_
return self.kdtree_.query_ball_tree(other_kdtree, r, **kwargs)
def query_ball_point(self, data, r, **kwargs):
""" Query for all points within within ``r`` radians of ``data``.
Parameters
----------
data : tuple or array_like, shape (N, 2)
(lon, lat) pair(s) measured in radians
r : float
Search radius (radians)
**kwargs:
Additional arguments passed to ``scipy.spatial.cKDTree.query_ball_point``
Returns
-------
matches: list or list of lists
            If ``data`` is a single (lon, lat) pair, ``matches`` is a list
            of indices of neighbors within ``r`` radians. If ``data`` is a list
            of (lon, lat) pairs, ``matches`` is a list of lists, and
``matches[i]`` is a list of the indices of neighbors within ``r``
radians from ``data[i]``.
"""
data_3d = spherical_to_cartesian(np.atleast_2d(data),
return_radius=False)
results = self.kdtree_.query_ball_point(data_3d, r, **kwargs)
if np.atleast_2d(data).shape[0] == 1:
return results[0]
return results
```
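A short usage sketch follows; it assumes the package is importable as `kdsphere` (as it is in the tests below) and simply exercises the query methods defined above:
```python
import numpy as np
from kdsphere import KDSphere

rng = np.random.RandomState(0)
lon = 2 * np.pi * rng.rand(1000)          # longitudes in radians
lat = np.pi * (0.5 - rng.rand(1000))      # latitudes in radians
catalog = KDSphere(np.column_stack([lon, lat]))

# Angular distance (radians) and index of the nearest catalog point
# for each query point.
queries = np.array([[0.1, 0.2], [3.0, -0.5]])
dist, ind = catalog.query(queries, k=1)

# Indices of all catalog points within roughly one degree of the first query.
matches = catalog.query_ball_point(queries[0], r=np.radians(1.0))
```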
#### File: kdsphere/tests/test_kdsphere.py
```python
import numpy as np
from numpy.testing import assert_allclose
from nose import SkipTest
from kdsphere import KDSphere
def generate_lon_lat(N, rseed=42):
rand = np.random.RandomState(rseed)
lon = 2 * np.pi * rand.rand(N)
lat = np.pi * (0.5 - rand.rand(N))
return lon, lat
def test_kdsphere_vs_astropy():
try:
from astropy.coordinates import SkyCoord
except ImportError:
raise SkipTest('astropy not available')
lon1, lat1 = generate_lon_lat(100, rseed=1)
lon2, lat2 = generate_lon_lat(100, rseed=2)
coord1 = SkyCoord(lon1, lat1, unit='rad')
coord2 = SkyCoord(lon2, lat2, unit='rad')
i_apy, d2d_apy, d3d_apy = coord2.match_to_catalog_3d(coord1)
data1 = np.array([lon1, lat1]).T
data2 = np.array([lon2, lat2]).T
kd = KDSphere(data1)
dist, ind = kd.query(data2, k=1)
assert_allclose(ind.ravel(), i_apy)
assert_allclose(dist.ravel(), d2d_apy.radian)
``` |
{
"source": "johnh865/election_sim",
"score": 3
} |
#### File: archive/archive_benchmarks/dummytemplate.py
```python
import time
from votesim.benchmarks.runtools import benchrun
from votesim.models import spatial
BENCHMARK_NAME = 'dummy'
OUTPUT_FILE = BENCHMARK_NAME + '-%s.pkl.gz'
def model(x, methods):
"""Define election model here
Parameters
----------
x : tuple
Input arguments created from generator `case_args`
Returns
--------
out : Election
Election object.
"""
#time.sleep(1)
seed = x
cnum = 2
vnum = 10
ndim = 1
strategy = 'candidate'
trialnum = 2
e = spatial.Election(None, None, seed=seed, name=BENCHMARK_NAME)
v = spatial.Voters(seed=seed, strategy=strategy)
v.add_random(vnum, ndim=ndim)
for trial in range(trialnum):
c = spatial.Candidates(v, seed=trial)
c.add_random(cnum, sdev=1.5)
e.set_models(voters=v, candidates=c)
e.user_data(seed=seed)
for method in methods:
e.run(etype=method)
return e
def case_args(methods):
"""Define benchmark parameters in this generator
Parameters
----------
methods : list of str
Voting methods to evaluate.
Yields
---------
args : tuple
Arguments passed onto benchmark `model` function.
"""
for i in range(15):
yield (i, methods)
def run(methods, filename=OUTPUT_FILE, cpus=1):
"""Define function to run benchmark"""
return benchrun(methods,
model=model,
case_args=case_args,
filename=filename,
cpus=cpus,
)
```
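A brief sketch of how this template might be driven. The method names below ('irv', 'top_two') are taken from the analysis scripts elsewhere in the repository and are assumptions rather than part of this module:
```python
# Run the dummy benchmark for two voting methods on a single CPU; results are
# written according to the OUTPUT_FILE pattern defined above.
if __name__ == '__main__':
    run(methods=['irv', 'top_two'], cpus=1)
```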
#### File: election_sim/globalcache/globalcache.py
```python
from collections import OrderedDict
from functools import wraps
import inspect
CACHE_INFO = {}
CACHE_INFO['GLOBAL_CACHE'] = {}
CACHE_INFO['SIZE'] = {}
CACHE_INFO['ENABLE'] = False
def create(g, name='GLOBAL_CACHE', maxsize=1, reset=False, enable=True):
"""
Create a global dictionary cache.
Parameters
-----------
g : dict from globals()
Input globals() from main script which you want to control cache from.
name : str
Name of the global cache you are creating
maxsize : int
Max number of previous output to keep for each cached function.
Defaults to 1.
reset : bool (default False)
If True, resets the cache
Returns
------
d : dict
global cache created inside globals()
"""
sizename = name + "_SIZE"
name = "__" + name + "__"
if (name not in g) or reset:
g[name] = {}
g[sizename] = {}
CACHE_INFO['GLOBAL_CACHE'] = g[name]
CACHE_INFO['SIZE'] = g[sizename]
CACHE_INFO['ENABLE'] = enable
return g[name]
def reset():
"""Reset the global cache"""
del CACHE_INFO['GLOBAL_CACHE']
del CACHE_INFO['SIZE']
return
def disable():
CACHE_INFO['ENABLE'] = False
def enable():
CACHE_INFO['ENABLE'] = True
def cache_decorate(name, alias_key='cache_alias', size=1):
"""Decorator used to cache/memoize a function. You must assign
a unique name to the function to store into the cache.
    This decorator also adds a keyword argument to the target function that can be
    used to pass a hashable alias for the arguments, used as the cache lookup key.
Parameters
-----------
name : str
Unique name used to store function output.
alias_key : str, default "cache_alias"
        New keyword name. cache_decorate gives the decorated function an extra
        keyword argument; alias_key specifies the name of that keyword.
        The new keyword can be used to pass an alternative hashable alias
        for the arguments.
Returns
--------
wrapper :
A new function decorator that supports caching.
"""
def wrapper(fn):
module = inspect.getsourcefile(fn)
@wraps(fn)
def newfunc(*args, **kwargs):
########################################
### Run the function normally if cache not enabled. Get rid of the alias_key.
if not CACHE_INFO['ENABLE']:
try:
kwargs.pop(alias_key)
except KeyError:
pass
return fn(*args, **kwargs)
### Construct keys for dictionary read/access
try:
key = kwargs.pop(alias_key)
except KeyError:
key = (args, frozenset(kwargs.items()))
########################################
### Retrieve global cache.
g = CACHE_INFO['GLOBAL_CACHE']
gsize = CACHE_INFO['SIZE']
if module not in g:
g[module] = {}
gsize[module] = {}
module_dict = g[module]
maxsize_dict = gsize[module]
maxsize_dict[name] = size
########################################
### Retrieve function cache
try:
func_dict = module_dict[name]
except KeyError:
func_dict = OrderedDict()
module_dict[name] = func_dict
### Get cache size limit
maxsize = maxsize_dict[name]
### Get value of function dictionary
try:
return func_dict[key]
except KeyError:
value = fn(*args, **kwargs)
func_dict[key] = value
if len(func_dict) > maxsize:
func_dict.popitem(False)
return value
return newfunc
return wrapper
#def _run_func(name, func, key, args, kwargs):
# """
# Run function with cache on.
#
# Parameters
# ----------
# name : str
# Name of function
# func : function
# Function to decorate
# key : Hashable
# Key signature of arguments
# args : tuple
# Positional arguments for function
# kwargs : dict
# Keyword arguments for function
#
# """
#
# # Get name of function as source file path + name
#
# if not CACHE_INFO['ENABLE']:
# return func(*args, **kwargs)
#
# module = inspect.getsourcefile(func)
#
# gdict = CACHE_INFO['GLOBAL_CACHE'][module]
# maxsize = CACHE_INFO['SIZE'][module]
#
## self.lastargs[name] = key
#
# # Get dictioary where function is stored
# try:
# func_dict = gdict[name]
#
# except KeyError:
# func_dict = OrderedDict()
# gdict[name] = func_dict
#
# # Get value of function dictionary
# try:
# return func_dict[key]
# except KeyError:
# value = func(*args, **kwargs)
# func_dict[key] = value
#
# if len(func_dict) > maxsize:
# func_dict.popitem(False)
# return value
#
#
#
```
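A minimal usage sketch, assuming the module is importable as `globalcache` (as it is in the simulation scripts elsewhere in this repository):
```python
import globalcache

# Create (or re-attach to) the cache stored in this script's globals().
g = globalcache.create(globals())

@globalcache.cache_decorate('slow_square')
def slow_square(x):
    # Stand-in for an expensive computation.
    return x * x

slow_square(4)                      # computed and stored in the global cache
slow_square(4)                      # served from the cache
slow_square(4, cache_alias='four')  # cached under the hashable alias instead
```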
#### File: sims/basecompare/post.py
```python
import os
import itertools
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import definitions
import globalcache
# sns.set(style="white", rc={"axes.facecolor": (0, 0, 0, 0)})
sns.set()
pd.options.mode.chained_assignment = 'raise'
import votesim
from votesim.benchmarks import simple
from votesim import plots, post
# %% Read
benchmark = simple.simple_base_compare_test()
dirname = definitions.DIR_DATA_BENCHMARKS
dirname = os.path.join(dirname, benchmark.name)
@globalcache.cache_decorate('read')
def read():
return benchmark.read(dirname=dirname)
g = globalcache.create(globals())
p = read()
df = p.dataframe
# %%
tolname = 'args.voter-0.1.set_behavior.tol'
basename = 'args.voter-0.1.set_behavior.base'
yname = 'args.etype'
zname = 'output.winner.regret_efficiency_candidate'
df1 = df[[
tolname, basename, yname, zname]].copy()
df1[zname] = df1[zname] * 100
groupby = df1.groupby(by=basename)
for basename1 in groupby.groups:
dfb = groupby.get_group(basename1)
plt.figure()
plt.title(basename1)
plots.heatmap(x=tolname, y=yname, hue=zname, data=dfb)
```
#### File: simple3way/archive/probe_irv.py
```python
import os
import itertools
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import globalcache
import definitions
sns.set()
import votesim
from votesim.benchmarks import simple
from votesim import plots
benchmark = simple.simple3way()
dirname = definitions.DIR_DATA_BENCHMARKS
dirname = os.path.join(dirname, benchmark.name)
@globalcache.cache_decorate('read')
def read():
return benchmark.read(dirname=dirname)
# %% Read
g = globalcache.create(globals())
p = read()
df = p.dataframe
# %% Post
## Retrieve elections where IRV and top_two results disagree
yname = 'args.etype'
xname = 'output.winner.regret_efficiency_voter'
vseed = 'args.voter.0.set_seed.seed'
cseed = 'args.candidate.0.set_seed.seed'
parameters = list(p.parameters)
parameters.remove(yname)
df1 = df.copy()
# Filter by voting methods and tolerance
method1 = 'irv'
method2 = 'top_two'
i1 = df1[yname] == method1
i2 = df1[yname] == method2
i3 = df1['args.user.voter_tolerance'] == 3.0
inew = (i1 | i2) & i3
df1 = df1.loc[inew].reset_index(drop=False)
# df1 = df1.set_index(p.parameters)
df2 = df1[[
'index',
vseed,
cseed,
yname,
xname,
]]
df2 = df2.set_index([vseed, cseed, yname])
df2 = df2.unstack(yname)
regrets = 1 - df2[xname]
disagree = regrets.loc[:, 'irv'] != regrets.loc[:, 'top_two']
df2 = df2.loc[disagree]
# %% Re-run
ii = 0
i_irv = df2.values[ii, 0]
i_tt = df2.values[ii, 1]
e1 = benchmark.rerun(index=i_irv, df=df)
e2 = benchmark.rerun(index=i_tt, df=df)
```
#### File: sims/simple3way/plot_vse.py
```python
import os
import itertools
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import globalcache
import definitions
# sns.set(style="white", rc={"axes.facecolor": (0, 0, 0, 0)})
sns.set()
pd.options.mode.chained_assignment = 'raise'
import votesim
from votesim.benchmarks import simple
from votesim import plots
benchmark = simple.simple3way()
dirname = definitions.DIR_DATA_BENCHMARKS
dirname = os.path.join(dirname, benchmark.name)
@globalcache.cache_decorate('read')
def read():
return benchmark.read(dirname=dirname)
def categorize(df):
"""
Category Combinations
Labels
-------
- M = majority winner
- P = plurality winner
- C = condorcet winner
- U = utility winner
Categories
----------
- MU = Has majority utility winner
- M = Has majority winner that is not utility winner.
-
- CPU = Has condorcet, utility, plurality winner
- CU = Has condorcet, utility winner that is not plurality winner
- CP = Has condorcet, plurality winner that is not utility winner
- C = Has condorcet winner who is not plurality and utility winner
-
- NC = Has no Condorcet winner
-
"""
iM = df['output.candidate.winner_majority']
iP = df['output.candidate.winner_plurality']
iC = df['output.candidate.winner_condorcet']
iU = df['output.candidate.winner_utility']
df = df.copy()
df.loc[:, 'categories'] = 'No category'
maj = iM > -1
no_maj = ~maj
MU = (iM == iU)
M = maj & (iM != iU)
CPU = no_maj & (iC == iP) & (iC == iU)
CP = no_maj & (iC == iP) & (iC != iU)
CU = no_maj & (iC == iU) & (iC != iP)
C = (iC > -1) & (iC != iP) & (iC != iU)
PU = no_maj & (iP == iU) & (iP != iC)
NC = (iC == -1)
df.loc[MU, 'categories'] = 'MU'
df.loc[M, 'categories'] = 'M'
df.loc[CPU, 'categories'] = 'CPU'
df.loc[CP, 'categories'] = 'CP'
df.loc[CU, 'categories'] = 'CU'
df.loc[C, 'categories'] = 'C'
df.loc[PU, 'categories'] = 'PU'
df.loc[NC, 'categories'] = 'nc'
return df
# %% Read
g = globalcache.create(globals())
p = read()
df = p.post_data
###########################################
# %% Post
os.makedirs('images', exist_ok=True)
os.chdir('images')
yname = 'args.etype'
# otype = 'regret-efficiency'
# xname = 'output.winner.regret_efficiency_candidate'
otype = 'regret-voter'
xname = 'output.winner.regret_efficiency_candidate'
no_majority = df['output.candidate.winner_majority'] == -1
no_condorcet = df['output.candidate.winner_condorcet'] == -1
regret = 100* (1 - df[xname])
pratio = df['output.candidate.plurality_ratio'] * 100
df = df.reset_index()
df.loc[:, 'plurality_ratio'] = pratio
df.loc[:, 'no_majority'] = no_majority
df.loc[:, 'no_condorcet'] = no_condorcet
df.loc[:, 'regret'] = regret
### Categorize scenario parameters
arg_tol = df['args.user.voter_tolerance']
groupby = df.groupby(by='args.user.voter_tolerance')
keys = groupby.groups.keys()
dframes = (groupby.get_group(k) for k in keys)
# # %% Plot categories
# ### Plot election categories
df = groupby.get_group(list(keys)[0])
etype_num = len(df['args.etype'].unique())
sim_num = len(df) / etype_num
# plots.vset()
# plots.subplot_2row()
# plt.subplot(2, 1, 1)
# sns.distplot(pratio, bins=10, norm_hist=True, kde=False)
# plt.xlabel('% plurality winner ratio')
# plt.ylabel('Scenario probability density')
# plt.title('Probability of Plurality Ratio in Benchmark')
# plt.subplot(2, 1, 2)
df = categorize(df)
c = df['categories']
counts = c.value_counts() / len(c)*100
# # sns.barplot(x=counts.keys(), y=counts.values)
# ax = plots.bar(x=counts.keys(), y=counts.values, fmt='g')
# plt.ylabel('% Occurrence')
# # sns.countplot(x='categories', data=df,)
# plt.xlabel('Scenario Categories')
# plt.title('Majority/Condorcet/Utility/Plurality Occurrences')
# string = '''MU = majority-utility winner
# CU = condorcet-utility winner
# CPU = condorcet-plurality-utility winner
# M = majority winner is not utility winner
# PU = plurality-utility winner
# nc = No condorcet winner.
# CP = condorcet-plurality winner is not utility winner'''
# # place a text box in upper left in axes coords
# props = dict(facecolor='white', alpha=0.5)
# ax.text(0.4, 0.9, string, transform=ax.transAxes, fontsize=10,
# verticalalignment='top',
# horizontalalignment='left',
# bbox=props)
# plt.suptitle('3-Way Election, 1-Dimensional, %d simulations' % sim_num)
# plt.savefig('scenario-categories.png')
# %% Plot heatmaps
i = 0
for key, df in zip(keys, dframes):
# plt.figure(figsize=(12,8))
plots.subplot_2row()
plt.subplot(2, 1, 1)
bins = [0, 30, 40, 50, 60, 70, 80, 90, 100]
ax, dfp = plots.heatmap(x='plurality_ratio', y='args.etype', hue='regret',
data=df, xbin=bins, vmax=25)
plt.xlabel('% plurality winner ratio')
plt.ylabel('')
plt.title('% VSE vs Plurality Ratio')
# plt.hist(pratio, density=True, )
# hist, _ = np.histogram(pratio, bins=bins,) / len(pratio)
# plots.bar(x=bins, )
###############################################################################
df = categorize(df)
ysortkey = dfp.index.values
xsortkey = counts.index.values
plt.subplot(2, 1, 2)
ax, dfp = plots.heatmap(x='categories', y='args.etype', hue='regret',
data=df,
xsortkey=xsortkey,
ysortkey=ysortkey,
vmax=50)
plt.ylabel('')
#ax.set_yticklabels('')
plt.title('% VSE vs Category')
plt.xlabel('Scenario Categories')
plt.subplots_adjust(left=.185, wspace=.025)
plt.suptitle('3-Way Election, 1-Dimensional, voter tolerance=%s, '
'%d simulations' % (key, sim_num))
plt.savefig('vse-%d.png' % i)
i += 1
###############################################################################
```
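The `categorize` logic above can be sanity-checked on a small hand-built frame. The column names follow the simulator's output naming used in this script; the winner indices below are made up purely for illustration.

```python
import pandas as pd

# Hypothetical winner indices for three scenarios (-1 means no such winner).
toy = pd.DataFrame({
    'output.candidate.winner_majority':  [0, -1, -1],
    'output.candidate.winner_plurality': [0,  1,  2],
    'output.candidate.winner_condorcet': [0,  1, -1],
    'output.candidate.winner_utility':   [0,  1,  0],
})
print(categorize(toy)['categories'].tolist())  # expected: ['MU', 'CPU', 'nc']
```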
#### File: election_sim/votesim/ballot.py
```python
import numpy as np
import copy
import votesim
from votesim import votemethods
from votesim import utilities
from votesim.models.vcalcs import distance2rank
from votesim.metrics.metrics import regret_tally
__all__ = ['gen_honest_ballots',
'CombineBallots',
'BaseBallots',
'BallotClass',
]
class BallotClass(object):
"""
Base BallotClass class used to create Ballot sub-classes.
Parameters
----------
ranks : array shape (a, b) or None (default)
Input rank data for `a` voters and `b` candidates, where
1 is the most preferred rank and 0 is unranked.
ratings : array shape (a, b) or None (default)
Input ratings data for `a` voters and `b` candidates from 0 to 1.
distances : array shape (a, b) or None (default)
Voter regret, or preference distance away from each candidate
tol : (a,) array, float, or None
Voter preference tolerance at which candidate ratings are less than zero.
The default is None.
rtol : (a,) array, float, or None
Relative voter preference tolerance based on worst candidate.
Either tol or rtol can be specified, but not both.
The default is None.
udata : array shape (a, c)
User input data that may be used in Ballot subclasses
maxscore : int (default 5)
Maximum integer score for score ballot generation
ballots : :class:`~votesim.ballots.BallotClass` subclass
Ballot to read information from.
"""
def __init__(self, ranks=None, ratings=None, distances=None, tol=None,
rtol=None,
udata=None,
maxscore=5, ballots=None,):
if ballots is not None:
self.from_ballots(ballots)
else:
s = 'if ballots is None, distances & tol must be defined'
assert distances is not None, s
#assert tol is not None, s
self.ranks = ranks
self.ratings = ratings
self.distances = distances
tol = self._init_tol(tol, rtol)
self.tol = tol
self.maxscore=maxscore
self.udata = udata
self._init_subclass()
@utilities.lazy_property
def scores(self):
"""Generate integer scores from ratings."""
if self.ratings is None:
raise ValueError('Ratings must be generated to retrieve scores.')
return np.round(self.maxscore * self.ratings)
@utilities.lazy_property
def votes(self):
"""Plurality votes constructed from ranks."""
if self.ranks is None:
raise ValueError('Ranks must be generated to retrieve votes.')
return votemethods.tools.getplurality(ranks=self.ranks)
def _init_subclass(self):
"""You can stick in custom initialization routines for subclasses here."""
return
    def _init_tol(self, tol, rtol):
        """Manipulate tol so that it is an acceptable parameter.
        Handle default values; rtol is for relative tolerance. Only one of
        tol or rtol may be specified."""
        if tol is not None and rtol is not None:
            raise ValueError('Only either rtol OR tol can be specified.')
        if tol is None and rtol is None:
            tol = self.relative_tol
        elif rtol is not None:
            tol = self.relative_tol * rtol
        else:
            tol = np.array(tol)
        if tol.ndim == 1:
            tol = tol[:, None]
        return tol
def from_ballots(self, ballots):
"""Set data of an Ballot object to arguments.
Parameters
----------
ballots : :class:`~votesim.ballots.BallotClass` subclass
Ballots object
"""
ballots = ballots.copy()
self.ranks = ballots.ranks
self.ratings = ballots.ratings
self.distances = ballots.distances
self.tol = ballots.tol
self.maxscore = ballots.maxscore
self.udata = ballots.udata
return
# @property
# def ranks(self):
# if self._ranks is None:
# raise AttributeError('ballot ranks not yet defined.')
# return self._ranks
# @property
# def ratings(self):
# if self._ratings is None:
# raise AttributeError('ballot ratings not yet defined.')
# return self._ratings
# @property
# def distances(self):
# if self._distances is None:
# raise AttributeError('ballot distances not yet defined.')
# return self._distances
# @property
# def tol(self):
# if self._tol is None:
# raise AttributeError('ballot tol not yet defined.')
# return self._tol
def copy(self):
"""Return copy of Ballots."""
return copy.deepcopy(self)
def run(self, etype, rstate=None, numwinners=1) -> votemethods.eRunner:
"""Run election method on ballots.
Parameters
----------
etype : str
Name of election method
rstate : numpy.random.RandomState
Random number generator
numwinners : int
Number of winners desired.
Returns
-------
:class:`~votesim.votemethods.eRunner`
eRunner election output object
"""
assert etype is not None
ballots = self.get_ballots(etype)
er = votemethods.eRunner(etype=etype,
ballots=ballots,
rstate=rstate,
numwinners=numwinners,)
self._erunner = er
return er
    def get_ballots(self, etype: str):
        """Retrieve the ballots needed for an election method."""
        if etype in votemethods.ranked_methods:
            ballots = self.ranks
        elif etype in votemethods.vote_methods:
            ballots = self.votes
        elif etype in votemethods.scored_methods:
            ballots = self.scores
        elif etype in votemethods.rated_methods:
            ballots = self.ratings
        else:
            raise ValueError('etype %s is not a recognized election method.' % etype)
        return ballots
def set_erunner(self, erunner: votemethods.eRunner):
if erunner is not None:
self._erunner = erunner
return
@property
def erunner(self) -> votemethods.eRunner:
"""eRunner object from last run election."""
try:
return getattr(self, '_erunner')
except AttributeError:
raise AttributeError('self.run(...) must first be executed to access erunner.')
def chain(self, s):
"""Chain together multiple ballot manipulation functions, call
by the method name
Examples
------------
>>> s = 'rank_honest.rate_linear.compromise.bury'
>>> out = self.chain(s)
"""
cmds = s.split('.')
obj = self
for cmd in cmds:
obj = getattr(obj, cmd)()
return obj
def set_maxscore(self, maxscore):
"""Set the maximum score for scored ballots."""
b = self.copy()
b.maxscore = maxscore
return b
@utilities.lazy_property
def relative_tol(self):
"""Set tolerance relative to the worst candidate."""
dmax = np.max(self.distances, axis=1)
return dmax
class CombineBallots(BallotClass):
"""Combine multiple ballot objects.
Not everything is combined, only ranks, ratings, and distances.
Parameters
----------
children : list of type :class:`~votesim.ballots.BallotClass`
Ballots to combine.
"""
def __init__(self, children):
list_ranks = [b.ranks for b in children]
list_ratings = [b.ratings for b in children]
list_dist = [b.distances for b in children]
maxscore = children[0].maxscore
ranks = np.vstack(list_ranks)
ratings = np.vstack(list_ratings)
distances = np.vstack(list_dist)
self.ranks = ranks
self.ratings = ratings
self.distances = distances
self.tol = None
self.maxscore = maxscore
self.udata = None
self.children = children
self._init_subclass()
@utilities.lazy_property
def children_indices(self):
"""Row indices to obtain child's voters for all children in the voter
preference and ballot arrays.
Returns
-------
slices : list of slice
Slice which returns the child
"""
lengths = [len(child.ranks) for child in self.children]
iarr = np.cumsum(lengths)
iarr = np.append(0, iarr)
        slices = [slice(iarr[i], iarr[i+1]) for i in range(len(iarr) - 1)]
return slices
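    # Illustrative example of the slices above (made-up sizes): two children
    # with 3 and 2 voters give lengths [3, 2], cumulative indices [0, 3, 5],
    # and therefore slices [slice(0, 3), slice(3, 5)] into the stacked arrays.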
class BaseBallots(BallotClass):
"""Base ballot construction class.
Create variants of honest ballots -- such as setting cutoffs for ranking,
scaling and setting rating tolerance, etc.
See :class:`~votesim.ballots.BallotClass`.
"""
def rank_honest(self):
"""Construct honest ballots."""
self.ranks = distance2rank(self.distances)
return self
def rate_linear(self):
"""Construct ratings as r = (1 - d/tol)."""
r = (1.0 - self.distances / self.tol)
r = np.maximum(r, 0)
self.ratings = r
return self
def rate_quadratic(self):
"""Construct ratings as r = (1 - d/tol)^2."""
utility = 1.0 - self.distances / self.tol
utility = np.maximum(0, utility)
r = utility ** 2
self.ratings = r
return self
def rate_sqrt(self):
"""Construct ratings as r = sqrt(1 - d/tol)."""
utility = 1.0 - self.distances / self.tol
utility = np.maximum(0, utility)
r = np.sqrt(utility)
self.ratings = r
return self
def rate_norm(self):
"""Construct normalized ballots; rating of best candidate set to maximum rating."""
ratings = self.ratings
max_ratings = np.max(ratings, axis=1)[:, None]
i2 = np.where(max_ratings == 0)
max_ratings[i2] = 1. # avoid divide by zero error for unfilled ballots
factor = 1.0 / max_ratings
self.ratings = ratings * factor
return self
def rank_cut(self):
"""Cut off rankings of candidates.
Cut rankings where ratings are less than zero
(ie, candidates outside tolerance).
"""
err_tol = 1e-5
ii = self.distances > self.tol + err_tol
self.ranks[ii] = 0
return self
def gen_honest_ballots(distances, tol=None, rtol=None, maxscore=5,
base='linear',):
"""
Create voter ballots.
Parameters
----------
distances : (a, b) array
`a` Voter distances from `b` candidates
tol : (a,) array, float, or None
Voter preference tolerance at which candidate ratings are less than zero.
The default is None.
rtol : (a,) array, float, or None
Relative voter preference tolerance based on worst candidate.
Either tol or rtol can be specified, but not both.
The default is None.
maxscore : int, optional
Max ballot integer score. The default is 5.
base : str, optional
Base ballot type. The default is 'linear'.
- 'linear' - Linear mapping of distance to rating
- 'quadratic' - Quadratic mapping of distance to rating
        - 'sqrt' - Square root mapping of distance to rating
Returns
-------
ballots : subclass of :class:`~votesim.ballots.BallotClass`
Constructed voter ballots.
"""
ballots = BaseBallots(distances=distances,
tol=tol,
rtol=rtol,
maxscore=maxscore)
# names = tactics.split(',')
# names = [names.strip() for n in names]
if base == 'linear':
ballots = (ballots.rank_honest()
.rate_linear()
.rate_norm()
.rank_cut()
)
elif base == 'quadratic':
ballots = (ballots.rank_honest()
.rate_quadratic()
.rate_norm()
.rank_cut()
)
elif base == 'sqrt':
ballots = (ballots.rank_honest()
.rate_sqrt()
.rate_norm()
.rank_cut()
)
    else:
        raise ValueError('base %s is not a recognized ballot type.' % base)
    return ballots
```
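A quick usage sketch of the honest-ballot pipeline above. The import path `votesim.ballot` is assumed from this file's location (other docstrings refer to a `votesim.ballots` module), and the numbers are arbitrary.

```python
import numpy as np
from votesim.ballot import gen_honest_ballots  # module path assumed

# 20 voters, 3 candidates: random preference distances in [0, 1).
rng = np.random.RandomState(0)
distances = rng.rand(20, 3)

ballots = gen_honest_ballots(distances, rtol=1.0, maxscore=5, base='linear')
print(ballots.ranks.shape)   # (20, 3) rank ballots, 1 = most preferred
print(ballots.scores.max())  # integer scores between 0 and maxscore
```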
#### File: votesim/benchmarks/tactical_v2.py
```python
import numpy as np
import votesim
import votesim.benchmarks.runtools as runtools
from votesim.models import spatial
from votesim import votemethods
from votesim.metrics import TacticCompare
tactics_ranked = {}
tactics_ranked['r0'] = ['bury']
tactics_ranked['r1'] = ['deep_bury']
tactics_ranked['r2'] = ['compromise', 'bury']
tactics_ranked['r3'] = ['compromise', 'deep_bury']
tactics_ranked['r4'] = ['compromise']
tactics_ranked['r5'] = ['truncate_hated']
tactics_ranked['r6'] = ['truncate_preferred']
tactics_ranked['r7'] = ['bullet_preferred']
tactics_scored = {}
tactics_scored['s0'] = ['bury']
tactics_scored['s1'] = ['compromise', 'bury']
tactics_scored['s2'] = ['compromise']
tactics_scored['s3'] = ['truncate_hated']
tactics_scored['s4'] = ['truncate_preferred']
tactics_scored['s5'] = ['bullet_preferred']
tactics_scored['s6'] = ['minmax_hated']
tactics_scored['s7'] = ['minmax_preferred']
tactics_plurality = {}
tactics_plurality['p1'] = ['bullet_preferred']
statnames = [
'output.tactic_compare.regret_efficiency_candidate.topdog-0',
'output.tactic_compare.regret_efficiency_candidate.underdog-0',
]
def get_tactics(etype: str) -> list:
ballot_type = votemethods.get_ballot_type(etype)
if ballot_type == 'rank':
return list(tactics_ranked.values())
elif ballot_type == 'score' or ballot_type == 'rate':
return list(tactics_scored.values())
elif ballot_type == 'vote':
return list(tactics_plurality.values())
def get_topdog_tactic(etype: str) -> str:
"""Return topdog defensive strategy"""
return 'bullet_preferred'
def tactical_model_v2(
name: str,
methods : list,
seed=0,
numvoters=51,
cnum=5,
cstd=1.5,
ndim=1,
tol=None,
ratio=1.0,
) -> spatial.Election:
"""Tactical Election model that test every single candidate as an underdog,
and tests topdog resistance using bullet voting.
"""
e = spatial.Election(None, None, seed=seed, name=name)
# Construct base strategy
strategy_base = {}
strategy_base['ratio'] = ratio
strategy_base['subset'] = 'underdog'
# Create underdog strategy
strategy2 = strategy_base.copy()
# Create topdog strategy
strategy3 = strategy_base.copy()
strategy3['tactics'] = ['bullet_preferred']
strategy3['subset'] = 'topdog'
# Generate voters
v = spatial.Voters(seed=seed, tol=tol, base='linear')
v.add_random(numvoters, ndim=ndim)
# Generate candidates
c = spatial.Candidates(v, seed=seed)
c.add_random(cnum, sdev=cstd)
e.set_models(voters=v, candidates=c)
# Construct election identification
eid = (seed, numvoters, cnum, ndim,)
for method in methods:
# Set empty (honest) strategy
e.set_models(strategies=spatial.StrategiesEmpty())
e.user_data(eid=eid, strategy='honest')
result1 = e.run(etype=method)
winner = result1.winners[0]
stats_honest = result1.stats
underdog_list = list(range(cnum))
underdog_list.remove(winner)
# test each underdog
for underdog in underdog_list:
strategy2['underdog'] = underdog
strategy3['underdog'] = underdog
# test each tactic
tactics = get_tactics(method)
for tactic in tactics:
strategy2['tactics'] = tactic
# Run one-sided strategy
s = spatial.Strategies(v).add(**strategy2)
e.set_models(strategies=s)
e.user_data(eid=eid, strategy='one-sided')
result2 = e.run(etype=method, result=result1)
# Create tactical comparison output, add to output
tactic_compare = TacticCompare(
e_strat=result2.stats,
e_honest=stats_honest)
e.append_stat(tactic_compare)
# Run two-sided strategy with top-dog bullet vote.
s.add(**strategy3)
e.set_models(strategies=s)
e.user_data(eid=eid, strategy='two-sided')
result3 = e.run(etype=method, result=result1)
# Create tactical comparison output, add to output
tactic_compare = TacticCompare(
e_strat=result3.stats,
e_honest=stats_honest)
e.append_stat(tactic_compare)
return e
def tactical_v2_0():
name = 'tactical_v2_0'
model = tactical_model_v2
kwargs = {}
kwargs['name'] = name
kwargs['seed'] = np.arange(100)
kwargs['numvoters'] = 51
kwargs['ndim'] = [1, 2,]
kwargs['cnum'] = [5]
case_args = runtools.CaseGenerator(**kwargs)
benchmark = runtools.CreateBenchmark(name, model, case_args)
return benchmark
def tactical_v2_1():
name = 'tactical_v2_1'
model = tactical_model_v2
kwargs = {}
kwargs['name'] = name
kwargs['seed'] = np.arange(1000)
kwargs['numvoters'] = 51
kwargs['ndim'] = [1, 2, 3]
kwargs['cnum'] = [3, 5]
case_args = runtools.CaseGenerator(**kwargs)
benchmark = runtools.CreateBenchmark(name, model, case_args)
return benchmark
```
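A minimal sketch of exercising the model above directly rather than through the benchmark runner. The etype string `'irv'` is an assumption here; substitute any method name registered in `votesim.votemethods`.

```python
# Run the tactical model for a single election method.
# 'irv' is an assumed etype name registered in votemethods.
e = tactical_model_v2(name='demo', methods=['irv'], seed=0,
                      numvoters=51, cnum=3, ndim=1)
```

Each call runs the honest election once per method, then loops over every non-winning candidate as the underdog, recording one-sided and two-sided (topdog bullet-voting) results through `TacticCompare`.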
#### File: votesim/metrics/metrics.py
```python
import copy
import logging
import pdb
import numpy as np
from votesim import utilities
from votesim.utilities import modify_lazy_property
from votesim.models import vcalcs
from votesim.models.dataclasses import (ElectionData,
VoterData,
CandidateData)
from votesim.votemethods.condcalcs import condorcet_check_one
from votesim.votemethods.tools import winner_check
# from votesim.models.spatial import Voters, Candidates, Election
# from votesim.models import spatial
logger = logging.getLogger(__name__)
#def interp_nearest(x, y):
# x = np.array(x)
# if x.shape[1] == 1:
# class ___ElectionData(object):
# """Election data storage for arrays to be passed on to metrics.
# Store to make output calculations.
# Not meant to be used directly by user, created by ElectionStats.
# """
# def __init__(self,
# voters: VoterData=None,
# candidates: CandidateData=None,
# election: ElectionData=None):
# self.weights = None
# self.order = 1
# self.set(voters, candidates, election)
# return
# def set(self, voters=None, candidates=None, election=None):
# if voters is not None:
# self.set_voters(voters)
# if candidates is not None:
# self.set_candidates(candidates)
# if election is not None:
# self.set_election(election)
# def set_voters(self, voters):
# self.voters = voters.pref
# try:
# self.weights = voters.weights
# except AttributeError:
# self.weights = None
# self.order = voters.order
# def set_candidates(self, candidates):
# self.candidates = candidates.pref
# def set_election(self, election):
# self.group_indices = election.ballotgen.index_dict
# self.winners = election._result_calc.winners
# self.ballots = election._result_calc.ballots
# self.ties = election._result_calc.ties
# class _ElectionStatData(object):
# """Store electionStat temporary data that must be used to generate stats."""
# def __init__(self):
# return
# def set(self,
# voters: VoterData=None,
# candidates: CandidateData=None,
# election: ElectionData=None):
# pass
class BaseStats(object):
"""Base inheritance class for Stats objects.
Use this to create new Statistic Output objects.
All attributse that do not start with underscore '_' will be used as
output variables to be stored.
Parameters
----------
electionStats : `ElectionStats`
ElectionStats parent object
Attributes
----------
_electionStats : ElectionStats
Top-level output object
_electionData : ElectionData
Temporary election data used for making calculations
name : str
Name of statistic for output dict
Example
-------
Create your new output object
>>> import numpy as np
>>>
>>> class MyStats(BaseStats):
>>> @votesim.utilities.lazy_property
>>> def stat1(self):
>>> v = self._electionData.voters
>>> return np.mean(v, axis=0)
"""
def __init__(self, electionStats: "ElectionStats"):
self._electionStats = electionStats
self._reinit()
return
def _reinit(self):
"""Define custom initialization routines here."""
self._name = 'base'
return
@utilities.lazy_property
def _keys(self):
"""Retrieve output keys as list."""
a = dir(self)
new = []
for name in a:
if name.startswith('_'):
pass
else:
new.append(name)
return new
@property
def _dict(self):
"""Retrieve all statistics output and return as `dict`."""
keys = self._keys
return {k: getattr(self, k) for k in keys}
@utilities.lazy_property
def _docs(self):
"""Retrieve all descriptions of outputs and return as dict."""
clss = type(self)
new = {}
for attrname in self._dict.keys():
doc = getattr(clss, attrname).__doc__
# Get rid of newlines in docstring
doc = doc.replace('\n', ' ')
# Get rid of too much whitespace
doc = ' '.join(doc.split())
new[attrname] = doc
return new
class VoterStats(BaseStats):
"""Voter population statistics."""
def __init__(self,
data: VoterData=None,
pref: np.ndarray=None,
weights=None,
order: int=None):
if data is not None:
pref = data.pref
weights = data.weights
order = data.order
        self._voters = pref
        self._weights = weights
        self._order = order
        self._reinit()
        return
def _reinit(self):
self._name = 'voter'
return
@utilities.lazy_property
def regret_mean(self):
"""Regret of voters if winner is located at preference mean."""
return mean_regret(self._voters, self._weights, order=self._order)
@utilities.lazy_property
def regret_median(self):
"""Regret of voters if winner is located at preference median."""
return median_regret(self._voters, self._weights)
@utilities.lazy_property
def regret_random_avg(self):
"""Average regret of voters if winner is randomly selected from voter population."""
r = voter_regrets(self._voters,
self._weights,
maxsize=5000,
order=self._order,
seed=0)
return np.mean(r)
@utilities.lazy_property
def pref_mean(self):
"""(n,) array: Preference mean of voters for n preference dimensions."""
return np.mean(self._voters, axis=0)
@utilities.lazy_property
def pref_median(self):
"""(n,) array: Preference median of voters for n preference dimensions."""
return np.median(self._voters, axis=0)
@utilities.lazy_property
def pref_std(self):
"""(n,) array: Preference standard deviation of voters.
For n preference dimensions.
"""
return np.std(self._voters, axis=0)
class CandidateStats(BaseStats):
"""Candidate statistics.
See base class :class:`~votesim.metrics.BaseStats`.
Dependent on :class:`~votesim.metrics.ElectionStats.voters`.
"""
def __init__(self,
pref: np.ndarray,
distances: np.ndarray,
):
self._distances = distances
self._pref = pref
self._reinit()
return
def _reinit(self):
self._name = 'candidate'
return
@utilities.lazy_property
def pref(self):
"""(a, b) array: Candidate preference coordinates."""
return self._pref
@utilities.lazy_property
def regrets(self):
"""(c,) array: voter regret for each candidate."""
distances = self._distances
return np.mean(distances, axis=0)
@utilities.lazy_property
def _regret_best(self):
"""Retrieve best regrests and corresponding winner indices."""
regrets = self.regrets
ii = np.argsort(regrets)
ii_ideal = ii[0]
ri = np.mean(regrets[ii_ideal])
return ri, ii_ideal
@property
def regret_best(self):
"""Best possible regret for the best candidate in election."""
return self._regret_best[0]
@utilities.lazy_property
def regret_avg(self):
"""Average regret if a random candidate became winner."""
return np.mean(self.regrets)
@property
def winner_utility(self):
"""Best utility candidate in election."""
return self._regret_best[1]
@utilities.lazy_property
def winner_condorcet(self):
"""Condorcet winner of election, return -1 if no condorcet winner found."""
distances = self._distances
return condorcet_check_one(scores=-distances)
@utilities.lazy_property
def _winner_plurality_calcs(self):
"""Plurality winner of election; return -1 if tie found.
Returns
-------
winner : int
Candidate index of plurality winner
votes : int
Number of votes cast for plurality winner
counts : array shape (a,)
Vote counts for all candidates
"""
distances = self._distances
ii = np.argmin(distances, axis=1)
ulocs, ucounts = np.unique(ii, return_counts=True)
counts = np.zeros(distances.shape[1], dtype=int)
counts[ulocs] = ucounts
votes = np.max(counts)
winner, ties = winner_check(counts, numwin=1)
if len(ties) > 1:
winner = -1
else:
winner = winner[0]
return winner, votes, counts
@property
def winner_plurality(self):
"""Plurality winning candidate of election."""
return self._winner_plurality_calcs[0]
@utilities.lazy_property
def winner_majority(self):
"""Majority winner of election; return -1 if no majority found."""
winner, votes, counts = self._winner_plurality_calcs
vnum = len(self._distances)
if votes > vnum/2.:
return winner
else:
return -1
@utilities.lazy_property
def plurality_ratio(self):
"""float: Ratio of plurality winning votes to total votes.
        This metric attempts to measure the competitiveness of an election.
"""
votes = self._winner_plurality_calcs[1]
vnum = len(self._distances)
return float(votes) / vnum
# @utilities.lazy_property
# def utility_ratio(self):
# """Utility ratio of the best candidate compared to average candidate.
# Normalized by the utility range from random to ideal candidate. This
# metric attempts to measure if there's a clear stand-out winner in
# the election.
# """
# v_median = self._electionStats.voter.regret_median
# #v_rand = self._electionStats.voter.regret_random_avg
# v_best = self.regret_best
# v_avg = self.regret_avg
# return (v_avg - v_best) / (v_avg - v_median)
class ElectionStats(object):
"""Collect election output data.
Re-routes that data
towards various calculations and post-process variables.
Parameters
----------
voters : array shape (a, ndim)
Voter preference data for `ndim` preference dimensions.
weights : float or array shape (a, ndim)
Preference dimension weights
order : int or None (default)
Distance calculation norm order.
candidates : array shape (b, ndim)
Candidate preference data for `ndim` preference dimensions.
winners : array shape (c,)
Winners candidate index locations for election.
distances : array shape (a, b)
Preference distances of each voter away from each candidate
ballots : array shape (a, b)
Ballots used for election for each voter for each candidate.
Attributes
----------
_election_data : ElectionData
_voter_data : VoterData
_candidate_data : CandidateData
"""
voters : VoterStats
candidates: CandidateStats
_election_data: ElectionData
_voter_data : VoterData
_candidate_data : CandidateData
def __init__(self,
voters: VoterData=None,
candidates: CandidateData=None,
election: ElectionData=None):
self._output_categories = self._default_categories
self._cache_result = {}
self.set_data(voters=voters, candidates=candidates, election=election)
return
def set_data(self,
voters: VoterData=None,
candidates: CandidateData=None,
election: ElectionData=None,):
"""Set election data, delete cached statistics."""
self._cache_result = {}
if voters is not None:
self.voters = voters.stats
self._voter_data = voters
if candidates is not None:
self.candidates = candidates.stats
self._candidate_data = candidates
if election is not None:
self._election_data = election
return
def set_raw(self,
voters=None,
weights=None,
order=1,
candidates=None,
winners=None,
distances=None,
ballots=None,
ties=None):
vstat = VoterStats(pref=voters, weights=weights, order=order)
vdata = VoterData(pref=voters,
weights=weights,
order=order,
stats=vstat,
tol=None,
base='linear')
distances = vcalcs.voter_distances(voters=voters,
candidates=candidates,
weights=weights,
order=order)
cstats = CandidateStats(pref=candidates, distances=distances)
cdata = CandidateData(pref=candidates,
distances=distances,
stats=cstats)
edata = ElectionData(ballots=ballots,
winners=winners,
ties=ties,
group_index=None)
self.set_data(voters=vdata, candidates=cdata, election=edata)
return
# def set_raw(self, voters=None, weights=-1, order=None, candidates=None,
# winners=None, distances=None, ballots=None, ties=None,
# **kwargs):
# """Set new election raw data, delete cached statistics."""
# self._cache_result = {}
# if voters is not None:
# self.electionData.voters = voters
# self._cache_voter = {}
# self._cache_candidate = {}
# if weights != -1:
# self.electionData.weights = weights
# self._cache_voter = {}
# self._cache_candidate = {}
# if order is not None:
# self.electionData.order = order
# if candidates is not None:
# self.electionData.candidates = candidates
# self._cache_candidate = {}
# if winners is not None:
# self.electionData.winners = winners
# if ballots is not None:
# self.electionData.ballots = ballots
# if ties is not None:
# self.electionData.ties = ties
# ### Calculate voter distances
# calculate = False
# if distances is None:
# if ((self.electionData.candidates is not None) and
# (self.electionData.voters is not None)):
# calculate = True
# else:
# self.electionData.distances = distances
# if calculate:
# self.electionData.distances = vcalcs.voter_distances(
# voters=self.electionData.voters,
# candidates=self.electionData.candidates,
# weights=self.electionData.weights,
# order=self.electionData.order,
# )
# self.electionData.set(**kwargs)
# return
_default_categories = [
'voters',
'candidates',
'winner',
'winner_categories',
'ballot'
]
def set_categories(self, names, fulloutput=False):
"""Set output categories to output.
Parameters
----------
names : list of str
Output category names.
fulloutput : bool, optional
If True output all avaialable outputs. The default is False.
Returns
-------
None.
"""
if fulloutput == True:
names = self.get_categories()
self._output_categories = names
return
def get_categories(self):
"""Retrieve available output categories."""
return self._default_categories
# def add_output(self, output, name='', cache='_cache_result'):
# """Add an output object.
# This output's base class must be :class:`~votesim.metrics.BaseStats`.
# Parameters
# ----------
# name : str
# Name of output
# output : subtype of :class:`~votesim.metrics.BaseStats`
# User defined output. Define this output by creating a class
# inherited from :class:`~votesim.metrics.BaseStats`
# cache : str
# Name of output cache to store results. This determines when
# output is retained and when it is deleted and regenerated
# during election model creation. The options are
# - '_cache_voter' - Clear cache when voter data changes (least aggressive)
# - '_cache_candidate' - Clear cache when candidate data changes
# - '_cache_result' - Clear cache after every election (most aggressive)
# Returns
# -------
# None.
# """
# if name == '':
# try:
# name = getattr(output, 'name')
# except AttributeError:
# name = output.__name__.lower()
# if hasattr(self, name):
# s = 'Name "%s" for output already taken. Use another' % name
# raise ValueError(s)
# if type(output) is type:
# # Set cache decorator. The default clears cache every new election.
# output = utilities.lazy_property2(cache)(output)
# else:
# utilities.modify_lazy_property(instance=self,
# name=name,
# value=output,
# dictname=cache)
# setattr(self, name, output)
# #self._default_categories.append(name)
# self._output_categories.append(name)
# return
def get_dict(self):
"""Retrieve desired category key and values and return dict of dict."""
d = {}
for key in self._output_categories:
stat = getattr(self, key)
di = stat._dict
d[key] = di
return d
def get_docs(self):
"""Retrieve all available statistic descriptions as dict."""
d = {}
for key in self._output_categories:
stat = getattr(self, key)
di = stat._docs
d[key] = di
return d
# def calculate_distance(self, data):
# """Re-calculate distance as the distance from Election may have error."""
# distances = vcalcs.voter_distances(
# voters=data.voters.pref,
# candidates=data.candidates.pref,
# weights=data.voters.weights,
# order=data.voters.order,
# )
# return distances
@utilities.lazy_property2('_cache_result')
def winner(self):
"""See :class:`~votesim.metrics.WinnerStats`."""
return WinnerStats(self)
@utilities.lazy_property2('_cache_result')
def winner_categories(self):
"""See :class:`~votesim.metrics.WinnerCategories`."""
return WinnerCategories(self)
@utilities.lazy_property2('_cache_result')
def ballot(self):
"""See :class:`~votesim.metrics.BallotStats`."""
return BallotStats(self)
def copy(self):
return copy.deepcopy(self)
class WinnerStats(BaseStats):
"""Winner output statistics."""
def _reinit(self):
self._candidate_regrets = self._electionStats.candidates.regrets
self._data = self._electionStats._election_data
self._winners = self._data.winners
self._name = 'winner'
return
@utilities.lazy_property
def regret(self):
"""Overall satisfaction of all winners for all voters."""
candidate_regrets = self._candidate_regrets
ii = self._winners
winner_regrets = candidate_regrets[ii]
return np.mean(winner_regrets)
@utilities.lazy_property
def regret_efficiency_candidate(self):
"""Voter satisfaction efficiency, compared to random candidate."""
random = self._electionStats.candidates.regret_avg
best = self._electionStats.candidates.regret_best
U = self.regret
R = random
B = best
vse = (U - R) / (B - R)
return vse
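    # Illustrative arithmetic (made-up numbers): with winner regret U = 1.2,
    # random-candidate regret R = 2.0 and best-candidate regret B = 1.0,
    # VSE = (1.2 - 2.0) / (1.0 - 2.0) = 0.8.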
@utilities.lazy_property
def regret_efficiency_voter(self):
"""Voter satisfaction.
VSE equation normalized to voter
population regret of an ideal winner vs a random voter.
"""
v_random = self._electionStats.voters.regret_random_avg
v_median = self._electionStats.voters.regret_median
best = self._electionStats.candidates.regret_best
U = self.regret
R2 = v_random
R1 = v_median
B = best
return 1.0 - abs(U - B) / (R2 - R1)
@utilities.lazy_property
def regret_normed(self):
"""Voter regret normalized to ideal."""
U = self.regret
R = self._electionStats.voters.regret_median
return U / R - 1
@property
def winners(self):
"""int array: Index location of winners."""
return self._data.winners
@property
def ties(self):
"""int array: Index location of ties."""
return self._data.ties
class WinnerCategories(BaseStats):
"""Determine whether majority, condorcet, or utility winner was elected."""
def _reinit(self):
self._winners = self._electionStats._election_data.winners
self._name = 'winner_categories'
return
@utilities.lazy_property
def is_condorcet(self):
"""bool: check whether condorcet winner was elected."""
ii = self._electionStats.candidates.winner_condorcet
if self._winners[0] == ii:
return True
return False
@utilities.lazy_property
def is_majority(self):
"""bool: check if majority winner was elected."""
ii = self._electionStats.candidates.winner_majority
if self._winners[0] == ii:
return True
return False
@utilities.lazy_property
def is_utility(self):
"""bool: check if utility winner was elected."""
ii = self._electionStats.candidates.winner_utility
if self._winners[0] == ii:
return True
return False
class BallotStats(BaseStats):
"""Ballot marking statistics."""
def _reinit(self):
self._ballots = self._electionStats._election_data.ballots
self._name = 'ballot'
return
@utilities.lazy_property2('_cache_ballot')
def _ballot_stats(self) -> dict:
ballots = np.atleast_2d(self._ballots)
ballot_num, candidate_num = ballots.shape
# Get number of candidates marked for each ballot
marked_array = np.sum(ballots > 0, axis=1)
# Get ballots where bullet voting happened
bullet_num = np.sum(marked_array == 1)
bullet_ratio = bullet_num / ballot_num
#Get ballots where all but one candidate is marked
full_num = np.sum(marked_array >= (candidate_num - 1))
full_ratio = full_num / ballot_num
marked_num = np.sum(marked_array)
marked_avg = np.mean(marked_array)
marked_std = np.std(marked_array)
d = {}
d['ballot.bullet.num'] = bullet_num
d['ballot.bullet.ratio'] = bullet_ratio
d['ballot.full.num'] = full_num
d['ballot.full.ratio'] = full_ratio
d['ballot.marked.num'] = marked_num
d['ballot.marked.avg'] = marked_avg
d['ballot.marked.std'] = marked_std
return d
@property
def bullet_num(self):
"""Number of ballots where voters only bullet voted for 1 candidate."""
return self._ballot_stats['ballot.bullet.num']
@property
def bullet_ratio(self):
"""Ratio of ballots where voters only bullet voted for 1 candidate."""
return self._ballot_stats['ballot.bullet.ratio']
@property
def full_num(self):
"""Number of ballots where all but one candidate is marked."""
        return self._ballot_stats['ballot.full.num']
@property
def full_ratio(self):
"""Ratio of ballots where all but one candidate is marked."""
        return self._ballot_stats['ballot.full.ratio']
@property
def marked_num(self):
"""Total number of marked candidates for all ballots."""
return self._ballot_stats['ballot.marked.num']
@property
def marked_avg(self):
"""Average number of marked candidates per ballot."""
return self._ballot_stats['ballot.marked.avg']
@property
def marked_std(self):
"""Std deviation of marked candidates per ballot."""
return self._ballot_stats['ballot.marked.std']
class PrRegret(BaseStats):
"""Metrics for proportional representation."""
def _reinit(self):
edata = self._electionStats._election_data
cdata = self._electionStats._candidate_data
self._distances = cdata.distances
self._num_voters, self._num_candidates = self._distances.shape
self._num_winners = len(edata.winners)
self._winners = edata.winners
self._name = 'pr_regret'
return
@utilities.decorators.lazy_property
def _nearest_winners(self):
"""(a,) array: index locations of the nearest winners for each voter.
For `a` total voters.
"""
return np.argmin(self._distances[:, self._winners], axis=1)
@utilities.decorators.lazy_property
def _nearest_winner_distances(self):
"""array shaped (a,)
Preference distances of nearest winner for `a` voters.
"""
ii = np.arange(self._num_voters)
jj = self._nearest_winners
return self._distances[ii, jj]
@utilities.decorators.lazy_property
def avg_regret(self):
"""float: Average voter regret for his nearest winner."""
distances = self._nearest_winner_distances
num_voters = self._num_voters
num_winners = self._num_winners
regret = np.sum(distances) / num_voters
regret = regret * num_winners
return regret
@utilities.decorators.lazy_property
def winners_regret(self):
"""(b,) array: Avg voter regrets for each winner."""
num_voters = self._num_voters
num_winners = self._num_winners
sregrets = []
for ii in range(num_winners):
index = (ii == self._nearest_winners)
distances = self._nearest_winner_distances[index]
regret = np.sum(distances)
sregrets.append(regret)
sregrets = np.array(sregrets) / num_voters * num_winners
return sregrets
@utilities.decorators.lazy_property
def winners_regret_std(self):
"""float: Standard deviation of nearest regrets for each winner.
An ideal proportional system ought to have low std deviation.
"""
return np.std(self.winners_regret)
@utilities.decorators.lazy_property
def std_num_voters_per_winner(self):
"""float: Standard deviation of number of nearest voters for each winner."""
num_voters = self._num_voters
num_winners = self._num_winners
wcounts = []
for ii in range(num_winners):
wcount = np.sum(ii == self._nearest_winners)
wcounts.append(wcount)
voters_per_winner = num_voters / num_winners
std = np.std(wcounts) / voters_per_winner
return std
def candidate_regrets(voters, candidates, weights=None, order=1):
"""Calculate the voter regret for each candidate or winner.
Parameters
----------
voters : array (a, n)
Voter preferences; n-dimensional voter cardinal preferences for n issues.
candidates : array (b, n)
Candidate preferences for `b` candidates and `n`-dimensional issues.
Returns
-------
out : (b,) array
Average preference distance of voters from each candidate numbering `b`.
"""
voters = np.atleast_2d(voters)
candidates = np.atleast_2d(candidates)
num_voters = len(voters)
# distance shape (a, b) for `a` num voters, `b` num candidates.
distances = vcalcs.voter_distances(voters,
candidates,
weights=weights,
order=order)
avg_distances = np.sum(distances, axis=0) / num_voters
return avg_distances
def voter_regrets(voters, weights=None,
order=1, pnum=10, maxsize=5000, seed=None):
"""Calculate the voter regrets for each other if voters became a candidate.
Parameters
----------
voters : array shape (a, n)
Voter preferences; `a` number of voters, cardinal preferences for `n` issues.
weights : None or array shape (a, n)
Dimensional weightings of each voter for each dimension.
Only relevant if n > 1
order : int
Order of norm
* 1 = taxi-cab norm; preferences for each issue add up
* 2 = euclidean norm; take the sqrt of squares.
pnum : int
Number of voters to calculate distances for at-a-time, for memory issues
maxsize: int
For large populations this calculation is expensive. Use this to sample
a subset of the voter population. Default 5000.
Set to None to use all voters.
Returns
-------
out : array shape (c,)
Voter regrets for each voter as a proposed candidate.
        - c = a if the number of voters <= maxsize, or if maxsize is None
        - c = maxsize otherwise, for sampled voters.
"""
cnum = len(voters)
if maxsize is not None:
if cnum > maxsize:
rs = np.random.RandomState(seed)
ii = rs.choice(cnum, size=maxsize, replace=False)
voters = voters[ii]
numbers = np.arange(0, cnum + pnum, pnum)
lb_nums = numbers[0:-1]
ub_nums = numbers[1:]
dlist = []
for lb, ub in zip(lb_nums, ub_nums):
candidatesi = voters[lb : ub]
try:
d = candidate_regrets(voters, candidatesi, weights=weights, order=order)
dlist.append(d)
except MemoryError:
return voter_regrets(voters, weights, order, pnum=1)
return np.concatenate(dlist)
def consensus_regret(voters, winners, _distances=None, order=1):
"""Measure overall average satisfaction of all winners for all voters.
Parameters
----------
voters : array, shape (a, n)
Voter preferences; n-dimensional voter cardinal preferences for n issues.
winners : array, shape (b, n)
Winner preferences for `b` winners and `n`-dimensional issues.
Returns
-------
regret : float
Consensus voter regret metric
"""
num_winners = len(winners)
if _distances is not None:
distances = _distances
else:
distances = candidate_regrets(voters, winners, order=order)
regret = np.sum(distances) / num_winners
return regret
def mean_regret(voters, weights=None, order=1):
"""
Measure overall regret of voters if a candidate located at the centroid was elected.
Parameters
----------
voters : array, shape (a, n)
Voter preferences; n-dimensional voter cardinal preferences for n issues.
weights : array, shape (a, n)
Voter preference weights for each preference. (ie, voters care
more about some preferences than others).
"""
num = len(voters)
center = np.mean(voters, axis=0)
if weights is None:
diff = voters - center
else:
diff = (voters - center) * weights
dist = np.sum(np.linalg.norm(diff, axis=1, ord=order)) / num
return dist
def median_regret(voters, weights=None, order=1):
num = len(voters)
center = np.median(voters, axis=0)
if weights is None:
diff = voters - center
else:
diff = (voters - center) * weights
dist = np.sum(np.linalg.norm(diff, axis=1, ord=order)) / num
return dist
def regret_std(voters, meanvoter=None, weights=None, order=1):
if meanvoter is None:
v_mean = np.mean(voters, axis=0)
else:
v_mean = meanvoter
v_dist = vcalcs.voter_distances(voters,
v_mean[None, :],
weights=weights,
order=order)
std = np.std(v_dist)
return std
#
#def _ballot_stats(self, election):
#
# scores = election.scores
# ranks = election.ranks
# ratings = election.ratings
#
# num_scored = np.sum(scores > 0, axis=1)
# num_ranked = np.sum(ranks > 0, axis=1)
# num_rated = np.sum(ratings > 0, axis=1)
#
# self.avg_num_rated = np.average(num_rated)
# self.avg_num_scored = np.average(num_scored)
# self.avg_num_ranked = np.average(num_ranked)
# self.std_num_scored = np.std(num_scored)
# self.std_num_ranked = np.std(num_ranked)
#
#
def regret_tally(estats: ElectionStats):
"""Estimate front running candidates for a utility maximizing voting method.
Parameters
----------
estats : ElectionStats
Election Stats to generate regret tally from.
Returns
-------
tally : array(cnum,)
1-d array of length candidate num, measure of front-runner status
with 1.0 as the best front runner.
"""
    cand_regrets = estats.candidates.regrets
    regret_best = estats.candidates.regret_best
tally = 2.0 - cand_regrets / regret_best
return tally
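# Illustrative regret_tally arithmetic (made-up numbers): candidate regrets
# [1.0, 1.5, 2.0] with regret_best = 1.0 give a tally of
# 2.0 - [1.0, 1.5, 2.0] / 1.0 = [1.0, 0.5, 0.0], so the utility winner scores
# 1.0 and worse candidates fall toward zero.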
class __ElectionStats_OLD(object):
"""Calculate and store various regret metrics
Parameters
----------
voters : array, shape (a, n)
Voter preferences; n-dimensional voter cardinal preferences for n issues.
candidates : array (b, n)
Candidate preferences for `b` candidates and `n`-dimensional issues.
winners : array, shape (b, n)
Winner preferences for `b` winners and `n`-dimensional issues.
ballots : array, shape (a, b)
Submitted ballots, rows as voters & columns as candidates
- Zero data ballots are minimum score, unranked, or not chosen candidates.
order : int
Order of regret distance calculations (default 1)
Usage
------
To access all metrics, use
>>> self.get_dict()
To retrieve descriptions for all matrics, use
>>> self.get_docs()
"""
def __init__(self, voters=None, weights=None,
candidates=None, winners=None, ballots=None, order=1):
self.stats = {}
self._voters = voters
self._weights = weights
self._candidates = candidates
self._winners = winners
self._ballots = ballots
self._order = order
self._cache_voter = {}
self._cache_candidate = {}
self._cache_result = {}
self._distances = None
# self.run(voters=voters,
# weights=weights,
# candidates=candidates,
# winners=winners,
# ballots=ballots)
return
def set(self,
voters=None,
weights=-1,
candidates=None,
winners=None,
ballots=None):
"""Set voters, weights, candidates or winners to recalculate"""
if voters is not None:
self._voters = voters
self._cache_voter = {}
self._cache_candidate = {}
self._cache_result = {}
if weights != -1:
self._weights = weights
self._cache_voter = {}
self._cache_candidate = {}
self._cache_result = {}
if candidates is not None:
self._candidates = candidates
self._cache_candidate = {}
self._cache_result = {}
if winners is not None:
self._winners = winners
self._cache_result = {}
if ballots is not None:
self._ballots = ballots
self._distances = vcalcs.voter_distances(voters=self._voters,
candidates=self._candidates,
weights=self._weights,
order=1
)
# def run(self, voters=None, weights=None,
# candidates=None, winners=None, ballots=None):
#
# d = self.stats.copy()
# if voters is not None:
# stats1 = self._voter_stats(voters, weights)
# d.update(stats1)
# self.stats = d
#
# if (candidates is not None) and (winners is not None):
# stats2 = self._result_stats(voters, candidates, winners, weights)
# d.update(stats2)
#
# if ballots is not None:
# stats3 = self._ballot_stats(ballots)
# d.update(stats3)
#
# self.stats = d
# return
def _get_category_keys(self, category):
"""Divide metrics into categories defined by an attribute prefix"""
a = dir(self)
prefix = category + '_'
new = {}
for name in a:
if name.startswith(prefix):
newkey = name.replace(prefix, category + '.')
new[newkey] = name
return new
@property
def _keys_voter(self):
"""Retrieve voter metrics' attribute names"""
category = 'voter'
return self._get_category_keys(category)
@property
def _keys_candidate(self):
"""Retrieve candidate metrics' attribute names"""
category = 'candidate'
return self._get_category_keys(category)
@property
def _keys_regret(self):
"""Retrieve regret metrics' attribute names"""
category = 'regret'
return self._get_category_keys(category)
@property
def _keys_winner(self):
"""Retrieve winner metrics' attribute names"""
category = 'winner'
return self._get_category_keys(category)
@property
def _keys_ballot(self):
"""Retrieve ballot metrics' attribute names"""
category = 'ballot'
return self._get_category_keys(category)
@utilities.lazy_property
def _keydict(self):
"""Retrieve dict keynames that retrieve attribute data"""
new = {}
new.update(self._keys_voter)
new.update(self._keys_candidate)
new.update(self._keys_regret)
new.update(self._keys_winner)
new.update(self._keys_ballot)
return new
def get_keys(self):
"""Get a list of all available statistics"""
return list(self._keydict.keys())
def get_dict(self):
"""Retrieve all available statistics"""
new = {}
for key, attrname in self._keydict.items():
try:
new[key] = getattr(self, attrname)
except RuntimeError:
pass
return new
def get_docs(self):
"""Retrieve all available statistic descriptions as dict"""
clss = type(self)
new = {}
for key, attrname in self._keydict.items():
doc = getattr(clss, attrname).__doc__
doc = doc.replace('\n', ' ')
doc = ' '.join(doc.split())
new[key] = doc
return new
@property
def voters(self):
if self._voters is None:
raise RuntimeError('Voters are not yet defined in Metrics')
return self._voters
@property
def weights(self):
return self._weights
@property
def candidates(self):
if self._candidates is None:
raise RuntimeError('Candidates are not yet defined in Metrics')
return self._candidates
@property
def winners(self):
if self._winners is None:
raise RuntimeError('Winners are not yet defined in Metrics')
return self._winners
@property
def ballots(self):
if self._ballots is None:
raise RuntimeError('Ballots are not yet defined in Metrics')
return self._ballots
### Metrics
@utilities.lazy_property2('_cache_voter')
def voter_regret_mean(self):
"""Regret of voters if winner is located at preference mean"""
return mean_regret(self.voters, self.weights, order=self._order)
@utilities.lazy_property2('_cache_voter')
def voter_regret_median(self):
"""Regret of voters if winner is located at preference median"""
return median_regret(self.voters, self.weights)
@utilities.lazy_property2('_cache_voter')
def voter_regret_random_avg(self):
"""Average regret of voters if winner is randomly selected from voter
population"""
r = voter_regrets(self.voters,
self.weights,
maxsize=5000,
order=self._order,
seed=0)
return np.mean(r)
@utilities.lazy_property2('_cache_voter')
def voter_mean(self):
""""array shape (n) : Preference mean of voters for n preference dimensions"""
return np.mean(self.voters, axis=0)
@utilities.lazy_property2('_cache_voter')
def voter_median(self):
"""array shape (n) : Preference median of voters for n preference dimensions"""
return np.median(self.voters, axis=0)
@utilities.lazy_property2('_cache_voter')
def voter_std(self):
"""array shape (n) : Preference standard deviation of voters for
n preference dimensions"""
return np.std(self.voters, axis=0)
@utilities.lazy_property2('_cache_voter')
def voter_regret_std(self):
"""Standard deviation of regret """
meanvoter = self.voter_mean
return regret_std(self.voters,
meanvoter=meanvoter,
weights=self.weights,
order=self._order)
@utilities.lazy_property2('_cache_candidate')
def candidate_regrets(self):
"""array shape (c) : voter regret for each candidate"""
return candidate_regrets(self.voters,
self.candidates,
order=self._order)
# @utilities.lazy_property2('_cache_result')
# def _PR_regret(self):
# pr = PrRegret(self.voters, self.winners, self.weights)
# regret = pr.regret
# std_regret = pr.std_regret
# return regret, std_regret
# @property
# def regret_PR(self):
# """Multi-winner average regret for Proportional Representation.
# Average voter regret for his nearest winner"""
# return self._PR_regret[0]
# @property
# def regret_PR_std(self):
# """Standard deviation of nearest regrets for each winner. An ideal
# proportional system ought to have low std deviation"""
# return self._PR_regret[1]
@utilities.lazy_property2('_cache_result')
def winner_num(self):
"""Number of winners for this election"""
return len(self.winners)
@property
def winner_all(self):
"""All winners of election"""
return self.winners
@utilities.lazy_property2('_cache_result')
def regret_consensus(self):
"""overall satisfaction of all winners for all voters."""
candidate_regrets = self.candidate_regrets
ii = self.winners
winner_pref = self.candidates[ii]
rc = consensus_regret(self.voters,
winner_pref,
_distances=candidate_regrets[ii])
return rc
@utilities.lazy_property2('_cache_candidate')
def _regret_best(self):
"""Retrieve best regrests and corresponding winner indices"""
candidate_regrets = self.candidate_regrets
winner_num = self.winner_num
ii = np.argsort(candidate_regrets)
ii_ideal = ii[0 : winner_num]
ri = np.mean(candidate_regrets[ii_ideal])
return ri, ii_ideal
@property
def regret_best(self):
"""Best possible regret for the best candidate in election"""
return self._regret_best[0]
@property
def candidate_best(self):
"""Best possible candidate (in terms of regret) in election"""
return self._regret_best[1]
@utilities.lazy_property2('_cache_candidate')
def candidate_regret_random(self):
"""Average regret if a random candidate became winner"""
return np.mean(self.candidate_regrets)
@property
def candidate_preference(self):
"""Preference locations of candidates"""
return self.candidates
@property
def regret_efficiency_candidate(self):
"""Voter satisfaction efficiency, compared to random candidate"""
U = self.regret_consensus
R = self.candidate_regret_random
B = self.regret_best
vse = (U - R) / (B - R)
return vse
@property
def regret_efficiency_voter(self):
"""My updated satisfaction efficiency equation normalizing to voter population
rather than candidate population"""
U = self.regret_consensus
R2 = self.voter_regret_random_avg
R1 = self.voter_regret_median
B = self.regret_best
return 1.0 - abs(U - B) / (R2 - R1)
@property
def regret_normed(self):
"""Voter regret normalized to ideal"""
U = self.regret_consensus
R = self.voter_regret_median
return U / R - 1
@utilities.lazy_property2('_cache_ballot')
def _ballot_stats(self):
ballots = np.atleast_2d(self.ballots)
ballot_num, candidate_num = ballots.shape
# Get number of candidates marked for each ballot
marked_array = np.sum(ballots > 0, axis=1)
# Get ballots where bullet voting happened
bullet_num = np.sum(marked_array == 1)
bullet_ratio = bullet_num / ballot_num
#Get ballots where all but one candidate is marked
full_num = np.sum(marked_array >= (candidate_num - 1))
full_ratio = full_num / ballot_num
marked_num = np.sum(marked_array)
marked_avg = np.mean(marked_array)
marked_std = np.std(marked_array)
d = {}
d['ballot.bullet.num'] = bullet_num
d['ballot.bullet.ratio'] = bullet_ratio
d['ballot.full.num'] = full_num
d['ballot.full.ratio'] = full_ratio
d['ballot.marked.num'] = marked_num
d['ballot.marked.avg'] = marked_avg
d['ballot.marked.std'] = marked_std
return d
@property
def ballot_bullet_num(self):
"""Number of ballots where voters only bullet voted for 1 candidate"""
return self._ballot_stats['ballot.bullet.num']
@property
def ballot_bullet_ratio(self):
"""Ratio of ballots where voters only bullet voted for 1 candidate"""
return self._ballot_stats['ballot.bullet.ratio']
@property
def ballot_full_num(self):
"""Number of ballots where all but one candidate is marked"""
        return self._ballot_stats['ballot.full.num']
@property
def ballot_full_ratio(self):
"""Ratio of ballots where all but one candidate is marked"""
        return self._ballot_stats['ballot.full.ratio']
@property
def ballot_marked_num(self):
"""Total number of marked candidates for all ballots"""
return self._ballot_stats['ballot.marked.num']
@property
def ballot_marked_avg(self):
"""Average number of marked candidates per ballot"""
return self._ballot_stats['ballot.marked.avg']
@property
def ballot_marked_std(self):
"""Std deviation of marked candidates per ballot"""
return self._ballot_stats['ballot.marked.std']
# def _result_stats(self, voters, candidates, winners, weights):
# regret_candidates = candidate_regrets(voters, candidates)
# rr = np.mean(regret_candidates)
#
# ### Average satisfaction of voter to closest winner
#
# pr = PrRegret(voters, winners, weights)
#
# ### Overall satisfaction of all voters for all winners
# winner_pref = candidates[winners]
# winner_num = len(winners)
# ii = winners
# rc = consensus_regret(voters,
# winner_pref,
# _distances=regret_candidates[ii])
#
# ### Minimum possible consensus regret for this election
#
#
# regret_best, candidate_best = self._regret_best(regret_candidates, winner_num)
# vse = self._vse(rc, rr, regret_best)
#
# rvm = self.stats['voter.regret.median']
# regret_median_acc = self._regret_median_accuracy(rc, rvm)
#
# d = {}
# d['regret.candidates'] = regret_candidates
# d['regret.PR'] = pr.regret
# d['regret.PR_std'] = pr.std_regret
#
# d['regret.consensus'] = rc
# d['regret.best'] = regret_best
# d['regret.random'] = rr
# d['regret.vse'] = vse
# d['regret.vsp'] = self._satisfaction_population(rc, regret_best, rvm)
# d['regret.median_accuracy'] = regret_median_acc
# d['winners.num'] = winner_num
# d['winners'] = winners
# d['candidates.preference'] = candidates
# d['candidates.best'] = candidate_best
# return d
#
#if __name__ == '__main__':
# rs = np.random.RandomState(None)
# win_num = np.arange(5, 100, 1)
# regrets = []
# ndim = 1
# for w in win_num:
# voters = rs.rand(5000, ndim) * 10
# winners = rs.rand(w, ndim) * 10
# r = PR_regret(voters, winners)
# regrets.append(r)
#
#
# import matplotlib.pyplot as plt
# plt.plot(win_num, regrets)
# plt.ylim(0, None)
#
#
```
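A minimal sketch of feeding raw arrays into `ElectionStats` through `set_raw`. The winner index is chosen by hand, and `ballots` is left as `None`, which is assumed acceptable to the underlying data classes as long as ballot statistics are never requested.

```python
import numpy as np

# Toy 1-D election: 5 voters, 3 candidates, candidate 1 declared the winner.
voters = np.array([[0.0], [0.2], [0.5], [0.8], [1.0]])
candidates = np.array([[0.1], [0.5], [0.9]])

estats = ElectionStats()
estats.set_raw(voters=voters, order=1, candidates=candidates,
               winners=np.array([1]), ballots=None, ties=np.array([]))
print(estats.winner.regret)                       # mean voter distance to the winner
print(estats.winner.regret_efficiency_candidate)  # VSE relative to a random candidate
```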
#### File: votesim/models/spatialerror.py
```python
import numpy as np
from votesim.models.spatial import Voters
from votesim.models import vcalcs
from votesim import utilities
raise NotImplementedError("This is not ready.")
class ErrorVoters(Voters):
"""Voters who get things wrong"""
@utilities.recorder.record_actions()
def add_random(self,
numvoters,
ndim=1,
error_mean=0.0,
error_width=0.0,
clim_mean=-1,
clim_width=2):
"""Add random normal distribution of voters
Parameters
-----------
numvoters : int
Number of voters to generate
ndim : int
Number of preference dimensions of population
error_mean : float
Average error center of population
            - At 0, half the population is 100% accurate
            - At X, the mean voter's accuracy is X standard deviations of
              voter preference
error_width : float
Error variance about the error_mean
"""
super(ErrorVoters, self).add_random(numvoters, ndim=ndim)
self._add_error(numvoters,
error_mean=error_mean,
error_width=error_width)
self._add_ignorance(numvoters, clim_mean, clim_width)
return
@utilities.recorder.record_actions()
def add_points(self,
avgnum,
pnum,
ndim=1,
error_mean=0.0,
error_width=0.0,
clim_mean=-1,
clim_width=2):
"""Add a random point with several clone voters at that point
Parameters
-----------
avgnum : int
Number of voters per unique point
pnum : int
Number of unique points
ndim : int
Number of dimensions
"""
vnum1 = len(self.voters)
super(ErrorVoters, self).add_points(avgnum, pnum, ndim=ndim)
vnum2 = len(self.voters)
vdiff = vnum2 - vnum1
self._add_error(vdiff,
error_mean=error_mean,
error_width=error_width)
self._add_ignorance(vdiff, clim_mean, clim_width)
return
def _add_error(self, numvoters, error_mean=0.0, error_width=0.0):
"""Create voter error attribute for the specified number of voters
self.voter_error describes the maximum candidate distance error
a voter will make during the election.
"""
rs = self._randomstate
e = rs.normal(loc=error_mean,
scale=error_width,
size=(numvoters,))
e = np.maximum(0, e)
try:
error = np.concatenate((self.voter_error, e))
except AttributeError:
error = e
self.voter_error = error
return
def _add_ignorance(self, numvoters, avg=7, std=2):
rs = self._randomstate
        # if -1 then voters have perfect memory
if avg == -1:
cnum = np.ones(numvoters) * -1
else:
cnum = rs.normal(loc=avg, scale=std, size=(numvoters,))
cnum = np.maximum(0, cnum)
try:
self.voter_memory = np.concatenate((self.voter_memory, cnum))
except AttributeError:
self.voter_memory = cnum
return
def calculate_distances(self, candidates):
"""Calculate regret distances.
Parameters
----------
candidates : array shaped (a, b)
Candidate preference data
"""
pref = self._pref
error = self.voter_error
rs = self._randomstate
try:
weights = self.weights
except AttributeError:
weights = None
distances = vcalcs.voter_distances(voters=pref,
candidates=candidates,
weights=weights)
distances = vcalcs.voter_distance_error(distances, error, rstate=rs)
return distances
```
#### File: election_sim/votesim/plots.py
```python
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib as mpl
import seaborn as sns
def vset():
"""Set votesim preferred matplotlib global options"""
sns.set()
# mpl.rcParams['figure.figsize'] = [8, 6]
mpl.rcParams['figure.dpi'] = 100
mpl.rcParams['savefig.dpi'] = 150
mpl.rcParams['font.size'] = 10
# subplot = {
# 'figure.subplot.bottom': 0.1,
# 'figure.subplot.hspace': 0.35,
# 'figure.subplot.left': 0.1,
# 'figure.subplot.right': 0.95,
# 'figure.subplot.top': 0.9,
# 'figure.subplot.wspace': 0.2,
# }
# mpl.rcParams.update(subplot)
return
def subplot_4set(**kwargs):
figsize = [9, 6.5]
fig = plt.figure(figsize=figsize, **kwargs)
left = 0.05 # the left side of the subplots of the figure
right = 0.95 # the right side of the subplots of the figure
bottom = 0.1 # the bottom of the subplots of the figure
top = 0.9 # the top of the subplots of the figure
wspace = 0.2 # the amount of width reserved for space between subplots,
# expressed as a fraction of the average axis width
hspace = .35 # the amount of height reserved for space between subplots,
# expressed as a fraction of the average axis height
plt.subplots_adjust(left=left, right=right, bottom=bottom, top=top,
wspace=wspace, hspace=hspace)
return fig
def plot_1set(**kwargs):
figsize = [6.5, 4.5]
fig = plt.figure(figsize=figsize, **kwargs)
left = 0.1 # the left side of the subplots of the figure
right = 0.98 # the right side of the subplots of the figure
# Margins applied as in the other subplot_* helpers (bottom/top values assumed).
plt.subplots_adjust(left=left, right=right, bottom=0.125, top=0.9)
return fig
def subplot_2set(**kwargs):
figsize = [9.0, 3.5]
fig = plt.figure(figsize=figsize, **kwargs)
left = 0.1 # the left side of the subplots of the figure
right = 0.98 # the right side of the subplots of the figure
bottom = 0.2 # the bottom of the subplots of the figure
top = 0.85 # the top of the subplots of the figure
wspace = 0.2 # the amount of width reserved for space between subplots,
# expressed as a fraction of the average axis width
hspace = .35 # the amount of height reserved for space between subplots,
# expressed as a fraction of the average axis height
plt.subplots_adjust(left=left, right=right, bottom=bottom, top=top,
wspace=wspace, hspace=hspace)
return fig
def subplot_2row(**kwargs):
figsize=[6.5, 9]
fig = plt.figure(figsize=figsize, **kwargs)
left = 0.1125 # the left side of the subplots of the figure
right = 0.98 # the right side of the subplots of the figure
bottom = 0.1 # the bottom of the subplots of the figure
top = 0.925 # the top of the subplots of the figure
wspace = 0.1 # the amount of width reserved for space between subplots,
# expressed as a fraction of the average axis width
hspace = .3 # the amount of height reserved for space between subplots,
# expressed as a fraction of the average axis height
plt.subplots_adjust(left=left, right=right, bottom=bottom, top=top,
wspace=wspace, hspace=hspace)
return fig
def heatmap(x, y, hue, data,
func='mean',
xbin=None,
ybin=None,
annot=True,
fmt='.1f',
xfmt='g',
yfmt='g',
cbar=False,
linewidths=0.5,
cmap='viridis_r',
sort=True,
xsortkey=None,
ysortkey=None,
**kwargs):
"""Custom heatmap for either categorical or numeric data.
Parameters
------------
x : str
column name of data plotted on x-axis
y : str
Column name of data plotted on y-axis
hue : str
Column name of data values plotted on heatmap.
data : Pandas DataFrame
Data used in plot
func : str or function
aggregation function for `data.agg`, for example
- 'min', 'mean', 'max', 'sum'
- np.mean
xbin, ybin : None, int, or array (n,)
If x or y is not categorical data, bin
- None (default) -- if x or y is categorical data, do nothing.
- int -- Set to number of bins to divide data using pandas.cut
- array -- User defined bins to divide data using pandas.cut
fmt, xfmt, yfmt : str
Formatting strings for the hue, x, and y values; defaults are '.1f' for hue and 'g' for the x and y axes.
sort : bool
Sort the results by their average across the x-axis. Default True.
**kwargs :
Additional arguments passed into `seaborn.heatmap`.
Returns
------
ax : matplotlib Axes
Axes object with the heatmap.
dfp : pandas DataFrame
Pivot table used to construct heatmap
"""
xfmt = '%' + xfmt
yfmt = '%' + yfmt
dfp, xbin, ybin = heat_pivot(data, x, y, hue,
func=func, xbin=xbin, ybin=ybin, sort=sort)
if ysortkey is not None:
dfp = dfp.loc[ysortkey]
if xsortkey is not None:
dfp = dfp[xsortkey]
ax = sns.heatmap(
dfp,
annot=annot,
fmt=fmt,
cbar=cbar,
linewidths=linewidths,
cmap=cmap,
**kwargs
)
if xbin is not None:
ax.set_xticks(np.arange(len(xbin)))
xlabels = [xfmt % xi for xi in xbin]
ax.set_xticklabels(xlabels)
if ybin is not None:
ax.set_yticks(np.arange(len(ybin)))
ylabels = [yfmt % yi for yi in ybin]
ax.set_yticklabels(ylabels)
return ax, dfp
def heat_pivot(data, x, y, hue,
func='mean',
xbin=None,
ybin=None,
sort=True,
):
"""Pivot & aggregate data to use in a heat plot.
Parameters
----------
data : Pandas dataframe
Data to pivot.
x : str
Dataframe Label to use on x-axis.
y : str
Dataframe Label to use on y-axis.
hue : str
Dataframe label to use as hue.
func : str or func, optional
Aggregation method for DataFrame.agg(...). The default is 'mean'.
xbin : list or ndarray, optional
x-axis bin edges. The default is None.
ybin : list or ndarray, optional
y-axis bin edges. The default is None.
sort : bool, optional
True to sort the data by mean values. The default is True.
Returns
-------
dfp : DataFrame
Output heat plot dataframe.
xbins : ndarray or None
If xbin specified, returns x-axis bins.
ybins : ndarray or None
If ybin specified, returns y-axis bins.
"""
data = data.copy()
if xbin is not None:
xcat, xbins = pd.cut(data[x], xbin, retbins=True)
data.loc[:, x] = xcat
else:
xbins = None
data.loc[:, x] = data[x].astype('category')
if ybin is not None:
ycat, ybins = pd.cut(data[y], ybin, retbins=True)
data.loc[:, y] = ycat
else:
ybins = None
data.loc[:, y] = data[y].astype('category')
data = data[[x, y, hue]]
dfp = (data.groupby([x, y])
.agg(func)
.reset_index()
.pivot(index=y, columns=x, values=hue)
)
if sort:
regret_avg = np.nanmean(dfp.values, axis=1)
ii = np.argsort(regret_avg)[::-1]
sort_index = dfp.index[ii]
dfp = dfp.loc[sort_index]
return dfp, xbins, ybins
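# Illustrative usage sketch (not part of the original module): pivot a small
# categorical frame with heat_pivot and inspect one aggregated cell. The frame
# and the _example_heat_pivot name are hypothetical.
def _example_heat_pivot():
    df = pd.DataFrame({'x': [0, 0, 1, 1],
                       'y': [0, 1, 0, 1],
                       'z': [1.0, 2.0, 3.0, 4.0]})
    dfp, xbins, ybins = heat_pivot(df, 'x', 'y', 'z', func='mean', sort=False)
    # dfp is indexed by y with x as columns, e.g. dfp.loc[1, 0] == 2.0;
    # xbins and ybins are None because no binning was requested.
    return dfp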
def bar(x, y, data=None, fmt='g', **kwargs):
"""
Custom bar plot with values on bars
Parameters
----------
x : str
data column name for x-axis
y : str
data column name for y-axis
data : DataFrame, array, or list of arrays, optional
Dataset for plotting.
Returns
---------
ax : matplotlib Axes
Returns the Axes object with the plot drawn onto it.
"""
ax = sns.barplot(x=x, y=y, data=data, **kwargs)
show_values_on_bars(ax, fmt=fmt)
#num = len(x)
#x1 = np.arange(num) - .5
#fmt = '%' + fmt
#yrange = np.max(y) - np.min(y)
#ydelta = yrange / 25
# for (xi, yi) in zip(x1, y):
# s = fmt % yi
# ax.annotate(s, xy=(xi +.125, yi + ydelta))
return ax
def show_values_on_bars(axs, height=.2, fmt='g'):
"""Put labels on seaborn bar chart from stack overflow
https://stackoverflow.com/questions/43214978/seaborn-barplot-displaying-values
"""
ffmt = '{:' + fmt + '}'
def _show_on_single_plot(ax):
for p in ax.patches:
_x = p.get_x() + p.get_width() / 2
_y = p.get_y() + p.get_height() + height
value = ffmt.format(p.get_height())
ax.text(_x, _y, value, ha="center")
if isinstance(axs, np.ndarray):
for idx, ax in np.ndenumerate(axs):
_show_on_single_plot(ax)
else:
_show_on_single_plot(axs)
### test
def test_heat_category():
x = np.random.randint(0, 5, size=1000)
y = np.random.randint(0, 5, size=1000)
z = x * y
d = {'x': x,
'y': y,
'z': z,}
df = pd.DataFrame(data=d)
heatmap('x', 'y', 'z', df, xfmt='.2f')
return
def test_heat_continuous():
x = np.random.rand(1000)
y = np.random.rand(1000)
z = x * y
d = {'x': x,
'y': y,
'z': z,}
df = pd.DataFrame(data=d)
heatmap('x', 'y', 'z', df,
xbin=10, ybin=10, yfmt='.2f', xfmt='.2f')
def test_heat_mixed():
x = np.random.rand(1000)
y = np.random.randint(0, 5, size=1000)
z = x * y
d = {'x': x,
'y': y,
'z': z,}
df = pd.DataFrame(data=d)
heatmap('x', 'y', 'z', df, xbin=10, xfmt='.2f')
def test_bar():
plt.figure()
x = np.arange(10)
y = x + 6
bar(x, y)
ax = plt.gca()
# def test_heat():
# x = np.arange(10)
# z = {}
# z['a'] = x
# z['b'] = x+2
# z['c'] = x**1.1
# z['d'] = -x + 3
# z['e'] = -x + 4
# z['f'] = -x + 5
# df = pd.DataFrame(z)
# heatmap(df)
# plt.xlabel('test')
# assert True
def test_2set():
fig = subplot_2set()
plt.subplot(1,2,1)
test_heat_continuous()
plt.title('Subplot #1')
plt.subplot(1,2,2)
test_heat_mixed()
plt.title('Subplot #2')
plt.suptitle("this is the test title")
def test_2row():
fig = subplot_2row()
plt.subplot(2,1,1)
test_heat_continuous()
plt.title('Subplot #1')
plt.subplot(2,1,2)
test_heat_mixed()
plt.title('Subplot #2')
plt.suptitle("this is the test title")
if __name__ == '__main__':
vset()
test_2row()
test_2set()
# plt.figure()
test_heat_mixed()
# test_bar()
```
#### File: election_sim/votesim/post.py
```python
import numpy as np
import pandas as pd
def categorize_condorcet(df: pd.DataFrame):
"""
Categorize Elections based on Condorcet/Utility/Plurality/Majority
conditionals. Categories focus on Condorcet criterion.
Parameters
----------
df : Pandas DataFrame
Dataframe produced by :mod:`~votesim.benchmarks`.
Returns
--------
df : Pandas DataFrame
Dataframe with new 'categories' column.
Category Combinations
Labels
-------
- M = majority winner
- P = plurality winner that is not majority winner
- C = condorcet winner that is not majority winner
- U = utility winner
Categories
----------
- MU = Has majority utility winner
- M = Has majority winner that is not utility winner.
- CPU = Has condorcet, utility, plurality winner
- CU = Has condorcet, utility winner that is not plurality winner
- CP = Has condorcet, plurality winner that is not utility winner
- C = Has condorcet winner who is not plurality and utility winner
- NC = Has no Condorcet winner
"""
iM = df['output.candidate.winner_majority']
iP = df['output.candidate.winner_plurality']
iC = df['output.candidate.winner_condorcet']
iU = df['output.candidate.winner_utility']
df = df.copy()
df.loc[:, 'categories'] = 'No category'
maj = iM > -1
no_maj = ~maj
MU = (iM == iU)
M = maj & (iM != iU)
CPU = no_maj & (iC == iP) & (iC == iU)
CP = no_maj & (iC == iP) & (iC != iU)
CU = no_maj & (iC == iU) & (iC != iP)
#PU = no_maj & (iP == iU) & (iP != iC) # not mutually exclusive
NC = (iC == -1)
C = (iC > -1) & (iC != iP) & (iC != iU)
df.loc[MU, 'categories'] = 'MU'
df.loc[M, 'categories'] = 'M'
df.loc[CPU, 'categories'] = 'CPU'
df.loc[CP, 'categories'] = 'CP'
df.loc[CU, 'categories'] = 'CU'
df.loc[C, 'categories'] = 'C'
#df.loc[PU, 'categories'] = 'PU'
df.loc[NC, 'categories'] = 'nc'
return df
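# Illustrative sketch (not part of the original module): a two-row frame with
# hypothetical winner indices. Row 0 has a majority winner who is also the
# utility winner ('MU'); row 1 has no Condorcet winner ('nc').
#
# >>> example = pd.DataFrame({
# ...     'output.candidate.winner_majority':  [2, -1],
# ...     'output.candidate.winner_plurality': [2,  0],
# ...     'output.candidate.winner_condorcet': [2, -1],
# ...     'output.candidate.winner_utility':   [2,  1],
# ... })
# >>> categorize_condorcet(example)['categories'].tolist()
# ['MU', 'nc']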
def benchmark_score(df: pd.DataFrame):
"""Construct benchmark score of how good electoral methods are.
"""
xname1 = 'output.winner.regret_efficiency_voter'
xname2 = 'output.winner.regret_efficiency_candidated'
eff1 = df[xname1]
eff2 = df[xname2]
regret = 1 - np.minimum(eff1, eff2)
pratio = df['output.candidate.plurality_ratio'] * 100
```
#### File: strategy/tests/test_ballots.py
```python
import numpy as np
from votesim.models import spatial
from votesim import ballot
# from votesim.strategy import TacticalRoot, Ra
from votesim.strategy.tacticalballots import (TacticalRoot,
TacticalGroup,
RatedTactics,
RankedTactics,
StrategyData)
etype = 'score5'
vnum = 8
rs = np.random.RandomState(0)
distances = rs.rand(vnum, 3)
# tol = np.ones((20, 3)) * 0.75
tol = None
b = ballot.gen_honest_ballots(distances=distances, tol=tol,
base='linear')
ballots = b.ratings
## Test compromise strategy
# def test_compromise():
# bt = TacticalBallots(etype=etype, ballots=b)
# bt.set()
# bt = bt.compromise()
# ii = np.arange(len(bt.ratings))
# jj = bt.preferred_frontrunner
# assert np.all(bt.ratings[ii, jj] == 1)
# assert np.all(bt.ranks[ii, jj] == 1)
strategy = StrategyData({})
def test_compromise_rated():
ballots = b.ratings
root = TacticalRoot(etype, ballots=ballots, distances=distances)
tgroup = root.get_tactical_group(strategy=strategy)
b_rated = RatedTactics(ballots, group=tgroup)
b_rated.compromise()
ii = np.arange(len(ballots))
jj = tgroup.preferred_frontrunner
assert np.all(b_rated.ballots[ii, jj] == 1)
def test_compromise_ranked():
ballots = b.ranks
root = TacticalRoot(etype, ballots=ballots, distances=distances)
tgroup = root.get_tactical_group(strategy=strategy)
b_ranked = RankedTactics(ballots, group=tgroup)
b_ranked.compromise()
ii = np.arange(len(ballots))
jj = tgroup.preferred_frontrunner
assert np.all(b_ranked.ballots[ii, jj] == 1)
def test_compromise_rated2():
strategy = {'tactics' : ['compromise']}
ballots = b.ratings
root = TacticalRoot(etype, ballots=ballots, distances=distances)
tgroup = root.get_tactical_group(strategy=strategy)
new = root.modify_ballot(ballots=ballots, strategy=strategy)
ii = np.arange(len(ballots))
jj = tgroup.preferred_frontrunner
assert np.all(new[ii, jj] == 1)
if __name__ == '__main__':
test_compromise_rated()
test_compromise_rated2()
test_compromise_ranked()
```
#### File: votesim/tests/test_ballots.py
```python
import numpy as np
from votesim import ballot
import matplotlib.pyplot as plt
def test_rank_cut():
"""Test to make sure rank cutting is working."""
np.random.seed(0)
distances = np.random.rand(3, 5)
ballots = ballot.BaseBallots(distances=distances, tol=0.5)
ballots1 = ballots.rate_linear().rate_norm().rank_honest().rank_cut()
zero_locs1 = ballots1.ratings == 0
zero_locs2 = ballots1.ranks == 0
assert np.all(zero_locs1 == zero_locs2)
def test_rating_shapes():
"""Test the 3 rating shapes linear, sqrt, and quadratic.
The generated ratings should look like sqrt, linear, and quadratic curves of preference distance.
"""
np.random.seed(0)
distances = np.random.rand(100, 5)
ballots = ballot.BaseBallots(distances=distances, tol=0.8)
ballots1 = ballots.copy().rate_linear()
ballots2 = ballots.copy().rate_sqrt()
ballots3 = ballots.copy().rate_quadratic()
plt.figure()
plt.plot(ballots1.distances, ballots1.ratings, '.')
plt.plot(ballots2.distances, ballots2.ratings, 'x')
plt.plot(ballots3.distances, ballots3.ratings, 'o')
plt.xlabel('Preference Distance')
plt.ylabel('Ballot Score')
plt.text(0.0, 0.5,
"THIS PLOT SHOULD LOOK LIKE\n SQRT, LINEAR, AND QUADRATIC FUNCTION!")
assert np.all(ballots1.ratings >= ballots3.ratings)
assert np.all(ballots1.ratings <= ballots2.ratings)
zero_locations = ballots1.ratings == ballots3.ratings
assert np.all(ballots1.ratings[zero_locations] == 0)
assert np.all(ballots2.ratings[zero_locations] == 0)
assert np.all(ballots3.ratings[zero_locations] == 0)
assert np.any(ballots1.ratings > 0)
assert np.any(ballots2.ratings > 0)
assert np.any(ballots3.ratings > 0)
if __name__ == '__main__':
test_rank_cut()
test_rating_shapes()
```
#### File: votesim/utilities/decorators.py
```python
from __future__ import print_function, absolute_import, division
from functools import wraps, partial
import logging
from votesim.utilities import misc
logger = logging.getLogger(__name__)
class memoize:
"""
Decorator used to store past calls.
"""
def __init__(self, function):
self.function = function
self.memoized = {}
def __call__(self, *args, **kwargs):
key = (args, frozenset(kwargs.items()))
try:
return self.memoized[key]
except KeyError:
self.memoized[key] = self.function(*args, **kwargs)
return self.memoized[key]
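# Illustrative sketch (not part of the original module): memoize keys the cache
# on positional args plus a frozenset of keyword items, so a repeated call with
# the same arguments skips the wrapped function. The names below are hypothetical.
def _example_memoize_usage():
    calls = []

    @memoize
    def slow_add(a, b):
        calls.append((a, b))
        return a + b

    assert slow_add(1, 2) == 3
    assert slow_add(1, 2) == 3   # served from memoize's cache
    assert calls == [(1, 2)]     # wrapped function ran only once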
class method_memoize(object):
"""cache the return value of a method
This class is meant to be used as a decorator of methods. The return value
from a given method invocation will be cached on the instance whose method
was invoked. All arguments passed to a method decorated with memoize must
be hashable.
If a memoized method is invoked directly on its class the result will not
be cached. Instead the method will be invoked like a static method:
class Obj(object):
@memoize
def add_to(self, arg):
return self + arg
Obj.add_to(1) # not enough arguments
Obj.add_to(1, 2) # returns 3, result is not cached
"""
def __init__(self, func):
self.func = func
def __get__(self, obj, objtype=None):
if obj is None:
return self.func
return partial(self, obj)
def __call__(self, *args, **kw):
obj = args[0]
try:
cache = obj.__cache
except AttributeError:
cache = obj.__cache = {}
key = (self.func, args[1:], frozenset(kw.items()))
try:
res = cache[key]
except KeyError:
res = cache[key] = self.func(*args, **kw)
return res
#
#def lazyprop(fn):
# """
# Decorator used to cache property results
#
# From stack overflow. Author <NAME>
# https://stackoverflow.com/questions/3012421/python-memoising-deferred-lookup-property-decorator
# """
#
# attr_name = '_lazy_' + fn.__name__
# @property
# def _lazyprop(self):
# if not hasattr(self, attr_name):
# setattr(self, attr_name, fn(self))
# return getattr(self, attr_name)
# return _lazyprop
#
### Lazy Property decorator
# Property name to hold all lazy data
_data_holder_attr = '_cache_properties'
def clean_lazy_properties(instance):
'''Clean all lazy properties'''
setattr(instance, _data_holder_attr, {})
def clean_some_lazy_properties(instance, names):
"""Clean properties in iterable names"""
try:
cache = getattr(instance, _data_holder_attr)
except AttributeError:
return
if isinstance(names, str):
names = [names]
for name in names:
try:
del cache[name]
except KeyError:
pass
setattr(instance, _data_holder_attr, cache)
return
def modify_lazy_property(instance, name, value, dictname=_data_holder_attr):
"""Modify a lazy property"""
cache = getattr(instance, dictname)
cache[name] = value
setattr(instance, _data_holder_attr, cache)
return
def lazy_property(fn):
"""
Version of lazy_property by <NAME>.
Decorator used to cache property results into a dictionary.
The cache can be cleared using clean_lazy_properties.
"""
cache_name = _data_holder_attr
attr_name = fn.__name__
def get_cache(instance):
if not hasattr(instance, cache_name):
setattr(instance, cache_name, {})
return getattr(instance, cache_name)
@property
@wraps(fn)
def get_attr(self):
cache = get_cache(self)
if attr_name not in cache:
cache[attr_name] = fn(self)
return cache[attr_name]
return get_attr
def lazy_property2(name=_data_holder_attr):
"""
Version of lazy_property by <NAME>.
Decorator used to cache property results into dictionary.
The cache can be cleared using clean_lazy_properties.
Decorator must be called as a function.
Parameters
----------
name : str
Name of cache dictionary
Example
---------
Set the lazy property
>>> class class1(object):
>>> @lazy_property2('my_cache')
>>> def property(self):
>>> x = 2.0
>>> return x
Delete the lazy property
>>> a = class1()
>>> del a.my_cache
"""
def decorator(fn):
cache_name = name
attr_name = fn.__name__
def get_cache(instance):
if not hasattr(instance, cache_name):
setattr(instance, cache_name, {})
return getattr(instance, cache_name)
@property
@wraps(fn)
def get_attr(self):
cache = get_cache(self)
if attr_name not in cache:
cache[attr_name] = fn(self)
return cache[attr_name]
return get_attr
return decorator
def reuse_doc(f):
"""Reuse the docstring from f on the decorated function
Parameters
----------
f : func or class
Desired func/class whose __doc__ you want to reuse
Returns
-------
out : decorator
Example
--------
Here we decorate class B with class A's docstring
>>> class A(object):
>>> '''I got A docstring'''
>>> def __init__(self):
>>> self.x = 10
>>> @reuse_doc(A)
>>> class B(A):
>>> pass
>>> B.__doc__ == 'I got A docstring'
"""
doc = f.__doc__
def decorator(fn):
fn.__doc__ = doc
return fn
return decorator
```
#### File: votesim/utilities/misc.py
```python
import os
import fnmatch
import errno
import collections
def detectfiles(folder1, pattern):
"""Recursively detect files in path of folder1 using a pattern as
recognized by fnmatch"""
matches = []
for root, dirnames, filenames in os.walk(folder1):
for filename in fnmatch.filter(filenames, pattern):
matches.append(os.path.join(root, filename))
return matches
def create_file_dirs(filename):
"""
Construct directories for file recursively.
From stackoverflow
https://stackoverflow.com/questions/12517451/automatically-creating-directories-with-file-output
"""
if not os.path.exists(os.path.dirname(filename)):
try:
os.makedirs(os.path.dirname(filename))
except OSError as exc: # Guard against race condition
if exc.errno != errno.EEXIST:
raise
return
def execfile(filepath, globals=None, locals=None):
"""Replace python2 execfile, from stack overflow.
https://stackoverflow.com/questions/436198/what-is-an-alternative-to-execfile-in-python-3"""
if globals is None:
globals = {}
globals.update({
"__file__": filepath,
"__name__": "__main__",
})
with open(filepath, 'rb') as file:
exec(compile(file.read(), filepath, 'exec'), globals, locals)
# def create_dirs(path):
# """Create directories recursively"""
# if not os.path.exists(path):
# try:
# os.makedirs(path)
# except OSError as exc:
# if exc.errno != errno.EEXIST:
# raise
# return
def flatten_dict(d, parent_key='', sep='.'):
"""Flatten a nested dictionary of dictionaries.
Parameters
----------
d : dict
Dictionary of dictionaries to flatten
sep : str
Symbol used to separate appended key names
Returns
---------
out : dict
Flattened dictionary where all sub-dictionaries are flattened into out.
"""
items = []
for k, v in d.items():
new_key = parent_key + sep + k if parent_key else k
if isinstance(v, collections.abc.MutableMapping):
items.extend(flatten_dict(v, new_key, sep=sep).items())
else:
items.append((new_key, v))
return dict(items)
def unflatten_dict(dictionary, sep='.'):
"""Unflatten a dictionary and convert into nested dictionaries
https://stackoverflow.com/questions/6037503/python-unflatten-dict
Parameters
-----------
d : dict
Dictionary
Returns
--------
out : dict
Unflattened dictionary including sub-dictionaries are unflattened.
"""
resultDict = dict()
for key, value in dictionary.items():
parts = key.split(sep)
d = resultDict
for part in parts[:-1]:
if part not in d:
d[part] = dict()
d = d[part]
d[parts[-1]] = value
return resultDict
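# Illustrative sketch (not part of the original module): flatten_dict and
# unflatten_dict are inverses for purely nested dictionaries.
def _example_flatten_roundtrip():
    nested = {'a': {'b': 1, 'c': {'d': 2}}, 'e': 3}
    flat = flatten_dict(nested)
    assert flat == {'a.b': 1, 'a.c.d': 2, 'e': 3}
    assert unflatten_dict(flat) == nested
    return flat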
```
#### File: votesim/utilities/recorder.py
```python
import logging
import math
from functools import wraps
from votesim.utilities import misc
logger = logging.getLogger(__name__)
def record_actions(name='_method_records', replace=False, exclude=()):
"""
Decorator used to record method actions in a cache object created in the parent object.
Parameters
-----------
name : str (default '_method_records')
Name of RecordActionCache used to record method actions.
This shall be created in the parent object.
replace : bool (default = False)
- If False, append to the previously recorded arguments for the method
- If True, replace the previous recorded arguments for the method.
exclude : list[str]
Arguments to exclude from record.
Returns
-------
out : decorator
Function used to decorate class methods
Reset Record
-------------
Reset the record for object `a` using:
>>> del a._method_records
"""
def decorator(fn):
"""Cache a method's arguments in a dict created in parent object"""
funcname = fn.__name__
varnames = fn.__code__.co_varnames
def get_cache(instance):
"""Retrieve records cache of object"""
if not hasattr(instance, name):
cache = RecordActionCache()
setattr(instance, name, cache)
return getattr(instance, name)
@wraps(fn)
def func(self, *args, **kwargs):
"""Call function, record arguments"""
cache = get_cache(self)
argnum = len(args)
argvarnames = varnames[1 : argnum + 1]
kwargs0 = dict(zip(argvarnames, args))
kwargs0.update(kwargs)
# if funcname == 'set_strategy':
# import pdb
# pdb.set_trace()
for arg in exclude:
try:
kwargs0.pop(arg)
except KeyError:
pass
if replace:
cache.replace_record(funcname, kwargs0)
else:
cache.append_record(funcname, kwargs0)
return fn(self, *args, **kwargs)
return func
return decorator
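# Illustrative sketch (not part of the original module): each call to a
# decorated method is appended to a RecordActionCache stored on the instance
# under the default '_method_records' attribute. Widget is a hypothetical class.
def _example_record_actions():
    class Widget(object):
        @record_actions()
        def resize(self, width, height=1):
            self.size = (width, height)

    w = Widget()
    w.resize(3, height=4)
    w.resize(5)
    assert w._method_records.dict == {'0.resize.width': 3,
                                      '0.resize.height': 4,
                                      '1.resize.width': 5}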
def flatten_record_actions(a):
"""Flatten the records list generated from `record_actions`
Parameters
-----------
a : list
Attribute created by decorator `record_actions`
Returns
--------
out : dict
Dictionary of record, hopefully easier to read.
key is now 'number.funcname.argument'
"""
newdict = {}
for i, ai in enumerate(a):
funcname = ai[0]
adict = ai[1]
# Record data even if method has no arguments.
if len(adict) == 0:
newkey = '%s.%s.' % (i, funcname)
newdict[newkey] = None
# Record all arguments of method
for k, v in adict.items():
newkey = '%s.%s.%s' % (i, funcname, k)
newdict[newkey] = v
return newdict
class RecordActionCache(object):
"""
RecordActionCache records two kinds of method actions
- Method calls that are recorded sequentially
- Method calls that replace the input of previous calls
Attributes
----------
dict : dict
Dictionary containing all recorded method actions and arguments.
"""
def __init__(self):
self.reset()
return
def replace_record(self, funcname, kwargs):
"""Modify records that replace the previous method record"""
if funcname not in self._replace:
fnum = len(self._appended)
self._replace[funcname] = fnum
self.append_record(funcname, kwargs)
else:
fnum = self._replace[funcname]
self._appended[fnum] = (funcname, kwargs)
def append_record(self, funcname, kwargs):
"""Modify records that append to previous records"""
record = (funcname, kwargs)
self._appended.append(record)
def reset(self):
"""Clear all records"""
self._replace = {}
self._appended = []
return
@property
def dict(self):
"""dict of records.
For appended, ordered method calls:
- keys = callnum.funcname.kwarg
- values = keyword argument values
For replaced method calls:
- keys = funcname.kwarg
- value = keyword argument values
"""
d = flatten_record_actions(self._appended)
return d
def repeat(self, obj):
"""Repeat the recorded method calls on the input object.
Parameters
-----------
obj : object
Parent object of stored methods
"""
for (funcname, kwargs) in self._appended:
method = getattr(obj, funcname)
method(**kwargs)
return
@staticmethod
def run_dict(d, obj):
run_dict(d, obj)
def run_dict(d: dict, obj):
"""Take RecordActionCache.dict and convert it to RecordActionCache
data
Parameters
------------
d : dict
RecordActionCache.dict
obj : object
Parent object to re-run method calls.
"""
d = misc.unflatten_dict(d, sep='.')
d = filter_args(d)
logger.debug('running record for %s, %s', obj, d)
for key, value in d.items():
## Ordered append records start with numeric
if key[0].isnumeric():
keys = list(value.keys())
values = list(value.values())
funcname = keys[0]
kwargs = values[0]
else:
funcname = key
kwargs = value
method = getattr(obj, funcname)
### method calls with no arguments have argument named ''
### Filter them out.
try:
kwargs.pop('')
except KeyError:
pass
method(**kwargs)
return
def filter_args(d: dict) -> dict:
"""Filter arguments for invalid entries such as '' and nan.
This is a sub-function for `run_dict`. """
new = {}
for key, value in d.items():
keep = True
if key == '':
keep = False
elif hasattr(value, 'items'):
value = filter_args(value)
else:
try:
isnan = math.isnan(value)
if isnan:
keep = False
except TypeError:
pass
if keep:
new[key] = value
return new
```
#### File: votesim/votemethods/__init__.py
```python
from votesim.votemethods import (irv,
plurality,
score,
ranked,
tools,
condorcet,
condcalcs,
)
TYPE_RANK = 'rank'
TYPE_SCORE = 'score'
TYPE_RATE = 'rate'
TYPE_VOTE = 'vote'
SMITH_MINIMAX = 'smith_minimax'
RANKED_PAIRS = 'ranked_pairs'
BLACK = 'black'
COPELAND = 'copeland'
IRV = 'irv'
IRV_STV = 'irv_stv'
STV_GREGORY = 'stv_gregory'
TOP_TWO = 'top_two'
BORDA = 'borda'
ranked_methods = {}
ranked_methods[SMITH_MINIMAX] = condorcet.smith_minimax
ranked_methods[RANKED_PAIRS] = condorcet.ranked_pairs
ranked_methods[BLACK] = condorcet.black
ranked_methods[IRV] = irv.irv
ranked_methods[IRV_STV] = irv.irv_stv
ranked_methods[STV_GREGORY] = irv.stv_gregory
ranked_methods[TOP_TWO] = irv.top2runoff
ranked_methods[BORDA] = ranked.borda
ranked_methods[COPELAND] = condorcet.copeland
SCORE = 'score'
STAR = 'star'
REWEIGHTED_RANGE = 'rrv'
SEQUENTIAL_MONROE = 'seq_monroe'
MAJORITY_JUDGMENT = 'maj_judge'
SMITH_SCORE = 'smith_score'
PLURALITY = 'plurality'
scored_methods = {}
scored_methods[REWEIGHTED_RANGE] = score.reweighted_range
scored_methods[SEQUENTIAL_MONROE] = score.sequential_monroe
scored_methods[SCORE] = score.score
scored_methods[STAR] = score.star
scored_methods[MAJORITY_JUDGMENT] = score.majority_judgment
scored_methods[SMITH_SCORE] = condorcet.smith_score
APPROVAL100 = 'approval100'
APPROVAL75 = 'approval75'
APPROVAL50 = 'approval50'
APPROVAL25 = 'approval25'
SCORE5 = 'score5'
SCORE10 = 'score10'
STAR5 = 'star5'
STAR10 = 'star10'
rated_methods = {}
rated_methods[APPROVAL100] = score.approval100
rated_methods[APPROVAL75] = score.approval75
rated_methods[APPROVAL50] = score.approval50
rated_methods[APPROVAL25] = score.approval25
rated_methods[SCORE5] = score.score5
rated_methods[SCORE10] = score.score10
rated_methods[STAR5] = score.star5
rated_methods[STAR10] = score.star10
vote_methods = {}
vote_methods[PLURALITY] = plurality.plurality
all_methods = {}
all_methods.update(ranked_methods)
all_methods.update(scored_methods)
all_methods.update(rated_methods)
all_methods.update(vote_methods)
# eRunner relies on some of the dict definitions above, so import it last.
from votesim.votemethods.voterunner import eRunner
def get_ballot_type(etype: str):
"""Retrieve ballot type of the election type.
Parameters
----------
etype : str
Election method name, see `all_methods.keys()` for all options.
Returns
-------
out : str
String of either
- 'rank'
- 'score'
- 'rate'
- 'vote'
"""
if etype in ranked_methods:
return TYPE_RANK
elif etype in scored_methods:
return TYPE_SCORE
elif etype in rated_methods:
return TYPE_RATE
elif etype in vote_methods:
return TYPE_VOTE
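# Illustrative sketch (not part of the original module): a few lookups using
# the method-name constants defined above.
#
# >>> get_ballot_type(IRV)         # 'irv' lives in ranked_methods
# 'rank'
# >>> get_ballot_type(STAR)        # 'star' lives in scored_methods
# 'score'
# >>> get_ballot_type(APPROVAL50)  # 'approval50' lives in rated_methods
# 'rate'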
```
#### File: votesim/votemethods/plurality.py
```python
import numpy as np
import logging
from votesim.votemethods import tools
logger = logging.getLogger(__name__)
__all__ = ['plurality']
def plurality(data, numwin=1):
"""Run plurality election.
Parameters
----------
data : array shape (a, b)
Election scoring data, 0 to 1. If rating data is input, plurality will find
maximum rated candidate.
numwin : int
Number of winners. For numwin > 1, plurality turns into a Single Non-Transferable
Vote (SNTV) multi-winner election.
Returns
-------
winners : array shape (numwin,)
Winning candidate indices
ties: array shaped(numties,)
If there are tied candidates, return candidate indices here.
If no ties, return empty array
output : dict
Dictionary with key 'tally' holding an array shaped (b,) of final vote counts
"""
new = tools.getplurality(ratings=data)
sums = np.sum(new, axis=0)
winners, ties = tools.winner_check(sums, numwin=numwin)
output = {}
output['tally'] = sums
return winners, ties, output
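# Illustrative sketch (not part of the original module): three single-choice
# ballots, two favoring candidate 1. Assumes tools.getplurality and
# tools.winner_check behave as described in the docstring above.
def _example_plurality():
    data = np.array([[0, 1, 0],
                     [0, 1, 0],
                     [1, 0, 0]])
    winners, ties, output = plurality(data, numwin=1)
    # Expected: winners contains candidate 1 and output['tally'] is [1, 2, 0].
    return winners, ties, output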
def plurality1(data, numwin=1):
"""Run plurality election.
Parameters
----------
data : array shape (a, b)
Election scoring data, 0 to 1. If rating data is input, plurality will find
maximum rated candidate.
numwin : int
Number of winners. For numwin > 1, plurality turns into a Single Non-Transferable
Vote (SNTV) multi-winner election.
Returns
-------
winners : array shape (numwin,)
Winning candidate indices
ties: array shaped(numties,)
If there are tied candidates, return candidate indices here.
If no ties, return empty array
results : array shaped(b,)
End vote count
"""
# Variable descriptions
# sums : array shape (b,)
# Vote totals for all candidates
# convert possible cardinal data to a single choice.
new = tools.getplurality(ratings=data)
# Get the winner
logger.debug('vote data new:')
logger.debug('\n%s' % new)
sums = np.sum(new, axis=0)
sums_out = sums.copy().astype(int)
logger.info('vote totals:')
logger.info(sums_out)
ranking = np.argsort(sums)[::-1]
logger.info('candidate # sorted list (first with most votes, last with least):')
logger.info(ranking)
# smax = sums[ranking[numwin - 1]]
# check for tie condition
# ties = np.where(sums == smax)[0]
# tienum = len(ties)
# logger.info('%s ties found' % tienum)
winners = []
# Loop through number of winners.
for j in range(numwin):
winners_left = numwin - len(winners)
candidate = ranking[j]
cvotes = sums[candidate]
logger.info('candidate #%d' % candidate)
logger.info(' - number of votes = %d' % cvotes)
ties = np.where(cvotes == sums)[0]
tienum = len(ties)
if tienum > winners_left:
logger.info(' - tie detected for winner slot #%d out of %d' % (j, numwin))
logger.info(' - tie candidates = %s', ties)
logger.info('winners=%s', winners)
return winners, ties, sums
logger.info(' - winner detected for candidate %d' % candidate)
winners.append(candidate)
# After winner has been added to list, zero out his votes
sums[candidate] = 0
logger.info('winners=%s', winners)
return winners, np.array([]), sums_out
```
#### File: votemethods/tests/test_irv.py
```python
import unittest
import logging
import numpy as np
from votesim.votemethods import irv
import votesim
logger = logging.getLogger(__name__)
class TestIRV(unittest.TestCase):
def test_tie(self):
print('TEST TIE #1')
d = [[1, 2,],
[2, 1,]]
winners1, ties1, h = irv.irv_stv(d, 1)
winners2, ties2, output = irv.irv(d, 1)
print('winners1', winners1)
print('winners2', winners2)
print('ties1', ties1)
print('ties2', ties2)
self.assertTrue(len(winners1) == 0)
self.assertTrue(len(winners2) == 0)
self.assertTrue(
np.all(np.in1d(ties1, ties2))
)
self.assertEqual(len(winners1), 0)
self.assertEqual(len(ties1), 2)
self.assertTrue(0 in ties1)
self.assertTrue(1 in ties1)
winners2, ties2, o = irv.irv(d, 1)
return
def test_tie2(self):
print('TEST TIE #2')
d = [[1,2,3],
[1,3,2]]
# winners, ties, h = irv.IRV_STV(d, 2)
winners, ties, h = irv.irv_stv(d, 2)
print('winners', winners)
print('ties', ties)
self.assertTrue(0 in winners)
return
def test_eliminate(self):
d = [[1, 2, 3, 4],
[1, 3, 2, 4],
[3, 2, 1, 4],
[2, 3, 1, 4],
[3, 0, 2, 1]]
d = np.array(d)
first_round = [
[1, 0, 2, 3],
[1, 0, 2, 3],
[2, 0, 1, 3],
[2, 0, 1, 3],
[3, 0, 2, 1],
]
second_round = [
[1, 0, 2, 0],
[1, 0, 2, 0],
[2, 0, 1, 0],
[2, 0, 1, 0],
[2, 0, 1, 0]]
third_round = [
[0, 0, 1, 0],
[0, 0, 1, 0],
[0, 0, 1, 0],
[0, 0, 1, 0],
[0, 0, 1, 0]]
first_round = np.array(first_round)
second_round = np.array(second_round)
logger.info('start votes\n%s', d)
logger.info(d)
d1, loser, ties, h = irv.irv_eliminate(d)
logger.info('1st round results\n%s', d1)
self.assertTrue(np.all(first_round == d1))
d2, loser, ties, h = irv.irv_eliminate(d1)
logger.info('2nd round results\n%s', d2)
self.assertTrue(np.all(second_round == d2))
d3, loser, ties, h = irv.irv_eliminate(d2)
logger.info('3rd round results\n%s', d3)
self.assertTrue(np.all(third_round == d3))
w, t, h = irv.irv_stv(d, numwin=1)
self.assertIn(2, w)
return
def test_stv(self):
print('TEST STV')
d = [[1, 2, 3, 4],
[1, 3, 2, 4],
[3, 2, 1, 4],
[2, 3, 1, 4],
[3, 0, 2, 1]]
d = np.array(d)
winners, ties, h = irv.irv_stv(d, 2)
self.assertTrue(0 in winners)
self.assertTrue(2 in winners)
return
def test_RCVReorder(self):
print('\nTEST RCV ReOrder')
a = [[1, 5, 2, 0, 4, 10],
[2, 3, 4, 5, 6, 7],
[0, 0, 0, 5, 6, 7]]
a = np.array(a)
b = irv.rcv_reorder(a)
correct = [
[1, 4, 2, 0, 3, 5],
[1, 2, 3, 4, 5, 6],
[0, 0, 0, 1, 2, 3]
]
correct = np.array(correct)
compare = np.all(correct == b)
self.assertTrue(compare)
return
def test_wiki(self):
"""
Test example from wikipedia, retrieved Dec 19, 2019.
Correct results taken from wikipedia (winner knoxville K)
https://en.wikipedia.org/wiki/Instant-runoff_voting
"""
# M N C K
d = [[1, 2, 3, 4]]*42 + \
[[4, 1, 2, 3]]*26 + \
[[4, 3, 1, 2]]*15 + \
[[4, 3, 2, 1]]*17
d = np.array(d)
winners, ties, output = irv.irv_stv(d, 1)
history = output['round_history']
# print('test wiki')
# print('winners=\n', winners)
# print('history=\n', history)
#
correct_history = [[42, 26, 15, 17],
[42, 26, 0, 32],
[42, 0, 0, 58]]
correct_history = np.array(correct_history)
self.assertTrue(np.all(correct_history == history))
self.assertEqual(winners[0], 3)
def test_irv2(self):
success_count = 0
fail_count = 0
# print('test_irv2 -- compared STV vs IRV')
rstate = np.random.RandomState()
for seed in range(60):
rstate.seed(seed)
ratings = rstate.rand(100, 5)
ranks = votesim.votemethods.tools.score2rank(ratings)
# print(seed)
w1, t1, o1 = irv.irv_stv(ranks)
w2, t2, o2 = irv.irv(ranks)
w1 = np.sort(w1)
w2 = np.sort(w2)
t1 = np.sort(t1)
t2 = np.sort(t2)
# print('Seed # %s' % seed)
success = np.all(w1 == w2) & np.all(t1 == t2)
# print('Methods same result?', success)
if success:
success_count += 1
else:
fail_count += 1
#
# print('FAILED METHOD IRV INPUT')
# print(ranks)
# print('\n\nRUNNING STV RESULTS')
#
# print('\n\nRUNNING IRV RESULTS')
#
# print('history')
# print(o1)
# print(o2['round_history'])
# print('winners=%s', w1)
# print('ties=%s', t1)
# print('winners=%s', w2)
# print('ties=%s', t2)
#
# print('# of successes =', success_count)
# print('# of fails =', fail_count)
self.assertTrue(fail_count == 0)
return
def test_irv_tie3(self):
d = [[5,2,1,4,3],
[3,5,2,1,4],
[2,3,1,5,4],
[2,3,5,1,4],
[5,4,1,3,2],
[3,2,5,4,1],
[1,4,3,5,2],
[5,2,3,1,4],
[3,5,4,2,1],
[1,4,3,2,5],
]
d = np.array(d)
w2, t2, o2 = irv.irv(d)
def test_stv_tie3(self):
d = [[5,2,1,4,3],
[3,5,2,1,4],
[2,3,1,5,4],
[2,3,5,1,4],
[5,4,1,3,2],
[3,2,5,4,1],
[1,4,3,5,2],
[5,2,3,1,4],
[3,5,4,2,1],
[1,4,3,2,5],
]
d = np.array(d)
w2, t2, o2 = irv.irv_stv(d)
def test_stv_tie4(self):
d = [[2,4,3,1,5]
,[5,2,1,4,3]
,[1,3,4,5,2]
,[1,3,5,2,4]
,[3,1,2,4,5]
,[2,5,3,1,4]
,[2,1,4,3,5]
,[3,1,5,2,4]
,[1,2,3,5,4]
,[3,2,5,4,1]]
d = np.array(d)
w2, t2, o2 = irv.irv_stv(d)
if __name__ == '__main__':
pass
# logging.basicConfig()
# logger = logging.getLogger('votesim.votemethods.irv')
# logger.setLevel(logging.DEBUG)
t = TestIRV()
# t.test_tie()
# unittest.main(exit=False)
# a = TestIRV()
# a.test_eliminate()
# a.test_irv2()
t.test_wiki()
# a.test_stv_tie4()
```
#### File: votesim/votemethods/voterunner.py
```python
import pdb
import logging
import numpy as np
from votesim.votemethods import tools
from votesim.votemethods import (
ranked_methods,
rated_methods,
scored_methods,
vote_methods,
# all_methods,
)
import traceback
__all__ = [
# 'ranked_methods',
# 'rated_methods',
# 'scored_methods',
# 'vote_methods',
# 'all_methods',
'eRunner',
]
logger = logging.getLogger(__name__)
# ranked_methods = {}
# ranked_methods['smith_minimax'] = condorcet.smith_minimax
# ranked_methods['ranked_pairs'] = condorcet.ranked_pairs
# ranked_methods['irv'] = irv.irv
# ranked_methods['irv_stv'] = irv.irv_stv
# ranked_methods['top_two'] = irv.top2runoff
# rated_methods = {}
# rated_methods['approval100'] = score.approval100
# rated_methods['approval75'] = score.approval75
# rated_methods['approval50'] = score.approval50
# rated_methods['approval25'] = score.approval25
# rated_methods['score5'] = score.score5
# rated_methods['score10'] = score.score10
# rated_methods['star5'] = score.star5
# rated_methods['star10'] = score.star10
# scored_methods = {}
# scored_methods['rrv'] = score.reweighted_range
# scored_methods['seq_monroe'] = score.sequential_monroe
# scored_methods['score'] = score.score
# scored_methods['star'] = score.star
# scored_methods['maj_judge'] = score.majority_judgment
# scored_methods['smith_score'] = condorcet.smith_score
# vote_methods = {}
# vote_methods['plurality'] = plurality.plurality
# all_methods = {}
# all_methods.update(ranked_methods)
# all_methods.update(scored_methods)
# all_methods.update(rated_methods)
# all_methods.update(vote_methods)
class eRunner(object):
"""Run the election & obtain results. For ties, randomly choose winner.
Parameters
----------
etype : str
Name of election type.
Mutually exclusive with `method` and `btype`
Supports the following election types:
- 'approval100' - approval voting, 100% acceptance of nonzero score
- 'approval50' - approval voting, 50% acceptance of nonzero score
- 'irv' -- Instant runoff.
- 'irv_stv' -- Instant runoff with single-transferable vote.
- 'rrv' -- Reweighted range voting.
- 'plurality' -- Traditional plurality & Single Non-Transferable Vote.
- 'sequential_monroe' -- PR scored method
- 'score' -- traditional score voting
- 'smith_minimax' - Smith minimax condorcet voting
- 'star' -- STAR voting variant of score
method : func
Voting method function. Takes in argument `data` array shaped (a, b)
for (voters, candidates) as well as additional kwargs.
Mutually exclusive with `etype`.
>>> out = method(data, numwin=self.numwinners, **kwargs)
btype : str
Voting method's ballot type.
Mutually exclusive with `etype`, use with `method`.
- 'rank' -- Use candidate ranking from 1 (first place) to n (last place), with 0 for unranked.
- 'score' -- Use candidate integer rating/scored method.
- 'vote' -- Use traditional, single-selection vote. Vote for one (1), everyone else zero (0).
- 'rating' -- Use raw ratings data.
numwinners : int
Number of winners to consider. Defaults to 1.
ballots : array shape (a, b)
Ballots to use in election.
seed : int or None or array-like
Seed to input into numpy RandomState.
rstate : RandomState
numpy.random.RandomState object
Attributes
----------
winners : array shape (c,)
Winning candidate indices, including broken ties.
winners_no_ties : array shaped (e,)
Winning candidate indices without including randomly broken ties
ties : array shape (d,)
Tie candidate indices
output : dict
Election output
ballots : array shape (a, b)
Voter ballots
btype : str
Ballot type
"""
def __init__(self,
etype=None, method=None, btype=None,
numwinners=1, ballots=None,
seed=None, rstate=None, kwargs=None):
logger.debug('eRunner: etype=%s, method=%s', etype, method)
if ballots is None:
raise ValueError("ballots keyword must be specified")
ballots = np.copy(ballots)
if rstate is None:
rstate = np.random.RandomState(seed=seed)
if kwargs is None:
kwargs = {}
## Run canned election systems with prefilled parameters
if method is None:
if etype in ranked_methods:
btype = 'rank'
method = ranked_methods[etype]
elif etype in scored_methods:
btype = 'score'
method = scored_methods[etype]
elif etype in rated_methods:
btype = 'rating'
method = rated_methods[etype]
elif etype in vote_methods:
btype = 'vote'
method = vote_methods[etype]
else:
raise ValueError('%s type not a valid voting method.' % etype)
# Check if 'seed' is a keyword argument and therefore the voting
# method may need random number generation.
argnum = method.__code__.co_argcount
fargs = method.__code__.co_varnames[0 : argnum]
if 'rstate' in fargs:
kwargs['rstate'] = rstate
elif 'seed' in fargs:
kwargs['seed'] = seed
if 'numwin' in fargs:
kwargs['numwin'] = numwinners
# Check for empty ballot
if np.all(ballots == 0):
winners = np.array([])
ties = np.arange(ballots.shape[1])
output = None
else:
# Run the election method
try:
out1 = method(ballots, **kwargs)
except Exception as exc:
logging.error('Voting method %s failed.', method)
logging.error('Ballots = ')
logging.error(ballots)
logging.error(traceback.format_exc())
raise exc
winners = out1[0]
ties = out1[1]
try:
output = out1[2]
except IndexError:
output = None
######################################################################
self.winners_no_ties = winners
try:
winners = tools.handle_ties(winners, ties, numwinners, rstate=rstate)
except:
pdb.set_trace()
# winners = tools.handle_ties(winners, ties, numwinners, rstate=rstate)
self.etype = etype
self.winners = winners
self.ties = ties
self.output = output
self.ballots = ballots
self.btype = btype
self._method = method
self._kwargs = kwargs
return
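# Illustrative usage sketch (not part of the original module): running a
# 'score5' election straight through eRunner. The ballot array is hypothetical
# and assumes raw ratings in the 0-1 range, shaped (voters, candidates).
#
# >>> import numpy as np
# >>> ballots = np.array([[1.0, 0.0, 0.2],
# ...                     [0.8, 0.2, 0.0],
# ...                     [0.0, 1.0, 0.4]])
# >>> runner = eRunner(etype='score5', ballots=ballots, seed=0)
# >>> runner.winners  # candidate 0 carries the highest total rating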
``` |
{
"source": "johnh865/globalcache",
"score": 3
} |
#### File: globalcache/globalcache/__init__.py
```python
from globalcache.cache import Cache, Settings
``` |
{
"source": "johnhalbert/oso",
"score": 2
} |
#### File: expenses-flask/app/authorization.py
```python
from dataclasses import dataclass
from flask import current_app, g, request, Blueprint
from oso import Oso, OsoError
from oso.extras import Http
from werkzeug.exceptions import BadRequest, Forbidden
bp = Blueprint("authorization", __name__)
@bp.before_app_request
def authorize_request():
"""Authorize the incoming request"""
http = Http(path=request.path)
if not current_app.oso.is_allowed(g.current_user, request.method, http):
return Forbidden("Not Authorized!")
def authorize(action, resource):
"""Authorize whether the current user can perform `action` on `resource`"""
if current_app.oso.is_allowed(g.current_user, action, resource):
return resource
else:
raise Forbidden("Not Authorized!")
def init_oso(app):
from .expense import Expense
from .organization import Organization
from .user import Actor, Guest, User
oso = Oso()
oso.register_class(Actor)
oso.register_class(Guest)
oso.register_class(User)
oso.register_class(Expense)
oso.register_class(Organization)
for policy in app.config.get("OSO_POLICIES", []):
oso.load_file(policy)
app.oso = oso
```
#### File: django-oso/django_oso/partial.py
```python
from django.db.models import Q, Model
from django.apps import apps
from polar.expression import Expression
from polar.variable import Variable
from polar.exceptions import UnsupportedError, UnexpectedPolarTypeError
from .oso import polar_model_name, django_model_name
def partial_to_query_filter(partial: Expression, model: Model, **kwargs):
"""
Convert a partial expression to a django query ``Q`` object.
Example expression structure::
Expression(And, [
Expression(Isa, [
Variable('_this'),
Pattern(test_app::Post, {})]),
Expression(Isa, [
Variable('_this'),
Pattern(test_app::Post, {})]),
Expression(Unify, [
False,
Expression(
Dot, [
Variable('_this'),
'is_private'])])])
Output::
Q(is_private=False)
"""
q = translate_expr(partial, model, **kwargs)
if q is None:
return Q()
return q
COMPARISONS = {
"Unify": lambda q, f, v: Q(**{f: v}),
"Eq": lambda q, f, v: Q(**{f: v}),
"Neq": lambda q, f, v: ~Q(**{f: v}),
"Geq": lambda q, f, v: Q(**{f"{f}__gte": v}),
"Gt": lambda q, f, v: Q(**{f"{f}__gt": v}),
"Leq": lambda q, f, v: Q(**{f"{f}__leq": v}),
"Lt": lambda q, f, v: Q(**{f"{f}__lt": v}),
}
def translate_expr(expr: Expression, model: Model, **kwargs):
"""Translate a Polar expression to a Django Q object."""
assert isinstance(expr, Expression), "expected a Polar expression"
if expr.operator in COMPARISONS:
return compare_expr(expr, model, **kwargs)
elif expr.operator == "And":
return and_expr(expr, model, **kwargs)
elif expr.operator == "Isa":
return isa_expr(expr, model, **kwargs)
elif expr.operator == "In":
return in_expr(expr, model, **kwargs)
else:
raise UnsupportedError(f"Unimplemented partial operator {expr.operator}")
def isa_expr(expr: Expression, model: Model, **kwargs):
(left, right) = expr.args
for attr in dot_op_path(left):
model = getattr(model, attr).field.related_model
constraint_type = apps.get_model(django_model_name(right.tag))
if not issubclass(model, constraint_type):
# Always false.
return Q(pk__in=[])
else:
# Always true.
return None
def and_expr(expr: Expression, model: Model, **kwargs):
assert expr.operator == "And"
q = Q()
for arg in expr.args:
expr = translate_expr(arg, model, **kwargs)
if expr:
q = q & expr
return q
def compare_expr(expr: Expression, _model: Model, path=(), **kwargs):
q = Q()
(left, right) = expr.args
left_path = dot_op_path(left)
if left_path:
return COMPARISONS[expr.operator](q, "__".join(path + left_path), right)
else:
if isinstance(right, Model):
right = right.pk
else:
raise UnsupportedError(f"Unsupported comparison: {expr}")
return COMPARISONS[expr.operator](q, "__".join(path + ("pk",)), right)
def in_expr(expr: Expression, model: Model, path=(), **kwargs):
assert expr.operator == "In"
q = Q()
(left, right) = expr.args
right_path = dot_op_path(right)
assert right_path, "RHS of in must be a dot lookup"
right_path = path + right_path
if isinstance(left, Expression):
return translate_expr(left, model, path=right_path, **kwargs)
else:
return COMPARISONS["Unify"](q, "__".join(right_path), left)
# TODO (dhatch): Move this helper into base.
def dot_op_path(expr):
"""Get the path components of a lookup.
The path is returned as a tuple.
_this.created_by => ('created_by',)
_this.created_by.username => ('created_by', 'username')
Empty tuple is returned if input is not a dot operation.
"""
if not (isinstance(expr, Expression) and expr.operator == "Dot"):
return ()
assert len(expr.args) == 2
if expr.args[0] == Variable("_this"):
return (expr.args[1],)
return dot_op_path(expr.args[0]) + (expr.args[1],)
```
#### File: sqlalchemy-oso/sqlalchemy_oso/auth.py
```python
from oso import Oso
from polar.partial import Partial, TypeConstraint
from sqlalchemy.orm.query import Query
from sqlalchemy.orm.session import Session
from sqlalchemy.orm.util import class_mapper
from sqlalchemy.sql import expression as sql
from sqlalchemy_oso.partial import partial_to_filter
def null_query(session: Session, model) -> Query:
"""Return an intentionally empty query."""
# TODO (dhatch): Make this not hit the database.
return session.query(model).filter(sql.false())
def register_models(oso: Oso, base):
"""Register all models in model base class ``base`` with oso as classes."""
# TODO (dhatch): Not sure this is legit b/c it uses an internal interface?
for name, model in base._decl_class_registry.items():
if name == "_sa_module_registry":
continue
oso.register_class(model)
def authorize_model(oso: Oso, actor, action, session: Session, model) -> Query:
"""Return a query containing filters that apply the policy to ``model``.
Executing this query will return only authorized objects. If the request is
not authorized, a query that always contains no result will be returned.
:param oso: The oso class to use for evaluating the policy.
:param actor: The actor to authorize.
:param action: The action to authorize.
:param session: The SQLAlchemy session.
:param model: The model to authorize, must be a SQLAlchemy model.
"""
filters = authorize_model_filter(oso, actor, action, session, model)
if filters is None:
return session.query(model)
return session.query(model).filter(filters)
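# Illustrative usage sketch (not part of the original module): a typical call
# site, assuming `Post` is a SQLAlchemy model already registered with
# register_models and a policy has been loaded into `oso`; the variable names
# are hypothetical.
#
# >>> query = authorize_model(oso, current_user, "read", session, Post)
# >>> readable_posts = query.all()  # only rows the policy allows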
def authorize_model_filter(oso: Oso, actor, action, session: Session, model):
"""Return SQLAlchemy expression that applies the policy to ``model``.
Executing this query will return only authorized objects. If the request is
not authorized, a query that always contains no result will be returned.
:param oso: The oso class to use for evaluating the policy.
:param actor: The actor to authorize.
:param action: The action to authorize.
:param session: The SQLAlchemy session.
:param model: The model to authorize, must be a SQLAlchemy model.
"""
# TODO (dhatch): Check that model is a model.
# TODO (dhatch): More robust name mapping?
assert class_mapper(model), f"Expected a model; received: {model}"
partial_resource = Partial("resource", TypeConstraint(model.__name__))
results = oso.query_rule("allow", actor, action, partial_resource)
combined_filter = None
has_result = False
for result in results:
has_result = True
resource_partial = result["bindings"]["resource"]
filter = partial_to_filter(resource_partial, session, model)
if combined_filter is None:
combined_filter = filter
elif filter is not None:
combined_filter = combined_filter | filter
if not has_result:
return sql.false()
return combined_filter
```
#### File: sqlalchemy-oso/tests/test_partial.py
```python
from polar.expression import Expression
from polar.variable import Variable
from sqlalchemy_oso.partial import dot_op_path
def test_dot_op_path():
single = Expression("Dot", [Variable("_this"), "created_by"])
assert dot_op_path(single) == ["created_by"]
double = Expression("Dot", [single, "username"])
assert dot_op_path(double) == ["created_by", "username"]
triple = Expression("Dot", [double, "first"])
assert dot_op_path(triple) == ["created_by", "username", "first"]
``` |
{
"source": "johnhalloran321/crux-toolkit",
"score": 4
} |
#### File: crux-toolkit/bin/ppnet.py
```python
import sys
import os
import subprocess
PEPTIDE_COLUMN = "sequence"
PROTEIN_COLUMN = "protein id"
USAGE = """USAGE: ppnet.py [options] <file> <root>
Takes as input a Crux text output file, and produces as output a
series of plots showing connected components in the bipartite graph
that connects peptides to proteins.
The input file is tab-delimited, with peptides in a column named
"%s" and comma-delimited lists of protein IDs in a column
named "%s". Specifying "-" will read from stdin.
The output is a series of pairs of files with names of the form
<root>.<int>.gvz and <root>.<int>.png, where <root> is given on the
command line and <int> is an ascending integer. The gvz file
contains a graphviz description of one component of the graph, and
the png file contains a picture of the graph. An HTML file named
<root>.html is also created, showing all of the PNG images.
Options:
--min-nodes <int> Skip components with fewer than <int> nodes.
--min-peptides <int> Skip components with fewer than <int> peptides.
--min-proteins <int> Skip components with fewer than <int> proteins.
--protein-mapping <file> Convert protein names.
""" % (PEPTIDE_COLUMN, PROTEIN_COLUMN)
# Define the graph data structure.
# N.B. These are global variables.
edges = {} # Key = peptide sequence or protein ID, value = list of
# peptide sequences or protein IDs
isPrinted = {}# Key = peptide sequence, protein ID or (peptide, protein) pair.
# Value = True
###############################################################################
# Add one edge to the graph. Each edge is represented twice (once for each
# direction) with a node as the key and a list of nodes as the value.
def addEdge(node1, node2):
global edges
if (edges.has_key(node1)):
edges[node1].append(node2)
else:
edges[node1] = [node2]
if (edges.has_key(node2)):
edges[node2].append(node1)
else:
edges[node2] = [node1]
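# Illustrative sketch (not part of the original script): after a single call
# such as addEdge("PEPTIDEK", "proteinA"), the adjacency dictionary stores the
# edge in both directions:
#
#   edges == {"PEPTIDEK": ["proteinA"], "proteinA": ["PEPTIDEK"]}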
###############################################################################
def printConnectedPeptides(startProtein, graphvizString, stats):
global edges, isPrinted
if (startProtein not in isPrinted):
graphvizString = "%s\"%s\";\n" % (graphvizString, startProtein)
isPrinted[startProtein] = True # Mark this protein as printed.
stats[0] += 1
for peptide in edges[startProtein]:
if ((peptide, startProtein) not in isPrinted):
graphvizString = "%s\"%s\" -- \"%s\";\n" % (graphvizString,
startProtein, peptide)
isPrinted[(peptide, startProtein)] = True # Mark this edge as printed.
stats[2] += 1
graphvizString = printConnectedProteins(peptide, graphvizString, stats)
return(graphvizString)
###############################################################################
def printConnectedProteins(startPeptide, graphvizString, stats):
global edges, isPrinted
if (startPeptide not in isPrinted):
graphvizString = "%s\"%s\";\n" % (graphvizString, startPeptide)
isPrinted[startPeptide] = True # Mark this peptide as printed.
stats[1] += 1
for proteinID in edges[startPeptide]:
if ((startPeptide, proteinID) not in isPrinted):
graphvizString = "%s\"%s\" -- \"%s\";\n" % (graphvizString,
proteinID, startPeptide)
isPrinted[(startPeptide, proteinID)] = True # Mark this edge as printed.
stats[2] += 1
graphvizString = \
printConnectedPeptides(proteinID, graphvizString, stats)
return(graphvizString)
#############################################################################
# Run a command with error checking.
def runCommand(command):
sys.stderr.write("RUN: %s\n" % command)
try:
returnCode = subprocess.call(command, shell=True)
if (returnCode != 0):
sys.stderr.write("Child was terminated by signal %d\n" % -returnCode)
sys.exit(1)
except OSError, e:
sys.stderr.write("Execution failed: %s\n" % e)
sys.exit(1)
###############################################################################
# MAIN
###############################################################################
def main():
global USAGE, PROTEIN_COLUMN, PEPTIDE_COLUMN
global edges, isPrinted
# Parse the command line.
minNodes = 1
minPeptides = 1
minProteins = 1
proteinMappingFileName = ""
sys.argv = sys.argv[1:]
while (len(sys.argv) > 2):
nextArg = sys.argv[0]
sys.argv = sys.argv[1:]
if (nextArg == "--min-nodes"):
minNodes = int(sys.argv[0])
sys.argv = sys.argv[1:]
elif (nextArg == "--min-peptides"):
minPeptides = int(sys.argv[0])
sys.argv = sys.argv[1:]
elif (nextArg == "--min-proteins"):
minProteins = int(sys.argv[0])
sys.argv = sys.argv[1:]
elif (nextArg == "--protein-mapping"):
proteinMappingFileName = sys.argv[0]
sys.argv = sys.argv[1:]
else:
sys.stderr.write("Invalid option (%s).\n" % nextArg)
sys.exit(1)
if (len(sys.argv) != 2):
sys.stderr.write(USAGE)
sys.exit(1)
inputFileName = sys.argv[0]
outputFileRoot = sys.argv[1]
# If provided, read the protein mapping into a dictionary.
proteinMapping = {} # Key = old name, value = new name
if (proteinMappingFileName != ""):
proteinMappingFile = open(proteinMappingFileName, "r")
for line in proteinMappingFile:
(oldName, newName) = line.rstrip().split()
proteinMapping[oldName] = newName
proteinMappingFile.close()
# Read the header line and identify the target columns.
if (inputFileName == "-"):
inputFile = sys.stdin
else:
inputFile = open(inputFileName, "r")
headerLine = inputFile.readline().rstrip()
colIndex = 0
peptideColumn = -1
proteinColumn = -1
for column in headerLine.split("\t"):
if (column == PEPTIDE_COLUMN):
peptideColumn = colIndex
elif (column == PROTEIN_COLUMN):
proteinColumn = colIndex
colIndex += 1
if (peptideColumn == -1):
sys.stderr.write("Cannot find column with header %s.\n" % PEPTIDE_COLUMN)
sys.exit(1)
sys.stderr.write("Reading peptides from column %d.\n" % peptideColumn)
if (proteinColumn == -1):
sys.stderr.write("Cannot find column with header %s.\n" % PROTEIN_COLUMN)
sys.exit(1)
sys.stderr.write("Reading protein IDs from column %d.\n" % proteinColumn)
# Nodes of the graph.
peptides = {} # Key = peptide sequence, value = True
proteins = {} # Key = protein ID, value = True
# N.B. Edges are stored in a global variable.
# Read the graph from the input file.
lineNum = 0
numEdges = 0
for line in inputFile:
line = line.rstrip()
words = line.split("\t")
peptideSequence = words[peptideColumn]
proteinIDs = words[proteinColumn].split(",")
peptides[peptideSequence] = False
for proteinID in proteinIDs:
proteinID = proteinID.split("(")[0] # Get rid of (<int>) on each ID.
if (proteinID in proteinMapping):
proteinID = proteinMapping[proteinID]
proteins[proteinID] = False
addEdge(peptideSequence, proteinID)
numEdges += 1
lineNum += 1
inputFile.close()
sys.stderr.write("Read %d peptides, %d proteins and %d edges from %s.\n"
% (len(peptides), len(proteins), numEdges, inputFileName))
# Initialize the HTML
htmlFileName = "%s.html" % outputFileRoot
htmlFile = open(htmlFileName, "w")
htmlFile.write("<html><body>\n")
graphNumber = 0
for startProtein in proteins:
if (startProtein not in isPrinted):
sys.stderr.write("Starting with %s.\n" % startProtein)
# Initialize a counter of number of proteins, peptides, edges.
stats = [0, 0, 0]
# Create the graphviz-formated graph.
graphvizString = "graph G {\n"
graphvizString = printConnectedPeptides(startProtein,
graphvizString, stats)
graphvizString = "%s}\n" % graphvizString
# Is this component big enough?
if ((stats[0] + stats[1] >= minNodes) and
(stats[0] >= minProteins) and
(stats[1] >= minPeptides)):
# Print the graphviz file.
graphFileName = "%s.%d.gvz" % (outputFileRoot, graphNumber)
graphFile = open(graphFileName, "w")
graphFile.write(graphvizString)
graphFile.close()
# Create the PNG file.
pngFileName = "%s.%d.png" % (outputFileRoot, graphNumber)
runCommand("dot -Tpng %s > %s" % (graphFileName, pngFileName))
# Add it to the HTML page.
htmlFile.write("<img src=\"%s\"><br>\n" % pngFileName)
message = "%d: %d proteins, %d peptides, %d edges.\n" % \
(graphNumber, stats[0], stats[1], stats[2])
htmlFile.write("%s<hr></hr>\n" % message)
sys.stderr.write(message)
graphNumber += 1
else:
sys.stderr.write("Skipping component with %d proteins and %d peptides.\n"
% (stats[0], stats[1]))
sys.stderr.write("Printed %d graphs.\n" % graphNumber)
htmlFile.write("</body></html>\n")
htmlFile.close()
if __name__ == "__main__":
main()
```
#### File: bin/python/compare_score.py
```python
from pylab import *
import os.path
import commands
import sys
from optparse import OptionParser
from parse_sqt_file import SqtObject
from parse_sqt_file import Spectrum
from parse_sqt_file import Peptide
#-------------------
def plot_compare_data(result_array, score_type="sp"):
""" plots the results of two compared as a scattered plot """
prefix = "Sequest-vs-CRUX-for-" + score_type
title(prefix, size=20)
xlabel("Sequest", size=15)
ylabel("CRUX", size=15)
# plot each point
#for (sequest, crux) in result_array:
scatter(result_array[0], result_array[1])
# plot y=x
t = range(0, 300)
y = t
plot(t, y, color='r')
#legend(loc='lower right')
axis([0, 300, 0, 300])
xticks(size=10)
yticks(size=10)
savefig(prefix + ".eps")
savefig(prefix + ".png")
#-------------------
def plot_compare_rank(result_array, score_type="sp"):
prefix = "Sequest_Xcore_rank-vs-CRUX-for-" + score_type
title(prefix, size=20)
xlabel("Sequest Xcore rank", size=15)
ylabel("CRUX", size=15)
# plot each point
#for (sequest, crux) in result_array:
scatter(result_array[0], result_array[1])
# plot y=x
#t = range(0, 300)
#y = t
#plot(t, y, color='r')
#legend(loc='lower right')
axis([0, 300, 0, 300])
xticks(size=10)
yticks(size=10)
savefig(prefix + ".eps")
savefig(prefix + ".png")
#-------------------
# Process command line options
usage = "Usage: compare_score <score_type> <sqt_file> <ms2_file>"
option_parser = OptionParser(usage)
(options, args) = option_parser.parse_args()
if not len(args) == 3:
print usage
sys.exit(1)
#set sp_score type
score_type = args[0]
# add more score
if not (score_type == "sp" or score_type == "xcorr"):
print usage
sys.exit(1)
#check if sqt file and ms2 can be accesseed
sqt_file = args[1]
ms2_file = args[2]
sqt_object = SqtObject(sqt_file)
if sqt_object == None:
sys.exit(1)
result_array = ([],[])
result_array2 = ([],[])
totalCount = 0
for working_spectrum in sqt_object.spectrums:
if totalCount >= 500:
break
scanNum = working_spectrum.fields["scan"]
charge = working_spectrum.fields["charge"]
for working_peptide in working_spectrum.peptides:
(exit_code, result) = \
commands.getstatusoutput("score_peptide_spectrum " + \
"--charge " + `charge` + " --score-type " + score_type + " " + \
working_peptide.components["sequence"] + " " + \
`scanNum` + " " + \
ms2_file
) #add in parameters
if exit_code != 0:
print "failed to run score_peptide_spectrum"
sys.exit(1)
#(real result, result) store in result array
#for line in result:
# if line.startswith('I'):
# print line
# continue
# elif line.startswith('S'):
print result
result = result.split(': ')
#print result
#if working_peptide.components["xcore_rank"] < 10:
#if abs(working_peptide.components[score_type] - float(result[1])) > 10:
result_array[0].append(working_peptide.components[score_type])
result_array[1].append(float(result[1]))
# result_array2[0].append(working_peptide.components["xcore_rank"])
# result_array2[1].append(float(result[2]))
#totalCount += 1
if abs(working_peptide.components[score_type] - float(result[1])) > 0.09:
print "Sequest: %.1f, CRUX: %.1f, sequence: %s" % (working_peptide.components[score_type], float(result[1]),working_peptide.components["sequence"])
#print "Scan number: %s, charge: %s" % scanNum, charge
totalCount += 1
if totalCount % 10 == 0:
print "totalCount: %d" % totalCount
#break
#plot the data
plot_compare_data(result_array, score_type)
```
#### File: bin/python/crux-generate-decoys.py
```python
import sys
import random
# Minimum tryptic peptide length.
MIN_PEPTIDE = 7
# Maximum number of times to attempt to shuffle each peptide.
NUM_SHUFFLES = 10
USAGE = """USAGE: crux-generate-decoys.py <file> <output>
This program takes as input a protein FASTA file and produces as
output four files:
- <output>.peptide.target.txt: A list of tryptic peptides (using
the simple KR cleavage rule) derived from the proteins.
- <output>.peptide.decoy.txt: A matched list of shuffled tryptic
peptides, where the N-term and C-term amino acids remain in
place.
- <output>.protein.decoy.fa: A decoy protein FASTA file that
matches the input file, but with shuffled peptides in place of
the original peptides.
- <output>.log.txt: A log file with various information about the
run.
The program attempts to ensure that there are no duplicate peptides
in the union of the target and decoy peptide lists. Peptides for
which a decoy is not successfully created (e.g., homopolymers) are
indicated in the log file. Peptides of %d or fewer amino acids are
not shuffled.
""" % MIN_PEPTIDE
##############################################################################
def read_fasta_sequence (fasta_file):
# Read 1 byte.
first_char = fasta_file.read(1)
# If it's empty, we're done.
if (first_char == ""):
return(["", ""])
# If it's ">", then this is the first sequence in the file.
elif (first_char == ">"):
line = ""
else:
line = first_char
# Read the rest of the header line.
line = line + fasta_file.readline()
# Get the rest of the ID.
words = line.split()
if (len(words) == 0):
sys.stderr.write("No words in header line (%s)\n" % line)
sys.exit(1)
id = words[0]
# Read the sequence, through the next ">".
first_char = fasta_file.read(1)
sequence = ""
while ((first_char != ">") and (first_char != "")):
if (first_char != "\n"): # Handle blank lines.
line = fasta_file.readline()
sequence = sequence + first_char + line
first_char = fasta_file.read(1)
# Remove EOLs.
clean_sequence = ""
for letter in sequence:
if (letter != "\n"):
clean_sequence = clean_sequence + letter
sequence = clean_sequence
# Remove spaces.
clean_sequence = ""
for letter in sequence:
if (letter != " "):
clean_sequence = clean_sequence + letter
return([id, sequence.upper()])
##############################################################################
# Write a message both to stderr and a globally defined log file.
def log(message):
sys.stdout.write(message)
logFile.write(message)
##############################################################################
# Convert an amino acid sequence into a list of tryptic peptides,
# cleaving at every K or R (irrespective of P).
def cleaveTryptically(sequence):
returnValue = []
peptideStart = 0
for position in range(0, len(sequence)):
if (sequence[position] == "K") or (sequence[position] == "R"):
returnValue.append(sequence[peptideStart:position+1])
peptideStart = position+1
returnValue.append(sequence[peptideStart:]) # trailing peptide after the last cleavage site
#log("%s -> %s\n" % (sequence, "|".join(returnValue)))
return(returnValue)
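# Illustrative example (not part of the original script):
#   cleaveTryptically("MKTAYIAKQRLE") -> ["MK", "TAYIAK", "QR", "LE"]
# Note that a sequence ending in K or R also yields a trailing empty string.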
##############################################################################
### MAIN
##############################################################################
# Parse the command line.
if (len(sys.argv) != 3):
sys.stderr.write(USAGE)
sys.exit(1)
inputFileName = sys.argv[1]
root = sys.argv[2]
# Open the log file for output.
logFileName = "%s.log.txt" % root
logFile = open(logFileName, "w")
# Ordered list of protein IDs.
proteinIDs = []
proteinSeqs = {} # Key = ID, value = list of tryptic peptides
targetPeptides = {} # Key = peptide, value = True
# Read the file sequence by sequence.
inputFile = open(inputFileName, "r")
[id, sequence] = read_fasta_sequence(inputFile)
while (id != ""):
proteinIDs.append(id)
proteinSeqs[id] = cleaveTryptically(sequence)
# Read the target peptides into a hash.
for peptide in proteinSeqs[id]:
targetPeptides[peptide] = True
# Read the next sequence.
[id, sequence] = read_fasta_sequence(inputFile)
inputFile.close()
log("Read %d proteins and %d peptides from %s.\n" %
(len(proteinIDs), len(targetPeptides), inputFileName))
# Open the peptide output lists.
targetPeptideFile = open("%s.peptide.target.txt" % root, "w")
decoyPeptideFile = open("%s.peptide.decoy.txt" % root, "w")
# Create the decoys.
decoyPeptides = {} # Key = peptide, value = True
targetToDecoy = {} # Key = target peptide, value = decoy peptide
for targetPeptide in targetPeptides.keys():
# Don't bother with short peptides.
if (len(targetPeptide) <= MIN_PEPTIDE):
continue
success = False
for shuffle in range(0, NUM_SHUFFLES):
decoyList = list(targetPeptide)[1:-1] # Don't shuffle terminal AAs.
random.shuffle(decoyList)
decoyList.insert(0,targetPeptide[0])
decoyList.append(targetPeptide[-1])
decoyPeptide = ''.join(decoyList)
if ((not targetPeptides.has_key(decoyPeptide)) and
(not decoyPeptides.has_key(decoyPeptide))):
decoyPeptides[decoyPeptide] = True
success = True
break
if not success:
if targetPeptides.has_key(decoyPeptide):
log("overlap decoy %s\n" % decoyPeptide)
if decoyPeptides.has_key(decoyPeptide):
log("duplicate decoy %s\n" % decoyPeptide)
continue
targetPeptideFile.write("%s\n" % targetPeptide)
decoyPeptideFile.write("%s\n" % decoyPeptide)
targetToDecoy[targetPeptide] = decoyPeptide
log("Printed %d peptides.\n" % len(targetToDecoy))
# Print the decoy proteins.
decoyProteinFileName = "%s.protein.decoy.fa" % root
decoyProteinFile = open(decoyProteinFileName, "w")
for proteinID in proteinIDs:
decoyProteinFile.write(">%s\n" % proteinID)
for targetPeptide in proteinSeqs[proteinID]:
if targetPeptide in targetToDecoy:
decoyProteinFile.write("%s" % targetToDecoy[targetPeptide])
else:
decoyProteinFile.write("%s" % targetPeptide)
decoyProteinFile.write("\n")
decoyProteinFile.close()
log("Printed %d decoy proteins to %s.\n" %
(len(proteinIDs), decoyProteinFileName))
logFile.close()
```
#### File: test/calibration/make-qq-plot.py
```python
import sys
import os
import math
usage = """USAGE: make-qq-plot.py <p-values> <root>
Compare a given set of p-values to the uniform distribution by
creating a QQ plot with log-log axes. The program outputs three
files: a gnuplot script (<root>.gnuplot), the data to be plotted
(<root>.txt) and the plot itself (<root>.png). Note that the stored
values are downsampled to avoid having too many points in the plot.
Options:
--no-log-scale
--column-header <string> Header of column from which to get p-values.
--minus-natural-log Input values are negative log base e.
--format png|eps (default=png)
--fontsize <int> (only effective with "-format eps")
--title <title>
If the p-value file is specified as "-", then the program reads from
standard input.
"""
###############################################################################
# Find a given word in a tab-delimited string of words.
# Return the index.
def findWord(header, word):
words = header.split("\t")
for index in range(0, len(words)):
if (words[index] == word):
return(index)
sys.stderr.write("Can't find %s in %s.\n" % (word, header))
sys.exit(1)
###############################################################################
# MAIN
###############################################################################
# Set default values.
log_scale = 1
column_header = ""
log_values = 0
file_format = "png"
font_size = 24
title = ""
# Parse the command line.
sys.argv = sys.argv[1:]
while (len(sys.argv) > 2):
next_arg = sys.argv[0]
sys.argv = sys.argv[1:]
if (next_arg == "--no-log-scale"):
log_scale = 0
elif (next_arg == "--column-header"):
column_header = sys.argv[0]
sys.argv = sys.argv[1:]
elif (next_arg == "--minus-natural-log"):
log_values = 1
elif (next_arg == "--format"):
file_format = sys.argv[0]
sys.argv = sys.argv[1:]
elif (next_arg == "--fontsize"):
font_size = int(sys.argv[0])
sys.argv = sys.argv[1:]
elif (next_arg == "--title"):
title = sys.argv[0]
sys.argv = sys.argv[1:]
else:
sys.stderr.write("Invalid option (%s).\n" % next_arg)
sys.exit(1)
if (len(sys.argv) != 2):
sys.stderr.write(usage)
sys.exit(1)
pvalue_filename = sys.argv[0]
fileroot = sys.argv[1]
# Open the file for reading.
if (pvalue_filename == "-"):
pvalue_file = sys.stdin
else:
pvalue_file = open(pvalue_filename, "r")
# If a header string was specified, find the relevant column.
if (column_header != ""):
header = pvalue_file.readline().rstrip()
column_index = findWord(header, column_header)
sys.stderr.write("Reading p-values from column %d.\n" % column_index)
else:
column_index = 0
# Read the p-values from the specified column.
pvalues = []
numZeroes = 0
for line in pvalue_file:
line = line.rstrip()
words = line.split("\t")
# Skip comment lines.
if (line[0] == "#"):
continue
# Crash if the line is too short.
if (len(words) <= column_index):
sys.stderr.write("Too few columns (%d < %d).\n%s\n"
% (len(words), column_index, line))
sys.exit(1)
# Skip NaNs.
if ((words[column_index] == "NaN") or
(words[column_index] == "nan")):
continue
pvalue = float(words[column_index])
if (log_values):
pvalue = math.exp(-1.0 * pvalue)
# Count zero p-values.
if (pvalue == 0):
numZeroes += 1
# Store this p-value.
pvalues.append(pvalue)
pvalue_file.close()
num_pvalues = len(pvalues)
if (numZeroes != 0):
sys.stderr.write("Warning: Found %d zero p-values.\n" % numZeroes)
sys.stderr.write("Read %d p-values from %s.\n" % (num_pvalues,
pvalue_filename))
# Sort the values.
pvalues.sort()
# Open the data file.
data_filename = "%s.txt" % fileroot
data_file = open(data_filename, "w")
sys.stderr.write("Creating %s.\n" % data_filename)
# We will only print with this density along the x-axis.
if (log_scale):
increment = 0.01
else:
increment = 0.001
current_value = 0
# Print the values to a file.
rank = 1.0
num_printed = 0
for pvalue in pvalues:
if (log_scale):
new_value = math.log(rank / num_pvalues)
else:
new_value = rank / num_pvalues
if (current_value == 0) or (new_value >= current_value + increment):
data_file.write("%g\t%g\n" % (rank / num_pvalues, pvalue))
current_value = new_value
num_printed += 1
rank += 1.0
data_file.close()
sys.stderr.write("Printed %d p-values.\n" % num_printed)
# Find the first non-zero p-value.
for index in range(0, len(pvalues)):
min_pvalue = pvalues[index]
if (min_pvalue != 0):
break
# Set the range.
sys.stderr.write("Minimum p-value=%g\n" % min_pvalue)
if (1.0 / num_pvalues < min_pvalue):
min_pvalue = 1.0 / num_pvalues
sys.stderr.write("Minimum rank p-value=%g\n" % min_pvalue)
if (min_pvalue == 0):
min_value = "1e-10"
else:
min_value = "1e%d" % (int(math.log(min_pvalue, 10.0)) - 1)
sys.stderr.write("Minimum x-axis value=%s\n" % min_value)
# Open the gnuplot file.
gnuplot_filename = "%s.gnuplot" % fileroot
gnuplot_file = open(gnuplot_filename, "w")
sys.stderr.write("Creating %s.\n" % gnuplot_filename)
# Print the gnuplot file.
gnuplot_file.write("set output '/dev/null'\n")
if (file_format == "png"):
gnuplot_file.write("set terminal png\n")
elif (file_format == "eps"):
gnuplot_file.write("set terminal postscript eps %s\n" % font_size)
else:
sys.stderr.write("Invalid file format (%s).\n" % file_format)
sys.exit(1)
gnuplot_file.write("set xlabel 'Rank p-value'\n")
gnuplot_file.write("set ylabel 'Calculated p-value'\n")
gnuplot_file.write("set xrange [%s:1]\n" % min_value)
gnuplot_file.write("set yrange [%s:1]\n" % min_value)
if (log_scale):
gnuplot_file.write("set logscale xy\n")
if (title != ""):
gnuplot_file.write("set title '%s'\n" % title)
gnuplot_file.write("plot x notitle with lines lt 1\n")
gnuplot_file.write("replot 0.5*x notitle with lines lt 2\n")
gnuplot_file.write("replot 2.0*x notitle with lines lt 2\n")
gnuplot_file.write("replot '%s' notitle with points\n" % data_filename)
gnuplot_file.write("set output\n")
gnuplot_file.write("replot\n")
gnuplot_file.close()
# Make the image.
sys.stderr.write("Creating %s.%s.\n" % (fileroot, file_format))
os.system("gnuplot %s > %s.%s" % (gnuplot_filename, fileroot, file_format))
``` |
{
"source": "John-Halter/Image_Classification",
"score": 3
} |
#### File: John-Halter/Image_Classification/calculated_values.py
```python
import numpy as np
def calc_polyfit(num_of_epochs, accuracy):
"""
Fit a cubic (degree-3) polynomial trend line to the accuracy curve
:param num_of_epochs: x-axis values (epoch numbers) to fit over
:param accuracy: the accuracy of the CNN model at each epoch
:return: new x and y arrays used to plot the fitted trend line
"""
coefficients = np.polyfit(num_of_epochs, accuracy, 3)
poly = np.poly1d(coefficients)
new_x = np.linspace(num_of_epochs[0], num_of_epochs[-1])
new_y = poly(new_x)
return new_x, new_y
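# Illustrative usage (hypothetical numbers, not part of the original module):
# epochs = list(range(1, 11))
# accuracy = [0.52, 0.61, 0.68, 0.72, 0.75, 0.78, 0.80, 0.81, 0.82, 0.83]
# smooth_x, smooth_y = calc_polyfit(epochs, accuracy)
# # smooth_x/smooth_y can then be passed to matplotlib's plot() to draw the
# # fitted cubic trend line over the raw accuracy points.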
``` |
{
"source": "johnhanson/nicehash_monitoring",
"score": 2
} |
#### File: johnhanson/nicehash_monitoring/algo.py
```python
algo = dict([
(0, 'Scrypt'),
(1, 'SHA256'),
(2, 'ScryptNf'),
(3, 'X11'),
(4, 'X13'),
(5, 'Keccak'),
(6, 'X15'),
(7, 'Nist5'),
(8, 'NeoScrypt'),
(9, 'Lyra2RE'),
(10, 'WhirlpoolX'),
(11, 'Qubit'),
(12, 'Quark'),
(13, 'Axiom'),
(14, 'Lyra2REv2'),
(15, 'ScryptJaneNf16'),
(16, 'Blake256r8'),
(17, 'Blake256r14'),
(18, 'Blake256r8vnl'),
(19, 'Hodl'),
(20, 'DaggerHashimoto'),
(21, 'Decred'),
(22, 'CryptoNight'),
(23, 'Lbry'),
(24, 'Equihash'),
(25, 'Pascal'),
(26, 'X11Gost'),
(27, 'Sia'),
(28, 'Blake2s'),
(29, 'Skunk')
])
# got this from https://www.nicehash.com/profitability-calculator
# note: nicehash is a butt and doesn't signify a change in the units by a bump in the API version
# have to fucking check twitter. goddamn.
# couldn't find it in the API, except in the stats.provider.ex call
# ~~might be wrong~~ probably is wrong. no way to verify for 100% sure.
speed = dict([
(0, 'TH/s'),
(1, 'TH/s'),
(2, 'TH/s'),
(3, 'MH/s'),
(4, 'MH/s'),
(5, 'MH/s'),
(6, 'MH/s'),
(7, 'MH/s'),
(8, 'MH/s'),
(9, 'TH/s'),
(10, 'MH/s'),
(11, 'MH/s'),
(12, 'MH/s'),
(13, 'kH/s'),
(14, 'TH/s'),
(15, 'TH/s'),
(16, 'GH/s'),
(17, 'GH/s'),
(18, 'GH/s'),
(19, 'kH/s'),
(20, 'MH/s'),
(21, 'GH/s'),
(22, 'kH/s'),
(23, 'GH/s'),
(24, 'Sol/s'),
(25, 'GH/s'),
(26, 'MH/s'),
(27, 'GH/s'),
(28, 'GH/s'),
(29, 'MH/s')
])
# in seconds
fast = 60 * 1 - 1
slow = 60 * 5 - 1
refresh_rate = dict([
('Axiom', slow),
('Blake256r14', slow),
('Blake256r8', slow),
('Blake256r8vnl', slow),
('Blake2s', fast),
('CryptoNight', fast),
('DaggerHashimoto', fast),
('Decred', fast),
('Equihash', fast),
('Hodl', slow),
('Keccak', fast),
('Lbry', slow),
('Lyra2RE', slow),
('Lyra2REv2', fast),
('NeoScrypt', fast),
('Nist5', fast),
('Pascal', slow),
('Quark', slow),
('Qubit', slow),
('SHA256', slow),
('Scrypt', slow),
('ScryptJaneNf16', slow),
('ScryptNf', slow),
('Sia', fast),
('Skunk', slow),
('WhirlpoolX', slow),
('X11', slow),
('X11Gost', fast),
('X13', slow),
('X15', slow)
])
def get_algo():
return algo
def num_to_algo(num):
return algo[num]
def get_speed():
return speed
def num_to_speed(num):
return speed[num]
def algo_to_refresh_rate(algo):
return refresh_rate[algo]
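# Illustrative usage (values taken directly from the tables above):
# num_to_algo(20) -> 'DaggerHashimoto'
# num_to_speed(20) -> 'MH/s'
# algo_to_refresh_rate('DaggerHashimoto') -> 59 (i.e. `fast`)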
``` |
{
"source": "johnhany/leetcode",
"score": 2
} |
#### File: johnhany/leetcode/debugvis.py
```python
import io
import lldb
import debugger
import base64
import numpy as np
import matplotlib
matplotlib.use('agg')
import matplotlib.pyplot as plt
def show():
image_bytes = io.BytesIO()
plt.savefig(image_bytes, format='png', bbox_inches='tight')
document = '<html><img src="data:image/png;base64,%s"></html>' % base64.b64encode(image_bytes.getvalue()).decode('utf-8')
debugger.display_html(document, position=2)
def plot_image(image, xdim, ydim, cmap='nipy_spectral_r'):
image = debugger.unwrap(image)
if image.TypeIsPointerType():
image_addr = image.GetValueAsUnsigned()
else:
image_addr = image.AddressOf().GetValueAsUnsigned()
data = lldb.process.ReadMemory(image_addr, int(xdim * ydim) * 4, lldb.SBError())
data = np.frombuffer(data, dtype=np.int32).reshape((ydim,xdim))
plt.imshow(data, cmap=cmap, interpolation='nearest')
show()
``` |
{
"source": "johnhany/MegEngine",
"score": 2
} |
#### File: megengine/core/tensor.py
```python
import collections
import functools
import itertools
from typing import Union
import numpy as np
import megengine._internal as mgb
from .graph import _use_default_if_none, get_default_graph
def wrap_io_tensor(func):
r"""A wrapper to make ``func`` compatible with functions in ``_internal.opr``.
"""
@functools.wraps(func)
def wrapper(*args, **kwargs):
comp_graph = None
for i in itertools.chain(args, kwargs.values()):
if isinstance(i, Tensor) and i._comp_graph:
comp_graph = i._comp_graph
break
else:
comp_graph = get_default_graph()
new_args = (
arg._attach(comp_graph) if isinstance(arg, Tensor) else arg for arg in args
)
new_kwargs = {
k: v._attach(comp_graph) if isinstance(v, Tensor) else v
for k, v in kwargs.items()
}
ret = func(*new_args, **new_kwargs)
if isinstance(ret, mgb.SymbolVar):
ret = Tensor(ret)
elif isinstance(ret, list):
ret = [Tensor(t) if isinstance(t, mgb.SymbolVar) else t for t in ret]
elif isinstance(ret, tuple):
ret = tuple(Tensor(t) if isinstance(t, mgb.SymbolVar) else t for t in ret)
return ret
return wrapper
def _wrap_symbolvar_binary_op(f):
@functools.wraps(f)
def wrapped(self, other):
comp_graph = (
isinstance(other, Tensor)
and other._comp_graph
or self._comp_graph
or get_default_graph()
)
if isinstance(other, Tensor):
other = other._attach(comp_graph)
return Tensor(f(self._attach(comp_graph), other))
return wrapped
def wrap_slice(inp):
start = inp.start._symvar if isinstance(inp.start, Tensor) else inp.start
stop = inp.stop._symvar if isinstance(inp.stop, Tensor) else inp.stop
step = inp.step._symvar if isinstance(inp.step, Tensor) else inp.step
return slice(start, stop, step)
def wrap_idx(idx):
if not isinstance(idx, tuple):
idx = (idx,)
idx = tuple(i._symvar if isinstance(i, Tensor) else i for i in idx)
idx = tuple(wrap_slice(i) if isinstance(i, slice) else i for i in idx)
return idx
class MGBIndexWrapper:
def __init__(self, dest, mgb_index, val=None):
self.dest = dest
self.val = val
self.mgb_index = mgb_index
def __getitem__(self, idx):
if self.val is None:
return wrap_io_tensor(self.mgb_index(self.dest._symvar).__getitem__)(
wrap_idx(idx)
)
else:
return wrap_io_tensor(
self.mgb_index(self.dest._symvar, self.val._symvar).__getitem__
)(wrap_idx(idx))
class Tensor:
r"""The main data container in MegEngine.
Use :func:`~.tensor` to create a Tensor with existing data.
"""
requires_grad = False
grad = None
def __init__(self, val=None, *, requires_grad=None):
self._reset(val, requires_grad=requires_grad)
def _reset(self, val=None, *, requires_grad=None):
if val is None:
self.__val = None
self.__sym = None
elif isinstance(val, mgb.SharedND):
self.__val = val
self.__sym = None
elif isinstance(val, mgb.SymbolVar):
self.__val = None
self.__sym = val
else:
raise TypeError("must be initialized with SymbolVar or SharedND")
self.requires_grad = requires_grad
def _as_tensor(self, obj):
r"""Convert the data into a ``Tensor``. If the data is already a Tensor
with the same dtype and device, no copy will be performed. Otherwise a
new Tensor will be returned with computational graph retained.
"""
if isinstance(obj, Tensor):
return obj
if isinstance(obj, mgb.SymbolVar):
return Tensor(obj)
if isinstance(obj, mgb.SharedScalar):
return Tensor(obj._as_sym_var(self._comp_graph, self._comp_node))
return tensor(data=obj, device=self.device)
def numpy(self):
r"""Return the tensor value in numpy.ndarray format.
"""
if self.__val is not None:
assert self.__sym is None
return self.__val.get_value()
if self.__sym is None:
raise ValueError("uninitialized")
if self.__sym.eager_val is not None:
return self.__sym.eager_val.get_value()
return self.__sym.inferred_value
def item(self):
return self.numpy().item()
def _attach(self, comp_graph, *, volatile=True):
if self.__val:
return self.__val.symvar(comp_graph, volatile=volatile)
if self.__sym:
if self.__sym.owner_graph != comp_graph:
raise RuntimeError("internal error")
return self.__sym
else:
raise ValueError("uninitialized")
@property
def _symvar(self):
if self.__sym:
assert not self.__val
return self.__sym
if not self.__val:
raise ValueError("uninitialized")
return self._attach(get_default_graph())
def __mgb_symvar__(self, comp_graph=None, **_):
if self.__val and comp_graph:
return self._attach(comp_graph)
return self._symvar # read by mgb.opr
@property
def dtype(self):
r"""Return the data type of the tensor.
"""
if self.__val is not None:
return self.__val.dtype
return self._symvar.dtype
@property
def _comp_node(self):
if self.__val is not None:
return self.__val.comp_node
return self._symvar.comp_node
device = _comp_node
@property
def _comp_graph(self):
if self.__sym is not None:
return self.__sym.owner_graph
return None
@property
def shape(self):
r"""Return an int tuple that is the shape/layout of the tensor.
Could be invalid in static graph mode.
"""
from ..jit import trace
if trace._active_instance: # pylint: disable=protected-access
# NOTE: this is a hack
shape = mgb.opr.get_var_shape(self._symvar)
return tuple(Tensor(shape[i]) for i in range(self.ndim))
return self._symvar.imm_shape
def set_value(self, value, *, sync=True, inplace=False, share=False):
r"""Set value to the tensor.
"""
if not self.__val:
raise ValueError("not detached")
if isinstance(value, Tensor):
value = value.__val or value.__sym.eager_val
self.__val.set_value(value, sync=sync, inplace=inplace, share=share)
def fill(self, value):
r"""Fills the tensor with the specified value.
"""
self.set_value(np.full(self.shape, value, dtype=self.dtype))
def reset_zero(self):
r"""Reset the tensor and fills with zeros.
"""
if not self.__val:
raise ValueError("not detached")
self.__val.reset_zero()
def to(self, device):
r"""Performs Tensor device conversion, returns Tensor with the specified device.
"""
return wrap_io_tensor(mgb.opr.copy)(self, comp_node=device)
# https://docs.python.org/3/reference/datamodel.html#object.__hash__
# > If a class does not define an __eq__() method it should not define a
# > __hash__() operation either
__hash__ = None # type: ignore[assignment]
def __eq__(self, rhs):
rhs = self._as_tensor(rhs)
return Tensor(self._symvar._binary_opr("EQ", rhs._symvar))
def __ne__(self, rhs):
return 1 - self.__eq__(rhs)
def __len__(self):
if self._symvar.eager_val is not None:
return self._symvar.eager_val.shape[0]
raise TypeError(
"__len__ and __iter__ is not available for tensors on non eager graph."
)
__add__ = _wrap_symbolvar_binary_op(mgb.SymbolVar.__add__)
__radd__ = _wrap_symbolvar_binary_op(mgb.SymbolVar.__radd__)
__sub__ = _wrap_symbolvar_binary_op(mgb.SymbolVar.__sub__)
__rsub__ = _wrap_symbolvar_binary_op(mgb.SymbolVar.__rsub__)
__mul__ = _wrap_symbolvar_binary_op(mgb.SymbolVar.__mul__)
__rmul__ = _wrap_symbolvar_binary_op(mgb.SymbolVar.__rmul__)
__matmul__ = _wrap_symbolvar_binary_op(mgb.SymbolVar.__matmul__)
__rmatmul__ = _wrap_symbolvar_binary_op(mgb.SymbolVar.__rmatmul__)
__lshift__ = _wrap_symbolvar_binary_op(mgb.SymbolVar.__lshift__)
__rshift__ = _wrap_symbolvar_binary_op(mgb.SymbolVar.__rshift__)
__truediv__ = _wrap_symbolvar_binary_op(mgb.SymbolVar.__truediv__)
__rtruediv__ = _wrap_symbolvar_binary_op(mgb.SymbolVar.__rtruediv__)
__floordiv__ = _wrap_symbolvar_binary_op(mgb.SymbolVar.__floordiv__)
__rfloordiv__ = _wrap_symbolvar_binary_op(mgb.SymbolVar.__rfloordiv__)
__mod__ = _wrap_symbolvar_binary_op(mgb.SymbolVar.__mod__)
__rmod__ = _wrap_symbolvar_binary_op(mgb.SymbolVar.__rmod__)
__pow__ = _wrap_symbolvar_binary_op(mgb.SymbolVar.__pow__)
__rpow__ = _wrap_symbolvar_binary_op(mgb.SymbolVar.__rpow__)
__lt__ = _wrap_symbolvar_binary_op(mgb.SymbolVar.__lt__)
__gt__ = _wrap_symbolvar_binary_op(mgb.SymbolVar.__gt__)
__le__ = _wrap_symbolvar_binary_op(mgb.SymbolVar.__le__)
__ge__ = _wrap_symbolvar_binary_op(mgb.SymbolVar.__ge__)
__neg__ = wrap_io_tensor(mgb.SymbolVar.__neg__)
sum = wrap_io_tensor(mgb.SymbolVar.sum)
"""
Return the sum of all elements in the tensor.
"""
max = wrap_io_tensor(mgb.SymbolVar.max)
"""
Return the maximum value of the given tensor.
"""
min = wrap_io_tensor(mgb.SymbolVar.min)
"""
Return the minimum value of the given tensor.
"""
prod = wrap_io_tensor(mgb.SymbolVar.prod)
"""
Return the product value of the given tensor.
"""
mean = wrap_io_tensor(mgb.SymbolVar.mean)
"""
Return the mean value of the given tensor.
"""
dimshuffle = wrap_io_tensor(mgb.SymbolVar.dimshuffle)
"""
See more details in :func:`~.functional.tensor.dimshuffle`.
"""
astype = wrap_io_tensor(mgb.SymbolVar.astype)
"""
Cast the tensor to a specified type.
"""
def reshape(self, *target_shape):
r"""Return a tensor which has given target shape
Examples:
.. testcode::
import numpy as np
from megengine import tensor
inp = tensor(np.arange(1, 17, dtype=np.int32).reshape(4,4))
out = tensor(np.arange(100, 116, dtype=np.int32).reshape(1,16))
out = out.reshape(inp.shape)
print(out.numpy())
.. testoutput::
[[100 101 102 103]
[104 105 106 107]
[108 109 110 111]
[112 113 114 115]]
"""
if isinstance(target_shape[0], tuple):
if len(target_shape) > 1:
raise ValueError("Only single tuple is accepted in reshape")
target_shape = target_shape[0]
target_shape = (t._symvar if isinstance(t, Tensor) else t for t in target_shape)
return Tensor(mgb.SymbolVar.reshape(self._symvar, *target_shape))
def broadcast(self, *target_shape):
r"""Return a tesnor broadcasted by current tensor to given target shape
Examples:
.. testcode::
import numpy as np
from megengine import tensor
data = tensor(np.arange(100, 104, dtype=np.int32).reshape(1,4))
data = data.broadcast((4,4))
print(data.numpy())
.. testoutput::
[[100 101 102 103]
[100 101 102 103]
[100 101 102 103]
[100 101 102 103]]
"""
if isinstance(target_shape[0], tuple):
if len(target_shape) > 1:
raise ValueError("Only single tuple is accepted in broadcast")
target_shape = target_shape[0]
target_shape = (t._symvar if isinstance(t, Tensor) else t for t in target_shape)
return Tensor(mgb.SymbolVar.broadcast(self._symvar, *target_shape))
# Prefer operators on Tensor instead of convert to numpy
__array_priority__ = 1000
# mgb indexing family
def __getitem__(self, idx):
return wrap_io_tensor(self._symvar.__getitem__)(wrap_idx(idx))
def set_subtensor(self, val):
return MGBIndexWrapper(self, mgb.opr.set_subtensor, val)
def incr_subtensor(self, val):
return MGBIndexWrapper(self, mgb.opr.incr_subtensor, val)
@property
def ai(self):
return MGBIndexWrapper(self, mgb.opr.advanced_indexing)
def set_ai(self, val):
return MGBIndexWrapper(self, mgb.opr.set_advanced_indexing, val)
def incr_ai(self, val):
return MGBIndexWrapper(self, mgb.opr.incr_advanced_indexing, val)
@property
def mi(self):
return MGBIndexWrapper(self, mgb.opr.mesh_indexing)
def set_mi(self, val):
return MGBIndexWrapper(self, mgb.opr.set_mesh_indexing, val)
def incr_mi(self, val):
return MGBIndexWrapper(self, mgb.opr.incr_mesh_indexing, val)
@property
def batched_mi(self):
return MGBIndexWrapper(self, mgb.opr.batched_mesh_indexing)
def batched_set_mi(self, val):
return MGBIndexWrapper(self, mgb.opr.batched_set_mesh_indexing, val)
def batched_incr_mi(self, val):
return MGBIndexWrapper(self, mgb.opr.batched_incr_mesh_indexing, val)
def __array__(self, dtype=None):
if dtype is None:
return self.numpy()
else:
return self.numpy().astype(dtype, copy=False)
def __int__(self):
return int(self.item())
def __index__(self):
return int(self.item())
def __round__(self, ndigits=0):
if ndigits != 0:
raise ValueError("ndigits must be 0 for Tensor.round")
return Tensor(mgb.opr.elemwise([self._symvar], mode="ROUND"))
round = __round__
def sqrt(self):
r"""Return a tensor that each element is the square root of its
original value.
"""
return Tensor(mgb.opr.sqrt(self._symvar))
def shapeof(self, axis=None):
r"""Return a Tensor that represent the shape of the tensor.
"""
return Tensor(mgb.opr.get_var_shape(self._symvar, axis=axis))
@property
def ndim(self):
r"""Return the number of dimensions of the tensor.
"""
return len(self._symvar.imm_shape)
def __repr__(self):
piece = "Tensor("
with np.printoptions(precision=4, suppress=True):
piece += "{}".format(str(self.numpy()))
if self.dtype != np.float32:
piece += ", dtype={}".format(np.dtype(self.dtype).name)
if self._comp_node.locator_logical != ("XPU", -1, 0):
piece += ", device={}".format(self.device)
piece += ")"
return piece
def __bool__(self):
raise RuntimeError(
"Tensor object should not be converted to bool or used in a if statement. Use .numpy(), int() or float() if you want to use its value in if statement, be aware that this may lead to incorrect result in non-eager mode."
)
def __getstate__(self):
assert (self.__val is not None) and (self.__sym is None)
metadata = {"requires_grad": self.requires_grad}
state = {
"data": self.numpy(),
"device": self.device,
"dtype": self.dtype,
"metadata": metadata,
}
return state
def __setstate__(self, state):
data = state.pop("data")
device = state.pop("device")
dtype = state.pop("dtype")
metadata = state.pop("metadata", {})
requires_grad = metadata.pop("requires_grad", None)
snd = mgb.make_shared(device, value=data, dtype=dtype)
self._reset(snd, requires_grad=requires_grad)
def tensor(
data: Union[list, np.ndarray] = None,
*,
dtype: str = None,
device: mgb.CompNode = None,
requires_grad: bool = None
):
r"""A helper function to create a :class:`~.Tensor` using existing data.
:param data: an existing data array, must be Python list, NumPy array or None.
:param dtype: target Tensor data type, one of ``("uint8", "int8", "int16", "int32", "float32", "float16")``.
:param device: target device for Tensor storing.
:param requires_grad: whether its gradient will be calculated during :meth:`~.Optimizer.backward`
"""
supported_dtypes = ("uint8", "int8", "int16", "int32", "float32", "float16")
if isinstance(data, Tensor):
raise NotImplementedError
if dtype is not None and np.dtype(dtype).name not in supported_dtypes:
raise TypeError("unsupported dtype {}".format(dtype))
if data is not None:
if not isinstance(data, np.ndarray):
data = np.array(data, dtype=dtype)
# In order to accept tensor([1]), automatically convert to a 32-bit dtype
# instead of numpy's default 64-bit when the input data is not an ndarray.
dtype = mgb.to_mgb_supported_dtype(data.dtype)
if dtype is None:
if data.dtype.name not in supported_dtypes:
raise TypeError("unsupported dtype {}".format(data.dtype))
device, _ = _use_default_if_none(device, None)
shared_nd = mgb.make_shared(device, value=data, dtype=dtype)
return Tensor(shared_nd, requires_grad=requires_grad)
class Dict(collections.MutableMapping):
def __init__(self, *args, key=None, **kwargs):
self.data = {}
if key:
self.keyfn = key
for i in args:
self.update(i)
self.update(**kwargs)
@staticmethod
def keyfn(key): # pylint: disable=method-hidden
return key
def __getitem__(self, key):
_, v = self.data[self.keyfn(key)]
return v
def __setitem__(self, key, value):
self.data[self.keyfn(key)] = key, value
def __delitem__(self, key):
del self.data[self.keyfn(key)]
def __iter__(self):
for _, (k, _) in self.data.items():
yield k
def __len__(self):
return len(self.data)
class TensorDict(Dict): # pylint: disable=too-many-ancestors
class keyfn:
def __new__(cls, x: Tensor):
if not isinstance(x, Tensor):
return x
return super().__new__(cls)
def __init__(self, x: Tensor):
self._data = x # do not save id directly to make pickle work
def __hash__(self):
return id(self._data)
def __eq__(self, other):
# pylint: disable=undefined-variable
return isinstance(other, __class__) and id(self._data) == id(other._data)
def __init__(self, *args):
super().__init__(*args)
```
#### File: megengine/functional/math.py
```python
from typing import Optional
import megengine._internal as mgb
from ..core import Tensor, wrap_io_tensor
from .elemwise import clamp
@wrap_io_tensor
def sum(inp: Tensor, axis: Optional[int] = None, keepdims: bool = False) -> Tensor:
r"""Returns the sum of each row of the ``inp`` tensor in the given ``axis``.
:param inp: The input tensor.
:param axis: The dimension to reduce. If None, all the dimensions will be reduced.
Default: None
:param keepdims: Whether the output tensor has ``axis`` retained or not.
Default: False
:return: The output tensor
Examples:
.. testcode::
import numpy as np
from megengine import tensor
import megengine.functional as F
data = tensor(np.arange(1, 7, dtype=np.int32).reshape(2, 3))
out = F.sum(data)
print(out.numpy())
.. testoutput::
[21]
"""
return mgb.opr.reduce_(inp, "SUM", axis, keepdims)
@wrap_io_tensor
def prod(inp: Tensor, axis: Optional[int] = None, keepdims=False) -> Tensor:
r"""
Returns the element product of input tensor along given *axis*.
:param inp: The input tensor
:param axis: The dimension to reduce. If None, all the dimensions will be reduced. Default: ``None``
:param keepdims: Whether the output tensor has *axis* retained or not. Default: ``False``
:return: The output tensor
Examples:
.. testcode::
import numpy as np
from megengine import tensor
import megengine.functional as F
data = tensor(np.arange(1, 7, dtype=np.int32).reshape(2, 3))
out = F.prod(data)
print(out.numpy())
Outputs:
.. testoutput::
[720]
"""
return mgb.opr.reduce_(inp, "PRODUCT", axis, keepdims)
@wrap_io_tensor
def mean(inp: Tensor, axis: Optional[int] = None, keepdims: bool = False) -> Tensor:
"""Returns the mean value of each row of the ``inp`` tensor in
the given ``axis``. If axis is a list of dimensions,
reduce over all of them.
:param inp: The input tensor
:param axis: The dimension to reduce. If None, all the dimensions will be reduced. Default: None
:param keepdims: Whether the output tensor has ``axis`` retained or not. Default: False
Examples:
.. testcode::
import numpy as np
from megengine import tensor
import megengine.functional as F
data = tensor(np.arange(1, 7, dtype=np.int32).reshape(2, 3))
out = F.mean(data)
print(out.numpy())
.. testoutput::
[3.5]
"""
return mgb.opr.mean(inp, axis, keepdims)
@wrap_io_tensor
def min(inp: Tensor, axis: Optional[int] = None, keepdims: bool = False) -> Tensor:
r"""
Returns the min value of input tensor along given *axis*.
:param inp: The input tensor
:param axis: The dimension to reduce. If None, all the dimensions will be reduced. Default: None
:param keepdims: Whether the output tensor has *axis* retained or not. Default: False
:return: The output tensor
Examples:
.. testcode::
import numpy as np
from megengine import tensor
import megengine.functional as F
x = tensor(np.arange(1, 7, dtype=np.int32).reshape(2,3))
y = F.min(x)
print(y.numpy())
Outputs:
.. testoutput::
[1]
"""
return mgb.opr.reduce_(inp, "MIN", axis, keepdims)
@wrap_io_tensor
def max(inp: Tensor, axis: Optional[int] = None, keepdims: bool = False) -> Tensor:
r"""Returns the max value of the input tensor along given *axis*.
:param inp: The input tensor
:param axis: The dimension to reduce. If None, all the dimensions will be reduced. Default: None
:param keepdims: Whether the output tensor has *axis* retained or not. Default: False
:return: The output tensor
Examples:
.. testcode::
import numpy as np
from megengine import tensor
import megengine.functional as F
x = tensor(np.arange(1, 7, dtype=np.int32).reshape(2,3))
y = F.max(x)
print(y.numpy())
.. testoutput::
[6]
"""
return mgb.opr.reduce_(inp, "MAX", axis, keepdims)
@wrap_io_tensor
def sqrt(inp: Tensor) -> Tensor:
"""
Return a new tensor with the square-root of the elements of ``inp``
:param inp: The input tensor
:return: The computed tensor
Examples:
.. testcode::
import numpy as np
import megengine as mge
import megengine.functional as F
data = mge.tensor(np.arange(0, 6, dtype=np.float32).reshape(2, 3))
out = F.sqrt(data)
print(out.numpy())
Outputs:
.. testoutput::
[[0. 1. 1.4142]
[1.7321 2. 2.2361 ]]
"""
return mgb.opr.sqrt(inp)
def norm(inp: Tensor, p: int = 2, axis: Optional[int] = None, keepdims=False):
"""Calculate ``p``-norm of input tensor along certain axis.
:param inp: The input tensor
:param p: power of value ``p`` applied to ``inp``. Default: 2
:param axis: The dimension to reduce. If None, all the dimensions will be reduced. Default: None
:param keepdims: Whether the output tensor has ``axis`` retained or not. Default: False
:return: The output tensor
"""
if axis is None:
inp = inp.reshape(-1)
return (inp ** p).sum(axis=axis, keepdims=keepdims) ** (1.0 / p)
@wrap_io_tensor
def argmin(inp: Tensor, axis: Optional[int] = None, keepdims: bool = False) -> Tensor:
r"""Returns the indices of the minimum values along an axis
:param inp: The input tensor
:param axis: The dimension to reduce. If None, all the dimensions will be reduced. Default: None
:param keepdims: Whether the output tensor has *axis* retained or not. Default: False
:return: The output tensor
Examples:
.. testcode::
import numpy as np
from megengine import tensor
import megengine.functional as F
x = tensor(np.arange(1, 7, dtype=np.int32).reshape(2,3))
y = F.argmin(x)
print(y.numpy())
.. testoutput::
[0]
"""
return mgb.opr.argmin(inp, axis, keepdims)
@wrap_io_tensor
def argmax(inp: Tensor, axis: Optional[int] = None, keepdims: bool = False) -> Tensor:
r"""Returns the indices of the maximum values along an axis
:param inp: The input tensor
:param axis: The dimension to reduce. If None, all the dimensions will be reduced. Default: None
:param keepdims: Whether the output tensor has *axis* retained or not. Default: False
:return: The output tensor
Examples:
.. testcode::
import numpy as np
from megengine import tensor
import megengine.functional as F
x = tensor(np.arange(1, 7, dtype=np.int32).reshape(2,3))
y = F.argmax(x)
print(y.numpy())
.. testoutput::
[5]
"""
return mgb.opr.argmax(inp, axis, keepdims)
def normalize(
inp: Tensor, p: int = 2, axis: Optional[int] = None, eps: float = 1e-12
) -> Tensor:
r"""Perform :math:`L_p` normalization of input tensor along certain axis.
For a tensor :attr:`inp` of shape :math:`(n_0, ..., n_{dim}, ..., n_k)`, each
:math:`n_{dim}` -element vector :math:`v` along dimension :attr:`axis` is transformed as:
.. math::
v = \frac{v}{\max(\lVert v \rVert_p, \epsilon)}.
:param inp: the input tensor
:param p: power of value ``p`` applied to ``inp``. Default: 2
:param axis: The dimension to reduce. If None, all the dimensions will be reduced
to calculate the norm. Default: None
:param eps: a small value to avoid division by zero. Default: 1e-12
:return: the normalized output tensor
"""
if axis is None:
return inp / clamp(norm(inp, p), lower=eps)
else:
return inp / clamp(norm(inp, p, axis, keepdims=True), lower=eps)
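# Illustrative example (not part of the original file): L2-normalizing an
# input holding [[3., 4.]] along axis=1 divides by its norm 5, so
#   normalize(x, p=2, axis=1)  ->  [[0.6, 0.8]]
# With axis=None the whole tensor is flattened and scaled by its global norm.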
```
#### File: megengine/module/concat.py
```python
from typing import Iterable
from .. import functional as F
from ..core.tensor import Tensor
from .module import QATModule
class Concat(QATModule):
r"""
A :class:`~.QATModule` to do functional concat, should replace concat with this module,
supporting ``qat`` mode and ``quantized`` mode.
"""
def forward(self, inps: Iterable[Tensor], axis: int = 0):
return F.concat(inps, axis)
def forward_qat(self, inps: Iterable[Tensor], axis: int = 0):
return self.apply_fakequant_with_observer(
self.forward(inps, axis), self.act_fake_quant, self.act_observer
)
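# Illustrative usage (not part of the original file; assumes the module is
# called like any other Module, which dispatches to forward/forward_qat):
#   cat = Concat()
#   y = cat([x0, x1], axis=1)  # same result as F.concat in normal mode;
#                              # in QAT mode the output is fake-quantized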
```
#### File: megengine/quantization/observer.py
```python
from abc import abstractmethod
import numpy as np
from .. import functional as F
from .._internal.dtype import _metadata_dict, get_quantized_dtype
from ..core import Buffer, Function, ones, tensor, zeros
from ..module import Module
class Round(Function):
def forward(self, x):
return x.round()
def backward(self, output_grads):
return output_grads
class Observer(Module):
r"""
A base class for Observer Module.
:param dtype: a string indicating to collect scale and zero_point of which dtype
"""
def __init__(self, dtype="qint8"):
super().__init__()
if dtype not in _metadata_dict.keys():
raise ValueError(
"unknown dtype: {}, only support {}".format(
dtype, _metadata_dict.keys()
)
)
self.dtype = dtype
self.qmin = _metadata_dict[dtype].qmin
self.qmax = _metadata_dict[dtype].qmax
self.zero_point, self.scale = None, None
self.enabled = True
def get_dtype(self):
scale, zero_point = self.get_qparams()
numpy_scale = None if scale is None else scale.numpy()[0]
numpy_zero_point = None if zero_point is None else zero_point.numpy()[0]
return get_quantized_dtype(self.dtype, numpy_scale, numpy_zero_point)
def enable(self):
self.enabled = True
def disable(self):
self.enabled = False
@abstractmethod
def forward(self, x):
pass
@abstractmethod
def get_qparams(self, **kwargs):
pass
class IdentityObserver(Observer):
r"""
A test Observer that always returns scale 1 and zero_point 0.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.scale = ones((1), dtype="float32")
self.zero_point = zeros((1), dtype="float32")
def forward(self, x):
return x
def get_qparams(self):
return self.scale, self.zero_point
class MinMaxObserver(Observer):
def __init__(self, symmetric=True, eps=0.00001, *args, **kwargs):
super().__init__(*args, **kwargs)
self.symmetric = symmetric
if self.symmetric:
# assert qmin + qmax == -1, 'when reduce_range, qmin + qmax should equal -1'
self.zero_point = tensor((self.qmin + self.qmax + 1) // 2)
self.min_val = Buffer(0.0, dtype=np.float32)
self.max_val = Buffer(0.0, dtype=np.float32)
self.scale_limit = eps
# flag is used by cond_take; the first forward pass uses first_flag, after which it is switched to not_flag
self.first_flag = Buffer(np.array([1, 0], dtype=np.int32))
self.not_flag = Buffer(np.array([0, 1], dtype=np.int32))
def set_min_max(self, tmp_min, tmp_max):
# FIXME: cond_take will destroy the shape, use reshape to reset it
tmp_min = tmp_min.reshape(1)
tmp_max = tmp_max.reshape(1)
if self.training:
F.zero_grad(
F.add_update(self.min_val, tmp_min, alpha=0.0, beta=1.0, bias=0.0)
)
F.zero_grad(
F.add_update(self.max_val, tmp_max, alpha=0.0, beta=1.0, bias=0.0)
)
F.zero_grad(
F.add_update(
self.first_flag, self.not_flag, alpha=0.0, beta=1.0, bias=0.0
)
)
# FIXME: add_update is applied after the whole trace procedure in `symbolic=True`
# mode. So use tmp_min/tmp_max to calc and save scale/zero_point for further
# calculation in FakeQuant.
self.set_scale_zero_point(tmp_min, tmp_max)
def set_scale_zero_point(self, tmp_min, tmp_max):
if self.symmetric:
symmetric_max_vals = F.maximum(-tmp_min, tmp_max)
# use maximum to avoid the scale being too small at the beginning
self.scale = F.maximum(
symmetric_max_vals / ((self.qmax - self.qmin) / 2), self.scale_limit
)
# zero_point = self.zero_point
else:
# use maximum to avoid the scale being too small at the beginning
self.scale = F.maximum(
(tmp_max - tmp_min) / (self.qmax - self.qmin), self.scale_limit
)
# calculate zero_point
self.zero_point = self.qmin - Round()((tmp_min / self.scale))
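# Worked example (illustrative; assumes the "qint8" metadata gives
# qmin=-128 and qmax=127, and ignores the scale_limit clamp):
#   asymmetric mode, tmp_min=-1.0, tmp_max=3.0:
#     scale      = (3.0 - (-1.0)) / (127 - (-128)) = 4/255  ~ 0.0157
#     zero_point = -128 - Round(-1.0 / 0.0157)              ~ -64
#   symmetric mode, same range:
#     scale = max(1.0, 3.0) / ((127 - (-128)) / 2) = 3.0 / 127.5 ~ 0.0235
#     zero_point stays at (qmin + qmax + 1) // 2 = 0 (set in __init__)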
def get_qparams(self):
# scale and zero_point is runtime tensor rather than Buffer,
# so need to re-calc if min_val and max_val are loaded.
if self.scale is None:
self.set_scale_zero_point(self.min_val, self.max_val)
return self.scale, self.zero_point
def forward(self, x_orig):
if self.enabled:
# stop gradient
x = F.zero_grad(x_orig)
# find max and min
tmp_min, _ = F.cond_take(
self.first_flag, F.concat([x.min(), F.minimum(self.min_val, x.min())])
)
tmp_max, _ = F.cond_take(
self.first_flag, F.concat([x.max(), F.maximum(self.max_val, x.max())])
)
self.set_min_max(tmp_min, tmp_max)
return x_orig
class ExponentialMovingAverageObserver(MinMaxObserver):
def __init__(self, momentum=0.9, *args, **kwargs):
super().__init__(*args, **kwargs)
self.momentum = Buffer(momentum)
def set_momentum(self, momentum):
self.momentum.set_value(momentum)
def forward(self, x_orig):
if self.enabled:
# stop gradient
x = F.zero_grad(x_orig)
# Exponential Moving Average
tmp_min, _ = F.cond_take(
self.first_flag,
F.concat(
[
x.min(),
self.momentum * self.min_val + (1 - self.momentum) * x.min(),
]
),
)
tmp_max, _ = F.cond_take(
self.first_flag,
F.concat(
[
x.max(),
self.momentum * self.max_val + (1 - self.momentum) * x.max(),
]
),
)
self.set_min_max(tmp_min, tmp_max)
return x_orig
```
#### File: megengine/quantization/quantize.py
```python
from copy import deepcopy
from ..module import Module, QATModule, Sequential, quantized
from .qconfig import QConfig, ema_fakequant_qconfig
def quantize(module: Module, inplace=True):
r"""
Recursively convert `module` to `quantized` mode through :meth:`~.Module.apply`.
:param module: root module to do convert recursively.
"""
if not inplace:
module = deepcopy(module)
def is_qat_module(obj):
return isinstance(obj, QATModule)
# no need to pass a prefix; use the pure key relative to the parent Module.
for key, submodule, parent in module._flatten(
with_key=True, with_parent=True, predicate=is_qat_module
):
if isinstance(parent, Sequential):
# cannot use setattr here; use Sequential's ``__setitem__`` to stay compatible
parent[int(key.split(".")[-1])] = submodule.to_quantized()
else:
setattr(parent, key.split(".")[-1], submodule.to_quantized())
def quantize_qat(module: Module, qconfig: QConfig = ema_fakequant_qconfig):
r"""
Recursively convert `module` to `qat` mode through :meth:`~.Module.apply`
and set qconfig relatively.
:param module: root module to do convert recursively.
:param qconfig: a instance of :class:`~.QConfig` to be set as submodules' qconfig.
default is :any:`~.qconfig.ema_fakequant_qconfig`.
"""
def fn(mod: Module):
if isinstance(mod, QATModule):
mod.set_qat_mode(QATModule.QATMode.QAT)
mod.set_qconfig(qconfig)
module.apply(fn)
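# Illustrative end-to-end sketch (not part of the original module; the model
# constructor name is hypothetical):
#   net = build_model()                       # Module containing QATModule submodules
#   quantize_qat(net, ema_fakequant_qconfig)  # switch to fake-quant training
#   ... run QAT training / finetuning ...
#   quantize(net)                             # replace QAT submodules with quantized ones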
def disable_fake_quant(module: Module):
r"""
Recursively disable `module` fake quantization in QATModule through :meth:`~.Module.apply`
:param module: root module to do disable fake quantization recursively.
"""
def fn(mod):
if isinstance(mod, QATModule):
mod.act_fake_quant.disable()
mod.weight_fake_quant.disable()
mod.inp_fake_quant.disable()
module.apply(fn)
def disable_observer(module: Module):
r"""
Recursively disable `module` observer in QATModule through :meth:`~.Module.apply`
:param module: root module to do disable observer recursively.
"""
def fn(mod):
if isinstance(mod, QATModule):
mod.act_observer.disable()
module.apply(fn)
def enable_fake_quant(module: Module):
r"""
Recursively enable `module` fake quantization in QATModule through :meth:`~.Module.apply`
:param module: root module to do enable fake quantization recursively.
"""
def fn(mod):
if isinstance(mod, QATModule):
mod.act_fake_quant.enable()
mod.weight_fake_quant.enable()
mod.inp_fake_quant.enable()
module.apply(fn)
def enable_observer(module: Module):
r"""
Recursively enable `module` observer in QATModule through :meth:`~.Module.apply`
:param module: root module to do enable observer recursively.
"""
def fn(mod):
if isinstance(mod, QATModule):
mod.act_observer.enable()
module.apply(fn)
```
#### File: unit/functional/test_functional.py
```python
import numpy as np
import pytest
from helpers import opr_test
import megengine._internal as mgb
import megengine.functional as F
from megengine import Buffer, Parameter, is_cuda_available, jit, tensor
from megengine.test import assertTensorClose
def test_flatten():
data0_shape = (2, 3, 4, 5)
data1_shape = (4, 5, 6, 7)
data0 = np.random.random(data0_shape).astype(np.float32)
data1 = np.random.random(data1_shape).astype(np.float32)
def compare_fn(x, y):
assert x.numpy().shape == y
output0 = (2 * 3 * 4 * 5,)
output1 = (4 * 5 * 6 * 7,)
cases = [{"input": data0, "output": output0}, {"input": data1, "output": output1}]
opr_test(cases, F.flatten, compare_fn=compare_fn)
output0 = (2, 3 * 4 * 5)
output1 = (4, 5 * 6 * 7)
cases = [{"input": data0, "output": output0}, {"input": data1, "output": output1}]
opr_test(cases, F.flatten, compare_fn=compare_fn, start_axis=1)
output0 = (2, 3, 4 * 5)
output1 = (4, 5, 6 * 7)
cases = [{"input": data0, "output": output0}, {"input": data1, "output": output1}]
opr_test(cases, F.flatten, compare_fn=compare_fn, start_axis=2)
output0 = (2, 3 * 4, 5)
output1 = (4, 5 * 6, 7)
cases = [{"input": data0, "output": output0}, {"input": data1, "output": output1}]
opr_test(cases, F.flatten, compare_fn=compare_fn, start_axis=1, end_axis=2)
def test_where():
maskv0 = np.array([[1, 0], [0, 1]], dtype=np.int32)
xv0 = np.array([[1, np.inf], [np.nan, 4]], dtype=np.float32)
yv0 = np.array([[5, 6], [7, 8]], dtype=np.float32)
maskv1 = np.array([[1, 0, 1], [1, 0, 0], [1, 1, 0]], dtype=np.int32)
xv1 = np.array([[1, np.inf, 2], [0, np.nan, 4], [1, 5, 7]], dtype=np.float32)
yv1 = np.array([[5, 6, 9], [2, 7, 8], [2, 1, 9]], dtype=np.float32)
cases = [
{"input": [maskv0, xv0, yv0]},
{"input": [maskv1, xv1, yv1]},
]
opr_test(cases, F.where, ref_fn=np.where)
maskv2 = np.array([1, 1, 1], dtype=np.int32)
xv2 = np.array([1, 3, 2], dtype=np.float32)
yv2 = np.array([5, 6, 9], dtype=np.float32)
maskv3 = np.array([0, 0, 0], dtype=np.int32)
xv3 = np.array([1, 3, 2], dtype=np.float32)
yv3 = np.array([5, 6, 9], dtype=np.float32)
cases = [
{"input": [maskv2, xv2, yv2]},
{"input": [maskv3, xv3, yv3]},
]
opr_test(cases, F.where, ref_fn=np.where)
def test_eye():
dtype = np.float32
cases = [{"input": [10, 20]}, {"input": [20, 30]}]
opr_test(cases, F.eye, ref_fn=lambda n, m: np.eye(n, m).astype(dtype), dtype=dtype)
def test_concat():
def get_data_shape(length: int):
return (length, 2, 3)
data1 = np.random.random(get_data_shape(5)).astype("float32")
data2 = np.random.random(get_data_shape(6)).astype("float32")
data3 = np.random.random(get_data_shape(7)).astype("float32")
def run(data1, data2):
return F.concat([data1, data2])
cases = [{"input": [data1, data2]}, {"input": [data1, data3]}]
opr_test(cases, run, ref_fn=lambda x, y: np.concatenate([x, y]))
def test_matrix_mul():
shape1 = (2, 3)
shape2 = (3, 4)
shape3 = (4, 5)
data1 = np.random.random(shape1).astype("float32")
data2 = np.random.random(shape2).astype("float32")
data3 = np.random.random(shape3).astype("float32")
cases = [{"input": [data1, data2]}, {"input": [data2, data3]}]
opr_test(cases, F.matrix_mul, ref_fn=np.matmul)
def test_batched_matrix_mul():
batch_size = 10
shape1 = (batch_size, 2, 3)
shape2 = (batch_size, 3, 4)
shape3 = (batch_size, 4, 5)
data1 = np.random.random(shape1).astype("float32")
data2 = np.random.random(shape2).astype("float32")
data3 = np.random.random(shape3).astype("float32")
cases = [{"input": [data1, data2]}, {"input": [data2, data3]}]
for i in range(0, batch_size):
def compare_fn(x, y):
assertTensorClose(x.numpy()[i, ...], y)
opr_test(
cases,
F.batched_matrix_mul,
compare_fn=compare_fn,
ref_fn=lambda x, y: np.matmul(x[i, ...], y[i, ...]),
)
def test_sort():
data1_shape = (10, 3)
data2_shape = (12, 2)
data1 = np.random.random(data1_shape).astype(np.float32)
data2 = np.random.random(data2_shape).astype(np.float32)
output0 = [np.sort(data1), np.argsort(data1).astype(np.int32)]
output1 = [np.sort(data2), np.argsort(data2).astype(np.int32)]
cases = [
{"input": data1, "output": output0},
{"input": data2, "output": output1},
]
opr_test(cases, F.sort)
def test_round():
data1_shape = (15,)
data2_shape = (25,)
data1 = np.random.random(data1_shape).astype(np.float32)
data2 = np.random.random(data2_shape).astype(np.float32)
cases = [{"input": data1}, {"input": data2}]
opr_test(cases, F.round, ref_fn=np.round)
def test_broadcast_to():
input1_shape = (20, 30)
output1_shape = (30, 20, 30)
data1 = np.random.random(input1_shape).astype(np.float32)
input2_shape = (10, 20)
output2_shape = (20, 10, 20)
data2 = np.random.random(input2_shape).astype(np.float32)
def compare_fn(x, y):
assert x.numpy().shape == y
cases = [
{"input": [data1, output1_shape], "output": output1_shape},
{"input": [data2, output2_shape], "output": output2_shape},
]
opr_test(cases, F.broadcast_to, compare_fn=compare_fn)
def test_linspace():
cases = [
{"input": [1, 9, 9]},
{"input": [3, 10, 8]},
]
opr_test(
cases,
F.linspace,
ref_fn=lambda start, end, step: np.linspace(start, end, step, dtype=np.float32),
)
cases = [
{"input": [9, 1, 9]},
{"input": [10, 3, 8]},
]
opr_test(
cases,
F.linspace,
ref_fn=lambda start, end, step: np.linspace(start, end, step, dtype=np.float32),
)
def test_arange():
cases = [
{"input": [1, 9, 1]},
{"input": [2, 10, 2]},
]
opr_test(
cases,
F.arange,
ref_fn=lambda start, end, step: np.arange(start, end, step, dtype=np.float32),
)
cases = [
{"input": [9, 1, -1]},
{"input": [10, 2, -2]},
]
opr_test(
cases,
F.arange,
ref_fn=lambda start, end, step: np.arange(start, end, step, dtype=np.float32),
)
cases = [
{"input": [9.3, 1.2, -0.5]},
{"input": [10.3, 2.1, -1.7]},
]
opr_test(
cases,
F.arange,
ref_fn=lambda start, end, step: np.arange(start, end, step, dtype=np.float32),
)
def test_add_update():
shape = (2, 3)
v = np.random.random(shape).astype(np.float32)
b = Buffer(v)
u = F.add_update(b, 1)
assertTensorClose(u.numpy(), v + 1)
u = F.add_update(b, 1)
assertTensorClose(u.numpy(), v + 2)
x = np.ones((2, 2), dtype=np.float32)
y = x * 0.5
dest = tensor(x)
delta = tensor(y)
r = F.add_update(dest, delta, alpha=tensor(0.9), beta=0.1, bias=0.1)
assertTensorClose(r.numpy(), x * 0.9 + y * 0.1 + 0.1)
def test_add_update_params():
b = np.random.random((2, 3)).astype(np.float32)
y = Buffer(b)
@jit.trace
def f(x):
return F.add_update(y, x)
f(np.zeros((2, 3)).astype(np.float32))
z = Buffer(np.zeros((2, 3)).astype(np.float32))
F.add_update(y, z, beta=0.1)
res = f(np.ones((2, 3)).astype(np.float32))
assertTensorClose(res, b + 1)
def test_cross_entropy_with_softmax():
data1_shape = (1, 2)
label1_shape = (1,)
data2_shape = (1, 3)
label2_shape = (1,)
data1 = np.array([1, 0.5], dtype=np.float32).reshape(data1_shape)
label1 = np.array([1], dtype=np.int32).reshape(label1_shape)
expect1 = F.cross_entropy(F.softmax(tensor(data1)), tensor(label1)).numpy()
data2 = np.array([0.3, 0.4, 0.3], dtype=np.float32).reshape(data2_shape)
label2 = np.array([1], dtype=np.int32).reshape(label2_shape)
expect2 = F.cross_entropy(F.softmax(tensor(data2)), tensor(label2)).numpy()
cases = [
{"input": [data1, label1], "output": expect1,},
{"input": [data2, label2], "output": expect2,},
]
opr_test(cases, F.cross_entropy_with_softmax)
def test_cross_entropy():
data1_shape = (1, 2)
label1_shape = (1,)
data2_shape = (1, 3)
label2_shape = (1,)
data1 = np.array([0.5, 0.5], dtype=np.float32).reshape(data1_shape)
label1 = np.array([1], dtype=np.int32).reshape(label1_shape)
expect1 = np.array([-np.log(0.5)], dtype=np.float32)
data2 = np.array([0.3, 0.4, 0.3], dtype=np.float32).reshape(data2_shape)
label2 = np.array([1], dtype=np.int32).reshape(label2_shape)
expect2 = np.array([-np.log(0.4)], dtype=np.float32)
cases = [
{"input": [data1, label1], "output": expect1,},
{"input": [data2, label2], "output": expect2,},
]
opr_test(cases, F.cross_entropy)
def test_binary_cross_entropy():
data1_shape = (2, 2)
label1_shape = (2, 2)
data2_shape = (2, 3)
label2_shape = (2, 3)
def sigmoid(x):
return 1 / (1 + np.exp(-x))
def compare_fn(x, y):
assertTensorClose(x.numpy(), y, max_err=5e-4)
np.random.seed(123)
data1 = sigmoid(np.random.uniform(size=data1_shape).astype(np.float32))
label1 = np.random.uniform(size=label1_shape).astype(np.float32)
expect1 = np.array([0.6361], dtype=np.float32)
np.random.seed(123)
data2 = sigmoid(np.random.uniform(size=data2_shape).astype(np.float32))
label2 = np.random.uniform(size=label2_shape).astype(np.float32)
expect2 = np.array([0.6750], dtype=np.float32)
cases = [
{"input": [data1, label1], "output": expect1,},
{"input": [data2, label2], "output": expect2,},
]
opr_test(cases, F.binary_cross_entropy, compare_fn=compare_fn)
@pytest.mark.skip
def test_conv_bias():
inp_scale = 0.01
w_scale = 0.02
outp_scale = 0.1
inp_dtype = mgb.dtype.qint8(inp_scale)
w_dtype = mgb.dtype.qint8(w_scale)
b_dtype = mgb.dtype.qint32(inp_scale * w_scale)
out_dtype = mgb.dtype.qint8(outp_scale)
def run(
N,
IC,
OC,
IH,
IW,
KH,
KW,
PH,
PW,
SH,
SW,
has_bias=True,
nonlinear_mode="IDENTITY",
):
inp_v = np.random.normal(size=(N, IC, IH, IW))
        w_v = np.random.normal(size=(OC, IC, KH, KW))
b_v = np.random.normal(size=(1, OC, 1, 1))
inp_scale = mgb.dtype.get_scale(inp_dtype)
w_scale = mgb.dtype.get_scale(w_dtype)
b_scale = mgb.dtype.get_scale(b_dtype)
inpv = mgb.dtype.convert_to_qint8(inp_v * inp_scale, inp_dtype)
wv = mgb.dtype.convert_to_qint8(w_v * w_scale, w_dtype)
bv = mgb.dtype.convert_to_qint32(b_v * b_scale, b_dtype)
inp_int8 = tensor(inpv, dtype=inp_dtype)
w_int8 = Parameter(wv, dtype=w_dtype)
b_int32 = Parameter(bv, dtype=b_dtype)
inp_fp32 = inp_int8.astype("float32")
w_fp32 = w_int8.astype("float32")
b_fp32 = b_int32.astype("float32")
jit.trace.enabled = True
b_symbolic = True
def convert_to_nchw4(var):
return var.reshape(
var.shapeof(0), var.shapeof(1) // 4, 4, var.shapeof(2), var.shapeof(3)
).dimshuffle(0, 1, 3, 4, 2)
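        # Shape note (illustrative): an (N, C, H, W) tensor is regrouped to
        # (N, C//4, H, W, 4), i.e. channels are packed in blocks of 4 as the
        # NCHW4 layout expects.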
@jit.trace(symbolic=b_symbolic)
def run_conv2d(inp, w, b):
O = F.conv2d(
inp, w, b if has_bias else None, stride=(SH, SW), padding=(PH, PW),
)
if nonlinear_mode == "RELU":
return F.relu(O)
else:
return O
@jit.trace(symbolic=b_symbolic)
def run_conv_bias(inp, w, b, format="NCHW"):
b = b if has_bias else np.zeros_like(b)
if format == "NCHW4":
inp = convert_to_nchw4(inp)
w = convert_to_nchw4(w)
b = F.flatten(b)
return F.conv_bias_activation(
inp,
w,
b,
stride=(SH, SW),
padding=(PH, PW),
dtype=out_dtype,
nonlinear_mode=nonlinear_mode,
)
format = "NCHW4" if is_cuda_available() else "NCHW"
expected = run_conv2d(inp_fp32, w_fp32, b_fp32)
expected = expected.astype(out_dtype).astype("float32")
result = run_conv_bias(inp_int8, w_int8, b_int32, format=format).astype(
"float32"
)
if format == "NCHW4":
result = result.dimshuffle(0, 1, 4, 2, 3)
expected = F.flatten(expected)
result = F.flatten(result)
assertTensorClose(result.numpy(), expected.numpy())
if not is_cuda_available():
run(1, 4, 4, 24, 33, 1, 1, 2, 3, 1, 1, False)
run(10, 12, 24, 46, 46, 1, 1, 2, 1, 3, 1, False)
run(10, 36, 8, 46, 26, 2, 2, 2, 1, 1, 2, False)
run(1, 4, 4, 24, 33, 1, 1, 2, 3, 1, 1)
run(10, 12, 24, 46, 46, 1, 1, 2, 1, 3, 1)
run(10, 36, 8, 46, 26, 2, 2, 2, 1, 1, 2)
run(10, 36, 8, 46, 26, 2, 2, 2, 1, 1, 2, False, "RELU")
run(10, 36, 8, 46, 26, 2, 2, 2, 1, 1, 2, True, "RELU")
``` |
{
"source": "johnhany/StackGAN-v2",
"score": 3
} |
#### File: StackGAN-v2/code/model.py
```python
import torch
import torch.nn as nn
import torch.nn.parallel
from miscc.config import cfg
from torch.autograd import Variable
import torch.nn.functional as F
from torchvision import models
import torch.utils.model_zoo as model_zoo
# ############################## For Compute inception score ##############################
# Besides the inception score computed by the pretrained model, for fine-grained datasets (such as birds or bedrooms)
# it is also useful to compute the inception score with a fine-tuned model and to manually examine the image quality.
class INCEPTION_V3(nn.Module):
def __init__(self):
super(INCEPTION_V3, self).__init__()
self.model = models.inception_v3()
url = 'https://download.pytorch.org/models/inception_v3_google-1a9a5a14.pth'
# print(next(model.parameters()).data)
state_dict = \
model_zoo.load_url(url, map_location=lambda storage, loc: storage)
self.model.load_state_dict(state_dict)
for param in self.model.parameters():
param.requires_grad = False
print('Load pretrained model from ', url)
# print(next(self.model.parameters()).data)
# print(self.model)
def forward(self, input):
# [-1.0, 1.0] --> [0, 1.0]
x = input * 0.5 + 0.5
# mean=[0.485, 0.456, 0.406] and std=[0.229, 0.224, 0.225]
# --> mean = 0, std = 1
x[:, 0] = (x[:, 0] - 0.485) / 0.229
x[:, 1] = (x[:, 1] - 0.456) / 0.224
x[:, 2] = (x[:, 2] - 0.406) / 0.225
#
# --> fixed-size input: batch x 3 x 299 x 299
x = F.interpolate(x, size=(299, 299), mode='bilinear', align_corners=True)
# 299 x 299 x 3
x = self.model(x)
x = F.softmax(x, dim=1)
return x
class GLU(nn.Module):
def __init__(self):
super(GLU, self).__init__()
def forward(self, x):
nc = x.size(1)
        assert nc % 2 == 0, "channels don't divide by 2!"
nc = int(nc/2)
return x[:, :nc] * torch.sigmoid(x[:, nc:])
def conv3x3(in_planes, out_planes):
"3x3 convolution with padding"
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=1,
padding=1, bias=False)
# ############## G networks ################################################
# Upscale the spatial size by a factor of 2 (the conv emits out_planes * 2 channels; GLU gates them back down to out_planes)
def upBlock(in_planes, out_planes):
block = nn.Sequential(
nn.Upsample(scale_factor=2, mode='nearest'),
conv3x3(in_planes, out_planes * 2),
nn.BatchNorm2d(out_planes * 2),
GLU()
)
return block
# Keep the spatial size
def Block3x3_relu(in_planes, out_planes):
block = nn.Sequential(
conv3x3(in_planes, out_planes * 2),
nn.BatchNorm2d(out_planes * 2),
GLU()
)
return block
class ResBlock(nn.Module):
def __init__(self, channel_num):
super(ResBlock, self).__init__()
self.block = nn.Sequential(
conv3x3(channel_num, channel_num * 2),
nn.BatchNorm2d(channel_num * 2),
GLU(),
conv3x3(channel_num, channel_num),
nn.BatchNorm2d(channel_num)
)
def forward(self, x):
residual = x
out = self.block(x)
out += residual
return out
class CA_NET(nn.Module):
# some code is modified from vae examples
# (https://github.com/pytorch/examples/blob/master/vae/main.py)
def __init__(self):
super(CA_NET, self).__init__()
self.t_dim = cfg.TEXT.DIMENSION
self.ef_dim = cfg.GAN.EMBEDDING_DIM
self.fc = nn.Linear(self.t_dim, self.ef_dim * 4, bias=True)
self.relu = GLU()
def encode(self, text_embedding):
x = self.relu(self.fc(text_embedding))
mu = x[:, :self.ef_dim]
logvar = x[:, self.ef_dim:]
return mu, logvar
def reparametrize(self, mu, logvar):
std = logvar.mul(0.5).exp_()
if cfg.CUDA:
eps = torch.cuda.FloatTensor(std.size()).normal_()
else:
eps = torch.FloatTensor(std.size()).normal_()
eps = Variable(eps)
return eps.mul(std).add_(mu)
def forward(self, text_embedding):
mu, logvar = self.encode(text_embedding)
c_code = self.reparametrize(mu, logvar)
return c_code, mu, logvar
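    # Reparameterization sketch: c_code = mu + eps * exp(0.5 * logvar) with
    # eps ~ N(0, I), so gradients flow through mu and logvar while the sample
    # itself stays stochastic.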
class INIT_STAGE_G(nn.Module):
def __init__(self, ngf):
super(INIT_STAGE_G, self).__init__()
self.gf_dim = ngf
if cfg.GAN.B_CONDITION:
self.in_dim = cfg.GAN.Z_DIM + cfg.GAN.EMBEDDING_DIM
else:
self.in_dim = cfg.GAN.Z_DIM
self.define_module()
def define_module(self):
in_dim = self.in_dim
ngf = self.gf_dim
self.fc = nn.Sequential(
nn.Linear(in_dim, ngf * 4 * 4 * 2, bias=False),
nn.BatchNorm1d(ngf * 4 * 4 * 2),
GLU())
self.upsample1 = upBlock(ngf, ngf // 2)
self.upsample2 = upBlock(ngf // 2, ngf // 4)
self.upsample3 = upBlock(ngf // 4, ngf // 8)
self.upsample4 = upBlock(ngf // 8, ngf // 16)
def forward(self, z_code, c_code=None):
if cfg.GAN.B_CONDITION and c_code is not None:
in_code = torch.cat((c_code, z_code), 1)
else:
in_code = z_code
# state size 16ngf x 4 x 4
out_code = self.fc(in_code)
out_code = out_code.view(-1, self.gf_dim, 4, 4)
# state size 8ngf x 8 x 8
out_code = self.upsample1(out_code)
# state size 4ngf x 16 x 16
out_code = self.upsample2(out_code)
# state size 2ngf x 32 x 32
out_code = self.upsample3(out_code)
# state size ngf x 64 x 64
out_code = self.upsample4(out_code)
return out_code
class NEXT_STAGE_G(nn.Module):
def __init__(self, ngf, num_residual=cfg.GAN.R_NUM):
super(NEXT_STAGE_G, self).__init__()
self.gf_dim = ngf
if cfg.GAN.B_CONDITION:
self.ef_dim = cfg.GAN.EMBEDDING_DIM
else:
self.ef_dim = cfg.GAN.Z_DIM
self.num_residual = num_residual
self.define_module()
def _make_layer(self, block, channel_num):
layers = []
for i in range(self.num_residual):
layers.append(block(channel_num))
return nn.Sequential(*layers)
def define_module(self):
ngf = self.gf_dim
efg = self.ef_dim
self.jointConv = Block3x3_relu(ngf + efg, ngf)
self.residual = self._make_layer(ResBlock, ngf)
self.upsample = upBlock(ngf, ngf // 2)
def forward(self, h_code, c_code):
s_size = h_code.size(2)
c_code = c_code.view(-1, self.ef_dim, 1, 1)
c_code = c_code.repeat(1, 1, s_size, s_size)
# state size (ngf+egf) x in_size x in_size
h_c_code = torch.cat((c_code, h_code), 1)
# state size ngf x in_size x in_size
out_code = self.jointConv(h_c_code)
out_code = self.residual(out_code)
# state size ngf/2 x 2in_size x 2in_size
out_code = self.upsample(out_code)
return out_code
class GET_IMAGE_G(nn.Module):
def __init__(self, ngf):
super(GET_IMAGE_G, self).__init__()
self.gf_dim = ngf
self.img = nn.Sequential(
conv3x3(ngf, 3),
nn.Tanh()
)
def forward(self, h_code):
out_img = self.img(h_code)
return out_img
class G_NET(nn.Module):
def __init__(self):
super(G_NET, self).__init__()
self.gf_dim = cfg.GAN.GF_DIM
self.define_module()
def define_module(self):
if cfg.GAN.B_CONDITION:
self.ca_net = CA_NET()
if cfg.TREE.BRANCH_NUM > 0:
self.h_net1 = INIT_STAGE_G(self.gf_dim * 16)
self.img_net1 = GET_IMAGE_G(self.gf_dim)
if cfg.TREE.BRANCH_NUM > 1:
self.h_net2 = NEXT_STAGE_G(self.gf_dim)
self.img_net2 = GET_IMAGE_G(self.gf_dim // 2)
if cfg.TREE.BRANCH_NUM > 2:
self.h_net3 = NEXT_STAGE_G(self.gf_dim // 2)
self.img_net3 = GET_IMAGE_G(self.gf_dim // 4)
        if cfg.TREE.BRANCH_NUM > 3: # Recommended structure (mainly limited by GPU memory), and not tested yet
self.h_net4 = NEXT_STAGE_G(self.gf_dim // 4, num_residual=1)
self.img_net4 = GET_IMAGE_G(self.gf_dim // 8)
if cfg.TREE.BRANCH_NUM > 4:
self.h_net4 = NEXT_STAGE_G(self.gf_dim // 8, num_residual=1)
self.img_net4 = GET_IMAGE_G(self.gf_dim // 16)
def forward(self, z_code, text_embedding=None):
if cfg.GAN.B_CONDITION and text_embedding is not None:
c_code, mu, logvar = self.ca_net(text_embedding)
else:
c_code, mu, logvar = z_code, None, None
fake_imgs = []
if cfg.TREE.BRANCH_NUM > 0:
h_code1 = self.h_net1(z_code, c_code)
fake_img1 = self.img_net1(h_code1)
fake_imgs.append(fake_img1)
if cfg.TREE.BRANCH_NUM > 1:
h_code2 = self.h_net2(h_code1, c_code)
fake_img2 = self.img_net2(h_code2)
fake_imgs.append(fake_img2)
if cfg.TREE.BRANCH_NUM > 2:
h_code3 = self.h_net3(h_code2, c_code)
fake_img3 = self.img_net3(h_code3)
fake_imgs.append(fake_img3)
if cfg.TREE.BRANCH_NUM > 3:
h_code4 = self.h_net4(h_code3, c_code)
fake_img4 = self.img_net4(h_code4)
fake_imgs.append(fake_img4)
return fake_imgs, mu, logvar
# ############## D networks ################################################
def Block3x3_leakRelu(in_planes, out_planes):
block = nn.Sequential(
conv3x3(in_planes, out_planes),
nn.BatchNorm2d(out_planes),
nn.LeakyReLU(0.2, inplace=True)
)
return block
# Downscale the spatial size by a factor of 2
def downBlock(in_planes, out_planes):
block = nn.Sequential(
nn.Conv2d(in_planes, out_planes, 4, 2, 1, bias=False),
nn.BatchNorm2d(out_planes),
nn.LeakyReLU(0.2, inplace=True)
)
return block
# Downscale the spatial size by a factor of 16
def encode_image_by_16times(ndf):
encode_img = nn.Sequential(
# --> state size. ndf x in_size/2 x in_size/2
nn.Conv2d(3, ndf, 4, 2, 1, bias=False),
nn.LeakyReLU(0.2, inplace=True),
        # --> state size 2ndf x in_size/4 x in_size/4
nn.Conv2d(ndf, ndf * 2, 4, 2, 1, bias=False),
nn.BatchNorm2d(ndf * 2),
nn.LeakyReLU(0.2, inplace=True),
# --> state size 4ndf x in_size/8 x in_size/8
nn.Conv2d(ndf * 2, ndf * 4, 4, 2, 1, bias=False),
nn.BatchNorm2d(ndf * 4),
nn.LeakyReLU(0.2, inplace=True),
# --> state size 8ndf x in_size/16 x in_size/16
nn.Conv2d(ndf * 4, ndf * 8, 4, 2, 1, bias=False),
nn.BatchNorm2d(ndf * 8),
nn.LeakyReLU(0.2, inplace=True)
)
return encode_img
# For 64 x 64 images
class D_NET64(nn.Module):
def __init__(self):
super(D_NET64, self).__init__()
self.df_dim = cfg.GAN.DF_DIM
self.ef_dim = cfg.GAN.EMBEDDING_DIM
self.define_module()
def define_module(self):
ndf = self.df_dim
efg = self.ef_dim
self.img_code_s16 = encode_image_by_16times(ndf)
self.logits = nn.Sequential(
nn.Conv2d(ndf * 8, 1, kernel_size=4, stride=4),
nn.Sigmoid())
if cfg.GAN.B_CONDITION:
self.jointConv = Block3x3_leakRelu(ndf * 8 + efg, ndf * 8)
self.uncond_logits = nn.Sequential(
nn.Conv2d(ndf * 8, 1, kernel_size=4, stride=4),
nn.Sigmoid())
def forward(self, x_var, c_code=None):
x_code = self.img_code_s16(x_var)
if cfg.GAN.B_CONDITION and c_code is not None:
c_code = c_code.view(-1, self.ef_dim, 1, 1)
c_code = c_code.repeat(1, 1, 4, 4)
# state size (ngf+egf) x 4 x 4
h_c_code = torch.cat((c_code, x_code), 1)
# state size ngf x in_size x in_size
h_c_code = self.jointConv(h_c_code)
else:
h_c_code = x_code
output = self.logits(h_c_code)
if cfg.GAN.B_CONDITION:
out_uncond = self.uncond_logits(x_code)
return [output.view(-1), out_uncond.view(-1)]
else:
return [output.view(-1)]
# For 128 x 128 images
class D_NET128(nn.Module):
def __init__(self):
super(D_NET128, self).__init__()
self.df_dim = cfg.GAN.DF_DIM
self.ef_dim = cfg.GAN.EMBEDDING_DIM
self.define_module()
def define_module(self):
ndf = self.df_dim
efg = self.ef_dim
self.img_code_s16 = encode_image_by_16times(ndf)
self.img_code_s32 = downBlock(ndf * 8, ndf * 16)
self.img_code_s32_1 = Block3x3_leakRelu(ndf * 16, ndf * 8)
self.logits = nn.Sequential(
nn.Conv2d(ndf * 8, 1, kernel_size=4, stride=4),
nn.Sigmoid())
if cfg.GAN.B_CONDITION:
self.jointConv = Block3x3_leakRelu(ndf * 8 + efg, ndf * 8)
self.uncond_logits = nn.Sequential(
nn.Conv2d(ndf * 8, 1, kernel_size=4, stride=4),
nn.Sigmoid())
def forward(self, x_var, c_code=None):
x_code = self.img_code_s16(x_var)
x_code = self.img_code_s32(x_code)
x_code = self.img_code_s32_1(x_code)
if cfg.GAN.B_CONDITION and c_code is not None:
c_code = c_code.view(-1, self.ef_dim, 1, 1)
c_code = c_code.repeat(1, 1, 4, 4)
# state size (ngf+egf) x 4 x 4
h_c_code = torch.cat((c_code, x_code), 1)
# state size ngf x in_size x in_size
h_c_code = self.jointConv(h_c_code)
else:
h_c_code = x_code
output = self.logits(h_c_code)
if cfg.GAN.B_CONDITION:
out_uncond = self.uncond_logits(x_code)
return [output.view(-1), out_uncond.view(-1)]
else:
return [output.view(-1)]
# For 256 x 256 images
class D_NET256(nn.Module):
def __init__(self):
super(D_NET256, self).__init__()
self.df_dim = cfg.GAN.DF_DIM
self.ef_dim = cfg.GAN.EMBEDDING_DIM
self.define_module()
def define_module(self):
ndf = self.df_dim
efg = self.ef_dim
self.img_code_s16 = encode_image_by_16times(ndf)
self.img_code_s32 = downBlock(ndf * 8, ndf * 16)
self.img_code_s64 = downBlock(ndf * 16, ndf * 32)
self.img_code_s64_1 = Block3x3_leakRelu(ndf * 32, ndf * 16)
self.img_code_s64_2 = Block3x3_leakRelu(ndf * 16, ndf * 8)
self.logits = nn.Sequential(
nn.Conv2d(ndf * 8, 1, kernel_size=4, stride=4),
nn.Sigmoid())
if cfg.GAN.B_CONDITION:
self.jointConv = Block3x3_leakRelu(ndf * 8 + efg, ndf * 8)
self.uncond_logits = nn.Sequential(
nn.Conv2d(ndf * 8, 1, kernel_size=4, stride=4),
nn.Sigmoid())
def forward(self, x_var, c_code=None):
x_code = self.img_code_s16(x_var)
x_code = self.img_code_s32(x_code)
x_code = self.img_code_s64(x_code)
x_code = self.img_code_s64_1(x_code)
x_code = self.img_code_s64_2(x_code)
if cfg.GAN.B_CONDITION and c_code is not None:
c_code = c_code.view(-1, self.ef_dim, 1, 1)
c_code = c_code.repeat(1, 1, 4, 4)
# state size (ngf+egf) x 4 x 4
h_c_code = torch.cat((c_code, x_code), 1)
# state size ngf x in_size x in_size
h_c_code = self.jointConv(h_c_code)
else:
h_c_code = x_code
output = self.logits(h_c_code)
if cfg.GAN.B_CONDITION:
out_uncond = self.uncond_logits(x_code)
return [output.view(-1), out_uncond.view(-1)]
else:
return [output.view(-1)]
# For 512 x 512 images: Recommended structure, not tested yet
class D_NET512(nn.Module):
def __init__(self):
super(D_NET512, self).__init__()
self.df_dim = cfg.GAN.DF_DIM
self.ef_dim = cfg.GAN.EMBEDDING_DIM
self.define_module()
def define_module(self):
ndf = self.df_dim
efg = self.ef_dim
self.img_code_s16 = encode_image_by_16times(ndf)
self.img_code_s32 = downBlock(ndf * 8, ndf * 16)
self.img_code_s64 = downBlock(ndf * 16, ndf * 32)
self.img_code_s128 = downBlock(ndf * 32, ndf * 64)
self.img_code_s128_1 = Block3x3_leakRelu(ndf * 64, ndf * 32)
self.img_code_s128_2 = Block3x3_leakRelu(ndf * 32, ndf * 16)
self.img_code_s128_3 = Block3x3_leakRelu(ndf * 16, ndf * 8)
self.logits = nn.Sequential(
nn.Conv2d(ndf * 8, 1, kernel_size=4, stride=4),
nn.Sigmoid())
if cfg.GAN.B_CONDITION:
self.jointConv = Block3x3_leakRelu(ndf * 8 + efg, ndf * 8)
self.uncond_logits = nn.Sequential(
nn.Conv2d(ndf * 8, 1, kernel_size=4, stride=4),
nn.Sigmoid())
def forward(self, x_var, c_code=None):
x_code = self.img_code_s16(x_var)
x_code = self.img_code_s32(x_code)
x_code = self.img_code_s64(x_code)
x_code = self.img_code_s128(x_code)
x_code = self.img_code_s128_1(x_code)
x_code = self.img_code_s128_2(x_code)
x_code = self.img_code_s128_3(x_code)
if cfg.GAN.B_CONDITION and c_code is not None:
c_code = c_code.view(-1, self.ef_dim, 1, 1)
c_code = c_code.repeat(1, 1, 4, 4)
# state size (ngf+egf) x 4 x 4
h_c_code = torch.cat((c_code, x_code), 1)
# state size ngf x in_size x in_size
h_c_code = self.jointConv(h_c_code)
else:
h_c_code = x_code
output = self.logits(h_c_code)
if cfg.GAN.B_CONDITION:
out_uncond = self.uncond_logits(x_code)
return [output.view(-1), out_uncond.view(-1)]
else:
return [output.view(-1)]
# For 1024 x 1024 images: Recommended structure, not tested yet
class D_NET1024(nn.Module):
def __init__(self):
super(D_NET1024, self).__init__()
self.df_dim = cfg.GAN.DF_DIM
self.ef_dim = cfg.GAN.EMBEDDING_DIM
self.define_module()
def define_module(self):
ndf = self.df_dim
efg = self.ef_dim
self.img_code_s16 = encode_image_by_16times(ndf)
self.img_code_s32 = downBlock(ndf * 8, ndf * 16)
self.img_code_s64 = downBlock(ndf * 16, ndf * 32)
self.img_code_s128 = downBlock(ndf * 32, ndf * 64)
self.img_code_s256 = downBlock(ndf * 64, ndf * 128)
self.img_code_s256_1 = Block3x3_leakRelu(ndf * 128, ndf * 64)
self.img_code_s256_2 = Block3x3_leakRelu(ndf * 64, ndf * 32)
self.img_code_s256_3 = Block3x3_leakRelu(ndf * 32, ndf * 16)
self.img_code_s256_4 = Block3x3_leakRelu(ndf * 16, ndf * 8)
self.logits = nn.Sequential(
nn.Conv2d(ndf * 8, 1, kernel_size=4, stride=4),
nn.Sigmoid())
if cfg.GAN.B_CONDITION:
self.jointConv = Block3x3_leakRelu(ndf * 8 + efg, ndf * 8)
self.uncond_logits = nn.Sequential(
nn.Conv2d(ndf * 8, 1, kernel_size=4, stride=4),
nn.Sigmoid())
def forward(self, x_var, c_code=None):
x_code = self.img_code_s16(x_var)
x_code = self.img_code_s32(x_code)
x_code = self.img_code_s64(x_code)
x_code = self.img_code_s128(x_code)
x_code = self.img_code_s256(x_code)
x_code = self.img_code_s256_1(x_code)
x_code = self.img_code_s256_2(x_code)
x_code = self.img_code_s256_3(x_code)
x_code = self.img_code_s256_4(x_code)
if cfg.GAN.B_CONDITION and c_code is not None:
c_code = c_code.view(-1, self.ef_dim, 1, 1)
c_code = c_code.repeat(1, 1, 4, 4)
# state size (ngf+egf) x 4 x 4
h_c_code = torch.cat((c_code, x_code), 1)
# state size ngf x in_size x in_size
h_c_code = self.jointConv(h_c_code)
else:
h_c_code = x_code
output = self.logits(h_c_code)
if cfg.GAN.B_CONDITION:
out_uncond = self.uncond_logits(x_code)
return [output.view(-1), out_uncond.view(-1)]
else:
return [output.view(-1)]
``` |
{
"source": "JOHNHARDCORE/PythonMiniProjects",
"score": 4
} |
#### File: PythonMiniProjects/binarysearch/binarysearch.py
```python
def binarySearch(start, end, needle, list):
if end < start:
return -1
mid = (end + start) // 2
val = list[mid]
if val == needle:
return mid
if val < needle:
return binarySearch(mid + 1, end, needle, list)
else:
return binarySearch(start, mid - 1, needle, list)
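# Example (illustrative): with the inclusive-end convention used above,
# binarySearch(0, 3, 5, [1, 3, 5, 7]) returns 2, while searching for a value
# that is not present, e.g. binarySearch(0, 3, 4, [1, 3, 5, 7]), returns -1.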
user_list = sorted(map(int,
    input("Enter a comma separated list of ints (ex: 1, 2, 3, 4)\n")
        .split(',')))
needle = int(input("Enter the number you want to search for\n"))
# binarySearch treats the end index as inclusive, hence len(user_list) - 1
index = binarySearch(0, len(user_list) - 1, needle, user_list)
print(f'Found {needle} at index {index}' if index != -1 else f'{needle} not in list')
``` |
{
"source": "John-HarringtonNZ/overcooked_ai",
"score": 2
} |
#### File: overcooked_ai_py/mdp/overcooked_mdp.py
```python
import itertools, copy, math
import numpy as np
from functools import reduce
from collections import defaultdict, Counter
from overcooked_ai_py.utils import pos_distance, read_layout_dict, classproperty
from overcooked_ai_py.mdp.actions import Action, Direction
class Recipe:
MAX_NUM_INGREDIENTS = 3
TOMATO = 'tomato'
ONION = 'onion'
ALL_INGREDIENTS = [ONION, TOMATO]
ALL_RECIPES_CACHE = {}
STR_REP = {'tomato': "†", 'onion': "o"}
_computed = False
_configured = False
_conf = {}
def __new__(cls, ingredients):
if not cls._configured:
raise ValueError("Recipe class must be configured before recipes can be created")
# Some basic argument verification
if not ingredients or not hasattr(ingredients, '__iter__') or len(ingredients) == 0:
raise ValueError("Invalid input recipe. Must be ingredients iterable with non-zero length")
for elem in ingredients:
if not elem in cls.ALL_INGREDIENTS:
raise ValueError("Invalid ingredient: {0}. Recipe can only contain ingredients {1}".format(elem, cls.ALL_INGREDIENTS))
if not len(ingredients) <= cls.MAX_NUM_INGREDIENTS:
raise ValueError("Recipe of length {0} is invalid. Recipe can contain at most {1} ingredients".format(len(ingredients), cls.MAX_NUM_INGREDIENTS))
key = hash(tuple(sorted(ingredients)))
if key in cls.ALL_RECIPES_CACHE:
return cls.ALL_RECIPES_CACHE[key]
cls.ALL_RECIPES_CACHE[key] = super(Recipe, cls).__new__(cls)
return cls.ALL_RECIPES_CACHE[key]
def __init__(self, ingredients):
self._ingredients = ingredients
def __getnewargs__(self):
return (self._ingredients,)
def __int__(self):
num_tomatoes = len([_ for _ in self.ingredients if _ == Recipe.TOMATO])
num_onions = len([_ for _ in self.ingredients if _ == Recipe.ONION])
mixed_mask = int(bool(num_tomatoes * num_onions))
mixed_shift = (Recipe.MAX_NUM_INGREDIENTS + 1)**len(Recipe.ALL_INGREDIENTS)
encoding = num_onions + (Recipe.MAX_NUM_INGREDIENTS + 1) * num_tomatoes
return mixed_mask * encoding * mixed_shift + encoding
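    # Worked example of the encoding above (assuming the default
    # MAX_NUM_INGREDIENTS of 3): two onions and one tomato give
    # encoding = 2 + 4*1 = 6, mixed_mask = 1 and mixed_shift = 4**2 = 16,
    # so int(recipe) = 6*16 + 6 = 102; two onions alone simply map to 2.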
def __hash__(self):
return hash(self.ingredients)
def __eq__(self, other):
# The ingredients property already returns sorted items, so equivalence check is sufficient
return self.ingredients == other.ingredients
def __ne__(self, other):
return not self == other
def __lt__(self, other):
return int(self) < int(other)
def __le__(self, other):
return int(self) <= int(other)
def __gt__(self, other):
return int(self) > int(other)
def __ge__(self, other):
return int(self) >= int(other)
def __repr__(self):
return self.ingredients.__repr__()
def __iter__(self):
return iter(self.ingredients)
def __copy__(self):
return Recipe(self.ingredients)
def __deepcopy__(self, memo):
ingredients_cpy = copy.deepcopy(self.ingredients)
return Recipe(ingredients_cpy)
@classmethod
def _compute_all_recipes(cls):
for i in range(cls.MAX_NUM_INGREDIENTS):
for ingredient_list in itertools.combinations_with_replacement(cls.ALL_INGREDIENTS, i + 1):
cls(ingredient_list)
@property
def ingredients(self):
return tuple(sorted(self._ingredients))
@ingredients.setter
def ingredients(self, _):
raise AttributeError("Recpes are read-only. Do not modify instance attributes after creation")
@property
def value(self):
if self._delivery_reward:
return self._delivery_reward
if self._value_mapping and self in self._value_mapping:
return self._value_mapping[self]
if self._onion_value and self._tomato_value:
num_onions = len([ingredient for ingredient in self.ingredients if ingredient == self.ONION])
num_tomatoes = len([ingredient for ingredient in self.ingredients if ingredient == self.TOMATO])
return self._tomato_value * num_tomatoes + self._onion_value * num_onions
return 20
@property
def time(self):
if self._cook_time:
return self._cook_time
if self._time_mapping and self in self._time_mapping:
return self._time_mapping[self]
if self._onion_time and self._tomato_time:
num_onions = len([ingredient for ingredient in self.ingredients if ingredient == self.ONION])
num_tomatoes = len([ingredient for ingredient in self.ingredients if ingredient == self.TOMATO])
return self._onion_time * num_onions + self._tomato_time * num_tomatoes
return 20
def to_dict(self):
return { 'ingredients' : self.ingredients }
def neighbors(self):
"""
Return all "neighbor" recipes to this recipe. A neighbor recipe is one that can be obtained
by adding exactly one ingredient to the current recipe
"""
neighbors = []
if len(self.ingredients) == self.MAX_NUM_INGREDIENTS:
return neighbors
for ingredient in self.ALL_INGREDIENTS:
new_ingredients = [*self.ingredients, ingredient]
new_recipe = Recipe(new_ingredients)
neighbors.append(new_recipe)
return neighbors
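    # Example (illustrative): with the default onion/tomato ingredient set,
    # Recipe(('onion',)).neighbors() returns the two-ingredient recipes
    # ('onion', 'onion') and ('onion', 'tomato'); a recipe that already has
    # MAX_NUM_INGREDIENTS items has no neighbors.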
@classproperty
def ALL_RECIPES(cls):
if not cls._computed:
cls._compute_all_recipes()
cls._computed = True
return set(cls.ALL_RECIPES_CACHE.values())
@classproperty
def configuration(cls):
if not cls._configured:
raise ValueError("Recipe class not yet configured")
return cls._conf
@classmethod
def configure(cls, conf):
cls._conf = conf
cls._configured = True
cls._computed = False
cls.MAX_NUM_INGREDIENTS = conf.get('max_num_ingredients', 3)
cls._cook_time = None
cls._delivery_reward = None
cls._value_mapping = None
cls._time_mapping = None
cls._onion_value = None
cls._onion_time = None
cls._tomato_value = None
cls._tomato_time = None
## Basic checks for validity ##
# Mutual Exclusion
        if ('tomato_time' in conf and not 'onion_time' in conf) or ('onion_time' in conf and not 'tomato_time' in conf):
            raise ValueError("Must specify both 'onion_time' and 'tomato_time'")
        if ('tomato_value' in conf and not 'onion_value' in conf) or ('onion_value' in conf and not 'tomato_value' in conf):
            raise ValueError("Must specify both 'onion_value' and 'tomato_value'")
if 'tomato_value' in conf and 'delivery_reward' in conf:
raise ValueError("'delivery_reward' incompatible with '<ingredient>_value'")
if 'tomato_value' in conf and 'recipe_values' in conf:
raise ValueError("'recipe_values' incompatible with '<ingredient>_value'")
if 'recipe_values' in conf and 'delivery_reward' in conf:
raise ValueError("'delivery_reward' incompatible with 'recipe_values'")
if 'tomato_time' in conf and 'cook_time' in conf:
raise ValueError("'cook_time' incompatible with '<ingredient>_time")
if 'tomato_time' in conf and 'recipe_times' in conf:
raise ValueError("'recipe_times' incompatible with '<ingredient>_time'")
if 'recipe_times' in conf and 'cook_time' in conf:
raise ValueError("'delivery_reward' incompatible with 'recipe_times'")
# recipe_ lists and orders compatibility
if 'recipe_values' in conf:
if not 'all_orders' in conf or not conf['all_orders']:
raise ValueError("Must specify 'all_orders' if 'recipe_values' specified")
if not len(conf['all_orders']) == len(conf['recipe_values']):
raise ValueError("Number of recipes in 'all_orders' must be the same as number in 'recipe_values")
if 'recipe_times' in conf:
if not 'all_orders' in conf or not conf['all_orders']:
raise ValueError("Must specify 'all_orders' if 'recipe_times' specified")
if not len(conf['all_orders']) == len(conf['recipe_times']):
raise ValueError("Number of recipes in 'all_orders' must be the same as number in 'recipe_times")
        ## Configure ##
if 'cook_time' in conf:
cls._cook_time = conf['cook_time']
if 'delivery_reward' in conf:
cls._delivery_reward = conf['delivery_reward']
if 'recipe_values' in conf:
cls._value_mapping = {
cls.from_dict(recipe) : value for (recipe, value) in zip(conf['all_orders'], conf['recipe_values'])
}
if 'recipe_times' in conf:
cls._time_mapping = {
cls.from_dict(recipe) : time for (recipe, time) in zip(conf['all_orders'], conf['recipe_times'])
}
if 'tomato_time' in conf:
cls._tomato_time = conf['tomato_time']
if 'onion_time' in conf:
cls._onion_time = conf['onion_time']
if 'tomato_value' in conf:
cls._tomato_value = conf['tomato_value']
if 'onion_value' in conf:
cls._onion_value = conf['onion_value']
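        # A minimal configuration sketch (illustrative values, not taken from any
        # particular layout):
        #   Recipe.configure({'max_num_ingredients': 3, 'cook_time': 20,
        #                     'delivery_reward': 20})
        # 'cook_time' and 'delivery_reward' may be combined; the checks above only
        # forbid mixing them with per-ingredient or per-recipe settings.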
@classmethod
def generate_random_recipes(cls, n=1, min_size=2, max_size=3, ingredients=None, recipes=None, unique=True):
"""
n (int): how many recipes generate
min_size (int): min generated recipe size
max_size (int): max generated recipe size
ingredients (list(str)): list of ingredients used for generating recipes (default is cls.ALL_INGREDIENTS)
recipes (list(Recipe)): list of recipes to choose from (default is cls.ALL_RECIPES)
unique (bool): if all recipes are unique (without repeats)
"""
if recipes is None: recipes = cls.ALL_RECIPES
ingredients = set(ingredients or cls.ALL_INGREDIENTS)
choice_replace = not(unique)
assert 1 <= min_size <= max_size <= cls.MAX_NUM_INGREDIENTS
assert all(ingredient in cls.ALL_INGREDIENTS for ingredient in ingredients)
def valid_size(r):
return min_size <= len(r.ingredients) <= max_size
def valid_ingredients(r):
return all(i in ingredients for i in r.ingredients)
relevant_recipes = [r for r in recipes if valid_size(r) and valid_ingredients(r)]
assert choice_replace or (n <= len(relevant_recipes))
return np.random.choice(relevant_recipes, n, replace=choice_replace)
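    # Example call (illustrative): Recipe.generate_random_recipes(n=2, min_size=2,
    # max_size=3, ingredients=['onion'], unique=True) samples two distinct
    # onion-only recipes of size 2 or 3, assuming the class has been configured.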
@classmethod
def from_dict(cls, obj_dict):
return cls(**obj_dict)
class ObjectState(object):
"""
State of an object in OvercookedGridworld.
"""
def __init__(self, name, position, **kwargs):
"""
name (str): The name of the object
position (int, int): Tuple for the current location of the object.
"""
self.name = name
self._position = tuple(position)
@property
def position(self):
return self._position
@position.setter
def position(self, new_pos):
self._position = new_pos
def is_valid(self):
return self.name in ['onion', 'tomato', 'dish']
def deepcopy(self):
return ObjectState(self.name, self.position)
def __eq__(self, other):
return isinstance(other, ObjectState) and \
self.name == other.name and \
self.position == other.position
def __gt__(self, other):
return self.position > other.position
def __hash__(self):
return hash((self.name, self.position))
def __repr__(self):
return '{}@{}'.format(
self.name, self.position)
def to_dict(self):
return {
"name": self.name,
"position": self.position
}
@classmethod
def from_dict(cls, obj_dict):
obj_dict = copy.deepcopy(obj_dict)
return ObjectState(**obj_dict)
class SoupState(ObjectState):
def __init__(self, position, ingredients=[], cooking_tick=-1, cook_time=None, **kwargs):
"""
Represents a soup object. An object becomes a soup the instant it is placed in a pot. The
soup's recipe is a list of ingredient names used to create it. A soup's recipe is undetermined
until it has begun cooking.
        position (tuple): (x, y) coordinates in the grid
        ingredients (list(ObjectState)): Objects that have been used to cook this soup. Determines the @property recipe
        cooking_tick (int): How long the soup has been cooking for. -1 means cooking hasn't started yet
        cook_time (int): How long the soup needs to cook; used mostly when building a soup from a dict with a supplied cook_time. If None, self.recipe.time is used
"""
super(SoupState, self).__init__("soup", position)
self._ingredients = ingredients
self._cooking_tick = cooking_tick
self._recipe = None
self._cook_time = cook_time
def __eq__(self, other):
return isinstance(other, SoupState) and self.name == other.name and self.position == other.position and self._cooking_tick == other._cooking_tick and \
all([this_i == other_i for this_i, other_i in zip(self._ingredients, other._ingredients)])
def __hash__(self):
ingredient_hash = hash(tuple([hash(i) for i in self._ingredients]))
supercls_hash = super(SoupState, self).__hash__()
return hash((supercls_hash, self._cooking_tick, ingredient_hash))
def __repr__(self):
supercls_str = super(SoupState, self).__repr__()
ingredients_str = self._ingredients.__repr__()
return "{}\nIngredients:\t{}\nCooking Tick:\t{}".format(supercls_str, ingredients_str, self._cooking_tick)
    # Only works for onion-based recipes
def soup_str(self):
num_ingredient = len(self.ingredients)
if num_ingredient == 0:
ingredient_str = "___"
elif num_ingredient == 1:
ingredient_str = "o__"
elif num_ingredient == 2:
ingredient_str = "oo_"
else:
ingredient_str = "ooo"
if self.is_cooking:
cooking_time = str(self._cooking_tick)
elif self.is_ready:
cooking_time = "20"
else:
cooking_time = "00"
if self.is_ready:
ready = "1"
else:
ready = "0"
return ingredient_str + cooking_time + ready
def __str__(self):
res = "{"
for ingredient in sorted(self.ingredients):
res += Recipe.STR_REP[ingredient]
if self.is_cooking:
res += str(self._cooking_tick)
elif self.is_ready:
res += str("y")
return res
@ObjectState.position.setter
def position(self, new_pos):
self._position = new_pos
for ingredient in self._ingredients:
ingredient.position = new_pos
@property
def ingredients(self):
return [ingredient.name for ingredient in self._ingredients]
@property
def is_cooking(self):
return not self.is_idle and not self.is_ready
@property
def recipe(self):
if self.is_idle:
raise ValueError("Recipe is not determined until soup begins cooking")
if not self._recipe:
self._recipe = Recipe(self.ingredients)
return self._recipe
@property
def value(self):
return self.recipe.value
@property
def cook_time(self):
# used mostly when cook time is supplied by state dict
if self._cook_time is not None:
return self._cook_time
else:
return self.recipe.time
@property
def cook_time_remaining(self):
return max(0, self.cook_time - self._cooking_tick)
@property
def is_ready(self):
if self.is_idle:
return False
return self._cooking_tick >= self.cook_time
@property
def is_idle(self):
return self._cooking_tick < 0
@property
def is_full(self):
return not self.is_idle or len(self.ingredients) == Recipe.MAX_NUM_INGREDIENTS
def is_valid(self):
if not all([ingredient.position == self.position for ingredient in self._ingredients]):
return False
if len(self.ingredients) > Recipe.MAX_NUM_INGREDIENTS:
return False
return True
def auto_finish(self):
if len(self.ingredients) == 0:
raise ValueError("Cannot finish soup with no ingredients")
self._cooking_tick = 0
self._cooking_tick = self.cook_time
def add_ingredient(self, ingredient):
if not ingredient.name in Recipe.ALL_INGREDIENTS:
raise ValueError("Invalid ingredient")
if self.is_full:
raise ValueError("Reached maximum number of ingredients in recipe")
ingredient.position = self.position
self._ingredients.append(ingredient)
def add_ingredient_from_str(self, ingredient_str):
ingredient_obj = ObjectState(ingredient_str, self.position)
self.add_ingredient(ingredient_obj)
def pop_ingredient(self):
if not self.is_idle:
raise ValueError("Cannot remove an ingredient from this soup at this time")
if len(self._ingredients) == 0:
raise ValueError("No ingredient to remove")
return self._ingredients.pop()
def begin_cooking(self):
if not self.is_idle:
raise ValueError("Cannot begin cooking this soup at this time")
if len(self.ingredients) == 0:
raise ValueError("Must add at least one ingredient to soup before you can begin cooking")
self._cooking_tick = 0
#self._cooking_tick = self.cook_time #another way to have begin_cooking also finish cooking
def cook(self):
if self.is_idle:
raise ValueError("Must begin cooking before advancing cook tick")
if self.is_ready:
raise ValueError("Cannot cook a soup that is already done")
self._cooking_tick += 1
def deepcopy(self):
return SoupState(self.position, [ingredient.deepcopy() for ingredient in self._ingredients], self._cooking_tick)
def to_dict(self):
info_dict = super(SoupState, self).to_dict()
        ingredients_dict = [ingredient.to_dict() for ingredient in self._ingredients]
        info_dict['_ingredients'] = ingredients_dict
info_dict['cooking_tick'] = self._cooking_tick
info_dict['is_cooking'] = self.is_cooking
info_dict['is_ready'] = self.is_ready
info_dict['is_idle'] = self.is_idle
info_dict['cook_time'] = -1 if self.is_idle else self.cook_time
# This is for backwards compatibility w/ overcooked-demo
# Should be removed once overcooked-demo is updated to use 'cooking_tick' instead of '_cooking_tick'
info_dict['_cooking_tick'] = self._cooking_tick
return info_dict
@classmethod
def from_dict(cls, obj_dict):
obj_dict = copy.deepcopy(obj_dict)
if obj_dict['name'] != 'soup':
return super(SoupState, cls).from_dict(obj_dict)
if 'state' in obj_dict:
# Legacy soup representation
ingredient, num_ingredient, time = obj_dict['state']
cooking_tick = -1 if time == 0 else time
finished = time >= 20
if ingredient == Recipe.TOMATO:
return SoupState.get_soup(obj_dict['position'], num_tomatoes=num_ingredient, cooking_tick=cooking_tick, finished=finished)
else:
return SoupState.get_soup(obj_dict['position'], num_onions=num_ingredient, cooking_tick=cooking_tick, finished=finished)
ingredients_objs = [ObjectState.from_dict(ing_dict) for ing_dict in obj_dict['_ingredients']]
obj_dict['ingredients'] = ingredients_objs
return cls(**obj_dict)
@classmethod
def get_soup(cls, position, num_onions=1, num_tomatoes=0, cooking_tick=-1, finished=False, **kwargs):
if num_onions < 0 or num_tomatoes < 0:
raise ValueError("Number of active ingredients must be positive")
if num_onions + num_tomatoes > Recipe.MAX_NUM_INGREDIENTS:
raise ValueError("Too many ingredients specified for this soup")
if cooking_tick >= 0 and num_tomatoes + num_onions == 0:
raise ValueError("_cooking_tick must be -1 for empty soup")
if finished and num_tomatoes + num_onions == 0:
raise ValueError("Empty soup cannot be finished")
onions = [ObjectState(Recipe.ONION, position) for _ in range(num_onions)]
tomatoes = [ObjectState(Recipe.TOMATO, position) for _ in range(num_tomatoes)]
ingredients = onions + tomatoes
soup = cls(position, ingredients, cooking_tick)
if finished:
soup.auto_finish()
return soup
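    # Example (illustrative): SoupState.get_soup((0, 0), num_onions=3,
    # cooking_tick=5) builds a three-onion soup that has been cooking for 5
    # ticks, while passing finished=True instead fast-forwards the soup to ready.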
class PlayerState(object):
"""
State of a player in OvercookedGridworld.
position: (x, y) tuple representing the player's location.
orientation: Direction.NORTH/SOUTH/EAST/WEST representing orientation.
held_object: ObjectState representing the object held by the player, or
None if there is no such object.
"""
def __init__(self, position, orientation, held_object=None):
self.position = tuple(position)
self.orientation = tuple(orientation)
self.held_object = held_object
assert self.orientation in Direction.ALL_DIRECTIONS
if self.held_object is not None:
assert isinstance(self.held_object, ObjectState)
assert self.held_object.position == self.position
@property
def pos_and_or(self):
return (self.position, self.orientation)
def has_object(self):
return self.held_object is not None
def get_object(self):
assert self.has_object()
return self.held_object
def set_object(self, obj):
assert not self.has_object()
obj.position = self.position
self.held_object = obj
def remove_object(self):
assert self.has_object()
obj = self.held_object
self.held_object = None
return obj
def update_pos_and_or(self, new_position, new_orientation):
self.position = new_position
self.orientation = new_orientation
if self.has_object():
self.get_object().position = new_position
def deepcopy(self):
new_obj = None if self.held_object is None else self.held_object.deepcopy()
return PlayerState(self.position, self.orientation, new_obj)
def __eq__(self, other):
return isinstance(other, PlayerState) and \
self.position == other.position and \
self.orientation == other.orientation and \
self.held_object == other.held_object
def __hash__(self):
return hash((self.position, self.orientation, self.held_object))
def __repr__(self):
return '{} facing {} holding {}'.format(
self.position, self.orientation, str(self.held_object))
def to_dict(self):
return {
"position": self.position,
"orientation": self.orientation,
"held_object": self.held_object.to_dict() if self.held_object is not None else None
}
@staticmethod
def from_dict(player_dict):
player_dict = copy.deepcopy(player_dict)
held_obj = player_dict["held_object"]
if held_obj is not None:
player_dict["held_object"] = SoupState.from_dict(held_obj)
return PlayerState(**player_dict)
class OvercookedState(object):
"""A state in OvercookedGridworld."""
def __init__(self, players, objects, bonus_orders=[], all_orders=[], timestep=0, **kwargs):
"""
players (list(PlayerState)): Currently active PlayerStates (index corresponds to number)
objects (dict({tuple:list(ObjectState)})): Dictionary mapping positions (x, y) to ObjectStates.
NOTE: Does NOT include objects held by players (they are in
the PlayerState objects).
bonus_orders (list(dict)): Current orders worth a bonus
all_orders (list(dict)): Current orders allowed at all
timestep (int): The current timestep of the state
"""
bonus_orders = [Recipe.from_dict(order) for order in bonus_orders]
all_orders = [Recipe.from_dict(order) for order in all_orders]
for pos, obj in objects.items():
assert obj.position == pos
self.players = tuple(players)
self.objects = objects
self._bonus_orders = bonus_orders
self._all_orders = all_orders
self.timestep = timestep
assert len(set(self.bonus_orders)) == len(self.bonus_orders), "Bonus orders must not have duplicates"
assert len(set(self.all_orders)) == len(self.all_orders), "All orders must not have duplicates"
assert set(self.bonus_orders).issubset(set(self.all_orders)), "Bonus orders must be a subset of all orders"
@property
def player_positions(self):
return tuple([player.position for player in self.players])
@property
def player_orientations(self):
return tuple([player.orientation for player in self.players])
@property
def players_pos_and_or(self):
"""Returns a ((pos1, or1), (pos2, or2)) tuple"""
return tuple(zip(*[self.player_positions, self.player_orientations]))
@property
def unowned_objects_by_type(self):
"""
Returns dictionary of (obj_name: ObjState)
for all objects in the environment, NOT including
ones held by players.
"""
objects_by_type = defaultdict(list)
for _pos, obj in self.objects.items():
objects_by_type[obj.name].append(obj)
return objects_by_type
@property
def player_objects_by_type(self):
"""
Returns dictionary of (obj_name: ObjState)
for all objects held by players.
"""
player_objects = defaultdict(list)
for player in self.players:
if player.has_object():
player_obj = player.get_object()
player_objects[player_obj.name].append(player_obj)
return player_objects
@property
def all_objects_by_type(self):
"""
Returns dictionary of (obj_name: ObjState)
for all objects in the environment, including
ones held by players.
"""
all_objs_by_type = self.unowned_objects_by_type.copy()
for obj_type, player_objs in self.player_objects_by_type.items():
all_objs_by_type[obj_type].extend(player_objs)
return all_objs_by_type
@property
def all_objects_list(self):
all_objects_lists = list(self.all_objects_by_type.values()) + [[], []]
return reduce(lambda x, y: x + y, all_objects_lists)
@property
def all_orders(self):
return sorted(self._all_orders) if self._all_orders else sorted(Recipe.ALL_RECIPES)
@property
def bonus_orders(self):
return sorted(self._bonus_orders)
def has_object(self, pos):
return pos in self.objects
def get_object(self, pos):
assert self.has_object(pos)
return self.objects[pos]
def add_object(self, obj, pos=None):
if pos is None:
pos = obj.position
assert not self.has_object(pos)
obj.position = pos
self.objects[pos] = obj
def remove_object(self, pos):
assert self.has_object(pos)
obj = self.objects[pos]
del self.objects[pos]
return obj
@classmethod
def from_players_pos_and_or(cls, players_pos_and_or, bonus_orders=[], all_orders=[]):
"""
Make a dummy OvercookedState with no objects based on the passed in player
positions and orientations and order list
"""
return cls(
[PlayerState(*player_pos_and_or) for player_pos_and_or in players_pos_and_or],
objects={}, bonus_orders=bonus_orders, all_orders=all_orders)
@classmethod
def from_player_positions(cls, player_positions, bonus_orders=[], all_orders=[]):
"""
Make a dummy OvercookedState with no objects and with players facing
North based on the passed in player positions and order list
"""
dummy_pos_and_or = [(pos, Direction.NORTH) for pos in player_positions]
return cls.from_players_pos_and_or(dummy_pos_and_or, bonus_orders, all_orders)
def deepcopy(self):
return OvercookedState(
players=[player.deepcopy() for player in self.players],
objects={pos:obj.deepcopy() for pos, obj in self.objects.items()},
bonus_orders=[order.to_dict() for order in self.bonus_orders],
all_orders=[order.to_dict() for order in self.all_orders],
timestep=self.timestep)
def time_independent_equal(self, other):
order_lists_equal = self.all_orders == other.all_orders and self.bonus_orders == other.bonus_orders
return isinstance(other, OvercookedState) and \
self.players == other.players and \
set(self.objects.items()) == set(other.objects.items()) and \
order_lists_equal
def __eq__(self, other):
return self.time_independent_equal(other) and self.timestep == other.timestep
def __hash__(self):
order_list_hash = hash(tuple(self.bonus_orders)) + hash(tuple(self.all_orders))
return hash(
(self.players, tuple(self.objects.values()), order_list_hash)
)
def __str__(self):
return 'Players: {}, Objects: {}'.format(
str(self.players), str(list(self.objects.values())))
def to_dict(self):
return {
"players": [p.to_dict() for p in self.players],
"objects": [obj.to_dict() for obj in self.objects.values()],
"bonus_orders": [order.to_dict() for order in self.bonus_orders],
"all_orders" : [order.to_dict() for order in self.all_orders],
"timestep" : self.timestep
}
@staticmethod
def from_dict(state_dict):
state_dict = copy.deepcopy(state_dict)
state_dict["players"] = [PlayerState.from_dict(p) for p in state_dict["players"]]
object_list = [SoupState.from_dict(o) for o in state_dict["objects"]]
state_dict["objects"] = { ob.position : ob for ob in object_list }
return OvercookedState(**state_dict)
class ReducedOvercookedState(object):
"""A reduced state in OvercookedGridworld. Used to specify only properties RL agents care about."""
def __init__(self, full_overcooked_state):
"""
players (list(PlayerState)): Currently active PlayerStates (index corresponds to number)
objects (dict({tuple:list(ObjectState)})): Dictionary mapping positions (x, y) to ObjectStates.
NOTE: Does NOT include objects held by players (they are in the PlayerState objects).
"""
self.players = full_overcooked_state.players
self.objects = full_overcooked_state.objects
def __eq__(self, other):
return isinstance(other, ReducedOvercookedState) and \
self.players == other.players and \
set(self.objects.items()) == set(other.objects.items())
def __hash__(self):
objects = {k: v for k, v in sorted(self.objects.items(), key=lambda item: item[1])}
return hash(
(self.players, tuple(objects))
)
def __str__(self):
objects = {k: v for k, v in sorted(self.objects.items(), key=lambda item: item[1])}
        return 'Players: {}, Objects: {}'.format(
            str(self.players), str(list(objects)))
BASE_REW_SHAPING_PARAMS = {
"PLACEMENT_IN_POT_REW": 3,
"DISH_PICKUP_REWARD": 3,
"SOUP_PICKUP_REWARD": 5,
"DISH_DISP_DISTANCE_REW": 0,
"POT_DISTANCE_REW": 0,
"SOUP_DISTANCE_REW": 0,
"DROP_REW": -1
}
EVENT_TYPES = [
# Tomato events
'tomato_pickup',
'useful_tomato_pickup',
'tomato_drop',
'useful_tomato_drop',
'potting_tomato',
# Onion events
'onion_pickup',
'useful_onion_pickup',
'onion_drop',
'useful_onion_drop',
'potting_onion',
# Dish events
'dish_pickup',
'useful_dish_pickup',
'dish_drop',
'useful_dish_drop',
# Soup events
'soup_pickup',
'soup_delivery',
'soup_drop',
# Potting events
'optimal_onion_potting',
'optimal_tomato_potting',
'viable_onion_potting',
'viable_tomato_potting',
'catastrophic_onion_potting',
'catastrophic_tomato_potting',
'useless_onion_potting',
'useless_tomato_potting'
]
POTENTIAL_CONSTANTS = {
'default' : {
'max_delivery_steps' : 10,
'max_pickup_steps' : 10,
'pot_onion_steps' : 10,
'pot_tomato_steps' : 10
},
'mdp_test_tomato' : {
'max_delivery_steps' : 4,
'max_pickup_steps' : 4,
'pot_onion_steps' : 5,
'pot_tomato_steps' : 6
}
}
# WARNING: Behavior with multiple sparse rewards active at once is undefined.
# Some of the sparse reward settings below have side-effects that must be considered.
DEFAULT_SPARSE_REWARD_OPTS = {
'deliver_soup': 20,
'add_onion_to_pot': 0,
'pickup_onion': 0,
'add_soup_to_plate': 0
}
class OvercookedGridworld(object):
"""
An MDP grid world based off of the Overcooked game.
TODO: clean the organization of this class further.
"""
#########################
# INSTANTIATION METHODS #
#########################
def __init__(self, terrain, start_player_positions, start_bonus_orders=[], sparse_reward_opts=DEFAULT_SPARSE_REWARD_OPTS, rew_shaping_params=None, layout_name="unnamed_layout", start_all_orders=[], num_items_for_soup=3, order_bonus=2, start_state=None, **kwargs):
"""
terrain: a matrix of strings that encode the MDP layout
layout_name: string identifier of the layout
start_player_positions: tuple of positions for both players' starting positions
start_bonus_orders: List of recipes dicts that are worth a bonus
rew_shaping_params: reward given for completion of specific subgoals
        start_all_orders: List of all available order dicts the players can make; defaults to all possible recipes if an empty list is provided
        sparse_reward_opts: dict mapping sparse reward events to their magnitudes (see DEFAULT_SPARSE_REWARD_OPTS)
num_items_for_soup: Maximum number of ingredients that can be placed in a soup
order_bonus: Multiplicative factor for serving a bonus recipe
start_state: Default start state returned by get_standard_start_state
"""
self._configure_recipes(start_all_orders, num_items_for_soup, **kwargs)
self.start_all_orders = [r.to_dict() for r in Recipe.ALL_RECIPES] if not start_all_orders else start_all_orders
self.height = len(terrain)
self.width = len(terrain[0])
self.shape = (self.width, self.height)
self.terrain_mtx = terrain
self.terrain_pos_dict = self._get_terrain_type_pos_dict()
self.start_player_positions = start_player_positions
self.num_players = len(start_player_positions)
self.start_bonus_orders = start_bonus_orders
self.reward_shaping_params = BASE_REW_SHAPING_PARAMS if rew_shaping_params is None else rew_shaping_params
self.layout_name = layout_name
self.order_bonus = order_bonus
self.start_state = start_state
self._opt_recipe_discount_cache = {}
self._opt_recipe_cache = {}
self._prev_potential_params = {}
self.sparse_reward_opts = sparse_reward_opts
@staticmethod
def from_layout_name(layout_name, **params_to_overwrite):
"""
        Generates an OvercookedGridworld instance from a layout file.
        One can overwrite the default mdp configuration using params_to_overwrite.
"""
params_to_overwrite = params_to_overwrite.copy()
base_layout_params = read_layout_dict(layout_name)
grid = base_layout_params['grid']
del base_layout_params['grid']
base_layout_params['layout_name'] = layout_name
if 'start_state' in base_layout_params:
base_layout_params['start_state'] = OvercookedState.from_dict(base_layout_params['start_state'])
# Clean grid
grid = [layout_row.strip() for layout_row in grid.split("\n")]
return OvercookedGridworld.from_grid(grid, base_layout_params, params_to_overwrite)
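    # Typical usage (sketch; "my_layout" is a placeholder layout name):
    #   mdp = OvercookedGridworld.from_layout_name("my_layout", order_bonus=3)
    # Keyword arguments passed here overwrite the corresponding values read from
    # the layout file before the OvercookedGridworld is constructed.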
@staticmethod
def from_grid(layout_grid, base_layout_params={}, params_to_overwrite={}, debug=False):
"""
Returns instance of OvercookedGridworld with terrain and starting
positions derived from layout_grid.
        One can override default configuration parameters of the mdp via params_to_overwrite.
"""
mdp_config = copy.deepcopy(base_layout_params)
layout_grid = [[c for c in row] for row in layout_grid]
OvercookedGridworld._assert_valid_grid(layout_grid)
if "layout_name" not in mdp_config:
layout_name = "|".join(["".join(line) for line in layout_grid])
mdp_config["layout_name"] = layout_name
player_positions = [None] * 9
for y, row in enumerate(layout_grid):
for x, c in enumerate(row):
if c in ['1', '2', '3', '4', '5', '6', '7', '8', '9']:
layout_grid[y][x] = ' '
                    # -1 accounts for the fact that player indexing starts from 1 rather than 0
assert player_positions[int(c) - 1] is None, 'Duplicate player in grid'
player_positions[int(c) - 1] = (x, y)
num_players = len([x for x in player_positions if x is not None])
player_positions = player_positions[:num_players]
# After removing player positions from grid we have a terrain mtx
mdp_config["terrain"] = layout_grid
mdp_config["start_player_positions"] = player_positions
for k, v in params_to_overwrite.items():
curr_val = mdp_config.get(k, None)
if debug:
print("Overwriting mdp layout standard config value {}:{} -> {}".format(k, curr_val, v))
mdp_config[k] = v
return OvercookedGridworld(**mdp_config)
def _configure_recipes(self, start_all_orders, num_items_for_soup, **kwargs):
self.recipe_config = {
"num_items_for_soup" : num_items_for_soup,
"all_orders" : start_all_orders,
**kwargs
}
Recipe.configure(self.recipe_config)
def set_sparse_rewards(self, sparse_reward_opts):
self.sparse_reward_opts = sparse_reward_opts.copy()
#####################
# BASIC CLASS UTILS #
#####################
def __eq__(self, other):
return np.array_equal(self.terrain_mtx, other.terrain_mtx) and \
self.start_player_positions == other.start_player_positions and \
self.start_bonus_orders == other.start_bonus_orders and \
self.start_all_orders == other.start_all_orders and \
self.reward_shaping_params == other.reward_shaping_params and \
self.layout_name == other.layout_name
def copy(self):
return OvercookedGridworld(
terrain=self.terrain_mtx.copy(),
start_player_positions=self.start_player_positions,
start_bonus_orders=self.start_bonus_orders,
rew_shaping_params=copy.deepcopy(self.reward_shaping_params),
layout_name=self.layout_name,
start_all_orders=self.start_all_orders
)
@property
def mdp_params(self):
return {
"layout_name": self.layout_name,
"terrain": self.terrain_mtx,
"start_player_positions": self.start_player_positions,
"start_bonus_orders": self.start_bonus_orders,
"rew_shaping_params": copy.deepcopy(self.reward_shaping_params),
"start_all_orders" : self.start_all_orders
}
##############
# GAME LOGIC #
##############
def get_actions(self, state):
"""
Returns the list of lists of valid actions for 'state'.
The ith element of the list is the list of valid actions that player i
can take.
"""
self._check_valid_state(state)
return [self._get_player_actions(state, i) for i in range(len(state.players))]
def _get_player_actions(self, state, player_num):
"""All actions are allowed to all players in all states."""
return Action.ALL_ACTIONS
def _check_action(self, state, joint_action):
for p_action, p_legal_actions in zip(joint_action, self.get_actions(state)):
if p_action not in p_legal_actions:
raise ValueError('Invalid action')
def get_standard_start_state(self):
if self.start_state:
return self.start_state
start_state = OvercookedState.from_player_positions(
self.start_player_positions, bonus_orders=self.start_bonus_orders, all_orders=self.start_all_orders
)
return start_state
def get_random_start_state_fn(self, random_start_pos=False, rnd_obj_prob_thresh=0.0):
def start_state_fn():
if random_start_pos:
valid_positions = self.get_valid_joint_player_positions()
start_pos = valid_positions[np.random.choice(len(valid_positions))]
else:
start_pos = self.start_player_positions
start_state = OvercookedState.from_player_positions(start_pos, bonus_orders=self.start_bonus_orders, all_orders=self.start_all_orders)
if rnd_obj_prob_thresh == 0:
return start_state
# Arbitrary hard-coding for randomization of objects
# For each pot, add a random amount of onions and tomatoes with prob rnd_obj_prob_thresh
# Begin the soup cooking with probability rnd_obj_prob_thresh
pots = self.get_pot_states(start_state)["empty"]
for pot_loc in pots:
p = np.random.rand()
if p < rnd_obj_prob_thresh:
n = int(np.random.randint(low=1, high=4))
m = int(np.random.randint(low=0, high=4-n))
q = np.random.rand()
cooking_tick = 0 if q < rnd_obj_prob_thresh else -1
start_state.objects[pot_loc] = SoupState.get_soup(pot_loc, num_onions=n, num_tomatoes=m, cooking_tick=cooking_tick, cook_time=0)
# For each player, add a random object with prob rnd_obj_prob_thresh
for player in start_state.players:
p = np.random.rand()
if p < rnd_obj_prob_thresh:
# Different objects have different probabilities
obj = np.random.choice(["dish", "onion", "soup"], p=[0.2, 0.6, 0.2])
n = int(np.random.randint(low=1, high=4))
m = int(np.random.randint(low=0, high=4-n))
if obj == "soup":
player.set_object(
SoupState.get_soup(player.position, num_onions=n, num_tomatoes=m, finished=True, cook_time=0)
)
else:
player.set_object(ObjectState(obj, player.position))
return start_state
return start_state_fn
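    # Example (illustrative): build a sampler that randomizes start positions and, with
    # probability 0.3, seeds pots and players with random objects:
    #
    #   start_fn = mdp.get_random_start_state_fn(random_start_pos=True, rnd_obj_prob_thresh=0.3)
    #   state = start_fn()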
def is_terminal(self, state):
# There is a finite horizon, handled by the environment.
return False
def get_state_transition(self, state, joint_action, display_phi=False, motion_planner=None):
"""Gets information about possible transitions for the action.
Returns the next state, sparse reward and reward shaping.
Assumes all actions are deterministic.
        NOTE: Sparse reward is given for soup deliveries (plus any additional
        events enabled via sparse_reward_opts); shaped reward is given only
        for completion of subgoals, not for soup deliveries.
"""
events_infos = { event : [False] * self.num_players for event in EVENT_TYPES }
assert not self.is_terminal(state), "Trying to find successor of a terminal state: {}".format(state)
for action, action_set in zip(joint_action, self.get_actions(state)):
if action not in action_set:
raise ValueError("Illegal action %s in state %s" % (action, state))
new_state = state.deepcopy()
# Resolve interacts first
sparse_reward_by_agent, shaped_reward_by_agent = self.resolve_interacts(new_state, joint_action, events_infos)
drop_reward = self.resolve_drops(new_state, joint_action, events_infos)
for i in range(self.num_players):
sparse_reward_by_agent[i] += drop_reward[i]
assert new_state.player_positions == state.player_positions
assert new_state.player_orientations == state.player_orientations
# Resolve player movements
self.resolve_movement(new_state, joint_action)
# Finally, environment effects
self.step_environment_effects(new_state)
# Additional dense reward logic
# shaped_reward += self.calculate_distance_based_shaped_reward(state, new_state)
infos = {
"event_infos": events_infos,
"sparse_reward_by_agent": sparse_reward_by_agent,
"shaped_reward_by_agent": shaped_reward_by_agent,
}
if display_phi:
assert motion_planner is not None, "motion planner must be defined if display_phi is true"
infos["phi_s"] = self.potential_function(state, motion_planner)
infos["phi_s_prime"] = self.potential_function(new_state, motion_planner)
return new_state, infos
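    # Example (illustrative; assumes a 2-player layout and the Action constants used above):
    #
    #   state = mdp.get_standard_start_state()
    #   next_state, infos = mdp.get_state_transition(state, [Action.STAY, Action.INTERACT])
    #   infos["sparse_reward_by_agent"]   # per-agent sparse reward for this step, e.g. [0, 0]
    #   infos["shaped_reward_by_agent"]   # per-agent shaped reward for this step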
def resolve_drops(self, new_state, joint_action, event_infos):
"""
Resolve drops if present.
"""
        drop_reward = [0] * self.num_players
for player_idx, (player, action) in enumerate(zip(new_state.players, joint_action)):
if action != 'drop':
continue
if player.has_object():
player.remove_object()
drop_reward[player_idx] += self.reward_shaping_params["DROP_REW"]
return drop_reward
def resolve_interacts(self, new_state, joint_action, events_infos):
"""
Resolve any INTERACT actions, if present.
Currently if two players both interact with a terrain, we resolve player 1's interact
first and then player 2's, without doing anything like collision checking.
"""
pot_states = self.get_pot_states(new_state)
# We divide reward by agent to keep track of who contributed
sparse_reward, shaped_reward = [0] * self.num_players, [0] * self.num_players
for player_idx, (player, action) in enumerate(zip(new_state.players, joint_action)):
if action != Action.INTERACT:
continue
pos, o = player.position, player.orientation
i_pos = Action.move_in_direction(pos, o)
terrain_type = self.get_terrain_type_at_pos(i_pos)
# NOTE: we always log pickup/drop before performing it, as that's
# what the logic of determining whether the pickup/drop is useful assumes
if terrain_type == 'X':
if player.has_object() and not new_state.has_object(i_pos):
obj_name = player.get_object().name
self.log_object_drop(events_infos, new_state, obj_name, pot_states, player_idx)
# Drop object on counter
obj = player.remove_object()
new_state.add_object(obj, i_pos)
elif not player.has_object() and new_state.has_object(i_pos):
obj_name = new_state.get_object(i_pos).name
self.log_object_pickup(events_infos, new_state, obj_name, pot_states, player_idx)
# Pick up object from counter
obj = new_state.remove_object(i_pos)
player.set_object(obj)
elif terrain_type == 'O' and player.held_object is None:
self.log_object_pickup(events_infos, new_state, "onion", pot_states, player_idx)
# Onion pickup from dispenser
obj = ObjectState('onion', pos)
onion_pickup_reward = self.sparse_reward_opts["pickup_onion"]
if onion_pickup_reward > 0:
sparse_reward[player_idx] += onion_pickup_reward
#else:
player.set_object(obj) # actually pickup the onion
elif terrain_type == 'T' and player.held_object is None:
# Tomato pickup from dispenser
player.set_object(ObjectState('tomato', pos))
elif terrain_type == 'D' and player.held_object is None:
self.log_object_pickup(events_infos, new_state, "dish", pot_states, player_idx)
# Give shaped reward if pickup is useful
if self.is_dish_pickup_useful(new_state, pot_states):
shaped_reward[player_idx] += self.reward_shaping_params["DISH_PICKUP_REWARD"]
# Perform dish pickup from dispenser
obj = ObjectState('dish', pos)
player.set_object(obj)
elif terrain_type == 'P' and not player.has_object():
# Cooking soup
if self.soup_to_be_cooked_at_location(new_state, i_pos):
soup = new_state.get_object(i_pos)
soup.begin_cooking()
elif terrain_type == 'P' and player.has_object():
if player.get_object().name == 'dish' and self.soup_ready_at_location(new_state, i_pos):
self.log_object_pickup(events_infos, new_state, "soup", pot_states, player_idx)
# Pick up soup
player.remove_object() # Remove the dish
obj = new_state.remove_object(i_pos) # Get soup
player.set_object(obj)
shaped_reward[player_idx] += self.reward_shaping_params["SOUP_PICKUP_REWARD"]
sparse_reward[player_idx] += self.sparse_reward_opts["add_soup_to_plate"]
elif player.get_object().name in Recipe.ALL_INGREDIENTS:
# Adding ingredient to soup
if not new_state.has_object(i_pos):
# Pot was empty, add soup to it
new_state.add_object(SoupState(i_pos, ingredients=[], cook_time=0))
# Add ingredient if possible
soup = new_state.get_object(i_pos)
if not soup.is_full:
old_soup = soup.deepcopy()
obj = player.remove_object()
soup.add_ingredient(obj)
shaped_reward[player_idx] += self.reward_shaping_params["PLACEMENT_IN_POT_REW"]
sparse_reward[player_idx] += self.sparse_reward_opts["add_onion_to_pot"]
# Log potting
self.log_object_potting(events_infos, new_state, old_soup, soup, obj.name, player_idx)
if obj.name == Recipe.ONION:
events_infos['potting_onion'][player_idx] = True
#if self.sparse_reward_opts["add_onion_to_pot"] > 0:
# new_state.remove_object(i_pos)
elif terrain_type == 'S' and player.has_object():
obj = player.get_object()
if obj.name == 'soup':
delivery_rew = self.deliver_soup(new_state, player, obj)
sparse_reward[player_idx] += self.sparse_reward_opts["deliver_soup"]
# Log soup delivery
events_infos['soup_delivery'][player_idx] = True
return sparse_reward, shaped_reward
def get_recipe_value(self, state, recipe, discounted=False, base_recipe=None, potential_params={}):
"""
Return the reward the player should receive for delivering this recipe
The player receives 0 if recipe not in all_orders, receives base value * order_bonus
if recipe is in bonus orders, and receives base value otherwise
"""
if not discounted:
if not recipe in state.all_orders:
return 0
if not recipe in state.bonus_orders:
return recipe.value
return self.order_bonus * recipe.value
else:
# Calculate missing ingredients needed to complete recipe
missing_ingredients = list(recipe.ingredients)
prev_ingredients = list(base_recipe.ingredients) if base_recipe else []
for ingredient in prev_ingredients:
missing_ingredients.remove(ingredient)
n_tomatoes = len([i for i in missing_ingredients if i == Recipe.TOMATO])
n_onions = len([i for i in missing_ingredients if i == Recipe.ONION])
gamma, pot_onion_steps, pot_tomato_steps = potential_params['gamma'], potential_params['pot_onion_steps'], potential_params['pot_tomato_steps']
return gamma**recipe.time * gamma**(pot_onion_steps * n_onions) * gamma**(pot_tomato_steps * n_tomatoes) * self.get_recipe_value(state, recipe, discounted=False)
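    # The discounted branch above is equivalent to
    #   gamma ** (recipe.time + pot_onion_steps * n_onions + pot_tomato_steps * n_tomatoes)
    #     * get_recipe_value(state, recipe, discounted=False)
    # i.e. the undiscounted recipe value decayed by the estimated number of timesteps still
    # needed to pot the missing ingredients and cook the soup.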
def deliver_soup(self, state, player, soup):
"""
        Deliver the soup held by the player and return the reward for the
        delivered recipe (see get_recipe_value).
"""
assert soup.name == 'soup', "Tried to deliver something that wasn't soup"
        assert soup.is_ready, "Tried to deliver soup that isn't ready"
player.remove_object()
return self.get_recipe_value(state, soup.recipe)
def resolve_movement(self, state, joint_action):
"""Resolve player movement and deal with possible collisions"""
new_positions, new_orientations = self.compute_new_positions_and_orientations(state.players, joint_action)
for player_state, new_pos, new_o in zip(state.players, new_positions, new_orientations):
player_state.update_pos_and_or(new_pos, new_o)
def compute_new_positions_and_orientations(self, old_player_states, joint_action):
"""Compute new positions and orientations ignoring collisions"""
new_positions, new_orientations = list(zip(*[
self._move_if_direction(p.position, p.orientation, a) \
for p, a in zip(old_player_states, joint_action)]))
old_positions = tuple(p.position for p in old_player_states)
new_positions = self._handle_collisions(old_positions, new_positions)
return new_positions, new_orientations
def is_transition_collision(self, old_positions, new_positions):
# Checking for any players ending in same square
if self.is_joint_position_collision(new_positions):
return True
# Check if any two players crossed paths
for idx0, idx1 in itertools.combinations(range(self.num_players), 2):
p1_old, p2_old = old_positions[idx0], old_positions[idx1]
p1_new, p2_new = new_positions[idx0], new_positions[idx1]
if p1_new == p2_old and p1_old == p2_new:
return True
return False
def is_joint_position_collision(self, joint_position):
return any(pos0 == pos1 for pos0, pos1 in itertools.combinations(joint_position, 2))
def step_environment_effects(self, state):
state.timestep += 1
for obj in state.objects.values():
if obj.name == 'soup' and obj.is_cooking:
obj.cook()
def _handle_collisions(self, old_positions, new_positions):
"""If agents collide, they stay at their old locations"""
if self.is_transition_collision(old_positions, new_positions):
return old_positions
return new_positions
def _get_terrain_type_pos_dict(self):
pos_dict = defaultdict(list)
for y, terrain_row in enumerate(self.terrain_mtx):
for x, terrain_type in enumerate(terrain_row):
pos_dict[terrain_type].append((x, y))
return pos_dict
def _move_if_direction(self, position, orientation, action):
"""Returns position and orientation that would
be obtained after executing action"""
if action not in Action.MOTION_ACTIONS:
return position, orientation
new_pos = Action.move_in_direction(position, action)
new_orientation = orientation if action == Action.STAY else action
if new_pos not in self.get_valid_player_positions():
return position, new_orientation
return new_pos, new_orientation
#######################
# LAYOUT / STATE INFO #
#######################
def get_valid_player_positions(self):
return self.terrain_pos_dict[' ']
def get_valid_joint_player_positions(self):
"""Returns all valid tuples of the form (p0_pos, p1_pos, p2_pos, ...)"""
valid_positions = self.get_valid_player_positions()
all_joint_positions = list(itertools.product(valid_positions, repeat=self.num_players))
valid_joint_positions = [j_pos for j_pos in all_joint_positions if not self.is_joint_position_collision(j_pos)]
return valid_joint_positions
def get_valid_player_positions_and_orientations(self):
valid_states = []
for pos in self.get_valid_player_positions():
valid_states.extend([(pos, d) for d in Direction.ALL_DIRECTIONS])
return valid_states
def get_valid_joint_player_positions_and_orientations(self):
"""All joint player position and orientation pairs that are not
overlapping and on empty terrain."""
valid_player_states = self.get_valid_player_positions_and_orientations()
valid_joint_player_states = []
for players_pos_and_orientations in itertools.product(valid_player_states, repeat=self.num_players):
            joint_position = [player_pos_and_or[0] for player_pos_and_or in players_pos_and_orientations]
if not self.is_joint_position_collision(joint_position):
valid_joint_player_states.append(players_pos_and_orientations)
return valid_joint_player_states
def get_adjacent_features(self, player):
adj_feats = []
pos = player.position
for d in Direction.ALL_DIRECTIONS:
adj_pos = Action.move_in_direction(pos, d)
adj_feats.append((adj_pos, self.get_terrain_type_at_pos(adj_pos)))
return adj_feats
def get_terrain_type_at_pos(self, pos):
x, y = pos
return self.terrain_mtx[y][x]
def get_dish_dispenser_locations(self):
return list(self.terrain_pos_dict['D'])
def get_onion_dispenser_locations(self):
return list(self.terrain_pos_dict['O'])
def get_tomato_dispenser_locations(self):
return list(self.terrain_pos_dict['T'])
def get_serving_locations(self):
return list(self.terrain_pos_dict['S'])
def get_pot_locations(self):
return list(self.terrain_pos_dict['P'])
def get_counter_locations(self):
return list(self.terrain_pos_dict['X'])
@property
def num_pots(self):
return len(self.get_pot_locations())
def get_pot_states(self, state):
"""Returns dict with structure:
{
empty: [positions of empty pots]
'x_items': [soup objects with x items that have yet to start cooking],
'cooking': [soup objs that are cooking but not ready]
'ready': [ready soup objs],
}
NOTE: all returned pots are just pot positions
"""
pots_states_dict = defaultdict(list)
for pot_pos in self.get_pot_locations():
if not state.has_object(pot_pos):
pots_states_dict['empty'].append(pot_pos)
else:
soup = state.get_object(pot_pos)
                assert soup.name == 'soup', "soup at {} is not a soup but a {}".format(pot_pos, soup.name)
if soup.is_ready:
pots_states_dict['ready'].append(pot_pos)
elif soup.is_cooking:
pots_states_dict['cooking'].append(pot_pos)
else:
num_ingredients = len(soup.ingredients)
pots_states_dict['{}_items'.format(num_ingredients)].append(pot_pos)
return pots_states_dict
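    # Illustrative return value of get_pot_states (positions are hypothetical; since the dict
    # is a defaultdict, only keys that were appended to will be present):
    #   {'empty': [(2, 0)], '1_items': [(4, 1)], 'ready': [(3, 0)]}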
def get_counter_objects_dict(self, state, counter_subset=None):
"""Returns a dictionary of pos:objects on counters by type"""
counters_considered = self.terrain_pos_dict['X'] if counter_subset is None else counter_subset
counter_objects_dict = defaultdict(list)
for obj in state.objects.values():
if obj.position in counters_considered:
counter_objects_dict[obj.name].append(obj.position)
return counter_objects_dict
def get_empty_counter_locations(self, state):
counter_locations = self.get_counter_locations()
return [pos for pos in counter_locations if not state.has_object(pos)]
def get_empty_pots(self, pot_states):
"""Returns pots that have 0 items in them"""
return pot_states["empty"]
def get_non_empty_pots(self, pot_states):
return self.get_full_pots(pot_states) + self.get_partially_full_pots(pot_states)
def get_ready_pots(self, pot_states):
return pot_states['ready']
def get_cooking_pots(self, pot_states):
return pot_states['cooking']
def get_full_but_not_cooking_pots(self, pot_states):
return pot_states['{}_items'.format(Recipe.MAX_NUM_INGREDIENTS)]
def get_full_pots(self, pot_states):
return self.get_cooking_pots(pot_states) + self.get_ready_pots(pot_states) + self.get_full_but_not_cooking_pots(pot_states)
def get_partially_full_pots(self, pot_states):
return list(set().union(*[pot_states['{}_items'.format(i)] for i in range(1, Recipe.MAX_NUM_INGREDIENTS)]))
def soup_ready_at_location(self, state, pos):
if not state.has_object(pos):
return False
obj = state.get_object(pos)
assert obj.name == 'soup', 'Object in pot was not soup'
return obj.is_ready
def soup_to_be_cooked_at_location(self, state, pos):
if not state.has_object(pos):
return False
obj = state.get_object(pos)
return obj.name == 'soup' and not obj.is_cooking and not obj.is_ready and len(obj.ingredients) > 0
def _check_valid_state(self, state):
"""Checks that the state is valid.
Conditions checked:
- Players are on free spaces, not terrain
- Held objects have the same position as the player holding them
- Non-held objects are on terrain
- No two players or non-held objects occupy the same position
- Objects have a valid state (eg. no pot with 4 onions)
"""
all_objects = list(state.objects.values())
for player_state in state.players:
# Check that players are not on terrain
pos = player_state.position
assert pos in self.get_valid_player_positions()
# Check that held objects have the same position
if player_state.held_object is not None:
all_objects.append(player_state.held_object)
assert player_state.held_object.position == player_state.position
for obj_pos, obj_state in state.objects.items():
# Check that the hash key position agrees with the position stored
# in the object state
assert obj_state.position == obj_pos
# Check that non-held objects are on terrain
assert self.get_terrain_type_at_pos(obj_pos) != ' '
# Check that players and non-held objects don't overlap
all_pos = [player_state.position for player_state in state.players]
all_pos += [obj_state.position for obj_state in state.objects.values()]
assert len(all_pos) == len(set(all_pos)), "Overlapping players or objects"
# Check that objects have a valid state
for obj_state in all_objects:
assert obj_state.is_valid()
def find_free_counters_valid_for_both_players(self, state, mlam):
"""Finds all empty counter locations that are accessible to both players"""
one_player, other_player = state.players
free_counters = self.get_empty_counter_locations(state)
free_counters_valid_for_both = []
for free_counter in free_counters:
goals = mlam.motion_planner.motion_goals_for_pos[free_counter]
if any([mlam.motion_planner.is_valid_motion_start_goal_pair(one_player.pos_and_or, goal) for goal in goals]) and \
any([mlam.motion_planner.is_valid_motion_start_goal_pair(other_player.pos_and_or, goal) for goal in goals]):
free_counters_valid_for_both.append(free_counter)
return free_counters_valid_for_both
def _get_optimal_possible_recipe(self, state, recipe, discounted, potential_params, return_value):
"""
Traverse the recipe-space graph using DFS to find the best possible recipe that can be made
from the current recipe
Because we can't have empty recipes, we handle the case by letting recipe==None be a stand-in for empty recipe
"""
start_recipe = recipe
visited = set()
stack = []
best_recipe = recipe
best_value = 0
if not recipe:
for ingredient in Recipe.ALL_INGREDIENTS:
stack.append(Recipe([ingredient]))
else:
stack.append(recipe)
while stack:
curr_recipe = stack.pop()
if curr_recipe not in visited:
visited.add(curr_recipe)
curr_value = self.get_recipe_value(state, curr_recipe, base_recipe=start_recipe, discounted=discounted, potential_params=potential_params)
if curr_value > best_value:
best_value, best_recipe = curr_value, curr_recipe
for neighbor in curr_recipe.neighbors():
if not neighbor in visited:
stack.append(neighbor)
if return_value:
return best_recipe, best_value
return best_recipe
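    # Note on the DFS above: starting from `recipe` (or from every single-ingredient recipe if
    # `recipe` is None), Recipe.neighbors() is assumed to yield recipes with one additional
    # ingredient, so the search enumerates every recipe reachable by adding ingredients and
    # keeps the one with the highest (possibly discounted) value.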
def get_optimal_possible_recipe(self, state, recipe, discounted=False, potential_params={}, return_value=False):
"""
Return the best possible recipe that can be made starting with ingredients in `recipe`
        Uses self._opt_recipe_cache / self._opt_recipe_discount_cache to avoid re-computing. This only works because
the recipe values are currently static (i.e. bonus_orders doesn't change). Would need to have cache
flushed if order dynamics are introduced
"""
cache_valid = not discounted or self._prev_potential_params == potential_params
if not cache_valid:
if discounted:
self._opt_recipe_discount_cache = {}
else:
self._opt_recipe_cache = {}
if discounted:
cache = self._opt_recipe_discount_cache
self._prev_potential_params = potential_params
else:
cache = self._opt_recipe_cache
if recipe not in cache:
# Compute best recipe now and store in cache for later use
opt_recipe, value = self._get_optimal_possible_recipe(state, recipe, discounted=discounted, potential_params=potential_params, return_value=True)
cache[recipe] = (opt_recipe, value)
# Return best recipe (and value) from cache
if return_value:
return cache[recipe]
return cache[recipe][0]
@staticmethod
def _assert_valid_grid(grid):
"""Raises an AssertionError if the grid is invalid.
grid: A sequence of sequences of spaces, representing a grid of a
certain height and width. grid[y][x] is the space at row y and column
x. A space must be either 'X' (representing a counter), ' ' (an empty
space), 'O' (onion supply), 'P' (pot), 'D' (dish supply), 'S' (serving
        location), 'T' (tomato supply), or a digit '1'-'9' (player starting positions).
"""
height = len(grid)
width = len(grid[0])
# Make sure the grid is not ragged
assert all(len(row) == width for row in grid), 'Ragged grid'
# Borders must not be free spaces
def is_not_free(c):
return c in 'XOPDST'
for y in range(height):
assert is_not_free(grid[y][0]), 'Left border must not be free'
assert is_not_free(grid[y][-1]), 'Right border must not be free'
for x in range(width):
assert is_not_free(grid[0][x]), 'Top border must not be free'
assert is_not_free(grid[-1][x]), 'Bottom border must not be free'
all_elements = [element for row in grid for element in row]
digits = ['1', '2', '3', '4', '5', '6', '7', '8', '9']
layout_digits = [e for e in all_elements if e in digits]
num_players = len(layout_digits)
assert num_players > 0, "No players (digits) in grid"
layout_digits = list(sorted(map(int, layout_digits)))
assert layout_digits == list(range(1, num_players + 1)), "Some players were missing"
assert all(c in 'XOPDST123456789 ' for c in all_elements), 'Invalid character in grid'
assert all_elements.count('1') == 1, "'1' must be present exactly once"
assert all_elements.count('D') >= 1, "'D' must be present at least once"
assert all_elements.count('S') >= 1, "'S' must be present at least once"
assert all_elements.count('P') >= 1, "'P' must be present at least once"
assert all_elements.count('O') >= 1 or all_elements.count('T') >= 1, "'O' or 'T' must be present at least once"
################################
# EVENT LOGGING HELPER METHODS #
################################
def log_object_potting(self, events_infos, state, old_soup, new_soup, obj_name, player_index):
"""Player added an ingredient to a pot"""
obj_pickup_key = "potting_" + obj_name
if obj_pickup_key not in events_infos:
raise ValueError("Unknown event {}".format(obj_pickup_key))
events_infos[obj_pickup_key][player_index] = True
POTTING_FNS = {
"optimal" : self.is_potting_optimal,
"catastrophic" : self.is_potting_catastrophic,
"viable" : self.is_potting_viable,
"useless" : self.is_potting_useless
}
#for outcome, outcome_fn in POTTING_FNS.items():
# if outcome_fn(state, old_soup, new_soup):
# potting_key = "{}_{}_potting".format(outcome, obj_name)
# events_infos[potting_key][player_index] = True
def log_object_pickup(self, events_infos, state, obj_name, pot_states, player_index):
"""Player picked an object up from a counter or a dispenser"""
obj_pickup_key = obj_name + "_pickup"
if obj_pickup_key not in events_infos:
raise ValueError("Unknown event {}".format(obj_pickup_key))
events_infos[obj_pickup_key][player_index] = True
USEFUL_PICKUP_FNS = {
"tomato" : self.is_ingredient_pickup_useful,
"onion": self.is_ingredient_pickup_useful,
"dish": self.is_dish_pickup_useful
}
#if obj_name in USEFUL_PICKUP_FNS:
# if USEFUL_PICKUP_FNS[obj_name](state, pot_states, player_index):
# obj_useful_key = "useful_" + obj_name + "_pickup"
# events_infos[obj_useful_key][player_index] = True
def log_object_drop(self, events_infos, state, obj_name, pot_states, player_index):
"""Player dropped the object on a counter"""
obj_drop_key = obj_name + "_drop"
if obj_drop_key not in events_infos:
raise ValueError("Unknown event {}".format(obj_drop_key))
events_infos[obj_drop_key][player_index] = True
USEFUL_DROP_FNS = {
"tomato" : self.is_ingredient_drop_useful,
"onion": self.is_ingredient_drop_useful,
"dish": self.is_dish_drop_useful
}
#if obj_name in USEFUL_DROP_FNS:
# if USEFUL_DROP_FNS[obj_name](state, pot_states, player_index):
# obj_useful_key = "useful_" + obj_name + "_drop"
# events_infos[obj_useful_key][player_index] = True
def is_dish_pickup_useful(self, state, pot_states, player_index=None):
"""
NOTE: this only works if self.num_players == 2
Useful if:
- Pot is ready/cooking and there is no player with a dish \
- 2 pots are ready/cooking and there is one player with a dish | -> number of dishes in players hands < number of ready/cooking/partially full soups
- Partially full pot is ok if the other player is on course to fill it /
We also want to prevent picking up and dropping dishes, so add the condition
that there must be no dishes on counters
"""
if self.num_players != 2: return False
# This next line is to prevent reward hacking (this logic is also used by reward shaping)
dishes_on_counters = self.get_counter_objects_dict(state)["dish"]
no_dishes_on_counters = len(dishes_on_counters) == 0
num_player_dishes = len(state.player_objects_by_type['dish'])
non_empty_pots = len(self.get_ready_pots(pot_states) + self.get_cooking_pots(pot_states) + self.get_partially_full_pots(pot_states))
return no_dishes_on_counters and num_player_dishes < non_empty_pots
def is_dish_drop_useful(self, state, pot_states, player_index):
"""
NOTE: this only works if self.num_players == 2
Useful if:
- Onion is needed (all pots are non-full)
- Nobody is holding onions
"""
if self.num_players != 2: return False
all_non_full = len(self.get_full_pots(pot_states)) == 0
other_player = state.players[1 - player_index]
other_player_holding_onion = other_player.has_object() and other_player.get_object().name == "onion"
return all_non_full and not other_player_holding_onion
def is_ingredient_pickup_useful(self, state, pot_states, player_index):
"""
NOTE: this only works if self.num_players == 2
Always useful unless:
- All pots are full & other agent is not holding a dish
"""
if self.num_players != 2: return False
all_pots_full = self.num_pots == len(self.get_full_pots(pot_states))
other_player = state.players[1 - player_index]
other_player_has_dish = other_player.has_object() and other_player.get_object().name == "dish"
return not (all_pots_full and not other_player_has_dish)
def is_ingredient_drop_useful(self, state, pot_states, player_index):
"""
NOTE: this only works if self.num_players == 2
Useful if:
- Dish is needed (all pots are full)
- Nobody is holding a dish
"""
if self.num_players != 2: return False
all_pots_full = len(self.get_full_pots(pot_states)) == self.num_pots
other_player = state.players[1 - player_index]
other_player_holding_dish = other_player.has_object() and other_player.get_object().name == "dish"
return all_pots_full and not other_player_holding_dish
def is_potting_optimal(self, state, old_soup, new_soup):
"""
True if the highest valued soup possible is the same before and after the potting
"""
old_recipe = Recipe(old_soup.ingredients) if old_soup.ingredients else None
new_recipe = Recipe(new_soup.ingredients)
old_val = self.get_recipe_value(state, self.get_optimal_possible_recipe(state, old_recipe))
new_val = self.get_recipe_value(state, self.get_optimal_possible_recipe(state, new_recipe))
return old_val == new_val
def is_potting_viable(self, state, old_soup, new_soup):
"""
True if there exists a non-zero reward soup possible from new ingredients
"""
new_recipe = Recipe(new_soup.ingredients)
new_val = self.get_recipe_value(state, self.get_optimal_possible_recipe(state, new_recipe))
return new_val > 0
def is_potting_catastrophic(self, state, old_soup, new_soup):
"""
True if no non-zero reward soup is possible from new ingredients
"""
old_recipe = Recipe(old_soup.ingredients) if old_soup.ingredients else None
new_recipe = Recipe(new_soup.ingredients)
old_val = self.get_recipe_value(state, self.get_optimal_possible_recipe(state, old_recipe))
new_val = self.get_recipe_value(state, self.get_optimal_possible_recipe(state, new_recipe))
return old_val > 0 and new_val == 0
def is_potting_useless(self, state, old_soup, new_soup):
"""
        True if an ingredient was added to a soup that was already guaranteed to be worth at most 0 points
"""
old_recipe = Recipe(old_soup.ingredients) if old_soup.ingredients else None
old_val = self.get_recipe_value(state, self.get_optimal_possible_recipe(state, old_recipe))
return old_val == 0
#####################
# TERMINAL GRAPHICS #
#####################
def flatten_state(self, state):
"""Simplified state depiction """
state_string = self.state_string(state)
        flattened = np.frombuffer(state_string.encode(), np.int8)
return np.array(flattened), np.array(flattened)
def condensed_state(self, state):
players_dict = {player.position: player for player in state.players}
state_arr = []
for y, terrain_row in enumerate(self.terrain_mtx):
for x, element in enumerate(terrain_row):
item = None
if (x, y) in players_dict.keys():
player = players_dict[(x, y)]
orientation = player.orientation
assert orientation in Direction.ALL_DIRECTIONS
player_idx_lst = [i for i, p in enumerate(state.players) if p.position == player.position]
assert len(player_idx_lst) == 1
player_ind = player_idx_lst[0]
orien_ind = Action.ACTION_TO_INDEX[orientation]
player_object = player.held_object
held_ind = 1
if player_object:
if player_object.name[0] == "s":
held_ind = 2
else:
held_ind = 3
item = (10*player_ind) + (5*held_ind) + (orien_ind)
else:
if element == "X" and state.has_object((x,y)):
item = 30
elif element == "X":
item = 40
elif element == "D":
item = 50
elif element == "O":
item = 60
elif element == "P":
item = 70
elif element == "S":
item = 80
elif element == " ":
item = 90
state_arr.append(item)
return np.array(state_arr), np.array(state_arr)
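    # Encoding used by condensed_state: a player cell stores
    #   10 * player_index + 5 * held_code + orientation_index
    # where held_code is 1 (empty hand), 2 (holding soup) or 3 (holding any other object);
    # non-player cells use the fixed codes 30-90 assigned above.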
def state_string(self, state):
"""String representation of the current state"""
players_dict = {player.position: player for player in state.players}
grid_string = ""
for y, terrain_row in enumerate(self.terrain_mtx):
for x, element in enumerate(terrain_row):
grid_string_add = ""
if (x, y) in players_dict.keys():
player = players_dict[(x, y)]
orientation = player.orientation
assert orientation in Direction.ALL_DIRECTIONS
player_idx_lst = [i for i, p in enumerate(state.players) if p.position == player.position]
assert len(player_idx_lst) == 1
grid_string_add += Action.ACTION_TO_CHAR[orientation]
player_object = player.held_object
player_obj_str = ""
if player_object:
player_obj_str += str(player_idx_lst[0])
if player_object.name[0] == "s":
# this is a soup
player_obj_str += str(player_object)
else:
player_obj_str += player_object.name[:1]
else:
player_obj_str += str(player_idx_lst[0])
player_obj_str = player_obj_str.zfill(15)
grid_string_add += player_obj_str
else:
#print(grid_string_add)
grid_string_add += element
if element == "X" and state.has_object((x, y)):
state_obj = state.get_object((x, y))
if state_obj.name[0] == "s":
grid_string_add += str(len(str(state_obj)))
else:
grid_string_add += state_obj.name[:1]
elif element == "P":
if state.has_object((x, y)):
soup = state.get_object((x, y))
#print(soup.soup_str())
#print(len(soup.soup_str()))
str_len = len(str(soup))
grid_string_add += soup.soup_str()
else:
grid_string_add += " "
# display soup
grid_string += grid_string_add
grid_string += "".join([" "] * (7 - len(grid_string_add)))
grid_string += " "
grid_string += "\n\n"
if state.bonus_orders:
grid_string += "Bonus orders: {}\n".format(
state.bonus_orders
)
#print(grid_string)
#print(len(grid_string))
# grid_string += "State potential value: {}\n".format(self.potential_function(state))
return grid_string
###################
# STATE ENCODINGS #
###################
@property
def lossless_state_encoding_shape(self):
return np.array(list(self.shape) + [26])
def lossless_state_encoding(self, overcooked_state, horizon=400, debug=False):
"""Featurizes a OvercookedState object into a stack of boolean masks that are easily readable by a CNN"""
        assert self.num_players == 2, "Functionality has to be added to support encodings for > 2 players"
assert type(debug) is bool
base_map_features = ["pot_loc", "counter_loc", "onion_disp_loc", "tomato_disp_loc",
"dish_disp_loc", "serve_loc"]
variable_map_features = ["onions_in_pot", "tomatoes_in_pot", "onions_in_soup", "tomatoes_in_soup",
"soup_cook_time_remaining", "soup_done", "dishes", "onions", "tomatoes"]
urgency_features = ["urgency"]
all_objects = overcooked_state.all_objects_list
def make_layer(position, value):
layer = np.zeros(self.shape)
layer[position] = value
return layer
def process_for_player(primary_agent_idx):
# Ensure that primary_agent_idx layers are ordered before other_agent_idx layers
other_agent_idx = 1 - primary_agent_idx
ordered_player_features = ["player_{}_loc".format(primary_agent_idx), "player_{}_loc".format(other_agent_idx)] + \
["player_{}_orientation_{}".format(i, Direction.DIRECTION_TO_INDEX[d])
for i, d in itertools.product([primary_agent_idx, other_agent_idx], Direction.ALL_DIRECTIONS)]
# LAYERS = ordered_player_features + base_map_features + variable_map_features
LAYERS = ordered_player_features + base_map_features + variable_map_features + urgency_features
state_mask_dict = {k:np.zeros(self.shape) for k in LAYERS}
# MAP LAYERS
if horizon - overcooked_state.timestep < 40:
state_mask_dict["urgency"] = np.ones(self.shape)
for loc in self.get_counter_locations():
state_mask_dict["counter_loc"][loc] = 1
for loc in self.get_pot_locations():
state_mask_dict["pot_loc"][loc] = 1
for loc in self.get_onion_dispenser_locations():
state_mask_dict["onion_disp_loc"][loc] = 1
for loc in self.get_tomato_dispenser_locations():
state_mask_dict["tomato_disp_loc"][loc] = 1
for loc in self.get_dish_dispenser_locations():
state_mask_dict["dish_disp_loc"][loc] = 1
for loc in self.get_serving_locations():
state_mask_dict["serve_loc"][loc] = 1
# PLAYER LAYERS
for i, player in enumerate(overcooked_state.players):
player_orientation_idx = Direction.DIRECTION_TO_INDEX[player.orientation]
state_mask_dict["player_{}_loc".format(i)] = make_layer(player.position, 1)
state_mask_dict["player_{}_orientation_{}".format(i, player_orientation_idx)] = make_layer(player.position, 1)
# OBJECT & STATE LAYERS
for obj in all_objects:
if obj.name == "soup":
                    # The onion check below was removed because not every soup has to contain onion
# if Recipe.ONION in obj.ingredients:
# get the ingredients into a {object: number} dictionary
ingredients_dict = Counter(obj.ingredients)
# assert "onion" in ingredients_dict.keys()
if obj.position in self.get_pot_locations():
if obj.is_idle:
# onions_in_pot and tomatoes_in_pot are used when the soup is idling, and ingredients could still be added
state_mask_dict["onions_in_pot"] += make_layer(obj.position, ingredients_dict["onion"])
state_mask_dict["tomatoes_in_pot"] += make_layer(obj.position, ingredients_dict["tomato"])
else:
state_mask_dict["onions_in_soup"] += make_layer(obj.position, ingredients_dict["onion"])
state_mask_dict["tomatoes_in_soup"] += make_layer(obj.position, ingredients_dict["tomato"])
state_mask_dict["soup_cook_time_remaining"] += make_layer(obj.position, obj.cook_time - obj._cooking_tick)
if obj.is_ready:
state_mask_dict["soup_done"] += make_layer(obj.position, 1)
else:
# If player soup is not in a pot, treat it like a soup that is cooked with remaining time 0
state_mask_dict["onions_in_soup"] += make_layer(obj.position, ingredients_dict["onion"])
state_mask_dict["tomatoes_in_soup"] += make_layer(obj.position, ingredients_dict["tomato"])
state_mask_dict["soup_done"] += make_layer(obj.position, 1)
elif obj.name == "dish":
state_mask_dict["dishes"] += make_layer(obj.position, 1)
elif obj.name == "onion":
state_mask_dict["onions"] += make_layer(obj.position, 1)
elif obj.name == "tomato":
state_mask_dict["tomatoes"] += make_layer(obj.position, 1)
else:
raise ValueError("Unrecognized object")
if debug:
print("terrain----")
print(np.array(self.terrain_mtx))
print("-----------")
print(len(LAYERS))
print(len(state_mask_dict))
for k, v in state_mask_dict.items():
print(k)
print(np.transpose(v, (1, 0)))
# Stack of all the state masks, order decided by order of LAYERS
state_mask_stack = np.array([state_mask_dict[layer_id] for layer_id in LAYERS])
state_mask_stack = np.transpose(state_mask_stack, (1, 2, 0))
assert state_mask_stack.shape[:2] == self.shape
assert state_mask_stack.shape[2] == len(LAYERS)
# NOTE: currently not including time left or order_list in featurization
return np.array(state_mask_stack).astype(int)
# NOTE: Currently not very efficient, a decent amount of computation repeated here
num_players = len(overcooked_state.players)
final_obs_for_players = tuple(process_for_player(i) for i in range(num_players))
return final_obs_for_players
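    # Example (illustrative): for a 2-player layout, lossless_state_encoding returns a tuple with
    # one integer array per player; each array has shape self.shape + (len(LAYERS),), i.e. the
    # last axis matches the 26 channels of lossless_state_encoding_shape, with the acting
    # player's layers ordered first.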
@property
def featurize_state_shape(self):
return np.array([62])
def featurize_state(self, overcooked_state, mlam, horizon=400):
"""
Encode state with some manually designed features.
NOTE: currently works for just two players.
"""
all_features = {}
def make_closest_feature(idx, name, locations):
"Compute (x, y) deltas to closest feature of type `name`, and save it in the features dict"
all_features["p{}_closest_{}".format(idx, name)] = self.get_deltas_to_closest_location(player, locations,
mlam)
IDX_TO_OBJ = ["onion", "soup", "dish", "tomato"]
OBJ_TO_IDX = {o_name: idx for idx, o_name in enumerate(IDX_TO_OBJ)}
counter_objects = self.get_counter_objects_dict(overcooked_state)
pot_state = self.get_pot_states(overcooked_state)
# Player Info
for i, player in enumerate(overcooked_state.players):
orientation_idx = Direction.DIRECTION_TO_INDEX[player.orientation]
all_features["p{}_orientation".format(i)] = np.eye(4)[orientation_idx]
obj = player.held_object
if obj is None:
held_obj_name = "none"
all_features["p{}_objs".format(i)] = np.zeros(len(IDX_TO_OBJ))
else:
held_obj_name = obj.name
obj_idx = OBJ_TO_IDX[held_obj_name]
all_features["p{}_objs".format(i)] = np.eye(len(IDX_TO_OBJ))[obj_idx]
# Closest feature of each type
if held_obj_name == "onion":
all_features["p{}_closest_onion".format(i)] = (0, 0)
else:
make_closest_feature(i, "onion", self.get_onion_dispenser_locations() + counter_objects["onion"])
make_closest_feature(i, "empty_pot", pot_state["empty"])
make_closest_feature(i, "one_onion_pot", pot_state["1_items"])
make_closest_feature(i, "two_onion_pot", pot_state["2_items"])
make_closest_feature(i, "cooking_pot", pot_state["cooking"])
make_closest_feature(i, "ready_pot", pot_state["ready"])
if held_obj_name == "dish":
all_features["p{}_closest_dish".format(i)] = (0, 0)
else:
make_closest_feature(i, "dish", self.get_dish_dispenser_locations() + counter_objects["dish"])
if held_obj_name == "soup":
all_features["p{}_closest_soup".format(i)] = (0, 0)
else:
make_closest_feature(i, "soup", counter_objects["soup"])
make_closest_feature(i, "serving", self.get_serving_locations())
for direction, pos_and_feat in enumerate(self.get_adjacent_features(player)):
adj_pos, feat = pos_and_feat
if direction == player.orientation:
# Check if counter we are facing is empty
facing_counter = (feat == 'X' and adj_pos not in overcooked_state.objects.keys())
facing_counter_feature = [1] if facing_counter else [0]
# NOTE: Really, this feature should have been "closest empty counter"
all_features["p{}_facing_empty_counter".format(i)] = facing_counter_feature
all_features["p{}_wall_{}".format(i, direction)] = [0] if feat == ' ' else [1]
features_np = {k: np.array(v) for k, v in all_features.items()}
p0, p1 = overcooked_state.players
p0_dict = {k: v for k, v in features_np.items() if k[:2] == "p0"}
p1_dict = {k: v for k, v in features_np.items() if k[:2] == "p1"}
p0_features = np.concatenate(list(p0_dict.values()))
p1_features = np.concatenate(list(p1_dict.values()))
p1_rel_to_p0 = np.array(pos_distance(p1.position, p0.position))
abs_pos_p0 = np.array(p0.position)
ordered_features_p0 = np.squeeze(np.concatenate([p0_features, p1_features, p1_rel_to_p0, abs_pos_p0]))
p0_rel_to_p1 = np.array(pos_distance(p0.position, p1.position))
abs_pos_p1 = np.array(p1.position)
ordered_features_p1 = np.squeeze(np.concatenate([p1_features, p0_features, p0_rel_to_p1, abs_pos_p1]))
return ordered_features_p0, ordered_features_p1
def featurize(self, idx, overcooked_state, action, mlam, horizon=400):
"""
Encode state with some manually designed features.
NOTE: currently works for just two players.
"""
#TODO: what happens if we are in terminal state?
# self.is_terminal(overcooked_state)
act2use = None
if idx == 0:
act2use = [action, Action.STAY]
else:
act2use = [Action.STAY, action]
nextState, _ = self.get_state_transition(overcooked_state, act2use)
overcooked_state = nextState
all_features = {}
def make_closest_feature(idx, name, locations):
"Compute (x, y) deltas to closest feature of type `name`, and save it in the features dict"
delt = self.get_deltas_to_closest_location(player, locations,mlam)
all_features["p{}_closest_{}".format(idx, name)] = math.sqrt((delt[0] ** 2) + (delt[1] ** 2))
IDX_TO_OBJ = ["onion", "soup", "dish", "tomato"]
OBJ_TO_IDX = {o_name: idx for idx, o_name in enumerate(IDX_TO_OBJ)}
counter_objects = self.get_counter_objects_dict(overcooked_state)
pot_state = self.get_pot_states(overcooked_state)
# Player Info
for i, player in enumerate(overcooked_state.players):
orientation_idx = Direction.DIRECTION_TO_INDEX[player.orientation]
all_features["p{}_orientation".format(i)] = orientation_idx
obj = player.held_object
if obj is None:
held_obj_name = "none"
all_features["p{}_objs".format(i)] = 0.0
else:
held_obj_name = obj.name
obj_idx = OBJ_TO_IDX[held_obj_name]
all_features["p{}_objs".format(i)] = obj_idx
# Closest feature of each type
if held_obj_name == "onion":
all_features["p{}_closest_onion".format(i)] = 0.0
else:
make_closest_feature(i, "onion", self.get_onion_dispenser_locations() + counter_objects["onion"])
make_closest_feature(i, "empty_pot", pot_state["empty"])
make_closest_feature(i, "one_onion_pot", pot_state["1_items"])
make_closest_feature(i, "two_onion_pot", pot_state["2_items"])
make_closest_feature(i, "cooking_pot", pot_state["cooking"])
make_closest_feature(i, "ready_pot", pot_state["ready"])
if held_obj_name == "dish":
all_features["p{}_closest_dish".format(i)] = 0.0
else:
make_closest_feature(i, "dish", self.get_dish_dispenser_locations() + counter_objects["dish"])
if held_obj_name == "soup":
all_features["p{}_closest_soup".format(i)] = 0.0
else:
make_closest_feature(i, "soup", counter_objects["soup"])
make_closest_feature(i, "serving", self.get_serving_locations())
for direction, pos_and_feat in enumerate(self.get_adjacent_features(player)):
adj_pos, feat = pos_and_feat
if direction == player.orientation:
# Check if counter we are facing is empty
facing_counter = (feat == 'X' and adj_pos not in overcooked_state.objects.keys())
facing_counter_feature = [1] if facing_counter else [0]
# NOTE: Really, this feature should have been "closest empty counter"
all_features["p{}_facing_empty_counter".format(i)] = facing_counter_feature
all_features["p{}_wall_{}".format(i, direction)] = 0 if feat == ' ' else 1
features_np = {k: np.array(v) for k, v in all_features.items()}
p0, p1 = overcooked_state.players
p0_dict = {k: v for k, v in features_np.items() if k[:2] == "p0"}
p1_dict = {k: v for k, v in features_np.items() if k[:2] == "p1"}
return p0_dict, p1_dict
def get_deltas_to_closest_location(self, player, locations, mlam):
_, closest_loc = mlam.motion_planner.min_cost_to_feature(player.pos_and_or, locations, with_argmin=True)
if closest_loc is None:
# "any object that does not exist or I am carrying is going to show up as a (0,0)
# but I can disambiguate the two possibilities by looking at the features
# for what kind of object I'm carrying"
return (0, 0)
dy_loc, dx_loc = pos_distance(closest_loc, player.position)
return dy_loc, dx_loc
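    # Note: get_deltas_to_closest_location returns a per-axis offset to the closest reachable
    # location; unreachable (or currently held) targets collapse to (0, 0), which the callers
    # above disambiguate via the held-object features.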
###############################
# POTENTIAL REWARD SHAPING FN #
###############################
def potential_function(self, state, mp, gamma=0.99):
"""
Essentially, this is the ɸ(s) function.
        The main goal here is to approximately infer the actions of an optimal agent, and derive an estimate for the value
        function of the optimal policy. The perfect potential function is indeed the value function.
        At a high level, we assume each agent acts independently and greedily optimally, and then, using the decay factor "gamma",
        we calculate the expected discounted reward under this policy.
Some implementation details:
* the process of delivering a soup is broken into 4 steps
* Step 1: placing the first ingredient into an empty pot
* Step 2: placing the remaining ingredients in the pot
            * Step 3: cooking the soup/retrieving a dish with which to serve the soup
* Step 4: delivering the soup once it is in a dish
* Here is an exhaustive list of the greedy assumptions made at each step
* step 1:
* If an agent is holding an ingredient that could be used to cook an optimal soup, it will use it in that soup
* If no such optimal soup exists, but there is an empty pot, the agent will place the ingredient there
* If neither of the above cases holds, no potential is awarded for possessing the ingredient
* step 2:
* The agent will always try to cook the highest valued soup possible based on the current ingredients in a pot
* Any agent possessing a missing ingredient for an optimal soup will travel directly to the closest such pot
* If the optimal soup has all ingredients, the closest agent not holding anything will go to cook it
* step 3:
* Any player holding a dish attempts to serve the highest valued soup based on recipe values and cook time remaining
* step 4:
* Any agent holding a soup will go directly to the nearest serving area
* At every step, the expected reward is discounted by multiplying the optimal reward by gamma ^ (estimated #steps to complete greedy action)
* In the case that certain actions are infeasible (i.e. an agent is holding a soup in step 4, but no path exists to a serving
area), estimated number of steps in order to complete the action defaults to `max_steps`
* Cooperative behavior between the two agents is not considered for complexity reasons
* Soups that are worth <1 points are rounded to be worth 1 point. This is to incentivize the agent to cook a worthless soup
that happens to be in a pot in order to free up the pot
Parameters:
state: OvercookedState instance representing the state to evaluate potential for
mp: MotionPlanner instance used to calculate gridworld distances to objects
gamma: float, discount factor
            max_steps: int, worst-case number of steps a high level action is assumed to take
                (supplied through the layout's POTENTIAL_CONSTANTS rather than as an argument)
Returns
phi(state), the potential of the state
"""
if not hasattr(Recipe, '_tomato_value') or not hasattr(Recipe, '_onion_value'):
raise ValueError("Potential function requires Recipe onion and tomato values to work properly")
# Constants needed for potential function
potential_params = {
'gamma' : gamma,
'tomato_value' : Recipe._tomato_value if Recipe._tomato_value else 13,
            'onion_value' : Recipe._onion_value if Recipe._onion_value else 21,
**POTENTIAL_CONSTANTS.get(self.layout_name, POTENTIAL_CONSTANTS['default'])
}
pot_states = self.get_pot_states(state)
# Base potential value is the geometric sum of making optimal soups infinitely
opt_recipe, discounted_opt_recipe_value = self.get_optimal_possible_recipe(state, None, discounted=True, potential_params=potential_params, return_value=True)
opt_recipe_value = self.get_recipe_value(state, opt_recipe)
discount = discounted_opt_recipe_value / opt_recipe_value
steady_state_value = (discount / (1 - discount)) * opt_recipe_value
potential = steady_state_value
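        # Rationale: if the team could deliver the optimal recipe over and over, each delivery
        # cycle discounted by `discount`, the total discounted return would be the geometric
        # series sum_{k>=1} discount**k * opt_recipe_value = discount / (1 - discount) * opt_recipe_value,
        # which is the steady-state term used above.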
# Get list of all soups that have >0 ingredients, sorted based on value of best possible recipe
idle_soups = [state.get_object(pos) for pos in self.get_full_but_not_cooking_pots(pot_states)]
idle_soups.extend([state.get_object(pos) for pos in self.get_partially_full_pots(pot_states)])
idle_soups = sorted(idle_soups, key=lambda soup : self.get_optimal_possible_recipe(state, Recipe(soup.ingredients), discounted=True, potential_params=potential_params, return_value=True)[1], reverse=True)
        # Build mapping of non_idle soups to the potential value each one will contribute
        # Default potential value is maximal discount for last two steps applied to optimal recipe value
cooking_soups = [state.get_object(pos) for pos in self.get_cooking_pots(pot_states)]
done_soups = [state.get_object(pos) for pos in self.get_ready_pots(pot_states)]
non_idle_soup_vals = { soup : gamma**(potential_params['max_delivery_steps'] + max(potential_params['max_pickup_steps'], soup.cook_time - soup._cooking_tick)) * max(self.get_recipe_value(state, soup.recipe), 1) for soup in cooking_soups + done_soups }
# Get descriptive list of players based on different attributes
# Note that these lists are mutually exclusive
players_holding_soups = [player for player in state.players if player.has_object() and player.get_object().name == 'soup']
players_holding_dishes = [player for player in state.players if player.has_object() and player.get_object().name == 'dish']
players_holding_tomatoes = [player for player in state.players if player.has_object() and player.get_object().name == Recipe.TOMATO]
players_holding_onions = [player for player in state.players if player.has_object() and player.get_object().name == Recipe.ONION]
players_holding_nothing = [player for player in state.players if not player.has_object()]
### Step 4 potential ###
# Add potential for each player with a soup
for player in players_holding_soups:
# Even if delivery_dist is infinite, we still award potential (as an agent might need to pass the soup to other player first)
delivery_dist = mp.min_cost_to_feature(player.pos_and_or, self.terrain_pos_dict['S'])
potential += gamma**min(delivery_dist, potential_params['max_delivery_steps']) * max(self.get_recipe_value(state, player.get_object().recipe), 1)
### Step 3 potential ###
# Reweight each non-idle soup value based on agents with dishes performing greedily-optimally as outlined in docstring
for player in players_holding_dishes:
best_pickup_soup = None
best_pickup_value = 0
# find best soup to pick up with dish agent currently has
for soup in non_idle_soup_vals:
# How far away the soup is (inf if not-reachable)
pickup_dist = mp.min_cost_to_feature(player.pos_and_or, [soup.position])
# mask to award zero score if not reachable
# Note: this means that potentially "useful" dish pickups (where agent passes dish to other agent
                # that can reach the soup) do not receive a potential bump
is_useful = int(pickup_dist < np.inf)
# Always assume worst-case discounting for step 4, and bump zero-valued soups to 1 as mentioned in docstring
pickup_soup_value = gamma**potential_params['max_delivery_steps'] * max(self.get_recipe_value(state, soup.recipe), 1)
cook_time_remaining = soup.cook_time - soup._cooking_tick
discount = gamma**max(cook_time_remaining, min(pickup_dist, potential_params['max_pickup_steps']))
# Final discount-adjusted value for this player pursuing this soup
pickup_value = discount * pickup_soup_value * is_useful
# Update best soup found for this player
if pickup_dist < np.inf and pickup_value > best_pickup_value:
best_pickup_soup = soup
best_pickup_value = pickup_value
# Set best-case score for this soup. Can only improve upon previous players policies
# Note cooperative policies between players not considered
if best_pickup_soup:
non_idle_soup_vals[best_pickup_soup] = max(non_idle_soup_vals[best_pickup_soup], best_pickup_value)
# Apply potential for each idle soup as calculated above
for soup in non_idle_soup_vals:
potential += non_idle_soup_vals[soup]
### Step 2 potential ###
# Iterate over idle soups in decreasing order of value so we greedily prioritize higher valued soups
for soup in idle_soups:
# Calculate optimal recipe
curr_recipe = Recipe(soup.ingredients)
opt_recipe = self.get_optimal_possible_recipe(state, curr_recipe, discounted=True, potential_params=potential_params)
# Calculate missing ingredients needed to complete optimal recipe
missing_ingredients = list(opt_recipe.ingredients)
for ingredient in soup.ingredients:
missing_ingredients.remove(ingredient)
# Base discount for steps 3-4
discount = gamma**(max(potential_params['max_pickup_steps'], opt_recipe.time) + potential_params['max_delivery_steps'])
# Add a multiplicative discount for each needed ingredient (this has the effect of giving more award to soups
# that are closer to being completed)
for ingredient in missing_ingredients:
# Players who might have an ingredient we need
pertinent_players = players_holding_tomatoes if ingredient == Recipe.TOMATO else players_holding_onions
dist = np.inf
closest_player = None
# Find closest player with ingredient we need
for player in pertinent_players:
curr_dist = mp.min_cost_to_feature(player.pos_and_or, [soup.position])
if curr_dist < dist:
dist = curr_dist
closest_player = player
# Update discount to account for adding this missing ingredient (defaults to min_coeff if no pertinent players exist)
discount *= gamma**min(dist, potential_params['pot_{}_steps'.format(ingredient)])
                # Cross off this player's ingredient contribution so it can't be double-counted
if closest_player:
pertinent_players.remove(closest_player)
# Update discount to account for time it takes to start the soup cooking once last ingredient is added
if missing_ingredients:
# We assume it only takes one timestep if there are missing ingredients since the agent delivering the last ingredient
# will be at the pot already
discount *= gamma
else:
# Otherwise, we assume that every player holding nothing will make a beeline to this soup since it's already optimal
cook_dist = min([mp.min_cost_to_feature(player.pos_and_or, [soup.position]) for player in players_holding_nothing], default=np.inf)
discount *= gamma**min(cook_dist, potential_params['max_pickup_steps'])
potential += discount * max(self.get_recipe_value(state, opt_recipe), 1)
### Step 1 Potential ###
# Add potential for each tomato that is left over after using all others to complete optimal recipes
for player in players_holding_tomatoes:
# will be inf if there exists no empty pot that is reachable
dist = mp.min_cost_to_feature(player.pos_and_or, self.get_empty_pots(pot_states))
is_useful = int(dist < np.inf)
discount = gamma**(min(potential_params['pot_tomato_steps'], dist) + potential_params['max_pickup_steps'] + potential_params['max_delivery_steps']) * is_useful
potential += discount * potential_params['tomato_value']
# Add potential for each onion that is remaining after using others to complete optimal recipes if possible
for player in players_holding_onions:
dist = mp.min_cost_to_feature(player.pos_and_or, self.get_empty_pots(pot_states))
is_useful = int(dist < np.inf)
discount = gamma**(min(potential_params['pot_onion_steps'], dist) + potential_params['max_pickup_steps'] + potential_params['max_delivery_steps']) * is_useful
potential += discount * potential_params['onion_value']
# Finally, return the accumulated potential
return potential
##############
# DEPRECATED #
##############
# def calculate_distance_based_shaped_reward(self, state, new_state):
# """
# Adding reward shaping based on distance to certain features.
# """
# distance_based_shaped_reward = 0
#
# pot_states = self.get_pot_states(new_state)
# ready_pots = pot_states["tomato"]["ready"] + pot_states["onion"]["ready"]
# cooking_pots = ready_pots + pot_states["tomato"]["cooking"] + pot_states["onion"]["cooking"]
# nearly_ready_pots = cooking_pots + pot_states["tomato"]["partially_full"] + pot_states["onion"]["partially_full"]
# dishes_in_play = len(new_state.player_objects_by_type['dish'])
# for player_old, player_new in zip(state.players, new_state.players):
# # Linearly increase reward depending on vicinity to certain features, where distance of 10 achieves 0 reward
# max_dist = 8
#
# if player_new.held_object is not None and player_new.held_object.name == 'dish' and len(nearly_ready_pots) >= dishes_in_play:
# min_dist_to_pot_new = np.inf
# min_dist_to_pot_old = np.inf
# for pot in nearly_ready_pots:
# new_dist = np.linalg.norm(np.array(pot) - np.array(player_new.position))
# old_dist = np.linalg.norm(np.array(pot) - np.array(player_old.position))
# if new_dist < min_dist_to_pot_new:
# min_dist_to_pot_new = new_dist
# if old_dist < min_dist_to_pot_old:
# min_dist_to_pot_old = old_dist
# if min_dist_to_pot_old > min_dist_to_pot_new:
# distance_based_shaped_reward += self.reward_shaping_params["POT_DISTANCE_REW"] * (1 - min(min_dist_to_pot_new / max_dist, 1))
#
# if player_new.held_object is None and len(cooking_pots) > 0 and dishes_in_play == 0:
# min_dist_to_d_new = np.inf
# min_dist_to_d_old = np.inf
# for serving_loc in self.terrain_pos_dict['D']:
# new_dist = np.linalg.norm(np.array(serving_loc) - np.array(player_new.position))
# old_dist = np.linalg.norm(np.array(serving_loc) - np.array(player_old.position))
# if new_dist < min_dist_to_d_new:
# min_dist_to_d_new = new_dist
# if old_dist < min_dist_to_d_old:
# min_dist_to_d_old = old_dist
#
# if min_dist_to_d_old > min_dist_to_d_new:
# distance_based_shaped_reward += self.reward_shaping_params["DISH_DISP_DISTANCE_REW"] * (1 - min(min_dist_to_d_new / max_dist, 1))
#
# if player_new.held_object is not None and player_new.held_object.name == 'soup':
# min_dist_to_s_new = np.inf
# min_dist_to_s_old = np.inf
# for serving_loc in self.terrain_pos_dict['S']:
# new_dist = np.linalg.norm(np.array(serving_loc) - np.array(player_new.position))
# old_dist = np.linalg.norm(np.array(serving_loc) - np.array(player_old.position))
# if new_dist < min_dist_to_s_new:
# min_dist_to_s_new = new_dist
#
# if old_dist < min_dist_to_s_old:
# min_dist_to_s_old = old_dist
#
# if min_dist_to_s_old > min_dist_to_s_new:
# distance_based_shaped_reward += self.reward_shaping_params["SOUP_DISTANCE_REW"] * (1 - min(min_dist_to_s_new / max_dist, 1))
#
# return distance_based_shaped_reward
``` |
{
"source": "johnharveymath/fetchers-python",
"score": 4
} |
#### File: plugins/ESP_MSVP/utils.py
```python
import pandas as pd
def parser(data):
"""
DESCRIPTION:
A function to parse all data downloaded from CSV into the DB format.
:param data: [pandas DataFrame] non-parsed data.
:return: [pandas DataFrame] parsed data.
"""
data = data.rename(columns={"CCAA": "adm_area_1", "fecha": "date", "casos": "confirmed", "UCI": "hospitalised_icu",
"Hospitalizados": "hospitalised", "curados": "recovered", "muertes": "dead"})
data = data[['date', 'adm_area_1', 'confirmed', 'recovered', 'dead', 'hospitalised', 'hospitalised_icu']]
data = data.where(pd.notnull(data), '')
return data
```
#### File: plugins/EU_ZH/fetcher.py
```python
import logging
import pandas as pd
import os
import sys
__all__ = ('EU_ZH_Fetcher',)
from utils.fetcher.base_epidemiology import BaseEpidemiologyFetcher
logger = logging.getLogger(__name__)
"""
site-location: https://github.com/covid19-eu-zh/covid19-eu-data
COVID19 data for European countries created and maintained by covid19-eu-zh
Data originally from
Austria's Sozial Ministerium https://www.sozialministerium.at/Informationen-zum-Coronavirus/Neuartiges-Coronavirus-(2019-nCov).html
Czech Ministry of Health https://onemocneni-aktualne.mzcr.cz/covid-19
Germany's Robert Koch Institute https://www.rki.de/DE/Content/InfAZ/N/Neuartiges_Coronavirus/Fallzahlen.html
Hungary's Office of the Prime Minister https://koronavirus.gov.hu/
Ireland's Health Protection Surveillance Centre https://www.hpsc.ie/a-z/respiratory/coronavirus/novelcoronavirus/casesinireland/
Poland - Government https://www.gov.pl/web/koronawirus/wykaz-zarazen-koronawirusem-sars-cov-2
Sweden's Public Health Authority https://www.folkhalsomyndigheten.se/smittskydd-beredskap/utbrott/aktuella-utbrott/covid-19/aktuellt-epidemiologiskt-lage/
Slovenia's Government Communications Office https://www.gov.si/en/topics/coronavirus-disease-covid-19/
Belgian institute for health: https://epistat.wiv-isp.be/Covid/
"""
class EU_ZH_Fetcher(BaseEpidemiologyFetcher):
LOAD_PLUGIN = True
SOURCE = 'EU_ZH'
def fetch(self, url):
return pd.read_csv(url)
# Certain regions have excess characters in some source files
def clean_string(self, input):
if isinstance(input, str):
return input.replace('', '')
else:
return input
def parse_int(self, data):
if pd.isna(data):
return None
if isinstance(data, str):
data = data.replace('*', '')
return int(data)
def country_fetcher(self, region, country, code_3, code_2):
logger.info("Processing number of cases in " + country)
if code_3 == 'NOR':
logger.warning("These GIDs not entirely accurate due to change in Norway's county boundaries, 2020.")
if code_3 == 'BEL':
logger.warning("These GIDs has MISSING region due to unknown data resourses, 2020.")
url = 'https://github.com/covid19-eu-zh/covid19-eu-data/raw/master/dataset/covid-19-' + code_2 + '.csv'
df = self.fetch(url)
for index, record in df.iterrows():
# date Y-m-d or Y-m-dTH:M:S
date = record['datetime'].split('T')[0]
adm_area_2 = None
# If no region is reported then all data is national
if not hasattr(record, region):
adm_area_1 = None
gid = [code_3]
# Ignore two known corrupted lines in the Polish data
elif str(record[region])[:4] == 'http':
continue
elif pd.isna(record[region]) and code_3 == 'POL':
continue
# Austria's national data is reported with a blank region
elif pd.isna(record[region]) and code_3 == 'AUT':
adm_area_1 = None
gid = [code_3]
elif region == 'nuts_2' and code_3 == 'BEL':
if self.clean_string(record['nuts_1']) == 'MISSING' or pd.isna(record[region]):
continue
success, adm_area_1, adm_area_2, adm_area_3, gid = self.adm_translator.tr(
input_adm_area_1=self.clean_string(record['nuts_1']),
input_adm_area_2=self.clean_string(record[region]),
return_original_if_failure=True,
suppress_exception=True
)
# If the region appears cleanly, then we can translate to obtain GID
elif region == 'nuts_1' and code_3 == 'BEL':
if pd.notna(record['nuts_2']):
continue
success, adm_area_1, adm_area_2, adm_area_3, gid = self.adm_translator.tr(
input_adm_area_1=self.clean_string(record[region]),
return_original_if_failure=True,
suppress_exception=True
)
else:
success, adm_area_1, adm_area_2, adm_area_3, gid = self.adm_translator.tr(
input_adm_area_1=self.clean_string(record[region]),
return_original_if_failure=True,
suppress_exception=True
)
upsert_obj = {
'source': self.SOURCE,
'date': date,
'country': country,
'countrycode': code_3,
'adm_area_1': adm_area_1,
'adm_area_2': adm_area_2,
'adm_area_3': None,
'gid': gid
}
# add the epidemiological properties to the object if they exist
if hasattr(record, 'tests'):
upsert_obj['tested'] = self.parse_int(record['tests'])
if hasattr(record, 'cases'):
upsert_obj['confirmed'] = self.parse_int(record['cases'])
if hasattr(record, 'tests_positive'):
upsert_obj['confirmed'] = self.parse_int(record['tests_positive'])
if hasattr(record, 'recovered'):
upsert_obj['recovered'] = self.parse_int(record['recovered'])
if hasattr(record, 'deaths'):
upsert_obj['dead'] = self.parse_int(record['deaths'])
if hasattr(record, 'hospitalized'):
upsert_obj['hospitalised'] = self.parse_int(record['hospitalized'])
if hasattr(record, 'intensive_care'):
upsert_obj['hospitalised_icu'] = self.parse_int(record['intensive_care'])
if hasattr(record, 'quarantine'):
upsert_obj['quarantined'] = self.parse_int(record['quarantine'])
self.upsert_data(**upsert_obj)
# read the list of countries from a csv file in order to fetch each one
def load_countries_to_fetch(self):
input_csv_fname = getattr(self.__class__, 'INPUT_CSV', "input.csv")
path = os.path.dirname(sys.modules[self.__class__.__module__].__file__)
csv_fname = os.path.join(path, input_csv_fname)
if not os.path.exists(csv_fname):
return None
colnames = ['country', 'code_3', 'code_2', 'region']
input_pd = pd.read_csv(csv_fname)
input_pd.columns = colnames
input_pd = input_pd.where((pd.notnull(input_pd)), None)
return input_pd
def run(self):
countries = self.load_countries_to_fetch()
for index, record in countries.iterrows():
self.country_fetcher(record['region'], record['country'], record['code_3'], record['code_2'])
```
#### File: plugins/GBR_PHE/fetcher.py
```python
import logging
from uk_covid19 import Cov19API
__all__ = ('EnglandFetcher',)
from utils.fetcher.base_epidemiology import BaseEpidemiologyFetcher
logger = logging.getLogger(__name__)
class EnglandFetcher(BaseEpidemiologyFetcher):
''' a fetcher to collect data from Public Health England'''
LOAD_PLUGIN = True
SOURCE = 'GBR_PHE' # Public Health England
START_DATE = '2020-02-28'
def fetch_uk(self):
uk = ['areaType=overview', 'date>' + self.get_first_date_to_fetch(self.START_DATE)]
cases_and_deaths = {
"date": "date",
"areaName": "areaName",
"areaType": "areaType",
"cases": "cumCasesBySpecimenDate",
"tests": "cumTestsByPublishDate",
"deaths": "cumDeaths28DaysByDeathDate",
"cumAdmissions": "cumAdmissions"
}
api = Cov19API(filters=uk, structure=cases_and_deaths)
data = api.get_json()
return data
def fetch_nation(self):
nation = ['areaType=nation', 'areaName=England', 'date>' + self.get_first_date_to_fetch(self.START_DATE)]
cases_and_deaths = {
"date": "date",
"areaName": "areaName",
"areaType": "areaType",
"cases": "cumCasesBySpecimenDate",
"tests": "cumTestsByPublishDate",
"deaths": "cumDeaths28DaysByDeathDate",
"cumAdmissions": "cumAdmissions"
}
api = Cov19API(filters=nation, structure=cases_and_deaths)
data = api.get_json()
return data
def fetch_utla(self):
utla = ['areaType=utla', 'date>' + self.get_first_date_to_fetch(self.START_DATE)]
cases_and_deaths = {
"date": "date",
"areaName": "areaName",
"areaType": "areaType",
"areaCode": "areaCode",
"cases": "cumCasesBySpecimenDate",
"tests": "cumTestsByPublishDate",
"cumAdmissions": "cumAdmissions",
"deaths": "cumDeaths28DaysByDeathDate"
}
api = Cov19API(filters=utla, structure=cases_and_deaths)
data = api.get_json()
return data
def fetch_ltla(self):
ltla = ['areaType=ltla', 'date>' + self.get_first_date_to_fetch(self.START_DATE)]
cases_and_deaths = {
"date": "date",
"areaName": "areaName",
"areaType": "areaType",
"areaCode": "areaCode",
"cases": "cumCasesBySpecimenDate",
"tests": "cumTestsByPublishDate",
"cumAdmissions": "cumAdmissions",
"deaths": "cumDeaths28DaysByDeathDate"
}
api = Cov19API(filters=ltla, structure=cases_and_deaths)
data = api.get_json()
return data
def upsert_uk_data(self, data):
for record in data:
# only use English local authorities
region_type = record.get('areaType')
if region_type in ['utla', 'ltla'] and record.get('areaCode')[0] != 'E':
continue
region = record.get('areaName')
success, adm_area_1, adm_area_2, adm_area_3, gid = self.adm_translator.tr(
input_adm_area_1=region,
input_adm_area_2=region_type,
input_adm_area_3=None,
return_original_if_failure=True
)
upsert_obj = {
'source': self.SOURCE,
'date': str(record.get('date')),
'country': 'United Kingdom',
'countrycode': 'GBR',
'adm_area_1': adm_area_1,
'adm_area_2': adm_area_2,
'adm_area_3': adm_area_3,
'gid': gid,
'dead': record.get('deaths'),
'confirmed': record.get('cases'),
'tested': record.get('tests'),
'hospitalised': record.get('cumAdmissions')
}
self.upsert_data(**upsert_obj)
def run(self):
methods = [self.fetch_uk, self.fetch_nation, self.fetch_utla, self.fetch_ltla]
for method in methods:
data = method()['data']
self.upsert_uk_data(data)
```
#### File: plugins/IRQ_GOV/fetcher.py
```python
import logging
import pandas as pd
import requests
from datetime import datetime
from bs4 import BeautifulSoup
import time
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.action_chains import ActionChains
__all__ = ('IraqFetcher',)
from utils.fetcher.base_epidemiology import BaseEpidemiologyFetcher
logger = logging.getLogger(__name__)
class IraqFetcher(BaseEpidemiologyFetcher):
''' a fetcher to collect data for Iraq'''
LOAD_PLUGIN = True
SOURCE = 'IRQ_GOV'
def wd_config(self):
# configure a webdriver for selenium
# this should probably be set at AbstractFetcher level
chrome_options = webdriver.ChromeOptions()
chrome_options.add_argument('--headless')
chrome_options.add_argument('--no-sandbox')
chrome_options.add_argument('--disable-dev-shm-usage')
self.wd = webdriver.Chrome('chromedriver',chrome_options=chrome_options)
def fetch_province(self,url):
self.wd.get(url)
#create an empty dataframe
df = pd.DataFrame()
element = WebDriverWait(self.wd, 20).until(EC.presence_of_element_located((By.CLASS_NAME, "visual.visual-columnChart.allow-deferred-rendering")))
time.sleep(10)
#Get last updated date
last_updated = self.wd.find_element_by_xpath(".//span[contains(text(), 'Updated on')]").text
last_updated = last_updated.split(',')
last_updated = last_updated[0]
last_updated = last_updated.strip()
last_updated = last_updated.replace('Updated on ','')
date = datetime.strptime(last_updated, '%d %b %Y').strftime('%Y-%m-%d')
#get the pages menu
menu_btn = self.wd.find_element_by_xpath(".//*[@id='embedWrapperID']/div[2]/logo-bar/div/div/div/logo-bar-navigation/span/a[3]")
menu_btn.click()
time.sleep(5)
# go to the third page
page_btn = self.wd.find_element_by_xpath(".//*[@id='embedWrapperID']/div[2]/logo-bar/div/div/div/logo-bar-navigation/span/a[3]")
page_btn.click()
time.sleep(5)
# find all the str column values in the table
city = self.wd.find_element_by_xpath(".//*[name()='div' and @aria-label='COVID-19 Cumulative Status Matrix']//*[name()='div' and @class='rowHeaders']")
city = city.text.splitlines()
confirmed = self.wd.find_element_by_xpath(".//*[name()='div' and @aria-label='COVID-19 Cumulative Status Matrix']//*[name()='div' and @class='bodyCells']/div/div/div[1]")
confirmed = confirmed.text.splitlines()
recovered = self.wd.find_element_by_xpath(".//*[name()='div' and @aria-label='COVID-19 Cumulative Status Matrix']//*[name()='div' and @class='bodyCells']/div/div/div[2]")
recovered = recovered.text.splitlines()
dead = self.wd.find_element_by_xpath(".//*[name()='div' and @aria-label='COVID-19 Cumulative Status Matrix']//*[name()='div' and @class='bodyCells']/div/div/div[3]")
dead = dead.text.splitlines()
lst = []
lst = list(zip(city, confirmed, recovered, dead))
df = pd.DataFrame(lst, columns=['city', 'confirmed','recovered', 'dead'])
# Baghdad is reported in two rows by the source. The code below adds up
# the values from the two rows and creates a new total row for Baghdad
# set city column is the index
df.set_index('city', inplace =True)
# select the two rows for Baghdad
baghdad = df.loc[['BAGHDAD-KARKH','BAGHDAD-RESAFA'],:]
baghdad[['confirmed','recovered', 'dead']] = baghdad[['confirmed','recovered', 'dead']].apply(pd.to_numeric)
# add the new cumulative Baghdad sum to the DF
df = df.append(baghdad.sum().rename('BAGHDAD'))
#remove the two Baghdad rows from the original dataframe
df = df.drop(['BAGHDAD-KARKH', 'BAGHDAD-RESAFA'])
# remove the city column from the index
df.reset_index(inplace=True)
self.wd.quit()
return df,date
def run(self):
self.wd_config()
logger.info("Processing provice data for Iraq")
url = 'https://app.powerbi.com/view?r=<KEY>'
data,last_update = self.fetch_province(url)
for index, record in data.iterrows():
confirmed = int(record['confirmed'])
dead = int(record['dead'])
recovered = int(record['recovered'])
province = record['city']
success, adm_area_1, adm_area_2, adm_area_3, gid = self.adm_translator.tr(
input_adm_area_1=province,
input_adm_area_2=None,
input_adm_area_3=None,
return_original_if_failure=True
)
upsert_obj = {
'source': self.SOURCE,
'date': last_update,
'country': 'Iraq',
'countrycode': 'IRQ',
'adm_area_1': adm_area_1,
'adm_area_2': None,
'adm_area_3': None,
'gid': gid,
'confirmed': confirmed,
'recovered': recovered,
'dead': dead,
}
self.upsert_data(**upsert_obj)
```
#### File: plugins/ITA_PCDM/fetcher.py
```python
import logging
import pandas as pd
__all__ = ('ItalyPCDMFetcher',)
from utils.fetcher.base_epidemiology import BaseEpidemiologyFetcher
logger = logging.getLogger(__name__)
class ItalyPCDMFetcher(BaseEpidemiologyFetcher):
LOAD_PLUGIN = True
SOURCE = 'ITA_PCDM'
def fetch(self):
return pd.read_csv('https://raw.githubusercontent.com/DavideMagno/ItalianCovidData/'
'master/Daily_Covis19_Italian_Data_Cumulative.csv')
def run(self):
data = self.fetch()
for index, record in data.iterrows():
# CSV columns: "Date","Region","Hospitalised","In ICU","Home Isolation",
# "Healed","Dead","Tests"
date = record[0]
region = record[1]
hospitalised = int(record[2])
in_icu = int(record[3])
quarantined = int(record[4])
recovered = int(record[5])
dead = int(record[6])
tested = int(record[7])
confirmed = in_icu + quarantined + recovered + dead
success, adm_area_1, adm_area_2, adm_area_3, gid = self.adm_translator.tr(
country_code='ITA',
input_adm_area_1=region,
input_adm_area_2=None,
input_adm_area_3=None,
return_original_if_failure=True
)
upsert_obj = {
'source': self.SOURCE,
'date': date,
'country': 'Italy',
'countrycode': 'ITA',
'adm_area_1': adm_area_1,
'adm_area_2': adm_area_2,
'adm_area_3': adm_area_3,
'tested': tested,
'confirmed': confirmed,
'recovered': recovered,
'dead': dead,
'hospitalised': hospitalised,
'hospitalised_icu': in_icu,
'quarantined': quarantined,
'gid': gid
}
self.upsert_data(**upsert_obj)
```
#### File: plugins/LBN_GOV/fetcher.py
```python
import logging
import pandas as pd
import requests
import time
from datetime import datetime
from bs4 import BeautifulSoup
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.action_chains import ActionChains
from .utils import AR_TO_EN
__all__ = ('LebanonGovFetcher',)
from utils.fetcher.base_epidemiology import BaseEpidemiologyFetcher
logger = logging.getLogger(__name__)
class LebanonGovFetcher(BaseEpidemiologyFetcher):
''' a fetcher to collect data from Lebanon Ministry of Health'''
LOAD_PLUGIN = True
SOURCE = 'LBN_GOV' # Lebanon Ministry of Health
wd = None
def wd_config(self):
# configure a webdriver for selenium
# this should probably be set at AbstractFetcher level
chrome_options = webdriver.ChromeOptions()
chrome_options.add_argument('--headless')
chrome_options.add_argument('--no-sandbox')
chrome_options.add_argument('--disable-dev-shm-usage')
self.wd = webdriver.Chrome('chromedriver',chrome_options=chrome_options)
# Appends the year to the date. This function does not work if the chart dates go back more than 11 months
def get_date(self,dt):
yr = datetime.now().year
current_month = datetime.now().month
month_name = dt[-3:]
month_number = datetime.strptime(month_name, '%b').month
if month_number > current_month:
yr = yr - 1
dt = dt + ' ' + str(yr)
date = datetime.strptime(dt, '%d %b %Y').strftime('%Y-%m-%d')
return date
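# Hedged example (the current date is hypothetical): if today were 2020-05-10, get_date('28 Apr') would
# return '2020-04-28' (April is not later than the current month, so the current year is kept), while
# get_date('28 Dec') would return '2019-12-28' because December is later than the current month.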
def fetch_regional(self,url):
self.wd_config()
self.wd.get(url)
# wait until charts visible
element = WebDriverWait(self.wd, 10).until(EC.presence_of_element_located((By.CLASS_NAME, "apexcharts-canvas")))
time.sleep(2)
# Get last updated date
last_update = self.wd.find_element_by_xpath(".//*[name()='h4' and @class='last-update']/strong").text
last_update = last_update.split(',')
last_update = last_update[0]
last_update = last_update + ' ' + str(datetime.now().year)
last_update = datetime.strptime(last_update, '%b %d %Y').strftime('%Y-%m-%d')
chart = self.wd.find_element_by_xpath(".//*[name()='div' and @id='casesbydistricts']")
bar_labels = chart.find_element_by_css_selector("g.apexcharts-datalabels")
labels=bar_labels.text
lst1=labels.split()
bar_labels1 = chart.find_element_by_css_selector("g.apexcharts-yaxis-texts-g.apexcharts-xaxis-inversed-texts-g")
provinces=bar_labels1.text
lst2=provinces.split('\n')
data= list(zip(lst1,lst2))
df = pd.DataFrame(data, columns =['cases', 'province'])
self.wd.quit()
return df,last_update
def fetch_national(self,url):
website_content = requests.get(url)
soup = BeautifulSoup(website_content.text, 'lxml')
lst = []
Dict = {}
divs = soup.find_all('div', class_='counter-content')
for elem in divs:
h = elem.find('h1').get_text().strip()
p = elem.find('p').get_text().strip()
# strip extra spaces and newline characters: split and join
AR_str = " ".join(p.split())
#map the Arabic to the English translation
EN_str = AR_TO_EN[AR_str]
# Add to Dictionary
Dict.update( {EN_str : h} )
# NOTE : hospitalised_icu = Number of cases currently in ICU
df = pd.DataFrame({'confirmed':[Dict['confirmed'],],
'dead':[Dict['dead'],],
'recovered':[Dict['recovered'],],
'hospitalised_icu':[Dict['hospitalised_icu'],],
'tested':[Dict['tested'],],
'quarantined':[Dict['quarantined'],]})
return df
def fetch_historical(self,url):
self.wd_config()
self.wd.get(url)
res = []
# wait until charts visible
element = WebDriverWait(self.wd, 10).until(EC.presence_of_element_located((By.CLASS_NAME, "apexcharts-canvas")))
time.sleep(2)
# make a list of all charts
chart_list = self.wd.find_elements_by_css_selector("div.apexcharts-canvas")
chart=chart_list[0]
# only works if chart is in view - mouse needs to scroll over element
# this took me ages to figure out, but the rest of the code got better while it was happening!!
self.wd.execute_script("arguments[0].scrollIntoView();", chart)
# iterate through the bars on the chart to hover on
bar_list = chart.find_elements_by_xpath(".//*[name()='path' and @id='apexcharts-bar-area-0']")
for el in bar_list:
info = []
hover = ActionChains(self.wd).move_to_element(el)
hover.perform()
# at this point the apexcharts-tooltip light element gets populated
label = chart.find_element_by_css_selector("div.apexcharts-tooltip.light")
d = label.find_element_by_css_selector("div.apexcharts-tooltip-title")
date = self.get_date(d.text)
info.append(date)
data = label.find_elements_by_css_selector("div.apexcharts-tooltip-y-group")
for datum in data:
label_and_value = datum.find_elements_by_css_selector("span")
if label_and_value:
info.append(label_and_value[0].text)
info.append(label_and_value[1].text)
res.append(info)
df = pd.DataFrame(res, columns=['date', 'daily', 'dailynum', 'total', 'totalnum'])
self.wd.quit()
return df
def run(self):
url = 'https://corona.ministryinfo.gov.lb/'
# Must run fetch_regional first as it gets the last updated date
logger.debug('Fetching regional level information')
rdata, last_update = self.fetch_regional(url)
for index, record in rdata.iterrows():
confirmed = record[0]
province = record[1]
success, adm_area_1, adm_area_2, adm_area_3, gid = self.adm_translator.tr(
input_adm_area_1=province,
input_adm_area_2=None,
input_adm_area_3=None,
return_original_if_failure=True
)
if adm_area_1 !='' :
upsert_obj = {
'source': self.SOURCE,
'date': last_update,
'country': 'Lebanon',
'countrycode': 'LBN',
'adm_area_1': adm_area_1,
'adm_area_2': adm_area_2,
'adm_area_3': None,
'confirmed': confirmed,
'gid': gid
}
self.upsert_data(**upsert_obj)
logger.debug('Fetching historical level information')
hdata = self.fetch_historical(url)
for index, record in hdata.iterrows():
date = record['date']
confirmed = record['totalnum']
upsert_obj = {
'source': self.SOURCE,
'date': date,
'country': 'Lebanon',
'countrycode': 'LBN',
'adm_area_1': None,
'adm_area_2': None,
'adm_area_3': None,
'gid': ['LBN'],
'confirmed': confirmed
}
self.upsert_data(**upsert_obj)
logger.info("feching country-level information")
ndata = self.fetch_national(url)
# date = datetime.today().strftime('%Y-%m-%d')
dead = int(ndata.loc[ : , 'dead' ])
confirmed = int(ndata.loc[ : , 'confirmed' ])
recovered = int(ndata.loc[ : , 'recovered' ])
tested = int(ndata.loc[ : , 'tested' ])
hospitalised_icu = int(ndata.loc[ : , 'hospitalised_icu' ])
quarantined = int(ndata.loc[ : , 'quarantined' ])
upsert_obj = {
'source': self.SOURCE,
'date': last_update,
'country': 'Lebanon',
'countrycode': 'LBN',
'adm_area_1': None,
'adm_area_2': None,
'adm_area_3': None,
'gid': ['LBN'],
'confirmed': confirmed,
'recovered': recovered,
'dead': dead,
'tested' : tested,
'hospitalised_icu' : hospitalised_icu,
'quarantined' : quarantined,
}
self.upsert_data(**upsert_obj)
```
#### File: plugins/WEATHER/utils.py
```python
import os
import json
import pickle
import netCDF4
import numpy as np
import pandas as pd
from requests import get
# opening netCDF4 files via URL is not reliable
# (it requires the package to be built with OPeNDAP support)
# so we download the file and write it to disk before opening it
def download_MET_file(url, file_name):
try:
os.remove(file_name)
except:
pass
# download the file from the URL and save it to disk
# get request
response = get(url)
if response.status_code != 200:
return False
# open in binary mode
with open(file_name, "wb") as file:
# write to file
file.write(response.content)
file.close()
return True
def load_local_data():
# load the variables dict
with open("plugins/WEATHER/input/weather_indicators.json", "r") as read_file:
weather_indicators = json.load(read_file)
# load grid to GADM level 1 dict
with open('plugins/WEATHER/input/adm_1_info.pkl', 'rb') as handle:
adm_1_info = pickle.load(handle)
# load grid to GADM level 2 dict
with open('plugins/WEATHER/input/adm_2_info.pkl', 'rb') as handle:
adm_2_info = pickle.load(handle)
return weather_indicators, adm_1_info, adm_2_info
# download the weather data for a single variable for all days in the date range
# use the adm_1_info and adm_2_info to assign each point in the grid to the right
# GID at level 1 or 2. The dicts also contain the GADM information on each GID
# returns a pandas dataframe
def create_aggr_df(indicator, day, variables, adm_1_info, adm_2_info, logger):
source = []
date = []
gid = []
country = []
countrycode = []
adm_area_1 = []
adm_area_2 = []
adm_area_3 = []
avg = []
std = []
samplesize = []
valid_percentage = []
logger.debug("downloading data for {} for {}".format(indicator, day.strftime('%Y-%m-%d')))
URL = "https://metdatasa.blob.core.windows.net/covid19-response/metoffice_global_daily/"
temp_file = os.path.join(os.path.dirname(__file__), '..', '..', 'data', 'netCDF4_file.nc')
if not download_MET_file("{}{}/{}{}.nc".format(URL, variables[indicator]['folder'], variables[indicator]['file'],
day.strftime('%Y%m%d')), file_name=temp_file):
return None
nc = netCDF4.Dataset(temp_file)
data = nc.variables[variables[indicator]['variable']][:].data.reshape(-1)
if 'cloudaltitude' in indicator:
# remove default values 9*10^36
data[data > 10e20] = np.nan
# Level 1 aggregation
for area_0 in adm_1_info:
for area_1 in adm_1_info[area_0]:
idx_list = [point[0] for point in adm_1_info[area_0][area_1]["points"]]
to_avg = [data[idx] for idx in idx_list]
samplesize.append(len(to_avg))
source.append("MET")
date.append(day.strftime('%Y-%m-%d'))
gid.append(adm_1_info[area_0][area_1]["gid"])
country.append(adm_1_info[area_0][area_1]["country"])
countrycode.append(adm_1_info[area_0][area_1]["countrycode"])
adm_area_1.append(adm_1_info[area_0][area_1]["adm_area_1"])
adm_area_2.append(adm_1_info[area_0][area_1]["adm_area_2"])
adm_area_3.append(adm_1_info[area_0][area_1]["adm_area_3"])
if 'cloudaltitude' in indicator:
avg.append(np.nanmean(to_avg))
std.append(np.nanstd(to_avg, ddof=1))
valid_percentage.append(((~np.isnan(to_avg)).sum()) / (len(to_avg)))
else:
avg.append(np.mean(to_avg))
std.append(np.std(to_avg, ddof=1))
# Level 2 aggregation
for area_0 in adm_2_info:
for area_1 in adm_2_info[area_0]:
for area_2 in adm_2_info[area_0][area_1]:
idx_list = [point[0] for point in adm_2_info[area_0][area_1][area_2]["points"]]
to_avg = [data[idx] for idx in idx_list]
samplesize.append(len(to_avg))
source.append("MET")
date.append(day.strftime('%Y-%m-%d'))
gid.append(adm_2_info[area_0][area_1][area_2]["gid"])
country.append(adm_2_info[area_0][area_1][area_2]["country"])
countrycode.append(adm_2_info[area_0][area_1][area_2]["countrycode"])
adm_area_1.append(adm_2_info[area_0][area_1][area_2]["adm_area_1"])
adm_area_2.append(adm_2_info[area_0][area_1][area_2]["adm_area_2"])
adm_area_3.append(adm_2_info[area_0][area_1][area_2]["adm_area_3"])
if 'cloudaltitude' in indicator:
avg.append(np.nanmean(to_avg))
std.append(np.nanstd(to_avg, ddof=1))
valid_percentage.append(((~np.isnan(to_avg)).sum()) / (len(to_avg)))
else:
avg.append(np.mean(to_avg))
std.append(np.std(to_avg, ddof=1))
if 'cloudaltitude' in indicator:
d = {'source': source, 'date': date, 'gid': gid,
'country': country, 'countrycode': countrycode,
'adm_area_1': adm_area_1, 'adm_area_2': adm_area_2, 'adm_area_3': adm_area_3,
'samplesize': samplesize,
indicator+'_valid': valid_percentage,
indicator+'_avg': avg,
indicator+'_std': std,
}
else:
d = {'source': source, 'date': date, 'gid': gid,
'country': country, 'countrycode': countrycode,
'adm_area_1': adm_area_1, 'adm_area_2': adm_area_2, 'adm_area_3': adm_area_3,
'samplesize': samplesize,
indicator+'_avg': avg,
indicator+'_std': std,
}
try:
os.remove(temp_file)
except:
pass
return pd.DataFrame(data=d)
```
#### File: src/utils/decorators.py
```python
import time
import logging
logger = logging.getLogger(__name__)
TIME_DURATION_UNITS = (
('week', 60 * 60 * 24 * 7),
('day', 60 * 60 * 24),
('hour', 60 * 60),
('min', 60),
('sec', 1)
)
def seconds_to_human(seconds):
if seconds < 1:
return f'{seconds}s'
parts = []
for unit, div in TIME_DURATION_UNITS:
amount, seconds = divmod(int(seconds), div)
if amount > 0:
parts.append('{} {}{}'.format(amount, unit, "" if amount == 1 else "s"))
return ', '.join(parts)
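# Worked example derived from the unit table above: seconds_to_human(3661)
# -> 1 hour, with 61 s remaining -> 1 min, then 1 s -> '1 hour, 1 min, 1 sec'.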
def timeit(method):
def timed(*args, **kw):
start_time = time.time()
result = method(*args, **kw)
hr_time_diff = seconds_to_human(time.time() - start_time)
logger.info(f'{method.__name__} execution time: {hr_time_diff}')
return result
return timed
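# Minimal usage sketch (the decorated function below is hypothetical):
#
# @timeit
# def fetch_all():
#     ...  # any fetcher work; its duration is logged as 'fetch_all execution time: ...'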
```
#### File: src/utils/email.py
```python
import re
import os
import logging
import smtplib
import itertools
import pandas as pd
from email.message import EmailMessage
from utils.config import config
EMAIL_REGEX = re.compile(r"^[_a-z0-9-]+(\.[_a-z0-9-]+)*@[a-z0-9-]+(\.[a-z0-9-]+)*(\.[a-z]{2,4})$")
logger = logging.getLogger(__name__)
def validate_address(email: str) -> bool:
return isinstance(email, str) and bool(re.match(EMAIL_REGEX, email))
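# Sketch of the expected behaviour (addresses are made up): validate_address('data.curator@example.org')
# matches the pattern above, while 'Data.Curator@example.org' does not, because the regex only admits
# lower-case characters.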
def send_email(src_code: str, subject: str, message: str):
emails_file_path = os.path.join(os.path.dirname(__file__), "..", "data", "emails.csv")
if not os.path.isfile(emails_file_path):
logger.error(f"Unable to send notification email, file: {emails_file_path} doesn't exist")
return
df = pd.read_csv(emails_file_path, encoding='utf-8')
df_recipients = df[(df.source_code == src_code) | (df.source_code == '*')]
emails_list = df_recipients[
['email_curator_1', 'email_curator_2', 'email_curator_3', 'email_curator_4']
].values.tolist()
emails = set(itertools.chain.from_iterable(emails_list))
receivers = [email for email in emails if validate_address(email)]
msg = EmailMessage()
msg['Subject'] = subject
msg['From'] = config.SYS_EMAIL
msg['To'] = receivers
msg.set_content(message)
with smtplib.SMTP(config.SYS_EMAIL_SMTP, 587) as s:
s.starttls()
s.login(config.SYS_EMAIL, config.SYS_EMAIL_PASS)
s.send_message(msg)
logger.info(f'Email successfully sent to: {receivers}')
```
#### File: utils/fetcher/abstract_fetcher.py
```python
import os
import sys
from datetime import datetime, timedelta
from abc import ABC, abstractmethod
__all__ = ('AbstractFetcher')
from utils.config import config
from utils.types import FetcherType
from utils.adapter.abstract_adapter import AbstractAdapter
from utils.country_codes_translator.translator import CountryCodesTranslator
from utils.administrative_division_translator.translator import AdmTranslator
class AbstractFetcher(ABC):
TYPE = FetcherType.EPIDEMIOLOGY
LOAD_PLUGIN = False
def __init__(self, data_adapter: AbstractAdapter):
self.adm_translator = self.load_adm_translator()
self.country_codes_translator = CountryCodesTranslator()
self.sliding_window_days = config.SLIDING_WINDOW_DAYS
self.data_adapter = data_adapter
def get_first_date_to_fetch(self, initial_date: str) -> str:
if self.sliding_window_days:
date_from = (datetime.now() - timedelta(days=self.sliding_window_days)).strftime('%Y-%m-%d')
else:
date_from = initial_date
return date_from
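# Hedged illustration (the window size is an assumption): with config.SLIDING_WINDOW_DAYS = 30,
# get_first_date_to_fetch('2020-02-28') asks the source only for the last 30 days; with the window
# unset it falls back to the initial_date passed in.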
def load_adm_translator(self) -> AdmTranslator:
translation_csv_fname = getattr(self.__class__, 'TRANSLATION_CSV', "translation.csv")
path = os.path.dirname(sys.modules[self.__class__.__module__].__file__)
return AdmTranslator(os.path.join(path, translation_csv_fname))
def get_region(self, countrycode: str, input_adm_area_1: str = None, input_adm_area_2: str = None,
input_adm_area_3: str = None, suppress_exception: bool = False):
# Check first if input data can be matched to translate.csv
success, adm_area_1, adm_area_2, adm_area_3, gid = self.adm_translator.tr(
countrycode, input_adm_area_1, input_adm_area_2, input_adm_area_3,
return_original_if_failure=False,
suppress_exception=True
)
if not success:
try:
# Check if input data can be matched directly into administrative division table
country, adm_area_1, adm_area_2, adm_area_3, gid = self.data_adapter.get_adm_division(
countrycode, input_adm_area_1, input_adm_area_2, input_adm_area_3)
except Exception as ex:
adm_area_1, adm_area_2, adm_area_3, gid = input_adm_area_1, input_adm_area_2, input_adm_area_3, None
return adm_area_1, adm_area_2, adm_area_3, gid
def get_earliest_timestamp(self):
return None
def get_latest_timestamp(self):
return None
def get_details(self):
return None
@abstractmethod
def run(self):
raise NotImplementedError()
``` |
{
"source": "johnharveymath/oxcovid19db",
"score": 3
} |
#### File: src/oxcovid19db/tools.py
```python
import numpy as np
import pandas as pd
from .columnlists import ColumnLists
def trim(gid_list):
"""
Remove an underscore and any following characters from each element in a list of strings
Add a terminal period for easy hierarchical comparison
Intended to remove the version number from gids
"""
result = []
for gid in gid_list:
gid = gid.split("_")
result.append(gid[0] + '.')
return result
def is_match(left, right):
""" Test whether the list of strings (gids), left, is equal to or a child of the list right """
left_trimmed = trim(left)
right_trimmed = trim(right)
result = True
for left_gid in left_trimmed:
gid_matches = False
for right_gid in right_trimmed:
if left_gid.startswith(right_gid):
gid_matches = True
break
result = result & gid_matches
return result
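# Example following trim() above: is_match(['USA.1.2_1'], ['USA.1_1']) is True because
# 'USA.1.2.' starts with 'USA.1.', whereas is_match(['USA.1_1'], ['USA.1.2_1']) is False.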
def make_parent_dictionary(left_gid_list, right_gid_list):
""" For each gid in left or in right, find a parent gid which appears in both"""
parent = dict()
for left_gid in left_gid_list:
for right_gid in right_gid_list:
if is_match(left_gid, right_gid):
parent[left_gid] = right_gid
parent[right_gid] = right_gid
for right_gid in right_gid_list:
if not parent.get(right_gid):
for left_gid in left_gid_list:
if is_match(right_gid, left_gid):
parent[left_gid] = left_gid
parent[right_gid] = left_gid
if not parent.get(right_gid):
parent[right_gid] = right_gid
for left_gid in left_gid_list:
if not parent.get(left_gid):
parent[left_gid] = left_gid
return parent
def make_adm_area_dictionary(left, right):
""" Creates a look-up dictionary providing administrative names for a gid """
left_dict = left[geo_columns(left) + ['gid']].drop_duplicates().set_index('gid').T.to_dict()
right_dict = right[geo_columns(right) + ['gid']].drop_duplicates().set_index('gid').T.to_dict()
return {**left_dict, **right_dict}
def convert_and_list(df):
""" Add the additional columns which will be needed and obtain a list of all gids in the df"""
newcols = ['parent1', 'parent2', 'parent3', 'parentgid']
header_list = list(df.columns) + newcols
df = df.reindex(columns=header_list)
try:
df_gid_list = df['gid'].unique()
except KeyError as e:
raise ValueError("The argument must have a column named gid")
return df, df_gid_list
def find_common_regions(left, right):
""" Add geographic information to two dataframes, left and right, so that they can be joined on gid """
# add extra columns and report a list of unique gids in each df
left, left_gid_list = convert_and_list(left)
right, right_gid_list = convert_and_list(right)
# make a dictionary to identify parents and fill parentgid column
parent = make_parent_dictionary(left_gid_list, right_gid_list)
left['parentgid'] = left['gid'].map(parent.get)
right['parentgid'] = right['gid'].map(parent.get)
# fill in the names for the parentgid in parent1, parent2, parent3
adm_dict = make_adm_area_dictionary(left, right)
converter = {'adm_area_1': 'parent1', 'adm_area_2': 'parent2', 'adm_area_3': 'parent3'}
for col in geo_columns(left):
left[converter[col]] = left['parentgid'].map(lambda x: adm_dict.get(x).get(col))
for col in geo_columns(right):
right[converter[col]] = right['parentgid'].map(lambda x: adm_dict.get(x).get(col))
return left, right
def make_agg_rules(df):
""" Collects the columns for the database and assigns the correct aggregation rules to each,
using df (the frame being aggregated) for the sample-size weighted mean """
agg_rules = dict()
wm = lambda x: np.average(x, weights=df.loc[x.index, "samplesize"])
# collect column_lists (either from a cached state or from the database)
table_list = ['epidemiology', 'mobility', 'weather']
column_lists = ColumnLists(table_list)
for column in column_lists.epidemiology:
agg_rules[column] = ['sum']
for column in column_lists.mobility:
agg_rules[column] = ['mean']
for column in column_lists.weather:
if column != 'samplesize':
agg_rules[column] = [wm]
return agg_rules
def group_by_parent(df):
""" Aggregates data with the same administrative area and date according to rules for that data type """
# Group by geographical columns and by date
grouping_columns = [col for col in df.columns if (col.startswith('parent') or col == 'date')]
# Set of rules for how to aggregate any possible column
agg_rules = make_agg_rules(df)
# Set up variables to hold aggregation rules for this particular DataFrame and new column names
agg_dict = dict()
agg_columns = []
# If a column can be aggregated, check if it is in df, then add to agg_dict and agg_columns
for key in agg_rules:
if key in df.columns:
agg_dict[key] = agg_rules[key]
for val in agg_rules[key]:
if isinstance(val, str):
agg_columns.append(key + '_' + val)
else:
agg_columns.append(key + '_wtmean')
# Group, aggregate by agg_dict, and rename by agg_columns
grouped = df.groupby(grouping_columns, dropna=False).agg(agg_dict)
grouped.columns = agg_columns
return grouped.reset_index()
def geo_columns(df):
""" Extract column names in df which are geographical names"""
potential_columns = ['adm_area_1', 'adm_area_2', 'adm_area_3']
geo_columns = [col for col in potential_columns if col in df.columns]
return geo_columns
def validate_input(df):
""" An input df must have one column of names and one column of gids """
if not geo_columns(df):
return False
if 'gid' not in df.columns:
return False
return True
def merge(left, right, how='inner'):
""" join two dataframes of oxcovid19 data """
# confirm that both dataframes have necessary geographical information
if not validate_input(left):
raise ValueError('left dataframe columns must include gid and at least one of adm_area_1, adm_area_2, '
'adm_area_3')
if not validate_input(right):
raise ValueError('right dataframe columns must include gid and at least one of adm_area_1, adm_area_2, '
'adm_area_3')
# Augment the dataframes with parent regions
left, right = find_common_regions(left, right)
# Carry out join on the parent regions and on date, if present
group_on = ['date', 'parent1', 'parent2', 'parent3',
'parentgid'] if 'date' in left.columns and 'date' in right.columns else ['parent1', 'parent2',
'parent3', 'parentgid']
result = pd.merge(group_by_parent(left), group_by_parent(right), how=how, on=group_on)
# Restore original column names and drop empty columns
result.rename(
columns={'parent1': 'adm_area_1', 'parent2': 'adm_area_2', 'parent3': 'adm_area_3', 'parentgid': 'gid'},
inplace=True)
drops = [col for col in ['adm_area_1', 'adm_area_2', 'adm_area_3'] if result[col].isnull().all()]
result.drop(columns=drops, inplace=True)
return result
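# Minimal usage sketch (the dataframes are hypothetical): given two oxcovid19 query results that both
# carry 'gid', 'date' and adm_area_* columns, e.g. an epidemiology frame and a mobility frame,
# combined = merge(epidemiology_df, mobility_df, how='inner') aggregates each side up to the common
# parent regions and joins them on region and date.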
``` |
{
"source": "john-haugland/osdu-sdk-python",
"score": 2
} |
#### File: osdu/entitlements/_client.py
```python
from typing import Union
from osdu.client import OsduClient
from osdu.serviceclientbase import ServiceClientBase
VALID_ENTITLEMENTS_API_VERSIONS = [2]
class EntitlementsClient(ServiceClientBase):
"""A client for working with the OSDU Entitlements API."""
def __init__(self, client: OsduClient, service_version: Union[int, str] = "latest"):
"""Setup the EntitlementsClient
Args:
client (OsduClient): client to use for connection
service_version (Union[int, str], optional): service version (2 or 'latest'). Defaults to 'latest'.
Raises:
ValueError: if service_version is not a supported Entitlements API version.
"""
super().__init__(client, "entitlements", VALID_ENTITLEMENTS_API_VERSIONS, service_version)
# def query():
# pass
# def query_by_id():
# pass
def is_healthy(self) -> bool:
"""Returns health status of the API
Returns:
bool: health status of the API
"""
response = self._client.get(self.api_url("health/readiness_check"))
return response.status_code == 200
def list_groups(self) -> dict:
"""List groups
Returns:
dict: containing the result
"""
response_json = self._client.get_returning_json(self.api_url("groups"))
return response_json
def list_group_members(self, group: str) -> dict:
"""List members in a group
Args:
group (str): The email of the group.
Returns:
dict: containing the result
"""
response_json = self._client.get_returning_json(self.api_url(f"groups/{group}/members"))
return response_json
def add_group(self, group: str) -> dict:
"""Add a new group
Args:
group (str): The email of the group.
Returns:
dict: containing the result
"""
request_data = {"name": group}
response_json = self._client.post_returning_json(
self.api_url("groups"), request_data, [200, 201]
)
return response_json
def delete_group(self, group: str):
"""Delete a group
Args:
group (str): The email of the group.
"""
_ = self._client.delete(self.api_url(f"groups/{group}"), [200, 204])
def add_member_to_group(self, member: str, group: str, role: str) -> dict:
"""Add member to group
Args:
member (str): The email of the member to be added.
group (str): The email of the group.
role (str): The role in the group.
Returns:
dict: containing the result
"""
request_data = {
"email": member,
"role": role,
}
response_json = self._client.post_returning_json(
self.api_url(f"groups/{group}/members"), request_data
)
return response_json
def remove_member_from_group(self, member: str, group: str):
"""Remove member from group
Args:
member (str): The email of the member to remove.
group (str): The email of the group.
"""
_ = self._client.delete(self.api_url(f"groups/{group}/members/{member}"), [204])
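# Hypothetical usage sketch (client construction and group names are assumptions, not part of this module):
#
# client = OsduClient(...)  # an authenticated OsduClient
# entitlements = EntitlementsClient(client)
# if entitlements.is_healthy():
#     groups = entitlements.list_groups()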
```
#### File: tests/identity/test_environment.py
```python
import logging
import os
from unittest.case import TestCase
import mock
from testfixtures import LogCapture
from osdu.identity import OsduEnvironmentCredential
from osdu.identity.consts import EnvironmentVariables
from osdu.identity.exceptions import CredentialUnavailableError
class TestOsduEnvironmentCredential(TestCase):
"""Test cases for refresh token OSDU client"""
def test_init_token(self):
"""Test the init method for token credentials"""
envs = {
EnvironmentVariables.CLIENT_ID: "CLIENT_ID",
EnvironmentVariables.CLIENT_SECRET: "CLIENT_SECRET",
EnvironmentVariables.TOKEN_ENDPOINT: "TOKEN_ENDPOINT",
EnvironmentVariables.REFRESH_TOKEN: "REFRESH_TOKEN",
}
with mock.patch.dict(os.environ, envs, clear=True):
with LogCapture(level=logging.INFO) as log_capture:
client = OsduEnvironmentCredential()
# pylint: disable=protected-access
self.assertEqual("OsduTokenCredential", client._credential.__class__.__name__)
self.assertEqual(len(log_capture.records), 1)
def test_init_msal(self):
"""Test the init method for msal"""
envs = {
EnvironmentVariables.CLIENT_ID: "CLIENT_ID",
EnvironmentVariables.AUTHORITY: "AUTHORITY",
EnvironmentVariables.SCOPES: "SCOPES",
}
with mock.patch.dict(os.environ, envs, clear=True):
with LogCapture(level=logging.INFO) as log_capture:
client = OsduEnvironmentCredential()
# pylint: disable=protected-access
self.assertEqual(
"OsduMsalInteractiveCredential", client._credential.__class__.__name__
)
self.assertEqual(len(log_capture.records), 1)
def test_init_msal_optional(self):
"""Test the init method for msal with optional arguments set"""
envs = {
EnvironmentVariables.CLIENT_ID: "CLIENT_ID",
EnvironmentVariables.AUTHORITY: "AUTHORITY",
EnvironmentVariables.SCOPES: "SCOPES",
EnvironmentVariables.TOKEN_CACHE: "TOKEN_CACHE",
}
with mock.patch.dict(os.environ, envs, clear=True):
client = OsduEnvironmentCredential()
# pylint: disable=protected-access
self.assertEqual("OsduMsalInteractiveCredential", client._credential.__class__.__name__)
self.assertEqual("TOKEN_CACHE", client._credential.token_cache)
def test_init_invalid(self):
"""Test incomplete setup doesn't assign any credentials."""
envs = {}
with mock.patch.dict(os.environ, envs, clear=True):
with LogCapture(level=logging.WARN) as log_capture:
client = OsduEnvironmentCredential()
# pylint: disable=protected-access
self.assertIsNone(client._credential)
self.assertEqual(len(log_capture.records), 1)
def test_get_token_invalid_fails(self):
"""Test incomplete setup throws exception for get_token())."""
envs = {}
with mock.patch.dict(os.environ, envs, clear=True):
client = OsduEnvironmentCredential()
with self.assertRaises(CredentialUnavailableError):
client.get_token()
if __name__ == "__main__":
import nose2
nose2.main()
``` |
{
"source": "JohnHau/mis",
"score": 4
} |
#### File: JohnHau/mis/tk.py
```python
from tkinter import *
def hello():
print("hello")
root=Tk()
button=Button(root,text='click me',command=hello)
button.pack()
root.mainloop()
``` |
{
"source": "John-Hau/NXP-MCUBootUtility",
"score": 2
} |
#### File: src/win/secBootWin.py
```python
import wx
import wx.xrc
###########################################################################
## Class secBootWin
###########################################################################
class secBootWin ( wx.Frame ):
def __init__( self, parent ):
wx.Frame.__init__ ( self, parent, id = wx.ID_ANY, title = u"NXP MCU Boot Utility", pos = wx.DefaultPosition, size = wx.Size( 1122,730 ), style = wx.DEFAULT_FRAME_STYLE|wx.TAB_TRAVERSAL )
self.SetSizeHints( wx.DefaultSize, wx.DefaultSize )
self.SetBackgroundColour( wx.SystemSettings.GetColour( wx.SYS_COLOUR_WINDOW ) )
self.m_menubar = wx.MenuBar( 0 )
self.m_menu_file = wx.Menu()
self.m_menuItem_exit = wx.MenuItem( self.m_menu_file, wx.ID_ANY, u"Exit", wx.EmptyString, wx.ITEM_NORMAL )
self.m_menu_file.Append( self.m_menuItem_exit )
self.m_menubar.Append( self.m_menu_file, u"File" )
self.m_menu_edit = wx.Menu()
self.m_menubar.Append( self.m_menu_edit, u"Edit" )
self.m_menu_view = wx.Menu()
self.m_menu_language = wx.Menu()
self.m_menuItem_english = wx.MenuItem( self.m_menu_language, wx.ID_ANY, u"EN - English", wx.EmptyString, wx.ITEM_RADIO )
self.m_menu_language.Append( self.m_menuItem_english )
self.m_menuItem_chinese = wx.MenuItem( self.m_menu_language, wx.ID_ANY, u"ZH - 简体中文", wx.EmptyString, wx.ITEM_RADIO )
self.m_menu_language.Append( self.m_menuItem_chinese )
self.m_menu_view.AppendSubMenu( self.m_menu_language, u"Language/语言" )
self.m_menubar.Append( self.m_menu_view, u"View" )
self.m_menu_tools = wx.Menu()
self.m_menu_runMode = wx.Menu()
self.m_menuItem_runModeEntry = wx.MenuItem( self.m_menu_runMode, wx.ID_ANY, u"Entry", wx.EmptyString, wx.ITEM_RADIO )
self.m_menu_runMode.Append( self.m_menuItem_runModeEntry )
self.m_menuItem_runModeMaster = wx.MenuItem( self.m_menu_runMode, wx.ID_ANY, u"Master", wx.EmptyString, wx.ITEM_RADIO )
self.m_menu_runMode.Append( self.m_menuItem_runModeMaster )
self.m_menu_tools.AppendSubMenu( self.m_menu_runMode, u"Run Mode" )
self.m_menu_usbDetection = wx.Menu()
self.m_menuItem_usbDetectionDynamic = wx.MenuItem( self.m_menu_usbDetection, wx.ID_ANY, u"Dynamic", wx.EmptyString, wx.ITEM_RADIO )
self.m_menu_usbDetection.Append( self.m_menuItem_usbDetectionDynamic )
self.m_menuItem_usbDetectionStatic = wx.MenuItem( self.m_menu_usbDetection, wx.ID_ANY, u"Static", wx.EmptyString, wx.ITEM_RADIO )
self.m_menu_usbDetection.Append( self.m_menuItem_usbDetectionStatic )
self.m_menu_tools.AppendSubMenu( self.m_menu_usbDetection, u"USB Detection" )
self.m_menu_soundEffect = wx.Menu()
self.m_menuItem_soundEffectContra = wx.MenuItem( self.m_menu_soundEffect, wx.ID_ANY, u"Contra", wx.EmptyString, wx.ITEM_RADIO )
self.m_menu_soundEffect.Append( self.m_menuItem_soundEffectContra )
self.m_menuItem_soundEffectMario = wx.MenuItem( self.m_menu_soundEffect, wx.ID_ANY, u"Mario", wx.EmptyString, wx.ITEM_RADIO )
self.m_menu_soundEffect.Append( self.m_menuItem_soundEffectMario )
self.m_menuItem_soundEffectQuiet = wx.MenuItem( self.m_menu_soundEffect, wx.ID_ANY, u"Quiet", wx.EmptyString, wx.ITEM_RADIO )
self.m_menu_soundEffect.Append( self.m_menuItem_soundEffectQuiet )
self.m_menu_tools.AppendSubMenu( self.m_menu_soundEffect, u"Sound Effect" )
self.m_menu_genSbFile = wx.Menu()
self.m_menuItem_genSbFileYes = wx.MenuItem( self.m_menu_genSbFile, wx.ID_ANY, u"Yes", wx.EmptyString, wx.ITEM_RADIO )
self.m_menu_genSbFile.Append( self.m_menuItem_genSbFileYes )
self.m_menuItem_genSbFileNo = wx.MenuItem( self.m_menu_genSbFile, wx.ID_ANY, u"No", wx.EmptyString, wx.ITEM_RADIO )
self.m_menu_genSbFile.Append( self.m_menuItem_genSbFileNo )
self.m_menu_tools.AppendSubMenu( self.m_menu_genSbFile, u"Generate .sb file" )
self.m_menu_imageReadback = wx.Menu()
self.m_menuItem_imageReadbackAutomatic = wx.MenuItem( self.m_menu_imageReadback, wx.ID_ANY, u"Automatic", wx.EmptyString, wx.ITEM_RADIO )
self.m_menu_imageReadback.Append( self.m_menuItem_imageReadbackAutomatic )
self.m_menuItem_imageReadbackManual = wx.MenuItem( self.m_menu_imageReadback, wx.ID_ANY, u"Manual", wx.EmptyString, wx.ITEM_RADIO )
self.m_menu_imageReadback.Append( self.m_menuItem_imageReadbackManual )
self.m_menu_tools.AppendSubMenu( self.m_menu_imageReadback, u"Image Readback" )
self.m_menu_flashloaderResident = wx.Menu()
self.m_menuItem_flashloaderResidentDefault = wx.MenuItem( self.m_menu_flashloaderResident, wx.ID_ANY, u"Default", wx.EmptyString, wx.ITEM_RADIO )
self.m_menu_flashloaderResident.Append( self.m_menuItem_flashloaderResidentDefault )
self.m_menuItem_flashloaderResidentItcm = wx.MenuItem( self.m_menu_flashloaderResident, wx.ID_ANY, u"ITCM", wx.EmptyString, wx.ITEM_RADIO )
self.m_menu_flashloaderResident.Append( self.m_menuItem_flashloaderResidentItcm )
self.m_menuItem_flashloaderResidentDtcm = wx.MenuItem( self.m_menu_flashloaderResident, wx.ID_ANY, u"DTCM", wx.EmptyString, wx.ITEM_RADIO )
self.m_menu_flashloaderResident.Append( self.m_menuItem_flashloaderResidentDtcm )
self.m_menuItem_flashloaderResidentOcram = wx.MenuItem( self.m_menu_flashloaderResident, wx.ID_ANY, u"OCRAM", wx.EmptyString, wx.ITEM_RADIO )
self.m_menu_flashloaderResident.Append( self.m_menuItem_flashloaderResidentOcram )
self.m_menu_tools.AppendSubMenu( self.m_menu_flashloaderResident, u"Flashloader Resident" )
self.m_menu_efuseGroup = wx.Menu()
self.m_menuItem_efuseGroup0 = wx.MenuItem( self.m_menu_efuseGroup, wx.ID_ANY, u"0", wx.EmptyString, wx.ITEM_RADIO )
self.m_menu_efuseGroup.Append( self.m_menuItem_efuseGroup0 )
self.m_menuItem_efuseGroup1 = wx.MenuItem( self.m_menu_efuseGroup, wx.ID_ANY, u"1", wx.EmptyString, wx.ITEM_RADIO )
self.m_menu_efuseGroup.Append( self.m_menuItem_efuseGroup1 )
self.m_menuItem_efuseGroup2 = wx.MenuItem( self.m_menu_efuseGroup, wx.ID_ANY, u"2", wx.EmptyString, wx.ITEM_RADIO )
self.m_menu_efuseGroup.Append( self.m_menuItem_efuseGroup2 )
self.m_menuItem_efuseGroup3 = wx.MenuItem( self.m_menu_efuseGroup, wx.ID_ANY, u"3", wx.EmptyString, wx.ITEM_RADIO )
self.m_menu_efuseGroup.Append( self.m_menuItem_efuseGroup3 )
self.m_menuItem_efuseGroup4 = wx.MenuItem( self.m_menu_efuseGroup, wx.ID_ANY, u"4", wx.EmptyString, wx.ITEM_RADIO )
self.m_menu_efuseGroup.Append( self.m_menuItem_efuseGroup4 )
self.m_menuItem_efuseGroup5 = wx.MenuItem( self.m_menu_efuseGroup, wx.ID_ANY, u"5", wx.EmptyString, wx.ITEM_RADIO )
self.m_menu_efuseGroup.Append( self.m_menuItem_efuseGroup5 )
self.m_menuItem_efuseGroup6 = wx.MenuItem( self.m_menu_efuseGroup, wx.ID_ANY, u"6", wx.EmptyString, wx.ITEM_RADIO )
self.m_menu_efuseGroup.Append( self.m_menuItem_efuseGroup6 )
self.m_menu_tools.AppendSubMenu( self.m_menu_efuseGroup, u"eFuse Group" )
self.m_menu_flexspiXipRegion = wx.Menu()
self.m_menuItem_flexspiXipRegion0 = wx.MenuItem( self.m_menu_flexspiXipRegion, wx.ID_ANY, u"0", wx.EmptyString, wx.ITEM_RADIO )
self.m_menu_flexspiXipRegion.Append( self.m_menuItem_flexspiXipRegion0 )
self.m_menuItem_flexspiXipRegion1 = wx.MenuItem( self.m_menu_flexspiXipRegion, wx.ID_ANY, u"1", wx.EmptyString, wx.ITEM_RADIO )
self.m_menu_flexspiXipRegion.Append( self.m_menuItem_flexspiXipRegion1 )
self.m_menu_tools.AppendSubMenu( self.m_menu_flexspiXipRegion, u"FlexSPI XIP Region" )
self.m_menubar.Append( self.m_menu_tools, u"Tools" )
self.m_menu_window = wx.Menu()
self.m_menubar.Append( self.m_menu_window, u"Window" )
self.m_menu_help = wx.Menu()
self.m_menuItem_homePage = wx.MenuItem( self.m_menu_help, wx.ID_ANY, u"Home Page", wx.EmptyString, wx.ITEM_NORMAL )
self.m_menu_help.Append( self.m_menuItem_homePage )
self.m_menuItem_aboutAuthor = wx.MenuItem( self.m_menu_help, wx.ID_ANY, u"About Author", wx.EmptyString, wx.ITEM_NORMAL )
self.m_menu_help.Append( self.m_menuItem_aboutAuthor )
self.m_menuItem_contributors = wx.MenuItem( self.m_menu_help, wx.ID_ANY, u"Contributors", wx.EmptyString, wx.ITEM_NORMAL )
self.m_menu_help.Append( self.m_menuItem_contributors )
self.m_menuItem_specialThanks = wx.MenuItem( self.m_menu_help, wx.ID_ANY, u"Special Thanks", wx.EmptyString, wx.ITEM_NORMAL )
self.m_menu_help.Append( self.m_menuItem_specialThanks )
self.m_menuItem_revisionHistory = wx.MenuItem( self.m_menu_help, wx.ID_ANY, u"Revision History", wx.EmptyString, wx.ITEM_NORMAL )
self.m_menu_help.Append( self.m_menuItem_revisionHistory )
self.m_menubar.Append( self.m_menu_help, u"Help" )
self.SetMenuBar( self.m_menubar )
bSizer_win = wx.BoxSizer( wx.VERTICAL )
wSizer_func = wx.WrapSizer( wx.HORIZONTAL, wx.WRAPSIZER_DEFAULT_FLAGS )
bSizer_setup = wx.BoxSizer( wx.VERTICAL )
self.m_notebook_targetSetup = wx.Notebook( self, wx.ID_ANY, wx.DefaultPosition, wx.Size( -1,-1 ), 0 )
self.m_panel_targetSetup = wx.Panel( self.m_notebook_targetSetup, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, wx.TAB_TRAVERSAL )
self.m_panel_targetSetup.SetBackgroundColour( wx.SystemSettings.GetColour( wx.SYS_COLOUR_WINDOW ) )
wSizer_targetSetup = wx.WrapSizer( wx.HORIZONTAL, wx.WRAPSIZER_DEFAULT_FLAGS )
self.m_staticText_mcuSeries = wx.StaticText( self.m_panel_targetSetup, wx.ID_ANY, u"MCU Series:", wx.DefaultPosition, wx.Size( 95,-1 ), 0 )
self.m_staticText_mcuSeries.Wrap( -1 )
wSizer_targetSetup.Add( self.m_staticText_mcuSeries, 0, wx.ALL, 5 )
m_choice_mcuSeriesChoices = [ u"i.MXRT", u"LPC", u"Kinetis" ]
self.m_choice_mcuSeries = wx.Choice( self.m_panel_targetSetup, wx.ID_ANY, wx.DefaultPosition, wx.Size( 150,-1 ), m_choice_mcuSeriesChoices, 0 )
self.m_choice_mcuSeries.SetSelection( 0 )
wSizer_targetSetup.Add( self.m_choice_mcuSeries, 0, wx.ALL, 5 )
self.m_staticText_mcuDevice = wx.StaticText( self.m_panel_targetSetup, wx.ID_ANY, u"MCU Device:", wx.DefaultPosition, wx.Size( 95,-1 ), 0 )
self.m_staticText_mcuDevice.Wrap( -1 )
wSizer_targetSetup.Add( self.m_staticText_mcuDevice, 0, wx.ALL, 5 )
m_choice_mcuDeviceChoices = [ u"i.MXRT1015", u"i.MXRT102x", u"i.MXRT105x", u"i.MXRT106x", u"i.MXRT1064 SIP" ]
self.m_choice_mcuDevice = wx.Choice( self.m_panel_targetSetup, wx.ID_ANY, wx.DefaultPosition, wx.Size( 150,-1 ), m_choice_mcuDeviceChoices, 0 )
self.m_choice_mcuDevice.SetSelection( 2 )
wSizer_targetSetup.Add( self.m_choice_mcuDevice, 0, wx.ALL, 5 )
self.m_staticText_bootDevice = wx.StaticText( self.m_panel_targetSetup, wx.ID_ANY, u"Boot Device:", wx.DefaultPosition, wx.Size( 95,-1 ), 0 )
self.m_staticText_bootDevice.Wrap( -1 )
wSizer_targetSetup.Add( self.m_staticText_bootDevice, 0, wx.ALL, 5 )
m_choice_bootDeviceChoices = [ u"FLEXSPI NOR", u"FLEXSPI NAND", u"SEMC NOR", u"SEMC NAND", u"uSDHC SD", u"uSDHC MMC/eMMC", u"LPSPI NOR/EEPROM" ]
self.m_choice_bootDevice = wx.Choice( self.m_panel_targetSetup, wx.ID_ANY, wx.DefaultPosition, wx.Size( 150,-1 ), m_choice_bootDeviceChoices, 0 )
self.m_choice_bootDevice.SetSelection( 0 )
wSizer_targetSetup.Add( self.m_choice_bootDevice, 0, wx.ALL, 5 )
self.m_staticText_null1TargetSetup = wx.StaticText( self.m_panel_targetSetup, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.Size( 256,5 ), 0 )
self.m_staticText_null1TargetSetup.Wrap( -1 )
wSizer_targetSetup.Add( self.m_staticText_null1TargetSetup, 0, wx.ALL, 5 )
self.m_staticText_null2TargetSetup = wx.StaticText( self.m_panel_targetSetup, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.Size( 20,-1 ), 0 )
self.m_staticText_null2TargetSetup.Wrap( -1 )
wSizer_targetSetup.Add( self.m_staticText_null2TargetSetup, 0, wx.ALL, 5 )
self.m_button_bootDeviceConfiguration = wx.Button( self.m_panel_targetSetup, wx.ID_ANY, u"Boot Device Configuration", wx.DefaultPosition, wx.Size( 200,-1 ), 0 )
wSizer_targetSetup.Add( self.m_button_bootDeviceConfiguration, 0, wx.ALL, 5 )
self.m_staticText_null3TargetSetup = wx.StaticText( self.m_panel_targetSetup, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.Size( 20,-1 ), 0 )
self.m_staticText_null3TargetSetup.Wrap( -1 )
wSizer_targetSetup.Add( self.m_staticText_null3TargetSetup, 0, wx.ALL, 5 )
self.m_button_deviceConfigurationData = wx.Button( self.m_panel_targetSetup, wx.ID_ANY, u"Device Configuration Data (DCD)", wx.DefaultPosition, wx.Size( 200,-1 ), 0 )
wSizer_targetSetup.Add( self.m_button_deviceConfigurationData, 0, wx.ALL, 5 )
self.m_panel_targetSetup.SetSizer( wSizer_targetSetup )
self.m_panel_targetSetup.Layout()
wSizer_targetSetup.Fit( self.m_panel_targetSetup )
self.m_notebook_targetSetup.AddPage( self.m_panel_targetSetup, u"Target Setup", False )
bSizer_setup.Add( self.m_notebook_targetSetup, 1, wx.EXPAND |wx.ALL, 5 )
self.m_notebook_portSetup = wx.Notebook( self, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, 0 )
self.m_panel_portSetup = wx.Panel( self.m_notebook_portSetup, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, wx.TAB_TRAVERSAL )
wSizer_portSetup = wx.WrapSizer( wx.HORIZONTAL, wx.WRAPSIZER_DEFAULT_FLAGS )
self.m_staticText_null1PortSetup = wx.StaticText( self.m_panel_portSetup, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.Size( 50,-1 ), 0 )
self.m_staticText_null1PortSetup.Wrap( -1 )
wSizer_portSetup.Add( self.m_staticText_null1PortSetup, 0, wx.ALL, 5 )
self.m_radioBtn_uart = wx.RadioButton( self.m_panel_portSetup, wx.ID_ANY, u"UART", wx.DefaultPosition, wx.Size( 60,-1 ), 0 )
wSizer_portSetup.Add( self.m_radioBtn_uart, 0, wx.ALL, 5 )
self.m_radioBtn_usbhid = wx.RadioButton( self.m_panel_portSetup, wx.ID_ANY, u"USB-HID", wx.DefaultPosition, wx.Size( 70,-1 ), 0 )
wSizer_portSetup.Add( self.m_radioBtn_usbhid, 0, wx.ALL, 5 )
self.m_staticText_portVid = wx.StaticText( self.m_panel_portSetup, wx.ID_ANY, u"COM Port:", wx.DefaultPosition, wx.Size( 95,-1 ), 0 )
self.m_staticText_portVid.Wrap( -1 )
wSizer_portSetup.Add( self.m_staticText_portVid, 0, wx.ALL, 5 )
m_choice_portVidChoices = []
self.m_choice_portVid = wx.Choice( self.m_panel_portSetup, wx.ID_ANY, wx.DefaultPosition, wx.Size( 150,-1 ), m_choice_portVidChoices, 0 )
self.m_choice_portVid.SetSelection( 0 )
wSizer_portSetup.Add( self.m_choice_portVid, 0, wx.ALL, 5 )
self.m_staticText_baudPid = wx.StaticText( self.m_panel_portSetup, wx.ID_ANY, u"Baudrate:", wx.DefaultPosition, wx.Size( 95,-1 ), 0 )
self.m_staticText_baudPid.Wrap( -1 )
wSizer_portSetup.Add( self.m_staticText_baudPid, 0, wx.ALL, 5 )
m_choice_baudPidChoices = []
self.m_choice_baudPid = wx.Choice( self.m_panel_portSetup, wx.ID_ANY, wx.DefaultPosition, wx.Size( 150,-1 ), m_choice_baudPidChoices, 0 )
self.m_choice_baudPid.SetSelection( 0 )
wSizer_portSetup.Add( self.m_choice_baudPid, 0, wx.ALL, 5 )
self.m_staticText_null2PortSetup = wx.StaticText( self.m_panel_portSetup, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.Size( 256,5 ), 0 )
self.m_staticText_null2PortSetup.Wrap( -1 )
wSizer_portSetup.Add( self.m_staticText_null2PortSetup, 0, wx.ALL, 5 )
self.m_staticText_null3PortSetup = wx.StaticText( self.m_panel_portSetup, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.Size( 45,-1 ), 0 )
self.m_staticText_null3PortSetup.Wrap( -1 )
wSizer_portSetup.Add( self.m_staticText_null3PortSetup, 0, wx.ALL, 5 )
self.m_bitmap_connectLed = wx.StaticBitmap( self.m_panel_portSetup, wx.ID_ANY, wx.NullBitmap, wx.DefaultPosition, wx.Size( 30,30 ), 0 )
wSizer_portSetup.Add( self.m_bitmap_connectLed, 0, wx.ALL, 5 )
self.m_staticText_null4PortSetup = wx.StaticText( self.m_panel_portSetup, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.Size( 30,-1 ), 0 )
self.m_staticText_null4PortSetup.Wrap( -1 )
wSizer_portSetup.Add( self.m_staticText_null4PortSetup, 0, wx.ALL, 5 )
self.m_checkBox_oneStepConnect = wx.CheckBox( self.m_panel_portSetup, wx.ID_ANY, u"One Step", wx.DefaultPosition, wx.Size( -1,30 ), 0 )
wSizer_portSetup.Add( self.m_checkBox_oneStepConnect, 0, wx.ALL, 5 )
self.m_staticText_null5PortSetup = wx.StaticText( self.m_panel_portSetup, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.Size( 45,-1 ), 0 )
self.m_staticText_null5PortSetup.Wrap( -1 )
wSizer_portSetup.Add( self.m_staticText_null5PortSetup, 0, wx.ALL, 5 )
self.m_button_connect = wx.Button( self.m_panel_portSetup, wx.ID_ANY, u"Connect to ROM", wx.DefaultPosition, wx.Size( 150,-1 ), 0 )
wSizer_portSetup.Add( self.m_button_connect, 0, wx.ALL, 5 )
self.m_panel_portSetup.SetSizer( wSizer_portSetup )
self.m_panel_portSetup.Layout()
wSizer_portSetup.Fit( self.m_panel_portSetup )
self.m_notebook_portSetup.AddPage( self.m_panel_portSetup, u"Port Setup", False )
bSizer_setup.Add( self.m_notebook_portSetup, 1, wx.EXPAND |wx.ALL, 5 )
self.m_notebook_deviceStatus = wx.Notebook( self, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, 0 )
self.m_panel_deviceStatus = wx.Panel( self.m_notebook_deviceStatus, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, wx.TAB_TRAVERSAL )
bSizer_deviceStatus = wx.BoxSizer( wx.VERTICAL )
self.m_textCtrl_deviceStatus = wx.TextCtrl( self.m_panel_deviceStatus, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.Size( 250,158 ), wx.TE_MULTILINE|wx.TE_RICH2 )
bSizer_deviceStatus.Add( self.m_textCtrl_deviceStatus, 0, wx.ALL, 5 )
self.m_panel_deviceStatus.SetSizer( bSizer_deviceStatus )
self.m_panel_deviceStatus.Layout()
bSizer_deviceStatus.Fit( self.m_panel_deviceStatus )
self.m_notebook_deviceStatus.AddPage( self.m_panel_deviceStatus, u"Device Status", False )
bSizer_setup.Add( self.m_notebook_deviceStatus, 1, wx.EXPAND |wx.ALL, 5 )
wSizer_func.Add( bSizer_setup, 1, wx.EXPAND, 5 )
bSizer_boot = wx.BoxSizer( wx.VERTICAL )
wSizer_bootType = wx.WrapSizer( wx.HORIZONTAL, wx.WRAPSIZER_DEFAULT_FLAGS )
self.m_staticText_secureBootType = wx.StaticText( self, wx.ID_ANY, u"Secure Boot Type:", wx.DefaultPosition, wx.Size( 118,-1 ), 0 )
self.m_staticText_secureBootType.Wrap( -1 )
self.m_staticText_secureBootType.SetFont( wx.Font( 10, wx.FONTFAMILY_SWISS, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL, False, "Arial Rounded MT Bold" ) )
self.m_staticText_secureBootType.SetForegroundColour( wx.SystemSettings.GetColour( wx.SYS_COLOUR_BACKGROUND ) )
wSizer_bootType.Add( self.m_staticText_secureBootType, 0, wx.ALL, 5 )
m_choice_secureBootTypeChoices = [ u"DEV Unsigned Image Boot", u"HAB Signed Image Boot", u"HAB Encrypted Image Boot", u"BEE Encrypted Image Boot" ]
self.m_choice_secureBootType = wx.Choice( self, wx.ID_ANY, wx.DefaultPosition, wx.Size( 299,-1 ), m_choice_secureBootTypeChoices, 0 )
self.m_choice_secureBootType.SetSelection( 0 )
wSizer_bootType.Add( self.m_choice_secureBootType, 0, wx.ALL, 5 )
self.m_button_allInOneAction = wx.Button( self, wx.ID_ANY, u"All-In-One Action", wx.DefaultPosition, wx.Size( 124,-1 ), 0 )
wSizer_bootType.Add( self.m_button_allInOneAction, 0, wx.ALL, 5 )
self.m_staticText_null1BootType = wx.StaticText( self, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.Size( 30,-1 ), 0 )
self.m_staticText_null1BootType.Wrap( -1 )
wSizer_bootType.Add( self.m_staticText_null1BootType, 0, wx.ALL, 5 )
self.m_bitmap_nxp = wx.StaticBitmap( self, wx.ID_ANY, wx.NullBitmap, wx.DefaultPosition, wx.Size( 80,30 ), 0 )
wSizer_bootType.Add( self.m_bitmap_nxp, 0, wx.ALL, 5 )
bSizer_boot.Add( wSizer_bootType, 1, wx.EXPAND, 5 )
self.m_notebook_imageSeq = wx.Notebook( self, wx.ID_ANY, wx.DefaultPosition, wx.Size( -1,450 ), 0 )
self.m_notebook_imageSeq.SetBackgroundColour( wx.SystemSettings.GetColour( wx.SYS_COLOUR_WINDOW ) )
self.m_panel_genSeq = wx.Panel( self.m_notebook_imageSeq, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, wx.TAB_TRAVERSAL )
self.m_panel_genSeq.SetForegroundColour( wx.SystemSettings.GetColour( wx.SYS_COLOUR_WINDOW ) )
self.m_panel_genSeq.SetBackgroundColour( wx.Colour( 64, 64, 64 ) )
wSizer_genSeq = wx.WrapSizer( wx.HORIZONTAL, wx.WRAPSIZER_DEFAULT_FLAGS )
wSizer_genSeq.SetMinSize( wx.Size( 800,-1 ) )
self.m_panel_doAuth = wx.Panel( self.m_panel_genSeq, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, wx.TAB_TRAVERSAL )
self.m_panel_doAuth.SetBackgroundColour( wx.Colour( 64, 64, 64 ) )
bSizer_doAuth = wx.BoxSizer( wx.VERTICAL )
self.m_panel_doAuth1_certInput = wx.Panel( self.m_panel_doAuth, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, wx.TAB_TRAVERSAL )
sbSizer_certInput = wx.StaticBoxSizer( wx.StaticBox( self.m_panel_doAuth1_certInput, wx.ID_ANY, wx.EmptyString ), wx.VERTICAL )
self.m_staticText_serial = wx.StaticText( sbSizer_certInput.GetStaticBox(), wx.ID_ANY, u"serial (8 digits):", wx.DefaultPosition, wx.DefaultSize, 0 )
self.m_staticText_serial.Wrap( -1 )
sbSizer_certInput.Add( self.m_staticText_serial, 0, wx.ALL, 5 )
self.m_textCtrl_serial = wx.TextCtrl( sbSizer_certInput.GetStaticBox(), wx.ID_ANY, u"12345678", wx.DefaultPosition, wx.Size( 150,-1 ), 0 )
sbSizer_certInput.Add( self.m_textCtrl_serial, 0, wx.ALIGN_CENTER|wx.ALL, 5 )
self.m_staticText_keyPass = wx.StaticText( sbSizer_certInput.GetStaticBox(), wx.ID_ANY, u"key_pass (text):", wx.DefaultPosition, wx.DefaultSize, 0 )
self.m_staticText_keyPass.Wrap( -1 )
sbSizer_certInput.Add( self.m_staticText_keyPass, 0, wx.ALL, 5 )
self.m_textCtrl_keyPass = wx.TextCtrl( sbSizer_certInput.GetStaticBox(), wx.ID_ANY, u"test", wx.DefaultPosition, wx.Size( 150,-1 ), 0 )
sbSizer_certInput.Add( self.m_textCtrl_keyPass, 0, wx.ALIGN_CENTER|wx.ALL, 5 )
self.m_button_advCertSettings = wx.Button( sbSizer_certInput.GetStaticBox(), wx.ID_ANY, u"Advanced Cert Settings", wx.DefaultPosition, wx.Size( 150,-1 ), 0 )
sbSizer_certInput.Add( self.m_button_advCertSettings, 0, wx.ALIGN_CENTER|wx.ALL, 5 )
self.m_panel_doAuth1_certInput.SetSizer( sbSizer_certInput )
self.m_panel_doAuth1_certInput.Layout()
sbSizer_certInput.Fit( self.m_panel_doAuth1_certInput )
bSizer_doAuth.Add( self.m_panel_doAuth1_certInput, 1, wx.EXPAND |wx.ALL, 5 )
self.m_panel_doAuth2_certFmt = wx.Panel( self.m_panel_doAuth, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, wx.TAB_TRAVERSAL )
sbSizer_certFmt = wx.StaticBoxSizer( wx.StaticBox( self.m_panel_doAuth2_certFmt, wx.ID_ANY, wx.EmptyString ), wx.VERTICAL )
self.m_staticText_certFmt = wx.StaticText( sbSizer_certFmt.GetStaticBox(), wx.ID_ANY, u"Certificate Format:", wx.DefaultPosition, wx.DefaultSize, 0 )
self.m_staticText_certFmt.Wrap( -1 )
sbSizer_certFmt.Add( self.m_staticText_certFmt, 0, wx.ALL, 5 )
m_choice_certFmtChoices = [ u"X.509v3" ]
self.m_choice_certFmt = wx.Choice( sbSizer_certFmt.GetStaticBox(), wx.ID_ANY, wx.DefaultPosition, wx.Size( 90,-1 ), m_choice_certFmtChoices, 0 )
self.m_choice_certFmt.SetSelection( 0 )
sbSizer_certFmt.Add( self.m_choice_certFmt, 0, wx.ALIGN_CENTER|wx.ALL, 5 )
self.m_staticText_hashAlgo = wx.StaticText( sbSizer_certFmt.GetStaticBox(), wx.ID_ANY, u"Hash Algorithm:", wx.DefaultPosition, wx.DefaultSize, 0 )
self.m_staticText_hashAlgo.Wrap( -1 )
sbSizer_certFmt.Add( self.m_staticText_hashAlgo, 0, wx.ALL, 5 )
m_choice_hashAlgoChoices = [ u"SHA-256" ]
self.m_choice_hashAlgo = wx.Choice( sbSizer_certFmt.GetStaticBox(), wx.ID_ANY, wx.DefaultPosition, wx.Size( 90,-1 ), m_choice_hashAlgoChoices, 0 )
self.m_choice_hashAlgo.SetSelection( 0 )
sbSizer_certFmt.Add( self.m_choice_hashAlgo, 0, wx.ALIGN_CENTER|wx.ALL, 5 )
self.m_panel_doAuth2_certFmt.SetSizer( sbSizer_certFmt )
self.m_panel_doAuth2_certFmt.Layout()
sbSizer_certFmt.Fit( self.m_panel_doAuth2_certFmt )
bSizer_doAuth.Add( self.m_panel_doAuth2_certFmt, 1, wx.EXPAND |wx.ALL, 5 )
self.m_button_genCert = wx.Button( self.m_panel_doAuth, wx.ID_ANY, u"Generate Certificate, SRK", wx.DefaultPosition, wx.Size( 195,-1 ), 0 )
bSizer_doAuth.Add( self.m_button_genCert, 0, wx.ALIGN_CENTER|wx.ALL, 5 )
self.m_panel_doAuth.SetSizer( bSizer_doAuth )
self.m_panel_doAuth.Layout()
bSizer_doAuth.Fit( self.m_panel_doAuth )
wSizer_genSeq.Add( self.m_panel_doAuth, 1, wx.EXPAND |wx.ALL, 5 )
self.m_panel_genImage = wx.Panel( self.m_panel_genSeq, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, wx.TAB_TRAVERSAL )
self.m_panel_genImage.SetBackgroundColour( wx.Colour( 64, 64, 64 ) )
bSizer_genImage = wx.BoxSizer( wx.VERTICAL )
self.m_panel_genImage1_browseApp = wx.Panel( self.m_panel_genImage, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, wx.TAB_TRAVERSAL )
sbSizer_browseApp = wx.StaticBoxSizer( wx.StaticBox( self.m_panel_genImage1_browseApp, wx.ID_ANY, wx.EmptyString ), wx.VERTICAL )
self.m_staticText_appPath = wx.StaticText( sbSizer_browseApp.GetStaticBox(), wx.ID_ANY, u"Application Image File:", wx.DefaultPosition, wx.Size( -1,-1 ), 0 )
self.m_staticText_appPath.Wrap( -1 )
sbSizer_browseApp.Add( self.m_staticText_appPath, 0, wx.ALL, 5 )
self.m_filePicker_appPath = wx.FilePickerCtrl( sbSizer_browseApp.GetStaticBox(), wx.ID_ANY, wx.EmptyString, u"Select a file", u"*.*", wx.DefaultPosition, wx.Size( 280,23 ), wx.FLP_DEFAULT_STYLE )
sbSizer_browseApp.Add( self.m_filePicker_appPath, 0, wx.ALIGN_CENTER|wx.ALL, 5 )
m_choice_appFormatChoices = [ u"Auto-detect image format", u".out(axf) from Keil MDK", u".out(elf) from IAR EWARM", u".out(axf) from MCUXpresso", u".out(elf) from GCC ARM", u"Motorola S-Records (.srec/.s19)", u"Intel Extended Hex (.hex)", u"Raw Binary (.bin)" ]
self.m_choice_appFormat = wx.Choice( sbSizer_browseApp.GetStaticBox(), wx.ID_ANY, wx.DefaultPosition, wx.Size( 200,-1 ), m_choice_appFormatChoices, 0 )
self.m_choice_appFormat.SetSelection( 0 )
sbSizer_browseApp.Add( self.m_choice_appFormat, 0, wx.ALIGN_CENTER|wx.ALL, 5 )
self.m_staticText_appBaseAddr = wx.StaticText( sbSizer_browseApp.GetStaticBox(), wx.ID_ANY, u"Base Address for Raw Binary Image:", wx.DefaultPosition, wx.DefaultSize, 0 )
self.m_staticText_appBaseAddr.Wrap( -1 )
sbSizer_browseApp.Add( self.m_staticText_appBaseAddr, 0, wx.ALL, 5 )
self.m_textCtrl_appBaseAddr = wx.TextCtrl( sbSizer_browseApp.GetStaticBox(), wx.ID_ANY, u"Eg: 0x00003000", wx.DefaultPosition, wx.Size( 200,-1 ), 0 )
self.m_textCtrl_appBaseAddr.SetBackgroundColour( wx.SystemSettings.GetColour( wx.SYS_COLOUR_WINDOW ) )
sbSizer_browseApp.Add( self.m_textCtrl_appBaseAddr, 0, wx.ALIGN_CENTER|wx.ALL, 5 )
self.m_panel_genImage1_browseApp.SetSizer( sbSizer_browseApp )
self.m_panel_genImage1_browseApp.Layout()
sbSizer_browseApp.Fit( self.m_panel_genImage1_browseApp )
bSizer_genImage.Add( self.m_panel_genImage1_browseApp, 1, wx.EXPAND |wx.ALL, 5 )
self.m_panel_genImage2_habCryptoAlgo = wx.Panel( self.m_panel_genImage, wx.ID_ANY, wx.DefaultPosition, wx.Size( -1,-1 ), wx.TAB_TRAVERSAL )
sbSizer_habCryptoAlgo = wx.StaticBoxSizer( wx.StaticBox( self.m_panel_genImage2_habCryptoAlgo, wx.ID_ANY, wx.EmptyString ), wx.VERTICAL )
self.m_staticText_habCryptoAlgo = wx.StaticText( sbSizer_habCryptoAlgo.GetStaticBox(), wx.ID_ANY, u"HAB Encryption Algorithm:", wx.DefaultPosition, wx.DefaultSize, 0 )
self.m_staticText_habCryptoAlgo.Wrap( -1 )
sbSizer_habCryptoAlgo.Add( self.m_staticText_habCryptoAlgo, 0, wx.ALL, 5 )
m_choice_habCryptoAlgoChoices = [ u"AES-128" ]
self.m_choice_habCryptoAlgo = wx.Choice( sbSizer_habCryptoAlgo.GetStaticBox(), wx.ID_ANY, wx.DefaultPosition, wx.Size( 90,-1 ), m_choice_habCryptoAlgoChoices, 0 )
self.m_choice_habCryptoAlgo.SetSelection( 0 )
sbSizer_habCryptoAlgo.Add( self.m_choice_habCryptoAlgo, 0, wx.ALIGN_CENTER|wx.ALL, 5 )
self.m_panel_genImage2_habCryptoAlgo.SetSizer( sbSizer_habCryptoAlgo )
self.m_panel_genImage2_habCryptoAlgo.Layout()
sbSizer_habCryptoAlgo.Fit( self.m_panel_genImage2_habCryptoAlgo )
bSizer_genImage.Add( self.m_panel_genImage2_habCryptoAlgo, 1, wx.EXPAND |wx.ALL, 5 )
self.m_panel_genImage3_enableCertForHwCrypto = wx.Panel( self.m_panel_genImage, wx.ID_ANY, wx.DefaultPosition, wx.Size( -1,-1 ), wx.TAB_TRAVERSAL )
sbSizer_enableCertForHwCrypto = wx.StaticBoxSizer( wx.StaticBox( self.m_panel_genImage3_enableCertForHwCrypto, wx.ID_ANY, wx.EmptyString ), wx.VERTICAL )
self.m_staticText_enableCertForHwCrypto = wx.StaticText( sbSizer_enableCertForHwCrypto.GetStaticBox(), wx.ID_ANY, u"Enable Certificate for HW (BEE/OTFAD) Encryption:", wx.DefaultPosition, wx.DefaultSize, 0 )
self.m_staticText_enableCertForHwCrypto.Wrap( -1 )
sbSizer_enableCertForHwCrypto.Add( self.m_staticText_enableCertForHwCrypto, 0, wx.ALL, 5 )
m_choice_enableCertForHwCryptoChoices = [ u"No", u"Yes" ]
self.m_choice_enableCertForHwCrypto = wx.Choice( sbSizer_enableCertForHwCrypto.GetStaticBox(), wx.ID_ANY, wx.DefaultPosition, wx.Size( 90,-1 ), m_choice_enableCertForHwCryptoChoices, 0 )
self.m_choice_enableCertForHwCrypto.SetSelection( 0 )
sbSizer_enableCertForHwCrypto.Add( self.m_choice_enableCertForHwCrypto, 0, wx.ALIGN_CENTER|wx.ALL, 5 )
self.m_panel_genImage3_enableCertForHwCrypto.SetSizer( sbSizer_enableCertForHwCrypto )
self.m_panel_genImage3_enableCertForHwCrypto.Layout()
sbSizer_enableCertForHwCrypto.Fit( self.m_panel_genImage3_enableCertForHwCrypto )
bSizer_genImage.Add( self.m_panel_genImage3_enableCertForHwCrypto, 1, wx.EXPAND |wx.ALL, 5 )
self.m_button_genImage = wx.Button( self.m_panel_genImage, wx.ID_ANY, u"Generate Unsigned Bootable Image", wx.DefaultPosition, wx.Size( 225,-1 ), 0 )
bSizer_genImage.Add( self.m_button_genImage, 0, wx.ALIGN_CENTER|wx.ALL, 5 )
self.m_panel_genImage.SetSizer( bSizer_genImage )
self.m_panel_genImage.Layout()
bSizer_genImage.Fit( self.m_panel_genImage )
wSizer_genSeq.Add( self.m_panel_genImage, 1, wx.EXPAND |wx.ALL, 5 )
self.m_panel_prepHwCrypto = wx.Panel( self.m_panel_genSeq, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, wx.TAB_TRAVERSAL )
self.m_panel_prepHwCrypto.SetBackgroundColour( wx.Colour( 64, 64, 64 ) )
bSizer_prepHwCrypto = wx.BoxSizer( wx.VERTICAL )
self.m_panel_prepHwCrypto1_hwCryptoKeyRegion = wx.Panel( self.m_panel_prepHwCrypto, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, wx.TAB_TRAVERSAL )
sbSizer_keyStorageRegion = wx.StaticBoxSizer( wx.StaticBox( self.m_panel_prepHwCrypto1_hwCryptoKeyRegion, wx.ID_ANY, wx.EmptyString ), wx.VERTICAL )
self.m_staticText_keyStorageRegion = wx.StaticText( sbSizer_keyStorageRegion.GetStaticBox(), wx.ID_ANY, u"Key Storage Region:", wx.DefaultPosition, wx.DefaultSize, 0 )
self.m_staticText_keyStorageRegion.Wrap( -1 )
sbSizer_keyStorageRegion.Add( self.m_staticText_keyStorageRegion, 0, wx.ALL, 5 )
m_choice_keyStorageRegionChoices = [ u"Fixed Otpmk(SNVS) Key", u"Flexible User Keys" ]
self.m_choice_keyStorageRegion = wx.Choice( sbSizer_keyStorageRegion.GetStaticBox(), wx.ID_ANY, wx.DefaultPosition, wx.Size( 150,-1 ), m_choice_keyStorageRegionChoices, 0 )
self.m_choice_keyStorageRegion.SetSelection( 1 )
sbSizer_keyStorageRegion.Add( self.m_choice_keyStorageRegion, 0, wx.ALIGN_CENTER|wx.ALL, 5 )
self.m_staticText_availHwCryptoEngines = wx.StaticText( sbSizer_keyStorageRegion.GetStaticBox(), wx.ID_ANY, u"Max Available HW Crypto Engines:", wx.DefaultPosition, wx.DefaultSize, 0 )
self.m_staticText_availHwCryptoEngines.Wrap( -1 )
sbSizer_keyStorageRegion.Add( self.m_staticText_availHwCryptoEngines, 0, wx.ALL, 5 )
m_choice_availHwCryptoEnginesChoices = [ u"1", u"2" ]
self.m_choice_availHwCryptoEngines = wx.Choice( sbSizer_keyStorageRegion.GetStaticBox(), wx.ID_ANY, wx.DefaultPosition, wx.Size( 90,-1 ), m_choice_availHwCryptoEnginesChoices, 0 )
self.m_choice_availHwCryptoEngines.SetSelection( 0 )
sbSizer_keyStorageRegion.Add( self.m_choice_availHwCryptoEngines, 0, wx.ALIGN_CENTER|wx.ALL, 5 )
self.m_button_advKeySettings = wx.Button( sbSizer_keyStorageRegion.GetStaticBox(), wx.ID_ANY, u"Advanced Key Settings", wx.DefaultPosition, wx.Size( 150,-1 ), 0 )
sbSizer_keyStorageRegion.Add( self.m_button_advKeySettings, 0, wx.ALIGN_CENTER|wx.ALL, 5 )
self.m_panel_prepHwCrypto1_hwCryptoKeyRegion.SetSizer( sbSizer_keyStorageRegion )
self.m_panel_prepHwCrypto1_hwCryptoKeyRegion.Layout()
sbSizer_keyStorageRegion.Fit( self.m_panel_prepHwCrypto1_hwCryptoKeyRegion )
bSizer_prepHwCrypto.Add( self.m_panel_prepHwCrypto1_hwCryptoKeyRegion, 1, wx.EXPAND |wx.ALL, 5 )
self.m_panel_prepHwCrypto2_hwCryptoAlgo = wx.Panel( self.m_panel_prepHwCrypto, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, wx.TAB_TRAVERSAL )
sbSizer_hwCryptoAlgo = wx.StaticBoxSizer( wx.StaticBox( self.m_panel_prepHwCrypto2_hwCryptoAlgo, wx.ID_ANY, wx.EmptyString ), wx.VERTICAL )
self.m_staticText_hwCryptoAlgo = wx.StaticText( sbSizer_hwCryptoAlgo.GetStaticBox(), wx.ID_ANY, u"HW Encryption Algorithm:", wx.DefaultPosition, wx.DefaultSize, 0 )
self.m_staticText_hwCryptoAlgo.Wrap( -1 )
sbSizer_hwCryptoAlgo.Add( self.m_staticText_hwCryptoAlgo, 0, wx.ALL, 5 )
m_choice_hwCryptoAlgoChoices = [ u"AES-128" ]
self.m_choice_hwCryptoAlgo = wx.Choice( sbSizer_hwCryptoAlgo.GetStaticBox(), wx.ID_ANY, wx.DefaultPosition, wx.Size( 90,-1 ), m_choice_hwCryptoAlgoChoices, 0 )
self.m_choice_hwCryptoAlgo.SetSelection( 0 )
sbSizer_hwCryptoAlgo.Add( self.m_choice_hwCryptoAlgo, 0, wx.ALIGN_CENTER|wx.ALL, 5 )
self.m_staticText_maxFacCnt = wx.StaticText( sbSizer_hwCryptoAlgo.GetStaticBox(), wx.ID_ANY, u"Max Protection Regions:", wx.DefaultPosition, wx.DefaultSize, 0 )
self.m_staticText_maxFacCnt.Wrap( -1 )
sbSizer_hwCryptoAlgo.Add( self.m_staticText_maxFacCnt, 0, wx.ALL, 5 )
m_choice_maxFacCntChoices = [ u"3" ]
self.m_choice_maxFacCnt = wx.Choice( sbSizer_hwCryptoAlgo.GetStaticBox(), wx.ID_ANY, wx.DefaultPosition, wx.Size( 90,-1 ), m_choice_maxFacCntChoices, 0 )
self.m_choice_maxFacCnt.SetSelection( 0 )
sbSizer_hwCryptoAlgo.Add( self.m_choice_maxFacCnt, 0, wx.ALIGN_CENTER|wx.ALL, 5 )
self.m_panel_prepHwCrypto2_hwCryptoAlgo.SetSizer( sbSizer_hwCryptoAlgo )
self.m_panel_prepHwCrypto2_hwCryptoAlgo.Layout()
sbSizer_hwCryptoAlgo.Fit( self.m_panel_prepHwCrypto2_hwCryptoAlgo )
bSizer_prepHwCrypto.Add( self.m_panel_prepHwCrypto2_hwCryptoAlgo, 1, wx.EXPAND |wx.ALL, 5 )
self.m_button_prepHwCrypto = wx.Button( self.m_panel_prepHwCrypto, wx.ID_ANY, u"Prepare For Encryption", wx.DefaultPosition, wx.Size( 195,-1 ), 0 )
bSizer_prepHwCrypto.Add( self.m_button_prepHwCrypto, 0, wx.ALIGN_CENTER|wx.ALL, 5 )
self.m_panel_prepHwCrypto.SetSizer( bSizer_prepHwCrypto )
self.m_panel_prepHwCrypto.Layout()
bSizer_prepHwCrypto.Fit( self.m_panel_prepHwCrypto )
wSizer_genSeq.Add( self.m_panel_prepHwCrypto, 1, wx.EXPAND |wx.ALL, 5 )
self.m_panel_genSeq.SetSizer( wSizer_genSeq )
self.m_panel_genSeq.Layout()
wSizer_genSeq.Fit( self.m_panel_genSeq )
self.m_notebook_imageSeq.AddPage( self.m_panel_genSeq, u"Image Generation Sequence", True )
self.m_panel_loadSeq = wx.Panel( self.m_notebook_imageSeq, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, wx.TAB_TRAVERSAL )
self.m_panel_loadSeq.SetBackgroundColour( wx.Colour( 160, 160, 160 ) )
wSizer_loadSeq = wx.WrapSizer( wx.HORIZONTAL, wx.WRAPSIZER_DEFAULT_FLAGS )
wSizer_loadSeq.SetMinSize( wx.Size( 800,-1 ) )
self.m_panel_progSrk = wx.Panel( self.m_panel_loadSeq, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, wx.TAB_TRAVERSAL )
self.m_panel_progSrk.SetBackgroundColour( wx.Colour( 64, 64, 64 ) )
bSizer_progSrk = wx.BoxSizer( wx.VERTICAL )
self.m_panel_progSrk1_showSrk = wx.Panel( self.m_panel_progSrk, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, wx.TAB_TRAVERSAL )
sbSizer_showSrk = wx.StaticBoxSizer( wx.StaticBox( self.m_panel_progSrk1_showSrk, wx.ID_ANY, wx.EmptyString ), wx.VERTICAL )
self.m_staticText_srk256bit = wx.StaticText( sbSizer_showSrk.GetStaticBox(), wx.ID_ANY, u"Burn the SRK data (256 bits) below into the Fuse SRK0-7 region:", wx.DefaultPosition, wx.Size( 120,60 ), 0 )
self.m_staticText_srk256bit.Wrap( -1 )
sbSizer_showSrk.Add( self.m_staticText_srk256bit, 0, wx.ALL, 5 )
self.m_textCtrl_srk256bit = wx.TextCtrl( sbSizer_showSrk.GetStaticBox(), wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.Size( 110,180 ), wx.TE_MULTILINE|wx.TE_NO_VSCROLL|wx.TE_RICH2 )
self.m_textCtrl_srk256bit.SetBackgroundColour( wx.SystemSettings.GetColour( wx.SYS_COLOUR_GRAYTEXT ) )
sbSizer_showSrk.Add( self.m_textCtrl_srk256bit, 0, wx.ALIGN_CENTER|wx.ALL, 5 )
self.m_panel_progSrk1_showSrk.SetSizer( sbSizer_showSrk )
self.m_panel_progSrk1_showSrk.Layout()
sbSizer_showSrk.Fit( self.m_panel_progSrk1_showSrk )
bSizer_progSrk.Add( self.m_panel_progSrk1_showSrk, 1, wx.EXPAND |wx.ALL, 5 )
self.m_button_progSrk = wx.Button( self.m_panel_progSrk, wx.ID_ANY, u"Burn SRK data", wx.DefaultPosition, wx.DefaultSize, 0 )
bSizer_progSrk.Add( self.m_button_progSrk, 0, wx.ALIGN_CENTER|wx.ALL, 5 )
self.m_panel_progSrk.SetSizer( bSizer_progSrk )
self.m_panel_progSrk.Layout()
bSizer_progSrk.Fit( self.m_panel_progSrk )
wSizer_loadSeq.Add( self.m_panel_progSrk, 1, wx.EXPAND |wx.ALL, 5 )
self.m_panel_operHwCrypto = wx.Panel( self.m_panel_loadSeq, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, wx.TAB_TRAVERSAL )
self.m_panel_operHwCrypto.SetBackgroundColour( wx.Colour( 64, 64, 64 ) )
bSizer_operHwCrypto = wx.BoxSizer( wx.VERTICAL )
self.m_panel_operHwCrypto1_hwCryptoKeyInfo = wx.Panel( self.m_panel_operHwCrypto, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, wx.TAB_TRAVERSAL )
sbSizer_hwCryptoKeyInfo = wx.StaticBoxSizer( wx.StaticBox( self.m_panel_operHwCrypto1_hwCryptoKeyInfo, wx.ID_ANY, wx.EmptyString ), wx.VERTICAL )
self.m_staticText_hwCryptoKeyInfo = wx.StaticText( sbSizer_hwCryptoKeyInfo.GetStaticBox(), wx.ID_ANY, u"Burn user DEK data (128 bits * n) into the fuse region below for HW crypto:", wx.DefaultPosition, wx.Size( 130,45 ), 0 )
self.m_staticText_hwCryptoKeyInfo.Wrap( -1 )
sbSizer_hwCryptoKeyInfo.Add( self.m_staticText_hwCryptoKeyInfo, 0, wx.ALL, 5 )
self.m_panel_operHwCrypto1_hwCryptoKeyInfo.SetSizer( sbSizer_hwCryptoKeyInfo )
self.m_panel_operHwCrypto1_hwCryptoKeyInfo.Layout()
sbSizer_hwCryptoKeyInfo.Fit( self.m_panel_operHwCrypto1_hwCryptoKeyInfo )
bSizer_operHwCrypto.Add( self.m_panel_operHwCrypto1_hwCryptoKeyInfo, 1, wx.EXPAND |wx.ALL, 5 )
self.m_panel_operHwCrypto2_showGp4Dek = wx.Panel( self.m_panel_operHwCrypto, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, wx.TAB_TRAVERSAL )
sbSizer_showGp4Dek = wx.StaticBoxSizer( wx.StaticBox( self.m_panel_operHwCrypto2_showGp4Dek, wx.ID_ANY, u"Fuse GP4 Region:" ), wx.VERTICAL )
self.m_textCtrl_gp4Dek128bit = wx.TextCtrl( sbSizer_showGp4Dek.GetStaticBox(), wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.Size( 110,100 ), wx.TE_MULTILINE|wx.TE_NO_VSCROLL|wx.TE_RICH2 )
self.m_textCtrl_gp4Dek128bit.SetBackgroundColour( wx.SystemSettings.GetColour( wx.SYS_COLOUR_GRAYTEXT ) )
sbSizer_showGp4Dek.Add( self.m_textCtrl_gp4Dek128bit, 0, wx.ALIGN_CENTER|wx.ALL, 5 )
self.m_panel_operHwCrypto2_showGp4Dek.SetSizer( sbSizer_showGp4Dek )
self.m_panel_operHwCrypto2_showGp4Dek.Layout()
sbSizer_showGp4Dek.Fit( self.m_panel_operHwCrypto2_showGp4Dek )
bSizer_operHwCrypto.Add( self.m_panel_operHwCrypto2_showGp4Dek, 1, wx.EXPAND |wx.ALL, 5 )
self.m_panel_operHwCrypto3_showSwgp2Dek = wx.Panel( self.m_panel_operHwCrypto, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, wx.TAB_TRAVERSAL )
sbSizer_showSwgp2Dek = wx.StaticBoxSizer( wx.StaticBox( self.m_panel_operHwCrypto3_showSwgp2Dek, wx.ID_ANY, u"Fuse SW_GP2 Region:" ), wx.VERTICAL )
self.m_textCtrl_swgp2Dek128bit = wx.TextCtrl( sbSizer_showSwgp2Dek.GetStaticBox(), wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.Size( 110,100 ), wx.TE_MULTILINE|wx.TE_NO_VSCROLL|wx.TE_RICH2 )
self.m_textCtrl_swgp2Dek128bit.SetBackgroundColour( wx.SystemSettings.GetColour( wx.SYS_COLOUR_GRAYTEXT ) )
sbSizer_showSwgp2Dek.Add( self.m_textCtrl_swgp2Dek128bit, 0, wx.ALIGN_CENTER|wx.ALL, 5 )
self.m_panel_operHwCrypto3_showSwgp2Dek.SetSizer( sbSizer_showSwgp2Dek )
self.m_panel_operHwCrypto3_showSwgp2Dek.Layout()
sbSizer_showSwgp2Dek.Fit( self.m_panel_operHwCrypto3_showSwgp2Dek )
bSizer_operHwCrypto.Add( self.m_panel_operHwCrypto3_showSwgp2Dek, 1, wx.EXPAND |wx.ALL, 5 )
self.m_button_operHwCrypto = wx.Button( self.m_panel_operHwCrypto, wx.ID_ANY, u"Burn DEK data", wx.DefaultPosition, wx.DefaultSize, 0 )
bSizer_operHwCrypto.Add( self.m_button_operHwCrypto, 0, wx.ALIGN_CENTER|wx.ALL, 5 )
self.m_panel_operHwCrypto.SetSizer( bSizer_operHwCrypto )
self.m_panel_operHwCrypto.Layout()
bSizer_operHwCrypto.Fit( self.m_panel_operHwCrypto )
wSizer_loadSeq.Add( self.m_panel_operHwCrypto, 1, wx.EXPAND |wx.ALL, 5 )
self.m_panel_flashImage = wx.Panel( self.m_panel_loadSeq, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, wx.TAB_TRAVERSAL )
self.m_panel_flashImage.SetBackgroundColour( wx.Colour( 64, 64, 64 ) )
bSizer_flashImage = wx.BoxSizer( wx.VERTICAL )
self.m_panel_flashImage1_showImage = wx.Panel( self.m_panel_flashImage, wx.ID_ANY, wx.DefaultPosition, wx.Size( -1,-1 ), wx.TAB_TRAVERSAL )
self.m_panel_flashImage1_showImage.SetBackgroundColour( wx.SystemSettings.GetColour( wx.SYS_COLOUR_BTNSHADOW ) )
sbSizer_showImage = wx.StaticBoxSizer( wx.StaticBox( self.m_panel_flashImage1_showImage, wx.ID_ANY, wx.EmptyString ), wx.VERTICAL )
self.m_staticText_showImage = wx.StaticText( sbSizer_showImage.GetStaticBox(), wx.ID_ANY, u"Program final bootable image to boot device:", wx.DefaultPosition, wx.Size( 160,35 ), 0 )
self.m_staticText_showImage.Wrap( -1 )
sbSizer_showImage.Add( self.m_staticText_showImage, 0, wx.ALL, 5 )
self.m_bitmap_bootableImage = wx.StaticBitmap( sbSizer_showImage.GetStaticBox(), wx.ID_ANY, wx.NullBitmap, wx.DefaultPosition, wx.Size( 160,310 ), 0 )
sbSizer_showImage.Add( self.m_bitmap_bootableImage, 0, wx.ALIGN_CENTER|wx.ALL, 5 )
self.m_panel_flashImage1_showImage.SetSizer( sbSizer_showImage )
self.m_panel_flashImage1_showImage.Layout()
sbSizer_showImage.Fit( self.m_panel_flashImage1_showImage )
bSizer_flashImage.Add( self.m_panel_flashImage1_showImage, 1, wx.EXPAND |wx.ALL, 5 )
self.m_button_flashImage = wx.Button( self.m_panel_flashImage, wx.ID_ANY, u"Load Image", wx.DefaultPosition, wx.Size( 165,-1 ), 0 )
bSizer_flashImage.Add( self.m_button_flashImage, 0, wx.ALIGN_CENTER|wx.ALL, 5 )
self.m_panel_flashImage.SetSizer( bSizer_flashImage )
self.m_panel_flashImage.Layout()
bSizer_flashImage.Fit( self.m_panel_flashImage )
wSizer_loadSeq.Add( self.m_panel_flashImage, 1, wx.EXPAND |wx.ALL, 5 )
self.m_panel_progDek = wx.Panel( self.m_panel_loadSeq, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, wx.TAB_TRAVERSAL )
self.m_panel_progDek.SetBackgroundColour( wx.Colour( 64, 64, 64 ) )
bSizer_progDek = wx.BoxSizer( wx.VERTICAL )
self.m_panel_progDek1_showHabDek = wx.Panel( self.m_panel_progDek, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, wx.TAB_TRAVERSAL )
sbSizer_showHabDek = wx.StaticBoxSizer( wx.StaticBox( self.m_panel_progDek1_showHabDek, wx.ID_ANY, wx.EmptyString ), wx.VERTICAL )
self.m_staticText_habDek128bit = wx.StaticText( sbSizer_showHabDek.GetStaticBox(), wx.ID_ANY, u"Use the DEK data (128 bits) below to generate the keyblob and program it to flash for HAB:", wx.DefaultPosition, wx.Size( 160,70 ), 0 )
self.m_staticText_habDek128bit.Wrap( -1 )
sbSizer_showHabDek.Add( self.m_staticText_habDek128bit, 0, wx.ALL, 5 )
self.m_textCtrl_habDek128bit = wx.TextCtrl( sbSizer_showHabDek.GetStaticBox(), wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.Size( 110,100 ), wx.TE_MULTILINE|wx.TE_NO_VSCROLL|wx.TE_RICH2 )
self.m_textCtrl_habDek128bit.SetBackgroundColour( wx.SystemSettings.GetColour( wx.SYS_COLOUR_GRAYTEXT ) )
sbSizer_showHabDek.Add( self.m_textCtrl_habDek128bit, 0, wx.ALIGN_CENTER|wx.ALL, 5 )
self.m_panel_progDek1_showHabDek.SetSizer( sbSizer_showHabDek )
self.m_panel_progDek1_showHabDek.Layout()
sbSizer_showHabDek.Fit( self.m_panel_progDek1_showHabDek )
bSizer_progDek.Add( self.m_panel_progDek1_showHabDek, 1, wx.EXPAND |wx.ALL, 5 )
self.m_button_progDek = wx.Button( self.m_panel_progDek, wx.ID_ANY, u"Enable HAB, Load KeyBlob Data", wx.DefaultPosition, wx.DefaultSize, 0 )
bSizer_progDek.Add( self.m_button_progDek, 0, wx.ALIGN_CENTER|wx.ALL, 5 )
self.m_panel_progDek.SetSizer( bSizer_progDek )
self.m_panel_progDek.Layout()
bSizer_progDek.Fit( self.m_panel_progDek )
wSizer_loadSeq.Add( self.m_panel_progDek, 1, wx.EXPAND |wx.ALL, 5 )
self.m_panel_loadSeq.SetSizer( wSizer_loadSeq )
self.m_panel_loadSeq.Layout()
wSizer_loadSeq.Fit( self.m_panel_loadSeq )
self.m_notebook_imageSeq.AddPage( self.m_panel_loadSeq, u"Image Loading Sequence", False )
self.m_panel_fuseUtil = wx.Panel( self.m_notebook_imageSeq, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, wx.TAB_TRAVERSAL )
self.m_panel_fuseUtil.SetBackgroundColour( wx.SystemSettings.GetColour( wx.SYS_COLOUR_WINDOW ) )
wSizer_fuseUtil = wx.WrapSizer( wx.HORIZONTAL, wx.WRAPSIZER_DEFAULT_FLAGS )
bSizer_fuseGroupTxt0 = wx.BoxSizer( wx.VERTICAL )
self.m_button_fuse400 = wx.Button( self.m_panel_fuseUtil, wx.ID_ANY, u"Lock", wx.DefaultPosition, wx.Size( 31,20 ), 0 )
self.m_button_fuse400.SetFont( wx.Font( 8, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL, False, "Arial" ) )
bSizer_fuseGroupTxt0.Add( self.m_button_fuse400, 0, wx.ALL, 5 )
self.m_staticText_fuse410 = wx.StaticText( self.m_panel_fuseUtil, wx.ID_ANY, u"UUID0", wx.DefaultPosition, wx.Size( 31,20 ), 0 )
self.m_staticText_fuse410.Wrap( -1 )
self.m_staticText_fuse410.SetFont( wx.Font( 9, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL, False, "宋体" ) )
bSizer_fuseGroupTxt0.Add( self.m_staticText_fuse410, 0, wx.ALL, 5 )
self.m_staticText_fuse420 = wx.StaticText( self.m_panel_fuseUtil, wx.ID_ANY, u"UUID1", wx.DefaultPosition, wx.Size( 31,20 ), 0 )
self.m_staticText_fuse420.Wrap( -1 )
self.m_staticText_fuse420.SetFont( wx.Font( 9, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL, False, "宋体" ) )
bSizer_fuseGroupTxt0.Add( self.m_staticText_fuse420, 0, wx.ALL, 5 )
self.m_staticText_fuse430 = wx.StaticText( self.m_panel_fuseUtil, wx.ID_ANY, u"0x430:", wx.DefaultPosition, wx.Size( 31,20 ), 0 )
self.m_staticText_fuse430.Wrap( -1 )
self.m_staticText_fuse430.SetFont( wx.Font( 9, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL, False, "宋体" ) )
bSizer_fuseGroupTxt0.Add( self.m_staticText_fuse430, 0, wx.ALL, 5 )
self.m_staticText_fuse440 = wx.StaticText( self.m_panel_fuseUtil, wx.ID_ANY, u"0x440:", wx.DefaultPosition, wx.Size( 31,20 ), 0 )
self.m_staticText_fuse440.Wrap( -1 )
self.m_staticText_fuse440.SetFont( wx.Font( 9, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL, False, "宋体" ) )
bSizer_fuseGroupTxt0.Add( self.m_staticText_fuse440, 0, wx.ALL, 5 )
self.m_button_fuse450 = wx.Button( self.m_panel_fuseUtil, wx.ID_ANY, u"Cfg0", wx.DefaultPosition, wx.Size( 31,20 ), 0 )
self.m_button_fuse450.SetFont( wx.Font( 8, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL, False, "Arial" ) )
bSizer_fuseGroupTxt0.Add( self.m_button_fuse450, 0, wx.ALL, 5 )
self.m_button_fuse460 = wx.Button( self.m_panel_fuseUtil, wx.ID_ANY, u"Cfg1", wx.DefaultPosition, wx.Size( 31,20 ), 0 )
self.m_button_fuse460.SetFont( wx.Font( 8, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL, False, "Arial" ) )
bSizer_fuseGroupTxt0.Add( self.m_button_fuse460, 0, wx.ALL, 5 )
self.m_button_fuse470 = wx.Button( self.m_panel_fuseUtil, wx.ID_ANY, u"Cfg2", wx.DefaultPosition, wx.Size( 31,20 ), 0 )
self.m_button_fuse470.SetFont( wx.Font( 8, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL, False, "Arial" ) )
bSizer_fuseGroupTxt0.Add( self.m_button_fuse470, 0, wx.ALL, 5 )
self.m_staticText_fuse480 = wx.StaticText( self.m_panel_fuseUtil, wx.ID_ANY, u"0x480:", wx.DefaultPosition, wx.Size( 31,20 ), 0 )
self.m_staticText_fuse480.Wrap( -1 )
self.m_staticText_fuse480.SetFont( wx.Font( 9, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL, False, "宋体" ) )
bSizer_fuseGroupTxt0.Add( self.m_staticText_fuse480, 0, wx.ALL, 5 )
self.m_staticText_fuse490 = wx.StaticText( self.m_panel_fuseUtil, wx.ID_ANY, u"0x490:", wx.DefaultPosition, wx.Size( 31,20 ), 0 )
self.m_staticText_fuse490.Wrap( -1 )
self.m_staticText_fuse490.SetFont( wx.Font( 9, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL, False, "宋体" ) )
bSizer_fuseGroupTxt0.Add( self.m_staticText_fuse490, 0, wx.ALL, 5 )
self.m_staticText_fuse4a0 = wx.StaticText( self.m_panel_fuseUtil, wx.ID_ANY, u"0x4a0:", wx.DefaultPosition, wx.Size( 31,20 ), 0 )
self.m_staticText_fuse4a0.Wrap( -1 )
self.m_staticText_fuse4a0.SetFont( wx.Font( 9, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL, False, "宋体" ) )
bSizer_fuseGroupTxt0.Add( self.m_staticText_fuse4a0, 0, wx.ALL, 5 )
self.m_staticText_fuse4b0 = wx.StaticText( self.m_panel_fuseUtil, wx.ID_ANY, u"0x4b0:", wx.DefaultPosition, wx.Size( 31,20 ), 0 )
self.m_staticText_fuse4b0.Wrap( -1 )
self.m_staticText_fuse4b0.SetFont( wx.Font( 9, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL, False, "宋体" ) )
bSizer_fuseGroupTxt0.Add( self.m_staticText_fuse4b0, 0, wx.ALL, 5 )
self.m_staticText_fuse4c0 = wx.StaticText( self.m_panel_fuseUtil, wx.ID_ANY, u"0x4c0:", wx.DefaultPosition, wx.Size( 31,20 ), 0 )
self.m_staticText_fuse4c0.Wrap( -1 )
self.m_staticText_fuse4c0.SetFont( wx.Font( 9, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL, False, "宋体" ) )
bSizer_fuseGroupTxt0.Add( self.m_staticText_fuse4c0, 0, wx.ALL, 5 )
self.m_staticText_fuse4d0 = wx.StaticText( self.m_panel_fuseUtil, wx.ID_ANY, u"0x4d0:", wx.DefaultPosition, wx.Size( 31,20 ), 0 )
self.m_staticText_fuse4d0.Wrap( -1 )
self.m_staticText_fuse4d0.SetFont( wx.Font( 9, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL, False, "宋体" ) )
bSizer_fuseGroupTxt0.Add( self.m_staticText_fuse4d0, 0, wx.ALL, 5 )
wSizer_fuseUtil.Add( bSizer_fuseGroupTxt0, 1, wx.EXPAND, 5 )
bSizer_fuseGroupCtrl0 = wx.BoxSizer( wx.VERTICAL )
self.m_textCtrl_fuse400 = wx.TextCtrl( self.m_panel_fuseUtil, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.Size( 75,20 ), wx.TE_PROCESS_ENTER )
bSizer_fuseGroupCtrl0.Add( self.m_textCtrl_fuse400, 0, wx.ALL, 5 )
self.m_textCtrl_fuse410 = wx.TextCtrl( self.m_panel_fuseUtil, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.Size( 75,20 ), 0 )
bSizer_fuseGroupCtrl0.Add( self.m_textCtrl_fuse410, 0, wx.ALL, 5 )
self.m_textCtrl_fuse420 = wx.TextCtrl( self.m_panel_fuseUtil, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.Size( 75,20 ), 0 )
bSizer_fuseGroupCtrl0.Add( self.m_textCtrl_fuse420, 0, wx.ALL, 5 )
self.m_textCtrl_fuse430 = wx.TextCtrl( self.m_panel_fuseUtil, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.Size( 75,20 ), 0 )
bSizer_fuseGroupCtrl0.Add( self.m_textCtrl_fuse430, 0, wx.ALL, 5 )
self.m_textCtrl_fuse440 = wx.TextCtrl( self.m_panel_fuseUtil, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.Size( 75,20 ), 0 )
bSizer_fuseGroupCtrl0.Add( self.m_textCtrl_fuse440, 0, wx.ALL, 5 )
self.m_textCtrl_fuse450 = wx.TextCtrl( self.m_panel_fuseUtil, wx.ID_ANY, u"Boot Cfg0", wx.DefaultPosition, wx.Size( 75,20 ), wx.TE_PROCESS_ENTER )
bSizer_fuseGroupCtrl0.Add( self.m_textCtrl_fuse450, 0, wx.ALL, 5 )
self.m_textCtrl_fuse460 = wx.TextCtrl( self.m_panel_fuseUtil, wx.ID_ANY, u"Boot Cfg1", wx.DefaultPosition, wx.Size( 75,20 ), wx.TE_PROCESS_ENTER )
bSizer_fuseGroupCtrl0.Add( self.m_textCtrl_fuse460, 0, wx.ALL, 5 )
self.m_textCtrl_fuse470 = wx.TextCtrl( self.m_panel_fuseUtil, wx.ID_ANY, u"Boot Cfg2", wx.DefaultPosition, wx.Size( 75,20 ), wx.TE_PROCESS_ENTER )
bSizer_fuseGroupCtrl0.Add( self.m_textCtrl_fuse470, 0, wx.ALL, 5 )
self.m_textCtrl_fuse480 = wx.TextCtrl( self.m_panel_fuseUtil, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.Size( 75,20 ), 0 )
bSizer_fuseGroupCtrl0.Add( self.m_textCtrl_fuse480, 0, wx.ALL, 5 )
self.m_textCtrl_fuse490 = wx.TextCtrl( self.m_panel_fuseUtil, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.Size( 75,20 ), 0 )
bSizer_fuseGroupCtrl0.Add( self.m_textCtrl_fuse490, 0, wx.ALL, 5 )
self.m_textCtrl_fuse4a0 = wx.TextCtrl( self.m_panel_fuseUtil, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.Size( 75,20 ), 0 )
bSizer_fuseGroupCtrl0.Add( self.m_textCtrl_fuse4a0, 0, wx.ALL, 5 )
self.m_textCtrl_fuse4b0 = wx.TextCtrl( self.m_panel_fuseUtil, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.Size( 75,20 ), 0 )
bSizer_fuseGroupCtrl0.Add( self.m_textCtrl_fuse4b0, 0, wx.ALL, 5 )
self.m_textCtrl_fuse4c0 = wx.TextCtrl( self.m_panel_fuseUtil, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.Size( 75,20 ), 0 )
bSizer_fuseGroupCtrl0.Add( self.m_textCtrl_fuse4c0, 0, wx.ALL, 5 )
self.m_textCtrl_fuse4d0 = wx.TextCtrl( self.m_panel_fuseUtil, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.Size( 75,20 ), 0 )
bSizer_fuseGroupCtrl0.Add( self.m_textCtrl_fuse4d0, 0, wx.ALL, 5 )
wSizer_fuseUtil.Add( bSizer_fuseGroupCtrl0, 1, wx.EXPAND, 5 )
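# Second label column: 0x4e0-0x4f0, the OTPMK words and SRK0-3.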
bSizer_fuseGroupTxt1 = wx.BoxSizer( wx.VERTICAL )
self.m_staticText_fuse4e0 = wx.StaticText( self.m_panel_fuseUtil, wx.ID_ANY, u"0x4e0:", wx.DefaultPosition, wx.Size( 31,20 ), 0 )
self.m_staticText_fuse4e0.Wrap( -1 )
self.m_staticText_fuse4e0.SetFont( wx.Font( 9, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL, False, "宋体" ) )
bSizer_fuseGroupTxt1.Add( self.m_staticText_fuse4e0, 0, wx.ALL, 5 )
self.m_staticText_fuse4f0 = wx.StaticText( self.m_panel_fuseUtil, wx.ID_ANY, u"0x4f0:", wx.DefaultPosition, wx.Size( 31,20 ), 0 )
self.m_staticText_fuse4f0.Wrap( -1 )
self.m_staticText_fuse4f0.SetFont( wx.Font( 9, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL, False, "宋体" ) )
bSizer_fuseGroupTxt1.Add( self.m_staticText_fuse4f0, 0, wx.ALL, 5 )
self.m_staticText_fuse500 = wx.StaticText( self.m_panel_fuseUtil, wx.ID_ANY, u"OTPMK", wx.DefaultPosition, wx.Size( 31,20 ), 0 )
self.m_staticText_fuse500.Wrap( -1 )
self.m_staticText_fuse500.SetFont( wx.Font( 9, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL, False, "宋体" ) )
bSizer_fuseGroupTxt1.Add( self.m_staticText_fuse500, 0, wx.ALL, 5 )
self.m_staticText_fuse510 = wx.StaticText( self.m_panel_fuseUtil, wx.ID_ANY, u"OTPMK", wx.DefaultPosition, wx.Size( 31,20 ), 0 )
self.m_staticText_fuse510.Wrap( -1 )
self.m_staticText_fuse510.SetFont( wx.Font( 9, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL, False, "宋体" ) )
bSizer_fuseGroupTxt1.Add( self.m_staticText_fuse510, 0, wx.ALL, 5 )
self.m_staticText_fuse520 = wx.StaticText( self.m_panel_fuseUtil, wx.ID_ANY, u"OTPMK", wx.DefaultPosition, wx.Size( 31,20 ), 0 )
self.m_staticText_fuse520.Wrap( -1 )
self.m_staticText_fuse520.SetFont( wx.Font( 9, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL, False, "宋体" ) )
bSizer_fuseGroupTxt1.Add( self.m_staticText_fuse520, 0, wx.ALL, 5 )
self.m_staticText_fuse530 = wx.StaticText( self.m_panel_fuseUtil, wx.ID_ANY, u"OTPMK", wx.DefaultPosition, wx.Size( 31,20 ), 0 )
self.m_staticText_fuse530.Wrap( -1 )
self.m_staticText_fuse530.SetFont( wx.Font( 9, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL, False, "宋体" ) )
bSizer_fuseGroupTxt1.Add( self.m_staticText_fuse530, 0, wx.ALL, 5 )
self.m_staticText_fuse540 = wx.StaticText( self.m_panel_fuseUtil, wx.ID_ANY, u"OTPMK", wx.DefaultPosition, wx.Size( 31,20 ), 0 )
self.m_staticText_fuse540.Wrap( -1 )
self.m_staticText_fuse540.SetFont( wx.Font( 9, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL, False, "宋体" ) )
bSizer_fuseGroupTxt1.Add( self.m_staticText_fuse540, 0, wx.ALL, 5 )
self.m_staticText_fuse550 = wx.StaticText( self.m_panel_fuseUtil, wx.ID_ANY, u"OTPMK", wx.DefaultPosition, wx.Size( 31,20 ), 0 )
self.m_staticText_fuse550.Wrap( -1 )
self.m_staticText_fuse550.SetFont( wx.Font( 9, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL, False, "宋体" ) )
bSizer_fuseGroupTxt1.Add( self.m_staticText_fuse550, 0, wx.ALL, 5 )
self.m_staticText_fuse560 = wx.StaticText( self.m_panel_fuseUtil, wx.ID_ANY, u"OTPMK", wx.DefaultPosition, wx.Size( 31,20 ), 0 )
self.m_staticText_fuse560.Wrap( -1 )
self.m_staticText_fuse560.SetFont( wx.Font( 9, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL, False, "宋体" ) )
bSizer_fuseGroupTxt1.Add( self.m_staticText_fuse560, 0, wx.ALL, 5 )
self.m_staticText_fuse570 = wx.StaticText( self.m_panel_fuseUtil, wx.ID_ANY, u"OTPMK", wx.DefaultPosition, wx.Size( 31,20 ), 0 )
self.m_staticText_fuse570.Wrap( -1 )
self.m_staticText_fuse570.SetFont( wx.Font( 9, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL, False, "宋体" ) )
bSizer_fuseGroupTxt1.Add( self.m_staticText_fuse570, 0, wx.ALL, 5 )
self.m_staticText_fuse580 = wx.StaticText( self.m_panel_fuseUtil, wx.ID_ANY, u"SRK0", wx.DefaultPosition, wx.Size( 31,20 ), 0 )
self.m_staticText_fuse580.Wrap( -1 )
self.m_staticText_fuse580.SetFont( wx.Font( wx.NORMAL_FONT.GetPointSize(), wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL, False, wx.EmptyString ) )
self.m_staticText_fuse580.SetBackgroundColour( wx.SystemSettings.GetColour( wx.SYS_COLOUR_WINDOW ) )
bSizer_fuseGroupTxt1.Add( self.m_staticText_fuse580, 0, wx.ALL, 5 )
self.m_staticText_fuse590 = wx.StaticText( self.m_panel_fuseUtil, wx.ID_ANY, u"SRK1", wx.DefaultPosition, wx.Size( 31,20 ), 0 )
self.m_staticText_fuse590.Wrap( -1 )
self.m_staticText_fuse590.SetFont( wx.Font( wx.NORMAL_FONT.GetPointSize(), wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL, False, wx.EmptyString ) )
self.m_staticText_fuse590.SetBackgroundColour( wx.SystemSettings.GetColour( wx.SYS_COLOUR_WINDOW ) )
bSizer_fuseGroupTxt1.Add( self.m_staticText_fuse590, 0, wx.ALL, 5 )
self.m_staticText_fuse5a0 = wx.StaticText( self.m_panel_fuseUtil, wx.ID_ANY, u"SRK2", wx.DefaultPosition, wx.Size( 31,20 ), 0 )
self.m_staticText_fuse5a0.Wrap( -1 )
self.m_staticText_fuse5a0.SetFont( wx.Font( wx.NORMAL_FONT.GetPointSize(), wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL, False, wx.EmptyString ) )
self.m_staticText_fuse5a0.SetBackgroundColour( wx.SystemSettings.GetColour( wx.SYS_COLOUR_WINDOW ) )
bSizer_fuseGroupTxt1.Add( self.m_staticText_fuse5a0, 0, wx.ALL, 5 )
self.m_staticText_fuse5b0 = wx.StaticText( self.m_panel_fuseUtil, wx.ID_ANY, u"SRK3", wx.DefaultPosition, wx.Size( 31,20 ), 0 )
self.m_staticText_fuse5b0.Wrap( -1 )
self.m_staticText_fuse5b0.SetFont( wx.Font( wx.NORMAL_FONT.GetPointSize(), wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL, False, wx.EmptyString ) )
self.m_staticText_fuse5b0.SetBackgroundColour( wx.SystemSettings.GetColour( wx.SYS_COLOUR_WINDOW ) )
bSizer_fuseGroupTxt1.Add( self.m_staticText_fuse5b0, 0, wx.ALL, 5 )
wSizer_fuseUtil.Add( bSizer_fuseGroupTxt1, 1, wx.EXPAND, 5 )
bSizer_fuseGroupCtrl1 = wx.BoxSizer( wx.VERTICAL )
self.m_textCtrl_fuse4e0 = wx.TextCtrl( self.m_panel_fuseUtil, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.Size( 75,20 ), 0 )
bSizer_fuseGroupCtrl1.Add( self.m_textCtrl_fuse4e0, 0, wx.ALL, 5 )
self.m_textCtrl_fuse4f0 = wx.TextCtrl( self.m_panel_fuseUtil, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.Size( 75,20 ), 0 )
bSizer_fuseGroupCtrl1.Add( self.m_textCtrl_fuse4f0, 0, wx.ALL, 5 )
self.m_textCtrl_fuse500 = wx.TextCtrl( self.m_panel_fuseUtil, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.Size( 75,20 ), 0 )
bSizer_fuseGroupCtrl1.Add( self.m_textCtrl_fuse500, 0, wx.ALL, 5 )
self.m_textCtrl_fuse510 = wx.TextCtrl( self.m_panel_fuseUtil, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.Size( 75,20 ), 0 )
bSizer_fuseGroupCtrl1.Add( self.m_textCtrl_fuse510, 0, wx.ALL, 5 )
self.m_textCtrl_fuse520 = wx.TextCtrl( self.m_panel_fuseUtil, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.Size( 75,20 ), 0 )
bSizer_fuseGroupCtrl1.Add( self.m_textCtrl_fuse520, 0, wx.ALL, 5 )
self.m_textCtrl_fuse530 = wx.TextCtrl( self.m_panel_fuseUtil, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.Size( 75,20 ), 0 )
bSizer_fuseGroupCtrl1.Add( self.m_textCtrl_fuse530, 0, wx.ALL, 5 )
self.m_textCtrl_fuse540 = wx.TextCtrl( self.m_panel_fuseUtil, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.Size( 75,20 ), 0 )
bSizer_fuseGroupCtrl1.Add( self.m_textCtrl_fuse540, 0, wx.ALL, 5 )
self.m_textCtrl_fuse550 = wx.TextCtrl( self.m_panel_fuseUtil, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.Size( 75,20 ), 0 )
bSizer_fuseGroupCtrl1.Add( self.m_textCtrl_fuse550, 0, wx.ALL, 5 )
self.m_textCtrl_fuse560 = wx.TextCtrl( self.m_panel_fuseUtil, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.Size( 75,20 ), 0 )
bSizer_fuseGroupCtrl1.Add( self.m_textCtrl_fuse560, 0, wx.ALL, 5 )
self.m_textCtrl_fuse570 = wx.TextCtrl( self.m_panel_fuseUtil, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.Size( 75,20 ), 0 )
bSizer_fuseGroupCtrl1.Add( self.m_textCtrl_fuse570, 0, wx.ALL, 5 )
self.m_textCtrl_fuse580 = wx.TextCtrl( self.m_panel_fuseUtil, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.Size( 75,20 ), 0 )
bSizer_fuseGroupCtrl1.Add( self.m_textCtrl_fuse580, 0, wx.ALL, 5 )
self.m_textCtrl_fuse590 = wx.TextCtrl( self.m_panel_fuseUtil, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.Size( 75,20 ), 0 )
bSizer_fuseGroupCtrl1.Add( self.m_textCtrl_fuse590, 0, wx.ALL, 5 )
self.m_textCtrl_fuse5a0 = wx.TextCtrl( self.m_panel_fuseUtil, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.Size( 75,20 ), 0 )
bSizer_fuseGroupCtrl1.Add( self.m_textCtrl_fuse5a0, 0, wx.ALL, 5 )
self.m_textCtrl_fuse5b0 = wx.TextCtrl( self.m_panel_fuseUtil, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.Size( 75,20 ), 0 )
bSizer_fuseGroupCtrl1.Add( self.m_textCtrl_fuse5b0, 0, wx.ALL, 5 )
wSizer_fuseUtil.Add( bSizer_fuseGroupCtrl1, 1, wx.EXPAND, 5 )
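# Third label column: SRK4-7, 0x600-0x680 and the first SwGp2 word.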
bSizer_fuseGroupTxt2 = wx.BoxSizer( wx.VERTICAL )
self.m_staticText_fuse5c0 = wx.StaticText( self.m_panel_fuseUtil, wx.ID_ANY, u"SRK4", wx.DefaultPosition, wx.Size( 31,20 ), 0 )
self.m_staticText_fuse5c0.Wrap( -1 )
self.m_staticText_fuse5c0.SetFont( wx.Font( wx.NORMAL_FONT.GetPointSize(), wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL, False, wx.EmptyString ) )
self.m_staticText_fuse5c0.SetBackgroundColour( wx.SystemSettings.GetColour( wx.SYS_COLOUR_WINDOW ) )
bSizer_fuseGroupTxt2.Add( self.m_staticText_fuse5c0, 0, wx.ALL, 5 )
self.m_staticText_fuse5d0 = wx.StaticText( self.m_panel_fuseUtil, wx.ID_ANY, u"SRK5", wx.DefaultPosition, wx.Size( 31,20 ), 0 )
self.m_staticText_fuse5d0.Wrap( -1 )
self.m_staticText_fuse5d0.SetFont( wx.Font( wx.NORMAL_FONT.GetPointSize(), wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL, False, wx.EmptyString ) )
self.m_staticText_fuse5d0.SetBackgroundColour( wx.SystemSettings.GetColour( wx.SYS_COLOUR_WINDOW ) )
bSizer_fuseGroupTxt2.Add( self.m_staticText_fuse5d0, 0, wx.ALL, 5 )
self.m_staticText_fuse5e0 = wx.StaticText( self.m_panel_fuseUtil, wx.ID_ANY, u"SRK6", wx.DefaultPosition, wx.Size( 31,20 ), 0 )
self.m_staticText_fuse5e0.Wrap( -1 )
self.m_staticText_fuse5e0.SetFont( wx.Font( wx.NORMAL_FONT.GetPointSize(), wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL, False, wx.EmptyString ) )
self.m_staticText_fuse5e0.SetBackgroundColour( wx.SystemSettings.GetColour( wx.SYS_COLOUR_WINDOW ) )
bSizer_fuseGroupTxt2.Add( self.m_staticText_fuse5e0, 0, wx.ALL, 5 )
self.m_staticText_fuse5f0 = wx.StaticText( self.m_panel_fuseUtil, wx.ID_ANY, u"SRK7", wx.DefaultPosition, wx.Size( 31,20 ), 0 )
self.m_staticText_fuse5f0.Wrap( -1 )
self.m_staticText_fuse5f0.SetFont( wx.Font( wx.NORMAL_FONT.GetPointSize(), wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL, False, wx.EmptyString ) )
self.m_staticText_fuse5f0.SetBackgroundColour( wx.SystemSettings.GetColour( wx.SYS_COLOUR_WINDOW ) )
bSizer_fuseGroupTxt2.Add( self.m_staticText_fuse5f0, 0, wx.ALL, 5 )
self.m_staticText_fuse600 = wx.StaticText( self.m_panel_fuseUtil, wx.ID_ANY, u"0x600:", wx.DefaultPosition, wx.Size( 31,20 ), 0 )
self.m_staticText_fuse600.Wrap( -1 )
self.m_staticText_fuse600.SetFont( wx.Font( 9, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL, False, "宋体" ) )
bSizer_fuseGroupTxt2.Add( self.m_staticText_fuse600, 0, wx.ALL, 5 )
self.m_staticText_fuse610 = wx.StaticText( self.m_panel_fuseUtil, wx.ID_ANY, u"0x610:", wx.DefaultPosition, wx.Size( 31,20 ), 0 )
self.m_staticText_fuse610.Wrap( -1 )
self.m_staticText_fuse610.SetFont( wx.Font( 9, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL, False, "宋体" ) )
bSizer_fuseGroupTxt2.Add( self.m_staticText_fuse610, 0, wx.ALL, 5 )
self.m_staticText_fuse620 = wx.StaticText( self.m_panel_fuseUtil, wx.ID_ANY, u"0x620:", wx.DefaultPosition, wx.Size( 31,20 ), 0 )
self.m_staticText_fuse620.Wrap( -1 )
self.m_staticText_fuse620.SetFont( wx.Font( 9, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL, False, "宋体" ) )
bSizer_fuseGroupTxt2.Add( self.m_staticText_fuse620, 0, wx.ALL, 5 )
self.m_staticText_fuse630 = wx.StaticText( self.m_panel_fuseUtil, wx.ID_ANY, u"0x630:", wx.DefaultPosition, wx.Size( 31,20 ), 0 )
self.m_staticText_fuse630.Wrap( -1 )
self.m_staticText_fuse630.SetFont( wx.Font( 9, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL, False, "宋体" ) )
bSizer_fuseGroupTxt2.Add( self.m_staticText_fuse630, 0, wx.ALL, 5 )
self.m_staticText_fuse640 = wx.StaticText( self.m_panel_fuseUtil, wx.ID_ANY, u"0x640:", wx.DefaultPosition, wx.Size( 31,20 ), 0 )
self.m_staticText_fuse640.Wrap( -1 )
self.m_staticText_fuse640.SetFont( wx.Font( 9, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL, False, "宋体" ) )
bSizer_fuseGroupTxt2.Add( self.m_staticText_fuse640, 0, wx.ALL, 5 )
self.m_staticText_fuse650 = wx.StaticText( self.m_panel_fuseUtil, wx.ID_ANY, u"0x650:", wx.DefaultPosition, wx.Size( 31,20 ), 0 )
self.m_staticText_fuse650.Wrap( -1 )
self.m_staticText_fuse650.SetFont( wx.Font( 9, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL, False, "宋体" ) )
bSizer_fuseGroupTxt2.Add( self.m_staticText_fuse650, 0, wx.ALL, 5 )
self.m_staticText_fuse660 = wx.StaticText( self.m_panel_fuseUtil, wx.ID_ANY, u"0x660:", wx.DefaultPosition, wx.Size( 31,20 ), 0 )
self.m_staticText_fuse660.Wrap( -1 )
self.m_staticText_fuse660.SetFont( wx.Font( 9, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL, False, "宋体" ) )
bSizer_fuseGroupTxt2.Add( self.m_staticText_fuse660, 0, wx.ALL, 5 )
self.m_staticText_fuse670 = wx.StaticText( self.m_panel_fuseUtil, wx.ID_ANY, u"0x670:", wx.DefaultPosition, wx.Size( 31,20 ), 0 )
self.m_staticText_fuse670.Wrap( -1 )
self.m_staticText_fuse670.SetFont( wx.Font( 9, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL, False, "宋体" ) )
bSizer_fuseGroupTxt2.Add( self.m_staticText_fuse670, 0, wx.ALL, 5 )
self.m_staticText_fuse680 = wx.StaticText( self.m_panel_fuseUtil, wx.ID_ANY, u"0x680:", wx.DefaultPosition, wx.Size( 31,20 ), 0 )
self.m_staticText_fuse680.Wrap( -1 )
self.m_staticText_fuse680.SetFont( wx.Font( 9, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL, False, "宋体" ) )
bSizer_fuseGroupTxt2.Add( self.m_staticText_fuse680, 0, wx.ALL, 5 )
self.m_staticText_fuse690 = wx.StaticText( self.m_panel_fuseUtil, wx.ID_ANY, u"SwGp2", wx.DefaultPosition, wx.Size( 31,20 ), 0 )
self.m_staticText_fuse690.Wrap( -1 )
self.m_staticText_fuse690.SetFont( wx.Font( wx.NORMAL_FONT.GetPointSize(), wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL, False, wx.EmptyString ) )
self.m_staticText_fuse690.SetBackgroundColour( wx.SystemSettings.GetColour( wx.SYS_COLOUR_WINDOW ) )
bSizer_fuseGroupTxt2.Add( self.m_staticText_fuse690, 0, wx.ALL, 5 )
wSizer_fuseUtil.Add( bSizer_fuseGroupTxt2, 1, wx.EXPAND, 5 )
bSizer_fuseGroupCtrl2 = wx.BoxSizer( wx.VERTICAL )
self.m_textCtrl_fuse5c0 = wx.TextCtrl( self.m_panel_fuseUtil, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.Size( 75,20 ), 0 )
bSizer_fuseGroupCtrl2.Add( self.m_textCtrl_fuse5c0, 0, wx.ALL, 5 )
self.m_textCtrl_fuse5d0 = wx.TextCtrl( self.m_panel_fuseUtil, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.Size( 75,20 ), 0 )
bSizer_fuseGroupCtrl2.Add( self.m_textCtrl_fuse5d0, 0, wx.ALL, 5 )
self.m_textCtrl_fuse5e0 = wx.TextCtrl( self.m_panel_fuseUtil, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.Size( 75,20 ), 0 )
bSizer_fuseGroupCtrl2.Add( self.m_textCtrl_fuse5e0, 0, wx.ALL, 5 )
self.m_textCtrl_fuse5f0 = wx.TextCtrl( self.m_panel_fuseUtil, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.Size( 75,20 ), 0 )
bSizer_fuseGroupCtrl2.Add( self.m_textCtrl_fuse5f0, 0, wx.ALL, 5 )
self.m_textCtrl_fuse600 = wx.TextCtrl( self.m_panel_fuseUtil, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.Size( 75,20 ), 0 )
bSizer_fuseGroupCtrl2.Add( self.m_textCtrl_fuse600, 0, wx.ALL, 5 )
self.m_textCtrl_fuse610 = wx.TextCtrl( self.m_panel_fuseUtil, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.Size( 75,20 ), 0 )
bSizer_fuseGroupCtrl2.Add( self.m_textCtrl_fuse610, 0, wx.ALL, 5 )
self.m_textCtrl_fuse620 = wx.TextCtrl( self.m_panel_fuseUtil, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.Size( 75,20 ), 0 )
bSizer_fuseGroupCtrl2.Add( self.m_textCtrl_fuse620, 0, wx.ALL, 5 )
self.m_textCtrl_fuse630 = wx.TextCtrl( self.m_panel_fuseUtil, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.Size( 75,20 ), 0 )
bSizer_fuseGroupCtrl2.Add( self.m_textCtrl_fuse630, 0, wx.ALL, 5 )
self.m_textCtrl_fuse640 = wx.TextCtrl( self.m_panel_fuseUtil, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.Size( 75,20 ), 0 )
bSizer_fuseGroupCtrl2.Add( self.m_textCtrl_fuse640, 0, wx.ALL, 5 )
self.m_textCtrl_fuse650 = wx.TextCtrl( self.m_panel_fuseUtil, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.Size( 75,20 ), 0 )
bSizer_fuseGroupCtrl2.Add( self.m_textCtrl_fuse650, 0, wx.ALL, 5 )
self.m_textCtrl_fuse660 = wx.TextCtrl( self.m_panel_fuseUtil, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.Size( 75,20 ), 0 )
bSizer_fuseGroupCtrl2.Add( self.m_textCtrl_fuse660, 0, wx.ALL, 5 )
self.m_textCtrl_fuse670 = wx.TextCtrl( self.m_panel_fuseUtil, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.Size( 75,20 ), 0 )
bSizer_fuseGroupCtrl2.Add( self.m_textCtrl_fuse670, 0, wx.ALL, 5 )
self.m_textCtrl_fuse680 = wx.TextCtrl( self.m_panel_fuseUtil, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.Size( 75,20 ), 0 )
bSizer_fuseGroupCtrl2.Add( self.m_textCtrl_fuse680, 0, wx.ALL, 5 )
self.m_textCtrl_fuse690 = wx.TextCtrl( self.m_panel_fuseUtil, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.Size( 75,20 ), 0 )
bSizer_fuseGroupCtrl2.Add( self.m_textCtrl_fuse690, 0, wx.ALL, 5 )
wSizer_fuseUtil.Add( bSizer_fuseGroupCtrl2, 1, wx.EXPAND, 5 )
bSizer_fuseGroupTxt3 = wx.BoxSizer( wx.VERTICAL )
self.m_staticText_fuse6a0 = wx.StaticText( self.m_panel_fuseUtil, wx.ID_ANY, u"SwGp2", wx.DefaultPosition, wx.Size( 31,20 ), 0 )
self.m_staticText_fuse6a0.Wrap( -1 )
self.m_staticText_fuse6a0.SetFont( wx.Font( wx.NORMAL_FONT.GetPointSize(), wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL, False, wx.EmptyString ) )
self.m_staticText_fuse6a0.SetBackgroundColour( wx.SystemSettings.GetColour( wx.SYS_COLOUR_WINDOW ) )
bSizer_fuseGroupTxt3.Add( self.m_staticText_fuse6a0, 0, wx.ALL, 5 )
self.m_staticText_fuse6b0 = wx.StaticText( self.m_panel_fuseUtil, wx.ID_ANY, u"SwGp2", wx.DefaultPosition, wx.Size( 31,20 ), 0 )
self.m_staticText_fuse6b0.Wrap( -1 )
self.m_staticText_fuse6b0.SetFont( wx.Font( wx.NORMAL_FONT.GetPointSize(), wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL, False, wx.EmptyString ) )
self.m_staticText_fuse6b0.SetBackgroundColour( wx.SystemSettings.GetColour( wx.SYS_COLOUR_WINDOW ) )
bSizer_fuseGroupTxt3.Add( self.m_staticText_fuse6b0, 0, wx.ALL, 5 )
self.m_staticText_fuse6c0 = wx.StaticText( self.m_panel_fuseUtil, wx.ID_ANY, u"SwGp2", wx.DefaultPosition, wx.Size( 31,20 ), 0 )
self.m_staticText_fuse6c0.Wrap( -1 )
self.m_staticText_fuse6c0.SetFont( wx.Font( wx.NORMAL_FONT.GetPointSize(), wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL, False, wx.EmptyString ) )
self.m_staticText_fuse6c0.SetBackgroundColour( wx.SystemSettings.GetColour( wx.SYS_COLOUR_WINDOW ) )
bSizer_fuseGroupTxt3.Add( self.m_staticText_fuse6c0, 0, wx.ALL, 5 )
self.m_button_fuse6d0 = wx.Button( self.m_panel_fuseUtil, wx.ID_ANY, u"Conf0", wx.DefaultPosition, wx.Size( 31,20 ), 0 )
self.m_button_fuse6d0.SetFont( wx.Font( 7, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL, False, "Arial" ) )
bSizer_fuseGroupTxt3.Add( self.m_button_fuse6d0, 0, wx.ALL, 5 )
self.m_button_fuse6e0 = wx.Button( self.m_panel_fuseUtil, wx.ID_ANY, u"Conf1", wx.DefaultPosition, wx.Size( 31,20 ), 0 )
self.m_button_fuse6e0.SetFont( wx.Font( 7, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL, False, "Arial" ) )
bSizer_fuseGroupTxt3.Add( self.m_button_fuse6e0, 0, wx.ALL, 5 )
self.m_staticText_fuse6f0 = wx.StaticText( self.m_panel_fuseUtil, wx.ID_ANY, u"0x6f0:", wx.DefaultPosition, wx.Size( 31,20 ), 0 )
self.m_staticText_fuse6f0.Wrap( -1 )
self.m_staticText_fuse6f0.SetFont( wx.Font( 9, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL, False, "宋体" ) )
bSizer_fuseGroupTxt3.Add( self.m_staticText_fuse6f0, 0, wx.ALL, 5 )
self.m_staticText_fuse700 = wx.StaticText( self.m_panel_fuseUtil, wx.ID_ANY, u"0x700:", wx.DefaultPosition, wx.Size( 31,20 ), 0 )
self.m_staticText_fuse700.Wrap( -1 )
self.m_staticText_fuse700.SetFont( wx.Font( 9, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL, False, "宋体" ) )
bSizer_fuseGroupTxt3.Add( self.m_staticText_fuse700, 0, wx.ALL, 5 )
self.m_staticText_fuse710 = wx.StaticText( self.m_panel_fuseUtil, wx.ID_ANY, u"0x710:", wx.DefaultPosition, wx.Size( 31,20 ), 0 )
self.m_staticText_fuse710.Wrap( -1 )
self.m_staticText_fuse710.SetFont( wx.Font( 9, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL, False, "宋体" ) )
bSizer_fuseGroupTxt3.Add( self.m_staticText_fuse710, 0, wx.ALL, 5 )
self.m_staticText_fuse720 = wx.StaticText( self.m_panel_fuseUtil, wx.ID_ANY, u"0x720:", wx.DefaultPosition, wx.Size( 31,20 ), 0 )
self.m_staticText_fuse720.Wrap( -1 )
self.m_staticText_fuse720.SetFont( wx.Font( 9, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL, False, "宋体" ) )
bSizer_fuseGroupTxt3.Add( self.m_staticText_fuse720, 0, wx.ALL, 5 )
self.m_staticText_fuse730 = wx.StaticText( self.m_panel_fuseUtil, wx.ID_ANY, u"0x730:", wx.DefaultPosition, wx.Size( 31,20 ), 0 )
self.m_staticText_fuse730.Wrap( -1 )
self.m_staticText_fuse730.SetFont( wx.Font( 9, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL, False, "宋体" ) )
bSizer_fuseGroupTxt3.Add( self.m_staticText_fuse730, 0, wx.ALL, 5 )
self.m_staticText_fuse740 = wx.StaticText( self.m_panel_fuseUtil, wx.ID_ANY, u"0x740:", wx.DefaultPosition, wx.Size( 31,20 ), 0 )
self.m_staticText_fuse740.Wrap( -1 )
self.m_staticText_fuse740.SetFont( wx.Font( 9, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL, False, "宋体" ) )
bSizer_fuseGroupTxt3.Add( self.m_staticText_fuse740, 0, wx.ALL, 5 )
self.m_staticText_fuse750 = wx.StaticText( self.m_panel_fuseUtil, wx.ID_ANY, u"0x750:", wx.DefaultPosition, wx.Size( 31,20 ), 0 )
self.m_staticText_fuse750.Wrap( -1 )
self.m_staticText_fuse750.SetFont( wx.Font( 9, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL, False, "宋体" ) )
bSizer_fuseGroupTxt3.Add( self.m_staticText_fuse750, 0, wx.ALL, 5 )
self.m_staticText_fuse760 = wx.StaticText( self.m_panel_fuseUtil, wx.ID_ANY, u"0x760:", wx.DefaultPosition, wx.Size( 31,20 ), 0 )
self.m_staticText_fuse760.Wrap( -1 )
self.m_staticText_fuse760.SetFont( wx.Font( 9, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL, False, "宋体" ) )
bSizer_fuseGroupTxt3.Add( self.m_staticText_fuse760, 0, wx.ALL, 5 )
self.m_staticText_fuse770 = wx.StaticText( self.m_panel_fuseUtil, wx.ID_ANY, u"0x770:", wx.DefaultPosition, wx.Size( 31,20 ), 0 )
self.m_staticText_fuse770.Wrap( -1 )
self.m_staticText_fuse770.SetFont( wx.Font( 9, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL, False, "宋体" ) )
bSizer_fuseGroupTxt3.Add( self.m_staticText_fuse770, 0, wx.ALL, 5 )
wSizer_fuseUtil.Add( bSizer_fuseGroupTxt3, 1, wx.EXPAND, 5 )
bSizer_fuseGroupCtrl3 = wx.BoxSizer( wx.VERTICAL )
self.m_textCtrl_fuse6a0 = wx.TextCtrl( self.m_panel_fuseUtil, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.Size( 75,20 ), 0 )
bSizer_fuseGroupCtrl3.Add( self.m_textCtrl_fuse6a0, 0, wx.ALL, 5 )
self.m_textCtrl_fuse6b0 = wx.TextCtrl( self.m_panel_fuseUtil, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.Size( 75,20 ), 0 )
bSizer_fuseGroupCtrl3.Add( self.m_textCtrl_fuse6b0, 0, wx.ALL, 5 )
self.m_textCtrl_fuse6c0 = wx.TextCtrl( self.m_panel_fuseUtil, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.Size( 75,20 ), 0 )
bSizer_fuseGroupCtrl3.Add( self.m_textCtrl_fuse6c0, 0, wx.ALL, 5 )
self.m_textCtrl_fuse6d0 = wx.TextCtrl( self.m_panel_fuseUtil, wx.ID_ANY, u"Misc Conf0", wx.DefaultPosition, wx.Size( 75,20 ), wx.TE_PROCESS_ENTER )
bSizer_fuseGroupCtrl3.Add( self.m_textCtrl_fuse6d0, 0, wx.ALL, 5 )
self.m_textCtrl_fuse6e0 = wx.TextCtrl( self.m_panel_fuseUtil, wx.ID_ANY, u"Misc Conf1", wx.DefaultPosition, wx.Size( 75,20 ), wx.TE_PROCESS_ENTER )
self.m_textCtrl_fuse6e0.SetBackgroundColour( wx.SystemSettings.GetColour( wx.SYS_COLOUR_WINDOW ) )
bSizer_fuseGroupCtrl3.Add( self.m_textCtrl_fuse6e0, 0, wx.ALL, 5 )
self.m_textCtrl_fuse6f0 = wx.TextCtrl( self.m_panel_fuseUtil, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.Size( 75,20 ), 0 )
bSizer_fuseGroupCtrl3.Add( self.m_textCtrl_fuse6f0, 0, wx.ALL, 5 )
self.m_textCtrl_fuse700 = wx.TextCtrl( self.m_panel_fuseUtil, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.Size( 75,20 ), 0 )
bSizer_fuseGroupCtrl3.Add( self.m_textCtrl_fuse700, 0, wx.ALL, 5 )
self.m_textCtrl_fuse710 = wx.TextCtrl( self.m_panel_fuseUtil, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.Size( 75,20 ), 0 )
bSizer_fuseGroupCtrl3.Add( self.m_textCtrl_fuse710, 0, wx.ALL, 5 )
self.m_textCtrl_fuse720 = wx.TextCtrl( self.m_panel_fuseUtil, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.Size( 75,20 ), 0 )
bSizer_fuseGroupCtrl3.Add( self.m_textCtrl_fuse720, 0, wx.ALL, 5 )
self.m_textCtrl_fuse730 = wx.TextCtrl( self.m_panel_fuseUtil, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.Size( 75,20 ), 0 )
bSizer_fuseGroupCtrl3.Add( self.m_textCtrl_fuse730, 0, wx.ALL, 5 )
self.m_textCtrl_fuse740 = wx.TextCtrl( self.m_panel_fuseUtil, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.Size( 75,20 ), 0 )
bSizer_fuseGroupCtrl3.Add( self.m_textCtrl_fuse740, 0, wx.ALL, 5 )
self.m_textCtrl_fuse750 = wx.TextCtrl( self.m_panel_fuseUtil, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.Size( 75,20 ), 0 )
bSizer_fuseGroupCtrl3.Add( self.m_textCtrl_fuse750, 0, wx.ALL, 5 )
self.m_textCtrl_fuse760 = wx.TextCtrl( self.m_panel_fuseUtil, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.Size( 75,20 ), 0 )
bSizer_fuseGroupCtrl3.Add( self.m_textCtrl_fuse760, 0, wx.ALL, 5 )
self.m_textCtrl_fuse770 = wx.TextCtrl( self.m_panel_fuseUtil, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.Size( 75,20 ), 0 )
bSizer_fuseGroupCtrl3.Add( self.m_textCtrl_fuse770, 0, wx.ALL, 5 )
wSizer_fuseUtil.Add( bSizer_fuseGroupCtrl3, 1, wx.EXPAND, 5 )
bSizer_fuseGroupTxt4 = wx.BoxSizer( wx.VERTICAL )
self.m_staticText_fuse780 = wx.StaticText( self.m_panel_fuseUtil, wx.ID_ANY, u"0x780:", wx.DefaultPosition, wx.Size( 31,20 ), 0 )
self.m_staticText_fuse780.Wrap( -1 )
self.m_staticText_fuse780.SetFont( wx.Font( 9, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL, False, "宋体" ) )
bSizer_fuseGroupTxt4.Add( self.m_staticText_fuse780, 0, wx.ALL, 5 )
self.m_staticText_fuse790 = wx.StaticText( self.m_panel_fuseUtil, wx.ID_ANY, u"0x790:", wx.DefaultPosition, wx.Size( 31,20 ), 0 )
self.m_staticText_fuse790.Wrap( -1 )
self.m_staticText_fuse790.SetFont( wx.Font( 9, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL, False, "宋体" ) )
bSizer_fuseGroupTxt4.Add( self.m_staticText_fuse790, 0, wx.ALL, 5 )
self.m_staticText_fuse7a0 = wx.StaticText( self.m_panel_fuseUtil, wx.ID_ANY, u"0x7a0:", wx.DefaultPosition, wx.Size( 31,20 ), 0 )
self.m_staticText_fuse7a0.Wrap( -1 )
self.m_staticText_fuse7a0.SetFont( wx.Font( 9, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL, False, "宋体" ) )
bSizer_fuseGroupTxt4.Add( self.m_staticText_fuse7a0, 0, wx.ALL, 5 )
self.m_staticText_fuse7b0 = wx.StaticText( self.m_panel_fuseUtil, wx.ID_ANY, u"0x7b0:", wx.DefaultPosition, wx.Size( 31,20 ), 0 )
self.m_staticText_fuse7b0.Wrap( -1 )
self.m_staticText_fuse7b0.SetFont( wx.Font( 9, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL, False, "宋体" ) )
bSizer_fuseGroupTxt4.Add( self.m_staticText_fuse7b0, 0, wx.ALL, 5 )
self.m_staticText_fuse7c0 = wx.StaticText( self.m_panel_fuseUtil, wx.ID_ANY, u"0x7c0:", wx.DefaultPosition, wx.Size( 31,20 ), 0 )
self.m_staticText_fuse7c0.Wrap( -1 )
self.m_staticText_fuse7c0.SetFont( wx.Font( 9, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL, False, "宋体" ) )
bSizer_fuseGroupTxt4.Add( self.m_staticText_fuse7c0, 0, wx.ALL, 5 )
self.m_staticText_fuse7d0 = wx.StaticText( self.m_panel_fuseUtil, wx.ID_ANY, u"0x7d0:", wx.DefaultPosition, wx.Size( 31,20 ), 0 )
self.m_staticText_fuse7d0.Wrap( -1 )
self.m_staticText_fuse7d0.SetFont( wx.Font( 9, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL, False, "宋体" ) )
bSizer_fuseGroupTxt4.Add( self.m_staticText_fuse7d0, 0, wx.ALL, 5 )
self.m_staticText_fuse7e0 = wx.StaticText( self.m_panel_fuseUtil, wx.ID_ANY, u"0x7e0:", wx.DefaultPosition, wx.Size( 31,20 ), 0 )
self.m_staticText_fuse7e0.Wrap( -1 )
self.m_staticText_fuse7e0.SetFont( wx.Font( 9, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL, False, "宋体" ) )
bSizer_fuseGroupTxt4.Add( self.m_staticText_fuse7e0, 0, wx.ALL, 5 )
self.m_staticText_fuse7f0 = wx.StaticText( self.m_panel_fuseUtil, wx.ID_ANY, u"0x7f0:", wx.DefaultPosition, wx.Size( 31,20 ), 0 )
self.m_staticText_fuse7f0.Wrap( -1 )
self.m_staticText_fuse7f0.SetFont( wx.Font( 9, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL, False, "宋体" ) )
bSizer_fuseGroupTxt4.Add( self.m_staticText_fuse7f0, 0, wx.ALL, 5 )
self.m_staticText_fuse800 = wx.StaticText( self.m_panel_fuseUtil, wx.ID_ANY, u"0x800:", wx.DefaultPosition, wx.Size( 31,20 ), 0 )
self.m_staticText_fuse800.Wrap( -1 )
self.m_staticText_fuse800.SetFont( wx.Font( 9, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL, False, "宋体" ) )
bSizer_fuseGroupTxt4.Add( self.m_staticText_fuse800, 0, wx.ALL, 5 )
self.m_staticText_fuse810 = wx.StaticText( self.m_panel_fuseUtil, wx.ID_ANY, u"0x810:", wx.DefaultPosition, wx.Size( 31,20 ), 0 )
self.m_staticText_fuse810.Wrap( -1 )
self.m_staticText_fuse810.SetFont( wx.Font( 9, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL, False, "宋体" ) )
bSizer_fuseGroupTxt4.Add( self.m_staticText_fuse810, 0, wx.ALL, 5 )
self.m_staticText_fuse820 = wx.StaticText( self.m_panel_fuseUtil, wx.ID_ANY, u"0x820:", wx.DefaultPosition, wx.Size( 31,20 ), 0 )
self.m_staticText_fuse820.Wrap( -1 )
self.m_staticText_fuse820.SetFont( wx.Font( 9, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL, False, "宋体" ) )
bSizer_fuseGroupTxt4.Add( self.m_staticText_fuse820, 0, wx.ALL, 5 )
self.m_staticText_fuse830 = wx.StaticText( self.m_panel_fuseUtil, wx.ID_ANY, u"0x830:", wx.DefaultPosition, wx.Size( 31,20 ), 0 )
self.m_staticText_fuse830.Wrap( -1 )
self.m_staticText_fuse830.SetFont( wx.Font( 9, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL, False, "宋体" ) )
bSizer_fuseGroupTxt4.Add( self.m_staticText_fuse830, 0, wx.ALL, 5 )
self.m_staticText_fuse840 = wx.StaticText( self.m_panel_fuseUtil, wx.ID_ANY, u"0x840:", wx.DefaultPosition, wx.Size( 31,20 ), 0 )
self.m_staticText_fuse840.Wrap( -1 )
self.m_staticText_fuse840.SetFont( wx.Font( 9, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL, False, "宋体" ) )
bSizer_fuseGroupTxt4.Add( self.m_staticText_fuse840, 0, wx.ALL, 5 )
self.m_staticText_fuse850 = wx.StaticText( self.m_panel_fuseUtil, wx.ID_ANY, u"0x850:", wx.DefaultPosition, wx.Size( 31,20 ), 0 )
self.m_staticText_fuse850.Wrap( -1 )
self.m_staticText_fuse850.SetFont( wx.Font( 9, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL, False, "宋体" ) )
bSizer_fuseGroupTxt4.Add( self.m_staticText_fuse850, 0, wx.ALL, 5 )
wSizer_fuseUtil.Add( bSizer_fuseGroupTxt4, 1, wx.EXPAND, 5 )
bSizer_fuseGroupCtrl4 = wx.BoxSizer( wx.VERTICAL )
self.m_textCtrl_fuse780 = wx.TextCtrl( self.m_panel_fuseUtil, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.Size( 75,20 ), 0 )
bSizer_fuseGroupCtrl4.Add( self.m_textCtrl_fuse780, 0, wx.ALL, 5 )
self.m_textCtrl_fuse790 = wx.TextCtrl( self.m_panel_fuseUtil, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.Size( 75,20 ), 0 )
bSizer_fuseGroupCtrl4.Add( self.m_textCtrl_fuse790, 0, wx.ALL, 5 )
self.m_textCtrl_fuse7a0 = wx.TextCtrl( self.m_panel_fuseUtil, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.Size( 75,20 ), 0 )
bSizer_fuseGroupCtrl4.Add( self.m_textCtrl_fuse7a0, 0, wx.ALL, 5 )
self.m_textCtrl_fuse7b0 = wx.TextCtrl( self.m_panel_fuseUtil, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.Size( 75,20 ), 0 )
bSizer_fuseGroupCtrl4.Add( self.m_textCtrl_fuse7b0, 0, wx.ALL, 5 )
self.m_textCtrl_fuse7c0 = wx.TextCtrl( self.m_panel_fuseUtil, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.Size( 75,20 ), 0 )
bSizer_fuseGroupCtrl4.Add( self.m_textCtrl_fuse7c0, 0, wx.ALL, 5 )
self.m_textCtrl_fuse7d0 = wx.TextCtrl( self.m_panel_fuseUtil, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.Size( 75,20 ), 0 )
bSizer_fuseGroupCtrl4.Add( self.m_textCtrl_fuse7d0, 0, wx.ALL, 5 )
self.m_textCtrl_fuse7e0 = wx.TextCtrl( self.m_panel_fuseUtil, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.Size( 75,20 ), 0 )
bSizer_fuseGroupCtrl4.Add( self.m_textCtrl_fuse7e0, 0, wx.ALL, 5 )
self.m_textCtrl_fuse7f0 = wx.TextCtrl( self.m_panel_fuseUtil, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.Size( 75,20 ), 0 )
bSizer_fuseGroupCtrl4.Add( self.m_textCtrl_fuse7f0, 0, wx.ALL, 5 )
self.m_textCtrl_fuse800 = wx.TextCtrl( self.m_panel_fuseUtil, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.Size( 75,20 ), 0 )
bSizer_fuseGroupCtrl4.Add( self.m_textCtrl_fuse800, 0, wx.ALL, 5 )
self.m_textCtrl_fuse810 = wx.TextCtrl( self.m_panel_fuseUtil, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.Size( 75,20 ), 0 )
bSizer_fuseGroupCtrl4.Add( self.m_textCtrl_fuse810, 0, wx.ALL, 5 )
self.m_textCtrl_fuse820 = wx.TextCtrl( self.m_panel_fuseUtil, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.Size( 75,20 ), 0 )
bSizer_fuseGroupCtrl4.Add( self.m_textCtrl_fuse820, 0, wx.ALL, 5 )
self.m_textCtrl_fuse830 = wx.TextCtrl( self.m_panel_fuseUtil, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.Size( 75,20 ), 0 )
bSizer_fuseGroupCtrl4.Add( self.m_textCtrl_fuse830, 0, wx.ALL, 5 )
self.m_textCtrl_fuse840 = wx.TextCtrl( self.m_panel_fuseUtil, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.Size( 75,20 ), 0 )
bSizer_fuseGroupCtrl4.Add( self.m_textCtrl_fuse840, 0, wx.ALL, 5 )
self.m_textCtrl_fuse850 = wx.TextCtrl( self.m_panel_fuseUtil, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.Size( 75,20 ), 0 )
bSizer_fuseGroupCtrl4.Add( self.m_textCtrl_fuse850, 0, wx.ALL, 5 )
wSizer_fuseUtil.Add( bSizer_fuseGroupCtrl4, 1, wx.EXPAND, 5 )
bSizer_fuseGroupTxt5 = wx.BoxSizer( wx.VERTICAL )
self.m_staticText_fuse860 = wx.StaticText( self.m_panel_fuseUtil, wx.ID_ANY, u"0x860:", wx.DefaultPosition, wx.Size( 31,20 ), 0 )
self.m_staticText_fuse860.Wrap( -1 )
self.m_staticText_fuse860.SetFont( wx.Font( 9, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL, False, "宋体" ) )
bSizer_fuseGroupTxt5.Add( self.m_staticText_fuse860, 0, wx.ALL, 5 )
self.m_staticText_fuse870 = wx.StaticText( self.m_panel_fuseUtil, wx.ID_ANY, u"0x870:", wx.DefaultPosition, wx.Size( 31,20 ), 0 )
self.m_staticText_fuse870.Wrap( -1 )
self.m_staticText_fuse870.SetFont( wx.Font( 9, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL, False, "宋体" ) )
bSizer_fuseGroupTxt5.Add( self.m_staticText_fuse870, 0, wx.ALL, 5 )
self.m_staticText_fuse880 = wx.StaticText( self.m_panel_fuseUtil, wx.ID_ANY, u"0x880:", wx.DefaultPosition, wx.Size( 31,20 ), 0 )
self.m_staticText_fuse880.Wrap( -1 )
self.m_staticText_fuse880.SetFont( wx.Font( 9, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL, False, "宋体" ) )
bSizer_fuseGroupTxt5.Add( self.m_staticText_fuse880, 0, wx.ALL, 5 )
self.m_staticText_fuse890 = wx.StaticText( self.m_panel_fuseUtil, wx.ID_ANY, u"0x890:", wx.DefaultPosition, wx.Size( 31,20 ), 0 )
self.m_staticText_fuse890.Wrap( -1 )
self.m_staticText_fuse890.SetFont( wx.Font( 9, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL, False, "宋体" ) )
bSizer_fuseGroupTxt5.Add( self.m_staticText_fuse890, 0, wx.ALL, 5 )
self.m_staticText_fuse8a0 = wx.StaticText( self.m_panel_fuseUtil, wx.ID_ANY, u"0x8a0:", wx.DefaultPosition, wx.Size( 31,20 ), 0 )
self.m_staticText_fuse8a0.Wrap( -1 )
self.m_staticText_fuse8a0.SetFont( wx.Font( 9, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL, False, "宋体" ) )
bSizer_fuseGroupTxt5.Add( self.m_staticText_fuse8a0, 0, wx.ALL, 5 )
self.m_staticText_fuse8b0 = wx.StaticText( self.m_panel_fuseUtil, wx.ID_ANY, u"0x8b0:", wx.DefaultPosition, wx.Size( 31,20 ), 0 )
self.m_staticText_fuse8b0.Wrap( -1 )
self.m_staticText_fuse8b0.SetFont( wx.Font( 9, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL, False, "宋体" ) )
bSizer_fuseGroupTxt5.Add( self.m_staticText_fuse8b0, 0, wx.ALL, 5 )
self.m_staticText_fuse8c0 = wx.StaticText( self.m_panel_fuseUtil, wx.ID_ANY, u"Gp4", wx.DefaultPosition, wx.Size( 31,20 ), 0 )
self.m_staticText_fuse8c0.Wrap( -1 )
self.m_staticText_fuse8c0.SetFont( wx.Font( wx.NORMAL_FONT.GetPointSize(), wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL, False, wx.EmptyString ) )
self.m_staticText_fuse8c0.SetBackgroundColour( wx.SystemSettings.GetColour( wx.SYS_COLOUR_WINDOW ) )
bSizer_fuseGroupTxt5.Add( self.m_staticText_fuse8c0, 0, wx.ALL, 5 )
self.m_staticText_fuse8d0 = wx.StaticText( self.m_panel_fuseUtil, wx.ID_ANY, u"Gp4", wx.DefaultPosition, wx.Size( 31,20 ), 0 )
self.m_staticText_fuse8d0.Wrap( -1 )
self.m_staticText_fuse8d0.SetFont( wx.Font( wx.NORMAL_FONT.GetPointSize(), wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL, False, wx.EmptyString ) )
self.m_staticText_fuse8d0.SetBackgroundColour( wx.SystemSettings.GetColour( wx.SYS_COLOUR_WINDOW ) )
bSizer_fuseGroupTxt5.Add( self.m_staticText_fuse8d0, 0, wx.ALL, 5 )
self.m_staticText_fuse8e0 = wx.StaticText( self.m_panel_fuseUtil, wx.ID_ANY, u"Gp4", wx.DefaultPosition, wx.Size( 31,20 ), 0 )
self.m_staticText_fuse8e0.Wrap( -1 )
self.m_staticText_fuse8e0.SetFont( wx.Font( wx.NORMAL_FONT.GetPointSize(), wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL, False, wx.EmptyString ) )
self.m_staticText_fuse8e0.SetBackgroundColour( wx.SystemSettings.GetColour( wx.SYS_COLOUR_WINDOW ) )
bSizer_fuseGroupTxt5.Add( self.m_staticText_fuse8e0, 0, wx.ALL, 5 )
self.m_staticText_fuse8f0 = wx.StaticText( self.m_panel_fuseUtil, wx.ID_ANY, u"Gp4", wx.DefaultPosition, wx.Size( 31,20 ), 0 )
self.m_staticText_fuse8f0.Wrap( -1 )
self.m_staticText_fuse8f0.SetFont( wx.Font( wx.NORMAL_FONT.GetPointSize(), wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL, False, wx.EmptyString ) )
self.m_staticText_fuse8f0.SetBackgroundColour( wx.SystemSettings.GetColour( wx.SYS_COLOUR_WINDOW ) )
bSizer_fuseGroupTxt5.Add( self.m_staticText_fuse8f0, 0, wx.ALL, 5 )
wSizer_fuseUtil.Add( bSizer_fuseGroupTxt5, 1, wx.EXPAND, 5 )
bSizer_fuseGroupCtrl5 = wx.BoxSizer( wx.VERTICAL )
self.m_textCtrl_fuse860 = wx.TextCtrl( self.m_panel_fuseUtil, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.Size( 75,20 ), 0 )
bSizer_fuseGroupCtrl5.Add( self.m_textCtrl_fuse860, 0, wx.ALL, 5 )
self.m_textCtrl_fuse870 = wx.TextCtrl( self.m_panel_fuseUtil, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.Size( 75,20 ), 0 )
bSizer_fuseGroupCtrl5.Add( self.m_textCtrl_fuse870, 0, wx.ALL, 5 )
self.m_textCtrl_fuse880 = wx.TextCtrl( self.m_panel_fuseUtil, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.Size( 75,20 ), 0 )
bSizer_fuseGroupCtrl5.Add( self.m_textCtrl_fuse880, 0, wx.ALL, 5 )
self.m_textCtrl_fuse890 = wx.TextCtrl( self.m_panel_fuseUtil, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.Size( 75,20 ), 0 )
bSizer_fuseGroupCtrl5.Add( self.m_textCtrl_fuse890, 0, wx.ALL, 5 )
self.m_textCtrl_fuse8a0 = wx.TextCtrl( self.m_panel_fuseUtil, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.Size( 75,20 ), 0 )
bSizer_fuseGroupCtrl5.Add( self.m_textCtrl_fuse8a0, 0, wx.ALL, 5 )
self.m_textCtrl_fuse8b0 = wx.TextCtrl( self.m_panel_fuseUtil, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.Size( 75,20 ), 0 )
bSizer_fuseGroupCtrl5.Add( self.m_textCtrl_fuse8b0, 0, wx.ALL, 5 )
self.m_textCtrl_fuse8c0 = wx.TextCtrl( self.m_panel_fuseUtil, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.Size( 75,20 ), 0 )
bSizer_fuseGroupCtrl5.Add( self.m_textCtrl_fuse8c0, 0, wx.ALL, 5 )
self.m_textCtrl_fuse8d0 = wx.TextCtrl( self.m_panel_fuseUtil, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.Size( 75,20 ), 0 )
bSizer_fuseGroupCtrl5.Add( self.m_textCtrl_fuse8d0, 0, wx.ALL, 5 )
self.m_textCtrl_fuse8e0 = wx.TextCtrl( self.m_panel_fuseUtil, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.Size( 75,20 ), 0 )
bSizer_fuseGroupCtrl5.Add( self.m_textCtrl_fuse8e0, 0, wx.ALL, 5 )
self.m_textCtrl_fuse8f0 = wx.TextCtrl( self.m_panel_fuseUtil, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.Size( 75,20 ), 0 )
bSizer_fuseGroupCtrl5.Add( self.m_textCtrl_fuse8f0, 0, wx.ALL, 5 )
self.m_button_scan = wx.Button( self.m_panel_fuseUtil, wx.ID_ANY, u"Scan", wx.DefaultPosition, wx.Size( 75,20 ), 0 )
bSizer_fuseGroupCtrl5.Add( self.m_button_scan, 0, wx.ALL, 5 )
self.m_button_burn = wx.Button( self.m_panel_fuseUtil, wx.ID_ANY, u"Burn", wx.DefaultPosition, wx.Size( 75,20 ), 0 )
bSizer_fuseGroupCtrl5.Add( self.m_button_burn, 0, wx.ALL, 5 )
self.m_button_save = wx.Button( self.m_panel_fuseUtil, wx.ID_ANY, u"Save", wx.DefaultPosition, wx.Size( 75,20 ), 0 )
bSizer_fuseGroupCtrl5.Add( self.m_button_save, 0, wx.ALL, 5 )
self.m_button_load = wx.Button( self.m_panel_fuseUtil, wx.ID_ANY, u"Load", wx.DefaultPosition, wx.Size( 75,20 ), 0 )
bSizer_fuseGroupCtrl5.Add( self.m_button_load, 0, wx.ALL, 5 )
self.m_staticText_null0Fuse = wx.StaticText( self.m_panel_fuseUtil, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.Size( 31,15 ), 0 )
self.m_staticText_null0Fuse.Wrap( -1 )
bSizer_fuseGroupCtrl5.Add( self.m_staticText_null0Fuse, 0, wx.ALL, 5 )
wSizer_fuseUtil.Add( bSizer_fuseGroupCtrl5, 1, wx.EXPAND, 5 )
self.m_panel_fuseUtil.SetSizer( wSizer_fuseUtil )
self.m_panel_fuseUtil.Layout()
wSizer_fuseUtil.Fit( self.m_panel_fuseUtil )
self.m_notebook_imageSeq.AddPage( self.m_panel_fuseUtil, u"eFuse Operation Utility", False )
self.m_panel_memView = wx.Panel( self.m_notebook_imageSeq, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, wx.TAB_TRAVERSAL )
self.m_panel_memView.SetBackgroundColour( wx.SystemSettings.GetColour( wx.SYS_COLOUR_WINDOW ) )
wSizer_memView = wx.WrapSizer( wx.HORIZONTAL, wx.WRAPSIZER_DEFAULT_FLAGS )
self.m_staticText_memStart = wx.StaticText( self.m_panel_memView, wx.ID_ANY, u"Start / Offset:", wx.DefaultPosition, wx.Size( 70,-1 ), 0 )
self.m_staticText_memStart.Wrap( -1 )
wSizer_memView.Add( self.m_staticText_memStart, 0, wx.ALL, 5 )
self.m_textCtrl_memStart = wx.TextCtrl( self.m_panel_memView, wx.ID_ANY, u"0x0", wx.DefaultPosition, wx.Size( 90,-1 ), 0 )
wSizer_memView.Add( self.m_textCtrl_memStart, 0, wx.ALL, 5 )
self.m_staticText_memLength = wx.StaticText( self.m_panel_memView, wx.ID_ANY, u"Byte Length (For Read/Erase):", wx.DefaultPosition, wx.Size( 160,-1 ), 0 )
self.m_staticText_memLength.Wrap( -1 )
wSizer_memView.Add( self.m_staticText_memLength, 0, wx.ALL, 5 )
self.m_textCtrl_memLength = wx.TextCtrl( self.m_panel_memView, wx.ID_ANY, u"0x2000", wx.DefaultPosition, wx.Size( 90,-1 ), 0 )
wSizer_memView.Add( self.m_textCtrl_memLength, 0, wx.ALL, 5 )
self.m_staticText_memBinFile = wx.StaticText( self.m_panel_memView, wx.ID_ANY, u"Bin File:", wx.DefaultPosition, wx.Size( 50,-1 ), 0 )
self.m_staticText_memBinFile.Wrap( -1 )
wSizer_memView.Add( self.m_staticText_memBinFile, 0, wx.ALL, 5 )
self.m_filePicker_memBinFile = wx.FilePickerCtrl( self.m_panel_memView, wx.ID_ANY, wx.EmptyString, u"Select a file", u"*.*", wx.DefaultPosition, wx.Size( 210,-1 ), wx.FLP_DEFAULT_STYLE )
wSizer_memView.Add( self.m_filePicker_memBinFile, 0, wx.ALL, 5 )
self.m_staticText_null0MemView = wx.StaticText( self.m_panel_memView, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.Size( 40,-1 ), 0 )
self.m_staticText_null0MemView.Wrap( -1 )
wSizer_memView.Add( self.m_staticText_null0MemView, 0, wx.ALL, 5 )
self.m_button_readMem = wx.Button( self.m_panel_memView, wx.ID_ANY, u"Read", wx.DefaultPosition, wx.Size( 100,-1 ), 0 )
wSizer_memView.Add( self.m_button_readMem, 0, wx.ALL, 5 )
self.m_staticText_null1MemView = wx.StaticText( self.m_panel_memView, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.Size( 20,-1 ), 0 )
self.m_staticText_null1MemView.Wrap( -1 )
wSizer_memView.Add( self.m_staticText_null1MemView, 0, wx.ALL, 5 )
self.m_button_eraseMem = wx.Button( self.m_panel_memView, wx.ID_ANY, u"Erase", wx.DefaultPosition, wx.Size( 100,-1 ), 0 )
wSizer_memView.Add( self.m_button_eraseMem, 0, wx.ALL, 5 )
self.m_staticText_null2MemView = wx.StaticText( self.m_panel_memView, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.Size( 20,-1 ), 0 )
self.m_staticText_null2MemView.Wrap( -1 )
wSizer_memView.Add( self.m_staticText_null2MemView, 0, wx.ALL, 5 )
self.m_button_writeMem = wx.Button( self.m_panel_memView, wx.ID_ANY, u"Write (Auto Erase)", wx.DefaultPosition, wx.Size( 150,-1 ), 0 )
wSizer_memView.Add( self.m_button_writeMem, 0, wx.ALL, 5 )
self.m_staticText_null3MemView = wx.StaticText( self.m_panel_memView, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.Size( 20,-1 ), 0 )
self.m_staticText_null3MemView.Wrap( -1 )
wSizer_memView.Add( self.m_staticText_null3MemView, 0, wx.ALL, 5 )
self.m_button_executeApp = wx.Button( self.m_panel_memView, wx.ID_ANY, u"Execute From Start", wx.DefaultPosition, wx.Size( 150,-1 ), 0 )
wSizer_memView.Add( self.m_button_executeApp, 0, wx.ALL, 5 )
self.m_textCtrl_bootDeviceMem = wx.TextCtrl( self.m_panel_memView, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.Size( 740,290 ), wx.TE_MULTILINE|wx.TE_RICH2 )
self.m_textCtrl_bootDeviceMem.SetFont( wx.Font( 10, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL, False, wx.EmptyString ) )
self.m_textCtrl_bootDeviceMem.SetBackgroundColour( wx.SystemSettings.GetColour( wx.SYS_COLOUR_WINDOW ) )
wSizer_memView.Add( self.m_textCtrl_bootDeviceMem, 0, wx.ALL, 5 )
self.m_staticText_null4MemView = wx.StaticText( self.m_panel_memView, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.Size( 25,-1 ), 0 )
self.m_staticText_null4MemView.Wrap( -1 )
wSizer_memView.Add( self.m_staticText_null4MemView, 0, wx.ALL, 5 )
self.m_button_viewMem = wx.Button( self.m_panel_memView, wx.ID_ANY, u"View Bootable Image", wx.DefaultPosition, wx.Size( 150,-1 ), 0 )
wSizer_memView.Add( self.m_button_viewMem, 0, wx.ALL, 5 )
self.m_staticText_null5MemView = wx.StaticText( self.m_panel_memView, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.Size( 10,-1 ), 0 )
self.m_staticText_null5MemView.Wrap( -1 )
wSizer_memView.Add( self.m_staticText_null5MemView, 0, wx.ALL, 5 )
self.m_button_clearMem = wx.Button( self.m_panel_memView, wx.ID_ANY, u"Clear The Screen", wx.DefaultPosition, wx.Size( 115,-1 ), 0 )
wSizer_memView.Add( self.m_button_clearMem, 0, wx.ALL, 5 )
self.m_staticText_null6MemView = wx.StaticText( self.m_panel_memView, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.Size( 10,-1 ), 0 )
self.m_staticText_null6MemView.Wrap( -1 )
wSizer_memView.Add( self.m_staticText_null6MemView, 0, wx.ALL, 5 )
self.m_checkBox_saveImageData = wx.CheckBox( self.m_panel_memView, wx.ID_ANY, u"Save image/data file to", wx.DefaultPosition, wx.Size( 140,-1 ), 0 )
wSizer_memView.Add( self.m_checkBox_saveImageData, 0, wx.ALL, 5 )
self.m_dirPicker_savedBinFolder = wx.DirPickerCtrl( self.m_panel_memView, wx.ID_ANY, wx.EmptyString, u"Select a folder", wx.DefaultPosition, wx.Size( 210,-1 ), wx.DIRP_DEFAULT_STYLE )
wSizer_memView.Add( self.m_dirPicker_savedBinFolder, 0, wx.ALL, 5 )
self.m_panel_memView.SetSizer( wSizer_memView )
self.m_panel_memView.Layout()
wSizer_memView.Fit( self.m_panel_memView )
self.m_notebook_imageSeq.AddPage( self.m_panel_memView, u"Boot Device Memory", False )
bSizer_boot.Add( self.m_notebook_imageSeq, 1, wx.EXPAND |wx.ALL, 5 )
self.m_notebook_bootLog = wx.Notebook( self, wx.ID_ANY, wx.DefaultPosition, wx.Size( -1,-1 ), 0 )
self.m_panel_log = wx.Panel( self.m_notebook_bootLog, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, wx.TAB_TRAVERSAL )
self.m_panel_log.SetBackgroundColour( wx.SystemSettings.GetColour( wx.SYS_COLOUR_WINDOW ) )
wSizer_log = wx.WrapSizer( wx.HORIZONTAL, wx.WRAPSIZER_DEFAULT_FLAGS )
bSizer_showLog = wx.BoxSizer( wx.VERTICAL )
self.m_textCtrl_log = wx.TextCtrl( self.m_panel_log, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.Size( 650,68 ), wx.TE_MULTILINE )
bSizer_showLog.Add( self.m_textCtrl_log, 0, wx.ALL, 5 )
wSizer_log.Add( bSizer_showLog, 1, wx.EXPAND, 5 )
bSizer_logAction = wx.BoxSizer( wx.VERTICAL )
self.m_button_clearLog = wx.Button( self.m_panel_log, wx.ID_ANY, u"Clear", wx.DefaultPosition, wx.DefaultSize, 0 )
self.m_button_clearLog.SetBackgroundColour( wx.SystemSettings.GetColour( wx.SYS_COLOUR_WINDOW ) )
bSizer_logAction.Add( self.m_button_clearLog, 0, wx.ALL, 5 )
self.m_button_saveLog = wx.Button( self.m_panel_log, wx.ID_ANY, u"Save", wx.DefaultPosition, wx.DefaultSize, 0 )
self.m_button_saveLog.SetBackgroundColour( wx.SystemSettings.GetColour( wx.SYS_COLOUR_WINDOW ) )
bSizer_logAction.Add( self.m_button_saveLog, 0, wx.ALL, 5 )
wSizer_log.Add( bSizer_logAction, 1, wx.EXPAND, 5 )
wSizer_actionGauge = wx.WrapSizer( wx.HORIZONTAL, wx.WRAPSIZER_DEFAULT_FLAGS )
self.m_staticText_costTime = wx.StaticText( self.m_panel_log, wx.ID_ANY, u" 00:00.000", wx.DefaultPosition, wx.Size( 55,-1 ), 0 )
self.m_staticText_costTime.Wrap( -1 )
wSizer_actionGauge.Add( self.m_staticText_costTime, 0, wx.ALL, 5 )
self.m_gauge_action = wx.Gauge( self.m_panel_log, wx.ID_ANY, 100, wx.DefaultPosition, wx.Size( 680,-1 ), wx.GA_HORIZONTAL )
self.m_gauge_action.SetValue( 100 )
wSizer_actionGauge.Add( self.m_gauge_action, 0, wx.ALL, 5 )
self.m_staticText_null1ActionGauge = wx.StaticText( self.m_panel_log, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.Size( 750,1 ), 0 )
self.m_staticText_null1ActionGauge.Wrap( -1 )
wSizer_actionGauge.Add( self.m_staticText_null1ActionGauge, 0, wx.ALL, 5 )
wSizer_log.Add( wSizer_actionGauge, 1, wx.EXPAND, 5 )
self.m_panel_log.SetSizer( wSizer_log )
self.m_panel_log.Layout()
wSizer_log.Fit( self.m_panel_log )
self.m_notebook_bootLog.AddPage( self.m_panel_log, u"Log", False )
bSizer_boot.Add( self.m_notebook_bootLog, 1, wx.EXPAND |wx.ALL, 5 )
wSizer_func.Add( bSizer_boot, 1, wx.EXPAND, 5 )
bSizer_win.Add( wSizer_func, 1, wx.EXPAND, 5 )
self.SetSizer( bSizer_win )
self.Layout()
self.m_statusBar = self.CreateStatusBar( 1, wx.STB_SIZEGRIP, wx.ID_ANY )
self.Centre( wx.BOTH )
# Connect Events
self.Bind( wx.EVT_CLOSE, self.callbackClose )
self.Bind( wx.EVT_MENU, self.callbackExit, id = self.m_menuItem_exit.GetId() )
self.Bind( wx.EVT_MENU, self.callbackSetLanguageAsEnglish, id = self.m_menuItem_english.GetId() )
self.Bind( wx.EVT_MENU, self.callbackSetLanguageAsChinese, id = self.m_menuItem_chinese.GetId() )
self.Bind( wx.EVT_MENU, self.callbackSetRunModeAsEntry, id = self.m_menuItem_runModeEntry.GetId() )
self.Bind( wx.EVT_MENU, self.callbackSetRunModeAsMaster, id = self.m_menuItem_runModeMaster.GetId() )
self.Bind( wx.EVT_MENU, self.callbackSetUsbDetectionAsDynamic, id = self.m_menuItem_usbDetectionDynamic.GetId() )
self.Bind( wx.EVT_MENU, self.callbackSetUsbDetectionAsStatic, id = self.m_menuItem_usbDetectionStatic.GetId() )
self.Bind( wx.EVT_MENU, self.callbackSetSoundEffectAsContra, id = self.m_menuItem_soundEffectContra.GetId() )
self.Bind( wx.EVT_MENU, self.callbackSetSoundEffectAsMario, id = self.m_menuItem_soundEffectMario.GetId() )
self.Bind( wx.EVT_MENU, self.callbackSetSoundEffectAsQuiet, id = self.m_menuItem_soundEffectQuiet.GetId() )
self.Bind( wx.EVT_MENU, self.callbackSetGenSbFileAsYes, id = self.m_menuItem_genSbFileYes.GetId() )
self.Bind( wx.EVT_MENU, self.callbackSetGenSbFileAsNo, id = self.m_menuItem_genSbFileNo.GetId() )
self.Bind( wx.EVT_MENU, self.callbackSetImageReadbackAsAutomatic, id = self.m_menuItem_imageReadbackAutomatic.GetId() )
self.Bind( wx.EVT_MENU, self.callbackSetImageReadbackAsManual, id = self.m_menuItem_imageReadbackManual.GetId() )
self.Bind( wx.EVT_MENU, self.callbackSetFlashloaderResidentToDefault, id = self.m_menuItem_flashloaderResidentDefault.GetId() )
self.Bind( wx.EVT_MENU, self.callbackSetFlashloaderResidentToItcm, id = self.m_menuItem_flashloaderResidentItcm.GetId() )
self.Bind( wx.EVT_MENU, self.callbackSetFlashloaderResidentToDtcm, id = self.m_menuItem_flashloaderResidentDtcm.GetId() )
self.Bind( wx.EVT_MENU, self.callbackSetFlashloaderResidentToOcram, id = self.m_menuItem_flashloaderResidentOcram.GetId() )
self.Bind( wx.EVT_MENU, self.callbackSetEfuseGroupTo0, id = self.m_menuItem_efuseGroup0.GetId() )
self.Bind( wx.EVT_MENU, self.callbackSetEfuseGroupTo1, id = self.m_menuItem_efuseGroup1.GetId() )
self.Bind( wx.EVT_MENU, self.callbackSetEfuseGroupTo2, id = self.m_menuItem_efuseGroup2.GetId() )
self.Bind( wx.EVT_MENU, self.callbackSetEfuseGroupTo3, id = self.m_menuItem_efuseGroup3.GetId() )
self.Bind( wx.EVT_MENU, self.callbackSetEfuseGroupTo4, id = self.m_menuItem_efuseGroup4.GetId() )
self.Bind( wx.EVT_MENU, self.callbackSetEfuseGroupTo5, id = self.m_menuItem_efuseGroup5.GetId() )
self.Bind( wx.EVT_MENU, self.callbackSetEfuseGroupTo6, id = self.m_menuItem_efuseGroup6.GetId() )
self.Bind( wx.EVT_MENU, self.callbackSetFlexspiXipRegionTo0, id = self.m_menuItem_flexspiXipRegion0.GetId() )
self.Bind( wx.EVT_MENU, self.callbackSetFlexspiXipRegionTo1, id = self.m_menuItem_flexspiXipRegion1.GetId() )
self.Bind( wx.EVT_MENU, self.callbackShowHomePage, id = self.m_menuItem_homePage.GetId() )
self.Bind( wx.EVT_MENU, self.callbackShowAboutAuthor, id = self.m_menuItem_aboutAuthor.GetId() )
self.Bind( wx.EVT_MENU, self.callbackShowContributors, id = self.m_menuItem_contributors.GetId() )
self.Bind( wx.EVT_MENU, self.callbackShowSpecialThanks, id = self.m_menuItem_specialThanks.GetId() )
self.Bind( wx.EVT_MENU, self.callbackShowRevisionHistory, id = self.m_menuItem_revisionHistory.GetId() )
self.m_choice_mcuSeries.Bind( wx.EVT_CHOICE, self.callbackSetMcuSeries )
self.m_choice_mcuDevice.Bind( wx.EVT_CHOICE, self.callbackSetMcuDevice )
self.m_choice_bootDevice.Bind( wx.EVT_CHOICE, self.callbackSetBootDevice )
self.m_button_bootDeviceConfiguration.Bind( wx.EVT_BUTTON, self.callbackBootDeviceConfiguration )
self.m_button_deviceConfigurationData.Bind( wx.EVT_BUTTON, self.callbackDeviceConfigurationData )
self.m_radioBtn_uart.Bind( wx.EVT_RADIOBUTTON, self.callbackSetUartPort )
self.m_radioBtn_usbhid.Bind( wx.EVT_RADIOBUTTON, self.callbackSetUsbhidPort )
self.m_checkBox_oneStepConnect.Bind( wx.EVT_CHECKBOX, self.callbackSetOneStep )
self.m_button_connect.Bind( wx.EVT_BUTTON, self.callbackConnectToDevice )
self.m_choice_secureBootType.Bind( wx.EVT_CHOICE, self.callbackSetSecureBootType )
self.m_button_allInOneAction.Bind( wx.EVT_BUTTON, self.callbackAllInOneAction )
self.m_button_advCertSettings.Bind( wx.EVT_BUTTON, self.callbackAdvCertSettings )
self.m_button_genCert.Bind( wx.EVT_BUTTON, self.callbackGenCert )
self.m_filePicker_appPath.Bind( wx.EVT_FILEPICKER_CHANGED, self.callbackChangedAppFile )
self.m_choice_appFormat.Bind( wx.EVT_CHOICE, self.callbackSetAppFormat )
self.m_choice_enableCertForHwCrypto.Bind( wx.EVT_CHOICE, self.callbackSetCertForHwCrypto )
self.m_button_genImage.Bind( wx.EVT_BUTTON, self.callbackGenImage )
self.m_choice_keyStorageRegion.Bind( wx.EVT_CHOICE, self.callbackSetKeyStorageRegion )
self.m_button_advKeySettings.Bind( wx.EVT_BUTTON, self.callbackAdvKeySettings )
self.m_button_prepHwCrypto.Bind( wx.EVT_BUTTON, self.callbackDoHwEncryption )
self.m_button_progSrk.Bind( wx.EVT_BUTTON, self.callbackProgramSrk )
self.m_button_operHwCrypto.Bind( wx.EVT_BUTTON, self.callbackProgramHwCryptoDek )
self.m_button_flashImage.Bind( wx.EVT_BUTTON, self.callbackFlashImage )
self.m_button_progDek.Bind( wx.EVT_BUTTON, self.callbackFlashHabDek )
self.m_button_fuse400.Bind( wx.EVT_BUTTON, self.callbackSetEfuseLock )
self.m_button_fuse450.Bind( wx.EVT_BUTTON, self.callbackSetEfuseBootCfg0 )
self.m_button_fuse460.Bind( wx.EVT_BUTTON, self.callbackSetEfuseBootCfg1 )
self.m_button_fuse470.Bind( wx.EVT_BUTTON, self.callbackSetEfuseBootCfg2 )
self.m_textCtrl_fuse400.Bind( wx.EVT_TEXT_ENTER, self.callbackEnterEfuseLock )
self.m_textCtrl_fuse450.Bind( wx.EVT_TEXT_ENTER, self.callbackEnterEfuseBootCfg0 )
self.m_textCtrl_fuse460.Bind( wx.EVT_TEXT_ENTER, self.callbackEnterEfuseBootCfg1 )
self.m_textCtrl_fuse470.Bind( wx.EVT_TEXT_ENTER, self.callbackEnterEfuseBootCfg2 )
self.m_button_fuse6d0.Bind( wx.EVT_BUTTON, self.callbackSetEfuseMiscConf0 )
self.m_button_fuse6e0.Bind( wx.EVT_BUTTON, self.callbackSetEfuseMiscConf1 )
self.m_textCtrl_fuse6d0.Bind( wx.EVT_TEXT_ENTER, self.callbackEnterEfuseMiscConf0 )
self.m_textCtrl_fuse6e0.Bind( wx.EVT_TEXT_ENTER, self.callbackEnterEfuseMiscConf1 )
self.m_button_scan.Bind( wx.EVT_BUTTON, self.callbackScanFuse )
self.m_button_burn.Bind( wx.EVT_BUTTON, self.callbackBurnFuse )
self.m_button_save.Bind( wx.EVT_BUTTON, self.callbackSaveFuse )
self.m_button_load.Bind( wx.EVT_BUTTON, self.callbackLoadFuse )
self.m_button_readMem.Bind( wx.EVT_BUTTON, self.callbackReadMem )
self.m_button_eraseMem.Bind( wx.EVT_BUTTON, self.callbackEraseMem )
self.m_button_writeMem.Bind( wx.EVT_BUTTON, self.callbackWriteMem )
self.m_button_executeApp.Bind( wx.EVT_BUTTON, self.callbackExecuteApp )
self.m_button_viewMem.Bind( wx.EVT_BUTTON, self.callbackViewMem )
self.m_button_clearMem.Bind( wx.EVT_BUTTON, self.callbackClearMem )
self.m_button_clearLog.Bind( wx.EVT_BUTTON, self.callbackClearLog )
self.m_button_saveLog.Bind( wx.EVT_BUTTON, self.callbackSaveLog )
def __del__( self ):
pass
# Virtual event handlers, override them in your derived class
def callbackClose( self, event ):
event.Skip()
def callbackExit( self, event ):
event.Skip()
def callbackSetLanguageAsEnglish( self, event ):
event.Skip()
def callbackSetLanguageAsChinese( self, event ):
event.Skip()
def callbackSetRunModeAsEntry( self, event ):
event.Skip()
def callbackSetRunModeAsMaster( self, event ):
event.Skip()
def callbackSetUsbDetectionAsDynamic( self, event ):
event.Skip()
def callbackSetUsbDetectionAsStatic( self, event ):
event.Skip()
def callbackSetSoundEffectAsContra( self, event ):
event.Skip()
def callbackSetSoundEffectAsMario( self, event ):
event.Skip()
def callbackSetSoundEffectAsQuiet( self, event ):
event.Skip()
def callbackSetGenSbFileAsYes( self, event ):
event.Skip()
def callbackSetGenSbFileAsNo( self, event ):
event.Skip()
def callbackSetImageReadbackAsAutomatic( self, event ):
event.Skip()
def callbackSetImageReadbackAsManual( self, event ):
event.Skip()
def callbackSetFlashloaderResidentToDefault( self, event ):
event.Skip()
def callbackSetFlashloaderResidentToItcm( self, event ):
event.Skip()
def callbackSetFlashloaderResidentToDtcm( self, event ):
event.Skip()
def callbackSetFlashloaderResidentToOcram( self, event ):
event.Skip()
def callbackSetEfuseGroupTo0( self, event ):
event.Skip()
def callbackSetEfuseGroupTo1( self, event ):
event.Skip()
def callbackSetEfuseGroupTo2( self, event ):
event.Skip()
def callbackSetEfuseGroupTo3( self, event ):
event.Skip()
def callbackSetEfuseGroupTo4( self, event ):
event.Skip()
def callbackSetEfuseGroupTo5( self, event ):
event.Skip()
def callbackSetEfuseGroupTo6( self, event ):
event.Skip()
def callbackSetFlexspiXipRegionTo0( self, event ):
event.Skip()
def callbackSetFlexspiXipRegionTo1( self, event ):
event.Skip()
def callbackShowHomePage( self, event ):
event.Skip()
def callbackShowAboutAuthor( self, event ):
event.Skip()
def callbackShowContributors( self, event ):
event.Skip()
def callbackShowSpecialThanks( self, event ):
event.Skip()
def callbackShowRevisionHistory( self, event ):
event.Skip()
def callbackSetMcuSeries( self, event ):
event.Skip()
def callbackSetMcuDevice( self, event ):
event.Skip()
def callbackSetBootDevice( self, event ):
event.Skip()
def callbackBootDeviceConfiguration( self, event ):
event.Skip()
def callbackDeviceConfigurationData( self, event ):
event.Skip()
def callbackSetUartPort( self, event ):
event.Skip()
def callbackSetUsbhidPort( self, event ):
event.Skip()
def callbackSetOneStep( self, event ):
event.Skip()
def callbackConnectToDevice( self, event ):
event.Skip()
def callbackSetSecureBootType( self, event ):
event.Skip()
def callbackAllInOneAction( self, event ):
event.Skip()
def callbackAdvCertSettings( self, event ):
event.Skip()
def callbackGenCert( self, event ):
event.Skip()
def callbackChangedAppFile( self, event ):
event.Skip()
def callbackSetAppFormat( self, event ):
event.Skip()
def callbackSetCertForHwCrypto( self, event ):
event.Skip()
def callbackGenImage( self, event ):
event.Skip()
def callbackSetKeyStorageRegion( self, event ):
event.Skip()
def callbackAdvKeySettings( self, event ):
event.Skip()
def callbackDoHwEncryption( self, event ):
event.Skip()
def callbackProgramSrk( self, event ):
event.Skip()
def callbackProgramHwCryptoDek( self, event ):
event.Skip()
def callbackFlashImage( self, event ):
event.Skip()
def callbackFlashHabDek( self, event ):
event.Skip()
def callbackSetEfuseLock( self, event ):
event.Skip()
def callbackSetEfuseBootCfg0( self, event ):
event.Skip()
def callbackSetEfuseBootCfg1( self, event ):
event.Skip()
def callbackSetEfuseBootCfg2( self, event ):
event.Skip()
def callbackEnterEfuseLock( self, event ):
event.Skip()
def callbackEnterEfuseBootCfg0( self, event ):
event.Skip()
def callbackEnterEfuseBootCfg1( self, event ):
event.Skip()
def callbackEnterEfuseBootCfg2( self, event ):
event.Skip()
def callbackSetEfuseMiscConf0( self, event ):
event.Skip()
def callbackSetEfuseMiscConf1( self, event ):
event.Skip()
def callbackEnterEfuseMiscConf0( self, event ):
event.Skip()
def callbackEnterEfuseMiscConf1( self, event ):
event.Skip()
def callbackScanFuse( self, event ):
event.Skip()
def callbackBurnFuse( self, event ):
event.Skip()
def callbackSaveFuse( self, event ):
event.Skip()
def callbackLoadFuse( self, event ):
event.Skip()
def callbackReadMem( self, event ):
event.Skip()
def callbackEraseMem( self, event ):
event.Skip()
def callbackWriteMem( self, event ):
event.Skip()
def callbackExecuteApp( self, event ):
event.Skip()
def callbackViewMem( self, event ):
event.Skip()
def callbackClearMem( self, event ):
event.Skip()
def callbackClearLog( self, event ):
event.Skip()
def callbackSaveLog( self, event ):
event.Skip()
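# Minimal usage sketch (an assumption for illustration, not part of the generated file):
# wxFormBuilder emits this frame as a base class; an application derives from it and
# overrides the virtual handlers, e.g.
#
#   class MyMainFrame(GeneratedBaseFrame):      # "GeneratedBaseFrame" is a hypothetical name
#       def callbackScanFuse(self, event):
#           self.m_textCtrl_log.AppendText("Scan requested\n")
#
#   app = wx.App()
#   MyMainFrame(None).Show()
#   app.MainLoop()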
``` |
{
"source": "john-hawkins/Dataset_Summarizer",
"score": 3
} |
#### File: Dataset_Summarizer/dfsummarizer/funcs.py
```python
from io import StringIO
import pandas as pd
import numpy as np
import operator
import math
import os
from .config import max_filesize
from .FlajoletMartin import FMEstimator
"""
dfsummarizer.funcs: Core functions of the dfsummarizer package.
analyse_df( pandas_dataframe): return a summary dataframe of the input dataframe
analyse_file_in_chunks(path_to_file): read the dataset in chunks and provide a summary
"""
########################################################################################
def analyse_file(path_to_file):
df = load_complete_dataframe(path_to_file)
summary = analyse_df(df)
return summary
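# Usage sketch (illustrative; "example.csv" is a made-up path):
#
#   from dfsummarizer.funcs import analyse_file, print_markdown
#   summary = analyse_file("example.csv")
#   print_markdown(summary)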
########################################################################################
def analyse_df(df):
"""
Given a pandas dataframe that is already in memory we generate a table of summary
statistics and descriptors.
"""
colnames = df.columns
records = len(df)
df = coerce_dates(df)
rez = pd.DataFrame(columns=('Name', 'Type', 'Mode', 'Unique Vals', 'Unique', 'Nulls', 'Min', 'Mean', 'Max'))
for name in colnames:
nacount = len(df[df[name].isna()])
napercent = round(100*nacount/records,1)
uniques = df[name].unique().tolist()
if np.nan in uniques :
uniques.remove(np.nan)
unicount = len(uniques)
unipercent = round(100*unicount/records,1)
mode = df[name].mode(dropna=False)[0]
#if (type(mode) == float) & np.isnan(mode):
# mode = "NaN"
if mode != mode:
mode = "NaN"
#valtype = infer_type(str(type(df.loc[1,name])), unicount, uniques)
valtype = infer_type_2( df.loc[:,name], 0, unicount, uniques)
if (valtype == "Char") :
lenvec = df[name].apply(lambda x: len_or_null(x))
themin = round(lenvec.min(),3) # "-"
themean = round(lenvec.mean(),3) #"-"
themax = round(lenvec.max(),3) #"-"
elif (valtype == "Bool") :
newvec = df[name].apply(lambda x: booleanize(x))
themin = round(newvec.min(),3)
themean = round(newvec.mean(),3)
themax = round(newvec.max(),3)
else:
if (valtype != "Date") :
themin = round(df[name].min(),3)
themean = round(df[name].mean(),3)
themax = round(df[name].max(),3)
else :
themin = str(df[name].min())[0:10]
themean = str(df[name].mean())[0:10] #"-"
themax = str(df[name].max())[0:10]
values_to_add = {
'Name':name,
'Type':valtype,
'Mode':mode,
'Unique Vals':unicount,
'Unique':unipercent,
'Nulls':napercent,
'Min':themin,
'Mean': themean,
'Max':themax
}
rez = rez.append(values_to_add, ignore_index=True)
return rez
########################################################################################
def analyse_file_in_chunks(path_to_file):
"""
Given a path to a large dataset we will iteratively load it in chunks and build
out the statistics necessary to summarise the whole dataset.
"""
fsize = os.stat(path_to_file).st_size
sample_prop = max_filesize / fsize
line_count = count_lines(path_to_file)
chunks = round(line_count * sample_prop)
temp = {}
data_iterator = pd.read_csv(path_to_file, chunksize=chunks, low_memory=False)
total_chunks = 0
for index, chunk in enumerate(data_iterator, start=0):
startpoint = 0 + (index*chunks)
total_chunks = index + 1
temp = update_temp_summary(temp, chunk, startpoint)
summary = generate_final_summary(temp, total_chunks)
return summary
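# Usage sketch (illustrative; "large_file.csv" is a made-up path): the chunked variant
# keeps only per-column running statistics in memory, so it suits files larger than RAM.
#
#   summary = analyse_file_in_chunks("large_file.csv")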
########################################################################################
def generate_final_summary(temp, total_chunks):
rez = pd.DataFrame(columns=('Name', 'Mode', 'Type', 'Unique Vals', 'Unique', 'Nulls', 'Min', 'Mean', 'Max'))
for name in temp.keys():
col = temp[name]
total = col['nulls'] + col['nonnulls']
mode = max(col['val_counts'].items(), key=operator.itemgetter(1))[0]
unicount = col['uniques'].estimate()
if unicount > total:
uniprop = 1.0
unicount = total
else:
uniprop = unicount / total
unipercent = round(100 * uniprop, 1)
napercent = round((100 * col['nulls']) / total, 1)
if (col['type'] != "Date") :
themean = col['sum'] / total
else:
themean = col['mean']
values_to_add = {
'Name':name,
'Mode':mode,
'Type': col['type'],
'Unique Vals':unicount,
'Unique':unipercent,
'Nulls':napercent,
'Min': col['min'],
'Mean': themean,
'Max': col['max']
}
rez = rez.append(values_to_add, ignore_index=True)
return rez
########################################################################################
def clean_dict(df, col):
temp = df[col].value_counts(dropna=False)
indeces = temp.index
newie = []
for i in indeces:
if np.isnan(i):
newie.append("NaN")
else:
newie.append(i)
temp.index = newie
return temp.to_dict()
def combine_dicts(a, b, op=operator.add):
return {**a, **b, **{k: op(a[k], b[k]) for k in a.keys() & b}}
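# Worked example (illustrative): combine_dicts({"a": 2, "b": 1}, {"b": 3, "c": 5})
# returns {"a": 2, "b": 4, "c": 5} -- shared keys are added, the rest pass through.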
########################################################################################
def update_temp_summary(temp, df, startpoint):
colnames = df.columns
records = len(df)
df = coerce_dates(df)
for name in colnames:
if name in temp:
rez = temp[name]
else:
rez = { "type":[], "val_counts":{}, "sum":0, "mean":np.nan,
"min":np.nan, "max":np.nan,
"uniques":FMEstimator(), "nulls":0,
"nonnulls":0
}
nacount = len(df[df[name].isna()])
nonnulls = len(df) - nacount
val_counts = clean_dict(df, name)
uniques = df[name].unique().tolist()
if np.nan in uniques :
uniques.remove(np.nan)
unicount = len(uniques)
uniprop = unicount / len(df)
valtype = infer_type_2( df.loc[:,name], startpoint, unicount, uniques)
if (valtype == "Char") :
lenvec = df[name].apply(lambda x: len_or_null(x))
themin = round(lenvec.min(),3) # "-"
thesum = round(lenvec.sum(),3) #"-"
themax = round(lenvec.max(),3) #"-"
elif (valtype == "Bool") :
newvec = df[name].apply(lambda x: booleanize(x))
themin = round(newvec.min(),3)
thesum = round(newvec.sum(),3)
themax = round(newvec.max(),3)
else:
if (valtype != "Date") :
themin = round(df[name].min(),3)
thesum = round(df[name].sum(),3)
themax = round(df[name].max(),3)
else :
themin = str(df[name].min())[0:10]
themean = df[name].mean()
themax = str(df[name].max())[0:10]
rez['type'] = valtype
if (valtype != "Date") :
rez['sum'] = rez['sum'] + thesum
else:
if isNaN(rez['mean']):
rez['mean'] = themean
# else:
# rez['mean'] = rez['mean'] + (rez['mean'] - themean)/2
# ABOVE IS OFF FOR THE MOMENT - i.e Keep the first mean
rez['nulls'] = rez['nulls'] + nacount
if isNaN( rez['min'] ) or themin < rez['min']:
rez['min'] = themin
if isNaN( rez['max'] ) or themax > rez['max']:
rez['max'] = themax
rez['uniques'].update_all(uniques)
#rez['uniques'] += uniprop
rez['val_counts'] = combine_dicts(rez['val_counts'], val_counts)
rez['nonnulls'] = rez['nonnulls'] + nonnulls
temp[name] = rez
return temp
########################################################################################
def extract_file_extension(path_to_file):
return os.path.splitext(path_to_file)[1]
########################################################################################
def load_complete_dataframe(path_to_file):
"""
We load the entire dataset into memory, using the file extension to determine
the expected format. We are using encoding='latin1' because it appears to
permit loading of the largest variety of files.
Representation of strings may not be perfect, but is not important for generating a
summarization of the entire dataset.
"""
extension = extract_file_extension(path_to_file).lower()
if extension == ".csv":
df = pd.read_csv(path_to_file, encoding='latin1', low_memory=False)
return df
if extension == ".tsv":
df = pd.read_csv(path_to_file, encoding='latin1', sep='\t', low_memory=False)
return df
if extension == ".xls" or extension == ".xlsx" or extension == ".odf" :
df = pd.read_excel(path_to_file)
return df
raise ValueError("Unsupported File Type")
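# Example (illustrative): ".csv" and ".tsv" go through pd.read_csv, spreadsheet
# extensions go through pd.read_excel, and any other extension (e.g. ".json") raises ValueError.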
########################################################################################
def infer_type_2( thecolumn, startpoint, unicount, uniques):
thetype = get_first_non_null_type(thecolumn, startpoint)
return infer_type(thetype, unicount, uniques)
########################################################################################
def get_first_non_null_type(thecolumn, startpoint):
thetype = ""
index = startpoint
while thetype == "":
temptype = str(type(thecolumn[index]))
tempval = thecolumn[index]
if tempval != tempval: # NaN check; np.nan == np.nan is always False
thetype = ""
elif tempval is None:
thetype = ""
elif temptype == "<class 'pandas._libs.tslibs.nattype.NaTType'>":
thetype = ""
else:
thetype = temptype
index = index + 1
return thetype
########################################################################################
def infer_type(thetype, unicount, uniques):
valtype = "Char"
if thetype == "<class 'numpy.float64'>" :
valtype = "Float"
if thetype == "<class 'numpy.int64'>" :
valtype = "Int"
if thetype == "<class 'pandas._libs.tslib.Timestamp'>" :
valtype = "Date"
if thetype == "<class 'pandas._libs.tslibs.timestamps.Timestamp'>" :
valtype = "Date"
if thetype == "<class 'numpy.bool_'>":
valtype = "Bool"
if thetype == "<class 'bool'>":
valtype = "Bool"
# Additional Inference of Booleans by strings with 2 unique values
# and common names as additional criteria
if (valtype == "Char") :
if unicount == 2:
temp = [x.lower() for x in uniques if x is not None]
temp.sort()
if (temp == ['no', 'yes']):
valtype = "Bool"
if (temp == ['n', 'y']):
valtype = "Bool"
if (temp == ['false', 'true']):
valtype = "Bool"
if (temp == ['f', 't']):
valtype = "Bool"
return valtype
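# Example (illustrative): a string column whose only two unique values are "Y"/"N"
# is reported as Bool, e.g. infer_type("<class 'str'>", 2, ["Y", "N"]) -> "Bool".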
########################################################################################
def count_lines(path_to_file):
"""
Return a count of total lines in a file, in a way that keeps memory use independent of file size
"""
count = 0
for line in open(path_to_file): count += 1
return count
########################################################################################
def len_or_null(val):
"""
Alternative len function that will simply return numpy.nan for invalid values.
This is needed to get sensible results when running len over a column that may contain nulls.
"""
try:
return len(val)
except:
return np.nan
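# Example (illustrative): len_or_null("abc") -> 3 while len_or_null(np.nan) -> nan,
# so length statistics can be taken over a column that still contains nulls.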
########################################################################################
def isNaN(num):
return num != num
########################################################################################
def booleanize(x):
if isNaN(x) :
return x
elif x is None :
return x
elif str(type(x)) == "<class 'bool'>":
return x
else :
x = x.lower()
if x == "yes" or x == "y" or x == "true" or x == "t" or x == 1:
return 1
else :
return 0
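# Example (illustrative): booleanize("Yes") -> 1 and booleanize("No") -> 0, while
# NaN/None values pass through unchanged so null counts are not distorted.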
########################################################################################
def coerce_dates(df):
return df.apply(
lambda col: pd.to_datetime(col, errors='ignore')
if col.dtypes == object
else col,
axis=0
)
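# Example (illustrative): an object column holding strings such as "2020-01-31" is
# converted to datetime64; columns that fail to parse are returned unchanged
# because errors='ignore' is used.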
########################################################################################
def print_latex(summary):
print("\\begin{table}[h!]")
print(" \\begin{center}")
print(" \\caption{Data Summary Table}")
print(" \\label{tab:table1}")
print(" \\begin{tabular}{l|l|r|r|l|r|r|r} ")
print(" \\textbf{Name} & \\textbf{Type} & \\textbf{Unique Vals \\%} & \\textbf{Nulls \\%} & \\textbf{Mode} & \\textbf{Min} & \\textbf{Mean} & \\textbf{Max}\\\\")
print(" \\hline")
for i in range(len(summary)):
print(" ", summary.loc[i,"Name"],
"&", summary.loc[i,"Type"],
"&", summary.loc[i,"Unique Vals"], "\\%",
"&", summary.loc[i,"Nulls"], "\\%",
"&", summary.loc[i,"Mode"],
"&", summary.loc[i,"Min"],
"&", summary.loc[i,"Mean"],
"&", summary.loc[i,"Max"], "\\\\")
print(" \\end{tabular}")
print(" \\end{center}")
print("\\end{table}")
########################################################################################
def get_spaces(spacer):
rez = ""
for i in range(spacer):
rez = rez + " "
return rez
########################################################################################
def get_type_spacer(t):
if (t == "Int") :
return " "
if (t == "Char") :
return " "
if (t == "Date") :
return " "
if (t == "Float") :
return " "
return " "
########################################################################################
def get_percent_spacer(p):
if (p==100.0):
return " "
elif (p>=10):
return " "
else:
return " "
########################################################################################
def get_padded_number(n):
if (n == "-"):
return " - "
if (str(n).replace('.','',1).replace('-','',1).isdigit()):
if (n<0):
adjus = -1
else:
adjus = 0
if (abs(n)<10):
return get_spaces(8 - after_decimal(n) + adjus) + str(n) + " "
if (abs(n)<100):
return get_spaces(7 - after_decimal(n) + adjus) + str(n)+ " "
if (abs(n)<1000):
return get_spaces(6 - after_decimal(n) + adjus) + str(n)+ " "
if (abs(n)<10000):
return get_spaces(5 - after_decimal(n) + adjus) + str(n)+ " "
if (abs(n)<100000):
return get_spaces(4 - after_decimal(n) + adjus) + str(n)+ " "
if (abs(n)<1000000):
return get_spaces(3 - after_decimal(n) + adjus) + str(n)+ " "
if (abs(n)<10000000):
return get_spaces(2 - after_decimal(n) + adjus) + str(n)+ " "
else:
number = "{:.2e}".format(n)
return get_spaces(2 + adjus) + number + " "
else:
return str(n) + " "
########################################################################################
def after_decimal(n):
arr = str(n).split(".")
if( len(arr)==2 ):
return len(arr[1])
else:
return -1
########################################################################################
def print_csv(s):
output = StringIO()
s.to_csv(output, index=False)
print(output.getvalue())
########################################################################################
def get_padded_val2(val, spacer):
filler = ""
if spacer > 10:
filler = (get_spaces(spacer - 10))
if type(val) == int:
return filler + get_padded_number(val)
if type(val) == float:
return filler + get_padded_number(val)
else:
printval = str(val)
return (get_spaces(spacer - len(printval)) + printval + " ")
def get_padded_val(val):
return str(val) + " - " + str(type(val))
########################################################################################
def print_markdown(s):
longest_name = max(s["Name"].apply(lambda x: len_or_null(x)))
if(longest_name>4):
name_spacer = longest_name+2
else:
name_spacer = 6
longest_mode = max(s["Mode"].apply(lambda x: len_or_null(str(x))))
if(longest_mode>10):
mode_spacer = longest_mode
else:
mode_spacer = 10
print("| Name ", get_spaces(name_spacer-6),
"| Type | Unique Vals | Nulls | Mode ", get_spaces(mode_spacer-4),
"| Min | Mean | Max |", sep="")
print("| ---- ", get_spaces(name_spacer-6),
"| ------ | ----------- | ------- | ---- ", get_spaces(mode_spacer-4),
"| --- | ---- | --- |", sep="")
for i in range(len(s)):
print("| ", s.loc[i,"Name"],
get_spaces(name_spacer - len(s.loc[i,"Name"]) - 1 ),
"| ", s.loc[i,"Type"], get_type_spacer(s.loc[i,"Type"]),
"| ", get_padded_number(s.loc[i,"Unique Vals"]),
"| ", get_percent_spacer(s.loc[i,"Nulls"]), s.loc[i,"Nulls"],"% ",
"| ", get_padded_val2(s.loc[i,"Mode"], mode_spacer),
"| ", get_padded_number(s.loc[i,"Min"]),
"| ", get_padded_number(s.loc[i,"Mean"]),
"| ", get_padded_number(s.loc[i,"Max"]), "|", sep="")
########################################################################################
def round_down(n, decimals=0):
"""
    Round down a number to a specified number of decimal places
"""
multiplier = 10 ** decimals
return math.floor(n * multiplier) / multiplier
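# Quick sanity examples for round_down (illustrative only):
#   round_down(2.567, 2) -> 2.56
#   round_down(99.9)     -> 99.0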
``` |
{
"source": "john-hawkins/Minimum_Required_MLModel_Estimator",
"score": 3
} |
#### File: Minimum_Required_MLModel_Estimator/minvime/estimator_regression.py
```python
import random
import numpy as np
import pandas as pd
from .generator import extract_distribution_from_sample
from .generator import produce_distribution_sample
from .generator import resample_toward_mean
from .generator import generate_min_max_baseline
from .generator import generate_candidate_predictions
from .generator import copy_with_noise
######################################################################
def estimate_model_requirements_thresholded(dist, cases, pred_value, under_pred, under_pred_unit, under_pred_threshold,
over_pred, over_pred_unit, over_pred_threshold, minroi):
candidates = generate_candidate_predictions(dist)
rez_rmse = 0
rez_mape = 0
rez_mae = 0
rez_return = 0
current_roi = -999
for candidate in candidates:
roi = calculate_candidate_roi(dist, candidate, pred_value, under_pred, under_pred_unit, over_pred, over_pred_unit)
        if current_roi == -999 or ( roi >= minroi and current_roi > roi):
            current_roi = roi  # presumed intent: remember this candidate's ROI; without this update the comparison never changes
            rez_rmse, rez_mape, rez_mae = calculate_candidate_metrics(dist, candidate)
return rez_rmse, rez_mape, rez_mae
######################################################################
def estimate_model_requirements_proportional( dist, cases, pred_value, under_pred, under_pred_unit, over_pred, over_pred_unit, minroi):
""" Given a sample of target values and requirements -- estimate baseline performance characteristics """
candidates = generate_candidate_predictions(dist)
rez_rmse = 0
rez_mape = 0
rez_mae = 0
rez_return = 0
current_roi = -999
for candidate in candidates:
roi = calculate_candidate_roi(dist, candidate, pred_value, under_pred, under_pred_unit, over_pred, over_pred_unit)
        if current_roi == -999 or ( roi >= minroi and current_roi > roi):
            current_roi = roi  # presumed intent: remember this candidate's ROI; without this update the comparison never changes
            rez_rmse, rez_mape, rez_mae = calculate_candidate_metrics(dist, candidate)
return rez_rmse, rez_mape, rez_mae
########################################################################
def calculate_candidate_metrics(dist, candidate):
errors = [ x-y for x,y in zip(dist, candidate) ]
sqrerrors = [ (x-y)*(x-y) for x,y in zip(dist, candidate) ]
zero_adj_dist = [ 0.0001 if x==0 else x for x in dist]
abspcterror = [ 100*abs(x-y)/x for x,y in zip(zero_adj_dist, candidate) ]
rmse = np.sqrt(np.mean(sqrerrors))
mae = np.mean(np.abs(errors))
mape = np.mean(abspcterror)
return rmse, mape, mae
########################################################################
def calculate_candidate_roi(dist, candidate, pred_value, under_pred, under_pred_unit, over_pred, over_pred_unit):
total = 0
for pred, act in zip(candidate, dist):
error = pred - act
abs_error = np.abs(error)
if act == 0:
p_error = 100 * abs_error/0.0001
else:
p_error = 100 * abs_error/act
if error<0:
if under_pred_unit=='percent':
total = total + pred_value - (under_pred * p_error)
else:
total = total + pred_value - (under_pred * abs_error)
else:
if over_pred_unit=='percent':
total = total + pred_value - (over_pred * p_error)
else:
total = total + pred_value - (over_pred * abs_error)
return total
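# Worked example of the ROI calculation above (hypothetical numbers): with dist=[100],
# candidate=[110], pred_value=20, over_pred=1 and over_pred_unit='absolute', the error is +10,
# so the single case contributes 20 - (1 * 10) = 10. With over_pred_unit='percent' the
# percentage error is 10, giving 20 - (1 * 10) = 10 as well.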
```
#### File: Minimum_Required_MLModel_Estimator/minvime/generator.py
```python
import random
import numpy as np
import pandas as pd
######################################################################
def extract_distribution_from_sample(filepath):
""" Extract a sample of target values from a file on the given path """
try:
df = pd.read_csv(filepath)
        indices = [ np.issubdtype(x, np.number) for x in df.dtypes]
        only_numeric = df.loc[:,indices]
if len(only_numeric.columns)==0:
return [], " your sample file: Please provide a CSV with a column of numeric values."
else:
return list(only_numeric.iloc[:,0]), ""
except pd.errors.ParserError:
return [], "Problem Parsing your sample file: Please provide a CSV with a column of numeric values."
except:
return [], "There was an unanticipated problem with your file. Please provide a CSV with a column of numeric values."
######################################################################
def produce_distribution_sample(mean, max, min):
""" Given some simple parameters we generate a sample of target values. TODO: This needs work """
# START WITH SAMPLES BETWEEN MIN AND MAX
baseline = generate_min_max_baseline(min, max)
threshold = (max-min)/200
enhanced = resample_toward_mean(baseline, mean, threshold)
return enhanced, ""
######################################################################
def resample_toward_mean(baseline, mean, threshold):
current_mean = np.mean(baseline)
rez = baseline.copy()
print("Target Mean:", mean, " baseline sample: ", len(baseline) )
while abs(mean - current_mean) > threshold:
temp = rez.copy()
new_sample = random.sample(rez, 1)[0]
temp.append(new_sample)
temp_mean = np.mean(temp)
if abs(mean-temp_mean)<abs(mean - current_mean):
current_mean = temp_mean
rez = temp
print("Sample accepted. New Mean:", current_mean)
else:
print("Sample rejected.")
return rez
######################################################################
def generate_min_max_baseline(min, max, sample_size=1000):
difference = max-min
return [min + (difference * x/(sample_size-1)) for x in range(sample_size)]
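# For example (illustrative): generate_min_max_baseline(0, 10, sample_size=5)
# returns [0.0, 2.5, 5.0, 7.5, 10.0] -- an evenly spaced grid between min and max.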
########################################################################
def generate_candidate_predictions(dist):
sigma = np.std(dist)
factors = [0.0001, 0.0005, 0.001, 0.005, 0.01, 0.05, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0]
candidates = []
for index, factor in enumerate(factors):
for variant in range(10):
candidates.append( copy_with_noise(dist, factor*sigma) )
return candidates
########################################################################
def copy_with_noise(dist, scale):
rez = list(map(lambda x: x + scale*np.random.normal(), dist))
return rez
``` |
{
"source": "johnhawkinson/juriscraper",
"score": 2
} |
#### File: united_states/state/arkctapp.py
```python
from juriscraper.opinions.united_states.state import ark
class Site(ark.Site):
def __init__(self, *args, **kwargs):
super(Site, self).__init__(*args, **kwargs)
self.url = 'http://opinions.aoc.arkansas.gov/weblink8/Browse.aspx?startid=39308'
self.court_id = self.__module__
```
#### File: united_states/state/fla.py
```python
import re
from lxml import html
from datetime import datetime
from juriscraper.OpinionSite import OpinionSite
from juriscraper.lib.exceptions import InsanityException
from juriscraper.lib.string_utils import convert_date_string
class Site(OpinionSite):
def __init__(self, *args, **kwargs):
super(Site, self).__init__(*args, **kwargs)
self.court_id = self.__module__
self.regex = re.compile("(S?C\d+-\d+)(.*)")
self.regex_date = re.compile(r'(?:.*\s)?(\w+\s\d+,\s\d{4})(?:.*)?')
self.url = 'http://www.floridasupremecourt.org/decisions/opinions.shtml'
def _download(self, request_dict={}):
html = super(Site, self)._download(request_dict)
self.base_path = "//h2[contains(., '%s')]" % self.extract_year_from_h1(html)
return html
def _get_case_names(self):
path = '{base}/text()/following::ul[1]//li' \
'//a[not(contains(., "Notice"))][not(contains(., "Rehearing Order"))]'.format(
base=self.base_path)
case_names = []
for e in self.html.xpath(path):
s = ' '.join(e.xpath('.//text()'))
try:
case_name = self.regex.search(s).group(2)
if not case_name.strip():
continue
else:
case_names.append(case_name)
except AttributeError:
pass
return case_names
def _get_download_urls(self):
path = '{base}/text()/following::ul[1]//li' \
'//a[not(contains(., "Notice"))][not(contains(., "Rehearing Order"))]'.format(
base=self.base_path)
urls = []
for e in self.html.xpath(path):
try:
case_name_check = self.regex.search(html.tostring(e, method='text', encoding='unicode')).group(2)
if not case_name_check.strip():
continue
else:
urls.append(e.xpath('@href')[0])
except AttributeError:
pass
return urls
def _get_case_dates(self):
case_dates = []
for e in self.html.xpath(self.base_path):
text = e.text_content()
if 'No opinions released' in text:
continue
date_string = self.regex_date.search(text).group(1)
count = 0
for a in e.xpath('./following::ul[1]//li//a[not(contains(., "Notice"))][not(contains(., "Rehearing Order"))]'):
try:
case_name_check = self.regex.search(html.tostring(a, method='text', encoding='unicode')).group(2)
if case_name_check.strip():
count += 1
except AttributeError:
pass
case_dates.extend([convert_date_string(date_string)] * count)
return case_dates
def _get_precedential_statuses(self):
return ['Published'] * len(self.case_names)
def _get_docket_numbers(self):
path = '{base}/text()/following::ul[1]//li' \
'//a[not(contains(., "Notice"))][not(contains(., "Rehearing Order"))]'.format(base=self.base_path)
docket_numbers = []
for a in self.html.xpath(path):
try:
case_name_check = self.regex.search(html.tostring(a, method='text', encoding='unicode')).group(2)
if not case_name_check.strip():
continue
else:
docket_numbers.append(self.regex.search(html.tostring(a, method='text', encoding='unicode')).group(1))
except AttributeError:
pass
return docket_numbers
def extract_year_from_h1(self, html):
"""For testability with example files from previous years,
we can't use the current year in the base_path search, and
instead need to extract the year from the pages <h1> tag.
This is also handy early in the calendar year if/when the
court is publishing new opinions for the end of the previous
year
"""
year_string = html.xpath('//h1')[0].text_content().split()[3]
# Basic validation of year
if len(year_string) != 4 or not year_string.startswith('20'):
            raise InsanityException('Extracted year ("%s") appears to be invalid' % year_string)
# If running live scrape, year should always be this year or last year
if self.method != 'LOCAL':
this_year = datetime.today().year
if int(year_string) not in [this_year, this_year - 1]:
raise InsanityException('Year ("%s") too far in the future or past' % year_string)
return year_string
```
#### File: united_states/state/iowa.py
```python
import re
import time
from datetime import date
from juriscraper.OpinionSite import OpinionSite
from juriscraper.lib.string_utils import titlecase, clean_if_py3
class Site(OpinionSite):
def __init__(self, *args, **kwargs):
super(Site, self).__init__(*args, **kwargs)
self.court_id = self.__module__
self.year = date.today().year
self.url = 'http://www.iowacourts.gov/About_the_Courts/Supreme_Court/Supreme_Court_Opinions/Opinions_Archive/index.asp'
def _download(self, request_dict={}):
if self.method == 'LOCAL':
# Note that this is returning a list of HTML trees.
html_trees = [super(Site, self)._download(request_dict=request_dict)]
else:
html_l = OpinionSite._download(self)
html_trees = []
for url in html_l.xpath("//td[@width='49%']//tr[contains(., ', {year}')]/td[5]/a/@href".format(year=self.year)):
html_tree = self._get_html_tree_by_url(url, request_dict)
html_trees.append(html_tree)
return html_trees
def _get_case_names(self):
case_names = []
for html_tree in self.html:
case_names.extend(self._return_case_names(html_tree))
return case_names
@staticmethod
def _return_case_names(html_tree):
path = "//*[contains(concat(' ',@id,' '),' wfLabel')]/text()"
return [titlecase(s.strip().lower()) for s in html_tree.xpath(path)]
def _get_download_urls(self):
download_urls = []
for html_tree in self.html:
download_urls.extend(self._return_download_urls(html_tree))
return download_urls
@staticmethod
def _return_download_urls(html_tree):
path = "//*[contains(concat(' ',@id,' '),' wfLabel')]/preceding::tr[2]/td[1]/a/@href"
return list(html_tree.xpath(path))
def _get_case_dates(self):
case_dates = []
for html_tree in self.html:
case_dates.extend(self._return_dates(html_tree))
return case_dates
@staticmethod
def _return_dates(html_tree):
path = "//*[contains(concat(' ',@id,' '),' wfHeader') and not(contains(., 'Iowa'))]/text()"
dates = []
text = clean_if_py3(html_tree.xpath(path)[0])
case_date = date.fromtimestamp(time.mktime(time.strptime(text.strip(), '%B %d, %Y')))
dates.extend([case_date] * int(html_tree.xpath("count(//*[contains(concat(' ',@id,' '),' wfLabel')])")))
return dates
def _get_precedential_statuses(self):
return ['Published'] * len(self.case_dates)
def _get_docket_numbers(self):
docket_numbers = []
for html_tree in self.html:
docket_numbers.extend(self._return_docket_numbers(html_tree))
return docket_numbers
@staticmethod
def _return_docket_numbers(html_tree):
path = "//*[contains(concat(' ',@id,' '),' wfLabel')]/preceding::tr[2]/td[1]/a/text()"
return [clean_if_py3(re.sub(r'Nos?.', '', e).strip())
for e in html_tree.xpath(path)]
```
#### File: juriscraper/pacer/utils.py
```python
import re
import requests
import tldextract
from six import string_types
from ..lib.string_utils import force_unicode
def get_pacer_court_info():
r = requests.get("https://court-version-scraper.herokuapp.com/courts.json")
return r.json()
def get_courts_from_json(j):
courts = []
for k, v in j.items():
for court in v['courts']:
court['type'] = k
courts.append(court)
return courts
def get_court_id_from_url(url):
"""Extract the court ID from the URL."""
parts = tldextract.extract(url)
return parts.subdomain.split('.')[1]
def get_pacer_case_id_from_docket_url(url):
"""Extract the pacer case ID from the docket URL.
In: https://ecf.almd.uscourts.gov/cgi-bin/DktRpt.pl?56120
Out: 56120
In: https://ecf.azb.uscourts.gov/cgi-bin/iquery.pl?625371913403797-L_9999_1-0-663150
Out: 663150
"""
param = url.split('?')[1]
if 'L' in param:
return param.rsplit('-', 1)[1]
return param
def get_pacer_doc_id_from_doc1_url(url):
"""Extract the pacer document ID from the doc1 URL. Coerce the fourth digit
to zero.
In: https://ecf.almd.uscourts.gov/doc1/01712427473
Out: 01702427473
In: /doc1/01712427473
Out: 01702427473
Note that results are strings, not ints, because many of the strings start
with zero.
See tests for more examples.
"""
url = url.rsplit('/', 1)[1].split('?')[0]
url = url[:3] + "0" + url[4:]
return url
def reverse_goDLS_function(s):
"""Extract the arguments from the goDLS JavaScript function.
In: goDLS('/doc1/01712427473','56121','69','','','1','','');return(false);
Out: {
'form_post_url': '/doc1/01712427473',
'caseid': '56121',
'de_seq_num': '69',
'got_receipt': '',
'pdf_header': '',
'pdf_toggle_possible': '1',
'magic_num': '',
'hdr': '',
}
The key names correspond to the form field names in the JavaScript on PACER,
but we don't actually know what each of these values does. Our best
speculation is:
- form_post_url: Where the form is posted to. The HTML 'action' attribute.
- caseid: The internal PACER ID for the case.
- de_seq_num: Unclear. This seems to be the internal ID for the document,
but this field can be omitted without any known issues.
- got_receipt: If set to '1', this will bypass the receipt page and
immediately direct you to the page where the PDF is embedded in an
iframe.
- pdf_header: Can be either 1 or 2. 1: Show the header. 2: No header.
- pdf_toggle_possible: This seems to always be 1. Could be that some courts
      do not allow the header to be turned off, but we haven't discovered that
yet.
- magic_num: This is used for the "One free look" downloads.
- hdr: Unclear what HDR stands for but on items that have attachments,
passing this parameter bypasses the download attachment screen and takes
you directly to the PDF that you're trying to download. For an example,
see document 108 from 1:12-cv-00102 in tnmd, which is a free opinion that
has an attachment.
"""
args = re.findall("\'(.*?)\'", s)
return {
'form_post_url': args[0],
'caseid': args[1],
'de_seq_num': args[2],
'got_receipt': args[3],
'pdf_header': args[4],
'pdf_toggle_possible': args[5],
'magic_num': args[6],
'hdr': args[7],
}
def make_doc1_url(court_id, pacer_doc_id, skip_attachment_page):
"""Make a doc1 URL.
If skip_attachment_page is True, we replace the fourth digit with a 1
instead of a zero, which bypasses the attachment page.
"""
if skip_attachment_page and pacer_doc_id[3] == '0':
# If the fourth digit is a 0, replace it with a 1
pacer_doc_id = pacer_doc_id[:3] + '1' + pacer_doc_id[4:]
return 'https://ecf.%s.uscourts.gov/doc1/%s' % (court_id,
pacer_doc_id)
def is_pdf(response):
"""Determines whether the item downloaded is a PDF or something else."""
if response.headers.get('content-type') == 'application/pdf':
return True
return False
def clean_pacer_object(obj):
"""Clean a list or dict that is part of a scraping response.
PACER data is notoriously horrible, so this function attempts to clean up
common problems that it may have. You can pass in either a dict or a list,
and it will be cleaned recursively.
Supported cleanup includes:
     1. Removing spaces before commas.
     2. Stripping whitespace from the ends.
     3. Normalizing white space.
     4. Forcing unicode.
:param obj: A dict or list containing string objects.
:return: A dict or list with the string values cleaned.
"""
if isinstance(obj, list):
l = []
for i in obj:
l.append(clean_pacer_object(i))
return l
elif isinstance(obj, dict):
d = {}
for k, v in obj.items():
d[k] = clean_pacer_object(v)
return d
elif isinstance(obj, string_types):
s = ' '.join(obj.strip().split())
s = force_unicode(s)
        return re.sub(r'\s+,', ',', s)
else:
return obj
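# Minimal illustration of the recursive cleanup (hypothetical input):
#   clean_pacer_object({'parties': ['  Smith , John  ', 'Doe ,  Jane']})
#     -> {'parties': ['Smith, John', 'Doe, Jane']}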
``` |
{
"source": "john-hawkins/Text_Feature_Generator",
"score": 3
} |
#### File: Text_Feature_Generator/texturizer/comparison.py
```python
import sys
import numpy as np
import jellyfish
import textdistance
import pandas as pd
#################################################################################
def add_comparison_features(df,columns):
"""
This is the entry point to add all the core text similarity features.
Note: We left out <NAME> from the set of metrics because it
takes close to an order of magnitude longer to compute.
Initial version just includes 4 string edit distance metrics.
"""
return add_string_match_features(df,columns)
#################################################################################
def add_string_match_features(df,columns):
"""
Return a copy of a dataframe with features describing matching
between the set of named text columns
"""
def sm_features(x, col1, col2):
if (x[col1] != x[col1]) or (x[col2] != x[col2]):
jd = np.nan
ld = np.nan
ji = np.nan
sd = np.nan
else:
raw_text1 = x[col1].lower()
raw_text2 = x[col2].lower()
jd = jellyfish.jaro_distance(raw_text1,raw_text2)
ld = jellyfish.levenshtein_distance(raw_text1,raw_text2)
ji = textdistance.jaccard(raw_text1,raw_text2)
sd = textdistance.sorensen(raw_text1,raw_text2 )
return jd, ld, ji, sd
col_number = len(columns)
for i in range( col_number-1 ):
for j in range(i+1,col_number):
col1 = columns[i]
col2 = columns[j]
prefix = col1 + "_vs_" + col2
df[[prefix+'_jd', prefix+'_ld',prefix+'_ji',prefix+'_sd']] = df.apply(sm_features, col1=col1,col2=col2, axis=1, result_type="expand")
return df
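# Illustrative usage (column names are hypothetical): each pair of text columns gains four
# similarity features, e.g. a_vs_b_jd, a_vs_b_ld, a_vs_b_ji and a_vs_b_sd.
#   df = pd.DataFrame({"a": ["kitten"], "b": ["sitting"]})
#   add_string_match_features(df, ["a", "b"])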
#################################################################################
def add_ratcliff_obershelp(df,columns):
"""
Return a copy of a dataframe with features describing matching
between the set of named text columns
"""
def sm_features(x, col1, col2):
if (x[col1] != x[col1]) or (x[col2] != x[col2]):
ro = np.nan
else:
raw_text1 = x[col1].lower()
raw_text2 = x[col2].lower()
ro = textdistance.ratcliff_obershelp(raw_text1,raw_text2)
return ro
col_number = len(columns)
for i in range( col_number-1 ):
for j in range(i+1,col_number):
col1 = columns[i]
col2 = columns[j]
prefix = col1 + "_vs_" + col2
df[[prefix+'_rat_obers']] = df.apply(sm_features, col1=col1,col2=col2, axis=1, result_type="expand")
return df
```
#### File: Text_Feature_Generator/texturizer/reason.py
```python
import pkg_resources
import pandas as pd
import numpy as np
import math
import os
import re
from .process import load_word_pattern
"""
texturizer.reason: Reason and Argument feature flags
Some ideas taken from these articles
Logical Reasoning: <NAME>
https://www.flexiblemindtherapy.com/uploads/6/5/5/2/65520823/logical-reasoning.pdf
"""
########################################################################################
premises_pat = load_word_pattern('premises.dat')
premises_re = re.compile(premises_pat)
reasoning_pat = load_word_pattern('reasoning.dat')
reasoning_re = re.compile(reasoning_pat)
conclusions_pat = load_word_pattern('conclusions.dat')
conclusions_re = re.compile(conclusions_pat)
########################################################################################
def add_text_reason_features(df, columns):
"""
Given a pandas dataframe and a set of column names.
calculate the reasoning features and add them.
"""
rez = df.copy()
for col in columns:
rez = add_reason_counts(rez, col)
return rez
########################################################################################
def add_reason_counts(df, col):
"""
Given a pandas dataframe and a column name.
Count the number of keyword matches for each trait
"""
df[col+'_premise']=df[col].str.count(premises_pat, flags=re.IGNORECASE)
df[col+'_reason']=df[col].str.count(reasoning_pat, flags=re.IGNORECASE)
df[col+'_conclusion']=df[col].str.count(conclusions_pat, flags=re.IGNORECASE)
return df
```
#### File: Text_Feature_Generator/texturizer/texturizer.py
```python
import pandas as pd
import sys
import os
from .process import load_complete_dataframe
from .process import process_file_in_chunks
from .process import initialise_profile
from .process import print_profiles
from .process import print_output
from .featurize import generate_feature_function
from .config import max_filesize
def main():
"""Main texturizer application entry point.
parses out CL options and determine the size of the file.
Then process the file for the requested features
"""
if len(sys.argv) < 2:
print("ERROR: MISSING ARGUMENTS")
print_usage(sys.argv)
exit(1)
else:
params = get_cmd_line_params(sys.argv)
if not os.path.exists(params["dataset"]):
print("ERROR: Dataset does not exist")
print_usage(sys.argv)
exit(1)
initialise_profile()
feature_func = generate_feature_function(params)
filesize = os.stat(params["dataset"]).st_size
if filesize<max_filesize:
df = load_complete_dataframe( params["dataset"] )
simple = feature_func(df)
print_output( simple )
else:
process_file_in_chunks(params["dataset"], feature_func)
print_profiles()
#############################################################
def get_cmd_line_params(argv):
""" parse out the option from an array of command line arguments """
data = argv[-1]
options = argv[1:-1]
result = {"dataset":data,
"columns":[],
"profanity":False,
"sentiment":False,
"emoticons":False,
"topics":False,
"count_matches":False,
"traits":False,
"reason":False,
"rhetoric":False,
"pos":False,
"literacy":False,
"scarcity":False,
"comparison":False,
"embedding":False,
"normalize_embedding":False,
"normalize_topics":False,
}
for o in options:
parts = o.split("=")
if parts[0] == "-literacy":
result["literacy"]=True
if parts[0] == "-profanity":
result["profanity"]=True
if parts[0] == "-scarcity":
result["scarcity"]=True
if parts[0] == "-sentiment":
result["sentiment"]=True
if parts[0] == "-topics":
result["topics"]=True
if len(parts)>1:
if parts[1] == 'count':
result["count_matches"]=True
if parts[1] == 'normalize':
result["count_matches"]=True
result["normalize_topics"]=True
if parts[0] == "-traits":
result["traits"]=True
if parts[0] == "-reason":
result["reason"]=True
if parts[0] == "-rhetoric":
result["rhetoric"]=True
if parts[0] == "-pos":
result["pos"]=True
if parts[0] == "-emoticons":
result["emoticons"]=True
if parts[0] == "-embedding":
result["embedding"]=True
if len(parts)>1:
if parts[1] == 'normalize':
result["normalize_embedding"]=True
if parts[0] == "-comparison":
result["comparison"]=True
if parts[0] == "-columns":
cols = parts[1].split(",")
result["columns"]=cols
return result
#############################################################
def print_usage(args):
""" Command line application usage instrutions. """
print("USAGE ")
print(args[0], " [ARGS] <PATH TO DATASET>")
print(" <PATH TO DATASET> - Supported file types: csv, tsv, xls, xlsx, odf")
print(" [ARGS] In most cases these are switches that turn on the feature type")
print(" -columns=<COMMA SEPARATED LIST>. REQUIRED")
print(" -topics Default: False. Indicators for words from common topics.")
print(" -topics=count Count matching words from common topics.")
print(" -topics=normalize Count matching topic key words and normalize over topics.")
print(" -traits Default: False. Word usage for personality traits.")
print(" -reason Default: False. Word usage for reasoning: premises, conclusions.")
print(" -rhetoric Default: False. Word usage for rhetorical devices.")
print(" -pos Default: False. Part of speech proportions.")
print(" -literacy Default: False. Checks for simple literacy markers.")
print(" -profanity Default: False. Profanity check flags.")
print(" -sentiment Default: False. Words counts for positive and negative sentiment.")
print(" -scarcity Default: False. Word scarcity scores.")
print(" -emoticons Default: False. Emoticon check flags.")
print(" -embedding Default: False. Aggregate of Word Embedding Vectors.")
print(" -embedding=normalize Normalised Aggregate of Word Embedding Vectors.")
print(" -comparison Default: False. Cross-column comparisons using edit distances.")
print("")
```
#### File: Text_Feature_Generator/texturizer/traits.py
```python
import pkg_resources
import pandas as pd
import numpy as np
import math
import os
import re
from .process import load_word_pattern
"""
texturizer.traits: Personality trait feature flags
This module performs word or phrase matching to generate features
that can be indicative of personality traits in a writer or speaker.
Some ideas taken from these articles
https://www.scientificamerican.com/article/you-are-what-you-say/
https://hbr.org/2011/12/your-use-of-pronouns-reveals-your-personality
"""
########################################################################################
nuance_pat = load_word_pattern('nuance.dat')
nuance_re = re.compile(nuance_pat)
explain_pat = load_word_pattern('explain.dat')
explain_re = re.compile(explain_pat)
singular_pat = "\\bi\\b|\\bme\\b|\\bmyself\\b|\\bmy\\b|\\bmine\\b"
singular_re = re.compile(singular_pat)
plural_pat = "\\bwe\\b|\\bus\\b|\\bour\\b|\\bourselves\\b"
plural_re = re.compile(plural_pat)
quotation_pat = "\"[ a-zA-Z0-9.,?!:;']*\""
########################################################################################
def add_text_trait_features(df, columns):
"""
Given a pandas dataframe and a set of column names.
calculate the personality trait features and add them.
"""
rez = df.copy()
for col in columns:
rez = add_trait_counts(rez, col)
return rez
########################################################################################
def add_trait_counts(df, col):
"""
Given a pandas dataframe and a column name.
Count the number of keyword matches for each trait
"""
df[col+'_explain']=df[col].str.count(explain_pat, flags=re.IGNORECASE)
df[col+'_nuance']=df[col].str.count(nuance_pat, flags=re.IGNORECASE)
df[col+'_singular']=df[col].str.count(singular_pat, flags=re.IGNORECASE)
df[col+'_plural']=df[col].str.count(plural_pat, flags=re.IGNORECASE)
df[col+'_quotations']=df[col].str.count(quotation_pat, flags=re.IGNORECASE)
return df
``` |
{
"source": "john-hawkins/textplainer",
"score": 3
} |
#### File: textplainer/textplainer/cli.py
```python
import pandas as pd
import sys
import os
from .models import load_and_test_model
from .explain import explain
def main():
"""
Main function is the entry point for the command line application.
It expects to find the required parameters in ```sys.argv```
"""
if len(sys.argv) < 4:
print("ERROR: MISSING ARGUMENTS")
print_usage(sys.argv)
exit(1)
else:
params = get_cmd_line_params(sys.argv)
if not os.path.exists(params["model"]):
print("ERROR: Model to test does not exist")
print_usage(sys.argv)
exit(1)
if not os.path.exists(params["src"]):
print("ERROR: Path to src code does not exist")
print_usage(sys.argv)
exit(1)
if not os.path.exists(params["dataset"]):
print("ERROR: Testing data does not exist")
print_usage(sys.argv)
exit(1)
try:
rez = explain(params["model"],
params["src"],
params["dataset"],
params["column"], params)
print(rez)
except Exception as err:
print(err)
#############################################################
def get_cmd_line_params(argv):
"""
Function to parse out the options and parameters from an array of
command line arguments and return them in a dictionary.
Note: there are multiple options here that are not yet implemented.
TODO: Switch and use a standard argument parsing library.
    :param argv: The array of command line arguments received by the app
:type argv: Array(String), required
:returns: A dictionary of required values
    :rtype: Dictionary
"""
column = argv[-1]
data = argv[-2]
src = argv[-3]
model = argv[-4]
options = argv[1:-4]
result = {"dataset":data,
"column":column,
"model":model,
"src":src
}
return result
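# Example of the expected positional layout (paths are hypothetical):
#   argv = ["textplainer", "model.pkl", "src/", "data.csv", "text"]
#   get_cmd_line_params(argv) -> {"dataset": "data.csv", "column": "text",
#                                 "model": "model.pkl", "src": "src/"}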
#############################################################
def print_usage(args):
"""
Print out the command line application usage instructions.
    :returns: None
    :rtype: None
"""
print("USAGE ")
print(args[0], " [ARGS] <MODEL> <SRC> <DATA> <COLUMN>")
print(" <MODEL> - Path to the pickled model. *")
print(" <SRC> - Path to the src code for the model. ^")
print(" <DATA> - Path to a dataset to be explained.")
print(" <COLUMN> - Name of the column that contains text data.")
print("")
print("NOTES: ")
print(" * Model must adhere to the interface defined in ModelInterface.py")
print(" ^ Must be a path to a directory of source code with __init__.py")
```
#### File: textplainer/textplainer/models.py
```python
import pickle
import sys
class CustomUnpickler(pickle.Unpickler):
def add_path(self, path):
sys.path.append(path)
def find_class(self, module, name):
return super().find_class(module, name)
def load_model(path_to_model, path_to_class):
"""
Load and un-pickle a ML model. Model will need to implement the interface.
:returns: A model object that implements the ModelInterface
    :rtype: object
"""
unpickler = CustomUnpickler(open(path_to_model, 'rb'))
unpickler.add_path(path_to_class)
return unpickler.load()
def score_dataset(model, data):
"""
Apply a model to the given dataset.
    :returns: A numpy array of model scores, the same length as the dataset
    :rtype: numpy array
"""
return model.predict(data)
def load_and_test_model(path_to_model, path_to_class ):
"""
Load and un-pickle a ML model.
Then test if it implements the required interface.
Else throw exception.
:returns: A model object that implements the ModelInterface
    :rtype: object
"""
model = load_model(path_to_model, path_to_class)
funcs = dir(model)
if 'predict' not in funcs:
raise Exception("ERROR: Model does not implement function 'predict'")
else:
return model
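# Sketch of typical usage (paths are hypothetical):
#   model = load_and_test_model("results/model.pkl", "src/")
#   scores = score_dataset(model, df)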
``` |
{
"source": "john-hawkins/texturizer",
"score": 3
} |
#### File: texturizer/texturizer/featurize.py
```python
import functools
import pandas as pd
import numpy as np
from .process import start_profile
from .process import end_profile
from .simple import add_text_summary_features
from .pos import add_text_pos_features
from .topics import add_text_topics_features
from .profanity import add_text_profanity_features
from .traits import add_text_trait_features
from .rhetoric import add_text_rhetoric_features
from .sentiment import add_text_sentiment_features
from .literacy import add_text_literacy_features
from .emoticons import add_text_emoticon_features
from .comparison import add_comparison_features
from .scarcity import add_scarcity_features
from .embedding import add_text_embedding_features
"""
texturizer.featurize: Core functions to apply a set of features to a data frame.
"""
########################################################################################
def process_df(df, params):
"""
process_df: Function that co-ordinates the process of generating the features
"""
start_profile("simple")
simple = add_text_summary_features( df, params["columns"] )
end_profile("simple")
if params["comparison"] :
start_profile("comparison")
simple = add_comparison_features( simple, params["columns"] )
end_profile("comparison")
if params["profanity"] :
start_profile("profanity")
simple = add_text_profanity_features( simple, params["columns"] )
end_profile("profanity")
if params["sentiment"] :
start_profile("sentiment")
simple = add_text_sentiment_features( simple, params["columns"] )
end_profile("sentiment")
if params["scarcity"] :
start_profile("scarcity")
simple = add_scarcity_features( simple, params["columns"] )
end_profile("scarcity")
if params["emoticons"] :
start_profile("emoticons")
simple = add_text_emoticon_features( simple, params["columns"] )
end_profile("emoticons")
if params["embedding"] :
start_profile("embedding")
if params["normalize_embedding"] :
simple = add_text_embedding_features( simple, params["columns"], 'normalize' )
else:
simple = add_text_embedding_features( simple, params["columns"] )
end_profile("embedding")
if params["topics"] :
start_profile("topics")
if params["count_matches"] :
if params["normalize_topics"] :
simple = add_text_topics_features( simple, params["columns"], 'normalize' )
else:
simple = add_text_topics_features( simple, params["columns"], 'count' )
else:
simple = add_text_topics_features( simple, params["columns"] )
end_profile("topics")
if params["traits"] :
start_profile("traits")
simple = add_text_trait_features( simple, params["columns"] )
end_profile("traits")
if params["rhetoric"] :
start_profile("rhetoric")
simple = add_text_rhetoric_features( simple, params["columns"] )
end_profile("rhetoric")
if params["pos"] :
start_profile("pos")
simple = add_text_pos_features( simple, params["columns"] )
end_profile("pos")
if params["literacy"] :
start_profile("literacy")
simple = add_text_literacy_features( simple, params["columns"] )
end_profile("literacy")
return simple
########################################################################################
def generate_feature_function(parameters):
"""
This function will take the processed command line arguments that determine
the feature to apply and partially apply them to the process_df function.
Returning a function that can be used to apply those parameters to multiple
chunks of a dataframe.
"""
return functools.partial(process_df, params = parameters)
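# Illustrative usage (hypothetical): the returned partial applies one fixed set of options, as
# built by get_cmd_line_params, to every chunk of a large file.
#   feature_func = generate_feature_function(params)
#   features = feature_func(df_chunk)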
``` |
{
"source": "johnheap/VAPPER-Galaxy",
"score": 2
} |
#### File: johnheap/VAPPER-Galaxy/Tryp_T.py
```python
import subprocess
import pandas as pd
import re
import os
import sys
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
pList = ['P1', 'P2', 'P3', 'P4', 'P5', 'P6', 'P7', 'P8', 'P9', 'P10', 'P11', 'P12', 'P13', 'P14', 'P15']
quietString = "" #"">> Vap_log.txt 2>&1"
def transcriptMapping(inputname, strain, forwardFN,reverseFN):
#where is our Reference data -
dir_path = os.path.dirname(os.path.realpath(__file__))
refName = dir_path+"/data/Reference/Tc148" #default
if strain == "Tc148":
refName = dir_path+"/data/Reference/Tc148"
if strain == "IL3000":
refName = dir_path+"/data/Reference/IL3000"
#argString = "bowtie2 -x Refe4rence/IL3000 -1 data/"+forwardFN+" -2 data/"+reverseFN+" -S "+inputname+".sam" #>log.txt
#argString = "bowtie2 -x Reference/Tc148 -1 data/"+forwardFN+" -2 data/"+reverseFN+" -S "+inputname+".sam" #>log.txt
argString = "bowtie2 -x "+refName+" -1 "+forwardFN+" -2 "+reverseFN+" -S "+inputname+".sam"+quietString #>log.txt
#print(argString)
returncode = subprocess.call(argString, shell=True)
def processSamFiles(inputname):
#debug use a mapping sam file we have already found
#dir_path = os.path.dirname(os.path.realpath(__file__))
#bugName = dir_path+"/data/T_Test" #defasult
cur_path = os.getcwd()
samName = cur_path+"/"+inputname
#argString = "samtools view -bS "+bugName+" > "+inputname+".bam"
argString = "samtools view -bS "+inputname+".sam > "+samName+".bam"+quietString
#print(argString)
returncode = subprocess.call(argString, shell=True)
#argString = "samtools sort "+bugName+" -o "+inputname+".sorted"
argString = "samtools sort "+samName+".bam -o "+samName+".sorted"+quietString
#print("argstring = "+argString)
returncode = subprocess.call(argString, shell=True)
#argString = "samtools index "+bugName+".sorted "+inputname+".sorted.bai"
argString = "samtools index "+samName+".sorted "+samName+".sorted.bai"+quietString
#print("argstring = " + argString)
returncode = subprocess.call(argString, shell=True)
def transcriptAbundance(inputname, strain):
dir_path = os.path.dirname(os.path.realpath(__file__))
refName = dir_path + "/data/Reference/ORFAnnotation.gtf" # defasult
if strain == "Tc148":
refName = dir_path + "/data/Reference/ORFAnnotation.gtf"
if strain == "IL3000":
refName = dir_path + "/data/Reference/IL3000.gtf"
#argString = "cufflinks -G Reference/IL3000.gtf -o "+inputname+".cuff -u -p 8 "+inputname+".sorted"
#argString = "cufflinks -G Reference/ORFAnnotation.gtf -o "+inputname+".cuff -u -p 8 "+inputname+".sorted"
argString = "cufflinks -q -G "+refName+" -o "+inputname+".cuff -u -p 8 "+inputname+".sorted"+quietString
returncode = subprocess.call(argString, shell = True)
def convertToFasta(inputName, strain): #equivalent to Sara's awk scripte
dir_path = os.path.dirname(os.path.realpath(__file__))
refName = dir_path + "/data/Reference/ORFAnnotation.gtf" # default
if strain == "Tc148":
refName = dir_path + "/data/Reference/148_prot.fasta"
if strain == "IL3000":
refName = dir_path + "/data/Reference/IL3000_prot.fasta"
cuff_df = pd.read_csv(inputName+".cuff/genes.fpkm_tracking", sep='\t')
cuff_df = cuff_df[(cuff_df['FPKM'] > 0)]
cuff_df.to_csv("cuffTest.csv")
gene_id_List = cuff_df['gene_id'].tolist()
#print(gene_id_List)
#print ("Found from 8880="+str(found))
# need to load in IL3000_prot.fasta
# for each line with >TcIL3000_1_1940
# search within cuff_df[gene_id] for match
# add it to the outfile. (need to save it as used by hmmer later
number = 0
all = 0
with open(inputName+"_6frame.fas", 'w') as outfile:
ref = open(refName,'r')
#ref = open(r"Reference/IL3000_prot.fasta",'r')
n = 0
line = ref.readline()
while line:
if line[0] == '>':
all = all+1
ln = line[1:] #remove >
ln = ln.rstrip() #remove /n /r etc
#print (ln)
if ln in gene_id_List:
number = number+1
outfile.write(line)
line = ref.readline()
if line:
while line[0] != '>':
outfile.write(line)
line=ref.readline()
if not line:
break;
else:
line = ref.readline()
else:
line =ref.readline()
ref.close()
print(str(len(gene_id_List))+":"+str(number)+" from "+str(all))
return cuff_df
def HMMerMotifSearch(name, strain, cuff_df):
motifs = ['1', '2a', '2b', '3', '4a', '4b', '4c', '5', '6', '7', '8a', '8b', '9a', '9b',
'9c', '10a', '10b', '11a', '11b', '12', '13a', '13b', '13c', '13d', '14', '15a', '15b', '15c']
dir_path = os.path.dirname(os.path.realpath(__file__))
phylopath = dir_path + "/data/Motifs/Phylotype"
lineCounts = []
compoundList = []
for m in motifs:
argString = "hmmsearch "+phylopath + m + ".hmm " + name + "_6frame.fas > Phy" + m + ".out"
print(argString)
subprocess.call(argString, shell=True)
hmmResult = open("Phy" + m + ".out", 'r')
regex = r"Tc148[0-9]{1,8}"
if strain == "Tc148":
regex = r"Tc148[0-9]{1,8}"
if strain == "IL3000":
regex = r"TcIL3000_[0-9]{1,4}_[0-9]{1,5}"
n = 0
outList = []
for line in hmmResult:
m = re.search(regex, line)
if m:
outList.append(""+m.group())
n += 1
if re.search(r"inclusion", line):
print("inclusion threshold reached")
break
compoundList.append(outList)
lineCounts.append(n)
hmmResult.close()
#print(lineCounts)
#print(cuff_df)
concatGroups = [1, 2, 1, 3, 1, 1, 1, 2, 3, 2, 2, 1, 4, 1, 3]
countList = []
weightList = []
countIndex = 0
totalCount = 0
    totalWeight = 0
for c in concatGroups:
a = []
weight = []
for n in range(0, c):
a = a + compoundList.pop(0)
t = set(a)
countList.append(len(t))
wa = 0
for w in t:
wt = cuff_df.loc[cuff_df['gene_id'] == w, 'FPKM'].iloc[0]
#print(w)
#print(wt)
wa = wa+wt
weightList.append(wa)
        totalWeight += wa
totalCount += len(t)
countList.append(totalCount)
    weightList.append(totalWeight)
#print(countList)
#print("--------")
#print(weightList)
#print("--------")
return countList,weightList
def relativeFrequencyTable(countList, name, htmlresource):
relFreqList = []
c = float(countList[15])
for i in range(0, 15):
relFreqList.append(countList[i] / c)
data = {'Phylotype': pList, 'Relative Frequency': relFreqList}
relFreq_df = pd.DataFrame(data)
j_fname = htmlresource+ "/" + name + "_t_relative_frequency.csv"
relFreq_df.to_csv(j_fname)
return relFreqList # 0-14 = p1-p15 counts [15] = total counts
def weightedFrequencyTable(countList, name, htmlresource):
relFreqList = []
c = float(countList[15])
for i in range(0, 15):
relFreqList.append(countList[i] / c)
data = {'Phylotype': pList, 'Weighted Frequency': relFreqList}
relFreq_df = pd.DataFrame(data)
j_fname = htmlresource+ "/" + name + "_t_weighted_frequency.csv"
relFreq_df.to_csv(j_fname)
return relFreqList # 0-14 = p1-p15 counts [15] = total counts
def createStackedBar(name,freqList,strain,pdf,html_resource):
palette = ["#0000ff", "#6495ed", "#00ffff", "#caff70",
"#228b22", "#528b8b", "#00ff00", "#a52a2a",
"#ff0000", "#ffff00", "#ffa500", "#ff1493",
"#9400d3", "#bebebe", "#000000", "#ff00ff"]
VAP_148 = [0.072, 0.032, 0.032, 0.004, 0.007,
0.005, 0.202, 0.004, 0.006, 0.014,
0.130, 0.133, 0.054, 0.039, 0.265]
VAP_IL3000 = [0.073, 0.040, 0.049, 0.018, 0.060,
0.055, 0.054, 0.025, 0.012, 0.060,
0.142, 0.100, 0.061, 0.078, 0.172]
cmap = plt.cm.get_cmap('tab20')
palette = [cmap(i) for i in range(cmap.N)]
if strain == "Tc148":
VAPtable = VAP_148
VAPname='Tc148\nGenome VAP'
if strain == "IL3000":
VAPtable = VAP_IL3000
VAPname= 'IL3000\nGenome VAP'
width = 0.35 # the width of the bars: can also be len(x) sequence
plots = []
fpos = 0
vpos = 0
for p in range(0, 15):
tp = plt.bar(0, freqList[p], width, color= palette[p], bottom = fpos)
fpos +=freqList[p]
tp = plt.bar(1, VAPtable[p], width, color= palette[p], bottom = vpos)
vpos +=VAPtable[p]
plots.append(tp)
plt.xticks([0,1],[name,VAPname])
plt.legend(plots[::-1],['p15','p14','p13','p12','p11','p10','p9','p8','p7','p6','p5','p4','p3','p2','p1'])
title = "Figure Legend: The transcriptomic Variant Antigen Profile of $\itTrypanosoma$ $\itcongolense$ estimated as phylotype " \
"proportion adjusted for transcript abundance and the reference genomic Variant Antigen Profile. " \
"\nData was produced with the 'Variant Antigen Profiler' (<NAME> et al., 2019)."
#plt.title(title, wrap="True")
#plt.text(-0.2, -0.05, title, va="top", transform=ax.transAxes, wrap="True")
plt.text(-0.3, -0.15, title, va="top", wrap="True")
plt.tight_layout(pad=1.5)
plt.subplots_adjust(bottom = 0.3,top=0.99,left=0.125,right=0.9,hspace=0.2,wspace=0.2)
plt.savefig(html_resource + "/stackedbar.png")
if pdf == 'PDF_Yes':
plt.savefig(html_resource + "/stackedbar.pdf")
#plt.show()
def createHTML(name,htmlfn,htmlresource,freqList,weightList):
#assumes imgs are heatmap.png, dheatmap.png, vapPCA.png and already in htmlresource
htmlString = r"<html><title>T.congolense VAP</title><body><div style='text-align:center'><h2><i>Trypanosoma congolense</i> Variant Antigen Profile</h2><h3>"
htmlString += name
htmlString += r"<br>Transcriptomic Analysis</h3></p>"
htmlString += "<p style = 'margin-left:20%; margin-right:20%'>Table Legend: Variant Antigen Profiles of a transcriptome of <i>Trypanosoma congolense</i> estimated as phylotype proportion. " \
"Weighted frequency refers to the phylotype proportion based transcript abundance. " \
"Data was produced with the 'Variant Antigen Profiler' (<NAME> et al., 2019).</p> "
htmlString += r"<style> table, th, tr, td {border: 1px solid black; border-collapse: collapse;}</style>"
htmlString += r"<table style='width:50%;margin-left:25%;text-align:center'><tr><th>Phylotype</th><th>Relative Frequency</th><th>Weighted Frequency</th></tr>"
tabString = ""
# flush out table with correct values
for i in range(0, 15):
f = format(freqList[i], '.4f')
w = format(weightList[i], '.4f')
tabString += "<tr><td>phy" + str(i + 1) + "</td><td>" + f + "</td><td>" + w + "</td></tr>"
htmlString += tabString + "</table><br><br><br><br><br>"
htmlString += r"<p> <h3>Stacked Bar chart of Phylotype Frequency</h3> The 'weighted' relative frequency of each phylotype alongside the VAP of selected strain.</p>"
imgString = r"<img src = 'stackedbar.png' alt='Stacked bar chart of phylotype variation' style='max-width:100%'><br><br>"
htmlString += imgString
# htmlString += r"<p><h3>The Deviation Heat Map and Dendogram</h3>The phylotype variation expressed as the deviation from your sample mean compared to the model dataset</p>"
# imgString = r"<img src = 'dheatmap.png' alt='Deviation Heatmap' style='max-width:100%'><br><br>"
# htmlString += imgString
# htmlString += r"<p><h3>The Variation PCA plot</h3>PCA analysis corresponding to absolute variation. Colour coded according to location</p>"
# imgString = r"<img src = 'vapPCA.png' alt='PCA Analysis' style='max-width:100%'><br><br>"
# htmlString += imgString + r"</div></body></html>"
with open(htmlfn, "w") as htmlfile:
htmlfile.write(htmlString)
#argdict = {'name':2, 'pdfexport': 3, 'strain': 4, 'forward': 5, 'reverse': 6, 'html_file': 7, 'html_resource': 8}
def transcriptomicProcess(args,dict):
transcriptMapping(args[dict['name']], args[dict['strain']], args[dict['forward']], args[dict['reverse']]) #uses bowtie
processSamFiles(args[dict['name']]) #uses samtools
transcriptAbundance(args[dict['name']],args[dict['strain']]) #uses cufflinks -> ?.cuff/*.*
cuff_df = convertToFasta(args[dict['name']],args[dict['strain']])
countList, weightList = HMMerMotifSearch(args[dict['name']],args[dict['strain']], cuff_df)
relFreqList = relativeFrequencyTable(countList,args[dict['name']],args[dict['html_resource']])
relWeightList = weightedFrequencyTable(weightList,args[dict['name']],args[dict['html_resource']])
createStackedBar(args[dict['name']],relWeightList, args[dict['strain']],args[dict['pdfexport']],args[dict['html_resource']])
createHTML(args[dict['name']],args[dict['html_file']],args[dict['html_resource']], relFreqList, relWeightList)
if __name__ == "__main__":
#print("Commencing Transcript Mapping")
#transcriptMapping("T_Test", "Transcripts.1","Transcripts.2")
#print("Processimg Sam Files")
#processSamFiles("T_Test")
#print("Assessing Transcript Abundance")
#transcriptAbundance("T_Test")
#print ("Converting to Fasta Subset")
#cuff_df = convertToFasta("T_Test")
#print("Commencing HMMer search")
#countList, weightList = HMMerMotifSearch("T_Test",cuff_df)
#relativeFrequencyTable(countList,'T_Test')
#weightedFrequencyTable(weightList,'T_Test')
relFreqList = [0.111842105,0.059210526,0.026315789,0.013157895,
0.006578947,0.013157895,0.032894737,0.019736842,
0.039473684,0.046052632,0.217105263,0.065789474,
0.151315789,0.059210526,0.138157895]
relWeightList = [0.07532571,0.05900545,0.009601452,0.042357532,0.01236219,0.001675663,0.04109726,
0.097464248,0.057491666,0.05826875,0.279457473,0.070004772,0.065329007,0.085361298,0.045197529]
createStackedBar('T_Test',relWeightList, 'Tc148','PDF_Yes','results')
createHTML("t_test","results/t_test.html","results",relFreqList,relWeightList)
``` |
{
"source": "johnhellion/idle.cafe",
"score": 3
} |
#### File: plugins/api/api.py
```python
import os
from pelican import signals
# Generate an "API" for the blog
# It is actually a JavaScript array that can be easily consumed
# and allows to search for an article, via title or slug
# Template for the content
JS_BASE = '''const API = [
{}
];
'''
# Output filename
FILENAME = 'api.js'
class APIGenerator():
def __init__(self, context, settings, path, theme, output_path):
self.context = context
self.output_path = output_path
# Slugs to exclude
self.exclude = self.context['API_EXCLUDE_SLUGS']
def generate_output(self, writer):
# Final file path
path = os.path.join(self.output_path, FILENAME)
# Extract pages and articles
content = \
self.context['articles'] + \
self.context['pages']
# Remove the content that must be excluded
content = [c for c in content if c.slug not in self.exclude]
# Get all the slugs, and titles
slugs = [c.slug for c in content]
titles = [c.title for c in content]
# Escape quotes in the title
titles = [title.replace('\'', '\\\'') for title in titles]
# Format objects
objs = [
f'{{ title: \'{title}\', slug: \'{slug}\' }}'
for title, slug in zip(titles, slugs)
]
# JavaScript array content
js_array_elements = ',\n '.join(objs)
# Put content into array
js = JS_BASE.format(js_array_elements)
# Write JS file
with open(path, 'w+') as fd:
fd.write(js)
def get_generators(generators):
return APIGenerator
def register():
signals.get_generators.connect(get_generators)
```
#### File: plugins/readtime/readtime.py
```python
import re
import math
from pelican import signals
from pelican.generators import ArticlesGenerator
from html.parser import HTMLParser
# Add a readtime property to the articles
# Based on simple WPM count
# Source code blocks are ignored
# We are assuming that the HTML is well formatted
WPM = 230 # Words Per Minute
CODE_BLOCK_TAGS = ['pre'] # We ignore blocks of code
class MyHTMLParser(HTMLParser):
def __init__(self):
super().__init__()
self.acc = []
self.code_stack = 0
def handle_starttag(self, tag, _):
if tag in CODE_BLOCK_TAGS:
# Keep track that a code block as opened
self.code_stack += 1
def handle_endtag(self, tag):
if tag in CODE_BLOCK_TAGS:
# A code block has closed
self.code_stack -= 1
def handle_data(self, data):
if self.code_stack == 0:
# This means we are NOT in a code block
self.acc.append(data)
def get_data(self):
return ''.join(self.acc)
def strip_tags(html_data):
p = MyHTMLParser()
p.feed(html_data)
# May be able to detect HTML malformations
assert(p.code_stack == 0)
return p.get_data()
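# Small illustration (assumed input): text inside <pre> blocks is excluded from the word count.
#   strip_tags("<p>two words</p><pre>ignored_code()</pre>") -> "two words"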
def add_readtime_property(document):
text = strip_tags(document.content)
words = re.split(r'[^0-9A-Za-z]+', text)
nb_words = len(words)
minutes = max(1, int(math.ceil(nb_words / WPM)))
document.readtime = minutes
def run(generators):
for g in generators:
if isinstance(g, ArticlesGenerator):
for a in g.articles:
add_readtime_property(a)
def register():
signals.all_generators_finalized.connect(run)
```
#### File: plugins/sitemap/sitemap.py
```python
import os
import urllib.parse
from pelican import signals, contents
# Generate an XML sitemap for the blog
# The XML sitemap is NOT manually sent to Google but it is publicly
# available
# The output filename
FILENAME = 'sitemap.xml'
# Table for change frequencies
# These are default values that can be overridden in the configuration file
# of the blog
# The underscore values come from Pelican
CHANGE_FREQUENCIES = {
'_index': 'daily',
'_articles': 'monthly',
'_pages': 'monthly',
'_default': 'weekly',
}
# Table for the priorities
# These are default values that can be overridden in the configuration file
# of the blog
PRIORITIES = {
'_default': 0.5
}
# In order to generate the sitemap, we use a bunch of Python templates
# that we glue together
# Last modification template
DATE_TEMPLATE = '\n <lastmod>{}</lastmod>'
# URL Template
URL_TEMPLATE = ''' <url>
<loc>{loc}</loc>{lastmod}
<changefreq>{changefreq}</changefreq>
<priority>{priority}</priority>
</url>'''
# Root template
SITEMAP_TEMPLATE = '''<?xml version="1.0" encoding="UTF-8"?>
<urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">
{}
</urlset>
'''
# Get the content priority associated with a Pelican content object
def get_content_priority(content):
if content.slug in PRIORITIES:
return PRIORITIES[content.slug]
return PRIORITIES['_default']
# Get the content change frequency associated with a Pelican content object
def get_content_change_frequency(content):
if content.slug in CHANGE_FREQUENCIES:
return CHANGE_FREQUENCIES[content.slug]
if isinstance(content, contents.Article):
return CHANGE_FREQUENCIES['_articles']
if isinstance(content, contents.Page):
return CHANGE_FREQUENCIES['_pages']
return CHANGE_FREQUENCIES['_default']
# Get the last modification date for a Pelican content object
def get_content_last_date(content):
# Prioritize the last update date
if hasattr(content, 'modified'):
return content.modified
if hasattr(content, 'date'):
return content.date
return None
class SitemapGenerator():
def __init__(self, context, settings, path, theme, output_path):
self.context = context
self.output_path = output_path
# Merge constants with configuration
CHANGE_FREQUENCIES.update(context['CHANGE_FREQUENCIES'])
PRIORITIES.update(context['PRIORITIES'])
# Get slugs to exclude
self.exclude = self.context['API_EXCLUDE_SLUGS']
def generate_output(self, writer):
# Final file path
path = os.path.join(self.output_path, FILENAME)
# Extract pages and articles
content = \
self.context['articles'] + \
self.context['pages']
# Remove the content that must be excluded
content = [c for c in content if c.slug not in self.exclude]
# Store all the url blocks
buffer = []
# Iterate over all pages, articles, mixed
for c in content:
# Date can be YYYY-MM-DD or nothing
date = get_content_last_date(c)
date_formated = None
if date is not None:
date_formated = DATE_TEMPLATE.format(date.strftime('%Y-%m-%d'))
# Join site url and content slug
url = urllib.parse.urljoin(self.context['SITEURL'], c.slug)
# Update frequency
frequency = get_content_change_frequency(c)
# Document priority
priority = get_content_priority(c)
# Store the URL block
buffer.append(URL_TEMPLATE.format(
loc=url,
lastmod=date_formated or '',
changefreq=frequency,
priority=priority
))
# Don't forget the index page
buffer.append(URL_TEMPLATE.format(
loc=self.context['SITEURL'],
            lastmod='',  # empty string, otherwise the template would emit the literal text "None"
changefreq=CHANGE_FREQUENCIES['_index'],
priority=PRIORITIES['_default']
))
# Join all the URL blocks into the final template
sitemap = SITEMAP_TEMPLATE.format('\n'.join(buffer))
# Write sitemap to disk
with open(path, 'w+') as f:
f.write(sitemap)
def get_generators(generators):
return SitemapGenerator
def register():
signals.get_generators.connect(get_generators)
``` |
{
"source": "johnhendrick/adventofcode2021",
"score": 3
} |
#### File: adventofcode2021/adventcode/day10.py
```python
from adventcode.utils import read_file
import re
file_path = './input/day10.txt'
def parse_file(file_content=read_file(file_path)):
rows = file_content.split('\n')
return rows
data = parse_file()
scoring = {
')': 3,
']': 57,
'}': 1197,
'>': 25137,
None: 0
}
bracket_pairs = ['()', '[]', '{}', '<>']
pair_dict = {ele[0]: ele[1] for ele in bracket_pairs}
def scan(line, remove_left=True):
while any(x in line for x in bracket_pairs):
for br in bracket_pairs:
line = line.replace(br, '')
if remove_left:
fault = re.sub(r'[([{<]', '', line)
else:
# extended for part2
fault = line[::-1]
for key in pair_dict.keys():
fault = fault.replace(key, pair_dict[key])
return fault
return fault[0] if len(fault) > 0 else None
score = 0
incomplete_rows = []
for i, row in enumerate(data):
found = scan(row)
if found:
score += scoring.get(found)
else:
incomplete_rows.append(i)
print(score)
# part 2
scoringv2 = {
')': 1,
']': 2,
'}': 3,
'>': 4
}
collect = []
for pos in incomplete_rows:
score2 = 0
unmatched = scan(data[pos], remove_left=False)
for ele in unmatched:
score2 = score2*5 + scoringv2.get(ele)
collect.append(score2)
print(sorted(collect)[round(len(collect)/2)])
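# Worked example of the part 2 scoring, using the completion string "])}>"
# from the AoC day 10 sample:
#   0*5  + 2 = 2    (']')
#   2*5  + 1 = 11   (')')
#   11*5 + 3 = 58   ('}')
#   58*5 + 4 = 294  ('>')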
```
#### File: adventofcode2021/adventcode/day12.py
```python
from adventcode.utils import read_file
from collections import Counter
import numpy as np
file_path = './input/day12.txt'
def parse_file(file_content=read_file(file_path)):
rows = file_content.split('\n')
pairs = [tuple(row.split('-')) for row in rows]
rooms = dict()
for row in rows:
temp = row.split('-')
for ele in temp:
rooms[ele] = []
for pair in pairs:
rooms[pair[0]].append(pair[1])
rooms[pair[1]].append(pair[0])
return rooms, list(rooms.keys())
links, rooms = parse_file()
all_paths = []
def clean_blocked(lower_visted, max_):
counts = Counter(lower_visted)
exclude = ['start', 'end']
output = []
max_visit_hit = np.any(np.array(list(counts.values())) == max_)
for ele in lower_visted:
if ((ele in exclude) or max_visit_hit) and ele.islower():
output.append(ele)
return output
def traverse(path=[], step='start', visited=[], blocked=[], max_=1):
path = path + [step]
if step.islower():
visited = visited + [step]
blocked = clean_blocked(visited, max_)
next_rooms = [room for room in links.get(step) if room not in blocked]
if step == 'end':
all_paths.append(path)
return 1
elif len(next_rooms) == 0:
return 0
else:
count = 0
for next_ in next_rooms:
count += traverse(path=path, step=next_,
visited=visited, blocked=blocked, max_=max_)
return count
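# For reference, on the small AoC day 12 sample (edges start-A, start-b, A-c,
# A-b, b-d, A-end, b-end) traverse() finds 10 distinct paths and
# traverse(max_=2) finds 36.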
unique_paths = traverse()
# print(all_paths)
print(unique_paths)
# part 2
all_paths = []
unique_paths_v2 = traverse(max_=2)
print(unique_paths_v2)
```
#### File: adventofcode2021/adventcode/day13.py
```python
from adventcode.utils import read_file
import re
import numpy as np
file_path = './input/day13.txt'
def parse_file(file_content=read_file(file_path)):
rows = re.split(r'\n\n', file_content)
steps = rows[1].split('\n')
steps = [parse_isntruction(step) for step in steps]
dots = rows[0].split('\n')
dots = [tuple(line.split(',')) for line in dots]
dots = [(int(loc[1]), int(loc[0])) for loc in dots]
max_x = max([dot[0] for dot in dots])
max_y = max([dot[1] for dot in dots])
paper = np.zeros(shape=(max_x+1, max_y+1))
for dot in dots:
paper[dot] = 1
return paper, steps
def parse_isntruction(text):
x, y = None, None
plane = re.search(r'(\S)=', text)[1]
content = re.split(r'[xy]=', text)[-1]
if plane == 'x':
x = int(content)
else:
y = int(content)
return (x, y)
def fold(fold, paper):
# fold up
if fold[1] and fold[0] is None:
part1 = paper[:fold[1], :]
part2 = np.flip(paper[fold[1]+1:, :], 0)
# fold left
if fold[0] and fold[1] is None:
part1 = np.flip(paper[:, :fold[0]], 1)
part2 = paper[:, fold[0]+1:]
folded = part1 + part2
folded[folded > 1] = 1
return folded
paper, steps = parse_file()
paper = fold(steps[0], paper)
print((paper == 1).sum())
# part2
paper, steps = parse_file()
for step in steps:
paper = fold(step, paper)
print(np.flip(paper, 1))
```
#### File: adventofcode2021/adventcode/day14.py
```python
from adventcode.utils import read_file
import re
from collections import Counter
file_path = './input/day14.txt'
def parse_file(file_content=read_file(file_path)):
template, rules = re.split(r'\n\n', file_content)
rules = {ele[0]: ele[1]
for ele in [rule.split(' -> ') for rule in rules.split('\n')]}
return template, rules
def insert(source_str, insert_str, pos):
return source_str[:pos]+insert_str+source_str[pos:]
def enrich(text):
i = 0
count = 0
output = text
while (i+2) < len(text)+1:
substr = text[i:i+2]
value = rules.get(substr)
if value:
output = insert(output, value, i+1+count)
count += 1
i += 1
return output
template, rules = parse_file()
for i in range(10):
template = enrich(template)
result = Counter(template)
print(max(result.values()) - min(result.values()))
# part 2
template, rules = parse_file()
def init_overlap(text):
collect = []
i = 0
while (i+2) < len(text)+1:
collect.append(text[i:i+2])
i += 1
return Counter(collect), Counter(text)
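# Small illustration: init_overlap('NNCB') returns pair counts
# {'NN': 1, 'NC': 1, 'CB': 1} and character counts {'N': 2, 'C': 1, 'B': 1}.
# Part 2 below updates these counters instead of materialising the
# exponentially long polymer string.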
pair_count, char_count = init_overlap(template)
for i in range(40):
curr_pair = pair_count.copy()
for ele in list(curr_pair.keys()):
if rules.get(ele):
if pair_count.get(ele[0] + rules.get(ele)) is None:
pair_count[ele[0] + rules.get(ele)] = 0
if pair_count.get(rules.get(ele) + ele[1]) is None:
pair_count[rules.get(ele) + ele[1]] = 0
if char_count.get(rules.get(ele)) is None:
char_count[rules.get(ele)] = 0
pair_count[ele[0] + rules.get(ele)] += curr_pair[ele]
pair_count[rules.get(ele) + ele[1]] += curr_pair[ele]
pair_count[ele] -= curr_pair[ele]
char_count[rules.get(ele)] += curr_pair[ele]
print(max(char_count.values()) - min(char_count.values()))
```
#### File: adventofcode2021/adventcode/day15.py
```python
from adventcode.utils import read_file
import numpy as np
import heapq
file_path = './input/day15.txt'
def parse_file(file_content=read_file(file_path)):
rows = file_content.split('\n')
rows = [list(row) for row in rows]
return rows
sample = """1163751742
1381373672
2136511328
3694931569
7463417111
1319128137
1359912421
3125421639
1293138521
2311944581"""
cave = parse_file()
def dijkstra(nodes, edges, source=(0, 0)):
path_lengths = {v: float('inf') for v in nodes}
path_lengths[source] = 0
adjacent = {v: {} for v in nodes}
for (u, v), w_uv in edges.items():
adjacent[u][v] = w_uv
# temp = [v for v in nodes]
pq = [(0, source)]
while len(pq) > 0:
curr_d, curr_v = heapq.heappop(pq)
if curr_d > path_lengths[curr_v]:
continue
for n, w in adjacent[curr_v].items():
distance = curr_d + w
if distance < path_lengths[n]:
path_lengths[n] = distance
heapq.heappush(pq, (distance, n))
return path_lengths
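# Minimal sketch of the expected input format (hypothetical two-node graph):
#   nodes = [(0, 0), (1, 0)]
#   edges = {((0, 0), (1, 0)): 5}
#   dijkstra(nodes, edges)[(1, 0)] == 5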
def surround(i, j, cave=cave):
def within(a, z):
return a in range(z)
edges = [(i-1, j), (i+1, j), (i, j+1), (i, j-1)]
edges = [edge for edge in edges if within(
edge[1], y) and within(edge[0], x)]
edge_values = {((i, j), edge): int(
cave[edge[1]][edge[0]]) for edge in edges}
return edge_values
x, y = len(cave[0]), len(cave)
nodes = []
edges = {}
for i in range(x):
for j in range(y):
nodes.append((i, j))
edges.update(surround(i, j))
print(dijkstra(nodes, edges).get((x-1, y-1)))
# part 2
def increment(ele, i):
return (ele+i) % 9 if ((ele+i) % 9) != 0 else 9
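# increment wraps risk values around 9 -> 1 when tiling the cave for part 2,
# e.g. increment(8, 3) == 2 and increment(9, 1) == 1 (never 0).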
def expand(arr):
arr = np.array([[int(ele) for ele in row] for row in arr])
arr = np.concatenate((arr, np.vectorize(increment)(arr, 1),
np.vectorize(increment)(arr, 2),
np.vectorize(increment)(arr, 3),
np.vectorize(increment)(arr, 4)), axis=1)
arr = np.concatenate((arr, np.vectorize(increment)(arr, 1),
np.vectorize(increment)(arr, 2),
np.vectorize(increment)(arr, 3),
np.vectorize(increment)(arr, 4)))
return arr.tolist()
cave2 = expand(parse_file())
x, y = len(cave2[0]), len(cave2)
nodes = []
edges = {}
for i in range(x):
for j in range(y):
nodes.append((i, j))
edges.update(surround(i, j, cave=cave2))
print(dijkstra(nodes, edges).get((x-1, y-1)))
```
#### File: adventofcode2021/adventcode/day17.py
```python
import re
file_path = './input/day15.txt'
sample = "target area: x=20..30, y=-10..-5"
input_ = "target area: x=14..50, y=-267..-225"
def parse_file(file_content=input_):
x = re.search(r'x=(.*\d),', file_content).group(1).split('..')
x = [int(ele) for ele in x]
y = re.search(r'y=(.*\d)', file_content).group(1).split('..')
y = [int(ele) for ele in y]
return x, y
x1, y1 = parse_file(input_)
def dynamics(x=0, y=0, vx=0, vy=0):
# simulate till landed
top_y = y
while x <= x1[1] and y >= y1[0]:
prev = {'x': x, 'y': y,
'vx': vx, 'vy': vy, 'max_y': top_y}
x += vx
y += vy
if y > top_y:
top_y = y
vy -= 1
if abs(vx) > 0:
if vx > 0:
vx -= 1
if vx < 0:
vx += 1
if landed(prev):
return prev
else:
return None
def max_x(vx):
return vx*(1+vx)/2
def landed(params):
return ((params['x'] >= x1[0]) and
(params['x'] <= x1[1]) and
(params['y'] >= y1[0]) and
(params['y'] <= y1[1]))
param = {
'vx': 7,
'vy': None
}
top_y = 0
for vy_ in range(0, 1000):
param['vy'] = vy_
landing = dynamics(**param)
if landing:
if landing['max_y'] > top_y:
top_y = landing['max_y']
print(top_y)
# part2
combination = 0
for vx_ in range(0, x1[1]+1):
for vy_ in range(y1[0], 1000):
param['vx'] = vx_
param['vy'] = vy_
landing = dynamics(**param)
if landing:
combination += 1
print(combination)
```
#### File: adventofcode2021/adventcode/day1.py
```python
import pandas as pd
url = "https://adventofcode.com/2021/day/1/input"
data = pd.read_csv('./input/input.txt', names=['input'], sep='\n')
data_list = data.input.tolist()
def increase_count(data_list):
"""
Args:
data_list (list): input list
Returns:
int: number of increment
"""
count = 0
for i, ele in enumerate(data_list):
if i < len(data_list)-1:
if ele < data_list[i+1]:
count += 1
else:
pass
return count
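# e.g. increase_count([199, 200, 208, 200]) == 2 (two depth increases)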
count = increase_count(data_list)
print(count)
data['roll3'] = data.input.rolling(window=3).sum()
# part 2
print(increase_count(data.roll3.to_list()))
```
#### File: adventofcode2021/adventcode/utils.py
```python
def read_file(file_path):
with open(file_path) as f:
return f.read()
```
#### File: adventofcode2021/tests/day4_test.py
```python
import numpy as np
import pytest
from adventcode.day4 import update_board, check_winner, refresh
@pytest.fixture
def test_board():
return np.array([[67, 57, 2, 21, 19],
[11, 79, 74, 45, 95],
[42, 90, 68, 47, 62],
[80, 61, 1, 0, 39],
[43, 76, 40, 27, 66]])
@pytest.fixture
def test_file():
return refresh
@pytest.mark.parametrize("test_board, num, expected", [
(None, 90, np.array([[67, 57, 2, 21, 19],
[11, 79, 74, 45, 95],
[42, -1, 68, 47, 62],
[80, 61, 1, 0, 39],
[43, 76, 40, 27, 66]])),
(None, 999, np.array([[67, 57, 2, 21, 19],
[11, 79, 74, 45, 95],
[42, 90, 68, 47, 62],
[80, 61, 1, 0, 39],
[43, 76, 40, 27, 66]])),
(None, 0, np.array([[67, 57, 2, 21, 19],
[11, 79, 74, 45, 95],
[42, 90, 68, 47, 62],
[80, 61, 1, -1, 39],
[43, 76, 40, 27, 66]]))
], indirect=["test_board"])
def test_update_board(test_board, num, expected):
np.testing.assert_array_equal(
update_board(num, test_board), expected)
@pytest.mark.parametrize("test_board, expected", [
(np.array([[67, 57, 2, 21, 19],
[11, 79, 74, 45, 95],
[42, 90, 68, 47, 62],
[-1, -1, -1, -1, -1],
[43, 76, 40, 27, 66]]), True),
(np.array([[67, 57, 2, -1, 19],
[11, 79, 74, -1, 95],
[42, 90, 68, -1, 62],
[80, 61, 1, -1, 39],
[43, 76, 40, -1, 66]]), True),
(np.array([[67, 57, 2, 21, 19],
[11, 79, 74, 45, 95],
[42, 90, 68, 47, 62],
[80, 61, 1, -1, 39],
[43, 76, 40, 27, 66]]), False),
(np.array([[67, 57, 2, 21, 19],
[11, 79, 74, 45, 95],
[42, 90, 68, 47, 62],
[-1, -1, -1, -1, 39],
[43, 76, 40, 27, 66]]), False)
])
def test_check_winner(test_board, expected):
assert check_winner(test_board) == expected
def test_nums_parse_type():
nums, _ = refresh('./input/day4sample.txt')
assert isinstance(nums, list)
def test_nums_parse_length():
nums, _ = refresh('./input/day4sample.txt')
assert len(nums) == 27
def test_boards_parse_type():
_, boards = refresh('./input/day4sample.txt')
assert isinstance(boards, list)
def test_boards_parse_length():
_, boards = refresh('./input/day4sample.txt')
assert len(boards) == 3
def tests_boards_are_numpy():
_, boards = refresh('./input/day4sample.txt')
types = set([type(board) for board in boards])
assert (len(types) == 1) and (isinstance(boards[0], np.ndarray))
``` |
{
"source": "John-Hennig/jpype",
"score": 2
} |
#### File: test/jpypetest/test_classhints.py
```python
import jpype
import common
class MyImpl(object):
def blah(self):
pass
class ClassProxy:
def __init__(self, proxy):
self.proxy = proxy
class ArrayProxy:
def __init__(self, proxy):
self.proxy = proxy
class StringProxy:
def __init__(self, proxy):
self.proxy = proxy
class ClassHintsTestCase(common.JPypeTestCase):
def setUp(self):
common.JPypeTestCase.setUp(self)
self.Custom = jpype.JClass("jpype.classhints.Custom")
self.ClassHintsTest = jpype.JClass("jpype.classhints.ClassHintsTest")
@jpype.JImplements("jpype.classhints.Custom")
class MyCustom(object):
def __init__(self, arg):
self.arg = arg
self.MyCustom = MyCustom
def testCharSequence(self):
Instant = jpype.JClass("java.time.Instant")
s = "2019-12-21T05:26:13.223189Z"
        self.assertEqual(str(Instant.parse(s)), s)
def testInstant(self):
import datetime
now = datetime.datetime.utcnow()
Instant = jpype.JClass("java.time.Instant")
self.assertIsInstance(jpype.JObject(now, Instant), Instant)
def testPath(self):
import pathlib
JPath = jpype.JClass("java.nio.file.Path")
self.assertIsInstance(jpype.JObject(
pathlib.Path(__file__).absolute(), JPath), JPath)
def testFile(self):
import pathlib
JFile = jpype.JClass("java.io.File")
self.assertIsInstance(jpype.JObject(
pathlib.Path(__file__).absolute(), JFile), JFile)
def testConvertExact(self):
cht = self.ClassHintsTest
with self.assertRaises(TypeError):
cht.call("hello")
@jpype.JConversion(self.Custom, exact=str)
def StrToCustom(jcls, args):
return self.MyCustom(args)
cht.call("hello")
self.assertIsInstance(cht.input, self.MyCustom)
self.assertEqual(cht.input.arg, "hello")
def testConvertAttribute(self):
cht = self.ClassHintsTest
with self.assertRaises(TypeError):
cht.call(MyImpl())
@jpype.JConversion(self.Custom, attribute="blah")
def StrToCustom(jcls, args):
return self.MyCustom(args)
cht.call(MyImpl())
self.assertIsInstance(cht.input, self.MyCustom)
self.assertIsInstance(cht.input.arg, MyImpl)
def testClassCustomizer(self):
@jpype.JConversion("java.lang.Class", instanceof=ClassProxy)
def ClassCustomizer(jcls, obj):
return obj.proxy
hints = jpype.JClass('java.lang.Class')._hints
self.assertTrue(ClassProxy in hints.implicit)
def testArrayCustomizer(self):
@jpype.JConversion(jpype.JInt[:], instanceof=ArrayProxy)
def ArrayCustomizer(jcls, obj):
return obj.proxy
hints = jpype.JClass(jpype.JInt[:])._hints
self.assertTrue(ArrayProxy in hints.implicit)
def testStringCustomizer(self):
@jpype.JConversion("java.lang.String", instanceof=StringProxy)
def STringCustomizer(jcls, obj):
return obj.proxy
hints = jpype.JClass("java.lang.String")._hints
self.assertTrue(StringProxy in hints.implicit)
``` |
{
"source": "John-Hennig/KDE-diffusion",
"score": 2
} |
#### File: KDE-diffusion/tests/test_1d.py
```python
"""Tests the 1d kernel density estimation."""
########################################
# Dependencies #
########################################
from kde_diffusion import kde1d
from pathlib import Path
from numpy import isclose, load
from pytest import raises
########################################
# Fixtures #
########################################
reference = None
def setup_module():
global reference
here = Path(__file__).parent
reference = load(here/'reference1d.npz')
########################################
# Tests #
########################################
def test_reference():
x = reference['x']
N = reference['N']
assert N == len(x)
n = reference['n']
xmin = reference['xmin']
xmax = reference['xmax']
(density, grid, bandwidth) = kde1d(x, n, (xmin, xmax))
assert isclose(density, reference['density']).all()
assert isclose(grid, reference['grid']).all()
assert isclose(bandwidth, reference['bandwidth']).all()
def test_arguments():
(density, grid, bandwidth) = kde1d([-2, -1, 0, +1, +2]*20, 4)
assert len(grid) == 4
assert isclose(grid.min(), -2.4)
assert isclose(grid.max(), +1.2)
(density, grid, bandwidth) = kde1d([-2, -1, 0, +1, +2]*20, 4, 2)
assert isclose(grid.min(), -2)
assert isclose(grid.max(), +1)
with raises(ValueError):
kde1d([-2, -1, 0, +1, +2]*10, 4)
``` |
{
"source": "JohnHenryGaspay/djangoapitestproject",
"score": 3
} |
#### File: src/todo/serializers.py
```python
from rest_framework import serializers
# import our models
from todo import models
class ToDoListItem(serializers.ModelSerializer):
"""
Create the first serializer for ToDoListItems
This is a model serializer, so it will autogenerate a
full serializer from the model.
"""
class Meta:
# specify the model to use
model = models.ToDoListItem
# specify the field names we want to show in the api
fields = ('id', 'todo_list_id', 'title', 'description')
class ToDoList(serializers.ModelSerializer):
"""
Create a second serializer for the ToDoList model.
This will have the above serializer nested within it.
"""
items = ToDoListItem(many=True)
class Meta:
# specify the ToDoList model
model = models.ToDoList
# specify the fields from ToDoList that we want our API to return/consume
fields = ('id', 'title', 'items')
def create(self, validated_data):
"""
Override the default create method so that we can serialize
whole ToDoList objects with ToDoListItems.
:param validated_data: A Dictionary of values to use for object creation.
:type validated_data: OrderedDict
:returns ToDoList: A ToDoList model object
"""
# remove the value of items from the ToDoList validated data. We'll use this later
items_data = validated_data.pop('items')
# create a new ToDoList with the validated data passed in
todo_list = models.ToDoList.objects.create(**validated_data)
# for each item in the 'items' validated data
for item_data in items_data:
            # modify its validated data to reference the ToDoList we just made
item_data['todo_list_id'] = todo_list.id
# Create the ToDoListItem with the item data
models.ToDoListItem.objects.create(**item_data)
# after this return the todo_list we made
return todo_list
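# For illustration, the validated_data passed to create() might look like this
# hypothetical payload:
#   {'title': 'Groceries',
#    'items': [{'title': 'Milk', 'description': '2 litres'}]}
# Each item dict gets 'todo_list_id' injected before its ToDoListItem is created.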
``` |
{
"source": "johnhenry/pulumi-docker",
"score": 2
} |
#### File: examples/aws-py/__main__.py
```python
import base64
import pulumi_aws as aws
import pulumi_docker as docker
import pulumi
# Get registry info (creds and endpoint) so we can build/publish to it.
def get_registry_info(rid):
creds = aws.ecr.get_credentials(registry_id=rid)
decoded = base64.b64decode(creds.authorization_token).decode()
parts = decoded.split(':')
if len(parts) != 2:
raise Exception("Invalid credentials")
return docker.ImageRegistry(creds.proxy_endpoint, parts[0], parts[1])
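# Note: the ECR authorization token is the base64 encoding of "<user>:<password>"
# (typically "AWS:<password>"), which is why it is split on ':' above.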
for i in range(3):
# Create a private ECR registry.
repo = aws.ecr.Repository('my-repo-%i' % i, name='image-%i' % i)
registry = repo.registry_id.apply(get_registry_info)
# Build and publish the image.
docker.Image(
'my-image-%i' % i,
build=docker.DockerBuild(context='app', args={'parameter': str(i)}),
image_name=repo.repository_url,
registry=registry,
)
```
#### File: python/pulumi_docker/__init__.py
```python
from .container import *
from .docker import *
from .get_network import *
from .get_plugin import *
from .get_registry_image import *
from .image import *
from .network import *
from .plugin import *
from .provider import *
from .registry_image import *
from .remote_image import *
from .secret import *
from .service import *
from .service_config import *
from .utils import *
from .volume import *
from ._inputs import *
from . import outputs
# Make subpackages available:
from . import (
config,
)
def _register_module():
import pulumi
from . import _utilities
class Module(pulumi.runtime.ResourceModule):
_version = _utilities.get_semver_version()
def version(self):
return Module._version
def construct(self, name: str, typ: str, urn: str) -> pulumi.Resource:
if typ == "docker:index/container:Container":
return Container(name, pulumi.ResourceOptions(urn=urn))
elif typ == "docker:index/network:Network":
return Network(name, pulumi.ResourceOptions(urn=urn))
elif typ == "docker:index/plugin:Plugin":
return Plugin(name, pulumi.ResourceOptions(urn=urn))
elif typ == "docker:index/registryImage:RegistryImage":
return RegistryImage(name, pulumi.ResourceOptions(urn=urn))
elif typ == "docker:index/remoteImage:RemoteImage":
return RemoteImage(name, pulumi.ResourceOptions(urn=urn))
elif typ == "docker:index/secret:Secret":
return Secret(name, pulumi.ResourceOptions(urn=urn))
elif typ == "docker:index/service:Service":
return Service(name, pulumi.ResourceOptions(urn=urn))
elif typ == "docker:index/serviceConfig:ServiceConfig":
return ServiceConfig(name, pulumi.ResourceOptions(urn=urn))
elif typ == "docker:index/volume:Volume":
return Volume(name, pulumi.ResourceOptions(urn=urn))
else:
raise Exception(f"unknown resource type {typ}")
_module_instance = Module()
pulumi.runtime.register_resource_module("docker", "index/container", _module_instance)
pulumi.runtime.register_resource_module("docker", "index/network", _module_instance)
pulumi.runtime.register_resource_module("docker", "index/plugin", _module_instance)
pulumi.runtime.register_resource_module("docker", "index/registryImage", _module_instance)
pulumi.runtime.register_resource_module("docker", "index/remoteImage", _module_instance)
pulumi.runtime.register_resource_module("docker", "index/secret", _module_instance)
pulumi.runtime.register_resource_module("docker", "index/service", _module_instance)
pulumi.runtime.register_resource_module("docker", "index/serviceConfig", _module_instance)
pulumi.runtime.register_resource_module("docker", "index/volume", _module_instance)
class Package(pulumi.runtime.ResourcePackage):
_version = _utilities.get_semver_version()
def version(self):
return Package._version
def construct_provider(self, name: str, typ: str, urn: str) -> pulumi.ProviderResource:
if typ != "pulumi:providers:docker":
raise Exception(f"unknown provider type {typ}")
return Provider(name, pulumi.ResourceOptions(urn=urn))
pulumi.runtime.register_resource_package("docker", Package())
_register_module()
``` |
{
"source": "JohnHenrySplitMyHeart/bikesanity",
"score": 3
} |
#### File: bikesanity/entities/journal.py
```python
from .content_blocks import Image
from .page import Page
class TocEntry:
def __init__(self, original_id, title, url):
self.original_id = original_id
self.title = title
self.url = url
self.page = None
def set_page(self, page):
self.page = page
class Journal:
def __init__(self, journal_id, original_html):
self.journal_id = journal_id
self.original_html = original_html
self.postprocessed_html = None
self.journal_title = None
self.journal_subtitle = None
self.journal_author = None
self.distance_statement = None
self.locales = []
self.cover_image = None
self.toc = []
self.single_page = False
self.js = {}
self.css = {}
def add_toc_entry(self, original_id, title, url):
toc_entry = TocEntry(original_id, title, url)
self.toc.append(toc_entry)
def add_single_page(self, page: Page):
toc_entry = TocEntry(self.journal_id, self.journal_title, None)
toc_entry.set_page(page)
self.toc.append(toc_entry)
def add_cover_image(self, image: Image):
self.cover_image = image
def save_original_source(self, local_handler):
# Save the HTML itself
local_handler.save_html_original('index.html', self.postprocessed_html if self.postprocessed_html else self.original_html)
# Save the cover image if one exists
if self.cover_image: local_handler.save_image_original(self.cover_image)
# Save the JS and CSS
for js, content in self.js.items():
local_handler.save_js_resource(js, content)
for css, content in self.css.items():
local_handler.save_css_resource(css, content)
def save_resources(self, local_handler):
# Save the cover image if one exists
if self.cover_image: local_handler.save_image_resource(self.cover_image)
def clear_resources(self):
if self.cover_image: self.cover_image.clear_resources()
for js in self.js.values():
if js:
js.close()
for css in self.css.values():
if css:
css.close()
def update_data_model(self):
if not hasattr(self, 'single_page'): self.single_page = False
```
#### File: bikesanity/io_utils/file_handler.py
```python
import hashlib
import shutil
import os
import mimetypes
class FileHandler:
HASH_BUFFER_SIZE = 6553600
def __init__(self):
pass
def output_binary_to_file(self, filename, data):
os.makedirs(os.path.dirname(filename), exist_ok=True)
data.seek(0)
with open(filename, 'wb') as handle:
shutil.copyfileobj(data, handle)
def output_bytes_to_file(self, filename, bytes):
os.makedirs(os.path.dirname(filename), exist_ok=True)
with open(filename, 'wb') as handle:
handle.write(bytes)
def output_text_to_file(self, filename, text):
os.makedirs(os.path.dirname(filename), exist_ok=True)
with open(filename, 'w') as handle:
handle.write(text)
def remove_directory(self, path):
try:
shutil.rmtree(path)
except:
pass
def file_exists(self, filename):
return os.path.isfile(filename)
def get_file_content(self, filename):
if not self.file_exists(filename):
raise RuntimeError('File did not exist: {0}'.format(filename))
with open(filename, 'r', encoding="utf8") as handle:
return handle.read()
def get_binary_content(self, filename):
if not self.file_exists(filename):
raise RuntimeError('File did not exist: {0}'.format(filename))
with open(filename, 'rb') as handle:
return handle.read()
def get_binary(self, filename):
if not self.file_exists(filename):
raise RuntimeError('File did not exist: {0}'.format(filename))
return open(filename, 'rb')
def file_size(self, filename):
return os.path.getsize(filename)
def calculate_sha1_hash(self, filename):
sha1 = hashlib.sha1()
with open(filename, 'rb') as f:
while True:
data = f.read(self.HASH_BUFFER_SIZE)
if not data:
break
sha1.update(data)
return sha1.hexdigest()
def extension_from_type(self, mime_type):
extension = mimetypes.guess_extension(mime_type)
if extension and extension[0] == '.': extension = extension[1:]
return extension
```
#### File: bikesanity/io_utils/throttled.py
```python
import datetime
import random
import time
from bikesanity.io_utils import log_handler as log_handler
from .base_session import BaseSession
class ThrottledSession(BaseSession):
STANDARD_REQUEST_RATE_LIMITER = 25
STANDARD_REQUEST_RATE_LIMITER_MIN = 25
STANDARD_REQUEST_RATE_LIMITER_MAX = 30
FAILED_REQUEST_RATE_LIMITER = 5
def __init__(self):
super().__init__()
self.last_request = None
self.current_rate_limit = self.STANDARD_REQUEST_RATE_LIMITER
def _stochastic_delay(self):
return random.randrange(self.STANDARD_REQUEST_RATE_LIMITER_MIN, self.STANDARD_REQUEST_RATE_LIMITER_MAX)
def _wait_since_last_request(self):
rate_limit_delay = self._stochastic_delay()
if not self.last_request:
self.last_request = datetime.datetime.now()
while (datetime.datetime.now() - self.last_request).total_seconds() < rate_limit_delay:
time.sleep(0.2)
self.last_request = datetime.datetime.now()
def make_request(self, url):
super().make_request(url)
try:
self._wait_since_last_request()
return self.session.get(url, headers=self.headers)
except Exception as exc:
# Log the exception, delay a while, then raise
log_handler.log.error("Error connecting when downloading {0}".format(url))
time.sleep(self.FAILED_REQUEST_RATE_LIMITER)
raise
def make_stream_request(self, url):
super().make_stream_request(url)
return self.session.get(url, headers=self.headers, stream=True)
```
#### File: output_transformers/pdf_templates/base_pdf_template.py
```python
import os
import shutil
from fpdf import FPDF
from bikesanity.io_utils.resources import create_temp_from_resource
class BasePdfTemplate(FPDF):
DEJAVU_FONT = 'DejaVu'
A4_WIDTH = 210
A4_HEIGHT = 297
MARGIN = 20
TOP_MARGIN = 10
PAGE_WIDTH = A4_WIDTH - (MARGIN*2)
IMAGE_DPI = 200
MM_PER_INCH = 25.4
CAPTION_WIDTH = 150
def __init__(self, title, author, part=None):
self.draw_header = False
self.journal_title = title
self.author = author
self.part = part
super().__init__(orientation='P', unit='mm', format='A4')
self.tmp_files = []
self.load_font_resource('DejaVuSans.ttf', '')
self.load_font_resource('DejaVuSans-Bold.ttf', 'B')
self.load_font_resource('DejaVuSans-Oblique.ttf', 'I')
self.page_title = title
self.setup_pages()
self.image_pair = False
self.page_added = True
self.image_path = None
def load_font_resource(self, font_name, weight):
# Get a temporary file from the named resource
temp_font_file = create_temp_from_resource(['fonts', font_name])
# Add the font from this temporary file (only method FPDF supports)
self.add_font(self.DEJAVU_FONT, weight, temp_font_file, uni=True)
        # Remember the temp file so it can be removed later, once the font is loaded
self.tmp_files.append(temp_font_file)
def setup_pages(self):
self.set_font(self.DEJAVU_FONT, '', 14)
self.set_margins(self.MARGIN, self.TOP_MARGIN, self.MARGIN)
self.add_page()
def update_page_title(self, name):
self.draw_header = False
self.page_title = name
def limit_title(self, title, max_width=PAGE_WIDTH):
terms = title.split(' ')
terms_to_use = []
for i in range(0, len(terms)):
terms_to_use.append(terms[i])
title = ' '.join(terms_to_use)
if self.get_string_width(title) > max_width: break
return title
def header(self):
if not self.draw_header:
self.draw_header = True
return
self.set_font(self.DEJAVU_FONT, 'B', 12)
# Limit title if too long
title = self.limit_title(self.page_title)
# Calculate width of title and position
w = self.get_string_width(title) + 6
self.set_x((210 - w) / 2)
# Title
self.cell(w, 9, title, 0, 1, 'C', 0)
# Line break
self.ln(10)
self.page_top = self.get_y()
def footer(self):
# No footer on first few pages
if self.page_no() < 3: return
# Don't draw footer if content overlaps
if self.get_y() > self.A4_HEIGHT - self.TOP_MARGIN: return
# Position at 1.5 cm from bottom
self.set_y(-15)
self.set_font(self.DEJAVU_FONT, 'I', 8)
# Text color in gray
self.set_text_color(128)
footer_text = '{0} by {1}{2} - Page {3}'.format(self.journal_title, self.author, ' (part {0})'.format(self.part) if self.part else '', self.page_no()-2)
# Page number
self.cell(0, 10, footer_text, 0, 0, 'C')
def cover_title(self, title, subtitle, author, distance_statement, part=None):
self.set_font(self.DEJAVU_FONT, 'B', 20)
self.ln(15)
# Title
self.multi_cell(0, 20, title, 0, 'C', 0)
self.ln(1)
if part:
self.set_font(self.DEJAVU_FONT, '', 20)
self.cell(0, 5, 'Part {0}'.format(part), 0, 0, 'C')
self.ln(6)
# Line break
self.ln(6)
self.set_font(self.DEJAVU_FONT, '', 16)
self.multi_cell(0, 10, subtitle, 0, 'C', 0)
self.ln(4)
self.set_font(self.DEJAVU_FONT, 'I', 10)
self.multi_cell(0, 5, distance_statement, 0, 'C', 0)
self.ln(5)
self.set_font(self.DEJAVU_FONT, '', 16)
self.multi_cell(0, 20, author, 0, 'C', 0)
self.ln(8)
def add_toc(self, toc_items):
self.set_font(self.DEJAVU_FONT, 'B', 18)
self.cell(0, 5, 'Table of Contents', 0, 0, 'C', 0)
self.ln(15)
self.set_font(self.DEJAVU_FONT, 'I', 9)
for toc_item in toc_items:
if toc_item.is_header:
self.set_font(self.DEJAVU_FONT, 'B', 9)
else:
self.set_font(self.DEJAVU_FONT, 'I', 9)
# Limit the title if it's too long
title = self.limit_title(toc_item.title, 125)
str_size = self.get_string_width(title)
self.cell(str_size+2, 9, title)
# Filling dots
page_cell_size=self.get_string_width(toc_item.page_no) + 2
dot_length = self.PAGE_WIDTH - str_size - page_cell_size - 10
nb = int(dot_length // self.get_string_width('.'))
dots = '.' * nb
self.cell(dot_length, 9, dots, 0, 0, 'R')
# Page number
self.cell(page_cell_size, 9, toc_item.page_no, 0, 1,'R')
def section_title(self, title):
self.set_font(self.DEJAVU_FONT, 'B', 18)
self.multi_cell(0, 10, title, 0, 'C', 0)
self.ln(20)
def chapter_title(self, label, date, distance, total_distance):
self.set_font(self.DEJAVU_FONT, 'B', 14)
# Colors of frame, background and text
self.set_draw_color(0, 0, 0)
self.set_fill_color(230, 230, 0)
# Thickness of frame (1 mm)
self.set_line_width(0.5)
# Background color
self.set_fill_color(200, 220, 255)
# Title
self.multi_cell(0, 10, label, 1, 'C', 1)
# Line break
self.ln(4)
if not distance: return
if total_distance and date:
distance_statement = '{0} - of total {1} - on {2}'.format(distance, total_distance, date)
elif total_distance:
distance_statement = '{0} - of total {1}'.format(distance, total_distance)
else:
distance_statement = distance
self.set_font(self.DEJAVU_FONT, 'I', 10)
self.cell(0, 5, distance_statement, 0, 0, 'L', 0)
self.ln(20)
def add_image_format_tolerant(self, image_path, x=None, y=None, width=None, height=None):
for ext in [ None, '.jpeg', '.png']:
if self.try_add_image(image_path, x, y, width, height, ext):
break
def try_add_image(self, image_path, x, y, width, height, ext=None):
updated_ext = image_path[:image_path.rfind('.')] + ext if ext else image_path
if ext and image_path != updated_ext:
shutil.copyfile(image_path, updated_ext)
try:
self.image(updated_ext, x=x, y=y, w=width if width else 0, h=height if height else 0)
return True
except Exception as exc:
return False
def clipping_rect(self, x, y, w, h, outline=False):
op= 'S' if outline else 'n'
self._out('q {0} {1} {2} {3} re W {4}'.format(
x * self.k,
(self.h - y) * self.k,
w * self.k,
-h * self.k,
op
))
def unset_clipping(self):
self._out('Q')
def cleanup_tmp_files(self):
for tmp_file in self.tmp_files:
try:
os.remove(tmp_file)
os.remove(tmp_file + '.pkl')
os.remove(tmp_file + '.cw127.pkl')
except:
pass
```
#### File: bikesanity/processing/load_disk_journal.py
```python
import os
import bikesanity.io_utils.log_handler as log_handler
from bikesanity.entities.journal import Journal
from bikesanity.entities.page import Page
from bikesanity.io_utils.local_journal import LocalJournalHandler
from bikesanity.services.retrievers import LocalRetriever, ExportRetriever
from bikesanity.interpreter.journal_content import JournalContent
from bikesanity.interpreter.page_interpreter import PageInterpreter
class LoadDiskJournal:
DOWNLOAD_DIRECTORY = 'downloads'
EXPORTED_DIRECTORY = 'exported'
PROCESSED_DIRECTORY = 'processed'
def __init__(self, input_location, output_location, journal_id, exported=False, progress_callback=None):
self.input_location = os.path.join(input_location, self.EXPORTED_DIRECTORY if exported else self.DOWNLOAD_DIRECTORY)
self.output_location = os.path.join(output_location, self.PROCESSED_DIRECTORY)
self.progress_callback = progress_callback
self.journal_id = journal_id
self.input_handler = LocalJournalHandler(self.input_location, journal_id)
self.output_handler = LocalJournalHandler(self.output_location, journal_id)
# Ensure the output is clear
self.output_handler.remove_directory('')
os.makedirs(self.output_location, exist_ok=True)
self.retriever = ExportRetriever(self.input_handler) if exported else LocalRetriever(self.input_handler)
self.outputter = LocalRetriever(self.output_handler)
self.journal_crawler = JournalContent(self.retriever)
self.page_crawler = PageInterpreter(self.retriever)
def progress_update(self, percent):
if self.progress_callback:
self.progress_callback(progress=percent)
def get_process_location(self):
return self.output_handler.get_base_path()
def load_journal_from_disk(self):
# Retrieve and process the journal content
journal = self.journal_crawler.retrieve_journal(None, self.journal_id)
self.progress_update(percent=10)
journal = self._process_journal(journal)
return journal
def _process_journal(self, journal: Journal):
journal_id = journal.journal_id
# Handle standard multi-page journals with a ToC
if journal.toc:
log_handler.log.warning('Processing multiple pages for {0}'.format(journal_id))
# Iterate over all the retrieved pages and pull them separately.
page_count = 0
for toc in journal.toc:
if toc.url:
page = self._process_page(toc.original_id)
toc.set_page(page)
# Calculate percentage per page, to keep consumers updated
self.progress_update(((page_count / len(journal.toc)) * 80) + 10)
page_count += 1
else:
log_handler.log.warning('Processing single page for {0}'.format(journal_id))
# Handle single-page journals/articles that have all the content on the title page
journal.single_page = True
# Create a single new page and set with the title page html
content_page = Page(journal_id=journal_id, original_id=journal_id, original_html=journal.original_html)
# Process it as a normal page and add it to the ToC
content_page = self._process_page(content_page, single=True)
journal.add_single_page(content_page)
self.progress_update(percent=90)
# Save and clear any resources not associated with pages
journal.save_resources(self.output_handler)
journal.clear_resources()
# Finally serialize the parsed data structure and output
log_handler.log.info('Serializing data for {0}'.format(journal_id), extra={'journal_id': journal_id})
self.output_handler.serialize_and_save_journal(journal)
self.progress_update(percent=100)
log_handler.log.info('Completed {0}'.format(journal_id), extra={'journal_id': journal_id})
return journal
def _process_page(self, page_id, single=False):
log_handler.log.warning('Processing page {0} for {1}'.format(page_id, self.journal_id))
# Process the page and associated pics and maps
page = self.page_crawler.retrieve_page(self.journal_id, page_id, None)
page = self.page_crawler.parse_page(page, single=single)
# Save locally and clear resources loaded into the page
page.save_resources(self.output_handler)
page.clear_resources()
return page
```
#### File: bikesanity/services/html_postprocessor.py
```python
import re
from bikesanity.entities.content_blocks import Image
from bikesanity.entities.journal import Journal
from bikesanity.entities.page import Page
class HtmlPostProcessor:
def __init__(self):
pass
def postprocess_journal(self, journal: Journal):
if not journal.original_html: return
html = journal.original_html.decode()
# Cover image
if journal.cover_image:
html = self.replace_image(html, journal.cover_image)
# Table of contents links
for toc in journal.toc:
if toc.original_id:
html = self.replace_toc_link(html, toc)
journal.postprocessed_html = html.encode()
def postprocess_page(self, page: Page):
# Postprocess the page itself
page_html = page.original_html.decode()
page.postprocessed_html = self.postprocess_page_html(page, page_html)
# Postprocess additional page HTML, if any
page.additional_postprocessed_html = []
for additional_page in page.additional_html_pages:
additional_html = additional_page.decode()
page.additional_postprocessed_html.append(self.postprocess_page_html(page, additional_html))
def postprocess_page_html(self, page: Page, page_html):
# Fix JS and CSS links
page_html = self.fix_js_css_links(page_html)
# Fix multipage links
page_html = self.replace_multipage_part_links(page_html, page)
# Switch maps to use the OSM basemap so they work locally
page_html = self.switch_maps_to_osm(page_html)
# Post process images and links
for content in page.contents:
if isinstance(content, Image):
page_html = self.replace_image(page_html, content)
return page_html.encode()
def replace_toc_link(self, html, toc) -> str:
pattern = '<a href=\\"/doc/page/.*?ID=\\"{0}\\">'.format(toc.original_id)
replacement = '<a href="{0}.html" ID="{1}">'.format(toc.original_id, toc.original_id)
return re.sub(pattern, replacement, html)
def replace_image(self, html, image: Image):
image_path = re.escape(image.original_path_small)
pattern = '<a href=\\".*\\">\\s*<img src=\\"/{0}.*?\\"'.format(image_path)
replacement = '<a href="{0}"><img src="{1}"'.format(image.original_path_fullsize, image.original_path_small)
return re.sub(pattern, replacement, html)
def switch_maps_to_osm(self, page_html):
return re.sub('\"basemap\":\"stadia\"', '"basemap":"osm"', page_html)
def fix_js_css_links(self, page_html):
page_html = re.sub('<script src=\\"/javascript', '<script src="javascript', page_html)
page_html = re.sub("<script src='/javascript", "<script src='javascript", page_html) # Yes, really. NG is an idiot.
page_html = re.sub('<script type=\\"text/javascript\\" src=\\"/javascript', '<script type="text/javascript" src="javascript', page_html)
page_html = re.sub('<link rel=\\"stylesheet\\" href=\\"/css/', '<link rel="stylesheet" href="css/', page_html)
page_html = re.sub("<link href='/css", "<link href='css", page_html)
return page_html
def replace_multipage_part_links(self, page_html, page: Page):
page_html = re.sub(r'(>>> <A HREF=\")(.*?)(\d+)(\">)', r'\g<1>{0}_\g<3>.html\g<4>'.format(page.original_id), page_html)
page_html = re.sub(r'(<<< <A HREF=\")(.*?)(\">)', r'\g<1>{0}.html\g<3>'.format(page.original_id), page_html)
page_html = re.sub(r'(<<< <A HREF=\")(.*?)(\d+)(\">)', r'\g<1>{0}_\g<3>.html\g<4>'.format(page.original_id), page_html)
return page_html
``` |
{
"source": "john-hen/Subsy",
"score": 3
} |
#### File: Subsy/subsy/timestamp.py
```python
"""Helpers for formatting and parsing time-stamps."""
import re
formats = {
'default', 'hh:mm:ss.ms',
'SubRip', 'srt', '.srt', 'hh:mm:ss,ms',
}
def format(milliseconds, format=None):
(s, ms) = divmod(milliseconds, 1000)
(h, s) = divmod(s, 3600)
(m, s) = divmod(s, 60)
if format in (None, 'default', 'hh:mm:ss.ms'):
return f'{h:02d}:{m:02d}:{s:02d}.{ms:03d}'
elif format in ('SubRip', 'srt', '.srt', 'hh:mm:ss,ms'):
return f'{h:02d}:{m:02d}:{s:02d},{ms:03d}'
else:
raise ValueError(f'Unknown time-stamp format "{format}".')
def parse(text, format=None):
if format in (None, 'default', 'hh:mm:ss.ms'):
        match = re.match(r'(\d\d):(\d\d):(\d\d)\.(\d\d\d)', text)
elif format in ('SubRip', 'srt', '.srt', 'hh:mm:ss,ms'):
match = re.match(r'(\d\d):(\d\d):(\d\d),(\d\d\d)', text)
else:
raise ValueError(f'Unknown time-stamp format "{format}".')
h = int(match.group(1))
m = int(match.group(2))
s = int(match.group(3))
ms = int(match.group(4))
return ((h*60 + m)*60 + s)*1000 + ms
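# Round-trip sanity check (values chosen purely for illustration):
#   format(3723456) == '01:02:03.456'
#   parse('01:02:03,456', format='srt') == 3723456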
``` |
{
"source": "john-hewitt/conditional-probing",
"score": 2
} |
#### File: conditional-probing/vinfo/dataset.py
```python
import os
import h5py
import torch
import torch.nn as nn
from torch.utils.data import Dataset, IterableDataset, DataLoader
import Levenshtein as levenshtein
from tqdm import tqdm
from yaml import YAMLObject
from transformers import AutoTokenizer, AutoModel
from allennlp.modules.elmo import batch_to_ids
from utils import TRAIN_STR, DEV_STR, TEST_STR, InitYAMLObject
BATCH_SIZE = 50
"""
Classes for loading, caching, and yielding text datasets
"""
#class Dataset(Dataset, InitYAMLObject):
# """
# Base class for objects that serve batches of
# tensors. For decoration/explanation only
# """
# yaml_tag = '!Dataset'
class IterableDatasetWrapper(Dataset):#(IterableDataset):
"""
Wrapper class to pass to a DataLoader so it doesn't
think the underlying generator should have a len() fn.
But I gave up on this for various reasons so it's just
a normal dataset, here in case I try again.
"""
def __init__(self, generator):
self.generator = generator #[x for x in generator]
def __iter__(self):
return iter(self.generator)
def __len__(self):
return len(self.generator)
def __getitem__(self, idx):
return self.generator[idx]
class ListDataset(Dataset, InitYAMLObject):
"""
Container class for collecting multiple annotation or
representation datasets and a single target task dataset
, and serving all of them
"""
yaml_tag = '!ListDataset'
def __init__(self, args, data_loader, output_dataset, input_datasets):
"""
Arguments:
          output_dataset: the dataset providing the target labels
          input_datasets: the list of input representation/annotation datasets
"""
self.args = args
self.input_datasets = input_datasets
self.output_dataset = output_dataset
self.data_loader = data_loader
self.train_data = None
self.dev_data = None
self.test_data = None
def get_train_dataloader(self, shuffle=True):
"""Returns a PyTorch DataLoader object with the training data
"""
if self.train_data is None:
self.train_data = list(self.load_data(TRAIN_STR))
#generator = IterableDatasetWrapper(self.load_data(TRAIN_STR))
generator = IterableDatasetWrapper(self.train_data)
return DataLoader(generator, batch_size=BATCH_SIZE, shuffle=shuffle, collate_fn=self.collate_fn)
def get_dev_dataloader(self, shuffle=False):
"""Returns a PyTorch DataLoader object with the dev data
"""
if self.dev_data is None:
self.dev_data = list(self.load_data(DEV_STR))
#generator = IterableDatasetWrapper(self.load_data(DEV_STR))
generator = IterableDatasetWrapper(self.dev_data)
return DataLoader(generator, batch_size=BATCH_SIZE, shuffle=shuffle, collate_fn=self.collate_fn)
def get_test_dataloader(self, shuffle=False):
"""Returns a PyTorch DataLoader object with the test data
"""
if self.test_data is None:
self.test_data = list(self.load_data(TEST_STR))
#generator = IterableDatasetWrapper(self.load_data(TEST_STR))
generator = IterableDatasetWrapper(self.test_data)
return DataLoader(generator, batch_size=BATCH_SIZE, shuffle=shuffle, collate_fn=self.collate_fn)
def load_data(self, split_string):
"""Loads data from disk into RAM tensors for passing to a network on GPU
Iterates through the training set once, passing each sentence to each
input Dataset and the output Dataset
"""
for sentence in tqdm(self.data_loader.yield_dataset(split_string),desc='[loading]'):
input_tensors = []
for dataset in self.input_datasets:
input_tensors.append(dataset.tensor_of_sentence(sentence, split_string))
output_tensor = self.output_dataset.tensor_of_sentence(sentence, split_string)
yield (input_tensors, output_tensor, sentence)
def collate_fn(self, observation_list):
"""
Combines observations (input_tensors, output_tensor, sentence) tuples
input_tensors is of the form ((annotation, alignment), ..., (annotation, alignment))
output_tensor is of the form (annotation, alignment),
to batches of observations ((batches_input_1, batches_input_2), batches_output, sentences)
"""
sentences = (x[2] for x in observation_list)
max_corpus_token_len = max((len(x) for x in sentences))
input_annotation_tensors = []
input_alignment_tensors = []
input_tensor_count = len(observation_list[0][0])
for input_tensor_index in range(input_tensor_count):
max_annotation_token_len = max([x[0][input_tensor_index][0].shape[0] for x in observation_list])
intermediate_annotation_list = []
intermediate_alignment_list = []
for input_annotation, input_alignment in ((x[0][input_tensor_index][0],
x[0][input_tensor_index][1]) for x in observation_list):
if len(input_annotation.shape) == 1: # word-level ids
new_annotation_tensor = torch.zeros(max_annotation_token_len, dtype=torch.long)
new_annotation_tensor[:len(input_annotation)] = input_annotation
                elif len(input_annotation.shape) == 2: # character-level ids
new_annotation_tensor = torch.zeros(max_annotation_token_len, input_annotation.shape[1]).long()
new_annotation_tensor[:len(input_annotation),:] = input_annotation
intermediate_annotation_list.append(new_annotation_tensor)
new_alignment_tensor = torch.zeros(max_annotation_token_len, max_corpus_token_len)
new_alignment_tensor[:input_alignment.shape[0], :input_alignment.shape[1]] = input_alignment
intermediate_alignment_list.append(new_alignment_tensor)
input_annotation_tensors.append(torch.stack(intermediate_annotation_list).to(self.args['device']))
input_alignment_tensors.append(torch.stack(intermediate_alignment_list).to(self.args['device']))
intermediate_annotation_list = []
intermediate_alignment_list = []
max_output_annotation_len = max([x[1][0].shape[0] for x in observation_list])
for output_annotation, output_alignment in (x[1] for x in observation_list):
new_annotation_tensor = torch.zeros(max_output_annotation_len, dtype=torch.long)
new_annotation_tensor[:len(output_annotation)] = output_annotation
intermediate_annotation_list.append(new_annotation_tensor)
output_annotation_tensor = torch.stack(intermediate_annotation_list).to(self.args['device'])
sentences = [x[2] for x in observation_list]
return ((input_annotation_tensors, input_alignment_tensors), output_annotation_tensor, sentences)
class ELMoData(InitYAMLObject):
"""
Loading and serving minibatches of tokens to input to
ELMo, as mediated by allennlp.
"""
yaml_tag = '!ELMoData'
def __init__(self, args):
self.args = args
def tensor_of_sentence(self, sentence, split_string):
"""
Provides character indices for a single sentence.
"""
words = [x[1] for x in sentence]
alignment = torch.eye(len(words))
return batch_to_ids([words])[0,:,:], alignment
#for index, token in enumerate([x[1] for x in sentence]):
class HuggingfaceData(InitYAMLObject):
"""
Loading and serving minibatches of tokens to input
to a Huggingface-loaded model.
"""
yaml_tag = '!HuggingfaceData'
def __init__(self, args, model_string, cache=None):
print('Constructing HuggingfaceData of {}'.format(model_string))
self.tokenizer = AutoTokenizer.from_pretrained(model_string) #, add_prefix_space=True)
self.args = args
self.cache = cache
self.task_name = 'hfacetokens.{}'.format(model_string)
self.cache_is_setup = False
def levenshtein_matrix(self, string1, string2):
opcodes = levenshtein.opcodes(string1, string2)
mtx = torch.zeros(len(string1), len(string2))
cumulative = 0
for opcode in opcodes:
opcode_type, str1b, str1e, str2b, str2e = opcode
if opcode_type in {'equal', 'replace'}:
diff = str1e - str1b
for i in range(diff):
mtx[str1b+i,str2b+i] = 1
if opcode_type == 'delete':
diff = str1e - str1b
for i in range(diff):
mtx[str1b+i, str2b] = 1
if opcode_type == 'insert':
diff = str2e - str2b
for i in range(diff):
mtx[str1b, str2b+i] = 1
return mtx
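    # Illustrative example, assuming SequenceMatcher-style opcodes from the
    # Levenshtein package: levenshtein_matrix('abc', 'abd') sets (0,0) and (1,1)
    # from the 'equal' opcode and (2,2) from the 'replace' opcode, i.e. an
    # identity-like 3x3 character alignment.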
def token_to_character_alignment(self, tokens):
ptb_sentence_length = sum((len(tok) for tok in tokens))
ptb_string_token_alignment = []
cumulative = 0
for token in tokens:
new_alignment = torch.zeros(ptb_sentence_length)
for i, char in enumerate(token):
if char == ' ':
continue
new_alignment[i+cumulative] = 1
new_alignment = new_alignment / sum(new_alignment)
cumulative += len(token)
ptb_string_token_alignment.append(new_alignment)
return torch.stack(ptb_string_token_alignment)
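    # For illustration, token_to_character_alignment(['hi ', 'yo']) yields a
    # 2 x 5 matrix whose rows are roughly [0.5, 0.5, 0, 0, 0] (the space is
    # skipped) and [0, 0, 0, 0.5, 0.5].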
def de_ptb_tokenize(self, tokens):
tokens_with_spaces = []
new_tokens_with_spaces = []
ptb_sentence_length = sum((len(tok) for tok in tokens))
token_alignments = []
cumulative = 0
for i, _ in enumerate(tokens):
token = tokens[i]
next_token = tokens[i+1] if i < len(tokens)-1 else '<EOS>'
# Handle LaTeX-style quotes
if token.strip() in {"``", "''"}:
new_token = '"'
elif token.strip() == '-LRB-':
new_token = '('
elif token.strip() == '-RRB-':
new_token = ')'
elif token.strip() == '-LSB-':
new_token = '['
elif token.strip() == '-RSB-':
new_token = ']'
elif token.strip() == '-LCB-':
new_token = '{'
elif token.strip() == '-RCB-':
new_token = '}'
else:
new_token = token
use_space = (token.strip() not in {'(', '[', '{', '"', "'", '``', "''"} and
next_token.strip() not in {"'ll", "'re", "'ve", "n't",
"'s", "'LL", "'RE", "'VE",
"N'T", "'S", '"', "'", '``', "''", ')', '}', ']',
'.', ';', ':', '!', '?'}
and i != len(tokens) - 1)
new_token = new_token.strip() + (' ' if use_space else '')
new_tokens_with_spaces.append(new_token)
tokens_with_spaces.append(token)
new_alignment = torch.zeros(ptb_sentence_length)
for index, char in enumerate(token):
new_alignment[index+cumulative] = 1
#new_alignment = new_alignment / sum(new_alignment)
for new_char in new_token:
token_alignments.append(new_alignment)
cumulative += len(token)
return new_tokens_with_spaces, torch.stack(token_alignments)
def hface_ontonotes_alignment(self, sentence):
tokens = [x[1] for x in sentence]
tokens = [ x + (' ' if i !=len(tokens)-1 else '') for (i, x) in enumerate(tokens)]
raw_tokens, ptb_to_deptb_alignment = self.de_ptb_tokenize(tokens)
raw_string = ''.join(raw_tokens)
ptb_token_to_ptb_string_alignment = self.token_to_character_alignment(tokens)
#tokenizer = transformers.AutoTokenizer.from_pretrained('roberta-base')
hface_tokens = self.tokenizer.tokenize(raw_string)
hface_tokens_with_spaces = [x+ (' ' if i != len(hface_tokens)-1 else '')for (i, x) in enumerate(hface_tokens)]
hface_token_to_hface_string_alignment = self.token_to_character_alignment(hface_tokens_with_spaces)
hface_string = ' '.join(hface_tokens)
hface_character_to_deptb_character_alignment = self.levenshtein_matrix(hface_string, raw_string)
unnormalized_alignment = torch.matmul(torch.matmul(hface_token_to_hface_string_alignment.to(self.args['device']), hface_character_to_deptb_character_alignment.to(self.args['device'])),
torch.matmul(ptb_token_to_ptb_string_alignment.to(self.args['device']), ptb_to_deptb_alignment.to(self.args['device']).t()).t())
return (unnormalized_alignment / torch.sum(unnormalized_alignment, dim=0)).cpu(), hface_tokens, raw_string
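    # In short: wordpiece-to-character and PTB-token-to-character alignments are
    # both mapped into the de-PTB-tokenized string, composed via matrix
    # multiplication, and column-normalized so each original token's mass over
    # wordpieces sums to 1.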
def _setup_cache(self):
"""
Constructs readers for caches that exist
and writers for caches that do not.
"""
if self.cache is None:
return
if self.cache_is_setup:
return
# Check cache readable/writeable
train_cache_path, train_cache_readable, train_cache_writeable = \
self.cache.get_cache_path_and_check(TRAIN_STR, self.task_name)
dev_cache_path, dev_cache_readable, dev_cache_writeable = \
self.cache.get_cache_path_and_check(DEV_STR, self.task_name)
test_cache_path, test_cache_readable, test_cache_writeable = \
self.cache.get_cache_path_and_check(TEST_STR, self.task_name)
# If any of the train/dev/test are neither readable nor writeable, do not use cache.
if ((not train_cache_readable and not train_cache_writeable) or
(not dev_cache_readable and not dev_cache_writeable) or
(not test_cache_readable and not test_cache_writeable)):
self.cache = None
print("Not using the cache at all, since at least of one "
"of {train,dev,test} cache neither readable nor writable.")
return
# Load readers or writers
self.train_cache_writer = None
self.dev_cache_writer = None
self.test_cache_writer = None
if train_cache_readable:
f = h5py.File(train_cache_path, 'r')
self.train_cache_tokens = (torch.tensor(f[str(i)+'tok'][()]) for i in range(len(f.keys())))
self.train_cache_alignments = (torch.tensor(f[str(i)+'aln'][()]) for i in range(len(f.keys())))
elif train_cache_writeable:
#self.train_cache_writer = h5py.File(train_cache_path, 'w')
self.train_cache_writer = self.cache.get_hdf5_cache_writer(train_cache_path)
self.train_cache_tokens = None
self.train_cache_alignments = None
else:
raise ValueError("Train cache neither readable nor writeable")
if dev_cache_readable:
f2 = h5py.File(dev_cache_path, 'r')
self.dev_cache_tokens = (torch.tensor(f2[str(i)+'tok'][()]) for i in range(len(f2.keys())))
self.dev_cache_alignments = (torch.tensor(f2[str(i)+'aln'][()]) for i in range(len(f2.keys())))
elif dev_cache_writeable:
#self.dev_cache_writer = h5py.File(dev_cache_path, 'w')
self.dev_cache_writer = self.cache.get_hdf5_cache_writer(dev_cache_path)
self.dev_cache_tokens = None
self.dev_cache_alignments = None
else:
raise ValueError("Dev cache neither readable nor writeable")
if test_cache_readable:
f3 = h5py.File(test_cache_path, 'r')
self.test_cache_tokens = (torch.tensor(f3[str(i)+'tok'][()]) for i in range(len(f3.keys())))
self.test_cache_alignments = (torch.tensor(f3[str(i)+'aln'][()]) for i in range(len(f3.keys())))
elif test_cache_writeable:
#self.test_cache_writer = h5py.File(test_cache_path, 'w')
self.test_cache_writer = self.cache.get_hdf5_cache_writer(test_cache_path)
self.test_cache_tokens = None
self.test_cache_alignments = None
else:
raise ValueError("Test cache neither readable nor writeable")
self.cache_is_setup = True
def tensor_of_sentence(self, sentence, split):
self._setup_cache()
if self.cache is None:
labels = self._tensor_of_sentence(sentence, split)
return labels
# Otherwise, either read from or write to cache
if split == TRAIN_STR and self.train_cache_tokens is not None:
return next(self.train_cache_tokens), next(self.train_cache_alignments)
if split == DEV_STR and self.dev_cache_tokens is not None:
return next(self.dev_cache_tokens), next(self.dev_cache_alignments)
if split == TEST_STR and self.test_cache_tokens is not None:
return next(self.test_cache_tokens), next(self.test_cache_alignments)
cache_writer = (self.train_cache_writer if split == TRAIN_STR else (
self.dev_cache_writer if split == DEV_STR else (
self.test_cache_writer if split == TEST_STR else None)))
if cache_writer is None:
raise ValueError("Unknown split: {}".format(split))
wordpiece_indices, alignments = self._tensor_of_sentence(sentence, split)
tok_string_key = str(len(list(filter(lambda x: 'tok' in x, cache_writer.keys())))) + 'tok'
tok_dset = cache_writer.create_dataset(tok_string_key, wordpiece_indices.shape)
tok_dset[:] = wordpiece_indices
aln_string_key = str(len(list(filter(lambda x: 'aln' in x, cache_writer.keys())))) + 'aln'
aln_dset = cache_writer.create_dataset(aln_string_key, alignments.shape)
aln_dset[:] = alignments
return wordpiece_indices, alignments
def _tensor_of_sentence(self, sentence, split):
alignment, wordpiece_strings, raw_string = self.hface_ontonotes_alignment(sentence)
# add [SEP] and [CLS] empty alignments
empty = torch.zeros(1, alignment.shape[1])
alignment = torch.cat((empty, alignment, empty))
#wordpiece_indices = torch.tensor(self.tokenizer(wordpiece_strings)
wordpiece_indices = torch.tensor(self.tokenizer(raw_string).input_ids) #, is_split_into_words=True))
return wordpiece_indices, alignment
def _naive_tensor_of_sentence(self, sentence, split_string):
"""
Converts from a tuple-formatted sentence (e.g., from CoNLL-formatted data)
to a Torch tensor of integers representing subword piece ids for input to
a Huggingface-formatted neural model
"""
# CLS token given by tokenizer
wordpiece_indices = []
wordpiece_alignment_vecs = [torch.zeros(len(sentence))]
# language tokens
for index, token in enumerate([x[1] for x in sentence]):
new_wordpieces = self.tokenizer.tokenize(token)
wordpiece_alignment = torch.zeros(len(sentence))
wordpiece_alignment[index] = 1
for wordpiece in new_wordpieces:
wordpiece_alignment_vecs.append(torch.clone(wordpiece_alignment))
wordpiece_indices.extend(new_wordpieces)
# SEP token given by tokenizer
wordpiece_indices = torch.tensor(self.tokenizer.encode(wordpiece_indices))
wordpiece_alignment_vecs.append(torch.zeros(len(sentence)))
wordpiece_alignment_vecs = torch.stack(wordpiece_alignment_vecs)
return wordpiece_indices, wordpiece_alignment_vecs
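# Illustrative note (the tokens below are made up): for a tuple-formatted sentence such as
# [('0', 'Hello', 'UH'), ('1', 'world', 'NN')], the word at index 1 of each tuple is split
# into wordpieces; the returned alignment matrix has one row per wordpiece (plus all-zero
# rows for the [CLS]/[SEP] positions) and one column per original token, with a 1 marking
# the token each wordpiece came from.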
class AnnotationData(InitYAMLObject):
"""
Loading and serving minibatches of data from annotations
"""
yaml_tag = '!AnnotationDataset'
def __init__(self, args, task):
self.args = args
self.task = task
#self.task.setup_cache()
def tensor_of_sentence(self, sentence, split_string):
"""
Converts from a tuple-formatted sentence (e.g., from CoNLL-formatted data)
to a Torch tensor of integers representing the annotation
"""
alignment = torch.eye(len(sentence))
return self.task.labels_of_sentence(sentence, split_string), alignment
class Loader(InitYAMLObject):
"""
Base class for objects that read datasets from disk
and yield sentence buffers for tokenization and labeling
Strictly for description
"""
yaml_tag = '!Loader'
class OntonotesReader(Loader):
"""
Minutiae for reading the OntoNotes dataset,
formatted as described in the readme
"""
yaml_tag = '!OntonotesReader'
def __init__(self, args, train_path, dev_path, test_path, cache):
print('Constructing OntoNotesReader')
self.train_path = train_path
self.dev_path = dev_path
self.test_path = test_path
self.cache = cache
@staticmethod
def sentence_lists_of_stream(ontonotes_stream):
"""
Yield sentences from raw ontonotes stream
Arguments:
ontonotes_stream: iterable of ontonotes file lines
Yields:
a buffer for each sentence in the stream; elements
in the buffer are lists defined by TSV fields of the
ontonotes stream
"""
buf = []
for line in ontonotes_stream:
if line.startswith('#'):
continue
if not line.strip():
yield buf
buf = []
else:
buf.append([x.strip() for x in line.split('\t')])
if buf:
yield buf
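# Example (sketch; the lines are made up): for a stream containing
#   "#begin document", "0\tThe\tDT", "1\tcat\tNN", "" (a blank line ends the sentence),
# this yields [['0', 'The', 'DT'], ['1', 'cat', 'NN']]: '#'-prefixed lines are skipped
# and each TSV line becomes one list in the sentence buffer.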
def yield_dataset(self, split_string):
"""
Yield a list of attribute lines, given by ontonotes_fields,
for each sentence in the requested split of OntoNotes
"""
path = (self.train_path if split_string == TRAIN_STR else
(self.dev_path if split_string == DEV_STR else
(self.test_path if split_string == TEST_STR else
None)))
if path is None:
raise ValueError("Unknown split string: {}".format(split_string))
with open(path) as fin:
for sentence in OntonotesReader.sentence_lists_of_stream(fin):
yield sentence
class SST2Reader(Loader):
"""
Minutiae for reading the Stanford Sentiment (SST-2)
dataset, as downloaded from the GLUE website.
"""
yaml_tag = '!SST2Reader'
def __init__(self, args, train_path, dev_path, test_path, cache):
print('Constructing SST2Reader')
self.train_path = train_path
self.dev_path = dev_path
self.test_path = test_path
self.cache = cache
@staticmethod
def sentence_lists_of_stream(sst2_stream):
"""
Yield sentences from raw sst2 stream
Arguments:
sst2_stream: iterable of sst2_stream lines
Yields:
a buffer for each sentence in the stream;
elements in the buffer are lists defined by TSV
fields of the SST-2 stream
"""
_ = next(sst2_stream) # Get rid of the column labels
for line in sst2_stream:
word_string, label_string = [x.strip() for x in line.split('\t')]
word_tokens = word_string.split(' ')
indices = [str(i) for i, _ in enumerate(word_tokens)]
label_tokens = [label_string for _ in word_tokens]
yield list(zip(indices, word_tokens, label_tokens))
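# Example (sketch; the sentence and label are made up):
# >>> import io
# >>> stream = io.StringIO("sentence\tlabel\na fine film\t1\n")
# >>> list(SST2Reader.sentence_lists_of_stream(stream))
# [[('0', 'a', '1'), ('1', 'fine', '1'), ('2', 'film', '1')]]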
def yield_dataset(self, split_string):
"""
Yield a list of token attribute tuples
for each sentence in the requested split of SST-2
"""
path = (self.train_path if split_string == TRAIN_STR else
(self.dev_path if split_string == DEV_STR else
(self.test_path if split_string == TEST_STR else
None)))
if path is None:
raise ValueError("Unknown split string: {}".format(split_string))
with open(path) as fin:
for sentence in SST2Reader.sentence_lists_of_stream(fin):
yield sentence
```
#### File: conditional-probing/vinfo/utils.py
```python
from yaml import YAMLObject
IGNORE_LABEL_INDEX = -100
TRAIN_STR = 'train'
DEV_STR = 'dev'
TEST_STR = 'test'
PTB_UNIVERSAL_CONVERSION_STRING = 'ptb_to_upos'
WSD_COARSENING_CONVERSION_STRING = 'wsd_coarse'
def get_results_root(config):
pass
def get_experiment_dir(config):
pass
def get_default_ontonotes_fieldnames():
"""
"""
class InitYAMLObject(YAMLObject):
@classmethod
def from_yaml(cls, loader, node):
"""
Convert a representation node to a Python object.
"""
arg_dict = loader.construct_mapping(node, deep=True)
print('Constructing', cls)
return cls(**arg_dict)
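# Illustrative note: with the yaml_tag values declared on InitYAMLObject subclasses
# elsewhere in the repo (e.g. !AnnotationDataset), a tagged mapping in the experiment
# YAML config is constructed by passing that mapping directly as keyword arguments
# to the subclass constructor.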
# The map as given here is from
# https://raw.githubusercontent.com/slavpetrov/universal-pos-tags/master/en-ptb.map
ptb_to_univ_map = {'!': '.', '#': '.', '$': '.', "''": '.', '(': '.', ')': '.',
',': '.', '-LRB-': '.', '-RRB-': '.', '.': '.', ':': '.',
'?': '.', 'CC': 'CONJ', 'CD': 'NUM', 'CD|RB': 'X', 'DT': 'DET',
'EX': 'DET', 'FW': 'X', 'IN': 'ADP', 'IN|RP': 'ADP', 'JJ': 'ADJ',
'JJR': 'ADJ', 'JJRJR': 'ADJ', 'JJS': 'ADJ', 'JJ|RB': 'ADJ',
'JJ|VBG': 'ADJ', 'LS': 'X', 'MD': 'VERB', 'NN': 'NOUN', 'NNP': 'NOUN',
'NNPS': 'NOUN', 'NNS': 'NOUN', 'NN|NNS': 'NOUN', 'NN|SYM': 'NOUN',
'NN|VBG': 'NOUN', 'NP': 'NOUN', 'PDT': 'DET', 'POS': 'PRT',
'PRP': 'PRON', 'PRP$': 'PRON', 'PRP|VBP': 'PRON', 'PRT': 'PRT',
'RB': 'ADV', 'RBR': 'ADV', 'RBS': 'ADV', 'RB|RP': 'ADV', 'RB|VBG': 'ADV',
'RN': 'X', 'RP': 'PRT', 'SYM': 'X', 'TO': 'PRT', 'UH': 'X',
'VB': 'VERB', 'VBD': 'VERB', 'VBD|VBN': 'VERB', 'VBG': 'VERB',
'VBG|NN': 'VERB', 'VBN': 'VERB', 'VBP': 'VERB', 'VBP|TO': 'VERB',
'VBZ': 'VERB', 'VP': 'VERB', 'WDT': 'DET', 'WH': 'X',
'WP': 'PRON', 'WP$': 'PRON', 'WRB': 'ADV', '``': '.'}
# But it doesn't include all the tags in Ontonotes... so we make decisions about them here.
ptb_to_univ_map['HYPH'] = '.' # Suggested by the behavior of the EWT treebank
ptb_to_univ_map['AFX'] = 'X' # Suggested by the behavior of the EWT treebank
ptb_to_univ_map['XX'] = 'X' # Speech disfluencies
coarse_wsd_map = {# Tags that are mapped to the ignore tag (counts <10 in train set)
"11.26":"-", "11.4":"-", "12.12":"-", "12.7":"-", "13.8":"-",
"16.10":"-", "16.8":"-", "17.9":"-", "31":"-", "32":"-", "33":"-",
"5.7":"-", "6.2":"-", "6.4":"-", "7.14":"-", "7.19":"-",
"7.24":"-", "7.27":"-", "7.6":"-", "11.10":"-", "11.17":"-",
"11.8":"-", "11.9":"-", "13.17":"-", "16.11":"-", "16.9":"-",
"25":"-", "5.11":"-", "5.9":"-", "7.12":"-", "7.16":"-",
"7.17":"-", "7.26":"-", "7.31":"-", "7.5":"-", "11.15":"-",
"11.19":"-", "11.24":"-", "11.31":"-", "12.13":"-", "13.1":"-",
"13.21":"-", "13.6":"-", "17.6":"-", "29":"-", "5.12":"-",
"7.32":"-", "7.7":"-", "18":"-", "23":"-", "6.1":"-",
"7.13":"-", "7.21":"-", "7.29":"-", "12.6":"-", "13.2":"-",
"24":"-", "5.2":"-", "5.4":"-", "7.11":"-", "7.8":"-",
"8.12":"-", "11.12":"-", "11.32":"-", "16.2":"-", "16.3":"-",
"5.5":"-", "7.9":"-", "11.38":"-", "11.7":"-", "12.8":"-",
"11.20":"-", "11.33":"-", "17":"-", "11.13":"-", "11.23":"-",
"11.6":"-", "13.9":"-",
# In dev or test (didn't check which) but not in train
"12.1":"-",
"17.1":"-",
"5.1":"-",
"7.10":"-",
"7.18":"-",
# Tags that are kept as-is (counts >=10 in train set)
"11.5":"11.5", "26":"26", "11.1":"11.1", "16.1":"16.1", "16.5":"16.5",
"7.15":"7.15", "7.28":"7.28", "13.5":"13.5", "7.3":"7.3", "20":"20",
"21":"21", "19":"19", "11.3":"11.3", "16.4":"16.4", "5.8":"5.8",
"5.6":"5.6", "13.4":"13.4", "11.37":"11.37", "7.4":"7.4", "7.1":"7.1",
"7.2":"7.2", "16":"16", "11.2":"11.2", "14.1":"14.1", "13":"13",
"15":"15", "14":"14", "11":"11", "10":"10", "9":"9",
"8":"8", "12":"12", "7":"7", "6":"6", "5":"5",
"4":"4", "3":"3", "2":"2", "1":"1", "-":"-"
}
def get_conversion_dict(conversion_name):
"""Retrieves a hard-coded label conversion dictionary.
When coarsening the label set of a task based on a predefined
conversion scheme like Penn Treebank tags to Universal PoS tags,
this function provides the map, out of a fixed list of known
maps addressed by a keyword string.
"""
if conversion_name == PTB_UNIVERSAL_CONVERSION_STRING:
return ptb_to_univ_map
elif conversion_name == WSD_COARSENING_CONVERSION_STRING:
return coarse_wsd_map
else:
raise ValueError("Unknown conversion name: {}".format(conversion_name))
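# Example (illustrative sketch; the tag sequence below is made up, not from any dataset):
def example_coarsen_ptb_tags(tags=('DT', 'NN', 'VBZ', 'JJ', '.')):
    """Coarsens a small PTB tag sequence to Universal PoS tags using the map above."""
    conversion = get_conversion_dict(PTB_UNIVERSAL_CONVERSION_STRING)
    return [conversion[tag] for tag in tags]  # ['DET', 'NOUN', 'VERB', 'ADJ', '.']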
``` |
{
"source": "JohnHillegass/celpy",
"score": 3
} |
#### File: celpy/parser/parser.py
```python
import os,sys,inspect,collections
cwd = os.getcwd()
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
os.chdir(currentdir)
import _parser
os.chdir(cwd)
# to use this code in your end-user python file, import it as follows:
# from parser import parser
# and then refer to everything using parser. prefix
# packages imported by this package listed below:
import go
# ---- Types ---
# Python type for slice []*expr.Expr
class Slice_Ptr_expr_Expr(go.GoClass):
""""""
def __init__(self, *args, **kwargs):
"""
handle=A Go-side object is always initialized with an explicit handle=arg
otherwise parameter is a python list that we copy from
"""
self.index = 0
if len(kwargs) == 1 and 'handle' in kwargs:
self.handle = kwargs['handle']
_parser.IncRef(self.handle)
elif len(args) == 1 and isinstance(args[0], go.GoClass):
self.handle = args[0].handle
_parser.IncRef(self.handle)
else:
self.handle = _parser.Slice_Ptr_expr_Expr_CTor()
_parser.IncRef(self.handle)
if len(args) > 0:
if not isinstance(args[0], collections.Iterable):
raise TypeError('Slice_Ptr_expr_Expr.__init__ takes a sequence as argument')
for elt in args[0]:
self.append(elt)
def __del__(self):
_parser.DecRef(self.handle)
def __str__(self):
s = 'parser.Slice_Ptr_expr_Expr len: ' + str(len(self)) + ' handle: ' + str(self.handle) + ' ['
if len(self) < 120:
s += ', '.join(map(str, self)) + ']'
return s
def __repr__(self):
return 'parser.Slice_Ptr_expr_Expr([' + ', '.join(map(str, self)) + '])'
def __len__(self):
return _parser.Slice_Ptr_expr_Expr_len(self.handle)
def __getitem__(self, key):
if isinstance(key, slice):
return [self[ii] for ii in range(*key.indices(len(self)))]
elif isinstance(key, int):
if key < 0:
key += len(self)
if key < 0 or key >= len(self):
raise IndexError('slice index out of range')
return go.Ptr_expr_Expr(handle=_parser.Slice_Ptr_expr_Expr_elem(self.handle, key))
else:
raise TypeError('slice index invalid type')
def __setitem__(self, idx, value):
if idx < 0:
idx += len(self)
if idx < len(self):
_parser.Slice_Ptr_expr_Expr_set(self.handle, idx, value.handle)
return
raise IndexError('slice index out of range')
def __iadd__(self, value):
if not isinstance(value, collections.Iterable):
raise TypeError('Slice_Ptr_expr_Expr.__iadd__ takes a sequence as argument')
for elt in value:
self.append(elt)
return self
def __iter__(self):
self.index = 0
return self
def __next__(self):
if self.index < len(self):
rv = _parser.Slice_Ptr_expr_Expr_elem(self.handle, self.index)
self.index = self.index + 1
return rv
raise StopIteration
def append(self, value):
_parser.Slice_Ptr_expr_Expr_append(self.handle, value.handle)
def copy(self, src):
""" copy emulates the go copy function, copying elements into this list from source list, up to min of size of each list """
mx = min(len(self), len(src))
for i in range(mx):
self[i] = src[i]
# Python type for slice []*expr.Expr_CreateStruct_Entry
class Slice_Ptr_expr_Expr_CreateStruct_Entry(go.GoClass):
""""""
def __init__(self, *args, **kwargs):
"""
handle=A Go-side object is always initialized with an explicit handle=arg
otherwise parameter is a python list that we copy from
"""
self.index = 0
if len(kwargs) == 1 and 'handle' in kwargs:
self.handle = kwargs['handle']
_parser.IncRef(self.handle)
elif len(args) == 1 and isinstance(args[0], go.GoClass):
self.handle = args[0].handle
_parser.IncRef(self.handle)
else:
self.handle = _parser.Slice_Ptr_expr_Expr_CreateStruct_Entry_CTor()
_parser.IncRef(self.handle)
if len(args) > 0:
if not isinstance(args[0], collections.Iterable):
raise TypeError('Slice_Ptr_expr_Expr_CreateStruct_Entry.__init__ takes a sequence as argument')
for elt in args[0]:
self.append(elt)
def __del__(self):
_parser.DecRef(self.handle)
def __str__(self):
s = 'parser.Slice_Ptr_expr_Expr_CreateStruct_Entry len: ' + str(len(self)) + ' handle: ' + str(self.handle) + ' ['
if len(self) < 120:
s += ', '.join(map(str, self)) + ']'
return s
def __repr__(self):
return 'parser.Slice_Ptr_expr_Expr_CreateStruct_Entry([' + ', '.join(map(str, self)) + '])'
def __len__(self):
return _parser.Slice_Ptr_expr_Expr_CreateStruct_Entry_len(self.handle)
def __getitem__(self, key):
if isinstance(key, slice):
return [self[ii] for ii in range(*key.indices(len(self)))]
elif isinstance(key, int):
if key < 0:
key += len(self)
if key < 0 or key >= len(self):
raise IndexError('slice index out of range')
return go.Ptr_expr_Expr_CreateStruct_Entry(handle=_parser.Slice_Ptr_expr_Expr_CreateStruct_Entry_elem(self.handle, key))
else:
raise TypeError('slice index invalid type')
def __setitem__(self, idx, value):
if idx < 0:
idx += len(self)
if idx < len(self):
_parser.Slice_Ptr_expr_Expr_CreateStruct_Entry_set(self.handle, idx, value.handle)
return
raise IndexError('slice index out of range')
def __iadd__(self, value):
if not isinstance(value, collections.Iterable):
raise TypeError('Slice_Ptr_expr_Expr_CreateStruct_Entry.__iadd__ takes a sequence as argument')
for elt in value:
self.append(elt)
return self
def __iter__(self):
self.index = 0
return self
def __next__(self):
if self.index < len(self):
rv = _parser.Slice_Ptr_expr_Expr_CreateStruct_Entry_elem(self.handle, self.index)
self.index = self.index + 1
return rv
raise StopIteration
def append(self, value):
_parser.Slice_Ptr_expr_Expr_CreateStruct_Entry_append(self.handle, value.handle)
def copy(self, src):
""" copy emulates the go copy function, copying elements into this list from source list, up to min of size of each list """
mx = min(len(self), len(src))
for i in range(mx):
self[i] = src[i]
# Python type for slice []parser.Macro
class Slice_parser_Macro(go.GoClass):
""""""
def __init__(self, *args, **kwargs):
"""
handle=A Go-side object is always initialized with an explicit handle=arg
otherwise parameter is a python list that we copy from
"""
self.index = 0
if len(kwargs) == 1 and 'handle' in kwargs:
self.handle = kwargs['handle']
_parser.IncRef(self.handle)
elif len(args) == 1 and isinstance(args[0], go.GoClass):
self.handle = args[0].handle
_parser.IncRef(self.handle)
else:
self.handle = _parser.Slice_parser_Macro_CTor()
_parser.IncRef(self.handle)
if len(args) > 0:
if not isinstance(args[0], collections.Iterable):
raise TypeError('Slice_parser_Macro.__init__ takes a sequence as argument')
for elt in args[0]:
self.append(elt)
def __del__(self):
_parser.DecRef(self.handle)
def __str__(self):
s = 'parser.Slice_parser_Macro len: ' + str(len(self)) + ' handle: ' + str(self.handle) + ' ['
if len(self) < 120:
s += ', '.join(map(str, self)) + ']'
return s
def __repr__(self):
return 'parser.Slice_parser_Macro([' + ', '.join(map(str, self)) + '])'
def __len__(self):
return _parser.Slice_parser_Macro_len(self.handle)
def __getitem__(self, key):
if isinstance(key, slice):
return [self[ii] for ii in range(*key.indices(len(self)))]
elif isinstance(key, int):
if key < 0:
key += len(self)
if key < 0 or key >= len(self):
raise IndexError('slice index out of range')
return Macro(handle=_parser.Slice_parser_Macro_elem(self.handle, key))
else:
raise TypeError('slice index invalid type')
def __setitem__(self, idx, value):
if idx < 0:
idx += len(self)
if idx < len(self):
_parser.Slice_parser_Macro_set(self.handle, idx, value.handle)
return
raise IndexError('slice index out of range')
def __iadd__(self, value):
if not isinstance(value, collections.Iterable):
raise TypeError('Slice_parser_Macro.__iadd__ takes a sequence as argument')
for elt in value:
self.append(elt)
return self
def __iter__(self):
self.index = 0
return self
def __next__(self):
if self.index < len(self):
rv = _parser.Slice_parser_Macro_elem(self.handle, self.index)
self.index = self.index + 1
return rv
raise StopIteration
def append(self, value):
_parser.Slice_parser_Macro_append(self.handle, value.handle)
def copy(self, src):
""" copy emulates the go copy function, copying elements into this list from source list, up to min of size of each list """
mx = min(len(self), len(src))
for i in range(mx):
self[i] = src[i]
# Python type for map map[int64]*expr.Expr
class Map_int64_Ptr_expr_Expr(go.GoClass):
""""""
def __init__(self, *args, **kwargs):
"""
handle=A Go-side object is always initialized with an explicit handle=arg
otherwise parameter is a python list that we copy from
"""
self.index = 0
if len(kwargs) == 1 and 'handle' in kwargs:
self.handle = kwargs['handle']
_parser.IncRef(self.handle)
elif len(args) == 1 and isinstance(args[0], go.GoClass):
self.handle = args[0].handle
_parser.IncRef(self.handle)
else:
self.handle = _parser.Map_int64_Ptr_expr_Expr_CTor()
_parser.IncRef(self.handle)
if len(args) > 0:
if not isinstance(args[0], collections.Mapping):
raise TypeError('Map_int64_Ptr_expr_Expr.__init__ takes a mapping as argument')
for k, v in args[0].items():
_parser.Map_int64_Ptr_expr_Expr_set(self.handle, k, v)
def __del__(self):
_parser.DecRef(self.handle)
def __str__(self):
s = 'parser.Map_int64_Ptr_expr_Expr len: ' + str(len(self)) + ' handle: ' + str(self.handle) + ' {'
if len(self) < 120:
for k, v in self.items():
s += str(k) + '=' + str(v) + ', '
return s + '}'
def __repr__(self):
s = 'parser.Map_int64_Ptr_expr_Expr({'
for k, v in self.items():
s += str(k) + '=' + str(v) + ', '
return s + '})'
def __len__(self):
return _parser.Map_int64_Ptr_expr_Expr_len(self.handle)
def __getitem__(self, key):
return go.Ptr_expr_Expr(handle=_parser.Map_int64_Ptr_expr_Expr_elem(self.handle, key))
def __setitem__(self, key, value):
_parser.Map_int64_Ptr_expr_Expr_set(self.handle, key, value.handle)
def __delitem__(self, key):
return _parser.Map_int64_Ptr_expr_Expr_delete(self.handle, key)
def keys(self):
return go.Slice_int64(handle=_parser.Map_int64_Ptr_expr_Expr_keys(self.handle))
def values(self):
vls = []
kys = self.keys()
for k in kys:
vls.append(self[k])
return vls
def items(self):
vls = []
kys = self.keys()
for k in kys:
vls.append((k, self[k]))
return vls
def __iter__(self):
return iter(self.items())
def __contains__(self, key):
return _parser.Map_int64_Ptr_expr_Expr_contains(self.handle, key)
# Python type for map map[int64]int32
class Map_int64_int32(go.GoClass):
""""""
def __init__(self, *args, **kwargs):
"""
handle=A Go-side object is always initialized with an explicit handle=arg
otherwise parameter is a python list that we copy from
"""
self.index = 0
if len(kwargs) == 1 and 'handle' in kwargs:
self.handle = kwargs['handle']
_parser.IncRef(self.handle)
elif len(args) == 1 and isinstance(args[0], go.GoClass):
self.handle = args[0].handle
_parser.IncRef(self.handle)
else:
self.handle = _parser.Map_int64_int32_CTor()
_parser.IncRef(self.handle)
if len(args) > 0:
if not isinstance(args[0], collections.Mapping):
raise TypeError('Map_int64_int32.__init__ takes a mapping as argument')
for k, v in args[0].items():
_parser.Map_int64_int32_set(self.handle, k, v)
def __del__(self):
_parser.DecRef(self.handle)
def __str__(self):
s = 'parser.Map_int64_int32 len: ' + str(len(self)) + ' handle: ' + str(self.handle) + ' {'
if len(self) < 120:
for k, v in self.items():
s += str(k) + '=' + str(v) + ', '
return s + '}'
def __repr__(self):
s = 'parser.Map_int64_int32({'
for k, v in self.items():
s += str(k) + '=' + str(v) + ', '
return s + '})'
def __len__(self):
return _parser.Map_int64_int32_len(self.handle)
def __getitem__(self, key):
return _parser.Map_int64_int32_elem(self.handle, key)
def __setitem__(self, key, value):
_parser.Map_int64_int32_set(self.handle, key, value)
def __delitem__(self, key):
return _parser.Map_int64_int32_delete(self.handle, key)
def keys(self):
return go.Slice_int64(handle=_parser.Map_int64_int32_keys(self.handle))
def values(self):
vls = []
kys = self.keys()
for k in kys:
vls.append(self[k])
return vls
def items(self):
vls = []
kys = self.keys()
for k in kys:
vls.append((k, self[k]))
return vls
def __iter__(self):
return iter(self.items())
def __contains__(self, key):
return _parser.Map_int64_int32_contains(self.handle, key)
#---- Constants from Go: Python can only ask that you please don't change these! ---
AccumulatorName = "__result__"
# ---- Global Variables: can only use functions to access ---
def AllMacros():
"""
AllMacros Gets Go Variable: parser.AllMacros
"""
return Slice_parser_Macro(handle=_parser.parser_AllMacros())
def Set_AllMacros(value):
"""
Set_AllMacros Sets Go Variable: parser.AllMacros
"""
if isinstance(value, go.GoClass):
_parser.parser_Set_AllMacros(value.handle)
else:
_parser.parser_Set_AllMacros(value)
def NoMacros():
"""
NoMacros Gets Go Variable: parser.NoMacros
"""
return Slice_parser_Macro(handle=_parser.parser_NoMacros())
def Set_NoMacros(value):
"""
Set_NoMacros Sets Go Variable: parser.NoMacros
"""
if isinstance(value, go.GoClass):
_parser.parser_Set_NoMacros(value.handle)
else:
_parser.parser_Set_NoMacros(value)
# ---- Interfaces ---
# Python type for interface parser.ExprHelper
class ExprHelper(go.GoClass):
"""ExprHelper assists with the manipulation of proto-based Expr values in a manner which is\nconsistent with the source position and expression id generation code leveraged by both\nthe parser and type-checker.\n"""
def __init__(self, *args, **kwargs):
"""
handle=A Go-side object is always initialized with an explicit handle=arg
"""
if len(kwargs) == 1 and 'handle' in kwargs:
self.handle = kwargs['handle']
_parser.IncRef(self.handle)
elif len(args) == 1 and isinstance(args[0], go.GoClass):
self.handle = args[0].handle
_parser.IncRef(self.handle)
else:
self.handle = 0
def Fold(self, iterVar, iterRange, accuVar, accuInit, condition, step, result):
"""Fold(str iterVar, object iterRange, str accuVar, object accuInit, object condition, object step, object result) object"""
return go.Ptr_expr_Expr(handle=_parser.parser_ExprHelper_Fold(self.handle, iterVar, iterRange.handle, accuVar, accuInit.handle, condition.handle, step.handle, result.handle))
def Ident(self, name):
"""Ident(str name) object"""
return go.Ptr_expr_Expr(handle=_parser.parser_ExprHelper_Ident(self.handle, name))
def LiteralBool(self, value):
"""LiteralBool(bool value) object"""
return go.Ptr_expr_Expr(handle=_parser.parser_ExprHelper_LiteralBool(self.handle, value))
def LiteralBytes(self, value):
"""LiteralBytes([]int value) object"""
return go.Ptr_expr_Expr(handle=_parser.parser_ExprHelper_LiteralBytes(self.handle, value.handle))
def LiteralDouble(self, value):
"""LiteralDouble(float value) object"""
return go.Ptr_expr_Expr(handle=_parser.parser_ExprHelper_LiteralDouble(self.handle, value))
def LiteralInt(self, value):
"""LiteralInt(long value) object"""
return go.Ptr_expr_Expr(handle=_parser.parser_ExprHelper_LiteralInt(self.handle, value))
def LiteralString(self, value):
"""LiteralString(str value) object"""
return go.Ptr_expr_Expr(handle=_parser.parser_ExprHelper_LiteralString(self.handle, value))
def LiteralUint(self, value):
"""LiteralUint(long value) object"""
return go.Ptr_expr_Expr(handle=_parser.parser_ExprHelper_LiteralUint(self.handle, value))
def NewMapEntry(self, key, val):
"""NewMapEntry(object key, object val) object"""
return go.Ptr_expr_Expr_CreateStruct_Entry(handle=_parser.parser_ExprHelper_NewMapEntry(self.handle, key.handle, val.handle))
def NewObjectFieldInit(self, field, init):
"""NewObjectFieldInit(str field, object init) object"""
return go.Ptr_expr_Expr_CreateStruct_Entry(handle=_parser.parser_ExprHelper_NewObjectFieldInit(self.handle, field, init.handle))
def OffsetLocation(self, exprID):
"""OffsetLocation(long exprID) object"""
return go.common_Location(handle=_parser.parser_ExprHelper_OffsetLocation(self.handle, exprID))
def PresenceTest(self, operand, field):
"""PresenceTest(object operand, str field) object"""
return go.Ptr_expr_Expr(handle=_parser.parser_ExprHelper_PresenceTest(self.handle, operand.handle, field))
def Select(self, operand, field):
"""Select(object operand, str field) object"""
return go.Ptr_expr_Expr(handle=_parser.parser_ExprHelper_Select(self.handle, operand.handle, field))
# Python type for interface parser.Macro
class Macro(go.GoClass):
"""Macro interface for describing the function signature to match and the MacroExpander to apply.\n\nNote: when a Macro should apply to multiple overloads (based on arg count) of a given function,\na Macro should be created per arg-count.\n"""
def __init__(self, *args, **kwargs):
"""
handle=A Go-side object is always initialized with an explicit handle=arg
"""
if len(kwargs) == 1 and 'handle' in kwargs:
self.handle = kwargs['handle']
_parser.IncRef(self.handle)
elif len(args) == 1 and isinstance(args[0], go.GoClass):
self.handle = args[0].handle
_parser.IncRef(self.handle)
else:
self.handle = 0
def ArgCount(self):
"""ArgCount() int"""
return _parser.parser_Macro_ArgCount(self.handle)
def Function(self):
"""Function() str"""
return _parser.parser_Macro_Function(self.handle)
def IsReceiverStyle(self):
"""IsReceiverStyle() bool"""
return _parser.parser_Macro_IsReceiverStyle(self.handle)
def MacroKey(self):
"""MacroKey() str"""
return _parser.parser_Macro_MacroKey(self.handle)
# ---- Structs ---
# ---- Slices ---
# ---- Maps ---
# ---- Constructors ---
# ---- Functions ---
def Unparse(expr, info):
"""Unparse(object expr, object info) str, str
Unparse takes an input expression and source position information and generates a human-readable
expression.
Note, unparsing an AST will often generate the same expression as was originally parsed, but some
formatting may be lost in translation, notably:
- All quoted literals are double-quoted.
- Byte literals are represented as octal escapes (same as Google SQL).
- Floating point values are converted to the small number of digits needed to represent the value.
- Spacing around punctuation marks may be lost.
- Parentheses will only be applied when they affect operator precedence.
"""
return _parser.parser_Unparse(expr.handle, info.handle)
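# Example (sketch; assumes the compiled _parser extension generated by gopy is importable):
# macros = AllMacros()
# for i in range(len(macros)):
#     m = macros[i]  # __getitem__ wraps the raw Go handle in a Macro
#     print(m.Function(), m.ArgCount(), m.IsReceiverStyle())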
```
#### File: celpy/test/test.py
```python
import os,sys,inspect,collections
cwd = os.getcwd()
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
os.chdir(currentdir)
import _test
os.chdir(cwd)
# to use this code in your end-user python file, import it as follows:
# from test import test
# and then refer to everything using test. prefix
# packages imported by this package listed below:
import go
# ---- Types ---
# Python type for slice []*expr.Expr
class Slice_Ptr_expr_Expr(go.GoClass):
""""""
def __init__(self, *args, **kwargs):
"""
handle=A Go-side object is always initialized with an explicit handle=arg
otherwise parameter is a python list that we copy from
"""
self.index = 0
if len(kwargs) == 1 and 'handle' in kwargs:
self.handle = kwargs['handle']
_test.IncRef(self.handle)
elif len(args) == 1 and isinstance(args[0], go.GoClass):
self.handle = args[0].handle
_test.IncRef(self.handle)
else:
self.handle = _test.Slice_Ptr_expr_Expr_CTor()
_test.IncRef(self.handle)
if len(args) > 0:
if not isinstance(args[0], collections.Iterable):
raise TypeError('Slice_Ptr_expr_Expr.__init__ takes a sequence as argument')
for elt in args[0]:
self.append(elt)
def __del__(self):
_test.DecRef(self.handle)
def __str__(self):
s = 'test.Slice_Ptr_expr_Expr len: ' + str(len(self)) + ' handle: ' + str(self.handle) + ' ['
if len(self) < 120:
s += ', '.join(map(str, self)) + ']'
return s
def __repr__(self):
return 'test.Slice_Ptr_expr_Expr([' + ', '.join(map(str, self)) + '])'
def __len__(self):
return _test.Slice_Ptr_expr_Expr_len(self.handle)
def __getitem__(self, key):
if isinstance(key, slice):
return [self[ii] for ii in range(*key.indices(len(self)))]
elif isinstance(key, int):
if key < 0:
key += len(self)
if key < 0 or key >= len(self):
raise IndexError('slice index out of range')
return go.Ptr_expr_Expr(handle=_test.Slice_Ptr_expr_Expr_elem(self.handle, key))
else:
raise TypeError('slice index invalid type')
def __setitem__(self, idx, value):
if idx < 0:
idx += len(self)
if idx < len(self):
_test.Slice_Ptr_expr_Expr_set(self.handle, idx, value.handle)
return
raise IndexError('slice index out of range')
def __iadd__(self, value):
if not isinstance(value, collections.Iterable):
raise TypeError('Slice_Ptr_expr_Expr.__iadd__ takes a sequence as argument')
for elt in value:
self.append(elt)
return self
def __iter__(self):
self.index = 0
return self
def __next__(self):
if self.index < len(self):
rv = _test.Slice_Ptr_expr_Expr_elem(self.handle, self.index)
self.index = self.index + 1
return rv
raise StopIteration
def append(self, value):
_test.Slice_Ptr_expr_Expr_append(self.handle, value.handle)
def copy(self, src):
""" copy emulates the go copy function, copying elements into this list from source list, up to min of size of each list """
mx = min(len(self), len(src))
for i in range(mx):
self[i] = src[i]
# Python type for slice []*expr.Expr_CreateStruct_Entry
class Slice_Ptr_expr_Expr_CreateStruct_Entry(go.GoClass):
""""""
def __init__(self, *args, **kwargs):
"""
handle=A Go-side object is always initialized with an explicit handle=arg
otherwise parameter is a python list that we copy from
"""
self.index = 0
if len(kwargs) == 1 and 'handle' in kwargs:
self.handle = kwargs['handle']
_test.IncRef(self.handle)
elif len(args) == 1 and isinstance(args[0], go.GoClass):
self.handle = args[0].handle
_test.IncRef(self.handle)
else:
self.handle = _test.Slice_Ptr_expr_Expr_CreateStruct_Entry_CTor()
_test.IncRef(self.handle)
if len(args) > 0:
if not isinstance(args[0], collections.Iterable):
raise TypeError('Slice_Ptr_expr_Expr_CreateStruct_Entry.__init__ takes a sequence as argument')
for elt in args[0]:
self.append(elt)
def __del__(self):
_test.DecRef(self.handle)
def __str__(self):
s = 'test.Slice_Ptr_expr_Expr_CreateStruct_Entry len: ' + str(len(self)) + ' handle: ' + str(self.handle) + ' ['
if len(self) < 120:
s += ', '.join(map(str, self)) + ']'
return s
def __repr__(self):
return 'test.Slice_Ptr_expr_Expr_CreateStruct_Entry([' + ', '.join(map(str, self)) + '])'
def __len__(self):
return _test.Slice_Ptr_expr_Expr_CreateStruct_Entry_len(self.handle)
def __getitem__(self, key):
if isinstance(key, slice):
return [self[ii] for ii in range(*key.indices(len(self)))]
elif isinstance(key, int):
if key < 0:
key += len(self)
if key < 0 or key >= len(self):
raise IndexError('slice index out of range')
return go.Ptr_expr_Expr_CreateStruct_Entry(handle=_test.Slice_Ptr_expr_Expr_CreateStruct_Entry_elem(self.handle, key))
else:
raise TypeError('slice index invalid type')
def __setitem__(self, idx, value):
if idx < 0:
idx += len(self)
if idx < len(self):
_test.Slice_Ptr_expr_Expr_CreateStruct_Entry_set(self.handle, idx, value.handle)
return
raise IndexError('slice index out of range')
def __iadd__(self, value):
if not isinstance(value, collections.Iterable):
raise TypeError('Slice_Ptr_expr_Expr_CreateStruct_Entry.__iadd__ takes a sequence as argument')
for elt in value:
self.append(elt)
return self
def __iter__(self):
self.index = 0
return self
def __next__(self):
if self.index < len(self):
rv = _test.Slice_Ptr_expr_Expr_CreateStruct_Entry_elem(self.handle, self.index)
self.index = self.index + 1
return rv
raise StopIteration
def append(self, value):
_test.Slice_Ptr_expr_Expr_CreateStruct_Entry_append(self.handle, value.handle)
def copy(self, src):
""" copy emulates the go copy function, copying elements into this list from source list, up to min of size of each list """
mx = min(len(self), len(src))
for i in range(mx):
self[i] = src[i]
# Python type for map map[int64]*expr.Expr
class Map_int64_Ptr_expr_Expr(go.GoClass):
""""""
def __init__(self, *args, **kwargs):
"""
handle=A Go-side object is always initialized with an explicit handle=arg
otherwise parameter is a python list that we copy from
"""
self.index = 0
if len(kwargs) == 1 and 'handle' in kwargs:
self.handle = kwargs['handle']
_test.IncRef(self.handle)
elif len(args) == 1 and isinstance(args[0], go.GoClass):
self.handle = args[0].handle
_test.IncRef(self.handle)
else:
self.handle = _test.Map_int64_Ptr_expr_Expr_CTor()
_test.IncRef(self.handle)
if len(args) > 0:
if not isinstance(args[0], collections.Mapping):
raise TypeError('Map_int64_Ptr_expr_Expr.__init__ takes a mapping as argument')
for k, v in args[0].items():
_test.Map_int64_Ptr_expr_Expr_set(self.handle, k, v)
def __del__(self):
_test.DecRef(self.handle)
def __str__(self):
s = 'test.Map_int64_Ptr_expr_Expr len: ' + str(len(self)) + ' handle: ' + str(self.handle) + ' {'
if len(self) < 120:
for k, v in self.items():
s += str(k) + '=' + str(v) + ', '
return s + '}'
def __repr__(self):
s = 'test.Map_int64_Ptr_expr_Expr({'
for k, v in self.items():
s += str(k) + '=' + str(v) + ', '
return s + '})'
def __len__(self):
return _test.Map_int64_Ptr_expr_Expr_len(self.handle)
def __getitem__(self, key):
return go.Ptr_expr_Expr(handle=_test.Map_int64_Ptr_expr_Expr_elem(self.handle, key))
def __setitem__(self, key, value):
_test.Map_int64_Ptr_expr_Expr_set(self.handle, key, value.handle)
def __delitem__(self, key):
return _test.Map_int64_Ptr_expr_Expr_delete(self.handle, key)
def keys(self):
return go.Slice_int64(handle=_test.Map_int64_Ptr_expr_Expr_keys(self.handle))
def values(self):
vls = []
kys = self.keys()
for k in kys:
vls.append(self[k])
return vls
def items(self):
vls = []
kys = self.keys()
for k in kys:
vls.append((k, self[k]))
return vls
def __iter__(self):
return iter(self.items())
def __contains__(self, key):
return _test.Map_int64_Ptr_expr_Expr_contains(self.handle, key)
# Python type for map map[int64]int32
class Map_int64_int32(go.GoClass):
""""""
def __init__(self, *args, **kwargs):
"""
handle=A Go-side object is always initialized with an explicit handle=arg
otherwise parameter is a python list that we copy from
"""
self.index = 0
if len(kwargs) == 1 and 'handle' in kwargs:
self.handle = kwargs['handle']
_test.IncRef(self.handle)
elif len(args) == 1 and isinstance(args[0], go.GoClass):
self.handle = args[0].handle
_test.IncRef(self.handle)
else:
self.handle = _test.Map_int64_int32_CTor()
_test.IncRef(self.handle)
if len(args) > 0:
if not isinstance(args[0], collections.Mapping):
raise TypeError('Map_int64_int32.__init__ takes a mapping as argument')
for k, v in args[0].items():
_test.Map_int64_int32_set(self.handle, k, v)
def __del__(self):
_test.DecRef(self.handle)
def __str__(self):
s = 'test.Map_int64_int32 len: ' + str(len(self)) + ' handle: ' + str(self.handle) + ' {'
if len(self) < 120:
for k, v in self.items():
s += str(k) + '=' + str(v) + ', '
return s + '}'
def __repr__(self):
s = 'test.Map_int64_int32({'
for k, v in self.items():
s += str(k) + '=' + str(v) + ', '
return s + '})'
def __len__(self):
return _test.Map_int64_int32_len(self.handle)
def __getitem__(self, key):
return _test.Map_int64_int32_elem(self.handle, key)
def __setitem__(self, key, value):
_test.Map_int64_int32_set(self.handle, key, value)
def __delitem__(self, key):
return _test.Map_int64_int32_delete(self.handle, key)
def keys(self):
return go.Slice_int64(handle=_test.Map_int64_int32_keys(self.handle))
def values(self):
vls = []
kys = self.keys()
for k in kys:
vls.append(self[k])
return vls
def items(self):
vls = []
kys = self.keys()
for k in kys:
vls.append((k, self[k]))
return vls
def __iter__(self):
return iter(self.items())
def __contains__(self, key):
return _test.Map_int64_int32_contains(self.handle, key)
#---- Constants from Go: Python can only ask that you please don't change these! ---
# ---- Global Variables: can only use functions to access ---
def Conditional():
"""
Conditional Gets Go Variable: test.Conditional
"""
return TestExpr(handle=_test.test_Conditional())
def Set_Conditional(value):
"""
Set_Conditional Sets Go Variable: test.Conditional
"""
if isinstance(value, go.GoClass):
_test.test_Set_Conditional(value.handle)
else:
_test.test_Set_Conditional(value)
def DynMap():
"""
DynMap Gets Go Variable: test.DynMap
"""
return TestExpr(handle=_test.test_DynMap())
def Set_DynMap(value):
"""
Set_DynMap Sets Go Variable: test.DynMap
"""
if isinstance(value, go.GoClass):
_test.test_Set_DynMap(value.handle)
else:
_test.test_Set_DynMap(value)
def Empty():
"""
Empty Gets Go Variable: test.Empty
"""
return TestExpr(handle=_test.test_Empty())
def Set_Empty(value):
"""
Set_Empty Sets Go Variable: test.Empty
"""
if isinstance(value, go.GoClass):
_test.test_Set_Empty(value.handle)
else:
_test.test_Set_Empty(value)
def Equality():
"""
Equality Gets Go Variable: test.Equality
"""
return TestExpr(handle=_test.test_Equality())
def Set_Equality(value):
"""
Set_Equality Sets Go Variable: test.Equality
"""
if isinstance(value, go.GoClass):
_test.test_Set_Equality(value.handle)
else:
_test.test_Set_Equality(value)
def Exists():
"""
Exists Gets Go Variable: test.Exists
"""
return TestExpr(handle=_test.test_Exists())
def Set_Exists(value):
"""
Set_Exists Sets Go Variable: test.Exists
"""
if isinstance(value, go.GoClass):
_test.test_Set_Exists(value.handle)
else:
_test.test_Set_Exists(value)
def ExistsWithInput():
"""
ExistsWithInput Gets Go Variable: test.ExistsWithInput
"""
return TestExpr(handle=_test.test_ExistsWithInput())
def Set_ExistsWithInput(value):
"""
Set_ExistsWithInput Sets Go Variable: test.ExistsWithInput
"""
if isinstance(value, go.GoClass):
_test.test_Set_ExistsWithInput(value.handle)
else:
_test.test_Set_ExistsWithInput(value)
def LogicalAnd():
"""
LogicalAnd Gets Go Variable: test.LogicalAnd
"""
return TestExpr(handle=_test.test_LogicalAnd())
def Set_LogicalAnd(value):
"""
Set_LogicalAnd Sets Go Variable: test.LogicalAnd
"""
if isinstance(value, go.GoClass):
_test.test_Set_LogicalAnd(value.handle)
else:
_test.test_Set_LogicalAnd(value)
def LogicalAndMissingType():
"""
LogicalAndMissingType Gets Go Variable: test.LogicalAndMissingType
"""
return TestExpr(handle=_test.test_LogicalAndMissingType())
def Set_LogicalAndMissingType(value):
"""
Set_LogicalAndMissingType Sets Go Variable: test.LogicalAndMissingType
"""
if isinstance(value, go.GoClass):
_test.test_Set_LogicalAndMissingType(value.handle)
else:
_test.test_Set_LogicalAndMissingType(value)
def LogicalOr():
"""
LogicalOr Gets Go Variable: test.LogicalOr
"""
return TestExpr(handle=_test.test_LogicalOr())
def Set_LogicalOr(value):
"""
Set_LogicalOr Sets Go Variable: test.LogicalOr
"""
if isinstance(value, go.GoClass):
_test.test_Set_LogicalOr(value.handle)
else:
_test.test_Set_LogicalOr(value)
def LogicalOrEquals():
"""
LogicalOrEquals Gets Go Variable: test.LogicalOrEquals
"""
return TestExpr(handle=_test.test_LogicalOrEquals())
def Set_LogicalOrEquals(value):
"""
Set_LogicalOrEquals Sets Go Variable: test.LogicalOrEquals
"""
if isinstance(value, go.GoClass):
_test.test_Set_LogicalOrEquals(value.handle)
else:
_test.test_Set_LogicalOrEquals(value)
def Select():
"""
Select Gets Go Variable: test.Select
"""
return TestExpr(handle=_test.test_Select())
def Set_Select(value):
"""
Set_Select Sets Go Variable: test.Select
"""
if isinstance(value, go.GoClass):
_test.test_Set_Select(value.handle)
else:
_test.test_Set_Select(value)
def TypeEquality():
"""
TypeEquality Gets Go Variable: test.TypeEquality
"""
return TestExpr(handle=_test.test_TypeEquality())
def Set_TypeEquality(value):
"""
Set_TypeEquality Sets Go Variable: test.TypeEquality
"""
if isinstance(value, go.GoClass):
_test.test_Set_TypeEquality(value.handle)
else:
_test.test_Set_TypeEquality(value)
# ---- Interfaces ---
# ---- Structs ---
# Python type for struct test.TestExpr
class TestExpr(go.GoClass):
"""TestExpr packages an Expr with SourceInfo, for testing.\n"""
def __init__(self, *args, **kwargs):
"""
handle=A Go-side object is always initialized with an explicit handle=arg
otherwise parameters can be unnamed in order of field names or named fields
in which case a new Go object is constructed first
"""
if len(kwargs) == 1 and 'handle' in kwargs:
self.handle = kwargs['handle']
_test.IncRef(self.handle)
elif len(args) == 1 and isinstance(args[0], go.GoClass):
self.handle = args[0].handle
_test.IncRef(self.handle)
else:
self.handle = _test.test_TestExpr_CTor()
_test.IncRef(self.handle)
if 0 < len(args):
self.Expr = args[0]
if "Expr" in kwargs:
self.Expr = kwargs["Expr"]
if 1 < len(args):
self.SourceInfo = args[1]
if "SourceInfo" in kwargs:
self.SourceInfo = kwargs["SourceInfo"]
def __del__(self):
_test.DecRef(self.handle)
def __str__(self):
pr = [(p, getattr(self, p)) for p in dir(self) if not p.startswith('__')]
sv = 'test.TestExpr{'
first = True
for v in pr:
if callable(v[1]):
continue
if first:
first = False
else:
sv += ', '
sv += v[0] + '=' + str(v[1])
return sv + '}'
def __repr__(self):
pr = [(p, getattr(self, p)) for p in dir(self) if not p.startswith('__')]
sv = 'test.TestExpr ( '
for v in pr:
if not callable(v[1]):
sv += v[0] + '=' + str(v[1]) + ', '
return sv + ')'
@property
def Expr(self):
return go.Ptr_expr_Expr(handle=_test.test_TestExpr_Expr_Get(self.handle))
@Expr.setter
def Expr(self, value):
if isinstance(value, go.GoClass):
_test.test_TestExpr_Expr_Set(self.handle, value.handle)
else:
raise TypeError("supplied argument type {t} is not a go.GoClass".format(t=type(value)))
@property
def SourceInfo(self):
return go.Ptr_expr_SourceInfo(handle=_test.test_TestExpr_SourceInfo_Get(self.handle))
@SourceInfo.setter
def SourceInfo(self, value):
if isinstance(value, go.GoClass):
_test.test_TestExpr_SourceInfo_Set(self.handle, value.handle)
else:
raise TypeError("supplied argument type {t} is not a go.GoClass".format(t=type(value)))
def Info(self, location):
"""Info(str location) object
Info returns a copy of the SourceInfo with the given location.
"""
return go.Ptr_expr_SourceInfo(handle=_test.test_TestExpr_Info(self.handle, location))
# ---- Slices ---
# ---- Maps ---
# ---- Constructors ---
# ---- Functions ---
def Compare(a, e):
"""Compare(str a, str e) bool
Compare compares two strings, a for actual, e for expected, and returns true or false. The comparison is done,
by filtering out whitespace (i.e. space, tabs and newline).
"""
return _test.test_Compare(a, e)
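# Example (sketch, per the docstring above -- whitespace is ignored in the comparison):
# Compare("size(x) > 4", "size(x)>4")  # expected to return True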
def DiffMessage(context, actual, expected):
"""DiffMessage(str context, str actual, str expected) str
DiffMessage creates a diff dump message for test failures.
"""
return _test.test_DiffMessage(context, actual, expected)
def ExprComprehension(id, iterVar, iterRange, accuVar, accuInit, loopCondition, loopStep, resultExpr):
"""ExprComprehension(long id, str iterVar, object iterRange, str accuVar, object accuInit, object loopCondition, object loopStep, object resultExpr) object
ExprComprehension returns a comprehension Expr.
"""
return go.Ptr_expr_Expr(handle=_test.test_ExprComprehension(id, iterVar, iterRange.handle, accuVar, accuInit.handle, loopCondition.handle, loopStep.handle, resultExpr.handle))
def ExprEntry(id, key, value):
"""ExprEntry(long id, object key, object value) object
ExprEntry creates a map entry for a create struct Expr.
"""
return go.Ptr_expr_Expr_CreateStruct_Entry(handle=_test.test_ExprEntry(id, key.handle, value.handle))
def ExprField(id, field, value):
"""ExprField(long id, str field, object value) object
ExprField creates a field entry for a create struct Expr.
"""
return go.Ptr_expr_Expr_CreateStruct_Entry(handle=_test.test_ExprField(id, field, value.handle))
def ExprIdent(id, name):
"""ExprIdent(long id, str name) object
ExprIdent creates an ident (variable) Expr.
"""
return go.Ptr_expr_Expr(handle=_test.test_ExprIdent(id, name))
def ExprLiteral(id, value):
"""ExprLiteral(long id, str value) object
ExprLiteral creates a literal (constant) Expr.
"""
return go.Ptr_expr_Expr(handle=_test.test_ExprLiteral(id, value))
def ExprSelect(id, operand, field):
"""ExprSelect(long id, object operand, str field) object
ExprSelect creates a select Expr.
"""
return go.Ptr_expr_Expr(handle=_test.test_ExprSelect(id, operand.handle, field))
``` |
{
"source": "johnhiott/dotfiles-1",
"score": 2
} |
#### File: dotfiles-1/mutt/offlineimaphelpers.py
```python
import os
import re
import subprocess
import sys
def get_env(var="EP"):
return os.environ.get(var)
def get_keychain_pass(account=None, server=None):
if not sys.platform == 'darwin':
return get_env()
home = os.environ.get('HOME')
user = os.environ.get('USER')
params = {
'security': '/usr/bin/security',
'command': 'find-internet-password',
'account': account,
'server': server,
'keychain': home + '/Library/Keychains/login.keychain',
'user': user,
}
command = "sudo -u %(user)s %(security)s -v %(command)s -g -a %(account)s -s %(server)s %(keychain)s" % params
    output = subprocess.check_output(command, shell=True, stderr=subprocess.STDOUT).decode()  # decode bytes so the str comparisons below work on Python 3
outtext = [l for l in output.splitlines()
if l.startswith('password: ')][0]
return re.match(r'password: "(.*)"', outtext).group(1)
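# Typical usage (sketch; the account/server values are illustrative): referenced from
# ~/.offlineimaprc via `pythonfile = offlineimaphelpers.py` and
# `remotepasseval = get_keychain_pass(account="user@example.com", server="imap.gmail.com")`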
``` |
{
"source": "johnhkchen/python-basics",
"score": 3
} |
#### File: python-basics/scripts/simple_listener.py
```python
import json
import asyncio
import websockets
def project_request(project_id):
return json.dumps({"type": "project", "project_id": project_id})
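# e.g. project_request("p-123") returns '{"type": "project", "project_id": "p-123"}'
# (the project id here is made up)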
async def consumer(message):
if json.loads(message)["type"] == "project":
print("< " + message)
async def listener():
uri = "ws://localhost:8765"
async with websockets.connect(uri) as websocket:
async for message in websocket:
await consumer(message)
responses = asyncio.run(listener())
```
#### File: src/ws_client/client.py
```python
import json
import logging
import websockets
class Client:
version = "0.3.0"
async def connect(self, uri):
try:
async with websockets.connect(uri) as websocket:
self._websocket = websocket
except ConnectionRefusedError:
logging.warning("No connection available.")
def project_request(self, project_id):
return json.dumps({"type": "project", "project_id": project_id})
async def connect_and_request_project(self, uri, project_id):
async with websockets.connect(uri) as websocket:
await websocket.send(self.project_request(project_id))
response = await websocket.recv()
return response
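# Example usage (sketch; the URI and project id are illustrative):
# import asyncio
# response = asyncio.run(Client().connect_and_request_project("ws://localhost:6789", "p-123"))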
```
#### File: src/ws_server/server.py
```python
import json
import asyncio
import websockets
class Server:
version = "0.3.0"
def __init__(self):
self.connections = set()
self.project_id = None
def num_connections(self):
return len(self.connections)
def connections_event(self):
return json.dumps({"type": "connections", "count": len(self.connections)})
def project_event(self):
return json.dumps({"type": "project", "project_id": self.project_id})
async def notify_connections(self):
if self.connections:
for client in self.connections:
await client.send(self.connections_event())
async def notify_project(self):
if self.project_id:
for client in self.connections:
await client.send(self.project_event())
async def register(self, client):
self.connections.add(client)
await self.notify_connections()
async def unregister(self, client):
self.connections.remove(client)
await self.notify_connections()
async def request_project(self, project_id):
self.project_id = project_id
await self.notify_project()
async def project_request_service(self, websocket, path):
# This is the main server process
await self.register(websocket)
try:
pass
finally:
await self.unregister(websocket)
async def start_server(self, port=6789):
server = await websockets.serve(self.project_request_service, "localhost", port)
return server
async def main():
server = Server()
controller = await server.start_server()
asyncio.get_event_loop().run_forever()
return controller
``` |
{
"source": "johnhkelley1/337-recipe",
"score": 3
} |
#### File: Team1/modules/steps.py
```python
from bs4 import BeautifulSoup
import nltk
from nltk.tokenize.treebank import TreebankWordTokenizer
import re
word_tokenize = nltk.tokenize.TreebankWordTokenizer().tokenize
def get(recipe,recipe_obj):
steps = []
stepsParsed = []
for i in recipe.findAll("span",class_="recipe-directions__list--item"):
steps.append(i.text)
alen = len(steps)
for i in range(alen - 1):
words = word_tokenize(steps[i])
words = [word.lower() for word in words]
steps[i] = steps[i].lower()
for item in recipe_obj:
if item == "none":
item = []
stepsParsed.append({"step_num":i+1})
stepsParsed[i]['ingredients'] = []
stepsParsed[i]['tools'] = []
stepsParsed[i]['methods'] = []
for ingredient in recipe_obj["ingredients"]:
if ingredient["name"] in steps[i]:
stepsParsed[i]['ingredients'].append(ingredient["name"])
if len(stepsParsed[i]['ingredients']) == 0:
stepsParsed[i]['ingredients'] = "none"
for tool in recipe_obj["cooking tools"]:
if tool in steps[i]:
stepsParsed[i]['tools'].append(tool)
if len(stepsParsed[i]['tools']) == 0:
stepsParsed[i]['tools'] = "none"
for method in recipe_obj["cooking methods"]:
if method in steps[i]:
stepsParsed[i]['methods'].append(method)
if len(stepsParsed[i]['methods']) == 0:
stepsParsed[i]['methods'] = "none"
time = "until done"
for j in range(len(words)):
for tword in TIME_WORDS:
if tword in words[j] and j > 0:
time = words[j-1] + " " + words[j]
stepsParsed[i]['time'] = time
return stepsParsed
TIME_WORDS = ['minute','second','hour']
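# Example (sketch; the direction text below is made up) of the time heuristic used in get():
def example_time_extraction(step_text="bake in the preheated oven for 20 minutes"):
    words = [w.lower() for w in word_tokenize(step_text)]
    time = "until done"
    for j in range(len(words)):
        for tword in TIME_WORDS:
            if tword in words[j] and j > 0:
                time = words[j - 1] + " " + words[j]
    return time  # "20 minutes"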
``` |
{
"source": "johnhmacleod/traffic",
"score": 2
} |
#### File: traffic/server/traffic_server.py
```python
import os
import sys
from pubnub import Pubnub
from traffic_calc.distance_calculation import dis_calc
from traffic_calc.bearing import bearng
import time
# publish and subscribe keys
pubnub = Pubnub(publish_key="pub-c-cccc9d9c-50db-4aba-9f5f-11a800dd17b5",
subscribe_key="sub-c-5e2f4258-e7dc-11e4-8e57-02ee2ddab7fe")
'''
Names of the variables in the 'dic_ID[L_ID]' list
dic_ID[L_ID][0] = L_ID --> client's unique identification number
dic_ID[L_ID][1] = (NAS)Next approaching signal's number
dic_ID[L_ID][2] = (PASA)Present approaching signal's angle
dic_ID[L_ID][3] = command flag
'''
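# Example entry (the UUID is illustrative), matching the initialization in callback():
# dic_ID["aaaa-0001"] = ["aaaa-0001", 1, '167', True]
# i.e. the client is approaching signal 1 with an initial approach angle of '167' degrees.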
# Dictionary to store each clients necessary data for calculation
dic_ID = {}
# List to store the timestamp value of each client
dic_tme = []
# List to store the unique identification number of the client
g_process_list = ["bff29478-089c-4dff-8895-883580885661"]
def calculation_function(L_ID,lat,lng):
# List of the latitude and longitude values of the signals
L_list1 = ["37.786188 -122.440033","37.787237 -122.431801",
"37.785359 -122.424704","37.778739 -122.423349",
"37.776381 -122.419514","37.772811 -122.412835",
"37.765782 -122.407557"]
L_ID = str(L_ID)
if (len(dic_ID) != 0):
if (dic_ID[L_ID][1]<=7):
parameter_lat1 = float(lat)
parameter_lng1 = float(lng)
#selecting the signal from the list based on the NAS value
L_lat2 = (L_list1[dic_ID[L_ID][1]-1][0:9])
L_lng2 = (L_list1[dic_ID[L_ID][1]-1][10:21])
# calculating the distance between the vehicle and the approaching signal
L_distance = dis_calc(parameter_lat1,parameter_lng1,L_lat2,L_lng2)
# calculating the angle between the vehicle and the approaching signal
L_bearing = bearng(parameter_lat1,parameter_lng1,L_lat2,L_lng2)
#print L_distance
# Quadrant change
if (L_bearing >180 and L_bearing <= 360):
L_bearing = str(180-L_bearing)
L_brng2 = str (L_bearing)
L_temp1 = dic_ID[L_ID][2]
L_temp1 = str(L_temp1)
L_temp2 = L_brng2
L_temp2 = str(L_temp2)
# checking for the signal crossover
if(L_temp1[0] != (L_temp2[0])):
if (dic_ID[L_ID][3] == False):
# sending vehicle crossed message to the respective client
pubnub.publish(channel = dic_ID[L_ID][0] ,message = {"signal_type":"withdraw","sig_num":dic_ID[L_ID][1]-1})
print "server sent clearance message to %s " %(L_ID)
# updating the NAS value
dic_ID[L_ID][1] = dic_ID[L_ID][1]+1
# setting the command flag
dic_ID[L_ID][3] = True
# checking for the vehicle approaching near the signal
if (L_distance <=200 and L_distance >=100):
if (dic_ID[L_ID][3] == True):
# sending vehicle approaching message to the respective client
pubnub.publish(channel=dic_ID[L_ID][0] ,message = {"signal_type":"green","sig_num":dic_ID[L_ID][1]-1})
print "server sent green signal message to %s " %(L_ID)
# updating the PASA
dic_ID[L_ID][2]= L_brng2
# setting the flag
dic_ID[L_ID][3] = False
else:
print "None of the clients are CONNECTED"
# Function to clear all stored values for clients that are discarded mid-route (timed out)
def clearing_function():
for i in range (0,len(dic_tme)):
if (len(dic_tme) >=1):
client = dic_tme[i]
client_hour = int(client[0:2])
client_min = int(client[3:5])
client_sec = int(client[6:8])
# getting the actual time in UTC
present_time = time.gmtime()
present_hours = int(present_time[3])
present_minutes = int(present_time[4])
present_secs = int(present_time[5])
presenttime = (present_hours *60*60) + (present_minutes*60) + (present_secs)
clienttime = (client_hour *60*60) + (client_min*60) + (client_sec)
# Time difference between present Time and the client's starting time
time_difference = (presenttime-clienttime)
time_difference = time_difference/60
# checking the TIMEOUT of the clients in the timestamp list and
# removing the data in the dictionaries and lists of timed-out clients
if (time_difference >= 30 and len(dic_tme)>=1 ):
L_ID = g_process_list[i+1]
print(L_ID)
del dic_ID[L_ID]
del dic_tme[i]
del g_process_list[i+1]
print(dic_ID, dic_tme, g_process_list, len(dic_tme))
break
# callback function starts here
def callback(message,channel):
UUID = message['ID']
L_count = 0
# checking for the new clients
if (message['status'] == "start"):
L_ID = UUID
clearing_function()
day = message['day']
client_time = str(day[17:25])
dic_tme.append(client_time)
print(dic_tme)
# checking the whole list for the clients id
for i in range (0,len(g_process_list),1):
if (UUID != g_process_list[i]):
L_count = L_count+1
# adding the new client if not existed in the list
if (L_count == len(g_process_list)):
L_count = 0
g_process_list.append(UUID)
dic_ID[L_ID] = [L_ID,1,'167',True]
# checking for the ending of the route
if(message['status'] == "stop"):
S_ID = str(message['ID'])
del dic_ID[S_ID]
for i in range (0,len(g_process_list)):
if (S_ID == g_process_list[i]):
del g_process_list[i]
del dic_tme[i-1]
break
# Receiving the latitude and longitude from the client and calling the calculation function
if(message['status'] == "run"):
L_count2 = 0
for i in range(0,len(g_process_list),1):
if(message['ID'] != g_process_list[i]):
L_count2+=1
if(L_count2 == len(g_process_list)):
id = message['ID']
print "this id is expired ",message['ID']
# sending a TIMEOUT message if the clients time got expired
pubnub.publish(channel= id ,message = {"signal_type":"timeout"})
else:
L_ID = message['ID']
lat = message['lat']
lng = message['lon']
calculation_function(L_ID,lat,lng)
return True
def error(message):
print("ERROR : " + str(message))
def connect(message):
print("CONNECTED")
def reconnect(message):
print("RECONNECTED")
def disconnect(message):
print("DISCONNECTED")
# main function
if __name__ == "__main__":
pubnub.subscribe(channels='pub_channel', callback=callback, error=error,
connect=connect, reconnect=reconnect, disconnect=disconnect)
``` |
{
"source": "johnhoman/intercept",
"score": 2
} |
#### File: intercept/intercept/webhook_test.py
```python
import base64
import copy
import json
import fastapi
from fastapi import FastAPI
from fastapi.testclient import TestClient
from kubernetes.client import V1Pod, V1Container
from intercept import webhook
from intercept import models
from intercept import responses
def get_patch(patch: str):
decoded = base64.b64decode(patch).decode()
return json.loads(decoded)
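# e.g. (illustrative values) for patch = base64.b64encode(b'[{"op": "add", "path": "/x", "value": 1}]'),
# get_patch(patch) returns [{'op': 'add', 'path': '/x', 'value': 1}]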
admission_review = {
"kind": "AdmissionReview",
"apiVersion": "admission.k8s.io/v1beta1",
"request": {
"uid": "0df28fbd-5f5f-11e8-bc74-36e6bb280816",
"kind": {
"group": "",
"version": "v1",
"kind": "Pod"
},
"resource": {
"group": "",
"version": "v1",
"resource": "pods"
},
"namespace": "dummy",
"operation": "CREATE",
"userInfo": {
"username": "system:serviceaccount:kube-system:replicaset-controller",
"uid": "a7e0ab33-5f29-11e8-8a3c-36e6bb280816",
"groups": [
"system:serviceaccounts",
"system:serviceaccounts:kube-system",
"system:authenticated"
]
},
"object": {
"metadata": {
"generateName": "nginx-deployment-6c54bd5869-",
"labels": {
"app": "nginx",
"pod-template-hash": "2710681425"
},
"annotations": {
"openshift.io/scc": "restricted"
},
"ownerReferences": [
{
"apiVersion": "extensions/v1beta1",
"kind": "ReplicaSet",
"name": "nginx-deployment-6c54bd5869",
"uid": "16c2b355-5f5d-11e8-ac91-36e6bb280816",
"controller": True,
"blockOwnerDeletion": True
}
]
},
"spec": {
"volumes": [
{
"name": "default-token-tq5lq",
"secret": {
"secretName": "default-token-tq5lq"
}
}
],
"containers": [
{
"name": "nginx",
"image": "nginx:1.7.9",
"ports": [
{
"containerPort": 80,
"protocol": "TCP"
}
],
"resources": {},
"volumeMounts": [
{
"name": "default-token-tq5lq",
"readOnly": True,
"mountPath": "/var/run/secrets/kubernetes.io/serviceaccount"
}
],
"terminationMessagePath": "/dev/termination-log",
"terminationMessagePolicy": "File",
"imagePullPolicy": "IfNotPresent",
"securityContext": {
"capabilities": {
"drop": [
"KILL",
"MKNOD",
"SETGID",
"SETUID"
]
},
"runAsUser": 1000080000
}
}
],
"restartPolicy": "Always",
"terminationGracePeriodSeconds": 30,
"dnsPolicy": "ClusterFirst",
"serviceAccountName": "default",
"serviceAccount": "default",
"securityContext": {
"seLinuxOptions": {
"level": "s0:c9,c4"
},
"fsGroup": 1000080000
},
"imagePullSecrets": [
{
"name": "default-dockercfg-kksdv"
}
],
"schedulerName": "default-scheduler"
},
"status": {}
},
"oldObject": None
}
}
app = FastAPI()
@app.post("/mutate")
@webhook.mutating(V1Pod)
def add_init(pod):
pod.spec.init_containers = [V1Container(command="ls -lart", name="list-dir")]
client = TestClient(app)
def test_add_init_container():
response = client.post("/mutate", json=admission_review).json()
patch = get_patch(response["response"]["patch"])
assert patch[0]["op"] == "add"
assert patch[0]["path"] == "/spec/initContainers"
assert patch[0]["value"] == [{"command": "ls -lart", "name": "list-dir"}]
assert "patchType" in response["response"]
@app.post("/validate-labels-v1-pod")
@webhook.validate_create(V1Pod)
def validate_name(pod):
try:
pod.metadata.labels["component"]
except KeyError:
raise webhook.Denied("invalid name found")
def test_validate_create():
response = client.post("/validate-labels-v1-pod", json=admission_review).json().get("response")
assert response["allowed"] is False
@app.post("/validate-update-v1-pod")
@webhook.validate_update(V1Pod)
def validate_label_change(new, old):
if old.metadata.labels != new.metadata.labels:
raise webhook.Denied("invalid name found")
def test_validate_update():
review = copy.deepcopy(admission_review)
review = models.AdmissionReview(**review)
review.request.old_object = copy.deepcopy(review.request.object)
review.request.object["metadata"]["labels"] = {}
review.request.operation = webhook.OPERATION_UPDATE
response = client.post(
"/validate-update-v1-pod", json=responses._response(review)
).json().get("response")
assert response["allowed"] is False
review = copy.deepcopy(admission_review)
review = models.AdmissionReview(**review)
review.request.old_object = copy.deepcopy(review.request.object)
review.request.operation = webhook.OPERATION_UPDATE
response = client.post(
"/validate-update-v1-pod", json=responses._response(review),
).json().get("response")
assert response["allowed"] is True
``` |
{
"source": "johnhoman/kubeflow-defaulting",
"score": 2
} |
#### File: kubeflow-defaulting/src/conftest.py
```python
import uuid
from fastapi.testclient import TestClient
import pytest
from src import main, admission_review
@pytest.fixture(scope="function")
def app():
app_ = main.app
app_.dependency_overrides = {}
return app_
@pytest.fixture(scope="function")
def client(app):
return TestClient(app)
@pytest.fixture(scope="function")
def pod_admission_review():
ar = {
"kind": "AdmissionReview",
"apiVersion": "admission.k8s.io/v1beta1",
"request": {
"uid": "0df28fbd-5f5f-11e8-bc74-36e6bb280816",
"kind": {"group": "", "version": "v1", "kind": "Pod"},
"resource": {"group": "", "version": "v1", "resource": "pods"},
"namespace": "dummy",
"operation": "CREATE",
"userInfo": {
"username": "system:serviceaccount:kube-system:replicaset-controller",
"uid": "a7e0ab33-5f29-11e8-8a3c-36e6bb280816",
"groups": [
"system:serviceaccounts",
"system:serviceaccounts:kube-system",
"system:authenticated",
],
},
"object": {
"metadata": {
"name": "test-pod",
"namespace": "testspace-1234",
},
"spec": {
"volumes": [],
"containers": [
{
"name": "main",
"image": "python:3.8",
},
{
"name": "proxy",
"image": "nginx:latest",
},
],
"restartPolicy": "Always",
"serviceAccountName": "default",
"serviceAccount": "default",
},
"status": {},
},
"oldObject": None,
},
}
return admission_review.AdmissionReview(**ar)
@pytest.fixture(scope="function")
def profile_admission_review():
ar = {
"kind": "AdmissionReview",
"apiVersion": "admission.k8s.io/v1beta1",
"request": {
"uid": str(uuid.uuid4()),
"kind": {"group": "kubeflow.org", "version": "v1", "kind": "Profile"},
"resource": {
"group": "kubeflow.org",
"version": "v1",
"resource": "profile",
},
"operation": "CREATE",
"userInfo": {
"username": "system:serviceaccount:kube-system:replicaset-controller",
"uid": "a7e0ab33-5f29-11e8-8a3c-36e6bb280816",
"groups": [
"system:serviceaccounts",
"system:serviceaccounts:kube-system",
"system:authenticated",
],
},
"object": {
"metadata": {
"name": "staging",
},
"spec": {"owner": {"kind": "User", "name": "<EMAIL>"}},
"status": {
"conditions": [],
},
},
"oldObject": None,
},
}
return admission_review.AdmissionReview(**ar)
```
#### File: kubeflow-defaulting/src/spark_test.py
```python
from src import admission_review
from src.patch_util import decode_patch
def test_spark_pod_has_block_manager_port_defined(client, pod_admission_review):
"""
the webhook exposes port 7777 on the target pod
"""
obj = client.post(
"/spark/mutate-driver-core-v1-pod", json=pod_admission_review.dict()
).json()
res = admission_review.AdmissionReview(**obj)
pod = decode_patch(res)
ports = {port["name"]: port for port in pod["spec"]["containers"][0]["ports"]}
assert "blockmanager" in ports
assert ports["blockmanager"]["containerPort"] == 7777
def test_spark_default_is_idempotent(client, pod_admission_review):
"""
    repeated calls to the webhook produce the same result
"""
obj = client.post(
"/spark/mutate-driver-core-v1-pod", json=pod_admission_review.dict()
).json()
res = admission_review.AdmissionReview(**obj)
before = decode_patch(res)
obj = client.post(
"/spark/mutate-driver-core-v1-pod",
json=admission_review.AdmissionReview(**obj).dict(),
).json()
res = admission_review.AdmissionReview(**obj)
after = decode_patch(res)
assert before == after
def test_spark_driver_pod_has_ip_address(client, pod_admission_review):
obj = client.post(
"/spark/mutate-driver-core-v1-pod", json=pod_admission_review.dict()
).json()
ob = decode_patch(admission_review.AdmissionReview(**obj))
env = {env["name"]: env for env in ob["spec"]["containers"][0]["env"]}
assert env["POD_IP_ADDRESS"]["valueFrom"]["fieldRef"]["fieldPath"] == "status.podIP"
def test_spark_driver_pod_has_pod_name(client, pod_admission_review):
obj = client.post(
"/spark/mutate-driver-core-v1-pod", json=pod_admission_review.dict()
).json()
ob = decode_patch(admission_review.AdmissionReview(**obj))
env = {env["name"]: env for env in ob["spec"]["containers"][0]["env"]}
assert env["POD_NAME"]["valueFrom"]["fieldRef"]["fieldPath"] == "metadata.name"
def test_spark_driver_pod_has_pod_namespace(client, pod_admission_review):
obj = client.post(
"/spark/mutate-driver-core-v1-pod", json=pod_admission_review.dict()
).json()
ob = decode_patch(admission_review.AdmissionReview(**obj))
env = {env["name"]: env for env in ob["spec"]["containers"][0]["env"]}
assert (
env["POD_NAMESPACE"]["valueFrom"]["fieldRef"]["fieldPath"]
== "metadata.namespace"
)
``` |
{
"source": "JohnHsiaoSB/Coursera-Machine-Learning-For-Python",
"score": 3
} |
#### File: 2.logistic_Regression/model/logrmodel.py
```python
import numpy as np
import tensorflow as tf
from scipy.optimize import minimize
from tensorflow.python.framework import ops
class LogisticRegreesion_Model:
def __init__(self, X, Y,use='python'):
self.X_train = X
self.Y_train = Y
self.use = use
self.iter = 1500
self.lrate = 0.01
    @property
    def iter(self):
        return self._iter
    @iter.setter
    def iter(self, v):
        self._iter = v
    @property
    def lrate(self):
        return self._lrate
    @lrate.setter
    def lrate(self, v):
        self._lrate = v
def sigmoid(self, z):
return 1/(1+np.exp(-z))
def computeCost(self,W):
m = self.X_train.shape[1]
hx = self.sigmoid(np.dot(W.T, self.X_train))
J = (-1/m)*(np.dot(self.Y_train,np.log(hx).T)+np.dot(1-self.Y_train,np.log(1-hx).T))
return J
def gradient(self,W):
m = self.X_train.shape[1]
hx = self.sigmoid(np.dot(W.T, self.X_train))
dw = 1/m*np.dot(hx-self.Y_train, self.X_train.T).T
#must flatten for minimize function
return dw.flatten()
def train_for_tensor(self):
pass
def trains_for_python(self):
W = np.zeros((self.X_train.shape[0],1))
result = minimize(self.computeCost, W, method=None, jac=self.gradient, options={"maxiter":self.iter})
#result.fun is final cost function
#result.x are theta parameters
return result.fun, result.x.reshape(result.x.shape[0],1)
def train(self):
if self.use == 'tensor':
return self.train_for_tensor()
else:
return self.trains_for_python()
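# --- Illustrative usage sketch (added; not part of the original file) ---
# Fit the model on a small synthetic dataset. Expected shapes: X_train is
# (n_features, m) including a bias row of ones; Y_train is a length-m vector.
if __name__ == "__main__":
    np.random.seed(0)
    m = 200
    features = np.random.randn(2, m)
    X = np.vstack([np.ones((1, m)), features])  # prepend intercept row
    Y = (features[0] + features[1] + 0.5 * np.random.randn(m) > 0).astype(float)
    model = LogisticRegreesion_Model(X, Y)
    cost, theta = model.train()
    print("final cost:", cost, "theta:", theta.ravel())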
``` |
{
"source": "johnhu33/COMP5328-assignment2",
"score": 3
} |
#### File: johnhu33/COMP5328-assignment2/algorithm.py
```python
import csv
import os
import random
import sys
from typing import Callable, List, Tuple
import lightgbm as lgb
import numpy as np
from scipy.special import softmax
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
import torch
import torch.nn as nn
class Backward:
def __init__(self, model):
self._model = model
def train(self, X: np.ndarray, y: np.ndarray, _: np.ndarray) -> None:
self._model.fit(X, y)
def __call__(self,
X: np.ndarray,
T: np.ndarray,
denoise: bool = False) -> np.ndarray:
ret = self._model.predict_proba(X)
if denoise:
ret = softmax(np.linalg.inv(T) @ ret.T, axis=0).T
return ret
Model = Callable[[int, int], nn.Module]
class Forward:
def __init__(self, build: Model):
self._build = build
def train(self, X: np.ndarray, y: np.ndarray, T: np.ndarray) -> None:
T = torch.from_numpy(T.astype(np.float32))
sm = nn.Softmax(dim=1)
self._model = train(self._build, X, y, lambda x: sm(T @ sm(x).T).T)
def __call__(self,
X: np.ndarray,
T: np.ndarray,
denoise: bool = False) -> np.ndarray:
with torch.no_grad():
ret = softmax(self._model(torch.from_numpy(X.astype(
np.float32))).numpy(),
axis=1)
if not denoise:
ret = softmax(T @ ret.T, axis=0).T
return ret
def train(build: Model, X: np.ndarray, y: np.ndarray,
transform: Callable[[torch.Tensor], torch.Tensor]) -> nn.Module:
device = 'cuda' if torch.cuda.is_available() else 'cpu'
model = build(X.shape[1], max(y) + 1)
if torch.cuda.device_count() > 1:
        model = nn.DataParallel(model)  # single-process multi-GPU; DistributedDataParallel would need init_process_group
model.to(device)
X = torch.from_numpy(X.astype(np.float32)).to(device)
y = torch.from_numpy(y.astype(np.int64)).to(device)
optimizer = torch.optim.SGD(model.parameters(),
lr=1e-1,
weight_decay=1e-5,
momentum=0.9)
train_loader = torch.utils.data.DataLoader(torch.utils.data.TensorDataset(
X, y),
batch_size=256,
shuffle=True)
criterion = nn.CrossEntropyLoss()
for epoch in range(10):
for X, y in train_loader:
optimizer.zero_grad()
pred = transform(model(X))
criterion(pred, y).backward()
optimizer.step()
model.eval()
return model
class NeuralNet:
def __init__(self, build: Model):
self._build = build
def fit(self, X: np.ndarray, y: np.ndarray) -> None:
self._model = train(self._build, X, y, lambda x: x)
def predict_proba(self, X: np.ndarray) -> np.ndarray:
with torch.no_grad():
return softmax(self._model(torch.from_numpy(X.astype(
np.float32))).numpy(),
axis=1)
def evaluate(dataset: str, T: List[List[float]], model) -> Tuple[float, float]:
with np.load(f'data/{dataset}.npz') as data:
Xtr = data['Xtr'].reshape((len(data['Xtr']), -1))
Xts = data['Xts'].reshape((len(data['Xts']), -1))
Xtr, Xtr_val, Str, Str_val = train_test_split(Xtr,
data['Str'],
test_size=0.2)
Yts = data['Yts']
T = np.array(T)
model.train(Xtr, Str, T)
acc_val = top1_accuracy(model(Xtr_val, T), Str_val)
acc = top1_accuracy(model(Xts, T, True), Yts)
return acc_val, acc
def linear(in_dim: int, out_dim: int) -> nn.Module:
return nn.Linear(in_dim, out_dim)
def three_layer(in_dim: int, out_dim: int) -> nn.Module:
return nn.Sequential(nn.Linear(in_dim, out_dim), nn.ReLU(),
nn.Linear(out_dim, out_dim), nn.ReLU(),
nn.Linear(out_dim, out_dim))
def top1_accuracy(pred: np.ndarray, y: np.ndarray) -> float:
return sum(pred.argmax(axis=1) == y) / len(y)
def reset_seed(seed: int = 0):
random.seed(seed)
os.environ['PYTHONHASHSEED'] = str(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
# If multi-GPUs are used.
torch.cuda.manual_seed_all(seed)
torch.backends.cudnn.benchmark = False
torch.backends.cudnn.deterministic = True
def main() -> None:
"""Run all training and evaluation"""
w = csv.DictWriter(
sys.stdout,
['dataset', 'model', 'acc_val', 'acc_val_std', 'acc', 'acc_std'])
w.writeheader()
for dataset, T in DATA.items():
for name, model in MODEL.items():
reset_seed()
acc_val, acc = [], []
for i in range(10):
v, a = evaluate(dataset, T, model)
acc_val.append(v)
acc.append(a)
w.writerow({
'dataset': dataset,
'model': name,
'acc_val': np.mean(acc_val),
'acc_val_std': np.std(acc_val),
'acc': np.mean(acc),
'acc_std': np.std(acc)
})
DATA = {
'FashionMNIST0.5': [[0.5, 0.2, 0.3], [0.3, 0.5, 0.2], [0.2, 0.3, 0.5]],
'FashionMNIST0.6': [[0.4, 0.3, 0.3], [0.3, 0.4, 0.3], [0.3, 0.3, 0.4]],
}
MODEL = {
'forward_linear': Forward(linear),
'backward_linear': Backward(NeuralNet(linear)),
'forward_three_layer': Forward(three_layer),
'backward_three_layer': Backward(NeuralNet(three_layer)),
'LGB': Backward(lgb.LGBMClassifier()),
'logistic': Backward(LogisticRegression()),
}
if __name__ == '__main__':
main()
``` |
{
"source": "johnhubertjj/codesnappy",
"score": 3
} |
#### File: codesnappy/image_collector/bing_scraper.py
```python
from bing_image_downloader import downloader
class BingScraper:
def __init__(self, config):
self.config = config
self.output = self.scrape_bing()
def scrape_bing(self):
download_things = downloader.download(self.config.search, limit=self.config.limit,
output_dir=self.config.output_dir,
adult_filter_off=True,
force_replace=False, timeout=60)
return download_things
``` |
{
"source": "john-hu/rl",
"score": 3
} |
#### File: rl/acrobot/agent.py
```python
import os
import random
import numpy as np
from collections import deque
from keras.models import Sequential
from keras.layers import Dense
from keras.optimizers import Adam
class Agent():
def __init__(self, cfg):
self.state_size = cfg['state_size']
self.action_size = cfg['action_size']
self.memory = deque(maxlen=cfg.get('memory_size', 2000))
self.learning_rate = cfg.get('learning_rate', 0.001)
self.gamma = cfg.get('gamma', 0.95)
self.exploration_rate = cfg.get('exploration_rate', 1.0)
self.exploration_min = cfg.get('exploration_min', 0.01)
self.exploration_decay = cfg.get('exploration_decay', 0.995)
self.model = None
self.__build_model()
def __build_model(self):
self.model = Sequential()
self.model.add(Dense(64, input_dim=self.state_size, activation='linear'))
self.model.add(Dense(32, activation='linear'))
self.model.add(Dense(32, activation='linear'))
self.model.add(Dense(self.action_size, activation='linear'))
self.model.compile(loss='mse', optimizer=Adam(lr=self.learning_rate))
return self.model
def load_weights(self, weights_path):
if not os.path.isfile(weights_path):
return
self.model.load_weights(weights_path)
self.exploration_rate = self.exploration_min
def save_weights(self, weights_path):
if not self.model:
return
self.model.save(weights_path)
def choose_action(self, state):
if np.random.rand() <= self.exploration_rate:
return np.random.choice(self.action_size)
np_state = np.reshape(state, [1, self.state_size])
actions_rewards = self.model.predict(np_state)
return np.argmax(actions_rewards[0])
def remember(self, state, action, reward, next_state, done):
self.memory.append((
np.reshape(state, [1, self.state_size]),
action,
reward,
np.reshape(next_state, [1, self.state_size]),
done))
def replay(self, batch_size):
if len(self.memory) < batch_size:
training_batch = self.memory
else:
training_batch = random.sample(self.memory, batch_size)
for (state, action, reward, next_state, done) in training_batch:
target = reward
if not done:
                # calculate the expected reward of the next state's best action
next_reward = np.amax(self.model.predict(next_state)[0])
# adds future reward expectation
target = reward + self.gamma * next_reward
# predict and train
target_f = self.model.predict(state)
target_f[0][action] = target
self.model.fit(state, target_f, epochs=1, verbose=0)
# decay the exploration rate
if self.exploration_rate > self.exploration_min:
self.exploration_rate *= self.exploration_decay
```
#### File: rl/agents/base.py
```python
from abc import ABC, abstractmethod
from collections import deque
class BaseAgent(ABC):
def __init__(self, cfg):
self.state_size = cfg['state_size']
self.action_size = cfg['action_size']
cfg_agent = cfg.get('agent', {})
self.memory = deque(maxlen=cfg_agent.get('memory_size', 2000))
self.learning_rate = cfg_agent.get('learning_rate', 0.001)
self.gamma = cfg_agent.get('gamma', 0.95)
self.verbose_mode = cfg_agent.get('verbose', False)
@abstractmethod
def load_weights(self, weights_path):
pass
@abstractmethod
def save_weights(self, weights_path):
pass
@abstractmethod
def choose_action(self, state):
pass
@abstractmethod
def train_on_step(self, state, action, reward, next_state, done):
pass
@abstractmethod
def remember(self, state, action, reward, next_state, done):
pass
@abstractmethod
def replay(self, batch_size):
pass
def game_start(self, episode):
pass
def game_end(self, episode):
pass
```
#### File: rl/explorations/uniform_exploration.py
```python
import numpy as np
class UniformExploration:
def __init__(self, cfg):
cfg_epsilon = cfg.get('epsilon', {})
self.__exploration_rate = cfg_epsilon.get('exploration_rate', 1.0)
self.__exploration_min = cfg_epsilon.get('exploration_min', 0.01)
self.__exploration_decay = cfg_epsilon.get('exploration_decay', 0.995)
@property
def exploration_rate(self):
return self.__exploration_rate
def is_exploring(self):
return np.random.rand() <= self.__exploration_rate
def decay(self):
if self.__exploration_rate > self.__exploration_min:
self.__exploration_rate *= self.__exploration_decay
def set_to_min(self):
self.__exploration_rate = self.__exploration_min
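# --- Illustrative usage sketch (added; not part of the original file) ---
# Epsilon-greedy style loop: explore with probability exploration_rate and
# decay that rate towards exploration_min after every step.
if __name__ == "__main__":
    explorer = UniformExploration({"epsilon": {"exploration_rate": 1.0,
                                               "exploration_decay": 0.9}})
    for step in range(5):
        action = "random" if explorer.is_exploring() else "greedy"
        print(step, round(explorer.exploration_rate, 3), action)
        explorer.decay()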
``` |
{
"source": "johnhw/complexitygraph",
"score": 3
} |
#### File: complexitygraph/docs/mkplot.py
```python
from complexitygraph import complexity_graph
import matplotlib.pyplot as plt
def quadratic_time(n):
s = 0
for i in range(n):
for j in range(n):
s = s + 1
complexity_graph(quadratic_time, range(1, 500, 20), reps=12, number=6)
plt.savefig("quadratic.png")
```
#### File: complexitygraph/test/test_complexitygraph.py
```python
import pytest
import math
from complexitygraph import time_complexity, fit_curves, score_report
import random
def test_complexities():
def complexity_should_be(what, fn, ns):
ts = time_complexity(fn, ns, reps=10, number=10)
score_dict = fit_curves(ns, ts)
print(score_dict)
assert list(score_dict.keys())[0] == what
ns = range(1, 1000, 50)
def constant(n):
return 1
def linear(n):
return [1 for i in range(n)]
def quadratic(n):
return [1 for i in range(n) for j in range(n)]
def cubic(n):
return [1 for i in range(n) for j in range(n) for k in range(n)]
def exp(n):
return [1 for i in range(2**n)]
def factorial(n):
x = [random.random() for i in range(n)]
while not x == sorted(x):
random.shuffle(x)
def nlogn(n):
x = [random.random() for i in range(n)]
return sorted(x)
complexity_should_be("constant", constant, ns)
complexity_should_be("linear", linear, ns)
complexity_should_be("quadratic", quadratic, range(1, 200, 20))
complexity_should_be("nlogn", nlogn, range(1, 2000, 20))
complexity_should_be("cubic", cubic, range(1, 50, 5))
complexity_should_be("exp", exp, range(1, 16))
complexity_should_be("factorial", factorial, range(1, 8))
from math import log
def logtime(n):
[1 for i in range(int(1000*log(n)))]
complexity_should_be("log", logtime, range(1, 2000, 100))
def test_fit_curves():
ns = range(10)
ts = time_complexity(lambda n: n, ns, reps=10, number=10, shuffle=True)
score_dict = fit_curves(ns, ts)
score_report(score_dict)
def test_time_complexity():
time_complexity(lambda n: n, [1], reps=1, number=1, shuffle=True)
time_complexity(lambda n: n, range(10), reps=1, number=1, shuffle=False)
time_complexity(lambda n: n, range(0, 100, 10), reps=10, number=1, shuffle=False)
time_complexity(lambda n: n, range(0, 100, 10), reps=10, number=10, shuffle=True)
``` |
{
"source": "johnhw/hashlife",
"score": 3
} |
#### File: johnhw/hashlife/lifeparsers.py
```python
import os, re, textwrap
import numpy as np
# Convert a list of (x,y) positions to a (dense) NumPy array
def to_numpy(pos):
pos = np.array(pos)
print(pos.shape)
pos[:, 0] -= np.min(pos[:, 0])
pos[:, 1] -= np.min(pos[:, 1])
dense = np.zeros((np.max(pos[:, 1]) + 1, np.max(pos[:, 0]) + 1))
for x, y in pos:
dense[y, x] = 1
return dense
def parse_life_106(file):
"""
Parse a Life 1.06 file, returning a tuple:
positions: list of (x,y) co-ordinates
comments: all comments in file, as a list of strings, one per line
"""
lines = file.split("\n")
comments = []
positions = []
pattern_106 = r"\s*\-?[0-9]+\s+\-?[0-9]+\s*"
for line in lines:
line = line.strip().rstrip()
if line.startswith("#"):
# strip out comments
if line[1] in "CcDdnN":
comments.append(line[2:])
else:
if re.match(pattern_106, line):
try:
x, y = [int(p) for p in line.split()]
positions.append((x, y))
except:
pass
comments = "\n".join(comments)
return positions, comments
def to_rle(pts):
"""Convert a point list to RLE format.
Returns:
tuple (rle, (width, height))
rle: the RLE string,
width, height: bounds of the pattern """
# sort by x, then y
pts.sort(key=lambda x: x[0])
max_x = pts[-1][0]
min_x = pts[0][0]
pts.sort(key=lambda x: x[1])
max_y = pts[-1][1]
min_y = pts[0][1]
line = 0
x = 0
stars = 0
out = []
# write out the on cells
def flush_stars():
if stars == 1:
out.append("o")
if stars > 1:
out.append("%do" % stars)
for pt in pts:
pt = (pt[0] - min_x, pt[1] - min_y)
# y co-ord change, write out new lines
if pt[1] != line:
flush_stars()
reps = pt[1] - line
if reps != 1:
out.append("%d$" % reps)
else:
out.append("$")
line = pt[1]
stars = 0
x = 0
cts = 0
# mark blanks
while x != pt[0]:
x = x + 1
cts = cts + 1
if cts != 0:
# write out pending on cells
flush_stars()
# write out blanks
if cts == 1:
out.append("b")
else:
out.append("%db" % cts)
stars = 0
stars = stars + 1
x = x + 1
flush_stars()
out.append("!")
return "".join(out), (max_x - min_x, max_y - min_y)
def write_rle(fname, pts, comments=[]):
"""Write a point list to a file, with an optional comment block"""
rle, (x, y) = to_rle(pts)
f = open(fname, "w")
# size header
f.write("x = %d, y = %d\n")
# comments
for comment in comments:
f.write("#C %s\n" % comment)
# rle, 70 char max width
rle = textwrap.fill(rle, 70)
f.write(rle)
f.close()
def rle_string(pts, comments=[]):
"""Write a point list to a file, with an optional comment block"""
rle, (x, y) = to_rle(pts)
output = []
# size header
output.append("x = %d, y = %d\n" % (x, y))
# comments
for comment in comments:
output.append("#C %s" % comment.strip())
output.append("\n")
# rle, 70 char max width
rle = textwrap.fill(rle, 70)
output.append(rle)
return "\n".join(output)
def parse_life_105(file):
"""Parse a Life 1.05 file, returning a tuple:
positions: list of (x,y) co-ordinates
comments: all comments in file, as a list of strings, one per line.
"""
lines = file.split("\n")
comments = []
positions = []
ox, oy = 0, 0
x, y = ox, oy
pattern_105 = r"\s*(\.|\*|o|O)+\s*\Z"
for line in lines:
line = line.strip().rstrip()
if line.startswith("#"):
# comment
if line[1] in "CcDd":
comments.append(line[2:])
# new block definition
if line[1] in "Pp":
coords = line[2:]
try:
ox, oy = [int(p) for p in coords.split()]
x, y = ox, oy
except:
pass
else:
# skip blanks
if len(line) > 0 and re.match(pattern_105, line):
# only fill in points which are active
for char in line:
if char == "*" or char == "o" or char == "O":
positions.append((x, y))
x += 1
y = y + 1
x = ox
comments = "\n".join(comments)
return positions, comments
def parse_dblife(file):
"""Parse an DBLife file, returning a tuple:
positions: list of (x,y) co-ordinates
comments: all comments in file, as a list of strings, one per line.
"""
lines = file.split("\n")
comments = []
positions = []
x = 0
y = 0
dblife_pattern = r"((\d*)(\.|O|o|\*))*"
for line in lines:
line = line.strip().rstrip()
if line.startswith("!"):
comments.append(line[2:])
# check if this is part of the pattern
if re.match(dblife_pattern, line):
count = 0
for char in line:
# repeat counts
if char.isdigit():
count *= 10
count += int(char)
# blanks
if char in ".":
if count != 0:
x += int(count)
else:
x += 1
count = 0
# ons
if char in "oO*":
if count != 0:
for i in range(count):
positions.append((x, y))
x += 1
else:
positions.append((x, y))
x += 1
count = 0
count = 0
# newlines
y += 1
x = 0
count = 0
return positions, comments
def parse_rle(rle):
"""Parse an RLE string, returning a tuple:
positions: list of (x,y) co-ordinates"""
lines = rle.split("\n")
comments = []
positions = []
x = 0
y = 0
complete = False
for line in lines:
line = line.strip().rstrip()
if len(line) == 0:
pass
elif complete:
comments.append(line)
elif line.startswith("#"):
# extract comment/owner
if complete or line[1] in "cCoOnN":
comments.append(line[2:])
# get offsets
if line[1] in "pP":
coords = line[2:]
try:
x, y = [int(p) for p in coords.split()]
except:
pass
# skip any size line -- we don't need it
elif line.startswith("x"):
continue
else:
count = 0
for char in line:
# repeat counts
if char.isdigit():
count *= 10
count += int(char)
# blanks
if char in "bB":
if count != 0:
x += int(count)
else:
x += 1
count = 0
# ons
if char in "oO":
if count != 0:
for i in range(count):
positions.append((x, y))
x += 1
else:
positions.append((x, y))
x += 1
count = 0
# newlines
if char in "$":
if count != 0:
y += int(count)
else:
y += 1
x = 0
count = 0
if char in "!":
complete = True
break
return positions, comments
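# Illustrative example (added comment): parse_rle("3o!") returns
# ([(0, 0), (1, 0), (2, 0)], []) -- the inverse of the to_rle example above.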
def autoguess_life_file(fname):
"""Open the given file, try and identify the file type
and return the parsed version of the file. Supports:
* Life 1.05
* Life 1.06
* DBLife
* XLife
* RLE
"""
base, ext = os.path.splitext(fname)
f = open(fname)
text = f.read()
f.close()
lines = text.split("\n")
first_line = lines[0].strip().rstrip()
# life 1.05
if first_line.startswith("#Life 1.05"):
return parse_life_105(text)
if first_line.startswith("#Life 1.06"):
return parse_life_106(text)
elif first_line.startswith("!"):
return parse_dblife(text)
# ok, now it could be an RLE file, or it could be an XLIFE file
rle_result = parse_rle(text)
result_105 = parse_life_105(text)
result_106 = parse_life_106(text)
r1 = len(rle_result[0])
r2 = len(result_105[0])
r3 = len(result_106[0])
# rle gave most cells
if r1 > r2 and r1 > r3:
print("Guessed RLE")
return rle_result
if r2 > r1 and r2 > r3:
print("Guessed Life 1.05")
return result_105
    if r3 > r1 and r3 > r2:
print("Guessed Life 1.06")
return result_106
# default, RLE
return rle_result
def read_rle(fname):
"""Open and parse an RLE file"""
    f = open(fname)
positions, comments = parse_rle(f.read())
f.close()
return positions, comments
if __name__ == "__main__":
import sys
pat, comments = autoguess_life_file(sys.argv[1])
print((rle_string(pat, comments=comments.split("\n"))))
``` |
{
"source": "johnhw/ICMI2017",
"score": 3
} |
#### File: johnhw/ICMI2017/pfilter.py
```python
import numpy as np
# return a new function that has the heat kernel (given by sigma) applied.
def make_heat_adjusted(sigma):
def heat_distance(d):
return np.exp(-d**2 / (2.0*sigma**2))
return heat_distance
# resample function from http://scipy-cookbook.readthedocs.io/items/ParticleFilter.html
def resample(weights):
n = len(weights)
indices = []
C = [0.] + [np.sum(weights[:i+1]) for i in range(n)]
u0, j = np.random.random(), 0
for u in [(u0+i)/n for i in range(n)]:
while u > C[j]:
j+=1
indices.append(j-1)
return indices
def no_dynamics(x):
return x
def no_noise(x):
return x
def squared_error(x,y,sigma=1):
# RBF kernel
d = np.sum((x-y)**2, axis=(1,2))
return np.exp(-d / (2.0*sigma**2))
def gaussian_noise(x, sigmas):
"""Apply normally-distributed noise to the N,D array x.
Parameters:
-----------
x : array
(N,D) array of values
sigmas : array
D-element vector of std. dev. for each column of x
"""
n = np.random.normal(np.zeros(len(sigmas)), sigmas, size=(x.shape[0], len(sigmas)))
return x+n
class ParticleFilter(object):
"""A particle filter object which maintains the internal state of a population of particles, and can
be updated given observations.
Attributes:
-----------
n_particles : int
number of particles used (N)
d : int
dimension of the internal state
resample_proportion : float
fraction of particles resampled from prior at each step
particles : array
(N,D) array of particle states
mean_hypothesis : array
The current mean hypothesized observation
mean_state : array
The current mean hypothesized internal state D
hypotheses : array
The (N,...) array of hypotheses for each particle
weights : array
N-element vector of normalized weights for each particle.
"""
def __init__(self, initial, observe_fn, n_particles=200, dynamics_fn=None, noise_fn=None,
weight_fn=None, resample_proportion=0.05, column_names=None, internal_weight_fn=None):
"""
Parameters:
-----------
initial : list
sequence of prior distributions; should be a frozen distribution from scipy.stats;
e.g. scipy.stats.norm(loc=0,scale=1) for unit normal
observe_fn : function(states) => observations
transformation function from the internal state to the sensor state. Takes an (N,D) array of states
and returns the expected sensor output as an array (e.g. a (N,W,H) tensor if generating W,H dimension images).
n_particles : int
number of particles in the filter
dynamics_fn : function(states) => states
dynamics function, which takes an (N,D) state array and returns a new one with the dynamics applied.
noise_fn : function(states) => states
noise function, takes a state vector and returns a new one with noise added.
weight_fn : function(real, hypothesized) => weights
computes the distance from the real sensed variable and that returned by observe_fn. Takes
a an array of N hypothesised sensor outputs (e.g. array of dimension (N,W,H)) and the observed output (e.g. array of dimension (W,H)) and
returns a strictly positive weight for the each hypothesis as an N-element vector.
This should be a *similarity* measure, with higher values meaning more similar, for example from an RBF kernel.
internal_weight_fn : function(states, observed) => weights
Reweights the particles based on their *internal* state. This is function which takes
an (N,D) array of internal states and the observation and
returns a strictly positive weight for the each state as an N-element vector.
Typically used to force particles inside of bounds, etc.
resample_proportion : float
proportion of samples to draw from the initial on each iteration.
column_names : list of strings
names of each the columns of the state vector
"""
self.column_names = column_names
self.prior = initial
self.d = self.prior(n_particles).shape[1]
self.n_particles = n_particles
self.observe_fn = observe_fn
self.dynamics_fn = dynamics_fn or no_dynamics
self.noise_fn = noise_fn or no_noise
self.weight_fn = weight_fn or squared_error
self.resample_proportion = resample_proportion
self.particles = np.zeros((self.n_particles, self.d))
self.internal_weight_fn = internal_weight_fn
self.init_filter()
self.original_particles = np.array(self.particles)
def init_filter(self, mask=None):
"""Initialise the filter by drawing samples from the prior.
Parameters:
-----------
mask : array, optional
boolean mask specifying the elements of the particle array to draw from the prior. None (default)
implies all particles will be resampled (i.e. a complete reset)
"""
new_sample = self.prior(self.n_particles)
# resample from the prior
if mask is None:
self.particles = new_sample
else:
self.particles[mask,:] = new_sample[mask,:]
def update(self, observed=None):
"""Update the state of the particle filter given an observation.
Parameters:
----------
observed: array
The observed output, in the same format as observe_fn() will produce. This is typically the
input from the sensor observing the process (e.g. a camera image in optical tracking).
If None, then the observation step is skipped, and the filter will run one step in prediction-only mode.
"""
# apply dynamics and noise
self.particles = self.dynamics_fn(self.particles)
self.particles = self.noise_fn(self.particles)
# invert to hypothesise observations
self.hypotheses = self.observe_fn(self.particles)
if observed is not None and not np.any(np.isnan(observed)):
# compute similarity to observations
# force to be positive
weights = np.clip(np.array(self.weight_fn(self.hypotheses, observed)), 0, np.inf)
else:
# we have no observation, so all particles weighted the same
weights = np.ones((self.n_particles,))
# apply weighting based on the internal state
if self.internal_weight_fn is not None:
internal_weights = self.internal_weight_fn(self.particles, observed)
internal_weights = np.clip(internal_weights, 0, np.inf)
internal_weights = internal_weights / np.sum(internal_weights)
weights *= internal_weights
        # normalise weights to probabilities
self.weights = weights / np.sum(weights)
        # mean/MAP estimates, computed before resampling so the weights
        # still correspond to these particles
        self.mean_hypothesis = np.sum(self.hypotheses.T * self.weights, axis=-1).T
        self.mean_state = np.sum(self.particles.T * self.weights, axis=-1).T
        self.map = self.particles[np.argmax(self.weights)]
        # resampling step
        indices = resample(self.weights)
        self.particles = self.particles[indices, :]
        self.original_particles = np.array(self.particles)
# randomly resample some particles from the prior
random_mask = np.random.random(size=(self.n_particles,))<self.resample_proportion
self.resampled_particles = random_mask
self.init_filter(mask=random_mask)
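# --- Illustrative usage sketch (added; not part of the original file) ---
# A minimal 2D position tracker with an identity observation model. The prior,
# noise level and weight function below are arbitrary choices for the demo.
if __name__ == "__main__":
    prior_fn = lambda n: np.random.uniform(0, 1, size=(n, 2))
    pf = ParticleFilter(
        initial=prior_fn,
        observe_fn=lambda states: states,  # the sensor observes the state directly
        n_particles=100,
        noise_fn=lambda x: gaussian_noise(x, sigmas=[0.05, 0.05]),
        weight_fn=lambda hyp, obs: np.exp(-np.sum((hyp - obs) ** 2, axis=1)),
    )
    for t in range(10):
        pf.update(np.array([0.5 + 0.01 * t, 0.5]))
    print("estimated state:", pf.mean_state)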
``` |
{
"source": "johnhw/sound_server",
"score": 3
} |
#### File: sound_server/sound_server/derivative_estimator.py
```python
import numpy as np
#sg coefficient computation
def savitzky_golay(window_size=None,order=2):
if window_size is None:
window_size = order + 2
if window_size % 2 != 1 or window_size < 1:
raise TypeError("window size must be a positive odd number")
if window_size < order + 2:
raise TypeError("window size is too small for the polynomial")
# A second order polynomial has 3 coefficients
order_range = range(order+1)
half_window = (window_size-1)//2
B = np.mat(
[ [k**i for i in order_range] for k in range(-half_window, half_window+1)] )
M = np.linalg.pinv(B)
return M
# savitzky-golay polynomial estimator, with optional derivatives
def sg_filter(size, deriv=0, order=4):
if size%2==0:
print "Size for Savitzky-Golay must be odd. Adjusting..."
size = size + 1
if order<deriv+1:
order = deriv+1
sgolay = savitzky_golay(size, order)
diff = np.ravel(sgolay[deriv, :])
return diff
class SavitzkyGolay:
def __init__(self, size, deriv=0, order=4):
if size%2==0:
print "Size for Savitzky-Golay must be odd. Adjusting..."
size = size + 1
self.size = size
self.deriv = deriv
if order<deriv+1:
order = deriv+1
sgolay = savitzky_golay(size, order)
diff = np.ravel(sgolay[deriv, :])
self.buffer = None
self.diff = diff
self.init = False
def new_sample(self, x):
if self.buffer is None:
self.buffer = np.ones((self.size,)) * x
else:
self.buffer[0:-1] = self.buffer[1:]
self.buffer[-1] = x
result = np.sum(self.buffer * self.diff)
return result
def clear(self):
self.buffer = None
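# --- Illustrative usage sketch (added; not part of the original file) ---
# Stream a noisy ramp through the filter; with deriv=1 the output approximates
# the per-sample derivative of the input signal.
if __name__ == "__main__":
    sg = SavitzkyGolay(size=9, deriv=1, order=3)
    signal = np.linspace(0.0, 1.0, 100) + np.random.normal(0, 0.01, 100)
    slopes = [sg.new_sample(x) for x in signal]
    print(slopes[-1])  # roughly 1/99, the true per-sample slope of the ramp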
``` |
{
"source": "johnhw/sqlexperiment",
"score": 2
} |
#### File: sqlexperiment/examples/distributed.py
```python
import time
import sys
import logging
from multiprocessing import Process
import zmq
import traceback
import collections
import logging
import daiquiri
daiquiri.setup(level=logging.INFO)
# logging.basicConfig()
import explogger
from explogger import zmq_log
import random
def log_remote(name):
logger = zmq_log.LogProxy()
for i in range(20):
time.sleep(random.random()*0.1)
logger.log(name, data={"name":name, "id":i})
if __name__=="__main__":
# basic test if the remote logging is working...
m = zmq_log.ZMQLog("my_multi.db", ntp_sync=False)
log = m.get_proxy()
t = str(time.time())
log.enter(t)
pub = zmq_log.LogProxyPub()
for i in range(100):
pub.log('test', data={'x':1})
time.sleep(1./20)
print(pub.test)
session_id = log.session_id
print("Multiple asynchronous writes...")
p1 = Process(target=log_remote, args=("Alpha",))
p2 = Process(target=log_remote, args=("Bravo",))
p3 = Process(target=log_remote, args=("Charlie",))
p1.start()
p2.start()
p3.start()
p1.join()
p2.join()
p3.join()
print("Completed.")
print(log.meta_dataframe())
log.leave()
log.close()
# import sqlite3
# conn = sqlite3.connect("my_multi.db")
# results = conn.execute("SELECT * FROM log WHERE session=?", (session_id,)).fetchall()
# for r in results:
# print(r)
# conn.close()
``` |
{
"source": "johnhw/summerschool2018",
"score": 2
} |
#### File: summerschool2018/src/key_display.py
```python
from key_test import capture_keys
from tkanvas import TKanvas
try:
    import Queue as queue  # Python 2
except ImportError:
    import queue  # Python 3
# py 2.x compatibility
try:
import Tkinter as tkinter
from Tkinter import mainloop
except ImportError:
from tkinter import mainloop
import numpy as np
from multiprocessing import Queue, Process
import matplotlib
import time
import matplotlib.pyplot as plt
from rwo import RWO
from key_noise import Corrupter
def tkcolor(rgb):
return "#" + "".join(("%02X" % (int(c * 255)) for c in rgb[:3]))
class TKMatrix(object):
def __init__(self, canvas, shape, size, origin=None, cmap=None):
self.origin = origin or (size / 2, size / 2)
self.cmap = cmap or plt.get_cmap("viridis")
self.shape = shape
self.size = size
self.canvas = canvas
self.create_rects()
def create_rects(self):
self.rects = []
sz = self.size
ox, oy = self.origin
for i in range(self.shape[1]):
for j in range(self.shape[0]):
rect = self.canvas.rectangle(
ox + i * sz,
oy + j * sz,
ox + (i + 1) * sz,
oy + (j + 1) * sz,
fill="blue",
)
self.rects.append(rect)
def update(self, matrix):
assert matrix.shape == self.shape
for i in range(self.shape[0]):
for j in range(self.shape[1]):
ix = i * self.shape[1] + j
color = self.cmap(matrix[i, j])[:3]
self.canvas.canvas.itemconfig(self.rects[ix], fill=tkcolor(color))
class KeyDisplay(object):
def __init__(
self,
q,
shape=(8, 16),
transform_fn=None,
rwo_kwargs=None,
alpha=0.9,
noise=0.05,
):
self.transform_fn = transform_fn or (lambda x: x) # default to identity
self.shape = shape
self.state = np.zeros(shape)
self.q = q # keyboard input
self.keys = np.zeros(128, dtype=np.float32)
self.block_size = 24
self.status = "OK"
self.use_rwo = rwo_kwargs is not None
if self.use_rwo:
self.rwo = RWO(128, **rwo_kwargs)
np.random.seed(35325)
random_permutation = np.eye(128)[np.random.permutation(128)]
self.corrupter = Corrupter(
[random_permutation], sensor_noise=np.full((128,), noise), obs_alpha=alpha
)
self.corrupt_keys = np.zeros_like(self.keys)
self.canvas = TKanvas(
draw_fn=self.draw,
tick_fn=self.tick,
w=self.block_size * (self.shape[1]+1),
h=self.block_size * (self.shape[0]+2),
)
self.canvas.title("Ctrl-ESC-ESC-ESC to quit")
self.matrix_display = TKMatrix(self.canvas, self.shape, self.block_size)
self.text = self.canvas.text(
self.block_size / 2,
self.canvas.h - self.block_size / 2,
text=str(self.status),
fill="white",
anchor="w",
font=("Arial", 16),
)
def tick(self, dt):
try:
result = self.q.get(block=False)
if result:
arr_bytes, t, _ = result
self.keys[:] = np.frombuffer(arr_bytes, dtype=np.float32)
else:
self.canvas.quit(None)
except queue.Empty:
# no updates, do nothing
pass
self.corrupt_keys = self.corrupter.update(self.keys)
if self.use_rwo:
self.rwo.update(self.corrupt_keys, time.clock())
self.state = self.transform_fn(self.corrupt_keys).reshape(self.shape)
def draw(self, src):
# draw the blank squares for the outputs
if self.use_rwo:
src.canvas.itemconfig(self.text, text=str(self.rwo.n_vecs))
self.matrix_display.update(self.state)
def key_tk(*args, **kwargs):
import keyboard
current_state = keyboard.stash_state()
q = Queue()
keys = Process(target=capture_keys, args=(q,))
keys.start()
k = KeyDisplay(q, *args, **kwargs)
keyboard.restore_state(current_state)
time.sleep(0.5)
keyboard.restore_state(current_state)
return current_state
if __name__ == "__main__":
import keyboard, atexit
current_state = keyboard.stash_state()
atexit.register(keyboard.restore_state, current_state)
q = Queue()
keys = Process(target=capture_keys, args=(q,))
keys.start()
k = KeyDisplay(q)
mainloop()
```
#### File: summerschool2018/src/key_nb.py
```python
from IPython.display import clear_output
import numpy as np
from multiprocessing import Queue, Process
from key_test import capture_keys
def key_tk():
    # local imports: keyboard and KeyDisplay are only needed by this helper
    import keyboard
    from key_display import KeyDisplay
    current_state = keyboard.stash_state()
q = Queue()
keys = Process(target=capture_keys, args=(q,))
keys.start()
k = KeyDisplay(q)
def notebook_keys():
q = Queue()
keys = Process(target=capture_keys, args=(q,))
keys.start()
result = q.get()
print("Ctrl-ESC to stop")
while result:
arr_bytes, time, name = result
print("Ctrl-ESC to stop: {0}".format(name))
result = q.get()
keys.join()
```
#### File: summerschool2018/src/key_noise.py
```python
import numpy as np
def add_noise(x, noise):
return x + np.random.normal(0,1, noise.shape) * noise
class Corrupter:
def __init__(self, mix_l, sensor_noise=0.0, obs_noise=0.0, vec_size=128, sensor_alpha=0.0, obs_alpha=0.0):
self.mix_l = mix_l
self.vecs = np.zeros((len(mix_l), vec_size))
self.sensor_noise = np.array(sensor_noise)
self.obs_noise = np.array(obs_noise)
self.ix = 0 # buffer write position
self.n = len(self.vecs)
self.ix_array = list(range(self.n))
self.sensor_alpha = sensor_alpha
self.obs_alpha = obs_alpha
self.last_output = np.zeros(vec_size,)
self.last_sensor = np.zeros(vec_size,)
def update(self, k_vec):
self.last_sensor = (1-self.sensor_alpha) * k_vec + self.sensor_alpha*self.last_sensor
self.vecs[self.ix] = add_noise(self.last_sensor, self.sensor_noise)
self.ix = (self.ix+1) % self.n
ixs = self.ix_array[self.ix:] + self.ix_array[:self.ix]
output = np.sum([np.dot(self.mix_l[i], self.vecs[ixs[i]]) for i in range(self.n)], axis=0)
self.last_output = (1-self.obs_alpha) * output + self.obs_alpha*self.last_output
return add_noise(self.last_output, self.obs_noise)
```
#### File: summerschool2018/src/kf_display.py
```python
import numpy as np
# for interactive drawing
import tkanvas
import pykalman
import scipy.stats
def kf_loglik(C, mean, cov, obs):
pred_obs_mean = np.dot(C, mean)
pred_obs_cov = np.dot(C, np.dot(cov, C.T))
obs_arr = np.array(obs)
# likelihood of this sample
return scipy.stats.multivariate_normal.logpdf(
obs_arr, mean=pred_obs_mean, cov=pred_obs_cov
)
class KFDisplay(object):
def __init__(self, A, C, sigma_a, sigma_c, mu_0, sigma_0, path, frame_time=2000, reject_lik=None):
self.track = True
self.A = A
self.C = C
self.reject_lik = reject_lik
self.sigma_c = sigma_c
self.path = iter(path)
self.kalman_filter = pykalman.KalmanFilter(
transition_matrices=A,
observation_matrices=C,
transition_covariance=sigma_a,
observation_covariance=sigma_c,
initial_state_mean=mu_0,
initial_state_covariance=sigma_0,
)
self.obs_path = []
self.track_path = []
self.obs = next(self.path)
self.src = tkanvas.TKanvas(
draw_fn=self.kalman_draw,
frame_time=frame_time,
w=800,
h=800,
bgcolor="black",
)
self.mean, self.cov = mu_0, sigma_0
self.new_mean, self.new_cov = self.kalman_filter.filter_update(
self.mean, self.cov, observation=self.obs
)
self.lik = kf_loglik(self.C, self.new_mean, self.cov, self.obs)
self.kalman_iter = self.draw_kalman_filter()
# Draw each step of the Kalman filter onto a TKinter canvas
def draw_kalman_filter(self):
self.src.clear()
font = ("Arial", 24)
for p in self.obs_path:
self.src.circle(p[0], p[1], 2, fill="white")
for p in self.track_path:
self.src.circle(p[0], p[1], 2, fill="blue")
if self.obs is not None:
self.obs_path.append(self.obs)
self.track_path.append(self.new_mean[:2])
# don't bother drawing circles when at speed
# draw the prior
self.src.normal(self.mean[:2], self.cov[:2, :2], outline="#0000ff")
loglik = self.src.text(
20, 40, text="%.0f"%self.lik, anchor="w", fill="gray", font=("Arial", 10)
)
if self.src.frame_time < 50:
return
text = self.src.text(
20, 20, text="Prior P(X_t)", anchor="w", fill="gray", font=font
)
self.src.to_front(text)
yield 0 # this is a trick to allow to "return" here but resume later
ax = np.dot(self.A, self.mean)
acov = np.dot(np.dot(self.A, self.cov), self.A.T)
# prediction after linear dynamics
self.src.normal(ax[:2], acov[:2, :2], outline="#00ff00", dash=(2, 4))
self.src.modify(text, text="Prediction f(x_(t-1)) -> x_t")
self.src.to_front(text)
yield 0
# prediction after linear dynamics
self.src.normal(ax[:2], acov[:2, :2], outline="#dd00ff", dash=(2, 2))
self.src.modify(text, text="Expected observation y_t g(x_t) -> y'_t")
self.src.to_front(text)
yield 0
if self.obs is not None:
# observation (if there is one)
self.src.circle(self.obs[0], self.obs[1], 5, fill="#ffffff")
# src.modify(text, text="Observation y_t")
# uncertainty of observation
self.src.normal(
self.obs, self.sigma_c[:2, :2], outline="#6600ff", dash=(2, 2)
)
self.src.modify(text, text="Observation w/uncertainty")
self.src.to_front(text)
yield 0
yield 0
# posterior
self.src.normal(self.new_mean[:2], self.new_cov[:2, :2], outline="#8899ff")
self.src.modify(text, text="Posterior P(Xt|Yt)")
self.src.to_front(text)
yield 0
# draw the Kalman filter updates interactively
def kalman_draw(self, src):
if self.src.frame_time > 20:
# slowly speed up over time
self.src.frame_time = src.frame_time * 0.95
try:
next(self.kalman_iter)
# we've drawn all the steps, so make another update
except StopIteration:
self.mean, self.cov = self.new_mean, self.new_cov
try:
self.obs = next(self.path)
except StopIteration:
src.quit(None)
return
self.lik = kf_loglik(self.C, self.mean, self.cov, self.obs)
if self.reject_lik is None or self.lik>self.reject_lik:
self.new_mean, self.new_cov = self.kalman_filter.filter_update(
self.mean, self.cov, observation=self.obs
)
self.kalman_iter = self.draw_kalman_filter()
return
```
#### File: summerschool2018/src/rwo.py
```python
import scipy.spatial
import numpy as np
class RWO(object):
def __init__(self, d, threshold=0.45, bag=None, metric='euclidean'):
if bag is not None and "data" in bag and len(bag["data"])>0:
            self.bag = np.array(bag["data"])
else:
self.bag = None
self.output = bag
self.d = d
self.metric = metric
self.threshold = threshold
self.n_vecs = 0
def update(self, vector, t):
if self.bag is None:
self.bag = np.array(vector[None,:])
self.n_vecs = 1
if self.output is not None:
self.output["data"].append(vector)
self.output["time"].append(t)
return True
ds = scipy.spatial.distance.cdist(self.bag, vector[None, :], self.metric)
if np.min(ds)>self.threshold:
# add to the bag if new enough
self.bag = np.concatenate((self.bag, vector[None, :]), axis=0)
self.n_vecs = len(self.bag)
if self.output is not None:
self.output["data"].append(vector)
self.output["time"].append(t)
return True
return False
``` |
{
"source": "johnhw/wmirror",
"score": 3
} |
#### File: wmirror/server/astro.py
```python
import ephem
import math
import datetime
from datetime import timedelta
def local_transit(transit, body, location, date, horizon='0',
use_center=False):
# deal with sun never coming up/going down
# special cases: sun is always up or always down
if date == 'always':
return 'always'
if date == 'never':
return 'never'
location.date = date
location.horizon = horizon
transit_fn = {
"noon": location.next_transit,
"rising": location.previous_rising,
"setting": location.next_setting
}[transit]
try:
# make sure we get the next setting, after *todays* rising
# even if it is after sunset now
if transit != "rising":
start = location.previous_rising(body, use_center=use_center)
else:
start = None
if use_center:
time = transit_fn(body, use_center=use_center, start=start)
else:
time = transit_fn(body, start=start)
return time
except ephem.AlwaysUpError:
# polar regions, in polar summer
return 'always'
except ephem.NeverUpError:
# polar regions, in polar winter
return 'never'
def format_date(body, here, date):
# convert special string dates without modification
if type(date) == type(""):
return {"date": date, "alt": "0.0", "az": "0.0"}
else:
here.date = date
body.compute(here)
# convert to isoformat date (in LOCAL time!)
return {
"date": ephem.localtime(date).isoformat(),
"alt": body.alt,
"az": body.az
}
# note: we always calculate using UTC time
# and convert to local time when returning results
# this means that times must be in UTC when used as parameters!
# Standard format for positions is:
# "time": (local time as ISO format string)
# "alt": altitude, radians (0=horizon, +ve=above)
# "az": azimuth, radians (0=due North, +ve=clockwise)
class Astro:
def __init__(self, lon, lat, elev=0):
self.lon = lon
self.lat = lat
self.sun = ephem.Sun()
self.moon = ephem.Moon()
def locations(self, date=None):
if date is None:
date = datetime.datetime.utcnow()
here = ephem.Observer()
here.lat = self.lat
here.lon = self.lon
here.date = date
self.moon.compute(here)
self.sun.compute(here)
return {
"moon": {
"time": ephem.localtime(ephem.date(date)).isoformat(),
"alt": self.moon.alt,
"az": self.moon.az
},
"sun": {
"time": ephem.localtime(ephem.date(date)).isoformat(),
"alt": self.sun.alt,
"az": self.sun.az
}
}
def solar_analemma(self, date=None):
# compute the position of the sun at each point day in the year
# at this exact time
if date is None:
date = datetime.datetime.utcnow()
here = ephem.Observer()
here.lat = self.lat
here.lon = self.lon
here.date = date
solar_analemma = []
# ignoring leap years...
for i in range(365):
self.sun.compute(here)
solar_analemma.append({
"time":ephem.localtime(ephem.date(here.date)).isoformat(),
"alt":self.sun.alt,
"az":self.sun.az
})
here.date += 1
return {"sun": solar_analemma}
def transits(self, date=None, n=240):
# compute path of sun and moon across the whole day
if date is None:
date = datetime.datetime.utcnow()
morning = ephem.date(date.date()) # chop off time
evening = ephem.date(
date.date() + timedelta(hours=24)) # add one full day
lunar_transit = []
solar_transit = []
here = ephem.Observer()
here.lat = self.lat
here.lon = self.lon
for i in range(n):
time = (morning) + (evening - morning) * (i / (n - 1))
here.date = time
self.moon.compute(here)
self.sun.compute(here)
lunar_transit.append({
"time": ephem.localtime(ephem.date(time)).isoformat(),
"alt":self.moon.alt,
"az":self.moon.az
})
solar_transit.append({
"time":ephem.localtime(ephem.date(time)).isoformat(),
"alt":self.sun.alt,
"az":self.sun.az
})
return {"moon": lunar_transit, "sun": solar_transit}
def lunar_phase(self, date=None):
# phase is a fraction 0.0 -> 1.0
# radius is visual radius in arcseconds
# direction is Waxing or Waning
# name is Gibbous or Crescent
lunar_names = [['Waxing', 'Crescent'], ['Waxing', 'Gibbous'],
['Waning', 'Gibbous'], ['Waning', 'Crescent']]
if date is None:
date = datetime.datetime.utcnow()
here = ephem.Observer()
here.lat = self.lat
here.lon = self.lon
here.date = date
self.moon.compute(here)
self.sun.compute(here)
# lunar name computation from
# https://stackoverflow.com/questions/26702144/human-readable-names-for-phases-of-the-moon-with-pyephem
sunlon = ephem.Ecliptic(self.sun).lon
moonlon = ephem.Ecliptic(self.moon).lon
tau = 2.0 * ephem.pi
angle = (moonlon - sunlon) % tau
quarter = int(angle * 4.0 // tau)
###
return {
"phase": self.moon.moon_phase,
"radius": self.moon.radius,
'direction': lunar_names[quarter][0],
'name': lunar_names[quarter][1],
"sunmoon_angle": angle
}
def solar_day(self, date=None):
return self.day(self.sun, date)
# get rising/setting and noon times for the sun, including for civil, nautical and astronomical twilight
# note that times can be datetime objects, or the strings "always" (sun never sets) or "never" (sun never rises)
def day(self, body, date=None):
day = {}
if date is None:
date = datetime.datetime.utcnow()
here = ephem.Observer()
here.lat = self.lat
here.lon = self.lon
here.date = date
rising = local_transit('rising', body, here, date)
day['rising'] = format_date(body, here, rising)
        day['noon'] = format_date(
            body, here, local_transit('noon', body, here, rising))
        day['setting'] = format_date(
            body, here, local_transit('setting', body, here, rising))
# compute the set/rise times for twilights
twilights = {
'true': '0',
'civil': '-6',
'nautical': '-12',
'astronomical': '-18'
}
for name, degrees in twilights.items():
day[name] = {}
day[name]['rising'] = format_date(
body, here,
local_transit(
                    'rising', body, here, date,
use_center=True, horizon=degrees))
day[name]['setting'] = format_date(
body, here,
local_transit(
                    'setting', body, here, date,
use_center=True, horizon=degrees))
here.horizon = '0'
return day
if __name__ == "__main__":
a = Astro(lat='60:11:37', lon='-1:17:40')
print(a.solar_day())
print(a.lunar_phase())
print(a.transits())
print(a.locations())
print(a.solar_analemma())
```
#### File: wmirror/server/gcalendar.py
```python
from googleapiclient import discovery
import datetime
import json
with open("../secrets/keys.json") as f:
secrets = json.load(f)
def get_events(n=10):
build = discovery.build
service = build('calendar', 'v3', developerKey=secrets["calendar_api_key"])
# Call the Calendar API
now = datetime.datetime.utcnow().isoformat() + 'Z' # 'Z' indicates UTC time
events_result = service.events().list(calendarId=secrets["calendar_id"], timeMin=now,
maxResults=n, singleEvents=True,
orderBy='startTime').execute()
events = events_result.get('items', [])
return {"events":events}
if __name__=="__main__":
print(get_events(20))
``` |
{
"source": "JohnHZheng/Athena2019",
"score": 2
} |
#### File: SandBox/CoachEric/Test.py
```python
from ev3dev2.motor import LargeMotor,MediumMotor, OUTPUT_D, OUTPUT_A, OUTPUT_C, OUTPUT_B, follow_for_ms
from ev3dev2.motor import SpeedDPS, SpeedRPM, SpeedRPS, SpeedDPM, MoveTank, MoveSteering, SpeedPercent
from ev3dev2.sound import Sound
from time import sleep
from ev3dev2.sensor.lego import ColorSensor
from ev3dev2.sensor import INPUT_1, INPUT_2, INPUT_3, INPUT_4
from ev3dev2.sound import Sound
import math
import sys
import time
LeftAction = MediumMotor(OUTPUT_A)
RightAction = MediumMotor(OUTPUT_D)
TankPair = MoveTank(OUTPUT_B, OUTPUT_C, motor_class=LargeMotor)
LeftSensor = ColorSensor(INPUT_1)
RightSensor = ColorSensor(INPUT_4)
LeftWheel = LargeMotor(OUTPUT_B)
RightWheel = LargeMotor(OUTPUT_C)
sound = Sound()
def Step1():
TankPair.on_for_seconds(SpeedDPS(-405),SpeedDPS(-400), 1.3,False,True)
TankPair.on_for_seconds(SpeedDPS(-255),SpeedDPS(-250), 1.2,False,True)
def Step2():
TankPair.on_for_degrees(SpeedDPS(250),SpeedDPS(250), 270,True,True)
def LineFollowing(Degree):
DegreeSum = 0
AngleOld = 360 * LeftWheel.position / LeftWheel.count_per_rot
while DegreeSum < Degree:
if LeftSensor.color == ColorSensor.COLOR_WHITE:
RightWheel.on(SpeedDPS(230))
LeftWheel.on(SpeedDPS(80))
else:
LeftWheel.on(SpeedDPS(230))
RightWheel.on(SpeedDPS(80))
#print("LeftSensor - color:{0}", LeftSensor.color_name, file=sys.stderr)
AngleNew = 360 * LeftWheel.position / LeftWheel.count_per_rot
DegreeSum = DegreeSum + AngleNew - AngleOld
AngleOld = AngleNew
LeftWheel.off()
RightWheel.off()
#TankPair.on_for_seconds(SpeedDPS(-250),SpeedDPS(-150), 1)
LineFollowing(1300)
#print("LeftWheelAngleOld:{0}, LeftWheelAngleNew:{2}", AngleOld, AngleNew, file=sys.stderr)
#def Step3():
#TankPair.on_for_degrees(SpeedDPS(0),SpeedDPS(-250),230 )
#def Step4():#TankPair.on_for_degrees(SpeedDPS(-250),SpeedDPS(250), 100,True,True)
#sound.beep()
#print("LeftSensor - reflected:{0}, color:{1}", LeftSensor.reflected_light_intensity, LeftSensor.color_name, file=sys.stderr)
#print("RightSensor - reflected:{0}, color:{1}", RightSensor.reflected_light_intensity, RightSensor.color_name, file=sys.stderr)
Step1()
Step2()
#Step3()
```
#### File: SandBox/EanZheng/LineDetect.py
```python
from ev3dev2.motor import LargeMotor, OUTPUT_C, OUTPUT_B, follow_for_ms
from ev3dev2.motor import SpeedDPS, SpeedRPM, SpeedRPS, SpeedDPM, MoveTank, MoveSteering, SpeedPercent
from time import sleep
from ev3dev2.sensor.lego import ColorSensor
from ev3dev2.sensor import INPUT_1, INPUT_2, INPUT_3, INPUT_4
import os
import sys
os.system('setfont Lat15-TerminusBold14')
lmB = LargeMotor(OUTPUT_C)
lmC = LargeMotor(OUTPUT_B)
cs1 = ColorSensor(INPUT_4)
cs4 = ColorSensor(INPUT_1)
def detect():
times = 0
notfoundyet = True
while notfoundyet:
if cs1.color == ColorSensor.COLOR_WHITE and cs4.color == ColorSensor.COLOR_WHITE:
if cs1.reflected_light_intensity > 80 and cs4.reflected_light_intensity > 80:
times = times+1
if times == 2:
notfoundyet = False
print( "CS1: {1:10}, CS4: {2:10}, CS1Reflected: {3:04d}, CS4Reflected: {3:04d}".format( cs1.color_name, cs4.color_name, cs1.reflected_light_intensity, cs4.reflected_light_intensity), file=sys.stderr)
print( "CS1: {1:10}, CS4: {2:10}, CS1Reflected: {3:04d}, CS4Reflected: {3:04d}".format(
cs1.color_name, cs4.color_name, cs1.reflected_light_intensity, cs4.reflected_light_intensity), file=sys.stderr)
lmB.on(60)
lmC.on(60)
detect()
lmB.off()
lmC.off()
#loopCounter = 0
#while (cs4.color != ColorSensor.COLOR_BLACK and cs1.color != ColorSensor.COLOR_BLACK):
""" while (loopCounter < 48):
print( "{0:02d} - CS1: {1:10}, CS4: {2:10}, CS1Reflected: {3:04d}, CS4Reflected: {3:04d}".format(
loopCounter, cs1.color_name, cs4.color_name, cs1.reflected_light_intensity, cs4.reflected_light_intensity), file=sys.stderr)
loopCounter += 1;
sleep(.005)
lmB.off()
lmC.off() """
``` |
{
"source": "johniblake/JavaTutor",
"score": 3
} |
#### File: python/example_02/person.py
```python
class Person(object):
def __init__(self,h):
self.height = h
def printHeight(self):
print "Height: ",self.height
def main():
guy = Person(5)
guy.printHeight()
if __name__ == "__main__":
main()
``` |
{
"source": "Johnicholas/EldritchCopy",
"score": 2
} |
#### File: Eldritch/Tools/bake.py
```python
import os
import sys
VERBOSE = True
REBUILD = False
# The first value in each entry is the description that will be
# shown to the user if they try an invalid ruleset.
BAKE_RULES = { 'all': ( 'Bake everything.', 'blender_meshes', 'fonts', 'font_textures', 'textures', 'shaders', 'meshes', 'config', 'audio', 'rooms', 'exe', 'etc' ),
'meshes': ( 'Export and bake Blender meshes.', 'blender_meshes', 'meshes' ),
'compilemeshes': ( 'Bake intermediate meshes.', 'meshes' ),
'fonts': ( 'Generate font textures and bake definitions.', 'fonts', 'font_textures' ),
'textures': ( 'Bake all textures.', 'textures', 'font_textures' ),
'shaders': ( 'Bake shaders.', 'shaders' ),
'config': ( 'Bake configuration files.', 'config' ),
'audio': ( 'Bake all audio.', 'audio' ),
'rooms': ( 'Bake room files.', 'rooms' ),
'exe': ( 'Bake executables.', 'exe' ),
'etc': ( 'Bake everything else.', 'etc' ),
'clean': ( 'Clean Intermediate and Baked folders.', 'clean' ) }
# Asset folders
RAW = '../Raw/'
BAKED = '../Baked/'
INTERMEDIATE = '../Intermediate/'
RAW_TEXTURES = RAW + 'Textures/'
INTER_TEXTURES = INTERMEDIATE + 'Textures/'
INTER_FONT_TEXTURES = INTER_TEXTURES + 'Fonts/'
RAW_MESHES = RAW + 'Meshes/'
INTER_MESHES = INTERMEDIATE + 'Meshes/'
FONTS = RAW + 'Fonts/'
SHADERS = RAW + 'Shaders/'
CONFIG = RAW + 'Config/'
AUDIO = RAW + 'Audio/'
ROOMS = RAW + 'Rooms/'
MISC = RAW + 'Misc/'
# Tools folders
CYGWIN_BIN = 'C:/cygwin/bin/'
TOOLS_DIR = './'
BLENDER_DIR = 'C:/Program Files (x86)/Blender Foundation/Blender/'
DXUTIL_DIR = 'C:/Program Files (x86)/Microsoft DirectX SDK (June 2008)/Utilities/Bin/x86/'
# Tools
COPY = CYGWIN_BIN + 'cp'
MOVE = CYGWIN_BIN + 'mv'
MESH_COMPILER = TOOLS_DIR + 'MeshCompiler.exe'
CONFIG_COMPILER = TOOLS_DIR + 'ConfigCompiler.exe'
FONT_GENERATOR = TOOLS_DIR + 'FontGenerator.exe'
MESH_EXPORT = TOOLS_DIR + 'xmlmeshexport.py'
BLENDER = BLENDER_DIR + 'blender.exe'
DXTEX = TOOLS_DIR + 'DxTex.exe' # Custom version to do normal hack
FXC = TOOLS_DIR + 'fxc.exe' # Moved to work around Program Files weirdness or something
NUMBAKED = 0
NUMSKIPPED = 0
#-----------------------------------------------------
def bake( rules ):
if 'blender_meshes' in rules:
blenddir( MESH_EXPORT, RAW_MESHES, RAW, INTERMEDIATE, '.mesh' )
if 'fonts' in rules:
generatefonts( FONT_GENERATOR, FONTS )
if 'textures' in rules:
converttextures( DXTEX, RAW_TEXTURES, RAW, BAKED );
bakedir( COPY, RAW_TEXTURES, RAW, BAKED, '.bmp', '.bmp' )
if 'font_textures' in rules:
converttextures( DXTEX, INTER_TEXTURES, INTERMEDIATE, BAKED );
if 'shaders' in rules:
compileshaders( FXC, COPY, SHADERS );
if 'config' in rules:
bakedir( CONFIG_COMPILER, CONFIG, RAW, BAKED, '.config', '.ccf' )
bakedir( CONFIG_COMPILER, CONFIG, RAW, BAKED, '.syncerconfig', '.pcf' )
bakedir( COPY, CONFIG, RAW, BAKED, '.cfg', '.cfg' )
if 'meshes' in rules:
bakedir( MESH_COMPILER, INTER_MESHES, INTERMEDIATE, BAKED, '.mesh', '.cms' )
if 'audio' in rules:
bakedir( COPY, AUDIO, RAW, BAKED, '.mp3', '.mp3' )
bakedir( COPY, AUDIO, RAW, BAKED, '.ogg', '.ogg' )
bakedir( COPY, AUDIO, RAW, BAKED, '.wav', '.wav' )
if 'rooms' in rules:
bakedir( COPY, ROOMS, RAW, BAKED, '.eldritchroom', '.eldritchroom' )
if 'exe' in rules:
bakedir( COPY, RAW, RAW, BAKED, '.exe', '.exe', False )
bakedir( COPY, RAW, RAW, BAKED, '.dll', '.dll', False )
if 'etc' in rules:
bakedir( COPY, RAW, RAW, BAKED, '.txt', '.txt', False )
bakedir( COPY, RAW, RAW, BAKED, '.html', '.html', False )
bakedir( COPY, MISC, RAW, BAKED, '.bmp', '.bmp', False )
#-----------------------------------------------------
def runtool( args ):
if VERBOSE:
for arg in args:
print arg,
print
os.spawnv( os.P_WAIT, args[0], args )
#-----------------------------------------------------
# Timestamp checking for simple improvement on build time
def shouldbuild( rawfile, bakedfiles ):
global NUMBAKED
global NUMSKIPPED
if REBUILD:
NUMBAKED = NUMBAKED + 1
return True
rawstat = os.stat( rawfile )
for bakedfile in bakedfiles:
bakedfileexists = os.access( bakedfile, os.F_OK )
if bakedfileexists:
bakedstat = os.stat( bakedfile )
if bakedstat.st_mtime < rawstat.st_mtime:
NUMBAKED = NUMBAKED + 1
return True
else:
NUMBAKED = NUMBAKED + 1
return True
if VERBOSE:
print 'Skipping ' + rawfile
NUMSKIPPED = NUMSKIPPED + 1
return False
#-----------------------------------------------------
# If rawext is specified, only files matching that extension are baked
# If rawext isn't specified, all files in the folder are baked
# This will recurse into any subfolders of the given path
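# Illustrative call (mirrors the existing 'config' rule in bake() above; nothing new is added here):
#   bakedir( CONFIG_COMPILER, CONFIG, RAW, BAKED, '.config', '.ccf' )
# runs ConfigCompiler.exe on every .config file under Raw/Config/ and writes the
# corresponding .ccf file into the matching folder under Baked/.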
def bakedir( tool, rawdir, rawpath, bakedpath, rawext = '', bakedext = '', recursive = True ):
for path, dirs, files in os.walk( rawdir ):
if '.svn' in dirs:
dirs.remove( '.svn' ) # Ignore SVN folders
if not recursive:
del dirs[:] # Empty the list so we just examine this path
if not os.path.exists( path.replace( rawpath, bakedpath ) ):
os.mkdir( path.replace( rawpath, bakedpath ) )
for file in files:
if( ( not rawext ) or ( rawext in file ) ):
infile = os.path.join( path, file )
outfile = infile.replace( rawpath, bakedpath ).replace( rawext, bakedext )
if shouldbuild( infile, [ outfile ] ):
runtool( [ tool, infile, outfile ] )
#-----------------------------------------------------
# Create font property and image files
def generatefonts( tool, rawdir ):
for path, dirs, files in os.walk( rawdir ):
if '.svn' in dirs:
dirs.remove( '.svn' ) # Ignore SVN folders
if not os.path.exists( path.replace( FONTS, INTER_FONT_TEXTURES ) ):
os.mkdir( path.replace( FONTS, INTER_FONT_TEXTURES ) )
if not os.path.exists( path.replace( RAW, BAKED ) ):
os.mkdir( path.replace( RAW, BAKED ) )
for file in files:
if( '.font' in file ):
infile = os.path.join( path, file )
outfontfile = infile.replace( RAW, BAKED ).replace( '.font', '.fnp' )
outimagefile = infile.replace( FONTS, INTER_FONT_TEXTURES ).replace( '.font', '.tga' )
# Don't test shouldbuild(outimagefile), because it's unnecessary and the name will be modified by FontGenerator
if shouldbuild( infile, [ outfontfile ] ):
runtool( [ tool, infile, outfontfile, outimagefile ] )
#-----------------------------------------------------
# Create font property and image files
def converttextures( tool, rawdir, rawpath, bakedpath ):
for path, dirs, files in os.walk( rawdir ):
if '.svn' in dirs:
dirs.remove( '.svn' ) # Ignore SVN folders
if not os.path.exists( path.replace( rawpath, bakedpath ) ):
os.mkdir( path.replace( rawpath, bakedpath ) )
for file in files:
if( '.tga' in file ):
if( '_NODXT' in file ):
infile = os.path.join( path, file )
outfile = infile.replace( rawpath, bakedpath )
if shouldbuild( infile, [ outfile ] ):
runtool( [ COPY, infile, outfile ] )
else:
compression = 'DXT1'
normalhack = ''
if( '_DXT2' in file ):
compression = 'DXT2'
if( '_DXT3' in file ):
compression = 'DXT3'
if( '_DXT4' in file ):
compression = 'DXT4'
if( '_DXT5' in file ):
compression = 'DXT5'
if( '_NORMAL' in file ):
compression = 'DXT5'
normalhack = '-n'
if( '_SPEC' in file ):
compression = 'DXT5'
infile = os.path.join( path, file )
outfile = infile.replace( rawpath, bakedpath ).replace( '.tga', '.dds' )
if shouldbuild( infile, [ outfile ] ):
runtool( [ tool, infile, '-m', normalhack, compression, outfile ] )
#-----------------------------------------------------
# Create font property and image files
def compileshaders( compiler, copy, rawdir ):
for path, dirs, files in os.walk( rawdir ):
if '.svn' in dirs:
dirs.remove( '.svn' ) # Ignore SVN folders
if not os.path.exists( path.replace( RAW, BAKED ) ):
os.mkdir( path.replace( RAW, BAKED ) )
for file in files:
if( file.endswith( '.fx' ) ):
infile = os.path.join( path, file )
outfile = infile.replace( RAW, BAKED ).replace( '.fx', '.cfx' )
if shouldbuild( infile, [ outfile ] ):
runtool( [ compiler, '/T', 'fx_2_0', '/Fo', outfile, infile ] )
elif( file.endswith( '.hlsl_vs2' ) ):
infile = os.path.join( path, file )
outfile = infile.replace( RAW, BAKED ).replace( '.hlsl_vs2', '.chv2' )
if shouldbuild( infile, [ outfile ] ):
runtool( [ compiler, '/T', 'vs_2_0', '/E', 'Main', '/Fo', outfile, infile ] )
elif( file.endswith( '.hlsl_ps2' ) ):
infile = os.path.join( path, file )
outfile = infile.replace( RAW, BAKED ).replace( '.hlsl_ps2', '.chp2' )
if shouldbuild( infile, [ outfile ] ):
runtool( [ compiler, '/T', 'ps_2_0', '/E', 'Main', '/Fo', outfile, infile ] )
elif( file.endswith( '.glsl_vs120' ) ):
infile = os.path.join( path, file )
outfile = infile.replace( RAW, BAKED ).replace( '.glsl_vs120', '.gv12' )
if shouldbuild( infile, [ outfile ] ):
runtool( [ copy, infile, outfile ] )
elif( file.endswith( '.glsl_fs120' ) ):
infile = os.path.join( path, file )
outfile = infile.replace( RAW, BAKED ).replace( '.glsl_fs120', '.gf12' )
if shouldbuild( infile, [ outfile ] ):
runtool( [ copy, infile, outfile ] )
#-----------------------------------------------------
# Call Blender in background mode with a given script for each blend file
def blenddir( script, sourcedir, rawpath, bakedpath, bakedext ):
for path, dirs, files in os.walk( sourcedir ):
if '.svn' in dirs:
dirs.remove( '.svn' ) # Ignore SVN folders
if not os.path.exists( path.replace( rawpath, bakedpath ) ):
os.mkdir( path.replace( rawpath, bakedpath ) )
for file in files:
if( file.endswith('.blend') ): # Cull .blend1, etc.
infile = os.path.join( path, file )
exportedfile = infile.replace( '.blend', bakedext )
outfile = exportedfile.replace( rawpath, bakedpath )
# Because I can't pass another parameter to Blender, I have to assume the name and move it
if shouldbuild( infile, [ outfile ] ):
runtool( [ BLENDER, '-b', infile, '-P', script ] )
runtool( [ MOVE, '-f', exportedfile, outfile ] )
#-----------------------------------------------------
# Recursively delete a directory
def clean( cleanpath ):
if os.path.exists( cleanpath ):
for path, dirs, files in os.walk( cleanpath, False ):
for file in files:
os.remove( path + '/' + file )
os.rmdir( path )
#-----------------------------------------------------
# Ensure a directory exists by making it if it does not
def makeifnotexists( path ):
while not os.path.exists( path ):
try:
os.mkdir( path )
except:
pass
#-----------------------------------------------------
# Entry point
#-----------------------------------------------------
# TODO: Add exception handling (that also catches bad tool exit codes)
# NOTE: Python uses try/except/raise instead of try/catch/throw
# Ruleset must be the first parameter, if it's used at all
if len( sys.argv ) > 1:
if sys.argv[1] in BAKE_RULES:
ruleset = sys.argv[1]
else:
ruleset = None
else:
ruleset = 'all'
for arg in sys.argv:
if arg == '-r':
REBUILD = True
if not ruleset:
print 'Specified ruleset not found. Valid rulesets are:'
for k, v in BAKE_RULES.iteritems():
print k + ': ' + v[0]
else:
rules = BAKE_RULES[ ruleset ]
# Only clean the Intermediate/Baked folders if we're rebuilding all
if( ( ruleset == 'all' and REBUILD ) or 'clean' in rules ):
print 'Deleting Intermediate and Baked folders...'
clean( INTERMEDIATE )
clean( BAKED )
makeifnotexists( INTERMEDIATE )
makeifnotexists( BAKED )
print 'Baking assets...'
try:
bake( rules )
except:
print 'Exception while baking.'
sys.exit(1)
print 'Baking done!'
print 'Baked %d files, skipped %d files.' % ( NUMBAKED, NUMSKIPPED )
print 'Total %d files processed.' % ( NUMBAKED + NUMSKIPPED )
```
#### File: Eldritch/Tools/xmlmeshexport.py
```python
__bpydoc__ = """\
Raw XML mesh output for Engine meshes.
"""
# Bones names must be less than 16 characters (my limit)
# Animation names must be less than 32 characters (my limit)
# Use a frame marker named Event_ to indicate an animation event at the start of that frame (NOT YET IMPLEMENTED)
# Use a frame marker named Loop to indicate a loop start point (jumped to when anim ends) (NOT YET IMPLEMENTED)
import sys
import Blender
from Blender import Mesh, Armature, Modifier, Mathutils
from Blender.Mathutils import *
import BPyMesh
# ========================
# === Write XML Format ===
# ========================
def write(filename):
start = Blender.sys.time()
if not filename.lower().endswith('.mesh'):
filename += '.mesh'
scn = Blender.Scene.GetCurrent()
object = scn.objects.active
if not object:
Blender.Draw.PupMenu('Error%t|Select 1 active object')
return
if object.getType() != 'Mesh':
Blender.Draw.PupMenu('Error%t|Active object is not a mesh')
return
mesh = object.getData(False,True)
if not mesh:
Blender.Draw.PupMenu('Error%t|Could not get mesh data from active object')
return
mesh.transform(object.matrixWorld, True)
# Try to get the armature (and the armature object, which
# I need to get the pose) through the modifier stack
armObj = None
for m in object.modifiers:
if m.type == Modifier.Types['ARMATURE']:
armObj = m[Modifier.Settings['OBJECT']] # Armature.Get(m.name)
break
arm = None
if not armObj:
# If armature is not applied as a modifier, try to get it as the object's parent
print 'No armature modifier.'
armObj = object.parent
if not armObj:
print 'No parent object.'
elif armObj.type != 'Armature':
print 'Parent object is not an armature.'
armObj = None
else:
print 'Armature found from parent: OB:', armObj.name
arm = armObj.getData(False,False)
else:
print 'Armature found from modifier: OB:', armObj.name
arm = armObj.getData(False,False)
bones = None
if arm:
bones = arm.bones.values()
if not arm.vertexGroups:
print 'Warning: Armature is not using vertex groups.'
vgNames = mesh.getVertGroupNames()
vgroups = [mesh.getVertsFromGroup(vg,1) for vg in vgNames] # Python is awesome!
file = open(filename, "wb")
file.write('<mesh name="%s">\n' % object.name)
# Output all vertices in all faces--this is redundant, but that's okay,
# because Blender doesn't have index lists, so I have to do a pass to
# produce those anyway, and I can cull duplicate vertices then (and if
# I just iterate on the verts, I only get one UV per vert).
for face in mesh.faces:
file.write('\t<face>\n')
idx = 0;
for vert in face.verts:
file.write('\t\t<vert>\n')
file.write('\t\t\t<pos x="%.6f" y="%.6f" z="%.6f" />\n' % tuple(vert.co))
if mesh.faceUV:
file.write('\t\t\t<uv x="%.6f" y="%.6f" />\n' % tuple(face.uv[idx]))
elif mesh.vertexUV:
file.write('\t\t\t<uv x="%.6f" y="%.6f" />\n' % tuple(vert.uvco))
file.write('\t\t\t<norm x="%.6f" y="%.6f" z="%.6f" />\n' % tuple(vert.no))
# Bones--this will get expensive
vgiter = 0
numbones = 0
for vg in vgroups:
for (vgi,vgw) in vg:
if vgi == vert.index:
numbones = numbones + 1
if numbones > 4:
print 'Warning: More than 4 bones applied on a vertex (%s)' % vgNames[vgiter]
else:
file.write('\t\t\t<bone name="%s" wgt="%.6f" />\n' % (vgNames[vgiter], vgw))
vgiter = vgiter + 1
file.write('\t\t</vert>\n')
idx = idx + 1
file.write('\t</face>\n')
if bones:
curframe = Blender.Get('curframe')
file.write('\t<armature frames="%d">\n' % (Blender.Get('endframe')-Blender.Get('staframe')+1))
for bone in bones:
if not bone.hasParent(): # Start with the root bone(s) and recurse
writeBone(file,bone,armObj,True)
# Write keyframe marker names
# (When animating, use markers in the timeline to set
# keyframes at the start of each separate animation in the file
timeline = Blender.Scene.GetCurrent().getTimeLine();
if timeline:
for f in range(Blender.Get('staframe'),Blender.Get('endframe')+1):
if(timeline.getMarked(f)):
file.write('\t\t<anim frame="%d" name="%s" />\n' % ( f, timeline.getName(f) ))
file.write('\t</armature>\n')
Blender.Set('curframe',curframe)
# Material getter stuff
for mat in mesh.materials:
if mat:
for mtex in mat.getTextures():
if mtex and mtex.tex and mtex.tex.image:
imgtype = 'Diffuse'
if mtex.tex.normalMap: # Could get bunches of other information, just depends on what I need
imgtype = 'Normal'
file.write('\t<material type="%s" file="%s" />\n' % ( imgtype, mtex.tex.image.filename ))
file.write('</mesh>\n')
mesh.transform(object.getInverseMatrix(), True)
file.close()
end = Blender.sys.time()
message = 'Successfully exported "%s" in %.4f seconds' % ( Blender.sys.basename(filename), end-start)
print message
def writeBone(file,bone,armObj,recurse):
pose = armObj.getPose()
file.write('\t\t<bonedef name="%s">\n' % bone.name)
flipMat = Matrix([1,0,0,0],[0,0,-1,0],[0,1,0,0],[0,0,0,1])
# Write each frame
for f in range(Blender.Get('staframe'),Blender.Get('endframe')+1):
Blender.Set('curframe',f)
posebone = pose.bones[bone.name]
poseMatrix = armObj.matrixWorld.copy().invert() * posebone.localMatrix * armObj.matrixWorld
file.write('\t\t\t<frame num="%d"\n' % f)
file.write('\t\t\t\tm00="%.6f" m01="%.6f" m02="%.6f" m03="%.6f"\n' % tuple(poseMatrix[0]))
file.write('\t\t\t\tm10="%.6f" m11="%.6f" m12="%.6f" m13="%.6f"\n' % tuple(poseMatrix[1]))
file.write('\t\t\t\tm20="%.6f" m21="%.6f" m22="%.6f" m23="%.6f"\n' % tuple(poseMatrix[2]))
file.write('\t\t\t\tm30="%.6f" m31="%.6f" m32="%.6f" m33="%.6f"\n' % tuple(poseMatrix[3]))
file.write('\t\t\t/>\n')
# Skip bone weight for now, it doesn't seem to affect anything
file.write('\t\t</bonedef>\n')
if recurse and bone.hasChildren():
for child in bone.getAllChildren():
writeBone(file,child,armObj,False)
def main():
if Blender.mode == 'interactive':
Blender.Window.FileSelector(write, 'XML Mesh Export', Blender.sys.makename(ext='.mesh'))
else:
# Find the blend file argument
for arg in sys.argv:
if '.blend' in arg:
write(Blender.sys.makename(arg, '.mesh'))
if __name__=='__main__':
main()
``` |
{
"source": "johnidm/yt-lb-audio",
"score": 3
} |
#### File: johnidm/yt-lb-audio/main.py
```python
import webvtt
import uuid
from pydub import AudioSegment
from datetime import datetime
import os
import glob
import shutil
import sys
from unidecode import unidecode
import string
if len(sys.argv) != 2:
print("")
print("Subtitle language is required!")
print("Usage: 'python main.py <subtitle lang>'")
print("")
exit(1)
def to_sec(t):
"""
Convert canonical time format to milliseconds
Example:
- 00:00:10.930 to 10930
"""
td = datetime.strptime(t, '%H:%M:%S.%f') - datetime(1900, 1, 1)
return int(td.total_seconds() * 1000)
def text_cleaner(text):
"""
Clean up the source text
Steps:
- remove accents
- remove punctuation
- remove newlines and double spaces
- convert to lowercase
"""
text = unidecode(text)
text = text.translate(str.maketrans('', '', string.punctuation))
text = " ".join(text.split())
text = text.lower()
return text
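# Illustrative example of the cleaning steps above (hypothetical input, not from the dataset):
#   text_cleaner("Olá, Mundo!\n  Teste") -> "ola mundo teste"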
directory = 'corpus'
lang = sys.argv[1]
if os.path.exists(directory):
answer = None
while answer not in ("y", "n", "Y", "N"):
answer = input(f"Do you wish to remove {directory} folder [y/n] ? ")
if answer in ("y", "Y"):
shutil.rmtree(directory)
elif answer in ("N", "n"):
break
else:
print("Please answer [y] or [n].")
else:
os.makedirs(directory)
files = glob.glob(f'downloads/*.{lang}.wav')
for audio_file in files:
root_file, _ = os.path.splitext(audio_file)
subtitle_file = f"{root_file}.vtt"
audio = AudioSegment.from_wav(audio_file)
basename = f"{directory}/{str(uuid.uuid4())}"
os.makedirs(basename)
for index, caption in enumerate(webvtt.read(subtitle_file), start=1):
basename_file = f"{basename}/{str(index)}"
part_audio_filename = f"{basename_file}.wav"
part_sub_filename = f"{basename_file}.txt"
t1 = to_sec(caption.start)
t2 = to_sec(caption.end)
part_audio = audio[t1:t2]
part_audio.export(part_audio_filename, format="wav")
with open(part_sub_filename, 'w') as f:
f.write(text_cleaner(caption.text))
print(f"Done! The labeled audio datasets are avaliable in {directory} folder.")
``` |
{
"source": "JohnieBraaf/Boat-Controller-Micropython-LVGL",
"score": 3
} |
#### File: JohnieBraaf/Boat-Controller-Micropython-LVGL/buffer.py
```python
class RingBuffer:
def __init__(self, size):
self.size = size + 1
self.data = bytearray(self.size)
self.index_put = 0
self.index_get = 0
self.count = 0
@micropython.native
def any(self):
if self.index_get != self.index_put:
return True
return False
@micropython.native
def put(self, value):
next_index = (self.index_put + 1) % self.size
if self.index_get != next_index:
self.data[self.index_put] = value
self.index_put = next_index
self.count += 1
return value
else:
return 0x00 # buffer full
@micropython.native
def get(self):
if self.any():
value = self.data[self.index_get]
self.index_get = (self.index_get + 1) % self.size
self.count -= 1
return value
else:
return 0x00 # buffer empty
#
# TextBuffer
#
# - size: number of bytes allocated to text
# - lines_max: maximum number of lines to keep in buffer (0xd new line delimited)
# - lines_trim: number of lines to trim if lines_max is reached
#
# Adds index_read to the ringbuffer, and dirty_read to flag if the reader is in sync
# get_text() resets the dirty_read flag, after which read_text() can be used for subsequent reads
#
class TextBuffer(RingBuffer):
def __init__(self, size, lines_max, lines_trim):
super().__init__(size)
self.lines_max = lines_max
self.lines_trim = lines_trim
self.lines_count = 0
self.index_read = 0
self.dirty_read = False
@micropython.native
def put(self, value):
if self.index_get == (self.index_put + 1) % self.size: # buffer full,
self.get_line() # pop line from buffer
super().put(value)
if value == 0xd:
self.lines_count += 1
if self.lines_count > self.lines_max: # too many lines
self.get_line(self.lines_trim) # pop number of lines from buffer
@micropython.native
def get_line(self, num=1, peek=False):
old_index = self.index_get # save index
old_count = self.count # save count
ret = ''
for i in range(num):
while self.any():
c = self.get()
if c == 0xd:
break
ret += str(chr(c)) + str(hex(c))
if peek:
self.index_get = old_index # reset index
self.count = old_count # reset count
else:
self.lines_count -= num
self.dirty_read = True
return ret
@micropython.native
def get_text(self, peek=False):
old_index = self.index_get # save index
old_count = self.count # save count
ret = ''
while self.any():
ret += str(chr(self.get()))
if peek:
self.index_get = old_index # reset index
self.count = old_count # reset count
self.dirty_read = False
return ret
@micropython.native
def any_read(self):
if self.index_read != self.index_put:
return True
return False
@micropython.native
def read(self):
if self.any_read():
value = self.data[self.index_read]
self.index_read = (self.index_read + 1) % self.size
return value
else:
return None # buffer empty
@micropython.native
def read_text(self):
ret = ''
while self.any_read():
ret += str(chr(self.read()))
return ret
```
#### File: JohnieBraaf/Boat-Controller-Micropython-LVGL/console.py
```python
import io
import lvgl as lv
from buffer import TextBuffer
class REPL(io.IOBase):
def __init__(self, console):
self.console = console
self.buf = TextBuffer(1000, 15, 1)
@micropython.native
def readinto(self, buf, nbytes=0):
return None
@micropython.native
def write(self, buf):
i = 0
while i < len(buf):
c = buf[i]
if c == 0x1b: # remove escape chars
i += 1
while chr(buf[i]) in '[;0123456789':
i += 1
#c = buf[i]
#if c != 0x4b and c != 0x4:
# self.console.add_text(hex(c))
else:
if c == 0x8: # backspace
self.console.del_char()
elif c != 0xa: # normal character
self.buf.put(c)
i += 1
# print directly to console
if self.buf.dirty_read:
self.console.set_text(self.buf.get_text(True))
self.buf.read_text() # flag all as read
self.console.add_text(self.buf.read_text())
return len(buf)
```
#### File: JohnieBraaf/Boat-Controller-Micropython-LVGL/page_dashboard.py
```python
import machine
import lvgl as lv
#from lv_colors import lv_colors
from style import ColorStyle, ShadowStyle
class Page_Dashboard:
def __init__(self, app, page):
self.app = app
self.page = page
self.test_events = []
#self.page.set_flex_flow(lv.FLEX_FLOW.COLUMN)
#self.page.set_flex_align(lv.FLEX_ALIGN.SPACE_EVENLY, lv.FLEX_ALIGN.CENTER, lv.FLEX_ALIGN.CENTER)
# create an arc
self.arc = lv.arc(page)
self.arc.set_size(200,200)
self.arc.align(lv.ALIGN.TOP_LEFT, 0, 0)
self.arc.set_rotation(180)
self.arc.set_range(-40, 40)
self.arc.set_end_angle(270)
self.arc.set_bg_angles(180, 360)
self.arc.set_start_angle(180)
style = lv.style_t()
#lv.style_copy(style, lv.style_plain)
#style.line.color = lv.color_make(0,0,255) # Arc color
#style.line.width = 8 # Arc width
#self.arc.set_style(lv.arc.STYLE.MAIN, style) # Use the new style
# counter button
self.reset_btn = lv.btn(page)
self.reset_btn.set_size(50,50)
self.reset_btn.align(lv.ALIGN.TOP_RIGHT, 10, 10)
self.reset_btn.add_event_cb(self.on_reset_btn, lv.EVENT.CLICKED, None)
self.reset_label = lv.label(self.reset_btn)
self.reset_label.set_text("Reset")
self.reset_label.align(lv.ALIGN.CENTER, 0, 0)
self.meter = lv.meter(page)
self.meter.set_size(200, 200)
self.meter.align(lv.ALIGN.TOP_RIGHT, -40, 15)
self.scale_ticks = self.meter.add_scale()
self.meter.set_scale_ticks(self.scale_ticks, 10, 2, 3, lv.palette_main(lv.PALETTE.GREY))
self.meter.set_scale_range(self.scale_ticks, 0, 100, 100, 0)
self.scale_major_ticks = self.meter.add_scale()
self.meter.set_scale_major_ticks(self.scale_major_ticks, 42, 2, 10, lv.palette_main(lv.PALETTE.GREY), 2)
indic = self.meter.add_needle_line(self.scale_ticks, 5, lv.palette_main(lv.PALETTE.BLUE), 10)
self.meter.set_indicator_value(indic, 50)
# slider
#self.slider = lv.slider(page)
#self.slider.set_width(lv.pct(80))
#self.slider_label = lv.label(page)
#self.slider.add_event_cb(self.on_slider_changed, lv.EVENT.VALUE_CHANGED, None)
#self.on_slider_changed(None)
# style selector
#self.styles = [('Gray', ColorStyle(0xCCC)),
# ('Red', ColorStyle(0xF00)),
# ('Green',ColorStyle(0x0F0)),
# ('Blue', ColorStyle(0x00F))]
#self.style_selector = lv.dropdown(page)
#self.style_selector.add_style(ShadowStyle(), lv.PART.MAIN)
#self.style_selector.align(lv.ALIGN.OUT_BOTTOM_LEFT, 0, 40)
#self.style_selector.set_options('\n'.join(x[0] for x in self.styles))
#self.style_selector.add_event_cb(self.on_style_selector_changed, lv.EVENT.VALUE_CHANGED, None)
# counter button
#self.counter_btn = lv.btn(page)
#self.counter_btn.set_size(80,80)
#self.counter_label = lv.label(self.counter_btn)
#self.counter_label.set_text("Count")
#self.counter_label.align(lv.ALIGN.CENTER, 0, 0)
#self.counter_btn.add_event_cb(self.on_counter_btn, lv.EVENT.CLICKED, None)
#self.counter = 0
#def on_slider_changed(self, event):
# self.slider_label.set_text(str(self.slider.get_value()))
def on_reset_btn(self, event):
machine.reset()
def on_style_selector_changed(self, event):
selected = self.style_selector.get_selected()
tabview = self.app.screen_main.tabview
if hasattr(self, 'selected_style'): tabview.remove_style(self.selected_style, lv.PART.MAIN)
self.selected_style = self.styles[selected][1]
tabview.add_style(self.selected_style, lv.PART.MAIN)
def on_counter_btn(self, event):
self.counter += 1
self.counter_label.set_text(str(self.counter))
```
#### File: JohnieBraaf/Boat-Controller-Micropython-LVGL/repl.py
```python
import subprocess
subprocess.Popen("plink -serial \\.\COM7 -sercfg 115200,8,n,1,N", shell=False, stdin=subprocess.PIPE)
class REPL():
def __init__(self):
self.com_port = 'COM7'
``` |
{
"source": "JohnieBraaf/django-river",
"score": 2
} |
#### File: django-river/features/environment.py
```python
import os
import django
from behave import register_type
from django.core import management
os.environ["DJANGO_SETTINGS_MODULE"] = "settings.with_sqlite3"
def before_all(context):
django.setup()
def before_scenario(context, scenario):
management.call_command('flush', interactive=False)
def parse_string_with_whitespace(text):
return text
def parse_list(text):
return [better_item.strip() for item in text.split(" or ") for better_item in item.split(" and ")]
# -- REGISTER: User-defined type converter (parse_type).
register_type(ws=parse_string_with_whitespace)
register_type(list=parse_list)
```
#### File: django-river/river/apps.py
```python
import logging
import operator
from functools import reduce
from django.apps import AppConfig
from django.db.utils import OperationalError, ProgrammingError
LOGGER = logging.getLogger(__name__)
class RiverApp(AppConfig):
name = 'river'
label = 'river'
def ready(self):
for field_name in self._get_all_workflow_fields():
try:
workflows = self.get_model('Workflow').objects.filter(field_name=field_name)
if workflows.count() == 0:
LOGGER.warning("%s field doesn't seem have any workflow defined in database. You should create its workflow" % field_name)
except (OperationalError, ProgrammingError):
pass
from river.config import app_config
if app_config.INJECT_MODEL_ADMIN:
for model_class in self._get_all_workflow_classes():
self._register_hook_inlines(model_class)
LOGGER.debug('RiverApp is loaded.')
@classmethod
def _get_all_workflow_fields(cls):
from river.core.workflowregistry import workflow_registry
return reduce(operator.concat, map(list, workflow_registry.workflows.values()), [])
@classmethod
def _get_all_workflow_classes(cls):
from river.core.workflowregistry import workflow_registry
return list(workflow_registry.class_index.values())
@classmethod
def _get_workflow_class_fields(cls, model):
from river.core.workflowregistry import workflow_registry
return workflow_registry.workflows[id(model)]
def _register_hook_inlines(self, model): # pylint: disable=no-self-use
from django.contrib import admin
from river.core.workflowregistry import workflow_registry
from river.admin import OnApprovedHookInline, OnTransitHookInline, OnCompleteHookInline, DefaultWorkflowModelAdmin
registered_admin = admin.site._registry.get(model, None)
if registered_admin:
if OnApprovedHookInline not in registered_admin.inlines:
registered_admin.inlines = list(set(registered_admin.inlines + [OnApprovedHookInline, OnTransitHookInline, OnCompleteHookInline]))
registered_admin.readonly_fields = list(set(list(registered_admin.readonly_fields) + list(workflow_registry.get_class_fields(model))))
admin.site._registry[model] = registered_admin
else:
admin.site.register(model, DefaultWorkflowModelAdmin)
```
#### File: river/models/transitionmeta.py
```python
from __future__ import unicode_literals
from django.db import models
from django.db.models import PROTECT
from django.utils.translation import ugettext_lazy as _
from river.models import State, Workflow
from river.models.base_model import BaseModel
class TransitionMeta(BaseModel):
class Meta:
app_label = 'river'
verbose_name = _("Transition Meta")
verbose_name_plural = _("Transition Meta")
unique_together = [('workflow', 'source_state', 'destination_state')]
workflow = models.ForeignKey(Workflow, verbose_name=_("Workflow"), related_name='transition_metas', on_delete=PROTECT)
source_state = models.ForeignKey(State, verbose_name=_("Source State"), related_name='transition_meta_as_source', on_delete=PROTECT)
destination_state = models.ForeignKey(State, verbose_name=_("Destination State"), related_name='transition_meta_as_destination', on_delete=PROTECT)
def __str__(self):
return 'Field Name:%s, %s -> %s' % (
self.workflow,
self.source_state,
self.destination_state
)
```
#### File: river/models/workflow.py
```python
from django.db import models
from django.db.models import PROTECT
from django.utils.translation import ugettext_lazy as _
from river.config import app_config
from river.models import BaseModel, State
from river.models.managers.workflowmetada import WorkflowManager
class Workflow(BaseModel):
class Meta:
app_label = 'river'
verbose_name = _("Workflow")
verbose_name_plural = _("Workflows")
unique_together = [("content_type", "field_name")]
objects = WorkflowManager()
content_type = models.ForeignKey(app_config.CONTENT_TYPE_CLASS, verbose_name=_('Content Type'), on_delete=PROTECT)
field_name = models.CharField(_("Field Name"), max_length=200)
initial_state = models.ForeignKey(State, verbose_name=_("Initial State"), related_name='workflow_this_set_as_initial_state', on_delete=PROTECT)
def natural_key(self):
return self.content_type, self.field_name
def __str__(self):
return "%s.%s" % (self.content_type.model, self.field_name)
```
#### File: django-river/settings/base.py
```python
import os
import sys
import django
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
DEBUG = True
USE_TZ = True
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'behave_django',
'codemirror2',
'river',
)
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
STATIC_URL = '/static/'
class DisableMigrations(object):
def __contains__(self, item):
return True
def __getitem__(self, item):
return None
TESTING = any("py.test" in s for s in sys.argv) or 'test' in sys.argv
# TESTING = True
SITE_ID = 1
SECRET_KEY = '<KEY>'
ROOT_URLCONF = 'test_urls'
RIVER_INJECT_MODEL_ADMIN = True
LOGGING = {
'version': 1,
'disable_existing_loggers': True,
'formatters': {
'default': {
'format': '(%(module)s) (%(name)s) (%(asctime)s) (%(levelname)s) %(message)s',
'datefmt': "%Y-%b-%d %H:%M:%S"
}
},
'handlers': {
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'formatter': 'default',
}
},
'loggers': {
'river': {
'handlers': ['console'],
'level': 'DEBUG'
}
}
}
``` |
{
"source": "JohnieBraaf/lv_binding_micropython",
"score": 3
} |
#### File: lv_binding_micropython/lib/utils.py
```python
import lvgl as lv
import ustruct
import uctypes
# Calculate pointer size on current machine, and corresponding fmt
ptr_size = uctypes.sizeof({'p': (uctypes.PTR, uctypes.VOID)})
fmt_options = {2:'H', 4:'L', 8:'Q'}
buf_fmt = fmt_options[ptr_size] if ptr_size in fmt_options else None
def aligned_buf(buf, alignment):
"""Return an aligned buffer
Given a buffer, return a memory view within that buffer, which starts
at an aligned address in RAM.
The returned memory view is possibly smaller.
!! You must keep a reference to the original buffer to prevent the
garbage collector from collecting the aligned view!
Arguments:
buf -- An object that implements buffer protocol
alignment -- Integer value
"""
p = lv.C_Pointer()
p.ptr_val = buf
if not buf_fmt: return None
addr = ustruct.unpack(buf_fmt, p.ptr_val)[0]
mod = addr % alignment
offset = alignment - mod if mod != 0 else 0
if len(buf) <= offset: return None
addr += offset
p = lv.C_Pointer.__cast__(ustruct.pack(buf_fmt, addr))
return p.ptr_val.__dereference__(len(buf) - offset)
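# Illustrative usage sketch (the 32-byte alignment and buffer size are arbitrary examples):
#   raw = bytearray(1024 + 32)       # over-allocate so an aligned window always fits
#   buf = aligned_buf(raw, 32)       # aligned memoryview into raw, or None on failure
#   # keep a reference to 'raw' for as long as 'buf' is used, per the docstring above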
``` |
{
"source": "JohnieBraaf/Volvo-Media-Controller-Micropython",
"score": 3
} |
#### File: Volvo-Media-Controller-Micropython/pyboard/buf.py
```python
class FrameBuffer(object):
def __init__(self, size):
self.size = size
self.overflow = 0
self.index_put = 0
self.index_get = 0
self.next_index = 0
# dummy frames to pass as reference when buffer is full or empty
self.empty_get_bytes = bytearray(8)
self.empty_get_frame = [0, 0, 0, memoryview(self.empty_get_bytes)]
self.empty_put_bytes = bytearray(8)
self.empty_put_frame = [0, 0, 0, memoryview(self.empty_put_bytes)]
# allocate byte arrays
self.data = []
for i in range(size):
self.data.append(bytearray(8))
# allocate frames with references to the byte arrays
self.frame = []
for i in range(size):
self.frame.append([i, 0, 0, memoryview(self.data[i])])
@micropython.native
def any(self):
if self.index_get != self.index_put:
return True
return False
@micropython.native
def put(self):
self.next_index = (self.index_put + 1) % self.size
if self.index_get != self.next_index:
self.index_put = self.next_index
return self.frame[self.index_put]
else:
self.overflow += 1
return self.empty_put_frame # buffer overflow
@micropython.native
def get(self):
if self.index_get != self.index_put:
self.index_get = (self.index_get + 1) % self.size
return self.frame[self.index_get]
else:
return self.empty_get_frame # buffer empty
class RingBuffer:
def __init__(self, size):
self.size = size + 1
self.data = bytearray(self.size)
self.index_put = 0
self.index_get = 0
self.count = 0
@micropython.native
def any(self):
if self.index_get != self.index_put:
return True
return False
@micropython.native
def put(self, value):
next_index = (self.index_put + 1) % self.size
if self.index_get != next_index:
self.data[self.index_put] = value
self.index_put = next_index
self.count += 1
return value
else:
print("overflow")
return None # buffer full
@micropython.native
def get(self):
if self.any():
value = self.data[self.index_get]
self.index_get = (self.index_get + 1) % self.size
self.count -= 1
return value
else:
return None # buffer empty
```
#### File: Volvo-Media-Controller-Micropython/pyboard/can.py
```python
import micropython, math
import ubinascii, uhashlib
from pyb import CAN
from pyboard.buf import FrameBuffer
class CanInterface:
def __init__(self, itf, baudrate=1_000_000, sample_point=80, \
extframe=False, auto_restart=True, \
params=None, debug_rx=False, debug_tx=False):
self.itf = itf
self.baudrate= baudrate
self.sample_point = sample_point
self.extframe = extframe
self.auto_restart = auto_restart
self.params = params
self.debug_rx = debug_rx
self.debug_tx = debug_tx
self.init()
def init(self):
self._buf = FrameBuffer(64)
self._send_caller = self._sendcb
self._recv_caller = self._recvcb
self._can = CAN(self.itf, CAN.NORMAL, \
baudrate=self.baudrate, sample_point=self.sample_point, \
extframe=self.extframe, auto_restart=self.auto_restart)
try:
# fdcan interface
mode = CAN.RANGE
params = self.params or (0x0, 0x0) # default no filter
self._can.setfilter(bank=self.itf-1, mode=mode, fifo=self.itf-1, params=params)
except Exception as ex:
# classic can interface
if self.extframe: # extended id
mode = CAN.MASK32
params = self.params or (0x0, 0x0) # default no filter
else: # classic id
mode = CAN.MASK16
params = self.params or (0x0, 0x0, 0x0, 0x0) # default no filter
self._can.setfilter(bank=self.itf-1, mode=mode, fifo=self.itf-1, params=params)
self._can.rxcallback(self.itf-1, self.receive)
print ("CAN " + str(self.itf) + " initialized")
def deinit(self):
self._can.rxcallback(self.itf-1, None)
self._can.deinit()
def send(self, message, *args):
# send the message in callback
if len(args) == 1: # bytes and address
micropython.schedule(self._send_caller, (message, args[0]))
else: # micropython can msg format
micropython.schedule(self._send_caller, message)
def _sendcb(self, message):
try:
if isinstance(message, tuple): # bytes and address
count = math.ceil(len(message[0]) / 8)
for i in range(count):
msg_bytes = message[0][i * 8: (i * 8) + 8]
                    if i == count - 1: # last message, take whatever bytes remain
                        msg_bytes = message[0][i * 8:]
self._can.send(msg_bytes, message[1])
self._print('TX', message[1], msg_bytes)
else: # micropython can msg format
self._can.send(message[3], message[0])
self._print('TX', message[0], message[3])
except Exception as ex:
if ex.errno == 110: # ETIMEDOUT
print('cannot send packet on CAN' + str(self.itf) + ', TX queue is full')
else:
print('cannot send packet on CAN' + str(self.itf) + ', '+ str(ex))
@micropython.native
def receive(self, bus, reason):
if 0 <= reason < 3:
while self._can.any(self.itf-1):
self._can.recv(self.itf-1, self._buf.put())
if reason == 2:
print('lost packet on CAN' + str(self.itf) + ', RX queue overflow')
if self.debug_rx: # print all incoming packets
micropython.schedule(self._recv_caller, reason)
def _recvcb(self, reason):
while self._buf.any():
msg = self._buf.get()
self._print('RX', msg[0], msg[3])
def _print(self, direction, address, message):
try:
if (self.debug_rx and direction == 'RX') or (self.debug_tx and direction == 'TX'):
print(direction + ' | ', address, \
'|', '{0: <40}'.format(str(ubinascii.unhexlify(ubinascii.hexlify(message)))), \
'|', ubinascii.hexlify(message, ' '))
except:
print('could not print frame')
def print_frame(self, direction, frame):
try:
print(direction + ' | ', frame[0], \
'|', '{0: <40}'.format(str(ubinascii.unhexlify(ubinascii.hexlify(frame[3])))), \
'|', ubinascii.hexlify(frame[3], ' '))
except:
print('could not print frame')
``` |
{
"source": "JohnInterpreter/ointilj",
"score": 3
} |
#### File: JohnInterpreter/ointilj/extract3333.py
```python
import urllib.request
from bs4 import BeautifulSoup as bs
import struct
import time
class Extract:
def __init__(self):
self.dataToSend = b''
self.myName = input('Enter your nickname: ')
while len(self.myName) > 11:
print("Please enter less than 12 characters: ")
self.myName = input('Enter your nickname: ')
if len(self.myName) < 12:
break
self.myName = self.myName + ' '
def findGPU(self):
my_name_byte = bytes(self.myName, encoding='utf-8')
        # Put the address that PhoenixMiner serves into url, and read the mining info into data.
url = urllib.request.urlopen('http://127.0.0.1:3333')
data = url.read()
        # Pass the page to BeautifulSoup and extract the text.
soup = bs(data, "html.parser")
text = soup.get_text()
        # Convert it to a list and extract the information we need.
textS = text.split()
        # Find the position of the information we want.
try:
gpuN = textS.index('GPUs:')
        # Occasionally the 'GPUs:' entry does not appear, so wait 3 seconds and handle the exception by fetching the mining info again.
except ValueError:
time.sleep(3)
            url = urllib.request.urlopen('http://127.0.0.1:3333')
data = url.read()
soup = bs(data, "html.parser")
text = soup.get_text()
textS = text.split()
gpuN = textS.index('GPUs:')
        # Prepare textAll to hold only the mining info we need.
textAll = []
        # To save resources it is enough to scan only the front part: each GPU takes 33 characters, so assuming at most 12 GPUs (12 * 33 = 396), scanning up to 500 characters is plenty.
for i in range(500):
            # If a list element such as 'Eth:' appears, the mining info we need ends there, so extract only up to that point.
if textS[gpuN + i + 1] != 'Eth:' and textS[gpuN + i + 1] != 'Eth' and textS[gpuN + i + 1] != 'GPU1:' and \
textS[gpuN + i + 1] != '***' and textS[gpuN + i + 1] != 'Available':
                # Store the needed mining info in textAll.
# print(textS[gpuN+i])
textAll.append(textS[gpuN + i + 1])
else:
# print(textAll)
break
        # Each GPU contributes 4 list elements, so divide by 4 to get the total GPU count and store it in gpuA.
gpuA = int(len(textAll) / 4)
        # Convert the list to a string to prepare it for sending.
textAll_str = str(textAll)
        # Convert it to bytes so it can be sent.
textAll_byte = bytes(textAll_str, encoding='utf-8')
        # Pack it in big-endian (>) format.
self.dataToSend = struct.pack('> 12s 500s I', my_name_byte, textAll_byte, gpuA)
"""
        # Prepare for packing.
values = (textAll_byte, gpuA)
        # Set the packing format to big-endian (>).
fmt = '>{}s I'.format(len(textAll_str))
        # Define the packing structure.
packer = struct.Struct(fmt)
        # Do the packing.
textAll_Num = len(textAll_byte)
        # Print it for testing.
#print(dataToSend)
        # Send the packed data.
#client_socket.send(sendData)
"""
"""
        Practice code
try:
a = textS.index('fdk')
except ValueError:
print("su")
for i in range(200):
if textS[gpuN+i] != 'Eth':
print(textS[gpuN+i])
else:
break
"""
```
#### File: JohnInterpreter/ointilj/serverNINESOFT.py
```python
import socket
import threading
import time
class Cserver(threading.Thread):
def __init__(self, socket):
super().__init__()
self.s_socket = socket
self.clients_Num = 0
def run(self):
global index
global clients
self.c_socket, addr = self.s_socket.accept()
clients.append(self.c_socket)
self.clients_Num = len(clients)
print('All Workers: ', self.clients_Num, addr[0], addr[1], 'are connected')
index = index + 1
creat_thread(self.s_socket)
t = threading.Thread(target=self.c_recv)
t.daemon = True
t.start()
def c_recv(self):
print(self.c_socket)
while True:
get_data= self.c_socket.recv(1024)
get_data_str = str(get_data.decode('utf-8'))
get_data_spl = get_data_str.split()
print('Worker: ', get_data.decode('utf-8'))
time.sleep(3)
#print('Worker: ', get_data_spl[0], get_data.decode('utf-8'))
"""
for i in range(self.clients_Num):
get_data[i] = clients[i].c_socket.recv(1024)
time.sleep(3)
for i in range(self.clients_Num):
print(get_data[i].decode('utf-8'))
time.sleep(3)
"""
#def c_send(self,put_data):
# self.c_socket.send(put_data.encode('utf-8'))
def creat_thread(s_socket):
global index
t.append(Cserver(s_socket))
t[index].daemon = True
t[index].start()
t=[]
index=0
clients = []
s_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
bufsize= 1024
host= input("Welcome to NINESOFT Server \n Enter the Server IP:")
port=9999
s_socket.bind((host, port))
s_socket.listen(5)
creat_thread(s_socket)
while True:
pass
#st_data = input('Message to send:' + '\n')
#c_socket.send(st_data.encode('utf-8'))
s_socket.close()
"""
while True:
try:
for i in t:
i.c_send(put_data)
except Exception as e:
pass
for j in t:
try:
j.c_socket.close()
except Exception as e:
pass
"""
"""
def data_recv():
while True:
get_data = c_socket.recv(1024)
print(get_data.decode('utf-8'))
s_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
bufsize = 1024
host="127.0.0.1"
port=5001
s_socket.bind((host, port))
s_socket.listen(5)
c_socket, addr = s_socket.accept()
print(addr,"has been connected")
t = threading.Thread(target = data_recv)
t.daemon = True
t.start()
while True:
st_data = input('Message to send:' + '\n')
c_socket.send(st_data.encode('utf-8'))
#time.sleep(3)
c_socket.close()
s_socket.close()
"""
``` |
{
"source": "johnisanerd/SlackLinkChecker",
"score": 2
} |
#### File: johnisanerd/SlackLinkChecker/slackbot_link_reporter.py
```python
from slackclient import SlackClient
import os, sys, datetime, time
import csv
import subprocess
import fileinput
write_debug_bool = True
filename_raw = "test.csv"
filename_out = "clean_test.csv"
try:
slack_token = os.environ['SLACK_API_TOKEN']
sc = SlackClient(slack_token)
except:
print("Could not find slack API token on this machine.")
print("Aborting.")
sys.exit()
def write_debug(in_string):
if(write_debug_bool):
print("DEBUG: " + str(in_string))
def send_bash_command(bashCommand):
write_debug(bashCommand)
process = subprocess.Popen(bashCommand.split(), stdout=subprocess.PIPE) #, stderr=subprocess.PIPE)
output = process.communicate()[0]
return output
# Note the start time.
time_start = time.time()
print("Time in seconds since the epoch: %s" %time.time())
# Run check_links.sh
command = "bash /home/john/SlackLinkChecker/check_links.sh"
send_bash_command(command)
# Filter for things we don't want to show up.
# Return True if we find something we don't want
# Return False if we find something we DO want
def filter_test(string_in):
# Things we don't want to see!
filters = ["mailto", "add-to-cart", "SSLError"]
    for test in filters:
        if string_in.find(test) > 0:
            return True
    return False
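# Illustrative check with a hypothetical URL:
#   filter_test("https://example.com/?add-to-cart=42") -> True   (contains "add-to-cart")
#   filter_test("https://example.com/blog/post-1")     -> False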
# Clear out things you don't want. Like mailto links
with open(filename_raw, "r") as f:
data_in = f.read()
data_in = data_in.split("\n")
data_in = list(filter(None, data_in))
data_out = []
for row in data_in:
if not filter_test(row):
data_out.append(row)
with open(filename_out, "w") as f:
for each in data_out:
f.write(str(each)+"\n")
# Clean out all the duplicates.
seen = set() # set for fast O(1) amortized lookup
for line in fileinput.FileInput(filename_out, inplace=1):
if line in seen: continue # skip duplicate
seen.add(line)
print line, # standard output is now redirected to the file
# Note the end time.
end_time = time.time()
print("Time in seconds since the epoch: %s" %time.time())
total_time = end_time - time_start
total_time = total_time // (60*60)
print("Total crawl time (h) was: %s" %total_time)
# Count the number of lines in the file.
# Post the file and the number of lines in the file to Slack.
# Upload a file
command = "curl -F file=@" + filename_out + " -F title='Hello!' -F content='Hello' -F channels=#website-broken-links -F token=" + slack_token +" https://slack.com/api/files.upload"
send_bash_command(command)
text_for_chat = "Total time to run crawl in hours: " + str(total_time)
sc.api_call(
"chat.postMessage",
channel="#website-broken-links",
text=text_for_chat
)
``` |
{
"source": "johnistan/confluent-kafka-python",
"score": 2
} |
#### File: confluent-kafka-python/tests/test_Consumer.py
```python
from confluent_kafka import Consumer, TopicPartition, KafkaError, KafkaException
def test_basic_api():
""" Basic API tests, these wont really do anything since there is no
broker configured. """
try:
kc = Consumer()
except TypeError as e:
assert str(e) == "expected configuration dict"
def dummy_commit_cb (err, partitions):
pass
kc = Consumer({'group.id':'test', 'socket.timeout.ms':'100',
'session.timeout.ms': 1000, # Avoid close() blocking too long
'on_commit': dummy_commit_cb})
kc.subscribe(["test"])
kc.unsubscribe()
def dummy_assign_revoke (consumer, partitions):
pass
kc.subscribe(["test"], on_assign=dummy_assign_revoke, on_revoke=dummy_assign_revoke)
kc.unsubscribe()
msg = kc.poll(timeout=0.001)
if msg is None:
print('OK: poll() timeout')
elif msg.error():
print('OK: consumer error: %s' % msg.error().str())
else:
print('OK: consumed message')
partitions = list(map(lambda p: TopicPartition("test", p), range(0,100,3)))
kc.assign(partitions)
kc.unassign()
kc.commit(async=True)
try:
kc.commit(async=False)
except KafkaException as e:
assert e.args[0].code() in (KafkaError._TIMED_OUT, KafkaError._NO_OFFSET)
# Get current position, should all be invalid.
kc.position(partitions)
assert len([p for p in partitions if p.offset == -1001]) == len(partitions)
try:
offsets = kc.committed(partitions, timeout=0.001)
except KafkaException as e:
assert e.args[0].code() == KafkaError._TIMED_OUT
kc.close()
``` |
{
"source": "JohnJ255/text2num",
"score": 2
} |
#### File: text2num/tests/test_text_to_num_ru.py
```python
import sys
"""
Test the ``text_to_num`` library.
"""
from unittest import TestCase
from text_to_num import alpha2digit, text2num
class TestTextToNumRU(TestCase):
def test_text2num(self):
test1 = "пятьдесят три миллиарда двести сорок три тысячи семьсот двадцать четыре"
self.assertEqual(text2num(test1, 'ru'), 53_000_243_724)
test2 = (
"пятьдесят один миллион пятьсот семьдесят восемь тысяч триста два"
)
self.assertEqual(text2num(test2, 'ru'), 51_578_302)
test3 = "восемьдесят пять"
self.assertEqual(text2num(test3, 'ru'), 85)
test4 = "восемьдесят один"
self.assertEqual(text2num(test4, 'ru'), 81)
self.assertEqual(text2num("пятьнадцать", 'ru'), 15)
self.assertEqual(text2num("сто пятьнадцать", 'ru'), 115)
self.assertEqual(text2num("сто пятнадцать", 'ru'), 115)
self.assertEqual(text2num("семьдесят пять тысяч", 'ru'), 75000)
self.assertEqual(text2num("тысяча девятьсот двадцать", 'ru'), 1920)
self.assertEqual(text2num("одна тысяча девятьсот двадцать", 'ru'), 1920)
def test_text2num_centuries(self):
self.assertEqual(text2num("тысяча девятьсот семьдесят три", 'ru'), 1973)
def test_text2num_exc(self):
self.assertRaises(ValueError, text2num, "тысяча тысяча двести", 'ru')
self.assertRaises(ValueError, text2num, "шестьдесят пятьдесят", 'ru')
self.assertRaises(ValueError, text2num, "шестьдесят сто", 'ru')
def test_text2num_zeroes(self):
self.assertEqual(0, text2num("ноль", 'ru'))
self.assertEqual(8, text2num("ноль восемь", 'ru'), 8)
self.assertEqual(125, text2num("ноль ноль сто двадцать пять", 'ru'))
self.assertRaises(ValueError, text2num, "пять ноль", 'ru')
self.assertRaises(ValueError, text2num, "пять ноль три", 'ru')
self.assertRaises(ValueError, text2num, "пятьдесят три ноль", 'ru')
def test_alpha2digit_phones(self):
source = "восемь девятьсот два сто один ноль один ноль один"
expected = "8 902 101 01 01"
self.assertEqual(expected, alpha2digit(source, 'ru'))
source = "плюс семь восемьсот пятьдесят девять сто один ноль сто один"
expected = "+7 859 101 0101"
self.assertEqual(expected, alpha2digit(source, 'ru'))
source = "Телефон восемь девятьсот шестьдесят два пятьсот девятнадцать семьдесят ноль ноль"
expected = "Телефон 8 962 519 70 00"
self.assertEqual(expected, alpha2digit(source, 'ru'))
source = "три сто пять сто один ноль один ноль один"
expected = "3 105 101 01 01"
self.assertEqual(expected, alpha2digit(source, 'ru'))
def test_alpha2digit_integers(self):
source = "Двадцать пять коров, двенадцать сотен цыплят и сто двадцать пять точка сорок кг картофеля."
expected = "25 коров, 1200 цыплят и 125.40 кг картофеля."
self.assertEqual(expected, alpha2digit(source, 'ru'))
source = "Одна сотня огурцов, две сотни помидор, пять сотен рублей."
expected = "100 огурцов, 200 помидор, 500 рублей."
self.assertEqual(expected, alpha2digit(source, 'ru'))
source = "одна тысяча двести шестьдесят шесть рублей."
expected = "1266 рублей."
self.assertEqual(expected, alpha2digit(source, 'ru'))
source = "тысяча двести шестьдесят шесть рублей."
self.assertEqual(expected, alpha2digit(source, 'ru'))
source = "один, два, три, четыре, двадцать, пятьнадцать"
expected = "1, 2, 3, 4, 20, 15"
self.assertEqual(expected, alpha2digit(source, 'ru'))
source = "двадцать один, тридцать один."
expected = "21, 31."
self.assertEqual(expected, alpha2digit(source, 'ru'))
def test_relaxed(self):
source = "один два три четыре двадцать пять."
expected = "1 2 3 4 25."
self.assertEqual(expected, alpha2digit(source, 'ru', relaxed=True))
source = "один два три четыре двадцать, пять."
expected = "1 2 3 4 20, 5."
self.assertEqual(expected, alpha2digit(source, 'ru', relaxed=True))
def test_alpha2digit_formal(self):
source = "плюс тридцать три, девять, шестьдесят, ноль шесть, двенадцать, двадцать один"
expected = "+33, 9, 60, 06, 12, 21"
self.assertEqual(expected, alpha2digit(source, 'ru'))
source = "ноль девять, шестьдесят, ноль шесть, двенадцать, двадцать один"
expected = "09, 60, 06, 12, 21"
self.assertEqual(expected, alpha2digit(source, 'ru'))
source = "Сам по себе я одиночка"
self.assertEqual(source, alpha2digit(source, 'ru'))
def test_and(self):
source = "пятьдесят, шестьдесят, тридцать и одиннадцать"
expected = "50, 60, 30 и 11"
self.assertEqual(expected, alpha2digit(source, 'ru'))
def test_alpha2digit_zero(self):
source = "тринадцать тысяч, ноль девяносто"
expected = "13000, 090"
self.assertEqual(expected, alpha2digit(source, 'ru'))
self.assertEqual("0", alpha2digit("ноль", 'ru'))
def test_alpha2digit_ordinals_force(self):
source = (
"Пятый, третий, второй, двадцать первый, сотый, тысяча двести тридцатый, двадцать пятый, тридцать восьмой, сорок девятый."
)
expected = "5ый, 3ий, 2ой, 21ый, 100ый, 1230ый, 25ый, 38ой, 49ый."
self.assertEqual(expected, alpha2digit(source, 'ru', ordinal_threshold=0))
source = (
"первый, второй, третий, четвёртый, четвертый, пятый, шестой, седьмой, восьмой, девятый, десятый."
)
expected = "1ый, 2ой, 3ий, 4ый, 4ый, 5ый, 6ой, 7ой, 8ой, 9ый, 10ый."
self.assertEqual(expected, alpha2digit(source, 'ru', ordinal_threshold=0))
source = "двадцать второе место на двадцать первой олимпиаде занял первый и второй"
expected = "22ое место на 21ой олимпиаде занял 1ый и 2ой"
self.assertEqual(expected, alpha2digit(source, 'ru', ordinal_threshold=0))
source = "каждый пятый на первый второй расчитайсь!"
expected = "каждый 5ый на 1ый 2ой расчитайсь!"
self.assertEqual(expected, alpha2digit(source, 'ru', ordinal_threshold=0))
def test_alpha2digit_decimals(self):
source = (
"двенадцать точка девяносто девять, сто двадцать точка ноль пять,"
" сто двадцать целых ноль пять, одна целая двести тридцать шесть."
)
expected = "12.99, 120.05, 120.05, 1.236."
self.assertEqual(expected, alpha2digit(source, 'ru'))
self.assertEqual("0.15", alpha2digit("точка пятьнадцать", 'ru'))
self.assertEqual("0.15", alpha2digit("ноль целых пятьнадцать", 'ru'))
def test_alpha2digit_signed(self):
source = "В комнате плюс двадцать градусов, тогда как на улице минус пятьдесят."
expected = "В комнате +20 градусов, тогда как на улице -50."
self.assertEqual(expected, alpha2digit(source, 'ru'))
def test_uppercase(self):
source = "ПЯТЬНАДЦАТЬ ОДИН ДЕСЯТЬ ОДИН"
expected = "15 1 10 1"
self.assertEqual(expected, alpha2digit(source, 'ru'))
def test_hundreds(self):
source = "пятьдесят один миллион пятьсот семьдесят восемь тысяч триста два"
expected = 51578302
self.assertEqual(expected, text2num(source, 'ru'))
source = "восемьдесят один"
expected = 81
self.assertEqual(expected, text2num(source, 'ru'))
source = "восемьсот"
expected = 800
self.assertEqual(expected, text2num(source, 'ru'))
source = "сто"
expected = 100
self.assertEqual(expected, text2num(source, 'ru'))
source = "сто двадцать"
expected = 120
self.assertEqual(expected, text2num(source, 'ru'))
source = "сто два"
expected = 102
self.assertEqual(expected, text2num(source, 'ru'))
source = "семьсот один"
expected = 701
self.assertEqual(expected, text2num(source, 'ru'))
source = "восемьсот миллионов"
expected = 800_000_000
self.assertEqual(expected, text2num(source, 'ru'))
def test_big_numbers(self):
source = "триллион миллиард миллион тысяча один"
expected = 1_001_001_001_001
self.assertEqual(expected, text2num(source, 'ru'))
source = "один триллион один миллиард один миллион одна тысяча один"
expected = 1_001_001_001_001
self.assertEqual(expected, text2num(source, 'ru'))
source = "одиннадцать триллионов одиннадцать миллиардов одиннадцать миллионов одиннадцать тысяч одиннадцать"
expected = 11_011_011_011_011
self.assertEqual(expected, text2num(source, 'ru'))
source = "сто одиннадцать триллионов сто одиннадцать миллиардов сто одиннадцать миллионов сто одиннадцать тысяч сто одиннадцать"
expected = 111_111_111_111_111
self.assertEqual(expected, text2num(source, 'ru'))
source = "сто десять триллионов сто десять миллиардов сто десять миллионов сто десять тысяч сто десять"
expected = 110_110_110_110_110
self.assertEqual(expected, text2num(source, 'ru'))
``` |