prompt stringlengths 15–655k | completion stringlengths 3–32.4k | api stringlengths 8–52 |
---|---|---|
from matplotlib import pyplot as plt
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2Tk
from matplotlib.backend_bases import key_press_handler
import os
import numpy as np
import sys
import random
from scipy.interpolate import interp1d, interp2d, griddata
from mpl_toolkits.axes_grid1 import make_axes_locatable
from scipy.spatial import ConvexHull
from matplotlib.path import Path
from matplotlib.patches import PathPatch
import warnings
from tkinter import *
from tkinter import filedialog, messagebox, simpledialog
import datetime
from pygimli.physics import Refraction
import pygimli as pg
from matplotlib.colors import LogNorm
import matplotlib.colors
warnings.filterwarnings('ignore')
class Sisref(Frame):
def __init__(self, master):
Frame.__init__(self, master)
self.grid(row = 0, column = 0, sticky = NSEW)
master.geometry("1360x768")
master.resizable(0,0)
#master.protocol("WM_DELETE_WINDOW", self.fechar)
master.title('REFRAINV')
menuBar = Menu(master)
fileMenu = Menu(master, tearoff=0)
ttMenu = Menu(master, tearoff=0)
tomoMenu = Menu(master, tearoff=0)
layerIntMenu = Menu(master, tearoff=0)
createpygimli = Menu(master, tearoff=0)
tomoinversion = Menu(master, tearoff=0)
tomoVMplot = Menu(master, tearoff=0)
tomocmap = Menu(master, tearoff=0)
editTTtomo = Menu(master, tearoff=0)
runInvMenu = Menu(master, tearoff=0)
editTT = Menu(master, tearoff=0)
TTplot = Menu(master, tearoff=0)
VMplot = Menu(master, tearoff=0)
TTcolors = Menu(master, tearoff=0)
VMcolors = Menu(master, tearoff=0)
helpMenu = Menu(master, tearoff=0)
menuBar.add_cascade(label = 'File', menu = fileMenu)
fileMenu.add_cascade(label='Open travel times file',
command= lambda: print(""))
fileMenu.add_cascade(label='Save results',
command= lambda: print(""))
fileMenu.add_cascade(label='Restart all analysis',
command= lambda: print(""))
fileMenu.add_separator()
fileMenu.add_cascade(label='Exit',
command= lambda: print(""))
menuBar.add_cascade(label = 'Time-term analysis', menu = ttMenu)
ttMenu.add_cascade(label='Layer interpretation', menu = layerIntMenu)
layerIntMenu.add_command(label="Star/stop layer assignment", command = lambda: print(""))
layerIntMenu.add_separator()
layerIntMenu.add_command(label="Layer 1", command = lambda: print(""))
layerIntMenu.add_command(label="Layer 2", command = lambda: print(""))
layerIntMenu.add_command(label="Layer 3", command = lambda: print(""))
layerIntMenu.add_separator()
layerIntMenu.add_command(label="Clear assignmanets", command = lambda: print(""))
ttMenu.add_cascade(label='Inversion', menu = runInvMenu)
runInvMenu.add_command(label="Use elevation file", command = lambda: print(""))
runInvMenu.add_separator()
runInvMenu.add_command(label="Run default inversion", command = lambda: print(""))
runInvMenu.add_command(label="Run inversion choosing regularization parameter", command = lambda: print(""))
ttMenu.add_cascade(label='Edit travel times', menu = editTT)
editTT.add_command(label="Open travel times file", command = lambda: print(""))
editTT.add_command(label="Save travel times file", command = lambda: print(""))
ttMenu.add_separator()
ttMenu.add_cascade(label='Travel-times plot', menu = TTplot)
TTplot.add_cascade(label='Travel-times color', menu = TTcolors)
TTcolors.add_command(label="Use colors", command = self.tt_TTcolors)
TTcolors.add_command(label="Black", command = self.tt_TTblack)
TTplot.add_separator()
TTplot.add_command(label="Show/hide sources", command = self.tt_TTshowS)
TTplot.add_command(label="Show/hide grid", command = self.tt_TTshowGrid)
ttMenu.add_cascade(label='Velocity model plot', menu = VMplot)
VMplot.add_cascade(label='Layer colors', menu = VMcolors)
VMcolors.add_command(label="Use colors", command = self.tt_LayerColors)
VMcolors.add_command(label="Black", command = self.tt_LayerBlack)
VMplot.add_separator()
VMplot.add_command(label="Show/hide sources", command = self.tt_VMshowS)
VMplot.add_command(label="Show/hide geophones", command = self.tt_VMshowG)
VMplot.add_command(label="Show/hide grid", command = self.tt_VMshowGrid)
ttMenu.add_separator()
ttMenu.add_command(label="Save analysis results", command = self.tt_save)
ttMenu.add_command(label="Restart analysis", command = self.restart)
menuBar.add_cascade(label = 'Tomography analysis', menu = tomoMenu)
tomoMenu.add_cascade(label='Create pyGIMLi travel-times file', command = self.tomo_create)
tomoMenu.add_cascade(label='Inversion', menu = tomoinversion)
tomoinversion.add_command(label="Run inversion", command = self.tomo_invert)
tomoinversion.add_separator()
tomoinversion.add_command(label="Load parameters", command = self.tomo_loadParams)
tomoinversion.add_separator()
tomoinversion.add_command(label="Show fit", command = self.tomo_showFit)
tomoMenu.add_cascade(label='Edit travel-times', menu = editTTtomo)
editTTtomo.add_command(label="Open travel-times file", command = self.tomo_editTT)
editTTtomo.add_command(label="Save travel-times file", command = lambda: print(""))
tomoMenu.add_separator()
tomoMenu.add_cascade(label='Velocity model plot', menu = tomoVMplot)
tomoVMplot.add_cascade(label='Colormap', menu = tomocmap)
tomocmap.add_command(label="jet", command = self.tomo_cmapJet)
tomocmap.add_command(label="gist rainbow", command = self.tomo_cmapGistr)
tomocmap.add_command(label="viridis", command = self.tomo_cmapGistn)
tomocmap.add_command(label="spectral", command = self.tomo_cmapNipys)
tomocmap.add_command(label="brw", command = self.tomo_cmapbrw)
tomocmap.add_command(label="greys", command = self.tomo_cmapGreys)
tomoVMplot.add_separator()
tomoVMplot.add_command(label="Show triangular mesh model", command = self.tomo_triangular)
tomoVMplot.add_command(label="Show interpolated model", command = self.tomo_interpolated)
tomoVMplot.add_separator()
tomoVMplot.add_command(label="Plot time-terms results", command = self.tomo_usett)
tomoVMplot.add_separator()
tomoVMplot.add_command(label="Show/hide sources", command = self.tomo_VMshowS)
tomoVMplot.add_command(label="Show/hide geophones", command = self.tomo_VMshowG)
tomoVMplot.add_command(label="Show/hide ray path", command = self.tomo_showRP)
tomoVMplot.add_command(label="Show/hide grid", command = lambda: print(""))
tomoMenu.add_separator()
tomoMenu.add_cascade(label='Save analysis result',command= self.tomo_save)
tomoMenu.add_cascade(label='Restart analysis',command= self.restart)
menuBar.add_cascade(label = 'Help', menu = helpMenu)
helpMenu.add_cascade(label='Tutorial',command= lambda: print(""))
helpMenu.add_cascade(label='Report a bug',command= lambda: print(""))
helpMenu.add_cascade(label='Credits',command= lambda: print(""))
master.configure(menu=menuBar)
plt.rcParams.update({'font.size': 8})
self.tt_frame1 = Frame(self)
self.tt_frame1.grid(row = 1, column = 0, sticky = NSEW)
self.tt_fig1 = plt.figure(figsize = (6.85,3.65))
self.tt_ax1 = self.tt_fig1.add_subplot(111)
tt_tela1 = FigureCanvasTkAgg(self.tt_fig1, self.tt_frame1)
tt_tela1.draw()
tt_tela1.get_tk_widget().pack(fill='both', expand=True)
tt_toolbar1 = NavigationToolbar2Tk(tt_tela1, self.tt_frame1)
tt_toolbar1.update()
tt_tela1._tkcanvas.pack(fill='both', expand=True)
self.tt_ax1.set_title("Traveltime curves panel")
self.tt_frame2 = Frame(self)
self.tt_frame2.grid(row = 1, column = 2, sticky = NSEW)
self.tt_fig2 = plt.figure(figsize = (6.85,3.65), facecolor = "white")
self.tt_ax2 = self.tt_fig2.add_subplot(111)
tt_tela2 = FigureCanvasTkAgg(self.tt_fig2, self.tt_frame2)
tt_tela2.draw()
tt_tela2.get_tk_widget().pack(fill='both', expand=True)
tt_toolbar2 = NavigationToolbar2Tk(tt_tela2, self.tt_frame2)
tt_toolbar2.update()
tt_tela2._tkcanvas.pack(fill='both', expand=True)
self.tt_ax2.set_title("Fit and edditing panel")
self.tt_frame3 = Frame(self)
self.tt_frame3.grid(row = 2, column = 0, columnspan=3, sticky = NSEW)
self.tt_fig3 = plt.figure(figsize = (13.7,3.04))
self.tt_ax3 = self.tt_fig3.add_subplot(111)
self.tt_ax3.set_aspect("equal")
tt_tela3 = FigureCanvasTkAgg(self.tt_fig3, self.tt_frame3)
tt_tela3.draw()
tt_tela3.get_tk_widget().pack(fill='both', expand=True)
tt_toolbar3 = NavigationToolbar2Tk(tt_tela3, self.tt_frame3)
tt_toolbar3.update()
tt_tela3._tkcanvas.pack(fill='both', expand=True)
self.tt_ax3.set_title("Time-terms velocity model panel")
#---------------
self.tomo_frame1 = Frame(self)
self.tomo_frame1.grid(row = 1, column = 0, sticky = NSEW)
self.tomo_fig1 = plt.figure(figsize = (6.85,3.65))
self.tomo_ax1 = self.tomo_fig1.add_subplot(111)
tomo_tela1 = FigureCanvasTkAgg(self.tomo_fig1, self.tomo_frame1)
tomo_tela1.draw()
tomo_tela1.get_tk_widget().pack(fill='both', expand=True)
tomo_toolbar1 = NavigationToolbar2Tk(tomo_tela1, self.tomo_frame1)
tomo_toolbar1.update()
tomo_tela1._tkcanvas.pack(fill='both', expand=True)
self.tomo_ax1.set_title("Traveltime curves panel")
self.tomo_frame2 = Frame(self)
self.tomo_frame2.grid(row = 1, column = 2, sticky = NSEW)
self.tomo_fig2 = plt.figure(figsize = (6.85,3.65))
self.tomo_ax2 = self.tomo_fig2.add_subplot(111)
tomo_tela2 = FigureCanvasTkAgg(self.tomo_fig2, self.tomo_frame2)
tomo_tela2.draw()
tomo_tela2.get_tk_widget().pack(fill='both', expand=True)
tomo_toolbar2 = NavigationToolbar2Tk(tomo_tela2, self.tomo_frame2)
tomo_toolbar2.update()
tomo_tela2._tkcanvas.pack(fill='both', expand=True)
self.tomo_ax2.set_title("Fit and edditing panel")
self.tomo_frame3 = Frame(self)
self.tomo_frame3.grid(row = 2, column = 0, columnspan=3, sticky = NSEW)
self.tomo_fig3 = plt.figure(figsize = (13.7,3.04))
self.tomo_ax3 = self.tomo_fig3.add_subplot(111)
self.tomo_ax3.set_aspect("equal")
tomo_tela3 = FigureCanvasTkAgg(self.tomo_fig3, self.tomo_frame3)
tomo_tela3.draw()
tomo_tela3.get_tk_widget().pack(fill='both', expand=True)
tomo_toolbar3 = NavigationToolbar2Tk(tomo_tela3, self.tomo_frame3)
tomo_toolbar3.update()
tomo_tela3._tkcanvas.pack(fill='both', expand=True)
self.tomo_ax3.set_title("Tomography velocity model panel")
self.tomo_frame4 = Frame(self)
self.tomo_frame4.grid(row = 2, column = 0, columnspan=3, sticky = NSEW)
self.tomo_fig4 = plt.figure(figsize = (13.7,3.04))
self.tomo_ax4 = self.tomo_fig4.add_subplot(111)
self.tomo_ax4.set_aspect("equal")
tomo_tela4 = FigureCanvasTkAgg(self.tomo_fig4, self.tomo_frame4)
tomo_tela4.draw()
tomo_tela4.get_tk_widget().pack(fill='both', expand=True)
tomo_toolbar4 = NavigationToolbar2Tk(tomo_tela4, self.tomo_frame4)
tomo_toolbar4.update()
tomo_tela4._tkcanvas.pack(fill='both', expand=True)
self.tomo_ax4.set_title("Tomography velocity model panel")
toolbar_frame = Frame(self)
toolbar_frame.grid(row = 0, column = 0, columnspan=3, sticky = NSEW)
self.img_abrir = PhotoImage(file="%s/images/abrir.gif"%os.getcwd())
self.img_salvar = PhotoImage(file="%s/images/salvar.gif"%os.getcwd())
self.img_proximo = PhotoImage(file="%s/images/proximo.gif"%os.getcwd())
self.img_voltar = PhotoImage(file="%s/images/voltar.gif"%os.getcwd())
self.img_camadas = PhotoImage(file="%s/images/camadas.gif"%os.getcwd())
self.img_edit = PhotoImage(file="%s/images/edit.gif"%os.getcwd())
self.img_L1 = PhotoImage(file="%s/images/layer1.gif"%os.getcwd())
self.img_L2 = PhotoImage(file="%s/images/layer2.gif"%os.getcwd())
self.img_L3 = PhotoImage(file="%s/images/layer3.gif"%os.getcwd())
self.img_vm = PhotoImage(file="%s/images/vm.gif"%os.getcwd())
self.img_limpar = PhotoImage(file="%s/images/limpar.gif"%os.getcwd())
self.img_topo = PhotoImage(file="%s/images/topo.gif"%os.getcwd())
self.img_editOK = PhotoImage(file="%s/images/editOK.gif"%os.getcwd())
self.img_star = PhotoImage(file="%s/images/star.gif"%os.getcwd())
self.img_geophone = PhotoImage(file="%s/images/geophone.gif"%os.getcwd())
self.img_tomogram = PhotoImage(file="%s/images/tomogram.gif"%os.getcwd())
self.img_restart = PhotoImage(file="%s/images/restart.gif"%os.getcwd())
self.img_create = PhotoImage(file="%s/images/create.gif"%os.getcwd())
self.img_inv = PhotoImage(file="%s/images/inv.gif"%os.getcwd())
root.tk.call('wm', 'iconphoto', root._w, self.img_inv)
Abrir = Button(toolbar_frame, command = self.openTT)
Abrir.config(image = self.img_abrir)
Abrir.grid(row=0,column=0,sticky=W)
Salvar = Button(toolbar_frame, command = lambda: print("ai"))
Salvar.config(image = self.img_salvar)
Salvar.grid(row=0,column=1,sticky=W)
Voltar = Button(toolbar_frame, command = self.back)
Voltar.config(image = self.img_voltar)
Voltar.grid(row=0,column=2,sticky=W)
Proximo = Button(toolbar_frame, command = self.next)
Proximo.config(image = self.img_proximo)
Proximo.grid(row=0,column=3,sticky=W)
Camadas = Button(toolbar_frame, command = self.tt_layerInterpretation)
Camadas.config(image = self.img_camadas)
Camadas.grid(row=0,column=4,sticky=W)
L1 = Button(toolbar_frame, command = self.tt_L1)
L1.config(image = self.img_L1)
L1.grid(row=0,column=5,sticky=W)
L2 = Button(toolbar_frame, command = self.tt_L2)
L2.config(image = self.img_L2)
L2.grid(row=0,column=6,sticky=W)
L3 = Button(toolbar_frame, command = self.tt_L3)
L3.config(image = self.img_L3)
L3.grid(row=0,column=7,sticky=W)
VM = Button(toolbar_frame, command = self.tt_invert)
VM.config(image = self.img_vm)
VM.grid(row=0,column=8,sticky=W)
Edit = Button(toolbar_frame, command = self.editTT)
Edit.config(image = self.img_edit)
Edit.grid(row=0,column=9,sticky=W)
EditOK = Button(toolbar_frame, command = self.saveTT)
EditOK.config(image = self.img_editOK)
EditOK.grid(row=0,column=10,sticky=W)
Limpar = Button(toolbar_frame, command = self.tt_clearInterpretation)
Limpar.config(image = self.img_limpar)
Limpar.grid(row=0,column=11,sticky=W)
#Topo = Button(toolbar_frame, command = self.tt_topo)
#Topo.config(image = self.img_topo)
#Topo.grid(row=0,column=12,sticky=W)
Create = Button(toolbar_frame, command = self.tomo_create)
Create.config(image = self.img_create)
Create.grid(row=0,column=12,sticky=W)
Tomogram = Button(toolbar_frame, command = self.tomo_invert)
Tomogram.config(image = self.img_tomogram)
Tomogram.grid(row=0,column=13,sticky=W)
'''Fonte = Button(toolbar_frame, command = self.tt_topo)
Fonte.config(image = self.img_star)
Fonte.grid(row=0,column=14,sticky=W)
Geofone = Button(toolbar_frame, command = self.tt_topo)
Geofone.config(image = self.img_geophone)
Geofone.grid(row=0,column=15,sticky=W)'''
Restart = Button(toolbar_frame, command = self.restart)
Restart.config(image = self.img_restart)
Restart.grid(row=0,column=14,sticky=W)
self.tt_pltTT = False
self.tt_vmGrid = True
self.tt_TTGrid = True
self.tt_TTSources = True
self.tt_VMSources = False
self.tt_VMGeophones = False
self.tt_pltG = False
self.tt_pltS = False
self.cm = False
self.TTfile = False
self.TTfile_ext = False
self.tomo_TTplot = False
self.tomo_pltS = False
self.tomo_pltG = False
self.tomo_pltG_triang = False
self.tomo_pltS_triang = False
self.tomo_cmap = plt.cm.get_cmap("jet")
self.tt_frame1.tkraise()
self.tt_frame2.tkraise()
self.tt_frame3.tkraise()
self.page = 1
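# Launch note (a hedged sketch, not part of this excerpt): __init__ references a global
# Tk root (see the wm iconphoto call above), so the application is presumably started
# elsewhere with something along these lines:
#   root = Tk()
#   app = Sisref(root)
#   root.mainloop()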
def openTT(self):
try:
self.TTfile = filedialog.askopenfilename(title='Open',filetypes=[('Refrapy pick','*.rp'),
('pyGIMLi pick','*.sgt')])
filename, file_extension = os.path.splitext(self.TTfile)
if file_extension == ".rp":
self.TTfile_ext = ".rp"
self.tt_openTT()
elif file_extension == ".sgt":
self.TTfile_ext = ".sgt"
self.tomo_openTT()
except:
pass
def tomo_interpolated(self):
if self.cm:
self.tomo_raise()
self.tomo_frame3.tkraise()
def tomo_triangular(self):
if self.cm:
#self.tomo_ax4.cla()
#self.tomo_ax4.set_xlabel("Distance (m)")
#self.tomo_ax4.set_ylabel("Elevation (m)")
#self.tomo_ax4.set_title("Tomography velocity model panel")
#pg.show(self.m, self.vest, label="Velocity [m/s]", cMap = self.tomo_cmap,
# ax = self.tomo_ax4, colorBar = False, logScale = False)
#self.tomo_raise()
self.tomo_frame4.tkraise()
def tomo_showRP(self):
if self.cm:
self.ra.showRayPaths(lw=0.75, color = "white", ax = self.tomo_ax3)
self.ra.showRayPaths(lw=0.75, color = "white", ax = self.tomo_ax4)
#self.tomo_raise()
self.tomo_frame4.tkraise()
def tomo_updateCmap(self):
if self.cm:
self.cm.set_cmap(self.tomo_cmap) #update cmap of interpolated tomogram
#self.tomo_ax4.cla()
pg.show(self.m, self.vest, label="Velocity [m/s]", cMap = self.tomo_cmap, #overlap the triang mesh result with this new plot
ax = self.tomo_ax4, colorBar = False, logScale = False)
self.tomo_fig3.canvas.draw()
self.tomo_fig4.canvas.draw()
def tomo_cmapNipys(self):
if self.cm:
self.tomo_cmap = plt.cm.get_cmap("Spectral")
self.tomo_updateCmap()
def tomo_cmapJet(self):
if self.cm:
self.tomo_cmap = plt.cm.get_cmap("jet")
self.tomo_updateCmap()
def tomo_cmapGistr(self):
if self.cm:
self.tomo_cmap = plt.cm.get_cmap("gist_rainbow")
self.tomo_updateCmap()
def tomo_cmapGreys(self):
if self.cm:
self.tomo_cmap = plt.cm.get_cmap("Greys")
self.tomo_updateCmap()
def tomo_cmapbrw(self):
if self.cm:
self.tomo_cmap = matplotlib.colors.LinearSegmentedColormap.from_list("", ["blue","red","white"])
self.tomo_updateCmap()
def tomo_cmapGistn(self):
if self.cm:
self.tomo_cmap = plt.cm.get_cmap("viridis")
self.tomo_updateCmap()
def tomo_usett(self):
if self.tt_pltVM and self.cm:
if self.layer2:
self.tomo_ax3.plot(self.gp, self.depthLayer2, '--', c="k")
self.tomo_ax4.plot(self.gp, self.depthLayer2, '--', c="k")
'''#artigo
from scipy.interpolate import interp1d
f = interp1d([0,14,32,50,64], [-10,-12,-18,-12,-10], kind="cubic")
y = f(np.arange(0,64,1))
self.tomo_ax4.plot(np.arange(0,64,1),y, c = "k")
self.tomo_ax3.plot(np.arange(0,64,1),y, c = "k")'''
if self.layer3:
self.tomo_ax3.plot(self.gp, self.depthLayer3, '--', c="k")
self.tomo_ax4.plot(self.gp, self.depthLayer3, '--', c="k")
self.tomo_fig3.canvas.draw()
self.tomo_fig4.canvas.draw()
#self.tomo_raise()
def tomo_create(self):
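# Converts a Refrapy .rp pick file (optionally combined with an elevation file) into a
# pyGIMLi .sgt travel-times file: first a "<n> # shot/geophone points" block with one
# "x y" line per position, then a "<m> # measurements" block of 1-based "s g t" rows
# (source index, geophone index, travel time converted from ms to s).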
tomo_topoFile = False
tomo_rpTTfile = False
if messagebox.askyesno("Refrapy", "Use elevation file?"):
try:
tomo_topoFile = filedialog.askopenfilename(title='Open',filetypes=[('Elevation file','*.txt'), ('All files','*.*')])
except:
pass
if tomo_topoFile:
p, e = np.loadtxt(tomo_topoFile, usecols = (0,1), unpack = True)
messagebox.showinfo('Refrapy','Elevation data loaded successfully!')
try:
tomo_rpTTfile = filedialog.askopenfilename(title='Open',filetypes=[('Refrapy pick file','*.rp'), ('All files','*.*')])
except:
pass
if tomo_rpTTfile:
x, t = np.genfromtxt(tomo_rpTTfile, usecols = (0,1), unpack = True)
fg = x[1]
gn = t[0]
gs = t[1]
x = np.delete(x,[0,1])
t = np.delete(t,[0,1])
sp = []
for i in range(len(t)):
if np.isnan(x[i]):
sp.append(t[i])
gp = np.arange(fg,fg+gn*gs,gs)
sgp = np.sort(np.concatenate((np.arange(fg,fg+gn*gs,gs),np.array(sp))))
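# coincident shot and geophone coordinates are nudged apart by 0.01 m below so every
# position in the .sgt geometry stays unique (shots at or beyond the spread ends are
# shifted outward, in-line shots are shifted by +0.01)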
for i in range(len(sgp)):
try:
if sgp[i+1] == sgp[i]:
if sgp[i] <= min(gp):
sp[sp.index(sgp[i])] = sgp[i]-0.01
sgp[i] = sgp[i]-0.01
elif sgp[i] >= max(gp):
sp[sp.index(sgp[i])] = sgp[i]+0.01
sgp[i] = sgp[i]+0.01
else:
sp[sp.index(sgp[i])] = sgp[i]+0.01
sgp[i] = sgp[i]+0.01
except:
pass
out_sgtFile = filedialog.asksaveasfilename(title='Save',filetypes=[('pyGIMLi travel-times file', '*.sgt')])
with open(out_sgtFile+".sgt", "w") as f:
f.write("%d # shot/geophone points\n#x y\n"%(gn+len(sp)))
for i in range(len(sgp)):
if tomo_topoFile:
f.write("%.2f %.2f\n"%(sgp[i], e[i]))
else:
f.write("%.2f 0\n"%sgp[i])
f.write("%d # measurements\n#s g t\n"%(len(t)-len(sp)))
si = np.where(np.isin(np.sort(np.concatenate((np.arange(fg,fg+gn*gs,gs),np.array(sp)))), sp))[0]
a = 0
for i in range(len(t)):
if a <= len(si)-1:
if not np.isnan(x[i]):
f.write("%d %d %.6f\n"%(1+np.where(np.isclose(sgp, sp[a]))[0][0],
1+np.where(np.isclose(np.sort(np.concatenate((np.arange(fg,fg+gn*gs,gs),np.array(sp)))), x[i]))[0][-1],
t[i]/1000))
else:
a += 1
messagebox.showinfo('Refrapy',"pyGIMLI's travel-times file has been created succesfully!")
def tomo_openTT(self):
if self.tomo_TTplot:
messagebox.showinfo('Refrapy','A tomographic analysis is already in progress.\nTo start a new one, please restart the current analysis.')
else:
from pygimli.physics import Refraction
import pygimli as pg
data = pg.DataContainer(self.TTfile, 's g')
ra = Refraction(data)
ra.showData(ax = self.tomo_ax1)
self.tomo_ax1.invert_yaxis()
self.tomo_raise()
self.pars = []
self.tomo_TTplot = True
messagebox.showinfo('Refrapy',"pyGIMLI's travel-times file has been loaded succesfully!")
def tomo_showFit(self):
if self.TTfile:
self.ra.showData(response = self.ra.inv.response(), ax = self.tomo_ax2)
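# the two empty scatters below only create legend proxy entries for the markers that
# pyGIMLi's showData draws (observed '+' versus calculated '-')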
self.tomo_ax2.scatter([], [], marker = '+', s = 20, c = "k", label="Observed traveltimes")
self.tomo_ax2.scatter([], [], marker = '_', c = "k", label="Calculated traveltimes")
self.tomo_ax2.legend(loc='upper right', bbox_to_anchor=(1.01, 1.15))
self.tomo_raise()
def tomo_editTT(self):
if hasattr(self, "win") and self.win.winfo_exists(): self.win.destroy() #close the panel chooser window if it is open
if self.TTfile:
try: ttFile = filedialog.askopenfilename(title='Open',filetypes=[('Refrapy pick','*.rp')])
except: pass
if ttFile:
x, t = np.genfromtxt(ttFile, usecols = (0,1), unpack = True)
fg = x[1]
self.fg = fg
gn = t[0]
self.gn = gn
gs = t[1]
self.gs = gs
x = np.delete(x,[0,1])
t = np.delete(t,[0,1])/1000
sp = []
for i in range(len(t)):
if np.isnan(x[i]):
sp.append(t[i])
sp = np.array(sp)*1000
self.sp = sp
self.datax, self.datat = [], []
artb = []
for i in range(len(sp)):
self.datax.append({sp[i]:[]})
self.datat.append({sp[i]:[]})
artb.append({sp[i]:[]})
colors = ["#"+''.join([random.choice('0123456789ABCDEF') for j in range(6)]) for i in range(len(sp))]
s = 0
for i in range(len(t)):
if not np.isnan(x[i]):
self.datax[s][sp[s]].append(x[i])
self.datat[s][sp[s]].append(t[i])
if i == 0:
b = self.tomo_ax2.scatter(x[i],t[i], s = 20, c = "white", edgecolor = "k", picker = 4,zorder=10,
label = "Observed travel-times")
else:
b = self.tomo_ax2.scatter(x[i],t[i], s = 20, c = "white", edgecolor = "k", picker = 4,zorder=10)
artb[s][sp[s]].append(b)
else:
s+=1
lines = []
for i in range(len(sp)):
self.tomo_ax2.scatter(sp[i],0, marker = "*", c = "yellow", edgecolor = "k", s = 20, zorder=10)
l = self.tomo_ax2.plot(self.datax[i][sp[i]],self.datat[i][sp[i]], c = "k", lw = 0.75)
lines.append(l)
def onpick(event):
self.art = event.artist
artx = self.art.get_offsets()[0][0]
artt = self.art.get_offsets()[0][1]
for i in range(len(sp)):
for b in artb[i][sp[i]]:
bx = b.get_offsets()[0][0]
bt = b.get_offsets()[0][1]
if artx == bx and artt == bt:
self.arts = sp[i]
self.i = i
self.arti = np.where(np.array(self.datax[i][sp[i]]) == artx)[0][0]
def onrelease(event):
try:
self.datat[self.i][self.arts][self.arti] = event.ydata
self.art.set_offsets((self.art.get_offsets()[0][0],event.ydata))
for i in range(len(sp)):
lines[i][0].set_data(self.datax[i][sp[i]],self.datat[i][sp[i]])
self.tomo_fig2.canvas.draw()
except:
pass
self.art = None
self.arts = None
self.i = None
self.arti = None
event = self.tomo_fig2.canvas.mpl_connect('pick_event', onpick)
event2 = self.tomo_fig2.canvas.mpl_connect('button_release_event', onrelease)
self.tomo_ax2.legend(loc="best")
self.tomo_raise()
self.editTT_panel = 2
messagebox.showinfo('Refrapy','Travel-times file loaded successfully!\nDrag and drop travel-times (circles) to edit.\nWhen done, save the results to a new file.')
def tomo_loadParams(self):
self.pars = []
tomoParFile = filedialog.askopenfilename(title='Open',filetypes=[('Parameter file','*.txt'),('All files','*.*')])
if tomoParFile:
try:
with open(tomoParFile, "r") as arq:
for i in arq:
self.pars.append(float(i.split()[1]))
messagebox.showinfo('Refrapy',"Tomography inversion parameters loaded successfully!")
except:
messagebox.showinfo('Refrapy',"Invalid parameters! Please, check the file (in doubt view the Help menu)")
def tomo_save(self):
if self.cm:
now = datetime.datetime.now()
np.savetxt("tomoXYZ_%s-%s-%s__%sh%s.txt"%(now.day, now.month, now.year, now.hour, now.minute),
np.c_[self.tomo_cellsx, self.tomo_cellsy, self.tomo_cellsv], fmt='%.2f')
messagebox.showinfo('Refrapy',"The velocity tomogram XYZ file has been saved successfully!")
def tomo_invert(self):
if self.TTfile_ext == ".sgt":
self.tomo_ax3.cla()
a = np.genfromtxt(self.TTfile, usecols = (0), unpack = True)
with open(self.TTfile) as f:
head = [next(f).split() for x in range(2+int(a[0]))][2:]
data = pg.DataContainer(self.TTfile, 's g')
ra = Refraction(data)
self.ra = ra
meshpar = simpledialog.askstring("Refrapy", "Enter mesh parameters separated by comma (e.g. 1,2,3) or cancel for default:\n\nOrder: maxDepth,meshDx,maxCellSize\n\n")
if meshpar == None:
m = ra.createMesh()
elif len(meshpar.split(",")) == 3:
maxDepth = int(meshpar.split(",")[0])
meshDx = float(meshpar.split(",")[1])
maxCellSize = float(meshpar.split(",")[2])
m = ra.createMesh(data=data,paraMaxCellSize=maxCellSize,paraDX=meshDx, depth=maxDepth)
else:
messagebox.showinfo('Refrapy',"Values were not properly assigned. A default mesh will be used.")
invpar = simpledialog.askstring("Refrapy", "Enter inversion parameters separated by comma (e.g. 1,2,3) or cancel for default:\n\nOrder: smoothing,vertWeight,minVel,maxVel,secNodes,maxIterations\n\n")
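# These parameters map onto the pygimli Refraction.invert() call below: lam is the
# regularization (smoothing) weight, zWeight the vertical-smoothness weighting,
# vTop/vBottom the gradient starting-model velocities, secNodes the number of secondary
# nodes used for ray tracing, and maxIter the iteration cap.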
if invpar == None:
vest = ra.invert(mesh=m)
elif len(invpar.split(",")) == 6:
smoothing = float(invpar.split(",")[0])
vertWeight = float(invpar.split(",")[1])
minVel = float(invpar.split(",")[2])
maxVel = float(invpar.split(",")[3])
sNodes = int(invpar.split(",")[4])
maxIterations = int(invpar.split(",")[5])
vest = ra.invert(mesh=m,lam=smoothing,zWeight=vertWeight,useGradient=True,vTop=minVel,vBottom=maxVel,verbose=False,maxIter=maxIterations,secNodes=sNodes)
else:
messagebox.showinfo('Refrapy',"Values were not properly assigned. A default inversion will be executed.")
'''if len(self.pars) == 0:
m = ra.createMesh()
vest = ra.invert()
else:
m = ra.createMesh(paraMaxCellSize=float(self.pars[2]), secNodes=int(self.pars[3]))
vest = ra.invert(mesh = m, useGradient = True, vtop = self.pars[0], vbottom = self.pars[1],
zWeight=self.pars[4], lam = float(self.pars[6]), verbose = int(self.pars[5]))'''
#except:
# messagebox.showinfo('Refrapy',"Invalid parameters! Please, check the file (in doubt view the Help menu)")
self.m = m
self.vest = vest
rrms = ra.inv.relrms() # relative RMS
arms = ra.inv.absrms() # Absolute RMS
chi2 = ra.inv.chi2() # chi2
#print("maxDepth %.2f\nmeshDx %.2f\nmaxCellSize %.2f\nlam %.2f\nzW %.2f\nminVel %.2f\nmaxVal %.2f\nsecNodes %.2f\nmaxIter %.5f\narms %.2f\nrrms %.2f\nchi2 %.2f\n"%(maxDepth, meshDx, maxCellSize,
# smoothing,vertWeight,minVel,maxVel,sNodes,
# maxIterations, arms,rrms,chi2))
messagebox.showinfo('Refrapy',"Relative RMS = %.2f%%\nAbsolute RMS = %.2f ms\nChi-square = %.2f"%(rrms,1000*arms,chi2))
x = np.array([i for i in pg.x(m.cellCenters())])
y = np.array([i for i in pg.y(m.cellCenters())])
v = np.array([i for i in vest])
self.tomo_cellsx, self.tomo_cellsy, self.tomo_cellsv = x,y,v
spi = np.unique(ra.dataContainer("s"), return_inverse=False)
gi = np.unique(ra.dataContainer("g"), return_inverse=False)
a = np.genfromtxt(self.TTfile, usecols = (0), skip_header = 2, unpack = True)
sp = [a[:len(spi)+len(gi)][int(i)] for i in spi]
self.tomo_sp = sp
gp = [a[:len(spi)+len(gi)][int(i)] for i in gi]
self.tomo_gp = gp
x_grid = np.linspace(x.min(), x.max(), 500)
y_grid = np.linspace(y.min(), y.max(), 500)
xi,yi = np.meshgrid(x_grid,y_grid)
self.xi, self.yi = xi,yi
zi = griddata((x, y), v,(xi,yi), method='cubic')
self.zi = zi
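# the unstructured cell-center velocities are resampled onto a regular 500x500 grid with
# cubic griddata so the model can be drawn as a filled contour plot on tomo_ax3, while the
# raw triangular-mesh result is shown separately via pg.show on tomo_ax4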
if min(v) < 0:
vmin = 0
else:
vmin = min(v)
d = ra.getDepth()
head = [(float(head[i][0]),float(head[i][1])) for i in range(len(head))]
e = np.array([head[i][1] for i in range(len(head))])
sgp = np.array([head[i][0] for i in range(len(head))])
tail = [(head[i][0],max(y)-d) for i in range(len(head))]
p = head+list(reversed(tail))
poly = plt.Polygon(p, ec="none", fc="none")
self.tomo_ax3.add_patch(poly)
#cm = self.tomo_ax3.imshow(zi, cmap = "Spectral", origin='lower', interpolation = 'spline36',
# vmin = vmin, vmax = max(v),
# extent=[x.min(),x.max(),y.min(),y.max()], clip_path=poly, clip_on=True)
cm =self.tomo_ax3.contourf(zi, cmap = self.tomo_cmap, levels = 20, origin='lower', extent=[x.min(),x.max(),y.min(),y.max()])
#cm2 =self.tomo_ax3.contourf(xi,yi,zi, levels = [2100], origin='lower', extent=[x.min(),x.max(),y.min(),y.max()])
pg.show(self.m, self.vest, label="Velocity [m/s]", cMap = self.tomo_cmap,
ax = self.tomo_ax4, colorBar = False, logScale = False)
self.tomo_ax4.set_xlabel("Distance (m)")
self.tomo_ax4.set_ylabel("Elevation (m)")
self.tomo_ax4.set_title("Tomography velocity model panel")
#gx = np.linspace(0,63,24)
#self.tomo_ax3.plot(gx, [-5+(i*0) for i in range(len(gx))], lw = 1, ls = "--", c = "black")
#self.tomo_ax3.plot(gx, [-10+(i*0) for i in range(len(gx))],lw = 1, ls = "--", c = "black")
#cm = self.tomo_ax3.imshow(zi, norm=LogNorm(vmin = vmin, vmax= max(v)), cmap = "gist_rainbow", origin='lower', interpolation = 'spline36',
# extent=[x.min(),x.max(),y.min(),y.max()], clip_path=poly, clip_on=True)
self.cm = cm
self.tomo_ge = [e[np.where(np.array(sgp) == np.array(gp)[i])[0]][0] for i in range(len(gp))]
self.tomo_se = [e[np.where(np.array(sgp) == np.array(sp)[i])[0]][0] for i in range(len(sp))]
divider = make_axes_locatable(self.tomo_ax3)
divider2 = make_axes_locatable(self.tomo_ax4)
cax = divider.append_axes("right", size="1%", pad=0.05)
cax2 = divider2.append_axes("right", size="1%", pad=0.05)
#plt.colorbar(cm,orientation="vertical",aspect =20,
# cax = cax, label = "Velocity (m/s)")
self.tomo_fig3.colorbar(cm,orientation="vertical",aspect =20,
cax = cax, label = "Velocity (m/s)")
self.tomo_fig4.colorbar(cm,orientation="vertical",aspect =20,
cax = cax2, label = "Velocity (m/s)")
self.tomo_ax3.set_xlabel("Distance (m)")
self.tomo_ax3.set_ylabel("Elevation (m)")
self.tomo_ax3.set_title("Tomography velocity model panel")
self.tomo_ax3.grid(ls = '--', lw = 0.5)
self.tomo_pltVM = True
self.tomo_showFit()
self.tomo_raise()
def restart(self):
self.tt_ax1.cla()
self.tt_ax2.cla()
self.tt_ax3.cla()
self.tt_pltTT = False
self.tomo_TTplot = False
self.tt_vmGrid = True
self.tt_TTGrid = True
self.tt_TTSources = True
self.tt_pltG = False
self.tt_pltS = False
self.tomo_ax1.cla()
self.tomo_ax2.cla()
self.tomo_ax3.cla()
self.tomo_ax4.cla()
self.tomo_fig3.clf()
self.tomo_fig4.clf()
self.tomo_ax3 = self.tomo_fig3.add_subplot(111)
self.tomo_ax3.set_aspect("equal")
self.tomo_ax4 = self.tomo_fig4.add_subplot(111)
self.tomo_ax4.set_aspect("equal")
self.tomo_pltVM = False
self.tomo_pltS = False
self.tomo_pltG = False
self.tomo_pltS_triang = False
self.tomo_pltG_triang = False
self.TTfile = False
self.TTfile_ext = False
self.pars = []
self.tt_raise()
self.tomo_ax4.set_title("Tomography velocity model panel")
self.tomo_ax3.set_title("Tomography velocity model panel")
self.tomo_ax2.set_title("Fit and edditing panel")
self.tomo_ax1.set_title("Traveltime curves panel")
self.tt_ax3.set_title("Time-terms velocity model panel")
self.tt_ax2.set_title("Fit and edditing panel")
self.tt_ax1.set_title("Traveltime curves panel")
messagebox.showinfo('Refrapy','All analyses have been restarted!')
def tt_save(self):
if self.tt_pltVM == True:
now = datetime.datetime.now()
with open("timeterms_%s-%s-%s__%sh%s.txt"%(now.day, now.month, now.year, now.hour, now.minute), "w") as arq:
print(self.depthLayer2)
if self.tt_topoFile:
arq.write("Layer 1\n")
arq.write("Velocity = %.2f m/s\n"%self.velocity1)
for i in range(len(self.gp)):
arq.write("%.2f %.2f\n"%(self.gp[i], self.ge[i]))
arq.write("\nLayer 2\n")
arq.write("Velocity = %.2f m/s\n"%self.velocity2)
for i in range(len(self.gp)):
arq.write("%.2f %.2f\n"%(self.gp[i], self.depthLayer2[i]))
else:
arq.write("Layer 1\n")
arq.write("Velocity = %.2f m/s\n"%self.velocity1)
for i in range(len(self.gp)):
arq.write("%.2f 0\n"%self.gp[i])
arq.write("\nLayer 2\n")
arq.write("Velocity = %.2f m/s\n"%self.velocity2)
for i in range(len(self.gp)):
arq.write("%.2f %.2f\n"%(self.gp[i], self.depthLayer2[i]))
if self.layer3:
arq.write("\nLayer 3\n")
arq.write("Velocity = %.2f m/s\n"%self.velocity3)
for i in range(len(self.gp)):
arq.write("%.2f %.2f\n"%(self.gp[i], self.depthLayer3[i]))
self.tt_fig1.savefig('timeterms_layerInterpretation_%s-%s-%s__%sh%s.png'%(now.day, now.month, now.year, now.hour, now.minute))
self.tt_fig3.savefig('timeterms_velocityModel_%s-%s-%s__%sh%s.png'%(now.day, now.month, now.year, now.hour, now.minute))
messagebox.showinfo('Refrapy',"All figures were saved and time-terms analysis results are in timeterms_%s-%s-%s__%sh%s.txt"%(now.day, now.month, now.year, now.hour, now.minute))
def tt_raise(self):
self.tt_frame1.tkraise()
self.tt_frame2.tkraise()
self.tt_frame3.tkraise()
self.tt_fig1.canvas.draw()
self.tt_fig2.canvas.draw()
self.tt_fig3.canvas.draw()
self.page = 1
def tomo_raise(self):
self.tomo_frame1.tkraise()
self.tomo_frame2.tkraise()
self.tomo_frame3.tkraise()
self.tomo_fig1.canvas.draw()
self.tomo_fig2.canvas.draw()
self.tomo_fig3.canvas.draw()
self.page = 2
def back(self):
if self.page == 2:
self.tt_raise()
def next(self):
if self.page == 1:
self.tomo_raise()
def tt_VMshowG(self):
if self.tt_pltVM:
if self.tt_pltG == False:
if self.tt_topoFile:
self.tt_pltG = self.tt_ax3.scatter(self.gp, self.ge ,marker = 7, color = 'k',
edgecolor = "k", s = 50)
else:
self.tt_pltG = self.tt_ax3.scatter(self.gp, [i*0 for i in range(len(self.gp))],marker = 7, color = 'k',
edgecolor = "k", s = 50)
else:
self.tt_pltG.remove()
self.tt_pltG = False
self.tt_fig3.canvas.draw()
def tt_VMshowS(self):
if self.tt_pltVM:
if self.tt_pltS == False:
if self.tt_topoFile:
self.tt_pltS = self.tt_ax3.scatter(self.sp, self.se ,marker = "*", color = 'yellow',
edgecolor = "k", s = 100)
else:
self.tt_pltS = self.tt_ax3.scatter(self.sp, [i*0 for i in range(len(self.sp))],marker = "*", color = 'yellow',
edgecolor = "k", s = 100)
else:
self.tt_pltS.remove()
self.tt_pltS = False
self.tt_fig3.canvas.draw()
def tomo_VMshowS(self): #show/hide source positions in tomography velocity model panel
if self.tomo_pltVM:
if self.tomo_pltS:
self.tomo_pltS.remove()
self.tomo_pltS_triang.remove()
self.tomo_pltS = False
self.tomo_pltS_triang = False
else:
self.tomo_pltS = self.tomo_ax3.scatter(self.tomo_sp, self.tomo_se ,marker = "*", color = 'yellow',
edgecolor = "k", s = 100, zorder=10)
self.tomo_pltS_triang = self.tomo_ax4.scatter(self.tomo_sp, self.tomo_se ,marker = "*", color = 'yellow',
edgecolor = "k", s = 100, zorder=10)
self.tomo_fig3.canvas.draw()
self.tomo_fig4.canvas.draw()
def tomo_VMshowG(self): #show/hide geophone positions in tomography velocity model panel
if self.tomo_pltVM:
if self.tomo_pltG:
self.tomo_pltG.remove()
self.tomo_pltG_triang.remove()
self.tomo_pltG = False
self.tomo_pltG_triang = False
else:
self.tomo_pltG = self.tomo_ax3.scatter(self.tomo_gp, self.tomo_ge ,marker = 7, color = 'k',
edgecolor = "k", s = 50, zorder=10)
self.tomo_pltG_triang = self.tomo_ax4.scatter(self.tomo_gp, self.tomo_ge ,marker = 7, color = 'k',
edgecolor = "k", s = 50, zorder=10)
self.tomo_fig3.canvas.draw()
self.tomo_fig4.canvas.draw()
def tt_TTshowGrid(self):
if self.tt_pltTT == True:
if self.tt_TTGrid == True:
self.tt_ax1.grid(False)
self.tt_ax2.grid(False)
self.tt_TTGrid = False
else:
self.tt_ax1.grid(ls = '--', lw = 0.5)
self.tt_ax2.grid(ls = '--', lw = 0.5)
self.tt_TTGrid = True
self.tt_fig1.canvas.draw()
self.tt_fig2.canvas.draw()
def tt_TTshowS(self):
if self.tt_pltTT == True:
if self.tt_TTSources == True:
for i in range(len(self.tt_S)):
self.tt_S[i].set_alpha(0)
self.tt_TTSources = False
else:
for i in range(len(self.tt_S)):
self.tt_S[i].set_alpha(1)
self.tt_TTSources = True
self.tt_fig1.canvas.draw()
self.tt_fig2.canvas.draw()
def tt_VMshowGrid(self):
if self.tt_pltVM == True:
if self.tt_vmGrid == True:
self.tt_ax3.grid(False)
self.tt_vmGrid = False
else:
self.tt_ax3.grid(ls = '--', lw = 0.5)
self.tt_vmGrid = True
self.tt_fig3.canvas.draw()
def tt_LayerColors(self):
if self.tt_pltVM == True:
self.tt_layer1.set_color("red")
self.tt_layer2.set_color("green")
if self.layer3:
self.tt_layer3.set_color("blue")
self.tt_ax3.legend(loc="best")
self.tt_fig3.canvas.draw()
def tt_LayerBlack(self):
if self.tt_pltVM == True:
self.tt_layer1.set_color("lightgrey")
self.tt_layer2.set_color("grey")
if self.layer3:
self.tt_layer3.set_color("black")
self.tt_ax3.legend(loc="best")
self.tt_fig3.canvas.draw()
def tt_TTcolors(self):
if self.tt_pltTT:
colors = ["#"+''.join([random.choice('0123456789ABCDEF') for j in range(6)]) for i in range(len(self.sp))]
for i in range(len(self.tt_TTlines)):
self.tt_TTlines[i][0].set_color(colors[i])
self.tt_fig1.canvas.draw()
def tt_TTblack(self):
if self.tt_pltTT:
for i in range(len(self.tt_TTlines)):
self.tt_TTlines[i][0].set_color("k")
self.tt_fig1.canvas.draw()
def tt_openTT(self):
if self.tt_pltTT:
messagebox.showinfo('Refrapy','An analysis of travel-time curves is already in progress.\nTo start a new one, please clear the current analysis.')
else:
#if ttFile != None:
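# .rp layout (as written by saveTT below): line 1 holds the number of sources and the
# geophone count, line 2 the first-geophone position and geophone spacing; then one
# "x t 1" row per pick, with each shot terminated by a "/ source_position" row, which
# genfromtxt reads back as NaN in the x column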
x, t = np.genfromtxt(self.TTfile, usecols = (0,1), unpack = True)
self.fg = x[1]
self.gn = t[0]
self.gs = t[1]
x = np.delete(x,[0,1])
t = np.delete(t,[0,1])
self.sp = []
for i in range(len(t)):
if np.isnan(x[i]):
self.sp.append(t[i])
self.gp = np.arange(self.fg,self.fg+self.gn*self.gs,self.gs)
self.tt_ax1.grid(ls = '--', lw = 0.5)
self.tt_ax1.set_xlabel("Distance (m)")
self.tt_ax1.set_ylabel("Traveltime (ms)")
datax, datat = [], []
self.artb = []
for i in range(len(self.sp)):
datax.append({self.sp[i]:[]})
datat.append({self.sp[i]:[]})
self.artb.append({self.sp[i]:[]})
colors = ["#"+''.join([random.choice('0123456789ABCDEF') for j in range(6)]) for i in range(len(self.sp))]
s = 0
for i in range(len(t)):
if not np.isnan(x[i]):
datax[s][self.sp[s]].append(x[i])
datat[s][self.sp[s]].append(t[i])
b = self.tt_ax1.scatter(x[i],t[i], s = 20, c = "white", edgecolor = "k", picker = 4)
self.artb[s][self.sp[s]].append(b)
else:
s+=1
self.tt_TTlines = []
self.tt_S = []
for i in range(len(self.sp)):
sPlot = self.tt_ax1.scatter(self.sp[i],0, marker = "*", c = 'white', edgecolor = "k", s = 100)
self.tt_S.append(sPlot)
l = self.tt_ax1.plot(datax[i][self.sp[i]],datat[i][self.sp[i]], c = colors[i], lw = 0.75)
self.tt_TTlines.append(l)
self.tt_fig1.canvas.draw()
self.tt_topoFile = False
self.tt_interpretation = False
self.layer1, self.layer2, self.layer3 = [],[],[]
self.layer = 1
self.tt_pltS = False
self.tt_pltG = False
self.tt_pltTT = True
self.tt_pltVM = False
self.tt_raise()
messagebox.showinfo('Refrapy','Travel-times file was loaded successfully!')
def tt_L1(self):
if self.tt_interpretation:
self.layer = 1
self.tt_ax1.set_title('Traveltime curves panel - Layer %d interpretation activated!'%self.layer)
self.tt_fig1.canvas.draw()
def tt_L2(self):
if self.tt_interpretation:
self.layer = 2
self.tt_ax1.set_title('Traveltime curves panel - Layer %d interpretation activated!'%self.layer)
self.tt_fig1.canvas.draw()
def tt_L3(self):
if self.tt_interpretation:
self.layer = 3
self.tt_ax1.set_title('Traveltime curves panel - Layer %d interpretation activated!'%self.layer)
self.tt_fig1.canvas.draw()
def tt_clearInterpretation(self):
if self.tt_interpretation:
if messagebox.askyesno("Refrainv", "Clear layer interpretation?"):
del self.layer1[:]
del self.layer2[:]
del self.layer3[:]
for i in range(len(self.sp)):
for b in self.artb[i][self.sp[i]]:
b.set_color("white")
b.set_edgecolor("k")
self.tt_fig1.canvas.draw()
messagebox.showinfo('Refrapy','Layer interpretation was restarted!')
def tt_layerInterpretation(self):
if self.tt_interpretation == False:
self.tt_interpretation = True
self.tt_ax1.set_title('Traveltime curves panel - Layer %d interpretation activated!'%self.layer)
def onpick(event):
art = event.artist
artx = art.get_offsets()[0][0]
artt = art.get_offsets()[0][1]
for i in range(len(self.sp)):
if art in self.artb[i][self.sp[i]]:
arts = self.sp[i]
iS = i
if artx >= arts:
for b in self.artb[iS][arts]:
bx = b.get_offsets()[0][0]
iG = np.where(np.array(self.artb[iS][arts]) == b)[0][0]
if arts <= bx <= artx:
bt = b.get_offsets()[0][1]
if self.layer == 1 and (bx,bt,arts,abs(arts-bx),iS, iG) not in self.layer1:
b.set_color("red")
self.layer1.append((bx,bt,arts,abs(arts-bx),iS, iG))#geophone_position , arrival_time , source_position , offset , index_source , index_geophone
if (bx,bt,arts,abs(arts-bx),iS, iG) in self.layer2:
self.layer2.remove((bx,bt,arts,abs(arts-bx),iS, iG))
elif self.layer == 2 and (bx,bt,arts,abs(arts-bx),iS, iG) not in self.layer1 and (bx,bt,arts,abs(arts-bx),iS, iG) not in self.layer2:
b.set_color("lightgreen")
self.layer2.append((bx,bt,arts,abs(arts-bx),iS, iG))
if (bx,bt,arts,abs(arts-bx),iS, iG) in self.layer3:
self.layer3.remove((bx,bt,arts,abs(arts-bx),iS, iG))
elif self.layer == 3 and (bx,bt,arts,abs(arts-bx),iS, iG) not in self.layer2 and (bx,bt,arts,abs(arts-bx),iS, iG) not in self.layer1 and (bx,bt,arts,abs(arts-bx),iS, iG) not in self.layer3:
b.set_color("blue")
self.layer3.append((bx,bt,arts,abs(arts-bx),iS, iG))
elif artx <= arts:
for b in self.artb[iS][arts]:
bx = b.get_offsets()[0][0]
iG = np.where(np.array(self.artb[iS][arts]) == b)[0][0]
if arts >= bx >= artx:
bt = b.get_offsets()[0][1]
if self.layer == 1 and (bx,bt,arts,abs(arts-bx),iS, iG) not in self.layer1:
b.set_color("red")
self.layer1.append((bx,bt,arts,abs(arts-bx),iS, iG))
if (bx,bt,arts,abs(arts-bx),iS, iG) in self.layer2:
self.layer2.remove((bx,bt,arts,abs(arts-bx),iS, iG))
elif self.layer == 2 and (bx,bt,arts,abs(arts-bx),iS, iG) not in self.layer1 and (bx,bt,arts,abs(arts-bx),iS, iG) not in self.layer2:
b.set_color("lightgreen")
self.layer2.append((bx,bt,arts,abs(arts-bx),iS, iG))
if (bx,bt,arts,abs(arts-bx),iS, iG) in self.layer3:
self.layer3.remove((bx,bt,arts,abs(arts-bx),iS, iG))
elif self.layer == 3 and (bx,bt,arts,abs(arts-bx),iS, iG) not in self.layer2 and (bx,bt,arts,abs(arts-bx),iS, iG) not in self.layer1 and (bx,bt,arts,abs(arts-bx),iS, iG) not in self.layer3:
b.set_color("blue")
self.layer3.append((bx,bt,arts,abs(arts-bx),iS, iG))
self.tt_fig1.canvas.draw()
def onkey(event):
if event.key == "1":
self.layer = 1
elif event.key == "2":
self.layer = 2
elif event.key == "3":
self.layer = 3
self.tt_ax1.set_title('Layer %d interpretation activated!'%self.layer)
if event.key == "C" or event.key == "c":
if messagebox.askyesno("Refrainv", "Clear layer interpretation?"):
del self.layer1[:]
del self.layer2[:]
del self.layer3[:]
for i in range(len(self.sp)):
for b in self.artb[i][self.sp[i]]:
b.set_color("white")
b.set_edgecolor("k")
self.tt_fig1.canvas.draw()
self.tt_pickEvent = self.tt_fig1.canvas.mpl_connect('pick_event', onpick)
self.tt_keyEvent = self.tt_fig1.canvas.mpl_connect('key_press_event', onkey)
messagebox.showinfo('Refrapy','Layer interpretation is now activated!')
self.tt_raise()
else:
self.tt_fig1.canvas.mpl_disconnect(self.tt_pickEvent)
self.tt_fig1.canvas.mpl_disconnect(self.tt_keyEvent)
self.tt_ax1.set_title('Layer interpretation off')
messagebox.showinfo('Refrapy','Layer interpretation is now off!')
self.tt_raise()
self.tt_interpretation = False
def saveTT(self):
if self.editTT_panel == 1:
out_ttFile = filedialog.asksaveasfilename(title='Save',filetypes=[('Refrapy pick', '*.rp')])
with open(out_ttFile+".rp",'w') as arqpck:
arqpck.write("%d %d\n%.2f %.2f\n"%(len(self.spED),self.gnED,self.fgED,self.gsED))
for i in range(len(self.spED)):
for j in range(len(self.dataxED[i][self.spED[i]])):
arqpck.write('%f %f 1\n'%(self.dataxED[i][self.spED[i]][j],self.datatED[i][self.spED[i]][j]))
arqpck.write('/ %f\n'%self.spED[i])
elif self.editTT_panel == 2:
if self.TTfile:
out_ttFile = filedialog.asksaveasfilename(title='Save',filetypes=[('Refrapy pick', '*.rp')])
with open(out_ttFile+".sgt",'w') as arqpck:
arqpck.write("%d %d\n%.2f %.2f\n"%(len(self.sp),self.gn,self.fg,self.gs))
for i in range(len(self.sp)):
for j in range(len(self.datax[i][self.sp[i]])):
arqpck.write('%f %f 1\n'%(self.datax[i][self.sp[i]][j],self.datat[i][self.sp[i]][j]*1000))
arqpck.write('/ %f\n'%self.sp[i])
messagebox.showinfo('Refrapy',"%s was saved!"%out_ttFile)
def editTT(self):
self.win = Toplevel()
self.win.geometry("250x75")
self.win.resizable(0,0)
self.win.title('Refrainv')
message = "This will delete stuff"
Label(self.win, text="Choose the edditing panel").grid(row=0,column=0,columnspan=2)
Button(self.win, text='Time-terms panel', command= lambda: choose_tt()).grid(row=1,column=0, pady=10, padx=10)
Button(self.win, text='Tomography panel', command = lambda: choose_tomo()).grid(row=1,column=1, padx= 10)
def choose_tt():
self.tt_editTT()
self.win.destroy
def choose_tomo():
self.tomo_editTT()
self.win.destroy
self.win.mainloop()
def tt_editTT(self):
if hasattr(self, "win") and self.win.winfo_exists(): self.win.destroy() #close the panel chooser window if it is open
try: ttFile = filedialog.askopenfilename(title='Open',filetypes=[('Refrapy pick','*.rp')])
except: pass
if ttFile:
self.tt_ax2.cla()
x, t = np.genfromtxt(ttFile, usecols = (0,1), unpack = True)
self.fgED = x[1]
self.gnED = t[0]
self.gsED = t[1]
x = np.delete(x,[0,1])
t = np.delete(t,[0,1])
self.spED = []
for i in range(len(t)):
if
| np.isnan(x[i]) | numpy.isnan |
import numpy as np
from sklearn import neighbors
from mpl_toolkits.mplot3d import Axes3D
import random
from sklearn.decomposition import PCA
from PIL import Image
from sklearn.cluster import KMeans
import scipy.spatial.distance as dist
from matplotlib.colors import ListedColormap
class Question1(object):
def pcaeig(self, data):
""" Implement PCA via the eigendecomposition.
Parameters:
1. data (N, d) numpy ndarray. Each row as a feature vector.
Outputs:
1. W (d,d) numpy array. PCA transformation matrix (Note that each row of the matrix should be a principal component)
2. s (d,) numpy array. Vector consisting of the amount of variance explained in the data by each PCA feature.
Note that the PCA features are ordered in decreasing amount of variance explained, by convention.
"""
covMatrix = np.dot(data.T, data) / data.shape[0]
w, v = np.linalg.eigh(covMatrix)
W = np.fliplr(v).T
s = np.flip(w)
# Remember to check your data types
return (W, s)
def pcadimreduce(self, data, W, k):
""" Implements dimension reduction via PCA.
Parameters:
1. data (N, d) numpy ndarray. Each row as a feature vector.
2. W (d,d) numpy array. PCA transformation matrix
3. k number. Number of PCA features to retain
Outputs:
1. reduced_data (N,k) numpy ndarray, where each row contains PCA features corresponding to its input feature.
"""
reduced_data = np.dot(W[0:k, :], data.T).T
return reduced_data
def pcareconstruct(self, pcadata, W, k):
""" Implements dimension reduction via PCA.
Parameters:
1. pcadata (N, k) numpy ndarray. Each row as a PCA vector. (e.g. generated from pcadimreduce)
2. W (d,d) numpy array. PCA transformation matrix
3. k number. Number of PCA features
Outputs:
1. reconstructed_data (N,d) numpy ndarray, where the i-th row contains the reconstruction of the original i-th input feature vector (in `data`) based on the PCA features contained in `pcadata`.
"""
reconstructed_data = np.dot(W[0:k, :].T, pcadata.T).T
return reconstructed_data
def pcasvd(self, data):
"""Implements PCA via SVD.
Parameters:
1. data (N, d) numpy ndarray. Each row as a feature vector.
Returns:
1. Wsvd (d,d) numpy array. PCA transformation matrix (Note that each row of the matrix should be a principal component)
2. ssvd (d,) numpy array. Vector consisting of the amount of variance explained in the data by each PCA feature.
Note that the PCA features are ordered in decreasing amount of variance explained, by convention.
"""
_, s, v = np.linalg.svd(data)
Wsvd = v
ssvd = s*s / data.shape[0]
return Wsvd, ssvd
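# Usage sketch for the PCA helpers above (assumes `data` is an (N, d) array that is
# already mean-centered, since the covariance here is computed without subtracting
# the mean):
#   q1 = Question1()
#   W, s = q1.pcaeig(data)             # rows of W are principal components
#   Z = q1.pcadimreduce(data, W, 2)    # top-2 PCA features per sample
#   Xhat = q1.pcareconstruct(Z, W, 2)  # reconstruction back in the original space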
class Question2(object):
def unexp_var(self, X):
"""Returns an numpy array with the fraction of unexplained variance on X by retaining the first k principal components for k =1,...200.
Parameters:
1. X The input image
Returns:
1. pca The PCA object fit on X
2. unexpv A (200,) numpy ndarray, where the i-th element contains the percentage of unexplained variance on X by retaining i+1 principal components
"""
for i in range(200):
pca = PCA(n_components=i+1)
pca.fit(X)
unexpv = 1 - np.cumsum(pca.explained_variance_ratio_)
return pca, unexpv
def pca_approx(self, X_t, pca, i):
"""Returns an approimation of `X_t` using the the first `i` principal components (learned from `X`).
Parameters:
1. X_t The input image to be approximated
2. pca The PCA object to use for the transform
3. i Number of principal components to retain
Returns:
1. recon_img The reconstructed approximation of X_t using the first i principal components learned from X (As a sanity check it should be of size (1,4096))
"""
transX_t = pca.transform(X_t.reshape(1,-1))
transX_t[0, i:] = 0
recon_img = pca.inverse_transform(transX_t)
return recon_img
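# Example (hypothetical 64x64 face images flattened to 4096 features):
#   q2 = Question2()
#   pca, unexpv = q2.unexp_var(X)        # X is (N, 4096); pca ends up fit with 200 components
#   recon = q2.pca_approx(X_t, pca, 50)  # keep the first 50 PCA features; shape (1, 4096)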
class Question3(object):
def pca_classify(self, traindata, trainlabels, valdata, vallabels):
"""Returns validation errors using 1-NN on the PCA features using 1,2,...,256 PCA features, the minimum validation error, and number of PCA features used.
Parameters:
traindata
trainlabels
valdata
vallabels
Returns:
ve numpy array of length 256 containing the validation errors using 1,...,256 features
min_ve minimum validation error
pca_feat Number of PCA features to retain. Integer.
"""
from sklearn.neighbors import KNeighborsClassifier
ve = np.zeros(256)
for i in range(256):
pca = PCA(n_components=i+1)
pca.fit(traindata)
classifier= KNeighborsClassifier(n_neighbors=1)
classifier.fit(pca.transform(traindata), trainlabels)
predlabels = classifier.predict(pca.transform(valdata))
ve[i] = np.mean(predlabels != vallabels)
min_ve = ve[
| np.argmin(ve) | numpy.argmin |
from hcipy.mode_basis.zernike import zernike
import numpy as np
import hcipy as hc
import matplotlib.pyplot as plt
from misc import normalize,resize,printProgressBar,resize2,overlap
from os import path
import time
import LPmodes
import h5py
import screen
from astropy.io import fits
import PIAA
import PIAA_raytrace as RTPIAA
import zernike as zk
def get_u(wf:hc.Wavefront):
'''extract 2D array of complex field data from hcipy wavefront object'''
field = wf.electric_field * np.sqrt(wf.grid.weights)
size = int(np.sqrt(field.shape[0]))
shape=(size,size)
reals,imags = np.array(np.real(field)),np.array(np.imag(field))
u = reals + 1.j*imags
u = u.reshape(shape)
return u
def get_phase(wf:hc.Wavefront):
phase = wf.phase
size = int(np.sqrt(phase.shape[0]))
shape=(size,size)
phase = phase.reshape(shape)
return phase
def get_strehl(u,u_perfect):
return np.max(u*np.conj(u))/np.max(u_perfect*np.conj(u_perfect))
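# Minimal usage sketch for the helpers above (a wavefront `wf` and an unaberrated
# reference field `u0` are assumed to exist):
#   u = get_u(wf)               # 2D complex field on a square grid
#   phi = get_phase(wf)         # 2D phase map
#   strehl = get_strehl(u, u0)  # peak intensity relative to the reference field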
class AOtele:
#imaged star parameters
zero_magnitude_flux = 3.9e10 #phot/s
star_mag = 0
#lo_pupil_res = 128 # this resolution is used for the DM correction algorithm
#hi_pupil_res = 1024 # this resolution is used for propagation
def __init__(self, diameter, fnum, wavelength, num_DM_acts = 30, wavelength_0 = None, obstr_frac = 0.244, fast_boot = False, hi_pupil_res = 512, lo_pupil_res = 128, remove_TT = False):
self.remove_TT = remove_TT
self.hi_pupil_res = hi_pupil_res
self.lo_pupil_res = lo_pupil_res
if wavelength_0 is None:
wavelength_0 = wavelength
self.reference_wavelength = wavelength_0
self.diameter = diameter
self.fnum = fnum
self.num_acts = num_DM_acts
self.obstr_frac = obstr_frac
self.wavelength = wavelength
## setting up low and high res pupil grids. the low res grid is used only to calibrate/control the DM
num_pupil_pixels = self.lo_pupil_res
pupil_pixel_samps = self.lo_pupil_res #the Keck pupil FITS seems to be padded with zeros around the border by ~5%
self.pupil_plane_res = (num_pupil_pixels,num_pupil_pixels)
pupil_grid_diam = diameter * num_pupil_pixels / pupil_pixel_samps
self.pupil_grid_diam = pupil_grid_diam
self.pupil_sample_rate = pupil_grid_diam / num_pupil_pixels
self.pupil_grid = hc.make_pupil_grid(self.lo_pupil_res,diameter = pupil_grid_diam)
self.pupil_grid_hires = hc.make_pupil_grid(self.hi_pupil_res,diameter = pupil_grid_diam)
## now set up the actual pupil fields
keck_pupil_hires = np.array(fits.open("pupil_KECK_high_res.fits")[0].data,dtype=np.float32)[50:-50,50:-50]
ap_arr = resize2(keck_pupil_hires, (self.lo_pupil_res,self.lo_pupil_res) )
ap_arr_hires = resize2(keck_pupil_hires, (self.hi_pupil_res,self.hi_pupil_res) )
eps = 1e-6
ap_arr[np.abs(ap_arr)<eps] = 0
ap_arr_hires[np.abs(ap_arr_hires)<eps] = 0
self.ap_data = ap_arr_hires.reshape((hi_pupil_res,hi_pupil_res))
self.ap = hc.Field(ap_arr.flatten(),self.pupil_grid)
self.ap_hires = hc.Field(ap_arr_hires.flatten(),self.pupil_grid_hires)
## we need to make two DMs, one sampled on the low res pupil grid and another on the hi res pupil grid
if not fast_boot:
# this stuff can be skipped if we're not using the telescope to sim ao correction
act_spacing = diameter / num_DM_acts
influence_funcs = hc.make_gaussian_influence_functions(self.pupil_grid,num_DM_acts,act_spacing)
self.DM = hc.DeformableMirror(influence_funcs)
influence_funcs_hires = hc.make_gaussian_influence_functions(self.pupil_grid_hires,num_DM_acts,act_spacing)
self.DM_hires = hc.DeformableMirror(influence_funcs_hires)
## make the rest of our optics (besides PIAA/collimator)
self.pwfs = hc.PyramidWavefrontSensorOptics(self.pupil_grid,wavelength_0=wavelength_0)
self.detector = hc.NoiselessDetector()
num_airy = 16
## focal grid set up. linear resolution in pixels is 2 * q * num_airy\
self.q, self.num_airy = int(self.hi_pupil_res/num_airy/2), num_airy
self.focal_grid = hc.make_focal_grid(q = self.q, num_airy = num_airy, f_number = fnum, reference_wavelength=wavelength_0)
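# with q = hi_pupil_res / (2 * num_airy), the focal grid spans 2*q*num_airy = hi_pupil_res
# pixels across, matching the high-res pupil sampling used for propagation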
#self.focal_grid = hc.make_focal_grid(q = 16, num_airy = 16, f_number = fnum, reference_wavelength=wavelength_0)
self.ref_image = None
self.rmat = None
## pupil -> focal and focal -> pupil propagators
self.propagator = hc.FraunhoferPropagator(self.pupil_grid,self.focal_grid,focal_length=diameter*fnum)
self.focal_length = diameter*fnum
## misc other stuff that is useful to cache/save
self.t_arr = None
self.DMshapes = None
self.psgen = None
self.apodize = None
self.apodize_backwards = None
self.collimate = None
self.collimator_grid = None
self.beam_radius = None
self.current_phase_screen = None
self.wf_pupil,self.wf_pupil_hires,self.wf_focal = None,None,None
self.PIAA_args,self.col_args = None,None
self.inject_TT = None
def get_args(self):
fp0 = self.fp0
wl0 = self.reference_wavelength
OSL = 0
vel = self.vel
dt = self.dt
diam = self.diameter
fnum = self.fnum
num_acts = self.num_acts
obstr_frac = self.obstr_frac
seed = self.seed
col_args,PIAA_args = self.col_args,self.PIAA_args
if col_args is None:
col_args = (0,0)
if PIAA_args is None:
PIAA_args = (0,0,0,0)
else:
PIAA_args = PIAA_args[1:]
return [fp0,wl0,OSL,vel,dt,diam,fnum,num_acts,obstr_frac,seed,*col_args,*PIAA_args]
def rescale(self,fnum):
""" change the F# of the optical system"""
self.fnum = fnum
self.focal_grid = hc.make_focal_grid(q = self.q, num_airy = self.num_airy, f_number = fnum, reference_wavelength=self.reference_wavelength)
if self.collimate is None and self.apodize is None:
# no collimation or piaa
self.propagator = hc.FraunhoferPropagator(self.pupil_grid,self.focal_grid,focal_length=self.diameter*fnum)
elif self.collimate is not None and self.apodize is None:
# just collimation
self.propagator = hc.FraunhoferPropagator(self.collimator_grid,self.focal_grid,focal_length=fnum*2*self.beam_radius)
else:
# piaa (which needs collimation, from both simulation and practical considerations)
self.propagator = hc.FraunhoferPropagator(self.collimator_grid,self.focal_grid,focal_length=fnum*2*self.beam_radius*0.55)
def init_collimator(self,beam_radius):
"""the collimator runs on a higher res than the low res grid used for DM correction to prevent phase aliasing"""
col_res = self.hi_pupil_res
self.beam_radius = beam_radius
print("setting up collimator...")
self.collimator_grid = hc.make_pupil_grid(col_res, diameter = beam_radius*2)
self.propagator = hc.FraunhoferPropagator(self.collimator_grid,self.focal_grid,focal_length=self.fnum*2*beam_radius) # what's the focal length after PIAA???
self.focal_length = self.fnum*2*beam_radius
def _inner_(wf):
_power = wf.total_power
new_wf = hc.Wavefront(hc.Field(wf.electric_field,self.collimator_grid), wf.wavelength)
# make sure power is conserved
new_wf.total_power = _power
return new_wf
self.collimate = _inner_
print("collimator setup complete")
def init_PIAA(self,beam_radius,sep,inner=0,outer=3,IOR=1.48,mode="FR"):
''' make a PIAA lens system that maps planar wavefronts to truncated Gaussians.
beam radius is preserved, with the inner edge at <inner * obstruction fraction> and
the outer edge of the truncated Gaussian is at <outer * sigma>.
'''
#modes: FR, RT,RM
self.PIAA_args = (beam_radius,sep,inner,outer,IOR)
#Olivier's suggestion, shrink by a factor of 3 (mult by outer to get it in units of sigma)
inner = self.obstr_frac * inner * outer
print("setting up PIAA lenses...")
collimator_grid = self.collimator_grid if self.collimator_grid is not None else self.pupil_grid
#adjusting the focal length so that PIAA and no PIAA psfs are same size on screen (the coeff is empirically determined)
self.propagator = hc.FraunhoferPropagator(self.collimator_grid,self.focal_grid,focal_length=self.fnum*2*beam_radius*0.55)
self.focal_length = self.fnum*2*beam_radius*0.55
r1,r2 = PIAA.make_remapping_gauss_annulus(self.pupil_plane_res[0],self.obstr_frac,inner,outer)
r1 *= beam_radius
r2 *= beam_radius
z1,z2 = PIAA.make_PIAA_lenses(r1,r2,IOR,IOR,sep)
if mode == "FR":
self.apodize,self.apodize_backwards = PIAA.fresnel_apodizer(collimator_grid,beam_radius,sep,r1,r2,z1,z2,IOR,IOR)
elif mode == "RT":
self.apodize = RTPIAA.RT_apodize(r1,r2,z1,z2,IOR,IOR,sep,self.reference_wavelength,nres=self.hi_pupil_res,ret_wf=True)
else:
self.apodize = RTPIAA.remap_apodize(r1,r2,inner,outer,beam_radius,self.hi_pupil_res)  # NOTE: the original passed an undefined name `res`; self.hi_pupil_res is assumed here
print("PIAA setup complete")
def init_PIAA_LP0m(self,m,beam_radius,lens_sep,rcore,ncore,nclad,wl=None,IOR=1.48,inner_trunc=0.0,outer_trunc=-1.0,use_RT=False):
''' all units meters. initialize PIAA lenses which remap to a (potentially truncated) LP0m mode. outer_trunc=-1.0 sets PIAA lens to preserve beam size
(roughly)
'''
self.PIAA_args = (beam_radius,lens_sep,inner_trunc,outer_trunc,IOR)
if outer_trunc == -1.0:
outer_trunc = None
print("setting up PIAA lenses...")
collimator_grid = self.collimator_grid
#adjusting the focal length so that PIAA and no PIAA psfs are same size on screen (the coeff is empirically determined)
self.propagator = hc.FraunhoferPropagator(self.collimator_grid,self.focal_grid,focal_length=self.fnum*2*beam_radius*0.55)
self.focal_length = self.fnum*2*beam_radius*0.55
if wl is None:
wl = self.reference_wavelength
r1,r2 = PIAA.make_remapping_LP0m(m,1024,self.obstr_frac,beam_radius,rcore,ncore,nclad,wl,self.focal_length,inner_trunc,outer_trunc)
#plt.plot(np.zeros_like(r1),r1,color='white',marker='.',ls='None')
#plt.plot(np.ones_like(r2),r2,color='white',marker='.',ls='None')
#plt.show()
z1,z2 = PIAA.make_PIAA_lenses(r1,r2,IOR,IOR,lens_sep)
if not use_RT:
self.apodize,self.apodize_backwards = PIAA.fresnel_apodizer(collimator_grid,beam_radius,lens_sep,r1,r2,z1,z2,IOR,IOR)
else:
self.apodize = RTPIAA.RT_apodize(r1,r2,z1,z2,IOR,IOR,lens_sep,wl,nres=self.hi_pupil_res,ret_wf=True,nlaunch=2048)
print("PIAA setup complete")
def gen_wf(self,wl=None):
if wl is None:
wl = self.wavelength
return hc.Wavefront(self.ap,wl)
def read_out(self, wf, poisson = False):
self.detector.integrate(wf,self.dt)
if poisson:
image = hc.large_poisson(self.detector.read_out()).astype(float)
else:
image = self.detector.read_out()
image /= image.sum()
return image
def calibrate_DM(self, rcond = 0.1, fname_append=None, force_new = False):
"""Calculate and save the imagae reconstruction matrix for the AO system"""
from os import path
if fname_append is not None:
fname = "rmat_"+str(self.DM.num_actuators)+"_"+str(rcond).replace(".","")+"_"+fname_append
else:
fname = "rmat_"+str(self.DM.num_actuators)+"_"+str(rcond).replace(".","")
#first we make a reference image for an unaberrated wavefront passing through the pwfs
wf = hc.Wavefront(self.ap,wavelength=self.reference_wavelength)
wf_pwfs = self.pwfs.forward(wf)
self.ref_image = self.read_out(wf_pwfs,poisson=False)
if path.exists(fname+".npy") and not force_new:
print("trying to load reconstruction matrix from "+fname)
rmat = np.load(fname+str(".npy"))
print("loaded cached rmat")
else:
print("computing reconstruction matrix")
#compute the interaction matrix relating incoming abberations to wfs response
probe_disp = 0.01 * self.wavelength
slopes = []
for i in range(self.DM.num_actuators):
printProgressBar(i,self.DM.num_actuators)
slope = 0
for s in (-1,1):
disps = np.zeros((self.DM.num_actuators,))
disps[i] = s*probe_disp
self.DM.actuators = disps
wf_dm = self.DM.forward(wf) #pass through DM
wf_dm_pwfs = self.pwfs.forward(wf_dm) #pass through wfs
image = self.read_out(wf_dm_pwfs)
slope += s * (image - self.ref_image) / (2*probe_disp)
slopes.append(slope)
print("matrix inversion...")
basis = hc.ModeBasis(slopes)
rmat = hc.inverse_tikhonov(basis.transformation_matrix,rcond = rcond, svd = None)
np.save(fname,rmat)
self.DM.actuators = np.zeros((self.DM.num_actuators,))
self.rmat = rmat
return rmat
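# Summary of the calibration above as a worked relation (clarifying comment only,
# restating the code, not changing it): each actuator i is poked by +/- probe_disp
# and its slope column is
#     s_i = (I(+probe) - I(-probe)) / (2 * probe_disp)
# stacking the s_i gives the interaction matrix, and rmat is its Tikhonov-regularised
# pseudo-inverse, which later maps WFS difference images back to actuator commands.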
def make_turb(self,r0,wl0,T,v=10,seed = None):
""" set up phase screen generator according Mike's code. flow direction is fixed straight up because i am lazy. will
will be replaced with my phase screen generator when testing is complete
"""
if seed is None:
seed = 123456789
# save the atmos params
self.fp0 = r0
self.seed = seed
self.wl0 = wl0
self.OSL = "N/A"
self.vel = v
self.dt = T
size = self.pupil_grid_diam
sampling_scale = size / self.hi_pupil_res
if self.remove_TT == True:
filt = zk.high_pass(3)
self.psgen = screen.PhaseScreenGenerator(size, sampling_scale, v, 0, T, r0, wl0, wl0, seed=seed,filter_func=filt,filter_scale=self.diameter/2)
else:
self.psgen = screen.PhaseScreenGenerator(size, sampling_scale, v, 0, T, r0, wl0, wl0, seed=seed)
def get_screen(self):
'''advances timestep and gets the next phase screen'''
self.current_phase_screen = self.psgen.generate()
def propagate_through_turb(self,wf,downsample=False,screen=None):
'''propagate through turbulence, assuming alternate turbulence method (Mike's code) has been set up'''
wf = wf.copy()
if screen is None:
phase_screen = self.current_phase_screen * self.reference_wavelength / wf.wavelength
else:
phase_screen = screen * self.reference_wavelength / wf.wavelength
if downsample:
# if not doing full optical propagation, downsample the phase screen
phase_screen = resize(phase_screen, (self.lo_pupil_res,self.lo_pupil_res) )
wf.electric_field *= np.exp(1j * phase_screen.flatten())
return wf
def setup_TT_injector(self,rms,wavelength=None):
'''rms amplitude is at reference wavelength'''
if wavelength is None:
wavelength = self.reference_wavelength
xa=ya=np.linspace(-1,1,self.hi_pupil_res)
xg,yg=np.meshgrid(xa,ya)
tip = zk.Zj_cart(2)(xg,yg) * self.reference_wavelength / wavelength
tilt = zk.Zj_cart(3)(xg,yg) * self.reference_wavelength / wavelength
self.rs = np.random.RandomState(seed=123456789)
def _inner_(wf:hc.Wavefront):
tip_amp = self.rs.normal(scale=rms)
tilt_amp = self.rs.normal(scale=rms)
wf = wf.copy()
wf.electric_field *= np.exp(1.j*tip_amp*tip.flatten())
wf.electric_field *= np.exp(1.j*tilt_amp*tilt.flatten())
return wf
self.inject_TT = _inner_
return
def update_DM(self,wf_lowres,leakage,gain):
'''takes a low-res pupil plane wavefront and updates DM according to leaky integrator'''
wf_turb = self.propagate_through_turb(wf_lowres,True)
wf_dm = self.DM.forward(wf_turb)
wf_pyr = self.pwfs.forward(wf_dm)
wfs_image = self.read_out(wf_pyr,poisson=False) #DONT ENABLE POISSON it messes w/ random number generation!
diff_image = wfs_image-self.ref_image
#leaky integrator correction algorithm
self.DM.actuators = (1-leakage) * self.DM.actuators - gain * self.rmat.dot(diff_image)
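# Restating the leaky-integrator update above as a worked equation (comment only,
# no behaviour change): with actuator vector a_k, reconstruction matrix R and WFS
# difference image ds_k = s_k - s_ref,
#     a_{k+1} = (1 - leakage) * a_k - gain * (R @ ds_k)
# the leakage term slowly bleeds off stale commands while gain sets how aggressively
# each new measurement is applied.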
def propagate(self,wf_hires,screen=None,get_TT_stats=False):
self.DM_hires.actuators = self.DM.actuators
_wf = self.propagate_through_turb(wf_hires,False,screen)
_wf = self.DM_hires.forward(_wf)
#optionally, now add extra tip tilt
if self.inject_TT is not None:
_wf = self.inject_TT(_wf)
if self.collimate is not None:
_wf = self.collimate(_wf)
if self.apodize is not None:
_wf = self.apodize(_wf)
_wf = self.propagator.forward(_wf)
return _wf
def half_propagate(self,wf_hires):
self.DM_hires.actuators = self.DM.actuators
_wf = self.propagate_through_turb(wf_hires,False)
_wf = self.DM_hires.forward(_wf)
if self.collimate is not None:
_wf = self.collimate(_wf)
return _wf
def save_args_to_file(self,f):
_args = f.create_group("tele_args")
_args.create_dataset("fp0",data=self.fp0)
_args.create_dataset("wl0",data=self.wl0)
_args.create_dataset("OSL",data=self.OSL)
_args.create_dataset("vel",data=self.vel)
_args.create_dataset("dt",data=self.dt)
_args.create_dataset("diam",data=self.diameter)
_args.create_dataset("fnum",data = self.fnum)
_args.create_dataset("num_acts",data = self.num_acts)
_args.create_dataset("obstr_frac",data = self.obstr_frac)
_args.create_dataset("seed",data = self.seed,dtype = int)
_args.create_dataset("res",data = self.hi_pupil_res,dtype = int)
_args.create_dataset("remove_TT",data = self.remove_TT,dtype = bool)
if self.collimate is not None:
_args.create_dataset("beam_radius",data=self.beam_radius)
if self.apodize is not None:
_args.create_dataset("PIAA_args",data=self.PIAA_args)
def get_perfect_wfs(self,wl=None):
"""compute and save perfect pupil plane and focal plane wavefronts"""
if wl is None:
wf_pupil = hc.Wavefront(self.ap,wavelength = self.wavelength)
wf_pupil_hires = hc.Wavefront(self.ap_hires,wavelength = self.wavelength)
else:
wf_pupil = hc.Wavefront(self.ap,wavelength = wl)
wf_pupil_hires = hc.Wavefront(self.ap_hires,wavelength = wl)
wf_pupil.total_power = 1
wf_pupil_hires.total_power = 1
wf_focal = wf_pupil_hires.copy()
if self.collimate is not None:
wf_focal = self.collimate(wf_focal)
if self.apodize is not None:
wf_focal = self.apodize(wf_focal)
wf_focal = self.propagator.forward(wf_focal)
# note that the total power in the focal plane is not 1. some power gets deflected
# away due to turbulence.
self.wf_pupil,self.wf_pupil_hires,self.wf_focal = wf_pupil,wf_pupil_hires,wf_focal
return wf_pupil,wf_pupil_hires,wf_focal
def run_closed_loop(self,leakage,gain,fileout,run_for = 10,freq = 1000,save_every = 100, wl=None):
"""like the original function but use's Mike's phase screen code"""
#reset DM
self.DM.actuators[:] = 0.
t_arr = np.linspace(0,run_for, int(run_for*freq) + 1)
wf_pupil,wf_pupil_hires,wf_focal = self.wf_pupil,self.wf_pupil_hires,self.wf_focal
perfect_p = get_u(wf_pupil_hires)
perfect_u = get_u(wf_focal)
num_saves = int(len(t_arr)/save_every)
DMshapes = np.empty((num_saves,len(self.DM.actuators)))
avg = np.zeros_like(perfect_u,dtype=float)
psf_arr = np.zeros((num_saves,perfect_u.shape[0],perfect_u.shape[1]),dtype=np.complex128)
pupil_arr =
|
np.zeros((num_saves,perfect_p.shape[0],perfect_p.shape[1]),dtype=np.complex128)
|
numpy.zeros
|
from __future__ import division # For Python 2
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import itertools
import os
import json
import numpy as np
import seaborn as sns
MAX_BW = 10e6
STATS_DICT = {"backlog": 0, "olimit": 1,
"drops": 2, "bw_rx": 3, "bw_tx": 4}
NUM_IFACES = 6
NUM_ACTIONS = 4
def check_plt_dir(plt_name):
plt_dir = os.path.dirname(plt_name)
if not plt_dir == '' and not os.path.exists(plt_dir):
print("Folder %s does not exist! Creating..." % plt_name)
os.makedirs(plt_dir)
def running_mean(x, N=100):
if (len(x) < N or N < 1):
return x
cumsum = np.cumsum(np.insert(x, 0, 0))
return (cumsum[int(N):] - cumsum[:-int(N)]) / float(N)
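# Illustrative doctest-style sketch for running_mean (values are made up, not part
# of the original script): an N-sample boxcar average computed with the cumulative
# sum trick returns len(x) - N + 1 points.
# >>> running_mean(np.arange(10), N=2)
# array([0.5, 1.5, 2.5, 3.5, 4.5, 5.5, 6.5, 7.5, 8.5])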
def parse_config(results_dir):
with open(results_dir + "/test_config.json") as conf_file:
return json.load(conf_file)
def load_file(filename):
out = []
with open(filename, 'rb') as f:
fsz = os.fstat(f.fileno()).st_size
while f.tell() < fsz:
item = np.load(f)
if item.size > 0:
out.append(item)
item = None
flat_out = [x for sublist in out for x in sublist]
out = None
return np.array(flat_out)
def plot_barchart(plt_stats, plt_name):
algos = plt_stats.keys()
fig, ax = plt.subplots(2, 1, figsize=(20, 10))
ax_overlimits = ax[0]
ax_drops = ax[1]
ax_overlimits.set_ylabel('overlimit avg')
ax_drops.set_ylabel('drop avg')
ax_overlimits.get_xaxis().set_visible(False)
bar_overlimits = []
bar_drops = []
bar_labels = []
for algo in algos:
bar_overlimits.append(plt_stats[algo]["overlimits"])
bar_drops.append(plt_stats[algo]["drops"])
bar_labels.append(algo)
ax_overlimits.bar(range(len(bar_labels)), bar_overlimits,
tick_label=bar_labels)
ax_drops.bar(range(len(bar_labels)), bar_drops, tick_label=bar_labels)
plt.savefig(plt_name + "_bar.pdf")
plt.savefig(plt_name + "_bar.png")
plt.gcf().clear()
def plot_lineplot(plt_stats, num_timesteps, plt_name):
algos = plt_stats.keys()
# get the maximum and minimum values of each metric across algorithms
reward_max = -np.inf
reward_min = np.inf
queue_max = 0
bw_max = 0
for i, algo in enumerate(algos):
if np.amax(plt_stats[algo]["rewards"]) > reward_max:
reward_max = np.amax(plt_stats[algo]["rewards"])
if np.amin(plt_stats[algo]["rewards"]) < reward_min:
reward_min =
|
np.amin(plt_stats[algo]["rewards"])
|
numpy.amin
|
import sys
import numpy as np
from scipy.sparse import csr_matrix
if sys.version_info[0] >= 3:
from topn import topn as ct
from topn import topn_threaded as ct_thread
else:
import topn as ct
import topn_threaded as ct_thread
def awesome_topn(r, c, d, ntop, n_rows=-1, n_jobs=1):
"""
r, c, and d are 1D numpy arrays all of the same length N.
This function will return arrays rn, cn, and dn of length n <= N such
that the set of triples {(rn[i], cn[i], dn[i]) : 0 < i < n} is a subset of
{(r[j], c[j], d[j]) : 0 < j < N} and that for every distinct value
x = rn[i], dn[i] is among the first ntop existing largest d[j]'s whose
r[j] = x.
Input:
r and c: two 1D integer arrays of the same length
d: 1D array of single or double precision floating point type of the
same length as r or c
ntop: maximum number of maximum d's returned
n_rows: an int. If > -1 it will replace output rn with Rn the
index pointer array for the compressed sparse row (CSR) matrix
whose elements are {C[rn[i], cn[i]] = dn: 0 < i < n}. This matrix
will have its number of rows = n_rows. Thus the length of Rn is
n_rows + 1
n_jobs: number of threads, must be >= 1
Output:
(rn, cn, dn) where rn, cn, dn are all arrays as described above, or
(Rn, cn, dn) where Rn is described above
"""
dtype = r.dtype
assert c.dtype == dtype
idx_dtype = np.int32
rr = r.astype(idx_dtype)
len_r = len(r)
if (n_rows + 1) > len_r:
rr.resize(n_rows + 1, refcheck=False)
cc = c.astype(idx_dtype)
dd = d.copy()
new_len = ct_thread.topn_threaded(
rr, cc, dd,
ntop,
n_rows,
n_jobs
)
rr.resize((new_len if n_rows < 0 else (n_rows + 1)), refcheck=False)
cc.resize(new_len, refcheck=False)
dd.resize(new_len, refcheck=False)
return rr, cc, dd
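# Hedged usage sketch for awesome_topn (kept as a comment because it needs the
# compiled topn extension; the arrays below are made up for illustration only):
# r = np.array([0, 0, 0, 1], dtype=np.int32)   # row indices
# c = np.array([0, 1, 2, 0], dtype=np.int32)   # column indices
# d = np.array([0.1, 0.9, 0.5, 0.3])           # values
# rn, cn, dn = awesome_topn(r, c, d, ntop=2)   # keeps at most the 2 largest d's per row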
def awesome_hstack_topn(blocks, ntop, sort=True, use_threads=False, n_jobs=1):
"""
Returns, in CSR format, the matrix formed by horizontally stacking the
sequence of CSR matrices in parameter 'blocks', with only the largest ntop
elements of each row returned. Also, each row will be sorted in
descending order only when
ntop < total number of columns in blocks or sort=True,
otherwise the rows will be unsorted.
:param blocks: list of CSR matrices to be stacked horizontally.
:param ntop: int. The maximum number of elements to be returned for
each row.
:param sort: bool. Each row of the returned matrix will be sorted in
descending order only when ntop < total number of columns in blocks
or sort=True, otherwise the rows will be unsorted.
:param use_threads: bool. Will use the multi-threaded versions of this
routine if True otherwise the single threaded version will be used.
In multi-core systems setting this to True can lead to acceleration.
:param n_jobs: int. When use_threads=True, denotes the number of threads
that are to be spawned by the multi-threaded routines. Recommended
value is number of cores minus one.
Output:
(scipy.sparse.csr_matrix) matrix in CSR format
"""
n_blocks = len(blocks)
r = np.concatenate([b.indptr for b in blocks])
c = np.concatenate([b.indices for b in blocks])
d = np.concatenate([b.data for b in blocks])
n_cols = np.array([b.shape[1] for b in blocks]).astype(c.dtype)
M = blocks[0].shape[0]
N = np.sum(n_cols)
if len(d) > 0:
hstack_indptr = np.empty(M + 1, dtype=c.dtype)
hstack_indices = np.empty(len(c), dtype=c.dtype)
hstack_data = np.empty(len(d), dtype=d.dtype)
if (ntop < N) or sort:
if not use_threads:
ct.sparse_topn(
n_blocks, M, n_cols,
r, c, d,
ntop,
hstack_indptr, hstack_indices, hstack_data
)
else:
ct_thread.sparse_topn_threaded(
n_blocks, M, n_cols,
r, c, d,
ntop,
hstack_indptr, hstack_indices, hstack_data,
n_jobs
)
else:
if not use_threads:
ct.sparse_hstack(
n_blocks, M, n_cols,
r, c, d,
hstack_indptr, hstack_indices, hstack_data
)
else:
ct_thread.sparse_hstack_threaded(
n_blocks, M, n_cols,
r, c, d,
hstack_indptr, hstack_indices, hstack_data,
n_jobs
)
else:
hstack_indptr = np.zeros(M + 1, dtype=c.dtype)
hstack_indices = np.empty(0, dtype=c.dtype)
hstack_data = np.empty(0, dtype=d.dtype)
return csr_matrix((hstack_data, hstack_indices, hstack_indptr), shape=(M, N))
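# Hedged usage sketch for awesome_hstack_topn (comment only, since it relies on the
# compiled topn extension; the matrices are made up for illustration only):
# A = csr_matrix(np.array([[1., 0., 3.], [0., 2., 0.]]))
# B = csr_matrix(np.array([[0., 5.], [4., 0.]]))
# C = awesome_hstack_topn([A, B], ntop=2)   # shape (2, 5), at most 2 stored values per row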
def awesome_hstack(blocks, use_threads=False, n_jobs=1):
"""
Returns, in CSR format, the matrix formed by horizontally stacking the
sequence of CSR matrices in parameter blocks.
:param blocks: list of CSR matrices to be stacked horizontally.
:param use_threads: bool. Will use the multi-threaded versions of this
routine if True otherwise the single threaded version will be used.
In multi-core systems setting this to True can lead to acceleration.
:param n_jobs: int. When use_threads=True, denotes the number of threads
that are to be spawned by the multi-threaded routines. Recommended
value is number of cores minus one.
Output:
(scipy.sparse.csr_matrix) matrix in CSR format
"""
n_blocks = len(blocks)
r = np.concatenate([b.indptr for b in blocks])
c =
|
np.concatenate([b.indices for b in blocks])
|
numpy.concatenate
|
import numpy as np
from scipy.ndimage.interpolation import zoom
import keras.backend as K
def grad_cam(input_model, images, layer_name, cls=-1, method = "naive",resize_to_input=True):
#check input shape first. whether its a batch or not.
batch = True
if len(images.shape) == 3:
images = np.expand_dims(images,axis=0)
batch = False
#image shape will be (batch,H,W,channel)
H = images.shape[1]
W = images.shape[2]
cam = []
for i in range(images.shape[0]):
if cls == -1:
_cls = np.argmax(input_model.predict(images[i:i+1]))
else:
_cls = cls
y_c = input_model.output[0, _cls]
conv_output = input_model.get_layer(layer_name).output
#print(i)
if method == "naive":
grads = K.gradients(y_c, conv_output)[0]
gradient_function = K.function([input_model.input, K.learning_phase()], [conv_output, grads])
_output, _grads_val = gradient_function([images[i:i+1],0])
_output, _grads_val = _output[0,:,:,:], _grads_val[0, :, :, :]
_weights = np.mean(_grads_val, axis=(0, 1))
_cam = np.dot(_output, _weights)
elif method == "gradcampp":
grads = K.gradients(y_c, conv_output)[0]
first = K.exp(y_c)*grads
second = K.exp(y_c)*grads*grads
third = K.exp(y_c)*grads*grads*grads  # third-power term for Grad-CAM++ (the original repeated the squared term here)
gradient_function = K.function([input_model.input, K.learning_phase()], [first,second,third, conv_output, grads])
conv_first_grad, conv_second_grad,conv_third_grad, conv_output, grads_val = gradient_function([images[i:i+1],0])
global_sum = np.sum(conv_output[0].reshape((-1,conv_first_grad[0].shape[2])), axis=0)
alpha_num = conv_second_grad[0]
alpha_denom = conv_second_grad[0]*2.0 + conv_third_grad[0]*global_sum.reshape((1,1,conv_first_grad[0].shape[2]))
alpha_denom = np.where(alpha_denom != 0.0, alpha_denom, np.ones(alpha_denom.shape))
alphas = alpha_num/alpha_denom
_weights = np.maximum(conv_first_grad[0], 0.0)
alpha_normalization_constant = np.sum(
|
np.sum(alphas, axis=0)
|
numpy.sum
|
from __future__ import division
from matplotlib import pyplot as plt
import numpy as np
# Input description
rows = 4
cols = 4
start = (0,0)
goal = (3,3)
badStates = [ (1,2) ]
# Parameters
gamma = 0.99
probA = 0.9
probB = 0.05
# epsilon = 0.05
epsilon = 0.2
sigma = 1e-1
rewardGoal = 100
rewardBad = -70
rewardGeneral = -1
numEpisodes = 10000
actions = {
"L" : (-1,0),
"R" : (1,0),
"U" : (0,1),
"D" : (0,-1)
}
ACard = len(actions.keys())
Q = np.zeros((rows, cols, ACard))
actionIndices = {
"L" : 0,
"R" : 1,
"U" : 2,
"D" : 3
}
Nsa = np.zeros((rows, cols, ACard))
def whatHappens(action):
happens = np.random.choice(["intended", "lateral"], p=[probA, 2*probB])
if happens == "intended":
a = action
else:
if action == "L" or action == "R":
a = np.random.choice(["U", "D"])
else:
a = np.random.choice(["L", "R"])
return a
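# Worked transition probabilities for whatHappens (illustrative arithmetic only):
# with probA = 0.9 and probB = 0.05, the intended action is executed with
# probability 0.9 and a lateral slip occurs with probability 2*probB = 0.1,
# split evenly so each of the two lateral actions has probability 0.05.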
def nextState(state, action):
a = whatHappens(action)
tryStateX = state[0] + actions[a][0]
tryStateY = state[1] + actions[a][1]
if tryStateX >= rows or tryStateX < 0 or tryStateY >= cols or tryStateY < 0:
s = state
else:
s = (tryStateX, tryStateY)
return s
def getReward(state):
if state == goal:
return rewardGoal
elif state in badStates:
return rewardBad
else:
return rewardGeneral
def takeAction(state):
probabilities = np.zeros(ACard)
aStar = np.argmax(Q[state[0]][state[1]])
probabilities = [epsilon / ACard] * ACard
probabilities[aStar] = 1 - epsilon + (epsilon / ACard)
a = np.random.choice(["L", "R", "U", "D"], p=probabilities)
return a
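# Worked example of the epsilon-greedy probabilities built above (illustrative
# arithmetic only): with epsilon = 0.2 and ACard = 4, every action starts at
# epsilon/ACard = 0.05 and the greedy action gets 1 - 0.2 + 0.05 = 0.85,
# so the four probabilities sum to 1.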
def calcExpectation(state):
exp = 0
aStar =
|
np.argmax(Q[state[0]][state[1]])
|
numpy.argmax
|
#!/usr/bin/python3
#
# Original code by <NAME>.:
# https://stackoverflow.com/questions/26936094/python-load-data-and-do-multi-gaussian-fit
#
# Author: <NAME>
# Date: 09/2020
#
# Plot fitted data
#
import numpy as np
import matplotlib.pyplot as plt
from scipy import optimize
INDEX_INPUT = 3
INDEX_STATIC = 7
INDEX_DYNAMIC = 8
INDEX_LASER = 10
data = np.genfromtxt('dualRailAS_0mW.csv', delimiter=';')
def gaussian(x, height, center, width, offset):
return height*np.exp(-(x - center)**2/(2*width**2)) + offset
def three_gaussians(x, h1, c1, w1, h2, c2, w2, h3, c3, w3, offset):
return (gaussian(x, h1, c1, w1, offset=0) +
gaussian(x, h2, c2, w2, offset=0) +
gaussian(x, h3, c3, w3, offset=0) + offset)
def two_gaussians(x, h1, c1, w1, h2, c2, w2, offset):
return three_gaussians(x, h1, c1, w1, h2, c2, w2, 0,0,1, offset)
errfunc3 = lambda p, x, y: (three_gaussians(x, *p) - y)**2
errfunc2 = lambda p, x, y: (two_gaussians(x, *p) - y)**2
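# Hedged fitting sketch (not part of the original script; the initial guess is a
# placeholder): the squared-residual lambdas above are intended for use with
# scipy.optimize.leastsq, e.g.
# guess3 = [h1, c1, w1, h2, c2, w2, h3, c3, w3, offset]  # placeholder numbers
# optimum3, success = optimize.leastsq(errfunc3, guess3, args=(static_X, static_histRel))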
# compute hist
COL_STATIC = []
COL_DYNAMIC = []
COL_LASER = []
COL_INPUT = []
BIN_SIZE = 0.000001 # 1uA
STEP = 0.001 # 1mA
ROUND = 3
for i in range(1, len(data[:, INDEX_STATIC])):
COL_STATIC.append(round(float(int(data[i, INDEX_STATIC]/BIN_SIZE))*STEP,ROUND))
COL_DYNAMIC.append(round(float(int(data[i, INDEX_DYNAMIC]/BIN_SIZE))*STEP,ROUND))
COL_LASER.append(round(float(int(data[i, INDEX_LASER]/BIN_SIZE))*STEP,ROUND))
COL_INPUT.append(data[i, INDEX_INPUT])
static_hist, bin_edges = np.histogram(COL_STATIC, bins = sorted(set(COL_STATIC)))
dynamic_hist, bin_edges = np.histogram(COL_DYNAMIC, bins = sorted(set(COL_DYNAMIC)))
laser_hist, bin_edges = np.histogram(COL_LASER, bins = sorted(set(COL_LASER)))
static_X=np.array(sorted(set(COL_STATIC))[0:-1])
dynamic_X=np.array(sorted(set(COL_DYNAMIC))[0:-1])
laser_X=np.array(sorted(set(COL_LASER))[0:-1])
static_histRel = np.array([])
dynamic_histRel =
|
np.array([])
|
numpy.array
|
# In most cases it is enough to train once with fixed sn and sf (b is assumed to be always fixed)
# and optionally S and U, depending on the data set, with about 50 to 100 iterations
owd = 'C:/Users/flo9fe/Desktop/GIT_IP_true/python_ip_true'
#owd = '/usr/local/home/GIT_IP_true/python_ip_true'
import os
os.chdir(owd)
from DEEPvSSGP_opt_SV2_IP import DEEPvSSGP_opt
from scipy.optimize import minimize
import scipy.io as sio
import numpy as np
np.set_printoptions(precision=2, suppress=True)
from time import gmtime, strftime, time
strftime("%Y-%m-%d %H:%M:%S", gmtime())
os.chdir('data')
dataset = 'load_python'
run = input('Enter something: ')
mat = sio.loadmat(dataset + '.mat', squeeze_me=True) # specify filename to load
os.chdir(owd)
non_rec = 0 # choose 1, if your setting in matlab was non_rec = 'on'; otherwise 0
X = mat['X'] # input data, structured
y = mat['y'] # output data
lengthscale = mat['lengthscale'] # lengthscales l
lengthscale_p = mat['lengthscale_p'] # lengthscales p
sn = mat['sn'] # noise parameter
sf = mat['sf'] # power parameter
S = mat['S'] # spectral points
MU = mat['MU'] # variational latent state
SIGMA = mat['SIGMA'] # variational latent state variance
U = np.array(mat['U'], dtype=np.float64) # pseudo input points
b = mat['b'] # phases
D = mat['D'] # input dimensions
layers = mat['layers'] # layers
order = mat['order'] # time horizon
X = np.require(X,dtype=None,requirements='A')
y = np.require(y,dtype=None,requirements='A')
lengthscale = np.require(lengthscale,dtype=None,requirements='A')
lengthscale_p = np.require(lengthscale_p,dtype=None,requirements='A')
sn = np.require(sn,dtype=None,requirements='A')
sf = np.require(sf,dtype=None,requirements='A')
S = np.require(S,dtype=None,requirements='A')
MU = np.require(MU,dtype=None,requirements='A')
SIGMA = np.require(SIGMA,dtype=None,requirements='A')
U = np.require(U,dtype=None,requirements='A')
b = np.require(b,dtype=None,requirements='A')
D = np.require(D,dtype=None,requirements='A')
layers =
|
np.require(layers,dtype=None,requirements='A')
|
numpy.require
|
from os.path import abspath, dirname, join, isfile, normpath, relpath
from pandas.testing import assert_frame_equal
from numpy.testing import assert_allclose
from scipy.interpolate import interp1d
import matplotlib.pylab as plt
from datetime import datetime
import mhkit.wave as wave
from io import StringIO
import pandas as pd
import numpy as np
import contextlib
import unittest
import netCDF4
import inspect
import pickle
import json
import sys
import os
import time
from random import seed, randint
testdir = dirname(abspath(__file__))
datadir = normpath(join(testdir,relpath('../../examples/data/wave')))
class TestResourceSpectrum(unittest.TestCase):
@classmethod
def setUpClass(self):
omega = np.arange(0.1,3.5,0.01)
self.f = omega/(2*np.pi)
self.Hs = 2.5
self.Tp = 8
df = self.f[1] - self.f[0]
Trep = 1/df
self.t = np.arange(0, Trep, 0.05)
@classmethod
def tearDownClass(self):
pass
def test_pierson_moskowitz_spectrum(self):
S = wave.resource.pierson_moskowitz_spectrum(self.f,self.Tp)
Tp0 = wave.resource.peak_period(S).iloc[0,0]
error = np.abs(self.Tp - Tp0)/self.Tp
self.assertLess(error, 0.01)
def test_bretschneider_spectrum(self):
S = wave.resource.bretschneider_spectrum(self.f,self.Tp,self.Hs)
Hm0 = wave.resource.significant_wave_height(S).iloc[0,0]
Tp0 = wave.resource.peak_period(S).iloc[0,0]
errorHm0 = np.abs(self.Tp - Tp0)/self.Tp
errorTp0 = np.abs(self.Hs - Hm0)/self.Hs
self.assertLess(errorHm0, 0.01)
self.assertLess(errorTp0, 0.01)
def test_surface_elevation_seed(self):
S = wave.resource.bretschneider_spectrum(self.f,self.Tp,self.Hs)
sig = inspect.signature(wave.resource.surface_elevation)
seednum = sig.parameters['seed'].default
eta0 = wave.resource.surface_elevation(S, self.t)
eta1 = wave.resource.surface_elevation(S, self.t, seed=seednum)
assert_frame_equal(eta0, eta1)
def test_surface_elevation_phasing(self):
S = wave.resource.bretschneider_spectrum(self.f,self.Tp,self.Hs)
eta0 = wave.resource.surface_elevation(S, self.t)
sig = inspect.signature(wave.resource.surface_elevation)
seednum = sig.parameters['seed'].default
np.random.seed(seednum)
phases = np.random.rand(len(S)) * 2 * np.pi
eta1 = wave.resource.surface_elevation(S, self.t, phases=phases)
assert_frame_equal(eta0, eta1)
def test_surface_elevation_phases_np_and_pd(self):
S0 = wave.resource.bretschneider_spectrum(self.f,self.Tp,self.Hs)
S1 = wave.resource.bretschneider_spectrum(self.f,self.Tp,self.Hs*1.1)
S = pd.concat([S0, S1], axis=1)
phases_np = np.random.rand(S.shape[0], S.shape[1]) * 2 * np.pi
phases_pd = pd.DataFrame(phases_np, index=S.index, columns=S.columns)
eta_np = wave.resource.surface_elevation(S, self.t, phases=phases_np)
eta_pd = wave.resource.surface_elevation(S, self.t, phases=phases_pd)
assert_frame_equal(eta_np, eta_pd)
def test_surface_elevation_frequency_bins_np_and_pd(self):
S0 = wave.resource.bretschneider_spectrum(self.f,self.Tp,self.Hs)
S1 = wave.resource.bretschneider_spectrum(self.f,self.Tp,self.Hs*1.1)
S = pd.concat([S0, S1], axis=1)
eta0 = wave.resource.surface_elevation(S, self.t)
f_bins_np = np.array([np.diff(S.index)[0]]*len(S))
f_bins_pd = pd.DataFrame(f_bins_np, index=S.index, columns=['df'])
eta_np = wave.resource.surface_elevation(S, self.t, frequency_bins=f_bins_np)
eta_pd = wave.resource.surface_elevation(S, self.t, frequency_bins=f_bins_pd)
assert_frame_equal(eta0, eta_np)
assert_frame_equal(eta_np, eta_pd)
def test_surface_elevation_moments(self):
S = wave.resource.jonswap_spectrum(self.f, self.Tp, self.Hs)
eta = wave.resource.surface_elevation(S, self.t)
dt = self.t[1] - self.t[0]
Sn = wave.resource.elevation_spectrum(eta, 1/dt, len(eta.values),
detrend=False, window='boxcar',
noverlap=0)
m0 = wave.resource.frequency_moment(S,0).m0.values[0]
m0n = wave.resource.frequency_moment(Sn,0).m0.values[0]
errorm0 = np.abs((m0 - m0n)/m0)
self.assertLess(errorm0, 0.01)
m1 = wave.resource.frequency_moment(S,1).m1.values[0]
m1n = wave.resource.frequency_moment(Sn,1).m1.values[0]
errorm1 = np.abs((m1 - m1n)/m1)
self.assertLess(errorm1, 0.01)
def test_surface_elevation_rmse(self):
S = wave.resource.jonswap_spectrum(self.f, self.Tp, self.Hs)
eta = wave.resource.surface_elevation(S, self.t)
dt = self.t[1] - self.t[0]
Sn = wave.resource.elevation_spectrum(eta, 1/dt, len(eta),
detrend=False, window='boxcar',
noverlap=0)
fSn = interp1d(Sn.index.values, Sn.values, axis=0)
rmse = (S.values - fSn(S.index.values))**2
rmse_sum = (np.sum(rmse)/len(rmse))**0.5
self.assertLess(rmse_sum, 0.02)
def test_jonswap_spectrum(self):
S = wave.resource.jonswap_spectrum(self.f, self.Tp, self.Hs)
Hm0 = wave.resource.significant_wave_height(S).iloc[0,0]
Tp0 = wave.resource.peak_period(S).iloc[0,0]
errorHm0 = np.abs(self.Tp - Tp0)/self.Tp
errorTp0 = np.abs(self.Hs - Hm0)/self.Hs
self.assertLess(errorHm0, 0.01)
self.assertLess(errorTp0, 0.01)
def test_plot_spectrum(self):
filename = abspath(join(testdir, 'wave_plot_spectrum.png'))
if isfile(filename):
os.remove(filename)
S = wave.resource.pierson_moskowitz_spectrum(self.f,self.Tp)
plt.figure()
wave.graphics.plot_spectrum(S)
plt.savefig(filename, format='png')
plt.close()
self.assertTrue(isfile(filename))
def test_plot_chakrabarti(self):
filename = abspath(join(testdir, 'wave_plot_chakrabarti.png'))
if isfile(filename):
os.remove(filename)
D = 5
H = 10
lambda_w = 200
wave.graphics.plot_chakrabarti(H, lambda_w, D)
plt.savefig(filename)
self.assertTrue(isfile(filename))
def test_plot_chakrabarti_np(self):
filename = abspath(join(testdir, 'wave_plot_chakrabarti_np.png'))
if isfile(filename):
os.remove(filename)
D = np.linspace(5, 15, 5)
H = 10 * np.ones_like(D)
lambda_w = 200 * np.ones_like(D)
wave.graphics.plot_chakrabarti(H, lambda_w, D)
plt.savefig(filename)
self.assertTrue(isfile(filename))
def test_plot_chakrabarti_pd(self):
filename = abspath(join(testdir, 'wave_plot_chakrabarti_pd.png'))
if isfile(filename):
os.remove(filename)
D = np.linspace(5, 15, 5)
H = 10 * np.ones_like(D)
lambda_w = 200 * np.ones_like(D)
df = pd.DataFrame([H.flatten(),lambda_w.flatten(),D.flatten()],
index=['H','lambda_w','D']).transpose()
wave.graphics.plot_chakrabarti(df.H, df.lambda_w, df.D)
plt.savefig(filename)
self.assertTrue(isfile(filename))
class TestResourceMetrics(unittest.TestCase):
@classmethod
def setUpClass(self):
omega = np.arange(0.1,3.5,0.01)
self.f = omega/(2*np.pi)
self.Hs = 2.5
self.Tp = 8
file_name = join(datadir, 'ValData1.json')
with open(file_name, "r") as read_file:
self.valdata1 = pd.DataFrame(json.load(read_file))
self.valdata2 = {}
file_name = join(datadir, 'ValData2_MC.json')
with open(file_name, "r") as read_file:
data = json.load(read_file)
self.valdata2['MC'] = data
for i in data.keys():
# Calculate elevation spectra
elevation = pd.DataFrame(data[i]['elevation'])
elevation.index = elevation.index.astype(float)
elevation.sort_index(inplace=True)
sample_rate = data[i]['sample_rate']
NFFT = data[i]['NFFT']
self.valdata2['MC'][i]['S'] = wave.resource.elevation_spectrum(elevation,
sample_rate, NFFT)
file_name = join(datadir, 'ValData2_AH.json')
with open(file_name, "r") as read_file:
data = json.load(read_file)
self.valdata2['AH'] = data
for i in data.keys():
# Calculate elevation spectra
elevation = pd.DataFrame(data[i]['elevation'])
elevation.index = elevation.index.astype(float)
elevation.sort_index(inplace=True)
sample_rate = data[i]['sample_rate']
NFFT = data[i]['NFFT']
self.valdata2['AH'][i]['S'] = wave.resource.elevation_spectrum(elevation,
sample_rate, NFFT)
file_name = join(datadir, 'ValData2_CDiP.json')
with open(file_name, "r") as read_file:
data = json.load(read_file)
self.valdata2['CDiP'] = data
for i in data.keys():
temp = pd.Series(data[i]['S']).to_frame('S')
temp.index = temp.index.astype(float)
self.valdata2['CDiP'][i]['S'] = temp
@classmethod
def tearDownClass(self):
pass
def test_kfromw(self):
for i in self.valdata1.columns:
f = np.array(self.valdata1[i]['w'])/(2*np.pi)
h = self.valdata1[i]['h']
rho = self.valdata1[i]['rho']
expected = self.valdata1[i]['k']
k = wave.resource.wave_number(f, h, rho)
calculated = k.loc[:,'k'].values
error = ((expected-calculated)**2).sum() # SSE
self.assertLess(error, 1e-6)
def test_kfromw_one_freq(self):
g = 9.81
f = 0.1
h = 1e9
w = np.pi*2*f # deep water dispersion
expected = w**2 / g
calculated = wave.resource.wave_number(f=f, h=h, g=g).values[0][0]
error =
|
np.abs(expected-calculated)
|
numpy.abs
|
import numpy
import unittest
import sys
PyRLA_dir = '../../'
sys.path.append(PyRLA_dir)
from optimization import lsr
def approx_lsr(sketch_size, sketch_type):
w_sketch_vec, obj_val = lsr.sketched_lsr(x_mat, y_vec, sketch_size=sketch_size, sketch_type=sketch_type)
dist = w_sketch_vec - w_opt_vec
dist = numpy.sum(dist ** 2)
return obj_val, dist
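# Note on the metric above (clarifying comment only): dist is the squared Euclidean
# distance ||w_sketch - w_opt||^2 between the sketched and exact least-squares
# solutions, so the tests below check that it shrinks as sketch_size grows.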
class TestLSR(unittest.TestCase):
def test_srft(self):
print('######## SRFT ########')
sketch_size = 3
obj_val1, dist1 = approx_lsr(sketch_size, 'srft')
print('Objective function value (s=' + str(sketch_size) + 'd): ' + str(obj_val1))
print('Distance to optimal (s=' + str(sketch_size) + 'd): ' + str(dist1))
sketch_size = 5
obj_val2, dist2 = approx_lsr(sketch_size, 'srft')
print('Objective function value (s=' + str(sketch_size) + 'd): ' + str(obj_val2))
print('Distance to optimal (s=' + str(sketch_size) + 'd): ' + str(dist2))
sketch_size = 10
obj_val3, dist3 = approx_lsr(sketch_size, 'srft')
print('Objective function value (s=' + str(sketch_size) + 'd): ' + str(obj_val3))
print('Distance to optimal (s=' + str(sketch_size) + 'd): ' + str(dist3))
self.assertTrue(dist3 < dist1)
def test_count(self):
print('######## Count Sketch ########')
sketch_size = 3
obj_val1, dist1 = approx_lsr(sketch_size, 'count')
print('Objective function value (s=' + str(sketch_size) + 'd): ' + str(obj_val1))
print('Distance to optimal (s=' + str(sketch_size) + 'd): ' + str(dist1))
sketch_size = 5
obj_val2, dist2 = approx_lsr(sketch_size, 'count')
print('Objective function value (s=' + str(sketch_size) + 'd): ' + str(obj_val2))
print('Distance to optimal (s=' + str(sketch_size) + 'd): ' + str(dist2))
sketch_size = 10
obj_val3, dist3 = approx_lsr(sketch_size, 'count')
print('Objective function value (s=' + str(sketch_size) + 'd): ' + str(obj_val3))
print('Distance to optimal (s=' + str(sketch_size) + 'd): ' + str(dist3))
self.assertTrue(dist3 < dist1)
def test_leverage(self):
print('######## Leverage Score Sampling ########')
sketch_size = 3
obj_val1, dist1 = approx_lsr(sketch_size, 'leverage')
print('Objective function value (s=' + str(sketch_size) + 'd): ' + str(obj_val1))
print('Distance to optimal (s=' + str(sketch_size) + 'd): ' + str(dist1))
sketch_size = 5
obj_val2, dist2 = approx_lsr(sketch_size, 'leverage')
print('Objective function value (s=' + str(sketch_size) + 'd): ' + str(obj_val2))
print('Distance to optimal (s=' + str(sketch_size) + 'd): ' + str(dist2))
sketch_size = 10
obj_val3, dist3 = approx_lsr(sketch_size, 'leverage')
print('Objective function value (s=' + str(sketch_size) + 'd): ' + str(obj_val3))
print('Distance to optimal (s=' + str(sketch_size) + 'd): ' + str(dist3))
self.assertTrue(dist3 < dist1)
def test_shrink(self):
print('######## Shrinked Leverage Score Sampling ########')
sketch_size = 3
obj_val1, dist1 = approx_lsr(sketch_size, 'shrink')
print('Objective function value (s=' + str(sketch_size) + 'd): ' + str(obj_val1))
print('Distance to optimal (s=' + str(sketch_size) + 'd): ' + str(dist1))
sketch_size = 5
obj_val2, dist2 = approx_lsr(sketch_size, 'shrink')
print('Objective function value (s=' + str(sketch_size) + 'd): ' + str(obj_val2))
print('Distance to optimal (s=' + str(sketch_size) + 'd): ' + str(dist2))
sketch_size = 10
obj_val3, dist3 = approx_lsr(sketch_size, 'shrink')
print('Objective function value (s=' + str(sketch_size) + 'd): ' + str(obj_val3))
print('Distance to optimal (s=' + str(sketch_size) + 'd): ' + str(dist3))
self.assertTrue(dist3 < dist1)
if __name__ == '__main__':
rawdata_mat = numpy.load(PyRLA_dir + 'data/YearPredictionMSD.npy', mmap_mode='r')
rawdata_mat = rawdata_mat[0:50000, :]
x_mat = rawdata_mat[:, 1:]
n_int, d_int = x_mat.shape
y_vec = rawdata_mat[:, 0].reshape((n_int, 1))
w_opt_vec = numpy.dot(
|
numpy.linalg.pinv(x_mat)
|
numpy.linalg.pinv
|
"""
The windfarm manager contains everything required to set up a
windfarm.
"""
import __main__
import os
### Get the name of program importing this package ###
if hasattr(__main__,"__file__"):
main_file = os.path.basename(__main__.__file__)
else:
main_file = "ipython"
### This checks if we are just doing documentation ###
if not main_file in ["sphinx-build", "__main__.py"]:
from dolfin import *
import numpy as np
from sys import platform
import math
import time
import shutil, copy
from scipy.special import gamma
import scipy.interpolate as interp
### Import the cumulative parameters ###
from windse import windse_parameters, BaseHeight, CalculateDiskTurbineForces, UpdateActuatorLineForce, RadialChordForce
### Check if we need dolfin_adjoint ###
if windse_parameters.dolfin_adjoint:
from dolfin_adjoint import *
### This import improves the plotter functionality on Mac ###
if platform == 'darwin':
import matplotlib
matplotlib.use('TKAgg')
import matplotlib.pyplot as plt
class GenericWindFarm(object):
"""
A GenericWindFarm contains the basic functions required by all wind farm objects.
Args:
dom (:meth:`windse.DomainManager.GenericDomain`): a windse domain object.
"""
def __init__(self, dom):
### save a reference of option and create local version specifically of domain options ###
self.params = windse_parameters
self.dom = dom
self.rd_first_save = True
self.fprint = self.params.fprint
self.tag_output = self.params.tag_output
self.debug_mode = self.params.debug_mode
### Init empty design variables ###
self.cl = None; self.mcl = None
self.cd = None; self.mcd = None
self.chord = None; self.mchord = None
self.x = None; self.mx = None
self.y = None; self.my = None
self.yaw = None; self.myaw = None
self.axial = None; self.maxial = None
### Update attributes based on params file ###
for key, value in self.params["wind_farm"].items():
if isinstance(value,list):
setattr(self,key,np.array(value))
else:
setattr(self,key,value)
### Check if we need to disable dolfin_adjoint annotation ###
self.extra_kwarg = {}
if self.params.dolfin_adjoint:
self.extra_kwarg["annotate"] = False
self.optimizing = False
if self.params.performing_opt_calc:
self.layout_bounds = self.params["optimization"]["layout_bounds"]
self.control_types = self.params["optimization"]["control_types"]
self.optimizing = True
def DebugOutput(self):
if self.debug_mode:
self.tag_output("min_x", np.min(self.x))
self.tag_output("max_x", np.max(self.x))
self.tag_output("avg_x", np.mean(self.x))
self.tag_output("min_y", np.min(self.y))
self.tag_output("max_y", np.max(self.y))
self.tag_output("avg_y", np.mean(self.y))
self.tag_output("min_z", np.min(self.z))
self.tag_output("max_z", np.max(self.z))
self.tag_output("avg_z", np.mean(self.z))
self.tag_output("min_yaw", np.min(self.yaw))
self.tag_output("max_yaw", np.max(self.yaw))
self.tag_output("avg_yaw", np.mean(self.yaw))
# x, y, z, yaw, chord,
def PlotFarm(self,show=False,filename="wind_farm",power=None):
"""
This function plots the locations of each wind turbine and
saves the output to output/.../plots/
:Keyword Arguments:
* **show** (*bool*): Default: False. Set True to display the plot; the figure is saved either way.
"""
if self.numturbs == 0:
return
### Create the path names ###
folder_string = self.params.folder+"/plots/"
file_string = self.params.folder+"/plots/"+filename+".pdf"
### Check if folder exists ###
if not os.path.exists(folder_string) and self.params.rank == 0: os.makedirs(folder_string)
### Create a list that outlines the extent of the farm ###
if self.optimizing and "layout" in self.control_types and self.layout_bounds != "wind_farm":
ex_list_x = [self.layout_bounds[0][0],self.layout_bounds[0][1],self.layout_bounds[0][1],self.layout_bounds[0][0],self.layout_bounds[0][0]]
ex_list_y = [self.layout_bounds[1][0],self.layout_bounds[1][0],self.layout_bounds[1][1],self.layout_bounds[1][1],self.layout_bounds[1][0]]
else:
ex_list_x = [self.ex_x[0],self.ex_x[1],self.ex_x[1],self.ex_x[0],self.ex_x[0]]
ex_list_y = [self.ex_y[0],self.ex_y[0],self.ex_y[1],self.ex_y[1],self.ex_y[0]]
### Generate and Save Plot ###
fig, ax = plt.subplots()
if hasattr(self.dom,"boundary_line"):
ax.plot(*self.dom.boundary_line/self.dom.xscale,c="k")
ax.plot(np.array(ex_list_x)/self.dom.xscale, np.array(ex_list_y)/self.dom.xscale,c="r")
### Plot Blades
for i in range(self.numturbs):
blade_n = [np.cos(self.yaw[i]),np.sin(self.yaw[i])]
rr = self.RD[i]/2.0
blade_x = np.array([self.x[i]+rr*blade_n[1],self.x[i]-rr*blade_n[1]])/self.dom.xscale
blade_y = np.array([self.y[i]-rr*blade_n[0],self.y[i]+rr*blade_n[0]])/self.dom.xscale
ax.plot(blade_x,blade_y,c='k',linewidth=2,zorder=1)
### Choose coloring for the turbines ###
if isinstance(power,(list,np.ndarray)):
coloring = power
else:
coloring = np.array(self.z)/self.dom.xscale
### Plot Hub Locations
p=ax.scatter(self.x/self.dom.xscale,self.y/self.dom.xscale,c=coloring,cmap="coolwarm",edgecolors=(0, 0, 0, 1),s=20,zorder=2)
# p=plt.scatter(self.x,self.y,c="k",s=70)
plt.xlim(self.dom.x_range[0]/self.dom.xscale,self.dom.x_range[1]/self.dom.xscale)
plt.ylim(self.dom.y_range[0]/self.dom.xscale,self.dom.y_range[1]/self.dom.xscale)
clb = plt.colorbar(p)
clb.ax.set_ylabel('Hub Elevation')
### Annotate ###
for i in range(self.numturbs):
ax.annotate(i, (self.x[i]/self.dom.xscale,self.y[i]/self.dom.xscale),(5,0),textcoords='offset pixels')
if power is None:
plt.title("Location of the Turbines")
elif isinstance(power,(list,np.ndarray)):
plt.title("Objective Value: {: 5.6f}".format(sum(power)))
else:
plt.title("Objective Value: {: 5.6f}".format(power))
plt.savefig(file_string, transparent=True)
if show:
plt.show()
plt.close()
def PlotChord(self,show=False,filename="chord_profiles",power=None, bounds=None):
### Create the path names ###
folder_string = self.params.folder+"/plots/"
file_string = self.params.folder+"/plots/"+filename+".pdf"
### Check if folder exists ###
if not os.path.exists(folder_string) and self.params.rank == 0: os.makedirs(folder_string)
### Calculate x values ###
x = np.linspace(0,1,self.blade_segments)
### Plot Chords ###
plt.figure()
plt.plot(x,self.baseline_chord,label="baseline",c="k")
if bounds is None:
lower=[]
upper=[]
c_avg = 0
for k, seg_chord in enumerate(self.baseline_chord):
modifier = 2.0
max_chord = self.max_chord
lower.append(seg_chord/modifier)
upper.append(max(min(seg_chord*modifier,max_chord),c_avg))
c_avg = (c_avg*k+seg_chord)/(k+1)
plt.plot(x,lower,"--r",label="Optimization Boundaries")
plt.plot(x,upper,"--r")
else:
plt.plot(x,bounds[0][-self.blade_segments:],"--r",label="Optimization Boundaries")
plt.plot(x,bounds[1][-self.blade_segments:],"--r")
for i in range(self.numturbs):
y = np.array(self.chord[i],dtype=float)
plt.plot(x,y,'.-',label=i)
plt.xlim(0,1)
if power is None:
plt.title("Chord along blade span")
elif isinstance(power,(list,np.ndarray)):
plt.title("Objective Value: {: 5.6f}".format(sum(power)))
else:
plt.title("Objective Value: {: 5.6f}".format(power))
plt.xlabel("Blade Span")
plt.ylabel("Chord")
plt.legend()
plt.savefig(file_string, transparent=True)
if show:
plt.show()
plt.close()
def SaveWindFarm(self,val=None,filename="wind_farm"):
### Create the path names ###
folder_string = self.params.folder+"/data/"
if val is not None:
file_string = self.params.folder+"/data/"+filename+"_"+repr(val)+".txt"
else:
file_string = self.params.folder+"/data/"+filename+".txt"
### Check if folder exists ###
if not os.path.exists(folder_string) and self.params.rank == 0: os.makedirs(folder_string)
### Define the header string ###
head_str="# x y HH Yaw Diameter Thickness Axial_Induction"
### Save text file ###
Sx = self.dom.xscale
output =
|
np.array([self.x/Sx, self.y/Sx, self.HH/Sx, self.yaw, self.RD/Sx, self.thickness/Sx, self.axial])
|
numpy.array
|
""" Routines for building qutrit gates and models """
#***************************************************************************************************
# Copyright 2015, 2019 National Technology & Engineering Solutions of Sandia, LLC (NTESS).
# Under the terms of Contract DE-NA0003525 with NTESS, the U.S. Government retains certain rights
# in this software.
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0 or in the LICENSE file in the root pyGSTi directory.
#***************************************************************************************************
import numpy as _np
from scipy import linalg as _linalg
from .. import objects as _objs
from ..tools import unitary_to_process_mx, change_basis, Basis
#Define 2 qubit to symmetric (+) antisymmetric space transformation A:
A = _np.matrix([[1, 0, 0, 0],
# [0,0,0,1],
[0, 1. / _np.sqrt(2), 1. / _np.sqrt(2), 0],
[0, 1. / _np.sqrt(2), -1. /
|
_np.sqrt(2)
|
numpy.sqrt
|
from __future__ import print_function
import os
import sys
import torch
import torch.utils.data
import pickle, gzip
import numpy as np
import scipy.io as sio
import cv2
import json
from PIL import Image, ImageDraw
from collections import defaultdict, Sequence
from pycocotools.cocoeval import COCOeval
from upsnet.config.config import config
from upsnet.dataset.json_dataset import JsonDataset, extend_with_flipped_entries, filter_for_training, add_bbox_regression_targets
from upsnet.dataset.base_dataset import BaseDataset
from upsnet.rpn.assign_anchor import add_rpn_blobs
from upsnet.bbox.sample_rois import sample_rois
from lib.utils.logging import logger
import pycocotools.mask as mask_util
class Cityscapes_CVRN(BaseDataset):
def __init__(self, image_sets, flip=False, proposal_files=None, phase='train', result_path=''):
super(Cityscapes_CVRN, self).__init__()
self.image_dirs = {
'train': os.path.join(config.dataset.dataset_path, 'images'),
'val': os.path.join(config.dataset.dataset_path, 'images'),
'test': os.path.join(config.dataset.dataset_path, 'images'),
'train_extra': os.path.join(config.dataset.dataset_path, 'images'),
'debug': os.path.join(config.dataset.dataset_path, 'images'),
}
self.anno_files = {
'train': 'instancesonly_gtFine_train.json',
'val': 'instancesonly_gtFine_val.json',
'test': 'image_info_test.json',
'train_extra': 'instancesonly_gtCoarse_train_extra.json',
'debug': 'instancesonly_gtFine_debug.json',
}
self.panoptic_json_file = os.path.join(config.dataset.dataset_path, 'annotations_7cls', 'cityscapes_fine_val.json')
self.panoptic_gt_folder = 'data/cityscapes/panoptic'
self.flip = flip
self.result_path = result_path
self.num_classes = 9
self.phase = phase
self.image_sets = image_sets
if image_sets[0] == 'demoVideo':
assert len(image_sets) == 1
assert phase == 'test'
im_path = [_.strip() for _ in open('data/cityscapes/split/demoVideo_img.txt', 'r').readlines()]
self.roidb = [{'image': _, 'flipped': False} for _ in im_path]
return
if proposal_files is None:
proposal_files = [None] * len(image_sets)
if phase == 'train' and len(image_sets) > 1:
# combine multiple datasets
roidbs = []
for image_set, proposal_file in zip(image_sets, proposal_files):
dataset = JsonDataset('cityscapes_' + image_set,
image_dir=self.image_dirs[image_set],
anno_file=os.path.join(config.dataset.dataset_path, 'annotations_7cls', self.anno_files[image_set]))
roidb = dataset.get_roidb(gt=True, proposal_file=proposal_file, crowd_filter_thresh=config.train.crowd_filter_thresh)
if flip:
if logger:
logger.info('Appending horizontally-flipped training examples...')
extend_with_flipped_entries(roidb, dataset)
roidbs.append(roidb)
roidb = roidbs[0]
for r in roidbs[1:]:
roidb.extend(r)
roidb = filter_for_training(roidb)
add_bbox_regression_targets(roidb)
else:
assert len(image_sets) == 1
self.dataset = JsonDataset('cityscapes_' + image_sets[0],
image_dir=self.image_dirs[image_sets[0]],
anno_file=os.path.join(config.dataset.dataset_path, 'annotations_7cls',
self.anno_files[image_sets[0]]))
roidb = self.dataset.get_roidb(gt=True, proposal_file=proposal_files[0],
crowd_filter_thresh=config.train.crowd_filter_thresh if phase != 'test' else 0)
if flip:
if logger:
logger.info('Appending horizontally-flipped training examples...')
extend_with_flipped_entries(roidb, self.dataset)
if phase != 'test':
roidb = filter_for_training(roidb)
add_bbox_regression_targets(roidb)
else:
if logger:
logger.info('Filtered roidb entries: {}'.format(len(roidb)))
else:
print('Filtered roidb entries: {}'.format(len(roidb)))
self.roidb = roidb
def __getitem__(self, index):
blob = defaultdict(list)
im_blob, im_scales = self.get_image_blob([self.roidb[index]])
if config.network.has_rpn:
if self.phase != 'test':
add_rpn_blobs(blob, im_scales, [self.roidb[index]])
data = {'data': im_blob,
'im_info': blob['im_info']}
label = {'roidb': blob['roidb'][0]}
for stride in config.network.rpn_feat_stride:
label.update({
'rpn_labels_fpn{}'.format(stride): blob['rpn_labels_int32_wide_fpn{}'.format(stride)].astype(
np.int64),
'rpn_bbox_targets_fpn{}'.format(stride): blob['rpn_bbox_targets_wide_fpn{}'.format(stride)],
'rpn_bbox_inside_weights_fpn{}'.format(stride): blob[
'rpn_bbox_inside_weights_wide_fpn{}'.format(stride)],
'rpn_bbox_outside_weights_fpn{}'.format(stride): blob[
'rpn_bbox_outside_weights_wide_fpn{}'.format(stride)]
})
else:
data = {'data': im_blob,
'im_info': np.array([[im_blob.shape[-2],
im_blob.shape[-1],
im_scales[0]]], np.float32),
}
# label = {'roidb': self.roidb[index]}
label = None
else:
if self.phase != 'test':
data = {'data': im_blob,
'im_info': blob['im_info']}
label = {'roidb': self.roidb[index]}
else:
data = {'data': im_blob,
'rois': np.hstack(
(np.zeros((self.roidb[index]['boxes'].shape[0], 1)), self.roidb[index]['boxes'])).astype(
np.float32),
'im_info': np.array([[im_blob.shape[-2],
im_blob.shape[-1],
im_scales[0]]], np.float32),
'id': self.roidb[index]['id']}
label = None
if config.network.has_fcn_head:
if self.phase != 'test':
seg_gt = np.array(Image.open(self.roidb[index]['image'].replace('images', 'labels_16cls').replace('leftImg8bit.png', 'gtFine_labelTrainIds.png')))
if self.roidb[index]['flipped']:
seg_gt = np.fliplr(seg_gt)
seg_gt = cv2.resize(seg_gt, None, None, fx=im_scales[0], fy=im_scales[0], interpolation=cv2.INTER_NEAREST)
label.update({'seg_gt': seg_gt})
label.update({'gt_classes': label['roidb']['gt_classes']})
label.update({'mask_gt': np.zeros((len(label['gt_classes']), im_blob.shape[-2], im_blob.shape[-1]))})
for i in range(len(label['gt_classes'])):
img = Image.new('L', (int(im_blob.shape[-1] / im_scales[0]), int(im_blob.shape[-2] / im_scales[0])), 0)
for j in range(len(label['roidb']['segms'][i])):
ImageDraw.Draw(img).polygon(tuple(label['roidb']['segms'][i][j]), outline=1, fill=1)
label['mask_gt'][i] = cv2.resize(np.array(img), None, None, fx=im_scales[0], fy=im_scales[0], interpolation=cv2.INTER_NEAREST)
if config.train.fcn_with_roi_loss:
gt_boxes = label['roidb']['boxes'][np.where(label['roidb']['gt_classes'] > 0)[0]]
gt_boxes = np.around(gt_boxes * im_scales[0]).astype(np.int32)
label.update({'seg_roi_gt': np.zeros((len(gt_boxes), config.network.mask_size, config.network.mask_size), dtype=np.int64)})
for i in range(len(gt_boxes)):
if gt_boxes[i][3] == gt_boxes[i][1]:
gt_boxes[i][3] += 1
if gt_boxes[i][2] == gt_boxes[i][0]:
gt_boxes[i][2] += 1
label['seg_roi_gt'][i] = cv2.resize(seg_gt[gt_boxes[i][1]:gt_boxes[i][3], gt_boxes[i][0]:gt_boxes[i][2]], (config.network.mask_size, config.network.mask_size), interpolation=cv2.INTER_NEAREST)
else:
pass
return data, label, index
# return data.copy(), label, index
def get_image_blob(self, roidb):
"""Builds an input blob from the images in the roidb at the specified
scales.
"""
num_images = len(roidb)
# Sample random scales to use for each image in this batch
if self.phase == 'train':
scale_inds = np.random.randint(
0, high=len(config.train.scales), size=num_images
)
else:
scale_inds = np.random.randint(
0, high=len([config.test.scales[0]]), size=num_images
)
processed_ims = []
im_scales = []
for i in range(num_images):
im = cv2.imread(roidb[i]['image'])
assert im is not None, \
'Failed to read image \'{}\''.format(roidb[i]['image'])
if roidb[i]['flipped']:
im = im[:, ::-1, :]
if self.phase == 'train':
target_size = config.train.scales[scale_inds[i]]
im, im_scale = self.prep_im_for_blob(
im, config.network.pixel_means, [target_size], config.train.max_size
)
else:
target_size = config.test.scales[0]
im, im_scale = self.prep_im_for_blob(
im, config.network.pixel_means, [target_size], config.test.max_size
)
im_scales.append(im_scale[0])
processed_ims.append(im[0].transpose(2, 0, 1))
# Create a blob to hold the input images
assert len(processed_ims) == 1
blob = processed_ims[0]
return blob, im_scales
def vis_all_mask(self, all_boxes, all_masks, save_path=None):
"""
visualize all detections in one image
:param all_boxes: per-class list of per-image numpy.ndarray([[x1 y1 x2 y2 score]])
:param all_masks: per-class list of per-image encoded (RLE) instance masks
:param save_path: directory to save figures to; if None, show them interactively
:return:
"""
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from matplotlib.patches import Polygon
import random
import cv2
palette = {
'person': (220, 20, 60),
'rider': (255, 0, 0),
'car': (0, 0, 142),
'bus': (0, 60, 100),
'motorcycle': (0, 0, 230),
'bicycle': (119, 11, 32),
#
'road': (128, 64, 128),
'sidewalk': (244, 35, 232),
'building': (70, 70, 70),
'wall': (102, 102, 156),
'fence': (190, 153, 153),
'pole': (153, 153, 153),
'sky': (70, 130, 180),
'traffic light': (250, 170, 30),
'traffic sign': (220, 220, 0),
'vegetation': (107, 142, 35)
}
name2id = {
'road': 0,
'sidewalk': 1,
'building': 2,
'wall': 3,
'fence': 4,
'pole': 5,
'traffic light': 6,
'traffic sign': 7,
'vegetation': 8,
'terrain': 9
}
self.classes = [
'__background__',
'person',
'rider',
'car',
'bus',
'motorcycle',
'bicycle',
]
if save_path is not None:
os.makedirs(save_path, exist_ok=True)
for i in range(len(self.roidb)):
im = np.array(Image.open(self.roidb[i]['image']))
fig = plt.figure(frameon=False)
fig.set_size_inches(im.shape[1] / 200, im.shape[0] / 200)
ax = plt.Axes(fig, [0., 0., 1., 1.])
ax.axis('off')
fig.add_axes(ax)
ax.imshow(im)
for j, name in enumerate(self.classes):
if name == '__background__':
continue
boxes = all_boxes[j][i]
segms = all_masks[j][i]
if segms == []:
continue
masks = mask_util.decode(segms)
for k in range(boxes.shape[0]):
score = boxes[k, -1]
mask = masks[:, :, k]
if score < 0.5:
continue
bbox = boxes[k, :]
ax.add_patch(
plt.Rectangle((bbox[0], bbox[1]), bbox[2] - bbox[0], bbox[3] - bbox[1],
fill=False, edgecolor='g', linewidth=2, alpha=0.5)
)
# ax.text(bbox[0], bbox[1] - 2, name + '{:0.2f}'.format(score).lstrip('0'), fontsize=6, family='serif',
# bbox=dict(facecolor='g', alpha=0.4, pad=0, edgecolor='none'), color='white')
# contour, hier = cv2.findContours(mask.copy(), cv2.RETR_CCOMP, cv2.CHAIN_APPROX_NONE)
_, contour, hier = cv2.findContours(mask.copy(), cv2.RETR_CCOMP, cv2.CHAIN_APPROX_NONE)
color = (palette[name][0] / 255, palette[name][1] / 255, palette[name][2] / 255)
for c in contour:
ax.add_patch(
Polygon(
c.reshape((-1, 2)),
fill=True, facecolor=color, edgecolor='w', linewidth=1, alpha=0.5
)
)
if save_path is None:
plt.show()
else:
fig.savefig(os.path.join(save_path, '{}.png'.format(self.roidb[i]['image'].split('/')[-1][:-16])), dpi=100)
plt.close('all')
def evaluate_masks(
self,
all_boxes,
all_segms,
output_dir,
):
res_file = os.path.join(
output_dir, 'segmentations_' + self.dataset.name + '_results.json'
)
results = []
for cls_ind, cls in enumerate(self.dataset.classes):
if cls == '__background__':
continue
if cls_ind >= len(all_boxes):
break
cat_id = self.dataset.category_to_id_map[cls]
results.extend(self.segms_results_one_category(all_boxes[cls_ind], all_segms[cls_ind], cat_id))
if logger:
logger.info(
'Writing segmentation results json to: {}'.format(
os.path.abspath(res_file)))
with open(res_file, 'w') as fid:
json.dump(results, fid)
coco_dt = self.dataset.COCO.loadRes(str(res_file))
coco_eval = COCOeval(self.dataset.COCO, coco_dt, 'segm')
coco_eval.evaluate()
coco_eval.accumulate()
# self.log_detection_eval_metrics(coco_eval, os.path.join(output_dir, 'instance_segmentation_results.txt'))
mAP, AP_all = self.log_detection_eval_metrics_ap50(coco_eval)
return mAP, AP_all
def get_pallete(self):
pallete_raw = np.zeros((256, 3)).astype('uint8')
pallete =
|
np.zeros((256, 3))
|
numpy.zeros
|
#!/usr/bin/env python
# Spacenet challenge
# Creates and trains CNN to recognize buildings
# vklimkov Dec 2016
import argparse
import os
import sys
import numpy as np
import glob
from random import shuffle
import matplotlib.pyplot as plt
import keras
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import Reshape
from keras.layers import TimeDistributed
from keras.layers import BatchNormalization
from keras.layers import Activation
from keras.layers import Dropout
from keras.layers import Flatten
from keras.constraints import maxnorm
from keras.optimizers import adam
from keras.layers.convolutional import Convolution2D
from keras.layers.convolutional import Deconvolution2D
from keras.layers.convolutional import MaxPooling2D
from keras.layers.convolutional import ZeroPadding2D
from keras.utils import np_utils
from keras import backend as K
# fix random seed for reproducibility
seed = 42
np.random.seed(seed)
DIM = 128
CHANNELS = 4
CLASSES = 2
BATCHSIZE = 16
# this was calculated from the assumption:
# 776 kB per training sample * 3 augmentation moves = 2.273 MB per sample
# 12 GB RAM on GPU. (200)
DATAGEN_PERSTEP = BATCHSIZE * 80
EPOCHS = 100
SAVE_MODEL_EVERY_NTH_EPOCH = 10
# just vertical and horizontal flip
AUGMENTATION = 3
LEARNING_RATE = 0.0001
class DataGenerator:
def __init__(self, indir, targetdir, perstep=DATAGEN_PERSTEP,
minibatch=BATCHSIZE):
self.train_idx = 0
self.perstep = perstep
self.minibatch = minibatch
print('training step %d' % self.perstep)
self.indir = indir
self.targetdir = targetdir
# some sanity checks
innames = glob.glob('%s/*.npy' % indir)
# dont forget to remove "in_" from names
innames = [os.path.splitext(os.path.basename(x))[0][3:]
for x in innames]
targetnames = glob.glob('%s/*.npy' % targetdir)
# dont forget to remove "target_" from names
targetnames = [os.path.splitext(os.path.basename(x))[0][7:]
for x in targetnames]
if len(innames) != len(targetnames):
raise RuntimeError('different amount of in and target files')
targetnames = set(targetnames)
for i in innames:
if i not in targetnames:
raise RuntimeError('%s is missing from target directory' % i)
shuffle(innames)
self.train_names = innames
print('total num of training imgs %d' % len(self.train_names))
def flow_training(self):
train_to = min(len(self.train_names), self.train_idx + self.perstep)
samples = (train_to - self.train_idx) * AUGMENTATION
x_train = np.zeros((samples, DIM, DIM, CHANNELS))
y_train = np.zeros((samples, DIM * DIM, CLASSES))
sample_idx = 0
for i in range(self.train_idx, train_to):
name = self.train_names[i]
x = np.load(os.path.join(self.indir, 'in_%s.npy' % name))
y = np.load(os.path.join(self.targetdir, 'target_%s.npy' % name))
x_train[sample_idx, :, :, :] = x
y_train[sample_idx, :, :] = y
sample_idx += 1
# augmentation
yy = np.reshape(y, (DIM, DIM, CLASSES))
# left-to-right mirror
x_aug = np.fliplr(x)
y_aug = np.fliplr(yy)
y_aug = np.reshape(y_aug, (DIM * DIM, CLASSES))
x_train[sample_idx, :, :, :] = x_aug
y_train[sample_idx, :, :] = y_aug
sample_idx += 1
# up to down mirror
x_aug = np.flipud(x)
y_aug = np.flipud(yy)
y_aug = np.reshape(y_aug, (DIM * DIM, CLASSES))
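# The augmentation above mirrors the input tile and its flattened label map
# together, so the per-pixel classes stay aligned with the flipped pixels.
# A minimal sketch of the idea on a toy array (sizes are illustrative, not the
# script's DIM/CLASSES constants).
import numpy as np

x = np.arange(4).reshape(2, 2, 1)                  # toy 2x2 "image", 1 channel
y = np.eye(2)[np.array([[0, 1], [1, 0]])]          # one-hot labels, shape (2, 2, 2)

x_lr = np.fliplr(x)                                # mirror image left-right
y_lr = np.fliplr(y)                                # mirror labels the same way
y_lr_flat = np.reshape(y_lr, (2 * 2, 2))           # flatten like the training target

assert (y_lr[0, 0] == y[0, 1]).all()               # pixel (0,1) label moved to (0,0)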
import pathlib
import numpy as np
import PIL.Image
import torch
import torch.utils.data
import torch.nn.functional
import random
import utils_dset
########################################################################################################################
def read_file_lines(filename):
"""
Reads a text file, skipping comments and empty lines.
:param filename:
:return:
"""
with open(filename) as f:
lines = [l.replace('\n', '') for l in f if (len(l) > 0 and l[0] != '#')]
return lines
########################################################################################################################
def parse_camera_lines(lines):
"""
Parse metadata: youtube URL + cam intrinsics + extrinsics
:param lines:
:return:
"""
# The first line contains the YouTube video URL.
# Format of each subsequent line: timestamp fx fy px py k1 k2 row0 row1 row2
# Column number: 0 1 2 3 4 5 6 7-10 11-14 15-18
youtube_url = lines[0]
# record_defaults = ([['']] + [[0.0]] * 18)
data = [[float(n) if idx > 0 else int(n) for idx, n in enumerate(l.split(' '))] for l in
lines[1:]] # simple parse csv by splitting by space
# We don't accept non-zero k1 and k2.
assert (0 == len(list([x for x in data if x[5] != 0.0 or x[6] != 0.0])))
timestamps = [l[0] for l in data]
intrinsics = [l[1:5] for l in data] # tf.stack(data[1:5], axis=1)
poses = [[l[7:11], l[11:15], l[15:19], [0., 0., 0., 1.]] for l in
data] # utils.build_matrix([data[7:11], data[11:15], data[15:19]])
# In camera files, the video id is the last part of the YouTube URL, it comes
# after the =.
youtubeIDOffset = youtube_url.find("/watch?v=") + len('/watch?v=')
youtube_id = youtube_url[youtubeIDOffset:]
return {
'youtube_id': youtube_id,
'timestamps': timestamps,
'intrinsics': intrinsics,
'poses': poses, # poses is world to camera (c_f_w or w_t_c)
}
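# A hedged example of what parse_camera_lines expects and returns; the URL and
# the numbers below are made up for illustration only.
lines = [
    'https://www.youtube.com/watch?v=abc123',
    '100 0.9 1.6 0.5 0.5 0 0 1 0 0 0 0 1 0 0 0 0 1 0',
]
cam = parse_camera_lines(lines)
print(cam['youtube_id'])    # 'abc123'
print(cam['timestamps'])    # [100]
print(cam['intrinsics'])    # [[0.9, 1.6, 0.5, 0.5]]
print(cam['poses'][0][3])   # homogeneous bottom row [0.0, 0.0, 0.0, 1.0]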
########################################################################################################################
class DsetRealEstate1(torch.utils.data.Dataset):
"""
Real estate dataset
"""
def __init__(self, dataset_path, is_valid=False, min_dist=200e3, max_dist=1500e3,
im_w=200, im_h=200, num_planes=10, num_views=3, max_w=600, no_crop = False):
print(f'DsetRealEstate: dataset_path={dataset_path}, is_valid={is_valid}')
self.is_valid = is_valid
self.dataset_path = pathlib.Path(dataset_path)
self.min_dist = min_dist
self.max_dist = max_dist
self.im_w = im_w
self.im_h = im_h
self.max_w = max_w
self.max_h = None
self.num_planes = num_planes
self.num_views = num_views
self.no_crop = no_crop
metadata_path = self.dataset_path / 'RealEstate10K' / ('test' if is_valid else 'train')
scenes = []
for p in metadata_path.iterdir():
lines = read_file_lines(p)
scene = parse_camera_lines(lines)
scenes.append(scene)
self.scenes = [scene for scene in scenes if self.get_scene_idx(scene)]
def __len__(self):
return len(self.scenes)
def get_scene_idx(self, scene):
tss = scene['timestamps']
n_ts = len(tss)
if n_ts < self.num_views + 1:
return None
img_range = list(range(n_ts))
ref_random_range = np.array(img_range)
np.random.shuffle(ref_random_range)
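# A shuffled copy of the frame indices is a common way to pick a random
# reference view; this is only a standalone sketch, not the remainder of
# get_scene_idx.
import numpy as np

frame_ids = np.arange(8)            # candidate frame indices for one scene
candidates = frame_ids.copy()
np.random.shuffle(candidates)       # in-place shuffle, original order untouched
ref = int(candidates[0])            # first shuffled index serves as reference
assert ref in frame_ids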
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This module provides tools for simulating rank deficient operators."""
import itertools
import numpy
from openfermion.config import EQ_TOLERANCE
import openfermion.utils as utils
def get_chemist_two_body_coefficients(two_body_coefficients, spin_basis=True):
r"""Convert two-body operator coefficients to low rank tensor.
The input is a two-body fermionic Hamiltonian expressed as
$\sum_{pqrs} h_{pqrs} a^\dagger_p a^\dagger_q a_r a_s$
We will convert this to the chemistry convention expressing it as
$\sum_{pqrs} g_{pqrs} a^\dagger_p a_q a^\dagger_r a_s$
but without the spin degree of freedom.
In the process of performing this conversion, constants and one-body
terms come out, which will be returned as well.
Args:
two_body_coefficients (ndarray): an N x N x N x N
numpy array giving the $h_{pqrs}$ tensor.
spin_basis (bool): True if the two-body terms are passed in spin
orbital basis. False if already in spatial orbital basis.
Returns:
one_body_correction (ndarray): an N x N array of floats giving
coefficients of the $a^\dagger_p a_q$ terms that come out.
chemist_two_body_coefficients (ndarray): an N x N x N x N numpy array
giving the $g_{pqrs}$ tensor in chemist notation.
Raises:
TypeError: Input must be two-body number conserving
FermionOperator or InteractionOperator.
"""
# Initialize.
n_orbitals = two_body_coefficients.shape[0]
chemist_two_body_coefficients = numpy.transpose(two_body_coefficients,
[0, 3, 1, 2])
# If the specification was in spin-orbitals, chop down to spatial orbitals
# assuming a spin-symmetric interaction.
if spin_basis:
n_orbitals = n_orbitals // 2
alpha_indices = list(range(0, n_orbitals * 2, 2))
beta_indices = list(range(1, n_orbitals * 2, 2))
chemist_two_body_coefficients = chemist_two_body_coefficients[numpy.ix_(
alpha_indices, alpha_indices, beta_indices, beta_indices)]
# Determine a one body correction in the spin basis from spatial basis.
one_body_correction = numpy.zeros((2 * n_orbitals, 2 * n_orbitals), complex)
for p, q, r, s in itertools.product(range(n_orbitals), repeat=4):
for sigma, tau in itertools.product(range(2), repeat=2):
if (q == r) and (sigma == tau):
one_body_correction[2 * p + sigma, 2 * s + tau] -= (
chemist_two_body_coefficients[p, q, r, s])
# Return.
return one_body_correction, chemist_two_body_coefficients
def low_rank_two_body_decomposition(two_body_coefficients,
truncation_threshold=1e-8,
final_rank=None,
spin_basis=True):
r"""Convert two-body operator into sum of squared one-body operators.
As in arXiv:1808.02625, this function decomposes
$\sum_{pqrs} h_{pqrs} a^\dagger_p a^\dagger_q a_r a_s$ as
$\sum_{l} \lambda_l (\sum_{pq} g_{lpq} a^\dagger_p a_q)^2$
l is truncated to take max value L so that
$\sum_{l=0}^{L-1} (\sum_{pq} |g_{lpq}|)^2 |\lambda_l| < x$
Args:
two_body_coefficients (ndarray): an N x N x N x N
numpy array giving the $h_{pqrs}$ tensor.
This tensor must be 8-fold symmetric (real integrals).
truncation_threshold (optional Float): the value of x, above.
final_rank (optional int): if provided, this specifies the value of
L at which to truncate. This overrides truncation_threshold.
spin_basis (bool): True if the two-body terms are passed in spin
orbital basis. False if already in spatial orbital basis.
Returns:
eigenvalues (ndarray of floats): length L array
giving the $\lambda_l$.
one_body_squares (ndarray of floats): L x N x N array of floats
corresponding to the value of $g_{pql}$.
one_body_correction (ndarray): One-body correction terms that result
from reordering to chemist ordering, in spin-orbital basis.
truncation_value (float): after truncation, this is the value
$\sum_{l=0}^{L-1} (\sum_{pq} |g_{lpq}|)^2 |\lambda_l| < x$
Raises:
TypeError: Invalid two-body coefficient tensor specification.
"""
# Initialize N^2 by N^2 interaction array.
one_body_correction, chemist_two_body_coefficients = (
get_chemist_two_body_coefficients(two_body_coefficients, spin_basis))
n_orbitals = chemist_two_body_coefficients.shape[0]
full_rank = n_orbitals**2
interaction_array = numpy.reshape(chemist_two_body_coefficients,
(full_rank, full_rank))
# Make sure interaction array is symmetric and real.
asymmetry = numpy.sum(
numpy.absolute(interaction_array - interaction_array.transpose()))
imaginary_norm = numpy.sum(numpy.absolute(interaction_array.imag))
if asymmetry > EQ_TOLERANCE or imaginary_norm > EQ_TOLERANCE:
raise TypeError('Invalid two-body coefficient tensor specification.')
# Decompose with exact diagonalization.
eigenvalues, eigenvectors = numpy.linalg.eigh(interaction_array)
# Get one-body squares and compute weights.
term_weights = numpy.zeros(full_rank)
one_body_squares = numpy.zeros((full_rank, 2 * n_orbitals, 2 * n_orbitals),
complex)
# Reshape and add spin back in.
for l in range(full_rank):
one_body_squares[l] = numpy.kron(
numpy.reshape(eigenvectors[:, l], (n_orbitals, n_orbitals)),
numpy.eye(2))
term_weights[l] = abs(eigenvalues[l]) * numpy.sum(
numpy.absolute(one_body_squares[l]))**2
# Sort by weight.
indices = numpy.argsort(term_weights)[::-1]
eigenvalues = eigenvalues[indices]
term_weights = term_weights[indices]
one_body_squares = one_body_squares[indices]
# Determine upper-bound on truncation errors that would occur.
cumulative_error_sum = numpy.cumsum(term_weights)
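# The cumulative sum over the weight-sorted terms is what lets a truncation
# rank be chosen against a threshold; a standalone sketch of that idea (this is
# not the exact continuation of the function above).
import numpy

term_weights = numpy.array([5.0, 2.0, 0.5, 0.05, 0.01])  # sorted descending
cumulative = numpy.cumsum(term_weights)
residual = term_weights.sum() - cumulative   # weight dropped if truncated after l
threshold = 0.1
rank = int(numpy.argmax(residual < threshold)) + 1
print(rank)                                  # 3: keeping 3 terms leaves 0.06 < 0.1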
# -*- coding:Utf-8 -*-
"""
This module handles CORMORAN measurement data
CorSer Class
============
.. autoclass:: CorSer
:members:
Notes
-----
Useful members
distdf : distance between radio nodes (122 columns)
devdf : device data frame
"""
#import mayavi.mlab as mlabc
import os
import pdb
import sys
import pandas as pd
import numpy as np
import numpy.ma as ma
import scipy.io as io
from pylayers.util.project import *
from pylayers.util.pyutil import *
from pylayers.mobility.ban.body import *
from pylayers.gis.layout import *
import pylayers.antprop.antenna as antenna
from matplotlib.widgets import Slider, CheckButtons, Button, Cursor
from pylayers.signal.DF import *
# from moviepy.editor import *
from skimage import img_as_ubyte
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable
import pickle
try:
from tvtk.api import tvtk
from mayavi.sources.vtk_data_source import VTKDataSource
from mayavi import mlab
except:
print('Layout:Mayavi is not installed')
#Those lines handle incompatibility between mayavi and VTK
#and redirect noisy warning message into a log file
# import vtk
# output=vtk.vtkFileOutputWindow()
# output.SetFileName("mayaviwarninglog.tmp")
# vtk.vtkOutputWindow().SetInstance(output)
def cor_log(short=True):
""" display cormoran measurement campaign logfile
Parameters
----------
short : boolean
enable short version
Examples
--------
>>> from pylayers.measures.cormoran import *
>>> cor_log(short=True)
"""
filelog = os.path.join(os.environ['CORMORAN'],'RAW','Doc','MeasurementLog.csv')
log = pd.read_csv(filelog)
if short :
log['day'] = [x.split('/')[0] for x in log['Date'].values]
log['serie']=log['Meas Serie']
return log[['serie','day','Subject','techno','Short Notes']]
else:
return log
def time2npa(lt):
""" convert pd.datetime.time to numpy array
Parameters
----------
lt : pd.datetime.time
Returns
-------
ta : numpy array
time in seconds
"""
ta = (lt.microsecond*1e-6+
lt.second+
lt.minute*60+
lt.hour*3600)
return(ta)
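# Quick sanity check of the conversion on a plain datetime.time value.
import datetime

t = datetime.time(hour=1, minute=2, second=3, microsecond=500000)
print(time2npa(t))   # 3723.5  (1*3600 + 2*60 + 3 + 0.5)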
class CorSer(PyLayers):
""" Handle CORMORAN measurement data
Hikob data handling from CORMORAN measurement campaign
11/06/2014
single subject (Bernard and Nicolas)
12/06/2014
several subject (Jihad, Eric , Nicolas)
"""
def __init__(self,serie=6,day=11,source='CITI',layout=False):
"""
Parameters
----------
serie : int
day : int
source : string
Notes
-----
The environment variable CORMORAN indicates the location of the data directory
"""
assert (day in [11,12]),"wrong day"
try:
self.rootdir = os.environ['CORMORAN']
except:
raise NameError('Please add a CORMORAN environment variable \
pointing to the data')
# infos
self.serie = serie
self.day = day
self.loadlog()
if day == 11:
if serie in [7,8]:
raise AttributeError('Serie '+str(serie) + ' has no hkb data and will not be loaded')
if day ==12:
if serie in [17,18,19,20]:
raise AttributeError('Serie '+str(serie) + \
' has no hkb data and will not be loaded')
#Measures
if day==11:
self.stcr = [1,2,3,4,10,11,12,32,33,34,35,9,17,18,19,20,25,26]
self.shkb = [5,6,13,14,15,16,21,22,23,24,27,28,29,30,31,32,33,34,35]
self.sbs = [5,6,7,8,13,14,15,16,21,22,23,24,27,28,29,30,31,32,33,34,35]
self.mocap = [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35]
self.mocapinterf=[]
if day==12:
self.stcr = [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16]
self.shkb = [9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24]
self.sbs = [9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24]
self.mocap =[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24]
self.mocapinterf = [5,6,7,8,13,14,15,16,21,22,23,24,]
self.typ=''
# HIKOB
if serie in self.shkb:
self._loadhkb(serie=serie,day=day,source=source)
# IR-UWB TCR
if serie in self.stcr:
self._loadTCR(serie=serie,day=day)
# BeSpoon
if serie in self.sbs:
self._loadBS(serie=serie,day=day)
# set filename
if self.typ=='FULL':
self._filename = 'Sc' + self.scenario + '_S' + str(self.serie) + '_R' + str(self.run) + '_' + self.typ.capitalize()
else:
self._filename = 'Sc' + self.scenario + '_S' + str(self.serie) + '_R' + str(self.run) + '_' + self.typ
#Layout
if layout:
self.L= Layout('MOCAP-small2.lay')
# Load Infrastructure Nodes
self._loadinfranodes()
# Load cameras
self._loadcam()
#BODY & interferers
self.subject = str(self.log['Subject'].values[0].replace('jihad','Jihad')).split(' ')
#filter typos in self.subject
self.subject = [ x for x in self.subject if len(x)!=0 ]
if 'Jihad' in self.subject :
uj = self.subject.index('Jihad')
self.subject[uj]='Jihan'
if serie in self.mocap :
# load bodies from mocap file
self._loadbody(serie=serie,day=day)
self._distancematrix()
self._computedevpdf()
if isinstance(self.B,dict):
for b in self.B:
if hasattr(self,'L'):
self.B[b].traj.Lfilename=copy.copy(self.L._filename)
else:
self.B[b].traj.Lfilename='notloaded'
else :
self.B.traj.Lfilename=copy.copy(self.L._filename)
# reference time is tmocap
self.tmocap = self.B[self.subject[0]].time
# load offset dict
self.offset= self._load_offset_dict()
########################
#realign Radio on mocap
########################
# 1 - Resample radio time => mocap time
# 2 - (if available) apply offset
if ('BS' in self.typ) or ('FULL' in self.typ):
print( '\nBS data frame index: ',)
self._align_on_devdf(typ='BS')
print( 'Align on mocap OK...',)
try:
self._apply_offset('BS')
print ('time-offset applied OK')
except:
print ('WARNING time-offset NOT applied')
print ('BS offset not yet set => use self.offset_setter ')
if ('TCR' in self.typ) or ('FULL' in self.typ):
print ('\nTCR data frame index:', )
self._align_on_devdf(typ='TCR')
print ('Align on mocap OK...',)
try:
self._apply_offset('TCR')
print ('time-offset applied OK')
except:
print ('WARNING time-offset NOT applied')
print ('TCR offset not yet set => use self.offset_setter')
if ('HK' in self.typ) or ('FULL' in self.typ):
print ('\nHKB data frame index:',)
self._align_on_devdf(typ='HKB')
print ('Align on mocap OK...',)
try:
# self._apply_offset('HKB')
print ('time-offset applied OK')
except:
print ('WARNING time-offset NOT applied')
print ('HKB offset not yet set => use self.offset_setter')
print ('\nCreate distance Dataframe...',)
self._computedistdf()
print ('OK',)
def __repr__(self):
st = ''
st = st + 'filename : ' + self._filename + '\n'
st = st + 'filewear : ' + self.filewear + '\n'
st = st + 'filebody : ' + self.filebody + '\n'
st = st + 'filemocap : ' + self.filemocap + '\n'
st = st + 'Day : '+ str(self.day)+'/06/2014'+'\n'
st = st + 'Serie : '+ str(self.serie)+'\n'
st = st + 'Scenario : '+str(self.scenario)+'\n'
st = st + 'Run : '+ str(self.run)+'\n'
st = st + 'Type : '+ str(self.typ)+'\n'
st = st + 'Original Video Id : '+ str(self.video)+'\n'
st = st + 'Subject(s) : '
for k in self.subject:
st = st + k + ' '
st = st + '\n\n'
st = st+'Body available: ' + str('B' in dir(self)) + '\n\n'
try :
st = st+'BeSPoon : '+self._fileBS+'\n'
except:
pass
try :
st = st+'HIKOB : '+self._filehkb+'\n'
except:
pass
try :
st = st+'TCR : '+self._fileTCR+'\n'
except:
pass
st = st + '----------------------\n\n'
for k in self.log.columns:
st = st + k + ' :' + str(self.log[k].values)+'\n'
return(st)
# @property
# def dev(self):
# """ display device techno, id , id on body, body owner,...
# """
# title = '{0:21} | {1:7} | {2:8} | {3:10} '.format('Name in Dataframe', 'Real Id', 'Body Id', 'Subject')
# print title + '\n' + '-'*len(title)
# if ('HK' in self.typ) or ('FULL' in self.typ):
# hkbkeys = self.idHKB.keys()
# hkbkeys.sort()
# for d in hkbkeys:
# dev = self.devmapper(self.idHKB[d],'HKB')
# print '{0:21} | {1:7} | {2:8} | {3:10} '.format(dev[0],dev[1],dev[2],dev[3])
# if ('TCR' in self.typ) or ('FULL' in self.typ):
# tcrkeys = self.idTCR.keys()
# tcrkeys.sort()
# for d in tcrkeys:
# dev = self.devmapper(self.idTCR[d],'TCR')
# print '{0:21} | {1:7} | {2:8} | {3:10} '.format(dev[0],dev[1],dev[2],dev[3])
@property
def dev(self):
""" display device techno, id , id on body, body owner,...
"""
title = '{0:21} | {1:7} | {2:8} | {3:10} '.format('Name in Dataframe', 'Real Id', 'Body Id', 'Subject')
print( title + '\n' + '='*len(title))
# access points HKB
for d in self.din:
if ('HK' in d) :
dev = self.devmapper(d,'HKB')
print('{0:21} | {1:7} | {2:8} | {3:10} '.format(dev[0],dev[1],dev[2],dev[3]))
if 'FULL' in self.typ:
print ('{0:21} | {1:7} | {2:8} | {3:10} '.format('','','',''))
for d in self.din:
if ('BS' in d) :
dev = self.devmapper(d,'BS')
print ('{0:21} | {1:7} | {2:8} | {3:10} '.format(dev[0],dev[1],dev[2],dev[3]))
if 'FULL' in self.typ:
print ('{0:21} | {1:7} | {2:8} | {3:10} '.format('','','',''))
# access points TCR
for d in self.din:
if ('TCR' in d) :
dev = self.devmapper(d,'TCR')
print ('{0:21} | {1:7} | {2:8} | {3:10} '.format(dev[0],dev[1],dev[2],dev[3]))
print ('{0:66}'.format('-'*len(title) ))
#device per RAT per body
for b in self.B:
if b not in self.interf:
#HKB per body
for d in self.B[b].dev.keys():
if ('HK' in d):
dev = self.devmapper(d,'HKB')
print( '{0:21} | {1:7} | {2:8} | {3:10} '.format(dev[0],dev[1],dev[2],dev[3]))
#bespoon
if ('FULL' in self.typ) or ('HKB' in self.typ):
print( '{0:21} | {1:7} | {2:8} | {3:10} '.format('','','',''))
for d in self.B[b].dev.keys():
if ('BS' in d):
dev = self.devmapper(d,'BS')
print( '{0:21} | {1:7} | {2:8} | {3:10} '.format(dev[0],dev[1],dev[2],dev[3]))
# print '{0:66}'.format('-'*len(title) )
#TCR per body
if 'FULL' in self.typ:
print ('{0:21} | {1:7} | {2:8} | {3:10} '.format('','','',''))
for d in self.B[b].dev.keys():
if ('TCR' in d):
dev = self.devmapper(d,'TCR')
print ('{0:21} | {1:7} | {2:8} | {3:10} '.format(dev[0],dev[1],dev[2],dev[3]))
print ('{0:66}'.format('-'*len(title) ))
@property
def ant(self):
""" display device techno, id , id on body, body owner,...
"""
title = '{0:21} | {1:7} | {2:8} | {3:10} '.format('Name in Dataframe', 'Real Id', 'Body Id', 'Subject')
print (title + '\n' + '='*len(title) )
# access points HKB
for d in self.din:
if ('HK' in d) :
dev = self.devmapper(d,'HKB')
print ('{0:21} | {1:7} | {2:8} | {3:10} '.format(dev[0],dev[1],dev[2],dev[3]))
if 'FULL' in self.typ:
print ('{0:21} | {1:7} | {2:8} | {3:10} '.format('','','',''))
for d in self.din:
if ('BS' in d) :
dev = self.devmapper(d,'BS')
print ('{0:21} | {1:7} | {2:8} | {3:10} '.format(dev[0],dev[1],dev[2],dev[3]))
if 'FULL' in self.typ:
print( '{0:21} | {1:7} | {2:8} | {3:10} '.format('','','',''))
# access points TCR
for d in self.din:
if ('TCR' in d) :
dev = self.devmapper(d,'TCR')
print( '{0:21} | {1:7} | {2:8} | {3:10} '.format(dev[0],dev[1],dev[2],dev[3]))
print ('{0:66}'.format('-'*len(title) ))
#device per RAT per body
for b in self.B:
if b not in self.interf:
#HKB per body
for d in self.B[b].dev.keys():
if ('HK' in d):
dev = self.devmapper(d,'HKB')
print( '{0:21} | {1:7} | {2:8} | {3:10} '.format(dev[0],dev[1],dev[2],dev[3]))
#bespoon
if ('FULL' in self.typ) or ('HKB' in self.typ):
print( '{0:21} | {1:7} | {2:8} | {3:10} '.format('','','',''))
for d in self.B[b].dev.keys():
if ('BS' in d):
dev = self.devmapper(d,'BS')
print( '{0:21} | {1:7} | {2:8} | {3:10} '.format(dev[0],dev[1],dev[2],dev[3]))
# print '{0:66}'.format('-'*len(title) )
#TCR per body
if 'FULL' in self.typ:
print( '{0:21} | {1:7} | {2:8} | {3:10} '.format('','','',''))
for d in self.B[b].dev.keys():
if ('TCR' in d):
dev = self.devmapper(d,'TCR')
print( '{0:21} | {1:7} | {2:8} | {3:10} '.format(dev[0],dev[1],dev[2],dev[3]))
print( '{0:66}'.format('-'*len(title) ))
def _loadcam(self):
""" load camera position
Returns
-------
update self.cam
"""
self.cam = np.array([
[-6502.16643961174,5440.97951452912,2296.44437108561],
[-7782.34866625776,4998.47624994092,2417.5861326688],
[8308.82897665828,3618.50516290547,2698.07710953287],
[5606.68337709102,-6354.17891528277,2500.27779697402],
[-8237.91886515041,-2332.98639475305,4765.31798299242],
[5496.0942989988,6216.91946236788,2433.30012872688],
[-8296.19706598514,2430.07325486109,4794.01607841197],
[7718.37527064615,-4644.26760522485,2584.75330667172],
[8471.27154730777,-3043.74550832061,2683.45089703377],
[-8213.04824602894,-4034.57371591121,2368.54548665579],
[-7184.66711497403,-4950.49444503781,2317.68563412347],
[7531.66103727189,5279.02353243886,2479.36291603544],
[-6303.08628709464,-7057.06193926342,2288.84938553817],
[-5441.17834354692,6637.93014323586,2315.15657646861],
[8287.79937470615,59.1614281340528,4809.14535447027]
])*1e-3
def _loadinfranodes(self):
""" load infrastructure nodes
nico
A4
mpts[6,7,8]
X
A3 A1
mpts[9,10,11] mpts[3,4,5]
X X
A2
mpts[0,1,2]
X
TCR = mpts[0,3,6,9]
HKB = mpts[1,2,
4,5,
7,8,
10,11]
bernard
A3
mpts[3,4,5]
X
A2 A4
mpts[6,7,8] mpts[0,1,2]
X X
A1
mpts[9,10,11]
X
TCR = mpts[0,3,6,9]
HKB = mpts[1,2,
4,5,
7,8,
10,11]
"""
filename = os.path.join(self.rootdir,'RAW','11-06-2014','MOCAP','scene.c3d')
print( "\nload infrastructure node position:",)
a, self.infraname, pts, i = c3d.ReadC3d(filename)
pts = pts/1000.
mpts = np.mean(pts, axis=0)
self.din={}
if ('HK' in self.typ) or ('FULL' in self.typ):
uhkb = np.array([[1,2], [4,5], [7,8], [10,11]])
mphkb = np.mean(mpts[uhkb], axis=1)
self.din.update(
{'HKB:1':{'p' : mphkb[3],
# 'T' : np.eye(3),
's3off' : 0.},
'HKB:2':{'p' : mphkb[2],
# 'T': np.array([[-0.44807362, 0.89399666, 0.],
# [-0.89399666, -0.44807362, 0.],
# [ 0.,0.,1. ]]),
's3off':0.} ,
'HKB:3':{'p':mphkb[1],
# 'T':array([[-0.59846007, -0.80115264, 0.],
# [ 0.80115264, -0.59846007, 0.],
# [ 0.,0., 1.]]),
's3off':0.},
'HKB:4':{'p':mphkb[0],
# 'T':array([[-0.44807362, -0.89399666, 0.],
# [ 0.89399666, -0.44807362, 0.],
# [ 0.,0., 1.]]),
's3off':0.}
})
# TCR:31 is the coordinator which was not captured.
# The position has been determined via optimization
if ('TCR' in self.typ) or ('FULL' in self.typ):
self.din.update({'TCR:32':{'p':mpts[9],
'T':np.eye(3),
's3off':0.1},
'TCR:24':{'p':mpts[6],
# 'T': np.array([[-0.44807362, 0.89399666, 0.],
# [-0.89399666, -0.44807362, 0.],
# [ 0.,0.,1. ]]),
's3off':0.1},
'TCR:27':{'p':mpts[3],
# 'T':array([[-0.59846007, -0.80115264, 0.],
# [ 0.80115264, -0.59846007, 0.],
# [ 0.,0., 1.]]),
's3off':0.1},
'TCR:28':{'p':mpts[0],
# 'T':array([[-0.44807362, -0.89399666, 0.],
# [ 0.89399666, -0.44807362, 0.],
# [ 0.,0., 1.]]),
's3off':0.1},
'TCR:31':{'p':np.array([1.7719,-3.2655,1.74]),
# 'T':array([[-0.44807362, -0.89399666, 0.],
# [ 0.89399666, -0.44807362, 0.],
# [ 0.,0., 1.]]),
's3off':0.0}
})
if self.day == 12:
#BS idem HKB:1 and HKB:2
if ('BS' in self.typ) or ('FULL' in self.typ):
self.din.update(
{'BS:74':{'p':mphkb[3],
# 'T':np.eye(3),
's3off':-0.2},
'BS:157':{'p':mphkb[2],
# 'T': np.array([[-0.44807362, 0.89399666, 0.],
# [-0.89399666, -0.44807362, 0.],
# [ 0.,0.,1. ]]),
's3off':-0.2} ,
})
#load extra information from inifile (antenna, rotation matrix,...)
inifile = os.path.join(self.rootdir,'POST-TREATED',str(self.day)+'-06-2014','BodyandWear','AccesPoints.ini')
config = ConfigParser.ConfigParser()
config.read(inifile)
for d in self.din:
self.din[d]['antname']=config.get(d,'file')
self.din[d]['ant']=antenna.Antenna(config.get(d,'file'))
self.din[d]['T']=eval(config.get(d,'t'))
self.din[d]['comment']=config.get(d,'comment')
# self.pts= np.empty((12,3))
# self.pts[:,0]= -mpts[:,1]
# self.pts[:,1]= mpts[:,0]
# self.pts[:,2]= mpts[:,2]
# return mpts
# self.dist = np.sqrt(np.sum((mpts[:,np.newaxis,:]-mpts[np.newaxis,:])**2,axis=2))
def loadlog(self):
""" load in self.log the log of the current serie
from MeasurementLog.csv
"""
filelog = os.path.join(self.rootdir,'RAW','Doc','MeasurementLog.csv')
log = pd.read_csv(filelog)
date = str(self.day)+'/06/14'
self.log = log[(log['Meas Serie'] == self.serie) & (log['Date'] == date)]
def _loadbody(self,day=11,serie=''):
""" load body from motion capture file
Parameters
----------
day :
serie :
"""
assert day in [11,12],"wrong day in _loadbody"
self.B={}
color=['LightBlue','YellowGreen','PaleVioletRed','white','white','white','white','white','white','white']
for us,subject in enumerate(self.subject):
print( "\nload ",subject, " body:",)
seriestr = str(self.serie).zfill(3)
if day == 11:
self.filemocap = os.path.join(self.rootdir,'RAW',str(self.day)+'-06-2014','MOCAP','serie_'+seriestr+'.c3d')
elif day == 12:
self.filemocap = os.path.join(self.rootdir,'RAW',str(self.day)+'-06-2014','MOCAP','Nav_serie_'+seriestr+'.c3d')
# body and wear directory
baw = os.path.join(self.rootdir,'POST-TREATED',str(self.day)+'-06-2014','BodyandWear')
if subject =='Jihad':
subject ='Jihan'
#
# Load body cylinder description : "Subject.ini"
# Load wearable device description (contains antenna filename) :
#
self.filebody = os.path.join(baw, subject + '.ini')
self.filewear = os.path.join(baw,subject + '_' +str(self.day)+'-06-2014_' + self.typ + '.ini')
if len(self.subject) >1 or self.mocapinterf:
multi_subject=True
else:
multi_subject=False
self.B.update({subject:Body(_filebody=self.filebody,
_filemocap=self.filemocap,unit = 'mm', loop=False,
_filewear=self.filewear,
centered=False,
multi_subject_mocap=multi_subject,
color=color[us])})
if self.serie in self.mocapinterf:
self.interf = ['Anis_Cylindre:',
'Benoit_Cylindre:',
'Bernard_Cylindre:',
'Claude_Cylindre:',
'Meriem_Cylindre:']
intertmp=[]
if self.serie==13:
self.interf.remove('Bernard_Cylindre:')
for ui,i in enumerate(self.interf):
#try:
print( "load ",i, " interfering body:",)
_filemocap = pyu.getshort(self.filemocap)
self.B.update({i:Cylinder(name=i,
_filemocap=_filemocap,
unit = 'mm',
color = color[ui])})
intertmp.append(i)
#except:
# print "Warning ! load ",i, " FAIL !"
self.interf=intertmp
else :
self.interf=[]
# if len(self.subject) == 1:
# self.B = self.B[self.subject]
def _loadTCR(self,day=11,serie='',scenario='20',run=1):
""" load TCR data
Parameters
----------
day :
serie :
scenario :
run :
"""
#
# TNET : (NodeId,MAC)
#
self.TNET={0:31,
1:2,
7:24,
8:25,
9:26,
10:27,
11:28,
12:30,
14:32,
15:33,
16:34,
17:35,
18:36,
19:37,
20:48,
21:49}
if day==11:
self.dTCR ={'Unused':49,
'COORD':31,
'AP1':32,
'AP2':24,
'AP3':27,
'AP4':28,
'HeadRight':34,
'TorsoTopRight':25,
'TorsoTopLeft':30,
'BackCenter':35,
'HipRight':2,
'WristRight':26,
'WristLeft':48,
'KneeLeft':33,
'AnkleRight':36,
'AnkleLeft':37}
dirname = os.path.join(self.rootdir,'POST-TREATED','11-06-2014','TCR')
if day==12:
dirname = os.path.join(self.rootdir,'POST-TREATED','12-06-2014','TCR')
self.dTCR ={ 'COORD':31,
'AP1':32,
'AP2':24,
'AP3':27,
'AP4':28,
'Jihad:TorsoTopRight':35,
'Jihad:TorsoTopLeft':2,
'Jihad:BackCenter':33,
'Jihad:ShoulderLeft':37,
'Nicolas:TorsoTopRight':34,
'Nicolas:TorsoTopLeft':49,
'Nicolas:BackCenter':48,
'Nicolas:ShoulderLeft':36,
'Eric:TorsoCenter':30,
'Eric:BackCenter':25,
'Eric:ShoulderLeft':26}
#
# TCR : (Name , MAC)
# iTCR : (MAC , Name)
# dTCR : (NodeId, Name)
#
self.idTCR={}
for k in self.dTCR:
self.idTCR[self.dTCR[k]]=k
dTCRni={}
for k in self.TNET.keys():
dTCRni[k]=self.idTCR[self.TNET[k]]
files = os.listdir(dirname)
if serie != '':
try:
self._fileTCR = [ x for x in files if '_S'+str(serie)+'_' in x ][0]
except:
self._fileTCR = [ x for x in files if '_s'+str(serie)+'_' in x ][0]
tt = self._fileTCR.split('_')
self.scenario=tt[0].replace('Sc','')
self.run = tt[2].replace('R','')
self.typ = tt[3].replace('.csv','').upper()
self.video = 'NA'
else:
filesc = [ x for x in files if 'Sc'+scenario in x ]
self._fileTCR = [ x for x in filesc if 'R'+str(run) in x ][0]
self.scenario= scenario
self.run = str(run)
filename = os.path.join(dirname,self._fileTCR)
dtTCR = pd.read_csv(filename)
tcr={}
for k in dTCRni:
for l in dTCRni:
if k!=l:
d = dtTCR[((dtTCR['ida']==k) & (dtTCR['idb']==l))]
d.drop_duplicates('time',inplace=True)
del d['lqi']
del d['ida']
del d['idb']
d = d[d['time']!=-1]
d.index = d['time']
del d['time']
if len(d)!=0:
sr = pd.Series(d['dist']/1000,index=d.index)
tcr[dTCRni[k]+'-'+dTCRni[l]]= sr
self.tcr = pd.DataFrame(tcr)
self.tcr = self.tcr.fillna(0)
ts = 75366400./1e9
t = np.array(self.tcr.index)*ts
t = t-t[0]
self.tcr.index = t
self.ttcr=self.tcr.index
def _loadBS(self,day=11,serie='',scenario='20',run=1):
""" load BeSpoon data
Parameters
----------
day : int
serie : string
scenario : string
run : int
"""
if day == 11:
self.dBS = {'WristRight':157,'AnkleRight':74,'HandRight':0}
elif day == 12:
self.dBS = {'AP1':157,'AP2':74,'HandRight':0}
self.idBS={}
for k in self.dBS:
self.idBS[self.dBS[k]]=k
if day==11:
dirname = os.path.join(self.rootdir,'POST-TREATED','11-06-2014','BeSpoon')
if day==12:
dirname = os.path.join(self.rootdir,'POST-TREATED','12-06-2014','BeSpoon')
files = os.listdir(dirname)
if serie != '':
#self._fileBS = filter(lambda x : 'S'+str(serie) in x ,files)[0]
self._fileBS = [ x for x in files if 'S'+str(serie) in x ][0]
else:
filesc = [ x for x in files if 'Sc'+scenario in x ]
self._fileBS = [ x for x in filesc if 'R'+str(run) in x ][0]
bespo = pd.read_csv(os.path.join(dirname,self._fileBS),index_col='ts')
gb = bespo.groupby(['Sensor'])
#get device id
devid,idevid = np.unique(bespo['Sensor'],return_index=True)
# get index of each group
dgb={d:gb.get_group(d) for d in devid}
lgb=[]
for i in dgb:
ind = dgb[i].index/1e3
dti = pd.to_datetime(ind,unit='s')
npai = time2npa(dti)
npai = npai - npai[0]
dgb[i].index=pd.Index(npai)
lgb.append(pd.DataFrame(dgb[i]['d'].values,columns=[self.idBS[0]+'-'+self.idBS[i]],index=dgb[i].index))
df = lgb[0].join(lgb[1])
self.bespo = df
#self.s157 = self.bespo[self.bespo['Sensor']==157]
#self.s157.set_index(self.s157['tu'].values/1e9)
#self.s74 = self.bespo[self.bespo['Sensor']==74]
#self.s74.set_index(self.s74['tu'].values/1e9)
#t157 = np.array(self.s157['tu']/(1e9))
#self.t157 = t157-t157[0]
#t74 = np.array(self.s74['tu']/(1e9))
#self.t74 = t74 - t74[0]
def _loadhkb(self,day=11,serie='',scenario='20',run=1,source='CITI'):
""" load hkb measurement data
Parameters
----------
day : string
serie : string
scenario : string
run : int
source : 'string'
Returns
-------
update self.hkb
"""
if day == 11:
if serie == 5:
source = 'UR1'
if day==11:
self.dHKB ={'AP1':1,'AP2':2,'AP3':3,'AP4':4,
'HeadRight':5,'TorsoTopRight':6,'TorsoTopLeft':7,'BackCenter':8,'ElbowRight':9,'ElbowLeft':10,'HipRight':11,'WristRight':12,'WristLeft':13,'KneeLeft':14,'AnkleRight':16,'AnkleLeft':15}
if source=='UR1' :
dirname = os.path.join(self.rootdir,'POST-TREATED','11-06-2014','HIKOB')
elif source=='CITI':
dirname = os.path.join(self.rootdir,'POST-TREATED','11-06-2014','HIKOB','CITI')
if day==12:
self.dHKB= {'AP1':1,'AP2':2,'AP3':3,'AP4':4,'Jihad:TorsoTopRight':10,'Jihad:TorsoTopLeft':9,'Jihad:BackCenter':11,'JihadShoulderLeft':12,
'Nicolas:TorsoTopRight':6,'Nicolas:TorsoTopLeft':5,'Nicolas:BackCenter':7,'Nicolas:ShoulderLeft':8,
'Eric:TooTopRight':15,'Eric:TorsoTopLeft':13,'Eric:BackCenter':16,'Eric:ShoulderLeft':14}
#if source=='UR1':
dirname = os.path.join(self.rootdir,'POST-TREATED','12-06-2014','HIKOB')
files = os.listdir(dirname)
self.idHKB={}
for k in self.dHKB:
self.idHKB[self.dHKB[k]]=k
if serie != '':
self._filehkb = [ x for x in files if 'S'+str(serie) in x][0]
tt = self._filehkb.split('_')
if source == 'UR1':
self.scenario=tt[0].replace('Sc','')
self.run = tt[2].replace('R','')
self.typ = tt[3]
self.video = tt[4].replace('.mat','')
elif source == 'CITI':
self.scenario=tt[0].replace('Sc','')+tt[1]
self.run = tt[3].replace('r','')
self.typ = tt[4]
if self.typ == 'HKB':
self.typ = 'HKBS'
self.video = tt[5].replace('.mat','')
else:
filesc = [ x for x in files if 'Sc'+scenario in x ]
if source=='UR1':
self._filehkb = [ x for x in filesc if 'R'+str(run) in x ][0]
else:
self._filehkb = [ x for x in filesc if 'r'+str(run) in x ][0]
data = io.loadmat(os.path.join(dirname,self._filehkb))
if source=='UR1':
self.rssi = data['rssi']
self.thkb = data['t']
else:
self.rssi = data['val']
self.thkb = np.arange(np.shape(self.rssi)[2])*25.832e-3
def topandas():
try:
self.hkb = pd.DataFrame(index=self.thkb[0])
except:
self.hkb = pd.DataFrame(index=self.thkb)
for k in self.idHKB:
for l in self.idHKB:
if k!=l:
col = self.idHKB[k]+'-'+self.idHKB[l]
rcol = self.idHKB[l]+'-'+self.idHKB[k]
if rcol not in self.hkb.columns:
rssi = self.rssi[k-1,l-1,:]
self.hkb[col] = rssi
topandas()
self.hkb = self.hkb[self.hkb!=0]
def compute_visibility(self,techno='HKB',square_mda=True,all_links=True):
""" determine visibility of links for a given techno
Parameters
----------
techno string
select the given radio technology of the nodes to determine
the visibility matrix
square_mda boolean
select output format
True : (device x device x timestamp)
False : (link x timestamp)
all_links : bool
compute all links or just those for which data is available
Return
------
if square_mda = True
intersection : (nbdevice x nbdevice x nb_timestamp)
matrix of intersection (1 if link is cut, 0 otherwise)
links : (nbdevice)
name of the links
if square_mda = False
intersection : (nblink x nb_timestamp)
matrix of intersection (1 if link is cut, 0 otherwise)
links : (nblink x2)
name of the links
Example
-------
>>> from pylayers.measures.cormoran import *
>>> import matplotlib.pyplot as plt
>>> C=CorSer(serie=14,day=12)
>>> inter,links=C.compute_visibility(techno='TCR',square_mda=True)
>>> inter.shape
(15, 15, 12473)
>>> C.imshowvisibility_i(inter,links)
"""
if techno == 'TCR':
if not ((self.typ == 'TCR') or (self.typ == 'FULL')):
raise AttributeError('Serie has no data for techno: '+techno)
hname = self.tcr.keys()
dnode=copy.copy(self.dTCR)
dnode.pop('COORD')
prefix = 'TCR:'
elif techno=='HKB':
if not ((self.typ == 'HKBS') or (self.typ == 'FULL')):
raise AttributeError('Serie has no data for techno: '+techno)
hname = self.hkb.keys()
dnode=self.dHKB
prefix = 'HKB:'
# get link list
if all_links:
import itertools
links =[l for l in itertools.combinations(dnode.keys(),2)]
else:
links=[n.split('-') for n in hname]
links = [l for l in links if ('COORD' not in l[0]) and ('COORD' not in l[1])]
#mapping between device name in self.hkb and on body/in self.devdf
dev_bid = [self.devmapper(k,techno=techno)[2] for k in dnode.keys()]
nb_totaldev=len(np.unique(self.devdf['id']))
# extract all dev position on body
# Mpdev : (3 x (nb devices and nb infra nodes) x nb_timestamp)
Mpdev = np.empty((3,len(dev_bid),len(self.devdf.index)//nb_totaldev))
# get all positions
for ik,i in enumerate(dev_bid) :
if i in self.din:
Mpdev[:,ik,:] = self.din[i]['p'][:,np.newaxis]
else:
pts = self.devdf[self.devdf['id']==i][['x','y','z']].values.T
if np.prod(pts.shape)!=0:
Mpdev[:,ik,:] = pts
# create A and B from links
nA = np.array([prefix+ str(dnode[l[0]]) for l in links])
nB = np.array([prefix+ str(dnode[l[1]]) for l in links])
dma = dict(zip(dev_bid,range(len(dev_bid))))
mnA = [dma[n] for n in nA]
mnB = [dma[n] for n in nB]
A=Mpdev[:,mnA]
B=Mpdev[:,mnB]
# intersect2D matrix is
# d_0: nb links
#d_1: (cylinder number) * nb body + 1 * nb cylinder_object
# d_2 : nb frame
intersect2D = np.zeros((len(links),
11*len(self.subject) + len(self.interf),
Mpdev.shape[-1]))
# usub : index axes subject
usub_start=0
usub_stop=0
# C-D correspond to bodies segments
#C or D : 3 x 11 body segments x time
# radius of cylinders are (nb_cylinder x time)
for b in self.B:
print( 'processing shadowing from ',b)
# if b is a body not a cylinder
if not 'Cylindre' in b:
uta = self.B[b].sl[:,0].astype('int')
uhe = self.B[b].sl[:,1].astype('int')
rad = self.B[b].sl[:,2]
C = self.B[b].d[:,uta,:]
D = self.B[b].d[:,uhe,:]
try:
radius = np.concatenate((radius,rad[:,np.newaxis]*np.ones((1,C.shape[2]))),axis=0)
except:
radius = rad[:,np.newaxis]*np.ones((1,C.shape[2]))
usub_start=usub_stop
usub_stop=usub_stop+11
else:
cyl = self.B[b]
# top of cylinder
top = cyl.d[:,cyl.topnode,:]
# bottom of cylinder =top with z =0
bottom = copy.copy(cyl.d[:,cyl.topnode,:])
bottom[2,:]=0.02
#top 3 x 1 X time
C=top[:,np.newaxis,:]
D=bottom[:,np.newaxis,:]
radius = np.concatenate((radius,cyl.radius[np.newaxis]))
usub_start=usub_stop
usub_stop=usub_stop+1
f,g,X,Y,alpha,beta,dmin=seg.segdist(A,B,C,D,hard=True)
intersect2D[:,usub_start:usub_stop,:]=g
# import ipdb
# ipdb.set_trace()
#USEFUL Lines for debug
#########################
# def plt3d(ndev=53,ncyl=0,kl=11499):
# fig=plt.figure()
# ax=fig.add_subplot(111,projection='3d')
# if not isinstance(kl,list):
# kl=[kl]
# for ktime in kl:
# ax.plot([A[0,ndev,ktime],B[0,ndev,ktime]],[A[1,ndev,ktime],B[1,ndev,ktime]],[A[2,ndev,ktime],B[2,ndev,ktime]])
# [ax.plot([C[0,k,ktime],D[0,k,ktime]],[C[1,k,ktime],D[1,k,ktime]],[C[2,k,ktime],D[2,k,ktime]],'k') for k in range(11) ]
# ax.plot([X[0,ndev,ncyl,ktime],Y[0,ndev,ncyl,ktime]],[X[1,ndev,ncyl,ktime],Y[1,ndev,ncyl,ktime]],[X[2,ndev,ncyl,ktime],Y[2,ndev,ncyl,ktime]])
# ax.auto_scale_xyz([-5, 5], [-5, 5], [0, 2])
# plt.show()
# import ipdb
# ipdb.set_trace()
uinter1 = np.where((intersect2D<=(radius-0.01)))
uinter0 = np.where((intersect2D>(radius-0.01)))
# intersect2D_=copy.copy(intersect2D)
intersect2D[uinter1[0],uinter1[1],uinter1[2]]=1
intersect2D[uinter0[0],uinter0[1],uinter0[2]]=0
# #integrate the effect of all bodies by summing on axis 1
intersect = np.sum(intersect2D,axis=1)>0
if square_mda:
dev= np.unique(links)
ddev = dict(zip(dev,range(len(dev))))
lmap = np.array([(ddev[x[0]],ddev[x[1]]) for x in links])
M = np.nan*np.ones((len(dev),len(dev),intersect.shape[-1]))
for i in range(len(intersect)):
id1 = lmap[i][0]
id2 = lmap[i][1]
M[id1,id2,:]=intersect[i,:]
M[id2,id1,:]=intersect[i,:]
intersect=M
links = dev
self._visilinks = links
self._visiintersect = intersect
return intersect,links
def imshowvisibility(self,techno='HKB',t=0,**kwargs):
""" imshow visibility mda
Parameters
----------
techno : (HKB|TCR)
t : float
time in second
Examples
--------
>>> from pylayers.measures.cormoran import *
>>> import matplotlib.pyplot as plt
>>> C=CorSer(serie=6,day=12)
>>> inter,links=C.compute_visibility(techno='TCR',square_mda=True)
>>> i,l=C.imshowvisibility_i(inter,links)
See Also
--------
pylayers.measures.CorSer.compute_visibility()
"""
defaults = { 'grid':True,
}
for k in defaults:
if k not in kwargs:
kwargs[k] = defaults[k]
if 'fig' not in kwargs:
fig = plt.figure()
else:
fig = kwargs.pop('fig')
if 'ax' not in kwargs:
ax = fig.add_subplot(111)
else:
ax = kwargs.pop('ax')
if not '_visiintersect' in dir(self):
print( 'Visibility computed only once')
self.compute_visibility(techno=techno)
links = self._visilinks
inter = self._visiintersect
kt=np.where(self.tmocap <= t)[0][-1]
plt.xticks(np.arange(0, len(links), 1.0))
plt.yticks(np.arange(0, len(links), 1.0))
ax.set_xlim([-0.5,len(links)-0.5])
ax.set_ylim([len(links)-0.5,-0.5])
ax.xaxis.set_ticks_position('top')
xtickNames = plt.setp(ax, xticklabels=links)
ytickNames = plt.setp(ax, yticklabels=links)
plt.setp(xtickNames, rotation=90, fontsize=8)
plt.setp(ytickNames, rotation=0, fontsize=8)
ims=[]
ax.imshow(inter[:,:,kt],interpolation='nearest')
if kwargs['grid']:
ax.grid()
return fig,ax
def _show3i(self,t=0,**kwargs):
""" show3 interactive
"""
fig =plt.figure(num='Jog',figsize=(5,1.5))
#setting time to -10 is a trick to make the interferer cylinders appear,
#because __refreshshow3i only updates the data of the cylinders.
# if the cylinders are not present in the first _show3, they are not displayed
# later.
time=self.B[self.subject[0]].time
fId = np.where(time<= t)[0][-1]
kwargs['bodytime']=[self.tmocap[-10]]
kwargs['returnfig']=True
kwargs['tagtraj']=False
mayafig = self._show3(**kwargs)
self.__refreshshow3i(fId)
# ax.grid()
# matplotlib Widgets
slax=plt.axes([0.1, 0.5, 0.8, 0.3])
slax.set_title('t='+str(time[fId]),loc='left')
sliderx = Slider(slax, "time", 0, len(time),
valinit=fId, color='#AAAAAA')
def update_x(val):
value = int(sliderx.val)
self.__refreshshow3i(val)
slax.set_title('t='+str(time[val]),loc='left')
fig.canvas.draw_idle()
sliderx.on_changed(update_x)
def plus(event):
sliderx.set_val(sliderx.val +1)
fig.canvas.draw_idle()
def minus(event):
sliderx.set_val(sliderx.val -1)
fig.canvas.draw_idle()
def pplus(event):
sliderx.set_val(sliderx.val +10)
fig.canvas.draw_idle()
def mminus(event):
sliderx.set_val(sliderx.val -10)
fig.canvas.draw_idle()
#QUIT by pressing 'q'
def press(event):
if event.key == 'q':
mlab.close(mayafig)
plt.close(fig)
fig.canvas.mpl_connect('key_press_event', press)
#-1 frame axes
axm = plt.axes([0.2, 0.05, 0.1, 0.15])
bm = Button(axm, '-1')
bm.on_clicked(minus)
#+1 frame axes
axp = plt.axes([0.7, 0.05, 0.1, 0.15])
bp = Button(axp, '+1')
bp.on_clicked(plus)
#-10 frames axes
axmm = plt.axes([0.1, 0.05, 0.1, 0.15])
bmm = Button(axmm, '-10')
bmm.on_clicked(mminus)
#+10 frames axes
axpp = plt.axes([0.8, 0.05, 0.1, 0.15])
bpp = Button(axpp, '+10')
bpp.on_clicked(pplus)
plt.show()
def _show3idemo(self,t=0,**kwargs):
""" show3 interactive
"""
defaults={'nodename':'TorsoTopLeft'}
for k in defaults:
if k not in kwargs:
kwargs[k] = defaults[k]
fig =plt.figure(num='Jog',figsize=(5,1.5))
#setting time to -10 is a trick to make the interferer cylinders appear,
#because __refreshshow3i only updates the data of the cylinders.
# if the cylinders are not present in the first _show3, they are not displayed
# later.
time=self.B[self.subject[0]].time
fId = np.where(time<= t)[0][-1]
kwargs['bodytime']=[self.tmocap[-10]]
kwargs['returnfig']=True
kwargs['tagtraj']=False
mayafig = self._show3(**kwargs)
self.__refreshshow3i(fId)
# ax.grid()
# matplotlib Widgets
slax=plt.axes([0.1, 0.5, 0.8, 0.3])
slax.set_title('t='+str(time[fId]),loc='left')
sliderx = Slider(slax, "time", 0, len(time),
valinit=fId, color='#AAAAAA')
def update_x(val):
value = int(sliderx.val)
self.__refreshshow3i(val)
slax.set_title('t='+str(time[val]),loc='left')
vline0.set_data(([time[value],time[value]],[0,1]))
vline1.set_data(([time[value],time[value]],[0,1]))
vline2.set_data(([time[value],time[value]],[0,1]))
vline3.set_data(([time[value],time[value]],[0,1]))
fig.canvas.draw_idle()
fig2.canvas.draw_idle()
sliderx.on_changed(update_x)
def plus(event):
sliderx.set_val(sliderx.val +1)
fig.canvas.draw_idle()
def minus(event):
sliderx.set_val(sliderx.val -1)
fig.canvas.draw_idle()
def pplus(event):
sliderx.set_val(sliderx.val +10)
fig.canvas.draw_idle()
def mminus(event):
sliderx.set_val(sliderx.val -10)
fig.canvas.draw_idle()
#QUIT by pressing 'q'
def press(event):
if event.key == 'q':
mlab.close(mayafig)
plt.close(fig)
plt.close(fig2)
fig.canvas.mpl_connect('key_press_event', press)
#-1 frame axes
axm = plt.axes([0.2, 0.05, 0.1, 0.15])
bm = Button(axm, '-1')
bm.on_clicked(minus)
#+1 frame axes
axp = plt.axes([0.7, 0.05, 0.1, 0.15])
bp = Button(axp, '+1')
bp.on_clicked(plus)
#-10 frames axes
axmm = plt.axes([0.1, 0.05, 0.1, 0.15])
bmm = Button(axmm, '-10')
bmm.on_clicked(mminus)
#+10 frames axes
axpp = plt.axes([0.8, 0.05, 0.1, 0.15])
bpp = Button(axpp, '+10')
bpp.on_clicked(pplus)
fig2,ax2 = plt.subplots(4,1,figsize=(12,6))
ax2=ax2.ravel()
df0 = self.getlink(kwargs['nodename'],'AP1',techno='HKB')
df0.plot(ax=ax2[0],fig=fig2)
df1 = self.getlink(kwargs['nodename'],'AP2',techno='HKB')
df1.plot(ax=ax2[1],fig=fig2)
df2 = self.getlink(kwargs['nodename'],'AP3',techno='HKB')
df2.plot(ax=ax2[2],fig=fig2)
df3 = self.getlink(kwargs['nodename'],'AP4',techno='HKB')
df3.plot(ax=ax2[3],fig=fig2)
ax2[0].set_ylabel('AP1')
ax2[1].set_ylabel('AP2')
ax2[2].set_ylabel('AP3')
ax2[3].set_ylabel('AP4')
vline0 = ax2[0].axvline(x=time[fId], color='red')
vline1 = ax2[1].axvline(x=time[fId], color='red')
vline2 = ax2[2].axvline(x=time[fId], color='red')
vline3 = ax2[3].axvline(x=time[fId], color='red')
fig2.suptitle(kwargs['nodename'])
plt.show()
def __refreshshow3i(self,kt):
""" show3 update for interactive mode
USED in imshowvisibility_i
"""
t=self.tmocap[kt]
for ib,b in enumerate(self.B):
self.B[b].settopos(t=t,cs=True)
try:
# body
X=np.hstack((self.B[b]._pta,self.B[b]._phe))
self.B[b]._mayapts.mlab_source.set(x=X[0,:], y=X[1,:], z=X[2,:])
# device
udev = [self.B[b].dev[i]['uc3d'][0] for i in self.B[b].dev]
Xd=self.B[b]._f[kt,udev,:].T
self.B[b]._mayadev.mlab_source.set(x=Xd[0,:], y=Xd[1,:], z=Xd[2,:])
# name
uupper = np.where(X[2]==X[2].max())[0]
self.B[b]._mayaname.actors.pop()
self.B[b]._mayaname = mlab.text3d(X[0,uupper][0],X[1,uupper][0],X[2,uupper][0],self.B[b].name,scale=0.05,color=(1,0,0))
# s = np.hstack((cylrad,cylrad))
except:
# cylinder
X=np.vstack((self.B[b].top,self.B[b].bottom))
self.B[b]._mayapts.mlab_source.set(x=X[:,0], y=X[:,1], z=X[:,2])
# name
self.B[b]._mayaname.actors.pop()
self.B[b]._mayaname = mlab.text3d(self.B[b].top[0],self.B[b].top[1],self.B[b].top[2],self.B[b].name,scale=0.05,color=(1,0,0))
#vdict
V = self.B[b].traj[['vx','vy','vz']].iloc[self.B[b].toposFrameId].values
self.B[b]._mayavdic.mlab_source.set(x= self.B[b].top[0],y=self.B[b].top[1],z=self.B[b].top[2],u=V[ 0],v=V[ 1],w=V[ 2])
def imshowvisibility_i(self,techno='HKB',t=0,**kwargs):
""" imshow visibility mda interactive
Parameters
----------
inter : (nb link x nb link x timestamps)
links : (nblinks)
time : initial time (s)
Example
-------
>>> from pylayers.measures.cormoran import *
>>> import matplotlib.pyplot as plt
>>> C=CorSer(serie=6,day=12)
>>> inter,links=C.compute_visibility(techno='TCR',square_mda=True)
>>> i,l=C.imshowvisibility_i(inter,links)
"""
# if in_ipynb():
# notebook = False #program launch in ipyhon notebook
# from IPython.html import widgets # Widget definitions
# from IPython.display import display, clear_output# Used to display widgets in the notebook
# else :
# notebook = False
if not '_visiintersect' in dir(self):
print( 'Visibility is computed only once, Please wait\n')
self.compute_visibility(techno=techno)
links = self._visilinks
inter = self._visiintersect
fig, ax = plt.subplots()
fig.subplots_adjust(bottom=0.3)
time=self.tmocap
fId = np.where(time<=t)[0][-1]
vertc = [(0,-10),(0,-10),(0,10),(0,-10)]
poly = plt.Polygon(vertc)
pp = ax.add_patch(poly)
plt.xticks(np.arange(0, len(links), 1.0))
plt.yticks(np.arange(0, len(links), 1.0))
ax.set_xlim([-0.5,len(links)-0.5])
ax.set_ylim([len(links)-0.5,-0.5])
ax.xaxis.set_ticks_position('top')
xtickNames = plt.setp(ax, xticklabels=links)
ytickNames = plt.setp(ax, yticklabels=links)
plt.setp(xtickNames, rotation=90, fontsize=8)
plt.setp(ytickNames, rotation=0, fontsize=8)
ims=[]
l=ax.imshow(inter[:,:,fId],interpolation='nearest')
#setting time to -10 is a trick to make the interferer cylinders appear,
#because __refreshshow3i only updates the data of the cylinders.
# if the cylinders are not present in the first _show3, they are not displayed
# later.
kwargs['bodytime']=[self.tmocap[-10]]
kwargs['returnfig']=True
kwargs['tagtraj']=False
mayafig = self._show3(**kwargs)
self.__refreshshow3i(fId)
# ax.grid()
# matplotlib Widgets
slax=plt.axes([0.1, 0.15, 0.8, 0.05])
slax.set_title('t='+str(time[fId]),loc='left')
sliderx = Slider(slax, "time", 0, inter.shape[-1],
valinit=fId, color='#AAAAAA')
# else :
# int_range = widgets.IntSliderWidget(min=0,max=inter.shape[-1],step=1,value=fId)
# display(int_range)
def update_x(val):
value = int(sliderx.val)
sliderx.valtext.set_text('{}'.format(value))
l.set_data(inter[:,:,value])
self.__refreshshow3i(val)
slax.set_title('t='+str(time[val]),loc='left')
fig.canvas.draw_idle()
sliderx.on_changed(update_x)
# else:
# def update_x(name,value):
# clear_output(wait=True)
# display(plt.gcf())
# plt.imshow(inter[:,:,value],interpolation='nearest')
# # l.set_data(inter[:,:,value])
# kwargs['bodytime']=[self.tmocap[value]]
# self._show3(**kwargs)
# myu.inotshow('fig1',width=200,height=200,magnification=1)
# # slax.set_title('t='+str(time[val]),loc='left')
# # fig.canvas.draw_idle()
# int_range.on_trait_change(update_x, 'value')
def plus(event):
sliderx.set_val(sliderx.val +1)
fig.canvas.draw_idle()
# if not notebook:
sliderx.on_changed(update_x)
def minus(event):
sliderx.set_val(sliderx.val -1)
fig.canvas.draw_idle()
# if not notebook:
sliderx.on_changed(update_x)
def pplus(event):
sliderx.set_val(sliderx.val +10)
fig.canvas.draw_idle()
# if not notebook:
sliderx.on_changed(update_x)
def mminus(event):
sliderx.set_val(sliderx.val -10)
fig.canvas.draw_idle()
# if not notebook:
sliderx.on_changed(update_x)
# #QUIT by pressing 'q'
# def press(event):
# if event.key == 'q':
# mlab.close(mayafig)
# plt.close(fig)
# fig.canvas.mpl_connect('key_press_event', press)
# if not notebook:
#-1 frame axes
axm = plt.axes([0.3, 0.05, 0.1, 0.075])
bm = Button(axm, '-1')
bm.on_clicked(minus)
#+1 frame axes
axp = plt.axes([0.7, 0.05, 0.1, 0.075])
bp = Button(axp, '+1')
bp.on_clicked(plus)
#-10 frames axes
axmm = plt.axes([0.1, 0.05, 0.1, 0.075])
bmm = Button(axmm, '-10')
bmm.on_clicked(mminus)
#+10 frames axes
axpp = plt.axes([0.9, 0.05, 0.1, 0.075])
bpp = Button(axpp, '+10')
bpp.on_clicked(pplus)
plt.show()
def _distancematrix(self):
"""Compute the distance matrix between the nodes
self.dist : (nb frame x nb_node x nb_node)
self.dist_nodesmap : list of used nodes (useful to make the association ;) )
"""
if not isinstance(self.B,dict):
B={self.subject[0]:self.B}
else :
B=self.B
bn= []
for b in B:
if 'dev' in dir(B[b]):
tdev=[]
for k in B[b].dev:
bn.append(k)
tdev.append(B[b].dev[k]['uc3d'][0])
tdev=np.array(tdev)
try:
pnb = np.concatenate((pnb,B[b]._f[:,tdev,:]),axis=1)
except:
pnb = B[b]._f[:,tdev,:]
ln = []
uin = []
# infrastructure nodes
if ('HK' in self.typ) or ('FULL' in self.typ):
uin.extend(['HKB:1','HKB:2','HKB:3','HKB:4'])
if ('TCR' in self.typ) or ('FULL' in self.typ):
# TCR:31 is the coordinator (1.7719,-3.26)
uin.extend(['TCR:32','TCR:24','TCR:27','TCR:28','TCR:31'])
if self.day == 12:
if ('BS' in self.typ) or ('FULL' in self.typ):
uin.extend(['BS:74','BS:157'])
ln = uin + bn
pin = np.array([self.din[d]['p'] for d in uin])
pin2 = np.empty((pnb.shape[0],pin.shape[0],pin.shape[1]))
pin2[:,:,:] = pin
p = np.concatenate((pin2,pnb),axis=1)
self.points = p
self.dist = np.sqrt(np.sum((p[:,:,np.newaxis,:]-p[:,np.newaxis,:,:])**2,axis=3))
self.dist_nodesmap = ln
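# The distance line above relies on broadcasting a (nframe x nnode x 3) array
# against itself; a self-contained sketch of the same pattern on random data.
import numpy as np

nframe, nnode = 4, 5
p = np.random.rand(nframe, nnode, 3)                     # node positions per frame
diff = p[:, :, np.newaxis, :] - p[:, np.newaxis, :, :]   # (nframe, nnode, nnode, 3)
dist = np.sqrt(np.sum(diff**2, axis=3))                  # (nframe, nnode, nnode)

assert np.allclose(np.diagonal(dist, axis1=1, axis2=2), 0)   # zero self-distance
assert np.allclose(dist, dist.transpose(0, 2, 1))            # symmetric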
def _computedistdf(self):
"""Compute the distance dataframe from distance matrix
"""
# HIKOB
if ('HK' in self.typ) or ('FULL' in self.typ):
devmap = {self.devmapper(k,'hkb')[0]:self.devmapper(k,'hkb')[2] for k in self.dHKB}
udev = np.array([[self.dist_nodesmap.index(devmap[k.split('-')[0]]),self.dist_nodesmap.index(devmap[k.split('-')[1]])] for k in self.hkb.keys()])
iudev =np.array([(self.dist_nodesmap[u[0]]+'-'+self.dist_nodesmap[u[1]]) for u in udev])
df = pd.DataFrame(self.dist[:,udev[:,0],udev[:,1]],columns=iudev,index=self.tmocap)
# BE Spoon
if ('BS' in self.typ) or ('FULL' in self.typ):
devmap = {self.devmapper(k,'BS')[0]:self.devmapper(k,'BS')[2] for k in self.dBS}
udev = np.array([[self.dist_nodesmap.index(devmap[k.split('-')[0]]),self.dist_nodesmap.index(devmap[k.split('-')[1]])] for k in self.bespo.keys()])
iudev =np.array([(self.dist_nodesmap[u[0]]+'-'+self.dist_nodesmap[u[1]]) for u in udev])
dfb = pd.DataFrame(self.dist[:,udev[:,0],udev[:,1]],columns=iudev,index=self.tmocap)
df = df.join(dfb)
del dfb
if ('TCR' in self.typ) or ('FULL' in self.typ):
devmap = {self.devmapper(k,'tcr')[0]:self.devmapper(k,'tcr')[2] for k in self.dTCR}
udev = np.array([[self.dist_nodesmap.index(devmap[k.split('-')[0]]),
self.dist_nodesmap.index(devmap[k.split('-')[1]])]
for k in self.tcr.keys() ])
# for k in self.tcr.keys() if not 'COORD' in k])
iudev =np.array([(self.dist_nodesmap[u[0]]+'-'+self.dist_nodesmap[u[1]]) for u in udev])
dft = pd.DataFrame(self.dist[:,udev[:,0],udev[:,1]],columns=iudev,index=self.tmocap)
if ('FULL' in self.typ):
df = df.join(dft)
else :
df = dft
del dft
self.distdf=df
# def accessdm(self,a,b,techno=''):
# """ access to the distance matrix
# give name|id of node a and b and a given techno. retrun Groung truth
# distance between the 2 nodes
# # """
# # a,ia,bia,subja=self.devmapper(a,techno)
# # b,ib,bib,subjb=self.devmapper(b,techno)
# if 'HKB' in techno :
# if isinstance(a,str):
# ia = self.dHKB[a]
# else:
# ia = a
# a = self.idHKB[a]
# if isinstance(b,str):
# ib = self.dHKB[b]
# else:
# ib = b
# b = self.idHKB[b]
# elif 'TCR' in techno :
# if isinstance(a,str):
# ia = self.dTCR[a]
# else:
# ia = a
# a = self.idTCR[a]
# if isinstance(b,str):
# ib = self.dTCR[b]
# else:
# ib = b
# b = self.idTCR[b]
# else :
# raise AttributeError('please give only 1 techno or radio node')
# ka = techno+':'+str(ia)
# kb = techno+':'+str(ib)
# ua = self.dist_nodesmap.index(ka)
# ub = self.dist_nodesmap.index(kb)
# return(ua,ub)
# c3ds = self.B._f.shape
# if 'Full' in self.typ:
# pdev= np.empty((c3ds[0],len(self.dHKB)+len(self.tcr)+len(bs),3))
# elif 'HK' in self.typ:
# pdev= np.empty((c3ds[0],len(self.dHKB)+len(bs),3))
# elif 'TCR' in self.typ:
# pdev= np.empty((c3ds[0],len(self.tcr),3))
# else:
# raise AttributeError('invalid self.typ')
# self.B.network()
# DB = self.B.D2
# ludev = np.array([[i,self.B.dev[i]['uc3d'][0]] for i in self.B.dev])
# for i in ludev:
# pdev[:,eval(i[0])-1,:] = self.B._f[:,i[1],:]
# # self.dist = np.sqrt(np.sum((mpts[:,np.newaxis,:]-mpts[np.newaxis,:])**2,axis=2))
def vlc(self):
""" play video of the associated serie
"""
videofile = os.path.join(self.rootdir,'POST-TREATED', str(self.day)+'-06-2014','Videos')
ldir = os.listdir(videofile)
luldir = [ self._filename in x for x in ldir ]
try:
uldir = luldir.index(True)
_filename = ldir[uldir]
filename = os.path.join(videofile,_filename)
os.system('vlc '+filename +'&' )
except:
raise AttributeError('file '+ self._filename + ' not found')
def snapshot(self,t0=0,offset=15.5,title=True,save=False,fig=[],ax=[],figsize=(10,10)):
""" single snapshot plot
Parameters
----------
t0: float
offset : float
title : boolean
save : boolean
fig
ax
figsize : tuple
"""
if fig ==[]:
fig=plt.figure(figsize=figsize)
if ax == []:
ax = fig.add_subplot(111)
if 'video_sec' in self.offset[self._filename]:
offset = self.offset[self._filename]['video_sec']
elif offset != '':
offset = offset
else:
offset=0
videofile = os.path.join(self.rootdir,'POST-TREATED',str(self.day)+'-06-2014','Videos')
ldir = os.listdir(videofile)
luldir = [ self._filename in x for x in ldir ]
uldir = luldir.index(True)
_filename = ldir[uldir]
filename = os.path.join(videofile,_filename)
vc = VideoFileClip(filename)
F0 = vc.get_frame(t0+offset)
I0 = img_as_ubyte(F0)
ax.imshow(F0)
if title:
ax.set_title('t = '+str(t0)+'s')
if save :
plt.savefig(self._filename +'_'+str(t0) + '_snap.png',format='png')
return fig,ax
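    # Hedged usage sketch (illustrative only, assuming a loaded serie such as S = Corser(6),
    # as in the examples elsewhere in this class):
    #   fig, ax = S.snapshot(t0=4)
    #   plt.show()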
def snapshots(self,t0=0,t1=10,offset=15.5):
""" take snapshots
Parameters
----------
t0 : float
t1 : float
"""
if 'video_sec' in self.offset[self._filename]:
offset = self.offset[self._filename]['video_sec']
elif offset != '':
offset = offset
else:
offset=0
videofile = os.path.join(self.rootdir,'POST-TREATED',str(self.day)+'-06-2014','Videos')
ldir = os.listdir(videofile)
luldir = [ self._filename in x for x in ldir ]
uldir = luldir.index(True)
_filename = ldir[uldir]
filename = os.path.join(videofile,_filename)
vc = VideoFileClip(filename)
F0 = vc.get_frame(t0+offset)
F1 = vc.get_frame(t1+offset)
I0 = img_as_ubyte(F0)
I1 = img_as_ubyte(F1)
plt.subplot(121)
plt.imshow(F0)
plt.title('t = '+str(t0)+'s')
plt.subplot(122)
plt.imshow(F1)
plt.title('t = '+str(t1)+'s')
def _show3(self,**kwargs):
""" mayavi 3d show of scenario
Parameters
----------
L : boolean
display layout (True)
body :boolean
display bodytime(True)
bodyname : boolean
display body name
bodytime: list
list of time instant where body topos has to be shown
devsize : float
device on body size (100)
devlist : list
list of device name to show on body
pattern : boolean
display devices pattern
trajectory : boolean
display trajectory (True)
tagtraj : boolean
tag on trajectory at the 'bodytime' instants (True)
tagname : list
name of the tagtrajs
tagpoffset : ndarray
offset of the tag positions (nb_of_tags x 3)
fontsizetag : float
size of the tag names
inodes : boolean
display infrastructure nodes
inname : boolean
display infra strucutre node name
innamesize : float,
size of name of infrastructure nodes (0.1)
incolor: str
color of infrastructure nodes ('r')
insize
size of infrastructure nodes (0.1)
camera : boolean
display Vicon camera position (True)
cameracolor : str
color of camera nodes ('b')
camerasize : float
size of camera nodes (0.1)
Examples
--------
>>> S = Corser(6)
>>> S._show3()
"""
defaults = { 'L':True,
'body':True,
'bodyname':True,
'subject':[],
'interf':True,
'trajectory' :False,
'trajectory_list' :[],
'devsize':100,
'devlist':[],
'pattern':False,
'inodes' : True,
'inname' : True,
'innamesize' : 0.1,
'incolor' : 'r',
'insize' : 0.1,
'camera':True,
'cameracolor' :'k',
'camerasize' :0.1,
'bodytime':[],
'tagtraj':True,
'tagname':[],
'tagpoffset':[],
'fontsizetag':0.1,
'trajectory_color_range':True,
'trajectory_linewidth':0.01
}
for k in defaults:
if k not in kwargs:
kwargs[k] = defaults[k]
cold = pyu.coldict()
camhex = cold[kwargs['cameracolor']]
cam_color = tuple(pyu.rgb(camhex)/255.)
inhex = cold[kwargs['incolor']]
in_color = tuple(pyu.rgb(inhex)/255.)
if kwargs['subject'] == []:
subject = self.subject
else:
subject = kwargs['subject']
if kwargs['L']:
self.L._show3(opacity=0.5)
        v = list(self.din.items())
if kwargs['inodes']:
X= np.array([v[i][1]['p'] for i in range(len(v))])
mlab.points3d(X[:,0],X[:,1], X[:,2],scale_factor=kwargs['insize'],color=in_color)
if kwargs['pattern']:
for i in range(len(v)):
if not hasattr(self.din[v[i][0]]['ant'],'SqG'):
self.din[v[i][0]]['ant'].eval()
self.din[v[i][0]]['ant']._show3(po=v[i][1]['p'],
T=self.din[v[i][0]]['T'],
ilog=False,
minr=0.01,
maxr=0.2,
newfig=False,
title=False,
colorbar=False,
)
if kwargs['inname']:
[mlab.text3d(v[i][1]['p'][0],
v[i][1]['p'][1],
v[i][1]['p'][2]+v[i][1]['s3off'],
v[i][0],
scale=kwargs['innamesize'],color=in_color) for i in range(len(v))]
if kwargs['body']:
if kwargs['bodytime']==[]:
time =np.linspace(0,self.B[subject[0]].time[-1],5).astype(int)
# time=range(10,100,20)
else :
time=kwargs['bodytime']
for ki, i in enumerate(time):
for ib,b in enumerate(subject):
self.B[b].settopos(t=i,cs=True)
self.B[b]._show3(dev=True,
name = kwargs['bodyname'],
devlist=kwargs['devlist'],
devsize=kwargs['devsize'],
tube_sides=12,
pattern=kwargs['pattern'])
if kwargs['tagtraj']:
X=self.B[b].traj[['x','y','z']].values[self.B[b].toposFrameId]
if kwargs['tagpoffset']==[]:
X[2]=X[2]+0.2
else :
X=X+kwargs['tagpoffset'][ki]
if kwargs['tagname']==[]:
name = 't='+str(i)+'s'
else :
name = str(kwargs['tagname'][ki])
mlab.text3d(X[0],X[1],X[2],name,scale=kwargs['fontsizetag'])
if kwargs['interf']:
for ib,b in enumerate(self.interf):
self.B[b].settopos(t=i,cs=True)
self.B[b]._show3(name=kwargs['bodyname'],tube_sides=12)
if kwargs['trajectory']:
if kwargs['trajectory_list']==[]:
tr_subject = subject
else:
tr_subject = kwargs['trajectory_list']
for b in tr_subject:
self.B[b].traj._show3(color_range=kwargs['trajectory_color_range'],
linewidth=kwargs['trajectory_linewidth'])
if kwargs['camera'] :
mlab.points3d(self.cam[:,0],self.cam[:,1], self.cam[:,2],scale_factor=kwargs['camerasize'],color=cam_color)
mlab.view(-111.44127634143871,
60.40674368088245,
24.492297713984197,
                  np.array([-0.07235499, 0.04868631, -0.00314969]))
mlab.view(-128.66519195313163,
50.708933839573511,
24.492297713984247,
np.array([-0.07235499, 0.04868631, -0.00314969]))
def anim(self):
self._show3(body=False,inname=False,trajectory=False)
[self.B[b].anim() for b in self.B]
mlab.view(-43.413544538477254,
74.048193730704611,
11.425837641867618,
                  np.array([ 0.48298163, 0.67806043, 0.0987967 ]))
def imshow(self,time=100,kind='time'):
""" DEPRECATED
Parameters
----------
kind : string
'mean','std'
"""
fig = plt.figure(figsize=(10,10))
self.D = self.rssi-self.rssi.swapaxes(0,1)
try:
timeindex = np.where(self.thkb[0]-time>0)[0][0]
except:
timeindex = np.where(self.thkb-time>0)[0][0]
if kind=='time':
dt1 = self.rssi[:,:,timeindex]
dt2 = self.D[:,:,timeindex]
if kind == 'mean':
dt1 = ma.masked_invalid(self.rssi).mean(axis=2)
dt2 = ma.masked_invalid(self.D).mean(axis=2)
if kind == 'std':
dt1 = ma.masked_invalid(self.rssi).std(axis=2)
dt2 = ma.masked_invalid(self.D).std(axis=2)
ax1 = fig.add_subplot(121)
#img1 = ax1.imshow(self.rssi[:,:,timeindex],interpolation='nearest',origin='lower')
img1 = ax1.imshow(dt1,interpolation='nearest')
labels = [ self.idHKB[x] for x in range(1,17)]
plt.xticks(range(16),labels,rotation=80,fontsize=14)
plt.yticks(range(16),labels,fontsize=14)
if kind=='time':
plt.title('t = '+str(time)+ ' s')
if kind=='mean':
            plt.title(r'$mean(\mathbf{L})$')
        if kind=='std':
            plt.title(r'$std(\mathbf{L})$')
divider = make_axes_locatable(ax1)
cax1 = divider.append_axes("right", size="5%", pad=0.05)
clb1 = fig.colorbar(img1,cax1)
clb1.set_label('level dBm',fontsize=14)
ax2 = fig.add_subplot(122)
#img2 = ax2.imshow(self.D[:,:,timeindex],interpolation='nearest',origin='lower')
img2 = ax2.imshow(dt2,interpolation='nearest')
        plt.title(r'$\mathbf{L}-\mathbf{L}^T$')
divider = make_axes_locatable(ax2)
plt.xticks(range(16),labels,rotation=80,fontsize=14)
plt.yticks(range(16),labels,fontsize=14)
cax2 = divider.append_axes("right", size="5%", pad=0.05)
clb2 = fig.colorbar(img2,cax2)
clb2.set_label('level dBm',fontsize=14)
plt.tight_layout()
plt.show()
#for k in range(1,17):
# for l in range(1,17):
# self.dHKB[(k,l)]=iHKB[k]+' - '+iHKB[l]
# cpt = cpt + 1
return fig,(ax1,ax2)
def lk2nd(self,lk):
""" transcode a lk from Id to real name
Parameters
----------
lk : string
Examples
--------
>>> C=Corser(6)
>>> lk = 'HKB:15-HKB:7'
>>> C.lk2nd(lk)
"""
u = lk.replace('HKB:','').split('-')
v = [ self.idHKB[int(x)] for x in u ]
return(v)
def _load_offset_dict(self):
""" load offset_dictionnary.bin
Returns
-------
d : dict
{'Sc20_S5_R1_HKBS': {'hkb_index': -148, 'video_sec': 32.622087273809527},
'Sc20_S6_R2_HKBS': {'bs_index': -124, 'hkb_index': -157},
'Sc21a_S13_R1_HKBS': {'hkb_index': 537},
'Sc21a_S14_R2_HKBS': {'hkb_index': 752},
'Sc21a_S15_R3_HKBS': {'hkb_index': 438},
'Sc21a_S16_R4_HKBS': {'hkb_index': 224},
'Sc21b_S21_R1_HKBS': {'hkb_index': 368},
'Sc21b_S22_R2_HKBS': {'hkb_index': -333},
'Sc21b_S23_R3_HKBS': {'hkb_index': 136},
'Sc22a_S9_R1_Full': {'hkb_index': 678}}
Notes
-----
This is used for synchronization purpose
"""
path = os.path.join(os.environ['CORMORAN'],'POST-TREATED')
d = pickle.load( open( os.path.join(path,'offset_dictionnary.bin'), "rb" ) )
return d
def _save_offset_dict(self,d):
path = os.path.join(os.environ['CORMORAN'],'POST-TREATED')
d = pickle.dump( d, open( os.path.join(path,'offset_dictionnary.bin'), "wb" ) )
def _save_data_off_dict(self,filename,typ,value):
""" save
        - a given offset "value",
        - for a serie/run "filename",
- of a given typ (video|hkb|tcr|...)
"""
d = self._load_offset_dict()
try:
d[filename].update({typ:value})
except:
d[filename]={}
d[filename][typ]=value
self._save_offset_dict(d)
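    # Hedged usage sketch (values illustrative, mirroring the offset dictionary shown in
    # _load_offset_dict): self._save_data_off_dict('Sc20_S5_R1_HKBS', 'video_sec', 32.62)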
def offset_setter_video(self,a='AP1',b='WristRight',**kwargs):
""" video offset setter
"""
defaults = { 'inverse':True
}
for k in defaults:
if k not in kwargs:
kwargs[k] = defaults[k]
fig, axs = plt.subplots(nrows=2,ncols=1)
fig.subplots_adjust(bottom=0.3)
if isinstance(a,str):
ia = self.dHKB[a]
else:
ia = a
a = self.idHKB[a]
if isinstance(b,str):
ib = self.dHKB[b]
else:
            ib = b
b = self.idHKB[b]
time = self.thkb
if len(time) == 1:
time=time[0]
sab = self.hkb[a+'-'+b].values
sabt = self.hkb[a+'-'+b].index
hkb = axs[1].plot(sabt,sab,label = a+'-'+b)
axs[1].legend()
try :
init = self.offset[self._filename]['video_sec']
except:
init=time[0]
videofile = os.path.join(self.rootdir,'POST-TREATED',str(self.day)+'-06-2014','Videos')
ldir = os.listdir(videofile)
luldir = [ self._filename in x for x in ldir ]
uldir = luldir.index(True)
_filename = ldir[uldir]
filename = os.path.join(videofile,_filename)
vc = VideoFileClip(filename)
F0 = vc.get_frame(init)
I0 = img_as_ubyte(F0)
axs[0].imshow(F0)
########
# slider
########
slide_xoffset_ax = plt.axes([0.1, 0.15, 0.8, 0.05])
sliderx = Slider(slide_xoffset_ax, "video offset", 0, self.hkb.index[-1],
valinit=init, color='#AAAAAA')
# vertc = [(0,-10),(0,-10),(0,10),(0,-10)]
# poly = plt.Polygon(vertc)
# pp = axs[1].add_patch(poly)
def update_x(val):
F0 = vc.get_frame(val)
I0 = img_as_ubyte(F0)
axs[0].imshow(F0)
fig.canvas.draw_idle()
sliderx.on_changed(update_x)
# def cursor(val):
# try :
# pp.remove()
# except:
# pass
# vertc = [(sabt[0]+val,min(sab)-10),(sabt[0]+val,min(sab)-10),(sabt[0]+val,max(sab)+10),(sabt[0]+val,max(sab)-10)]
# poly = plt.Polygon(vertc)
# pp = axs[1].add_patch(poly)
# sliderx.on_changed(cursor)
def plus(event):
sliderx.set_val(sliderx.val +0.2)
fig.canvas.draw_idle()
sliderx.on_changed(update_x)
def minus(event):
sliderx.set_val(sliderx.val -0.2)
fig.canvas.draw_idle()
sliderx.on_changed(update_x)
def setter(event):
self._save_data_off_dict(self._filename,'video_sec',sliderx.val)
self.offset= self._load_offset_dict()
axp = plt.axes([0.3, 0.05, 0.1, 0.075])
axset = plt.axes([0.5, 0.05, 0.1, 0.075])
axm = plt.axes([0.7, 0.05, 0.1, 0.075])
bp = Button(axp, '<-')
bp.on_clicked(minus)
bset = Button(axset, 'SET offs.')
bset.on_clicked(setter)
bm = Button(axm, '->')
bm.on_clicked(plus)
plt.show()
def offset_setter(self,a='HKB:1',b='HKB:12',techno='',**kwargs):
""" offset setter
"""
defaults = { 'inverse':True
}
for k in defaults:
if k not in kwargs:
kwargs[k] = defaults[k]
if plt.isinteractive():
interactive = True
plt.ioff()
else :
interactive = False
fig, ax = plt.subplots()
fig.subplots_adjust(bottom=0.2, left=0.3)
a,ia,bia,subja,techno=self.devmapper(a,techno)
b,ib,bib,subjb,techno=self.devmapper(b,techno)
time = self.tmocap
if len(time.shape) == 2:
time = time[0,:]
try :
init = time[0]#self.offset[self._filename]['hkb_index']
except:
init=time[0]
var = self.getlinkd(ia,ib,techno).values
if kwargs['inverse']:
var = 10*np.log10(1./(var)**2)
gt = ax.plot(time,var)
ab = self.getlink(ia,ib,techno)
sab = ab.values
sabt = ab.index.values
technoval = ax.plot(sabt,sab)
########
# slider
########
slide_xoffset_ax = plt.axes([0.1, 0.15, 0.8, 0.02])
sliderx = Slider(slide_xoffset_ax, techno + " offset", -(len(sabt)/16), (len(sabt)/16),
valinit=init, color='#AAAAAA')
slide_yoffset_ax = plt.axes([0.1, 0.10, 0.8, 0.02])
slidery = Slider(slide_yoffset_ax, "gt_yoff", -100, 0,
valinit=0, color='#AAAAAA')
slide_alpha_ax = plt.axes([0.1, 0.05, 0.8, 0.02])
slideralpha = Slider(slide_alpha_ax, "gt_alpha", 0, 60,
valinit=30, color='#AAAAAA')
def update_x(val):
value = int(sliderx.val)
rtechnoval = np.roll(sab,value)
sliderx.valtext.set_text('{}'.format(value))
technoval[0].set_xdata(sabt)
technoval[0].set_ydata(rtechnoval)
fig.canvas.draw_idle()
sliderx.on_changed(update_x)
sliderx.drawon = False
def update_y(val):
yoff = slidery.val
alpha = slideralpha.val
gt[0].set_ydata(alpha*var + yoff)
fig.canvas.draw_idle()
#initpurpose
update_y(5)
slidery.on_changed(update_y)
slideralpha.on_changed(update_y)
def setter(event):
value = int(sliderx.val)
try :
nval = self.offset[self._filename][techno.lower()+'_index'] + value
except :
nval = value
self._save_data_off_dict(self._filename,techno.lower()+'_index',nval)
self.offset= self._load_offset_dict()
            ax.set_title('WARNING : Please reload the serie to validate the offset change',color='r',weight='bold')
axset = plt.axes([0.0, 0.5, 0.2, 0.05])
bset = Button(axset, 'SET ' +techno+' offs.')
bset.on_clicked(setter)
plt.show()
if interactive :
plt.ion()
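    # Hedged usage sketch (illustrative only): interactively align a radio link with the
    # motion-capture ground truth, then reload the serie to apply the stored offset, e.g.
    #   S.offset_setter(a='HKB:1', b='HKB:12', techno='HKB')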
# def offset_setter_hkb(self,a='AP1',b='WristRight',**kwargs):
# """ offset setter
# """
# defaults = { 'inverse':True
# }
# for k in defaults:
# if k not in kwargs:
# kwargs[k] = defaults[k]
# if plt.isinteractive():
# interactive = True
# plt.ioff()
# else :
# interactive = False
# fig, ax = plt.subplots()
# fig.subplots_adjust(bottom=0.2, left=0.3)
# a,ia,bia,subja,techno=self.devmapper(a,'HKB')
# b,ib,bib,subjb,techno=self.devmapper(b,'HKB')
# time = self.thkb
# if len(time.shape) == 2:
# time = time[0,:]
# try :
# init = time[0]#self.offset[self._filename]['hkb_index']
# except:
# init=time[0]
# var = self.getlinkd(ia,ib,'HKB').values
# if kwargs['inverse']:
# var = 10*np.log10(1./(var)**2)
# gt = ax.plot(self.B[self.B.keys()[0]].time,var)
# sab = self.hkb[a+'-'+b].values
# sabt = self.hkb[a+'-'+b].index
# hkb = ax.plot(sabt,sab)
# ########
# # slider
# ########
# slide_xoffset_ax = plt.axes([0.1, 0.15, 0.8, 0.02])
# sliderx = Slider(slide_xoffset_ax, "hkb offset", -(len(sabt)/16), (len(sabt)/16),
# valinit=init, color='#AAAAAA')
# slide_yoffset_ax = plt.axes([0.1, 0.10, 0.8, 0.02])
# slidery = Slider(slide_yoffset_ax, "gt_yoff", -100, 0,
# valinit=0, color='#AAAAAA')
# slide_alpha_ax = plt.axes([0.1, 0.05, 0.8, 0.02])
# slideralpha = Slider(slide_alpha_ax, "gt_alpha", 0, 10,
# valinit=5, color='#AAAAAA')
# def update_x(val):
# value = int(sliderx.val)
# rhkb = np.roll(sab,value)
# sliderx.valtext.set_text('{}'.format(value))
# hkb[0].set_xdata(sabt)
# hkb[0].set_ydata(rhkb)
# fig.canvas.draw_idle()
# sliderx.on_changed(update_x)
# sliderx.drawon = False
# def update_y(val):
# yoff = slidery.val
# alpha = slideralpha.val
# gt[0].set_ydata(alpha*var + yoff)
# fig.canvas.draw_idle()
# #initpurpose
# update_y(5)
# slidery.on_changed(update_y)
# slideralpha.on_changed(update_y)
# def setter(event):
# value = int(sliderx.val)
# try :
# nval = self.offset[self._filename]['hkb_index'] + value
# except :
# nval = value
# self._save_data_off_dict(self._filename,'hkb_index',nval)
# self.offset= self._load_offset_dict()
# ax.set_title('WARNING : Please Reload serie to Valide offset change',color='r',weight='bold')
# axset = plt.axes([0.0, 0.5, 0.2, 0.05])
# bset = Button(axset, 'SET offs.')
# bset.on_clicked(setter)
# plt.show()
# if interactive:
# plt.ion()
def mtlbsave(self):
""" Matlab format save
S{day}_{serie}
node_name
node_place
node_coord
HKB.{linkname}.tr
HKB.{linkname}.rssi
HKB.{linkname}.td
HKB.{linkname}.dist
HKB.{linkname}.sh
HKB.{linkname}.dsh
TCR.{linkname}.tr
            TCR.{linkname}.range
            TCR.{linkname}.td
            TCR.{linkname}.dist
            TCR.{linkname}.sh
"""
key = 'S'+str(self.day)+'_'+str(self.serie)
filemat = key+'.mat'
d = {}
d[key]={}
d[key]['node_name'] = self.dist_nodesmap
d[key]['node_place'] = [ self.devmapper(x)[0] for x in self.dist_nodesmap ]
d[key]['node_coord'] = self.points
for subject in self.interf:
sub = subject.replace(':','')
d[key][sub]=np.mean(self.B[subject].d,axis=1)
if ('HKB' in self.typ.upper()) or ('FULL' in self.typ.upper()):
d[key]['HKB']={}
links = list(self.hkb.columns)
inter,lks = self.compute_visibility(techno='HKB')
for l in links:
ls = l.split('-')
nl = ls[0]+'_'+ls[1]
nl=nl.replace('Jihad','J').replace('Nicolas','N').replace('Eric','E')
d[key]['HKB'][nl] = {}
ix0 = np.where(lks==ls[0])[0]
                ix1 = np.where(lks==ls[1])[0]
from collections import Counter
import os
import sys; sys.path.append('./../../')
import pickle
import numpy as np
import pandas as pd
import networkx as nx
import scipy.stats as st
import multiprocessing as mp
from pathlib import Path
from src.Tree import TreeNode
from src.utils import load_pickle
from src.graph_stats import GraphStats
from src.graph_comparison import GraphPairCompare
def load_df(path):
for subdir, dirs, files in os.walk(path):
for filename in files:
if 'csv' in filename:
print(f'\tloading {subdir} {filename} ... ', end='', flush=True)
                df = (pd.read_csv(os.path.join(subdir, filename), sep=','), filename)  # yield (DataFrame, filename) pairs
print('done')
yield df
def compute_stats(densities):
#padding = max(len(l) for l in js)
#for idx, l in enumerate(js):
# while len(js[idx]) < padding:
# js[idx] += [np.NaN]
for idx, l in enumerate(densities):
if l == []:
densities[idx] = [0]
print(densities)
mean = np.nanmean(densities, axis=0)
ci = []
for row in np.asarray(densities).T:
ci.append(st.t.interval(0.95, len(row)-1, loc=np.mean(row), scale=st.sem(row)))
    return np.asarray(mean), np.asarray(ci)
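# Hedged usage sketch (not part of the original script): wire load_df and compute_stats together
# for one results directory. The directory path and the 'density' column name are assumptions
# made for illustration, and all runs are assumed to have the same number of rows.
def _demo_density_stats(path='./output/densities'):
    densities = [df['density'].dropna().tolist() for df, _ in load_df(path)]
    mean, ci = compute_stats(densities)
    return mean, ci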
#############################################################################################################
################################################## IMPORTS ##################################################
#############################################################################################################
from tensorflow.keras.applications.resnet_v2 import ResNet50V2, ResNet152V2, preprocess_input, decode_predictions
from tensorflow.keras.preprocessing.image import ImageDataGenerator, load_img, img_to_array
from tensorflow.keras.models import Model
from tensorflow.keras.optimizers import Adam, SGD, RMSprop, Nadam
from tensorflow.keras.losses import CategoricalCrossentropy, SparseCategoricalCrossentropy, BinaryCrossentropy
from tensorflow.keras.layers import Dense, GlobalAveragePooling2D, Dropout
from tensorflow.keras.utils import to_categorical
from sklearn.utils.class_weight import compute_class_weight
from sklearn.preprocessing import LabelEncoder
from sklearn.metrics import classification_report, confusion_matrix
from keras import backend as K
from random import shuffle
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
import os, math
import getpass
# from trains import Task
# task = Task.init(project_name="DL_CNN_Final_Project", task_name="Overfitting_Model")
# logger = task.get_logger()
##############################################################################################################
################################################## SETTINGS ##################################################
##############################################################################################################
classes = [ 'Animals',
'Buildings',
'Carts',
'Children',
'Corpses',
'German Symbols',
'Gravestones',
'Railroad cars',
'Signs',
'Snow',
"Uniforms",
"Vehicles",
"Views",
'Weapons',
'Women',
]
classes = sorted(classes)
IM_WIDTH, IM_HEIGHT = 224, 224
EPOCHS = 30
BATCH_SIZE = 64*8
FC_SIZE = 2048
NUM_CLASSES = len(classes)
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
if getpass.getuser() == 'assafsh':
train_directory = "/mnt/data/Storage/yad-vashem-dataset/data/train"
validation_directory = "/mnt/data/Storage/yad-vashem-dataset/data/validation"
test_directory = "/mnt/data/Storage/yad-vashem-dataset/data/test"
else:
train_directory = os.path.join(BASE_DIR, "data/train")
validation_directory = os.path.join(BASE_DIR, "data/validation")
test_directory = os.path.join(BASE_DIR, "data/test")
###############################################################################################################
################################################## FUNCTIONS ##################################################
###############################################################################################################
def categorical_focal_loss(alpha, gamma=2.):
"""
Softmax version of focal loss.
When there is a skew between different categories/labels in your data set, you can try to apply this function as a
loss.
m
FL = ∑ -alpha * (1 - p_o,c)^gamma * y_o,c * log(p_o,c)
c=1
where m = number of classes, c = class and o = observation
Parameters:
alpha -- the same as weighing factor in balanced cross entropy. Alpha is used to specify the weight of different
categories/labels, the size of the array needs to be consistent with the number of classes.
gamma -- focusing parameter for modulating factor (1-p)
Default value:
gamma -- 2.0 as mentioned in the paper
alpha -- 0.25 as mentioned in the paper
References:
Official paper: https://arxiv.org/pdf/1708.02002.pdf
https://www.tensorflow.org/api_docs/python/tf/keras/backend/categorical_crossentropy
Usage:
model.compile(loss=[categorical_focal_loss(alpha=[[.25, .25, .25]], gamma=2)], metrics=["accuracy"], optimizer=adam)
"""
alpha = np.array(alpha, dtype=np.float32)
def categorical_focal_loss_fixed(y_true, y_pred):
"""
:param y_true: A tensor of the same shape as `y_pred`
:param y_pred: A tensor resulting from a softmax
:return: Output tensor.
"""
# Clip the prediction value to prevent NaN's and Inf's
epsilon = K.epsilon()
y_pred = K.clip(y_pred, epsilon, 1. - epsilon)
# Calculate Cross Entropy
cross_entropy = -y_true * K.log(y_pred)
# Calculate Focal Loss
loss = alpha * K.pow(1 - y_pred, gamma) * cross_entropy
# Compute mean loss in mini_batch
return K.mean(K.sum(loss, axis=-1))
return categorical_focal_loss_fixed
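# Hedged sanity check (not part of the original script): evaluate the focal loss on hand-made
# softmax predictions to see how gamma down-weights confident, correct predictions. The tensor
# values below are illustrative only.
def _focal_loss_sanity_check():
    y_true = tf.constant([[0., 1., 0.]])
    confident = tf.constant([[0.05, 0.90, 0.05]])   # well classified -> loss ~ 0.0003
    uncertain = tf.constant([[0.40, 0.30, 0.30]])   # poorly classified -> loss ~ 0.15
    loss_fn = categorical_focal_loss(alpha=[[.25, .25, .25]], gamma=2.)
    return float(loss_fn(y_true, confident)), float(loss_fn(y_true, uncertain))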
def generators():
'''
    This function creates three generators for the dataset: one for train, one for validation and one for test.
    An augmentation pipeline is applied to each training image.
'''
# Set train and test data generators
train_datagen = ImageDataGenerator(
preprocessing_function=preprocess_input,
rescale=1./255,
rotation_range=30,
width_shift_range=0.2,
height_shift_range=0.2,
shear_range=0.2,
zoom_range=0.2,
horizontal_flip=True
)
test_datagen = ImageDataGenerator(
rescale=1./255
)
# Get images from train directory and insert into generator
train_generator = train_datagen.flow_from_directory(
train_directory,
target_size=(IM_WIDTH, IM_HEIGHT),
batch_size=BATCH_SIZE,
shuffle=True,
)
# Get images from validation directory and insert into generator
validation_generator = test_datagen.flow_from_directory(
validation_directory,
target_size=(IM_WIDTH, IM_HEIGHT),
batch_size=BATCH_SIZE,
shuffle=True,
)
# Get images from test directory and insert into generator
test_generator = test_datagen.flow_from_directory(
test_directory,
target_size=(IM_WIDTH, IM_HEIGHT),
batch_size=BATCH_SIZE,
shuffle=True,
)
return train_generator, validation_generator, test_generator
''' End function '''
def generate_class_weights(train_generator):
'''
    Input: train_generator (currently unused; per-class image counts are hard-coded below)
    Output: a dict mapping class index to a log-scaled class weight, floored at 1.0
'''
labels_dict = {
'Animals': 1559,
'Buildings':9052,
'Carts': 1540,
'Children': 16525,
'Corpses': 4606,
"German Symbols": 2476,
'Gravestones': 5648,
'Railroad cars': 1018,
'Signs': 2038,
'Snow': 1716,
"Uniforms": 12356,
"Vehicles": 3036,
"Views": 8776,
'Weapons': 1260,
'Women': 27642
}
class_weights_dict = dict()
total_samples = sum(labels_dict.values())
mu = 0.15
for key in labels_dict.keys():
score = math.log(mu * total_samples / float(labels_dict[key]))
class_weights_dict[classes.index(key)] = score if score > 1.0 else 1.0
print(class_weights_dict)
return class_weights_dict
''' End function '''
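# Hedged worked example (illustrative, not from the original): with mu = 0.15 and
# total_samples = 99248, a rare class such as 'Railroad cars' (1018 images) gets
# log(0.15 * 99248 / 1018) ~= 2.68, while a frequent class such as 'Women' (27642 images)
# yields a negative score and is clamped to the 1.0 floor, so rarer classes are up-weighted.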
def create_classifier(base_model):
'''
Creates new classifiers based on ResNet50
'''
# Add global average pooling and 2 FC for fine tuning
x = base_model.output
x = GlobalAveragePooling2D()(x)
x = Dense(FC_SIZE, activation='relu')(x)
x = Dense(FC_SIZE//2, activation='relu')(x)
predictions = Dense(NUM_CLASSES, activation='softmax')(x)
# Create the model
model = Model(base_model.input, predictions)
return model
''' End function '''
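# Hedged usage sketch (not part of the original script): assemble the transfer-learning model on
# top of a frozen ResNet50V2 base. Freezing every base layer and the Adam learning rate below are
# assumptions made for illustration only.
def _build_demo_model():
    base_model = ResNet50V2(weights='imagenet', include_top=False,
                            input_shape=(IM_HEIGHT, IM_WIDTH, 3))
    for layer in base_model.layers:
        layer.trainable = False          # train only the new classification head
    model = create_classifier(base_model)
    model.compile(optimizer=Adam(learning_rate=1e-4),
                  loss=categorical_focal_loss(alpha=[[1.0] * NUM_CLASSES], gamma=2.),
                  metrics=['accuracy'])
    return model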
def fit_predict_overfitting(classifier, number, class_weight_dict):
'''
Input: classifier
Output: train on 80 images per class, validate on 10 and test on 10.
'''
train_df, validation_df, test_df = [], [], []
categories_path_train, categories_path_validation, categories_path_test = [], [], []
for category in classes:
if ' ' in category:
category = category.replace(" ", "_")
categories_path_train.append(os.path.join(train_directory, category))
categories_path_validation.append(os.path.join(validation_directory, category))
categories_path_test.append(os.path.join(test_directory, category))
for class_num, path in enumerate(categories_path_train):
dir_path = os.listdir(path)
for i, child in enumerate(dir_path):
if i == 80:
break
img = load_img(os.path.join(path, child), target_size=(IM_HEIGHT, IM_WIDTH, 3))
x = img_to_array(img)
train_df.append([x, class_num])
for class_num, path in enumerate(categories_path_validation):
dir_path = os.listdir(path)
for i, child in enumerate(dir_path):
if i == 10:
break
img = load_img(os.path.join(path, child), target_size=(IM_HEIGHT, IM_WIDTH, 3))
x = img_to_array(img)
validation_df.append([x, class_num])
for class_num, path in enumerate(categories_path_test):
dir_path = os.listdir(path)
for i, child in enumerate(dir_path):
if i == 10:
break
img = load_img(os.path.join(path, child), target_size=(IM_HEIGHT, IM_WIDTH, 3))
x = img_to_array(img)
test_df.append([x, class_num])
shuffle(train_df)
shuffle(validation_df)
shuffle(test_df)
X_train, X_validation, X_test = [], [], []
Y_train, Y_validation, Y_test = [], [], []
for image, label in train_df:
X_train.append(image)
Y_train.append(label)
for image, label in validation_df:
X_validation.append(image)
Y_validation.append(label)
for image, label in test_df:
X_test.append(image)
Y_test.append(label)
X_train = np.array(X_train) / 255.0
Y_train = np.array(Y_train)
X_validation = np.array(X_validation) / 255.0
Y_validation = np.array(Y_validation)
    X_test = np.array(X_test) / 255.0
"""
Copyright (c) 2019 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import pytest
import re
import numpy as np
from accuracy_checker.config import ConfigError
from accuracy_checker.launcher.input_feeder import InputFeeder
from accuracy_checker.data_readers import DataRepresentation
# InputInfo from openvino is needed here, but there is no appropriate API
# to create InputInfo with a specific shape, therefore let's use an analogous stub class
class InputInfo_test:
layout = ''
precision = ''
shape = []
def __init__(self, layout = '', precision = '', shape = []):
self.layout = layout
self.precision = precision
self.shape = shape
class TestInputFeeder:
def test_create_input_feeder_without_inputs_raise_config_error(self):
with pytest.raises(ConfigError):
InputFeeder([], {})
def test_create_input_feeder_with_config_inputs_and_empty_network_inputs_raise_config_error(self):
with pytest.raises(ConfigError):
InputFeeder([{'name': 'const_data', 'type': 'CONST_INPUT', 'value': '[1, 1, 1, 1]'}], {})
def test_create_input_feeder_with_config_const_inputs_not_in_network_inputs_raise_config_error(self):
with pytest.raises(ConfigError):
InputFeeder([{'name': 'const_data', 'type': 'CONST_INPUT', 'value': '[1, 1, 1, 1]'}], {'data': (1, 3, 10, 10)})
def test_create_input_feeder_with_config_inputs_not_in_network_inputs_raise_config_error(self):
with pytest.raises(ConfigError):
InputFeeder([{'name': 'data2', 'type': 'INPUT', 'value': '.'}], {'data': (1, 3, 10, 10)})
def test_create_input_feeder_without_config_inputs(self):
input_feeder = InputFeeder([], {'data': (1, 3, 10, 10)})
assert not input_feeder.const_inputs
assert not input_feeder.inputs_mapping
assert input_feeder.non_constant_inputs == ['data']
def test_create_input_feeder_config_inputs_fully_match_to_network_inputs(self):
input_feeder = InputFeeder([{'name': 'data', 'type': 'INPUT', 'value': '.'}], {'data': (1, 3, 10, 10)})
assert not input_feeder.const_inputs
assert input_feeder.inputs_mapping == {'data': re.compile('.')}
assert input_feeder.non_constant_inputs == ['data']
def test_create_input_feeder_config_inputs_contain_only_const_inputs_with_list_value(self):
input_feeder = InputFeeder([{'name': 'const_data', 'type': 'CONST_INPUT', 'value': [1, 1, 1, 1]}], {'data': (1, 3, 10, 10), 'const_data': (1, 4)})
assert np.array_equal(input_feeder.const_inputs['const_data'], np.ones(4))
assert not input_feeder.inputs_mapping
assert input_feeder.non_constant_inputs == ['data']
def test_create_input_feeder_config_inputs_contain_only_const_inputs_with_not_list_value(self):
input_feeder = InputFeeder(
[{'name': 'const_data', 'type': 'CONST_INPUT', 'value': 'value'}],
{'data': (1, 3, 10, 10), 'const_data': (1, 4)}
)
assert input_feeder.const_inputs['const_data'] == 'value'
assert not input_feeder.inputs_mapping
assert input_feeder.non_constant_inputs == ['data']
def test_create_input_feeder_not_all_non_constant_inputs_in_config_raise_config_error(self):
with pytest.raises(ConfigError):
InputFeeder(
[{'name': '0', 'type': 'INPUT', 'value': '.'}],
{'0': (1, 3, 10, 10), '1': (1, 3, 10, 10)}
)
def test_fill_non_constant_input_with_one_input_without_specific_mapping_batch_1(self):
input_feeder = InputFeeder([], { 'input': InputInfo_test(shape=(1, 3, 10, 10)) })
result = input_feeder.fill_non_constant_inputs([DataRepresentation(np.zeros((10, 10, 3)), identifier='0')])[0]
expected_data = np.zeros((1, 3, 10, 10))
assert 'input' in result
assert np.array_equal(result['input'], expected_data)
def test_fill_non_constant_input_without_specific_mapping_batch_2(self):
input_feeder = InputFeeder([], { 'input': InputInfo_test(shape=(1, 3, 10, 10))})
result = input_feeder.fill_non_constant_inputs([
DataRepresentation(np.zeros((10, 10, 3)), identifier='0'),
DataRepresentation(np.zeros((10, 10, 3)), identifier='1')
])[0]
expected_data = np.zeros((2, 3, 10, 10))
assert 'input' in result
assert np.array_equal(result['input'], expected_data)
def test_fill_non_constant_input_with_specific_mapping_batch_1(self):
input_feeder = InputFeeder([{'name': 'input', 'type': 'INPUT', 'value': '.'}], {'input': InputInfo_test(shape=(1, 3, 10, 10))})
result = input_feeder.fill_non_constant_inputs([DataRepresentation(np.zeros((10, 10, 3)), identifier='0')])[0]
expected_data = np.zeros((1, 3, 10, 10))
assert 'input' in result
assert np.array_equal(result['input'], expected_data)
    def test_fill_non_constant_input_with_specific_mapping_several_image_matched(self):
input_feeder = InputFeeder([{'name': 'input', 'type': 'INPUT', 'value': '.'}], {'input': InputInfo_test(shape=(1, 3, 10, 10))})
result = input_feeder.fill_non_constant_inputs([DataRepresentation([np.zeros((10, 10, 3)), np.ones((10, 10, 3))], identifier=['0', '1'])])[0]
expected_data = np.zeros((1, 3, 10, 10))
assert 'input' in result
assert np.array_equal(result['input'], expected_data)
def test_fill_non_constant_input_with_specific_mapping_not_match_raise_config_error(self):
input_feeder = InputFeeder([{'name': 'input', 'type': 'INPUT', 'value': '1.'}], {'input': InputInfo_test(shape=(1, 3, 10, 10))})
with pytest.raises(ConfigError):
input_feeder.fill_non_constant_inputs([DataRepresentation(np.zeros((10, 10, 3)), identifier='0')])
def test_fill_non_constant_input_with_specific_mapping_batch_2(self):
input_feeder = InputFeeder([{'name': 'input', 'type': 'INPUT', 'value': '.'}], {'input': InputInfo_test(shape=(1, 3, 10, 10))})
result = input_feeder.fill_non_constant_inputs([
DataRepresentation(np.zeros((10, 10, 3)), identifier='0'),
DataRepresentation(np.zeros((10, 10, 3)), identifier='1')
])[0]
expected_data = np.zeros((2, 3, 10, 10))
assert 'input' in result
assert np.array_equal(result['input'], expected_data)
def test_fill_non_constant_input_with_specific_mapping_not_all_image_in_batch_matched_raise_config_error(self):
input_feeder = InputFeeder([{'name': 'input', 'type': 'INPUT', 'value': '0+'}], {'input': InputInfo_test(shape=(1, 3, 10, 10))})
with pytest.raises(ConfigError):
input_feeder.fill_non_constant_inputs([
DataRepresentation(np.zeros((10, 10, 3)), identifier='0'),
DataRepresentation(np.zeros((10, 10, 3)), identifier='1')
])
def test_fill_non_constant_inputs_without_specific_mapping_batch_1(self):
input_feeder = InputFeeder([], { 'input1': InputInfo_test(shape=(1, 3, 10, 10)), 'input2': InputInfo_test(shape=(1, 3, 10, 10))})
result = input_feeder.fill_non_constant_inputs([DataRepresentation(np.zeros((10, 10, 3)), identifier='0')])[0]
expected_data = np.zeros((1, 3, 10, 10))
assert 'input1' in result
assert np.array_equal(result['input1'], expected_data)
assert 'input2' in result
        assert np.array_equal(result['input2'], expected_data)
import sys
import os
import time
import telnetlib
import multiprocessing
import numpy as np
import logging
import logging.handlers
import numpy.core._methods
import numpy.lib.format
class hp4195a(multiprocessing.Process):
def __init__(self, command_queue, message_queue, data_queue, logger_queue):
super(hp4195a, self).__init__()
self.command_queue = command_queue
self.message_queue = message_queue
self.data_queue = data_queue
self.logging_queue = logger_queue
self.mag_data = []
self.phase_data = []
self.freq_data = []
self.host = 'bi-gpib-01.dyndns.cern.ch'
self.port = '1234'
self.gpib_addr = 11
self.telnet_id = 'Prologix GPIB-ETHERNET Controller version 01.06.06.00'
self.device_id = 'HP4195A'
def run(self):
'''
This function will run when the class is launched as a separate
process.
'''
self.qh = logging.handlers.QueueHandler(self.logging_queue)
self.root = logging.getLogger()
self.root.setLevel(logging.DEBUG)
self.root.addHandler(self.qh)
self.logger = logging.getLogger(__name__)
while True:
self.command = self.command_queue.get()
self.logger.info('Received \"{}\" from GUI'.format(self.command))
self.logger.info('Command queue size = {}'.format(self.command_queue.qsize()))
if self.command == 'connect':
self.logger.info('Connecting to HP4195A')
self.telnet_connect()
elif self.command == 'disconnect':
self.logger.info('Disconnecting from HP4195A')
self.telnet_disconnect()
elif self.command == 'start_acquisition':
self.logger.info('Starting data acquisition')
if self.acquire_mag_data():
if self.acquire_phase_data():
if self.acquire_freq_data():
self.logger.info('Acquired data OK')
else:
self.logger.warning('Frequency data acquisition failed')
self.message_queue.put(False)
else:
self.logger.warning('Phase data acquisition failed')
self.message_queue.put(False)
else:
self.logger.warning('Magnitude data acquisition failed')
self.message_queue.put(False)
mag_check = len(self.mag_data) == len(self.freq_data)
phase_check = len(self.phase_data) == len(self.freq_data)
if mag_check and phase_check:
self.logger.info('Data length check passed ({}, {}, {})'.format(len(self.mag_data),len(self.phase_data),len(self.freq_data)))
self.message_queue.put(True)
self.data_queue.put(self.mag_data)
self.data_queue.put(self.phase_data)
self.data_queue.put(self.freq_data)
self.mag_data = []
self.phase_data = []
self.freq_data = []
else:
self.logger.warning('Data length check failed ({}, {}, {})'.format(len(self.mag_data),len(self.phase_data),len(self.freq_data)))
self.message_queue.put(False)
elif self.command == 'send_command':
self.command = self.command_queue.get()
self.logger.info('Sending GPIB command: {}'.format(self.command))
self.response = self.send_query(self.command)
self.logger.info('Response: {}'.format(self.response))
self.data_queue.put(self.response)
def telnet_connect(self):
self.logger.info('Starting Telnet communications')
self.tn = telnetlib.Telnet(self.host, self.port)
if self.send_query('++ver') == self.telnet_id:
self.logger.info('Successfully established connection with {}'.format(self.telnet_id))
self.init_device()
else:
self.tn.close()
self.logger.warning('Failed to setup Telnet communications')
self.message_queue.put(False)
def telnet_disconnect(self):
self.logger.info('Disconnecting Telnet connection')
self.tn.close()
self.message_queue.put(True)
def init_device(self):
self.logger.info('Querying HP4195A')
if self.send_query('ID?') == self.device_id:
self.logger.info('Successfully found {}'.format(self.device_id))
self.logger.info('Initialising HP4195A')
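            # '++auto 1' puts the Prologix GPIB-ETHERNET controller in read-after-write mode,
            # so the instrument reply is fetched automatically after every query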
self.send_command('++auto 1')
self.message_queue.put(True)
else:
self.tn.close()
self.logger.warning('Error unrecognised device')
self.message_queue.put(False)
def acquire_mag_data(self):
raw_mag_data = self.send_query('A?')
mag_data = np.fromstring(raw_mag_data, dtype=float, sep=',')
if len(mag_data) > 0:
self.mag_data = mag_data
return True
def acquire_phase_data(self):
raw_phase_data = self.send_query('B?')
        phase_data = np.fromstring(raw_phase_data, dtype=float, sep=',')
# -*- coding: utf-8 -*-
"""
Created on Mon Mar 22 2021
@author: <NAME>
"""
import torch.optim as optim
import matplotlib.pyplot as plt
import numpy as np
from environment import Environment
from agent import Agent
from pg import PolicyNet
from utils import *
# Define Environments and Agents
gamma = 0.8
train_environment = Environment(display=False, magnification=500, id=' Train')
train_agent = Agent(train_environment, gamma=gamma)
test_environment = Environment(display=False, magnification=500, id=' Test')
test_agent = Agent(test_environment, gamma=gamma)
policy = PolicyNet(2, 256, train_agent.environment.num_actions)
num_episodes = 200
num_episode_steps = 20
# Model Parameters
lr = 0.002
optimiser = optim.Adam(policy.parameters(), lr=lr)
print_every_episode = 1
lookback = 5
train_rewards = []
test_rewards = []
for episode in range(1, num_episodes + 1):
# Training
loss, train_reward, train_trace = train(train_agent, policy, optimiser, gamma, num_episode_steps)
train_agent.environment.plot_trace(train_trace)
# Validation
test_reward, test_trace = evaluate(test_agent, policy, num_episode_steps)
test_agent.environment.plot_trace(test_trace)
train_rewards.append(train_reward)
test_rewards.append(test_reward)
mean_train_rewards = np.mean(train_rewards[-lookback:])
    mean_test_rewards = np.mean(test_rewards[-lookback:])
from typing import Tuple, Union, Optional, Any, Dict, Type
import gym
import numpy as np
import torch as th
from stable_baselines3.common.type_aliases import Schedule, GymEnv
from torch import nn
from torch.nn import functional as F
from gym import spaces
from stable_baselines3 import PPO as SB3PPO
from stable_baselines3.common.callbacks import BaseCallback
from stable_baselines3.common.policies import ActorCriticPolicy
from stable_baselines3.common.utils import obs_as_tensor, explained_variance
from stable_baselines3.common.vec_env import VecEnv
from yacht.agents.buffers import StudentRolloutBuffer, SupervisedRolloutBuffer
from yacht.agents.modules.torch_layers import SupervisedMlpExtractor
class PPO(SB3PPO):
def train(self) -> None:
super().train()
self.logger.dump()
# TODO: Merge SupervisedPPO with StudentPPO.
# TODO: Fork stable_baselines3 and move implementation there.
class SupervisedPPO(SB3PPO):
class SupervisedActorCriticPolicy(ActorCriticPolicy):
def __init__(self, *args, **kwargs):
num_labels = kwargs.pop('num_labels')
assert num_labels is not None
self.num_labels = num_labels
self.supervised_labels_net = None
super().__init__(*args, **kwargs)
def _build_mlp_extractor(self) -> None:
"""
Create the policy and value networks.
Part of the layers can be shared.
"""
# Note: If net_arch is None and some features extractor is used,
# net_arch here is an empty list and mlp_extractor does not
# really contain any layers (acts like an identity module).
self.mlp_extractor = SupervisedMlpExtractor(
self.features_dim,
net_arch=self.net_arch,
activation_fn=self.activation_fn,
device=self.device,
)
def _build(self, lr_schedule: Schedule) -> None:
super()._build(lr_schedule)
latent_dim_supervised = self.mlp_extractor.latent_dim_supervised
self.supervised_labels_net = nn.Sequential(
nn.Linear(latent_dim_supervised, self.num_labels),
nn.Sigmoid()
)
def forward(
self,
obs: th.Tensor,
deterministic: bool = False
) -> Tuple[th.Tensor, th.Tensor, th.Tensor, th.Tensor]:
"""
Forward pass in all the networks (actor and critic)
:param obs: Observation
:param deterministic: Whether to sample or use deterministic actions
:return: action, value and log probability of the action
"""
latent_pi, latent_vf, latent_sde, latent_supervised = self._get_latent(obs)
supervised_predictions = self.supervised_labels_net(latent_supervised)
# Evaluate the values for the given observations
values = self.value_net(latent_vf)
distribution = self._get_action_dist_from_latent(latent_pi, latent_sde=latent_sde)
actions = distribution.get_actions(deterministic=deterministic)
log_prob = distribution.log_prob(actions)
return actions, values, log_prob, supervised_predictions
def evaluate_actions(
self,
obs: th.Tensor,
actions: th.Tensor
) -> Tuple[th.Tensor, th.Tensor, th.Tensor, th.Tensor]:
"""
Evaluate actions according to the current policy,
given the observations.
:param obs:
:param actions:
:return: estimated value, log likelihood of taking those actions
and entropy of the action distribution.
"""
latent_pi, latent_vf, latent_sde, latent_supervised = self._get_latent(obs)
supervised_predictions = self.supervised_labels_net(latent_supervised)
distribution = self._get_action_dist_from_latent(latent_pi, latent_sde)
log_prob = distribution.log_prob(actions)
values = self.value_net(latent_vf)
return values, log_prob, distribution.entropy(), supervised_predictions
def _predict(self, observation: th.Tensor, deterministic: bool = False) -> th.Tensor:
"""
Get the action according to the policy for a given observation.
:param observation:
:param deterministic: Whether to use stochastic or deterministic actions
:return: Taken action according to the policy
"""
latent_pi, _, latent_sde, _ = self._get_latent(observation)
distribution = self._get_action_dist_from_latent(latent_pi, latent_sde)
return distribution.get_actions(deterministic=deterministic)
def _get_latent(self, obs: th.Tensor) -> Tuple[th.Tensor, th.Tensor, th.Tensor, th.Tensor]:
"""
Get the latent code (i.e., activations of the last layer of each network)
for the different networks.
:param obs: Observation
:return: Latent codes
            for the actor, the value function, the gSDE exploration function and the supervised head
"""
# Preprocess the observation if needed
features = self.extract_features(obs)
latent_pi, latent_vf, latent_supervised = self.mlp_extractor(features)
# Features for sde
latent_sde = latent_pi
if self.sde_features_extractor is not None:
latent_sde = self.sde_features_extractor(features)
return latent_pi, latent_vf, latent_sde, latent_supervised
def __init__(
self,
# Policy is not used, but it is kept for interface compatibility.
policy: Union[str, Type[ActorCriticPolicy]],
env: Union[GymEnv, str],
learning_rate: Union[float, Schedule] = 3e-4,
n_steps: int = 2048,
batch_size: Optional[int] = 64,
n_epochs: int = 10,
gamma: float = 0.99,
gae_lambda: float = 0.95,
clip_range: Union[float, Schedule] = 0.2,
clip_range_vf: Union[None, float, Schedule] = None,
ent_coef: float = 0.0,
vf_coef: float = 0.5,
supervised_coef: float = 0.5,
max_grad_norm: float = 0.5,
use_sde: bool = False,
sde_sample_freq: int = -1,
target_kl: Optional[float] = None,
tensorboard_log: Optional[str] = None,
create_eval_env: bool = False,
policy_kwargs: Optional[Dict[str, Any]] = None,
verbose: int = 0,
seed: Optional[int] = None,
device: Union[th.device, str] = "auto",
_init_setup_model: bool = True,
num_labels: int = 1
):
if policy_kwargs is not None:
policy_kwargs['num_labels'] = num_labels
self.num_labels = num_labels
self.supervised_coef = supervised_coef
super().__init__(
policy=self.SupervisedActorCriticPolicy,
env=env,
learning_rate=learning_rate,
n_steps=n_steps,
batch_size=batch_size,
n_epochs=n_epochs,
gamma=gamma,
gae_lambda=gae_lambda,
clip_range=clip_range,
clip_range_vf=clip_range_vf,
ent_coef=ent_coef,
vf_coef=vf_coef,
max_grad_norm=max_grad_norm,
use_sde=use_sde,
sde_sample_freq=sde_sample_freq,
target_kl=target_kl,
tensorboard_log=tensorboard_log,
policy_kwargs=policy_kwargs,
verbose=verbose,
device=device,
create_eval_env=create_eval_env,
seed=seed,
_init_setup_model=_init_setup_model,
)
def _setup_model(self):
super()._setup_model()
self.rollout_buffer = SupervisedRolloutBuffer(
buffer_size=self.n_steps,
observation_space=self.observation_space,
action_space=self.action_space,
device=self.device,
gamma=self.gamma,
gae_lambda=self.gae_lambda,
n_envs=self.n_envs,
num_labels=self.num_labels
)
def collect_rollouts(
self,
env: VecEnv,
callback: BaseCallback,
rollout_buffer: SupervisedRolloutBuffer,
n_rollout_steps: int,
) -> bool:
"""
Collect experiences using the current policy and fill a ``RolloutBuffer``.
The term rollout here refers to the model-free notion and should not
be used with the concept of rollout used in model-based RL or planning.
:param env: The training environment
:param callback: Callback that will be called at each step
(and at the beginning and end of the rollout)
:param rollout_buffer: Buffer to fill with rollouts
        :param n_rollout_steps: Number of experiences to collect per environment
:return: True if function returned with at least `n_rollout_steps`
collected, False if callback terminated rollout prematurely.
"""
assert self._last_obs is not None, "No previous observation was provided"
n_steps = 0
rollout_buffer.reset()
# Sample new weights for the state dependent exploration
if self.use_sde:
self.policy.reset_noise(env.num_envs)
callback.on_rollout_start()
while n_steps < n_rollout_steps:
if self.use_sde and self.sde_sample_freq > 0 and n_steps % self.sde_sample_freq == 0:
# Sample a new noise matrix
self.policy.reset_noise(env.num_envs)
with th.no_grad():
# Convert to pytorch tensor or to TensorDict
obs_tensor = obs_as_tensor(self._last_obs, self.device)
actions, values, log_probs, _ = self.policy.forward(obs_tensor)
actions = actions.cpu().numpy()
# Rescale and perform action
clipped_actions = actions
# Clip the actions to avoid out of bound error
if isinstance(self.action_space, gym.spaces.Box):
clipped_actions = np.clip(actions, self.action_space.low, self.action_space.high)
new_obs, rewards, dones, infos = env.step(clipped_actions)
self.num_timesteps += env.num_envs
# Give access to local variables
callback.update_locals(locals())
if callback.on_step() is False:
return False
self._update_info_buffer(infos)
n_steps += 1
if isinstance(self.action_space, gym.spaces.Discrete):
# Reshape in case of discrete action
actions = actions.reshape(-1, 1)
labels = np.array([info['label'] for info in infos], dtype=np.float32)
rollout_buffer.add(
self._last_obs,
actions,
rewards,
self._last_episode_starts,
values,
log_probs,
labels
)
self._last_obs = new_obs
self._last_episode_starts = dones
with th.no_grad():
# Compute value for the last timestep
obs_tensor = obs_as_tensor(new_obs, self.device)
_, values, _, _ = self.policy.forward(obs_tensor)
rollout_buffer.compute_returns_and_advantage(last_values=values, dones=dones)
callback.on_rollout_end()
return True
def train(self) -> None:
"""
Update policy using the currently gathered rollout buffer.
"""
# Update optimizer learning rate
self._update_learning_rate(self.policy.optimizer)
# Compute current clip range
clip_range = self.clip_range(self._current_progress_remaining)
# Optional: clip range for the value function
if self.clip_range_vf is not None:
clip_range_vf = self.clip_range_vf(self._current_progress_remaining)
entropy_losses = []
pg_losses, value_losses = [], []
clip_fractions = []
supervised_losses = []
continue_training = True
# train for n_epochs epochs
for epoch in range(self.n_epochs):
approx_kl_divs = []
# Do a complete pass on the rollout buffer
for rollout_data in self.rollout_buffer.get(self.batch_size):
actions = rollout_data.actions
if isinstance(self.action_space, spaces.Discrete):
# Convert discrete action from float to long
actions = rollout_data.actions.long().flatten()
# Re-sample the noise matrix because the log_std has changed
# TODO: investigate why there is no issue with the gradient
# if that line is commented (as in SAC)
if self.use_sde:
self.policy.reset_noise(self.batch_size)
values, log_prob, entropy, predictions = self.policy.evaluate_actions(rollout_data.observations, actions)
values = values.flatten()
# Normalize advantage
advantages = rollout_data.advantages
advantages = (advantages - advantages.mean()) / (advantages.std() + 1e-8)
# ratio between old and new policy, should be one at the first iteration
ratio = th.exp(log_prob - rollout_data.old_log_prob)
# clipped surrogate loss
policy_loss_1 = advantages * ratio
policy_loss_2 = advantages * th.clamp(ratio, 1 - clip_range, 1 + clip_range)
policy_loss = -th.min(policy_loss_1, policy_loss_2).mean()
# Logging
pg_losses.append(policy_loss.item())
clip_fraction = th.mean((th.abs(ratio - 1) > clip_range).float()).item()
clip_fractions.append(clip_fraction)
if self.clip_range_vf is None:
# No clipping
values_pred = values
else:
# Clip the different between old and new value
# NOTE: this depends on the reward scaling
values_pred = rollout_data.old_values + th.clamp(
values - rollout_data.old_values, -clip_range_vf, clip_range_vf
)
# Value loss using the TD(gae_lambda) target
value_loss = F.mse_loss(rollout_data.returns, values_pred)
value_losses.append(value_loss.item())
# Entropy loss favor exploration
if entropy is None:
# Approximate entropy when no analytical form
entropy_loss = -th.mean(-log_prob)
else:
entropy_loss = -th.mean(entropy)
entropy_losses.append(entropy_loss.item())
# Supervised loss
supervised_loss = F.binary_cross_entropy(
predictions,
rollout_data.labels,
reduction='mean'
)
supervised_losses.append(supervised_loss.item())
loss = \
policy_loss + \
self.ent_coef * entropy_loss + \
self.vf_coef * value_loss + \
self.supervised_coef * supervised_loss
# Calculate approximate form of reverse KL Divergence for early stopping
# see issue #417: https://github.com/DLR-RM/stable-baselines3/issues/417
# and discussion in PR #419: https://github.com/DLR-RM/stable-baselines3/pull/419
# and Schulman blog: http://joschu.net/blog/kl-approx.html
with th.no_grad():
log_ratio = log_prob - rollout_data.old_log_prob
approx_kl_div = th.mean((th.exp(log_ratio) - 1) - log_ratio).cpu().numpy()
approx_kl_divs.append(approx_kl_div)
if self.target_kl is not None and approx_kl_div > 1.5 * self.target_kl:
continue_training = False
if self.verbose >= 1:
print(f"Early stopping at step {epoch} due to reaching max kl: {approx_kl_div:.2f}")
break
# Optimization step
self.policy.optimizer.zero_grad()
loss.backward()
# Clip grad norm
th.nn.utils.clip_grad_norm_(self.policy.parameters(), self.max_grad_norm)
self.policy.optimizer.step()
if not continue_training:
break
self._n_updates += self.n_epochs
explained_var = explained_variance(self.rollout_buffer.values.flatten(), self.rollout_buffer.returns.flatten())
# Logs
self.logger.record("train/entropy_loss", np.mean(entropy_losses))
self.logger.record("train/policy_gradient_loss", np.mean(pg_losses))
self.logger.record("train/value_loss", np.mean(value_losses))
self.logger.record("train/supervised_loss", np.mean(supervised_losses))
self.logger.record("train/approx_kl", np.mean(approx_kl_divs))
self.logger.record("train/clip_fraction", np.mean(clip_fractions))
self.logger.record("train/loss", loss.item())
self.logger.record("train/explained_variance", explained_var)
if hasattr(self.policy, "log_std"):
self.logger.record("train/std", th.exp(self.policy.log_std).mean().item())
self.logger.record("train/n_updates", self._n_updates, exclude="tensorboard")
self.logger.record("train/clip_range", clip_range)
if self.clip_range_vf is not None:
self.logger.record("train/clip_range_vf", clip_range_vf)
self.logger.dump()
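# Hedged usage sketch (not part of the original module): SupervisedPPO expects each env step's
# info dict to expose a 'label' entry holding `num_labels` binary targets, e.g.
#   model = SupervisedPPO('MlpPolicy', env, num_labels=1, supervised_coef=0.5)
#   model.learn(total_timesteps=100_000)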
class StudentPPO(PPO):
class StudentActorCriticPolicy(ActorCriticPolicy):
def forward(
self, obs: th.Tensor, deterministic: bool = False
) -> Tuple[th.Tensor, th.Tensor, th.Tensor, th.Tensor]:
"""
Forward pass in all the networks (actor and critic)
:param obs: Observation
:param deterministic: Whether to sample or use deterministic actions
:return: action, value, log probability of the action, probabilities of the action
"""
latent_pi, latent_vf, latent_sde = self._get_latent(obs)
# Evaluate the values for the given observations
values = self.value_net(latent_vf)
distribution = self._get_action_dist_from_latent(latent_pi, latent_sde=latent_sde)
actions = distribution.get_actions(deterministic=deterministic)
log_prob = distribution.log_prob(actions)
probabilities = th.cat([th.unsqueeze(d.probs, dim=-1) for d in distribution.distribution], dim=-1)
return actions, values, log_prob, probabilities
def __init__(
self,
# Policy is not used, but it is kept for interface compatibility.
policy: Union[str, Type[ActorCriticPolicy]],
env: Union[GymEnv, str],
learning_rate: Union[float, Schedule] = 3e-4,
n_steps: int = 2048,
batch_size: Optional[int] = 64,
n_epochs: int = 10,
gamma: float = 0.99,
gae_lambda: float = 0.95,
clip_range: Union[float, Schedule] = 0.2,
clip_range_vf: Union[None, float, Schedule] = None,
ent_coef: float = 0.0,
vf_coef: float = 0.5,
max_grad_norm: float = 0.5,
use_sde: bool = False,
sde_sample_freq: int = -1,
target_kl: Optional[float] = None,
tensorboard_log: Optional[str] = None,
create_eval_env: bool = False,
policy_kwargs: Optional[Dict[str, Any]] = None,
verbose: int = 0,
seed: Optional[int] = None,
device: Union[th.device, str] = "auto",
_init_setup_model: bool = True,
distillation_coef: float = 0.01,
distillation_loss_weights: Optional[list] = None
):
super(PPO, self).__init__(
policy=self.StudentActorCriticPolicy,
env=env,
learning_rate=learning_rate,
n_steps=n_steps,
batch_size=batch_size,
n_epochs=n_epochs,
gamma=gamma,
gae_lambda=gae_lambda,
clip_range=clip_range,
clip_range_vf=clip_range_vf,
ent_coef=ent_coef,
vf_coef=vf_coef,
max_grad_norm=max_grad_norm,
use_sde=use_sde,
sde_sample_freq=sde_sample_freq,
target_kl=target_kl,
tensorboard_log=tensorboard_log,
policy_kwargs=policy_kwargs,
verbose=verbose,
device=device,
create_eval_env=create_eval_env,
seed=seed,
_init_setup_model=_init_setup_model,
)
self.distillation_coef = distillation_coef
if distillation_loss_weights is not None:
self.distillation_loss_weights = th.tensor(distillation_loss_weights, dtype=th.float32)
self.distillation_loss_weights = self.distillation_loss_weights.to(self.device)
else:
self.distillation_loss_weights = None
def _setup_model(self):
super()._setup_model()
self.rollout_buffer = StudentRolloutBuffer(
self.n_steps,
self.observation_space,
self.action_space,
self.device,
gamma=self.gamma,
gae_lambda=self.gae_lambda,
n_envs=self.n_envs,
)
def collect_rollouts(
self,
env: VecEnv,
callback: BaseCallback,
rollout_buffer: StudentRolloutBuffer,
n_rollout_steps: int,
) -> bool:
"""
Collect experiences using the current policy and fill a ``RolloutBuffer``.
The term rollout here refers to the model-free notion and should not
be used with the concept of rollout used in model-based RL or planning.
:param env: The training environment
:param callback: Callback that will be called at each step
(and at the beginning and end of the rollout)
:param rollout_buffer: Buffer to fill with rollouts
        :param n_rollout_steps: Number of experiences to collect per environment
:return: True if function returned with at least `n_rollout_steps`
collected, False if callback terminated rollout prematurely.
"""
assert self._last_obs is not None, "No previous observation was provided"
n_steps = 0
rollout_buffer.reset()
# Sample new weights for the state dependent exploration
if self.use_sde:
self.policy.reset_noise(env.num_envs)
callback.on_rollout_start()
while n_steps < n_rollout_steps:
if self.use_sde and self.sde_sample_freq > 0 and n_steps % self.sde_sample_freq == 0:
# Sample a new noise matrix
self.policy.reset_noise(env.num_envs)
with th.no_grad():
# Convert to pytorch tensor or to TensorDict
obs_tensor = obs_as_tensor(self._last_obs, self.device)
actions, values, log_probs, all_actions_probs = self.policy.forward(obs_tensor)
actions = actions.cpu().numpy()
# Rescale and perform action
clipped_actions = actions
# Clip the actions to avoid out of bound error
if isinstance(self.action_space, gym.spaces.Box):
clipped_actions = np.clip(actions, self.action_space.low, self.action_space.high)
new_obs, rewards, dones, infos = env.step(clipped_actions)
self.num_timesteps += env.num_envs
# Give access to local variables
callback.update_locals(locals())
if callback.on_step() is False:
return False
self._update_info_buffer(infos)
n_steps += 1
if isinstance(self.action_space, gym.spaces.Discrete):
# Reshape in case of discrete action
actions = actions.reshape(-1, 1)
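            # The wrapped environment is expected to expose the teacher's chosen action for
            # each sub-environment via info['teacher_action']; it is stored alongside the
            # transition so the distillation loss in train() can compare student and teacher.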
teacher_actions = np.array([info['teacher_action'] for info in infos], dtype=np.float32)
rollout_buffer.add(
self._last_obs,
actions,
rewards,
self._last_episode_starts,
values,
log_probs,
teacher_actions,
all_actions_probs,
)
self._last_obs = new_obs
self._last_episode_starts = dones
with th.no_grad():
# Compute value for the last timestep
obs_tensor = obs_as_tensor(new_obs, self.device)
_, values, _, _ = self.policy.forward(obs_tensor)
rollout_buffer.compute_returns_and_advantage(last_values=values, dones=dones)
callback.on_rollout_end()
return True
def train(self) -> None:
"""
Update policy using the currently gathered rollout buffer.
"""
# Update optimizer learning rate
self._update_learning_rate(self.policy.optimizer)
# Compute current clip range
clip_range = self.clip_range(self._current_progress_remaining)
# Optional: clip range for the value function
if self.clip_range_vf is not None:
clip_range_vf = self.clip_range_vf(self._current_progress_remaining)
entropy_losses = []
pg_losses, value_losses = [], []
clip_fractions = []
distillation_losses = []
continue_training = True
# train for n_epochs epochs
for epoch in range(self.n_epochs):
approx_kl_divs = []
# Do a complete pass on the rollout buffer
for rollout_data in self.rollout_buffer.get(self.batch_size):
actions = rollout_data.actions
if isinstance(self.action_space, spaces.Discrete):
# Convert discrete action from float to long
actions = rollout_data.actions.long().flatten()
# Re-sample the noise matrix because the log_std has changed
# TODO: investigate why there is no issue with the gradient
# if that line is commented (as in SAC)
if self.use_sde:
self.policy.reset_noise(self.batch_size)
values, log_prob, entropy = self.policy.evaluate_actions(rollout_data.observations, actions)
values = values.flatten()
# Normalize advantage
advantages = rollout_data.advantages
advantages = (advantages - advantages.mean()) / (advantages.std() + 1e-8)
# ratio between old and new policy, should be one at the first iteration
ratio = th.exp(log_prob - rollout_data.old_log_prob)
# clipped surrogate loss
policy_loss_1 = advantages * ratio
policy_loss_2 = advantages * th.clamp(ratio, 1 - clip_range, 1 + clip_range)
policy_loss = -th.min(policy_loss_1, policy_loss_2).mean()
# Logging
pg_losses.append(policy_loss.item())
clip_fraction = th.mean((th.abs(ratio - 1) > clip_range).float()).item()
clip_fractions.append(clip_fraction)
if self.clip_range_vf is None:
# No clipping
values_pred = values
else:
                    # Clip the difference between the old and new value
# NOTE: this depends on the reward scaling
values_pred = rollout_data.old_values + th.clamp(
values - rollout_data.old_values, -clip_range_vf, clip_range_vf
)
# Value loss using the TD(gae_lambda) target
value_loss = F.mse_loss(rollout_data.returns, values_pred)
value_losses.append(value_loss.item())
                # Entropy loss favors exploration
if entropy is None:
# Approximate entropy when no analytical form
entropy_loss = -th.mean(-log_prob)
else:
entropy_loss = -th.mean(entropy)
entropy_losses.append(entropy_loss.item())
# Policy distillation loss.
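                # The student's action distribution (all_actions_probs gathered during the
                # rollout) is pulled toward the teacher's recorded actions with a negative
                # log-likelihood term, optionally class-weighted by distillation_loss_weights.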
probabilities = rollout_data.action_probabilities
teacher_actions = rollout_data.teacher_actions
assert teacher_actions[teacher_actions == -1].shape[0] == 0
distillation_loss = F.nll_loss(
input=probabilities.log(),
target=teacher_actions,
weight=self.distillation_loss_weights
)
distillation_losses.append(distillation_loss.item())
loss = \
policy_loss + \
self.ent_coef * entropy_loss + \
self.vf_coef * value_loss + \
self.distillation_coef * distillation_loss
# Calculate approximate form of reverse KL Divergence for early stopping
# see issue #417: https://github.com/DLR-RM/stable-baselines3/issues/417
# and discussion in PR #419: https://github.com/DLR-RM/stable-baselines3/pull/419
# and Schulman blog: http://joschu.net/blog/kl-approx.html
with th.no_grad():
log_ratio = log_prob - rollout_data.old_log_prob
approx_kl_div = th.mean((th.exp(log_ratio) - 1) - log_ratio).cpu().numpy()
approx_kl_divs.append(approx_kl_div)
if self.target_kl is not None and approx_kl_div > 1.5 * self.target_kl:
continue_training = False
if self.verbose >= 1:
print(f"Early stopping at step {epoch} due to reaching max kl: {approx_kl_div:.2f}")
break
# Optimization step
self.policy.optimizer.zero_grad()
loss.backward()
# Clip grad norm
th.nn.utils.clip_grad_norm_(self.policy.parameters(), self.max_grad_norm)
self.policy.optimizer.step()
if not continue_training:
break
self._n_updates += self.n_epochs
explained_var = explained_variance(self.rollout_buffer.values.flatten(), self.rollout_buffer.returns.flatten())
# Logs
self.logger.record("train/entropy_loss",
|
np.mean(entropy_losses)
|
numpy.mean
|
import numpy as np
from numpy import reshape
from flask import Flask, request, jsonify, render_template
import pickle
app = Flask(__name__)
print(__name__)
model = pickle.load(open('model_pricing.pkl', 'rb'))
@app.route('/')
def home():
return render_template('index.html')
@app.route('/predict',methods=['POST'])
def predict():
'''
For rendering results on HTML GUI
'''
print("request values : ", request.form.values())
int_features = [int(x) for x in request.form.values()]
print("int_features : ", int_features)
final_features = [np.array(int_features)]
print("final features : ", final_features)
final_features =
|
reshape(final_features, (-1, 1))
|
numpy.reshape
|
from typing import Dict, Mapping, List, Tuple
import itertools
import demes
import numpy as np
import matplotlib
import matplotlib.patheffects
from . import utils
class Tube:
"""
A deme represented as a tube. The tube has a length along the time
dimension and a width equal to the deme's population size, which may
change over time.
:ivar list[float] time: Coordinates along the time dimension.
:ivar list[float] size1: Coordinates along the non-time dimension,
corresponding to the first side of the tube.
:ivar list[float] size2: Coordinates along the non-time dimension,
corresponding to the second side of the tube.
"""
def __init__(
self,
deme: demes.Deme,
mid: float,
inf_start_time: float,
log_time: bool = False,
):
"""
:param demes.Deme deme: The deme for which to calculate coordinates.
:param float mid: The mid point of the deme along the non-time dimension.
:param float inf_start_time: The value along the time dimension which
is used instead of infinity (for epochs with infinite start times).
:param bool log_time: The time axis uses a log-10 scale.
"""
self.deme = deme
self.mid = mid
self._coords(deme, mid, inf_start_time, log_time)
def _coords(
self,
deme: demes.Deme,
mid: float,
inf_start_time: float,
log_time: bool,
num_points: int = 100,
) -> None:
"""Calculate tube coordinates."""
time: List[float] = []
size1: List[float] = []
size2: List[float] = []
for k, epoch in enumerate(deme.epochs):
start_time = epoch.start_time
if np.isinf(start_time):
start_time = inf_start_time
end_time = epoch.end_time
if epoch.size_function == "constant":
t = np.array([start_time, end_time])
N1 = [mid - epoch.start_size / 2] * 2
N2 = [mid + epoch.end_size / 2] * 2
elif epoch.size_function == "exponential":
if log_time:
t = np.exp(
np.linspace(
np.log(start_time), np.log(max(1, end_time)), num=num_points
)
)
else:
t = np.linspace(start_time, end_time, num=num_points)
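                # Interpolate the deme size exponentially from start_size to end_size:
                # N(t) = start_size * exp(r * dt), where r = log(end_size / start_size)
                # and dt runs from 0 at start_time to 1 at end_time.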
dt = (start_time - t) / (start_time - end_time)
r = np.log(epoch.end_size / epoch.start_size)
N = epoch.start_size * np.exp(r * dt)
N1 = mid - N / 2
N2 = mid + N / 2
elif epoch.size_function == "linear":
if log_time:
t = np.exp(
np.linspace(
np.log(start_time), np.log(max(1, end_time)), num=num_points
)
)
else:
t =
|
np.linspace(start_time, end_time, num=num_points)
|
numpy.linspace
|
#!/usr/bin/env python3.5
# -*- mode: python -*-
# =============================================================================
# @@-COPYRIGHT-START-@@
#
# Copyright (c) 2019, Qualcomm Innovation Center, Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# SPDX-License-Identifier: BSD-3-Clause
#
# @@-COPYRIGHT-END-@@
# =============================================================================
import unittest
from enum import Enum
import libpymo
import numpy as np
from scipy.stats import norm
class TestBiasCorrection(unittest.TestCase):
def test_bias_correction(self):
# Generating random numbers from a normal distribution for the weights and biases of the current and prev layer
np.random.seed(1)
shape = (2, 3, 2, 2)
# output 1
o1 = np.random.randn(*shape)
# output 2
o2 = np.random.randn(*shape)
biasCorrection = libpymo.BiasCorrection()
biasCorrection.storePreActivationOutput(o1)
biasCorrection.storePreActivationOutput(o1)
biasCorrection.storeQuantizedPreActivationOutput(o2)
biasCorrection.storeQuantizedPreActivationOutput(o2)
bias_tensor = libpymo.TensorParamBiasCorrection()
bias = np.array(np.random.randn(shape[1]))
bias_tensor.data = bias
biasCorrection.correctBias(bias_tensor)
bias_python = correct_bias(o1, o2, bias)
print(bias_tensor.data)
print(bias_python)
assert np.allclose(bias_tensor.data, bias_python)
def test_bias_correction_bn_params_no_activation(self):
np.random.seed(1)
shape = (3, 3, 2, 2)
weight = np.random.randn(*shape)
quantized_weight = np.random.randn(*shape)
bn_params = libpymo.BnParamsBiasCorr()
gamma = np.array(np.random.randn(3))
beta = np.array(np.random.randn(3))
bn_params.gamma = gamma
bn_params.beta = beta
bias_tensor = libpymo.TensorParamBiasCorrection()
bias = np.array(np.random.randn(shape[1]))
bias_copy = bias.copy()
bias_tensor.data = bias
activation = libpymo.ActivationType.noActivation
biasCorrection = libpymo.BnBasedBiasCorrection()
biasCorrection.correctBias(bias_tensor, quantized_weight, weight, bn_params, activation)
bias_python = bn_based_bias_correction(weight, quantized_weight, bias_copy, beta,
gamma, ActivationType.no_activation)
assert (np.allclose(bias_python, bias_tensor.data))
def test_bias_correction_bn_params_relu_activation(self):
|
np.random.seed(1)
|
numpy.random.seed
|
# -*- coding: utf-8 -*-
"""
Unittests for pointcloud
@author: simlk
"""
import os
# import sys
import unittest
# import time
import logging
import numpy as np
import tempfile
import json
# import ctypes
from thatsDEM2 import pointcloud, osr_utils
LOG = logging.getLogger(__name__)
class TestPointcloud(unittest.TestCase):
def test_pointcloud_constructor1(self):
LOG.info("Testing pointcloud constructor")
pc = pointcloud.Pointcloud(
|
np.ones((2, 2))
|
numpy.ones
|
# EXPRES-specific functions from which Excalibur emerges
# Ha ha, get it?
import os
from os.path import basename, join, isfile
from glob import glob
import numpy as np
import pandas as pd
from astropy.io import fits
from astropy.constants import c
from astropy.time import Time
from scipy.io import readsav
from tqdm.auto import tqdm
# LFC Constants
rep_rate = 14e9
lfc_offset = 6.19e9
###########################################################
# Functions for Reading in Data
###########################################################
def readParams(file_name):
"""
Given the file name of a check_point file,
load in all relevant data into 1D vectors
Returns vectors for line center in pixel (x),
order (y), error in line center fit in pixels (e),
and wavelength of line (w)
"""
try:
info = np.load(file_name,allow_pickle=True)[()]
except FileNotFoundError:
if file_name.split('/')[-2] == 'checkpoint':
lfc_id_dir = '/expres/extracted/lfc_cal/lfc_id/'
file_name = lfc_id_dir + os.path.basename(file_name)
info = np.load(file_name,allow_pickle=True)[()]
else:
raise FileNotFoundError
# Assemble information into "fit-able" form
num_orders = len(info['params'])
lines = [p[:,1] for p in info['params'] if p is not None]
errs = [np.sqrt(cov[:,1,1]) for cov in info['cov'] if cov is not None]
ordrs = [o for o in np.arange(86) if info['params'][o] is not None]
waves = [w for w in info['wvln'] if w is not None]
# I believe, but am not sure, that the wavelengths are multiplied by order
    # to separate them where orders overlap at the edges
waves = [wvln for order, wvln in zip(ordrs,waves)]
ordrs = [np.ones_like(x) * m for m,x in zip(ordrs, lines)]
x = np.concatenate(lines)
y =
|
np.concatenate(ordrs)
|
numpy.concatenate
|
import numpy as np
import scipy as sp
from matplotlib import pyplot as plt
from interaction3.reconstruction import sim_functions as sim
from interaction3.reconstruction import Beamformer
filename = 'test_rf_data.npz'
nelements = 32
pitch = 300e-6
angles = np.linspace(-5, 5, 21)
xx, yy, zz = np.mgrid[-0.02:0.02:81j, 0:1:1j, 0.001:0.041:81j]
field_pos = np.c_[xx.ravel(), yy.ravel(), zz.ravel()]
xv, yv, zv = np.linspace(0, (nelements - 1) * pitch, nelements) - (nelements - 1) * pitch / 2, 0, 0
xx, yy, zz = np.meshgrid(xv, yv, zv)
array_pos = np.c_[xx.ravel(), yy.ravel(), zz.ravel()]
def test_planewave_beamformer():
with np.load(filename) as file:
rfdata = file['planewave_rf']
t0 = file['planewave_t0']
kwargs = dict()
kwargs['sample_frequency'] = 100e6
kwargs['t0'] = t0
kwargs['window'] = 101
kwargs['transmit_pos'] = [0, 0, 0]
kwargs['receive_pos'] = array_pos
kwargs['field_pos'] = field_pos
kwargs['rfdata'] = rfdata
kwargs['planewave'] = True
kwargs['resample'] = 8
bf = Beamformer(**kwargs)
bfdata = bf.run()
envdata = sim.envelope(bfdata, axis=1)
imgdata = np.max(envdata, axis=1).reshape((81, 81))
plt.figure()
plt.imshow(imgdata)
return bfdata, imgdata
def test_synthetic_beamformer():
with np.load(filename) as file:
rfdata = file['synthetic_rf']
t0 = file['synthetic_t0']
kwargs = dict()
kwargs['sample_frequency'] = 100e6
kwargs['t0'] = t0
kwargs['window'] = 101
kwargs['transmit_pos'] = array_pos
kwargs['receive_pos'] = array_pos
kwargs['field_pos'] = field_pos
kwargs['rfdata'] = rfdata
kwargs['planewave'] = False
kwargs['resample'] = 8
bf = Beamformer(**kwargs)
bfdata = bf.run()
envdata = sim.envelope(np.sum(bfdata, axis=-1), axis=1)
# envdata = sim.envelope(bfdata, axis=1)
imgdata =
|
np.max(envdata, axis=1)
|
numpy.max
|
# coding: utf-8
# # Linear Regression
#
# ## 1. Introduction
#
# ### 1.1 Abstract
#
# In this exercise, we illustrate Hoeffding's inequality in the context of virtual coin flips.
#
# ### 1.2 Hoeffding's Inequality for a Single Bin
#
# For an infinite bin, from which we pick a sample of size $N$, we can bound the probability that the sample frequency, $\nu$, deviates from the unknown bin frequency, $\mu$, via [Hoeffding's Inequality](https://en.wikipedia.org/wiki/Hoeffding%27s_inequality):
#
# $$\mathbb{P} \left[ \left| \nu - \mu \right| > \epsilon \right] \le 2e^{-2\epsilon^2 N}$$
#
# It is valid for all $N$ and $\epsilon$ and the bound does not depend on $\mu$. It illustrates the tradeoff between $N$, $\epsilon$, and the bound.
#
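# As a quick numerical check of the bound above, the minimal sketch below (with
# $\epsilon$ and $N$ chosen arbitrarily for illustration) evaluates
# $2e^{-2\epsilon^2 N}$ for a few sample sizes, showing how quickly the bound
# tightens as $N$ grows.
import numpy as np


def hoeffding_bound(eps, N):
    """Upper bound on P[|nu - mu| > eps] for a sample of size N."""
    return 2.0 * np.exp(-2.0 * eps ** 2 * N)


for N in (10, 100, 1000):
    print(N, hoeffding_bound(eps=0.1, N=N))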
# ### 1.3 Libraries
#
# The Python libraries used are:
# In[1]:
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import multiprocessing as mp
import itertools
import numpy as np
import matplotlib.pyplot as plt
np.set_printoptions(edgeitems=3,infstr='inf',linewidth=75,nanstr='nan',precision=4,suppress=False,threshold=500,formatter=None)
get_ipython().magic('matplotlib inline')
# ## 2. Virtual Coin Flips
#
# ### 2.1 Mean Frequency of "Obtaining a Head" for Three Coins
#
# In this virtual experiment, 1000 fair coins are simulated. Each coin is flipped 10 times. The data is stored in a matrix where the 10 columns represent each of the 10 flips and the 1000 rows represent the 1000 individual coins.
#
# It is assumed that 'heads' are represented by '1' and 'tails' are represented by '0', so that we can use `np.sum(..., axis=1)` to get the number of heads for each coin.
#
# A total of three coins are selected from the 1000 fair coins - $c_1$ is the first coin, $c_{rand}$ is a random coin, and $c_{min}$ is the coin that yielded the minimum number of heads (out of the 10 coin flips). The following code calculates the mean frequency of "obtaining a head" for each of these three coins.
# In[2]:
def get_coin_flip_data():
c = np.random.randint(2,size=(1000,10))
idx = np.random.choice(1000) # choose index for a random coin
n_heads =
|
np.sum(c,axis=1)
|
numpy.sum
|
from __future__ import division, print_function, absolute_import
import numpy as np
from numpy.testing import (assert_, assert_approx_equal,
assert_allclose, assert_array_equal, assert_equal,
assert_array_almost_equal_nulp, suppress_warnings)
import pytest
from pytest import raises as assert_raises
from scipy import signal
from scipy.fft import fftfreq
from scipy.signal import (periodogram, welch, lombscargle, csd, coherence,
spectrogram, stft, istft, check_COLA, check_NOLA)
from scipy.signal.spectral import _spectral_helper
class TestPeriodogram(object):
def test_real_onesided_even(self):
x = np.zeros(16)
x[0] = 1
f, p = periodogram(x)
assert_allclose(f, np.linspace(0, 0.5, 9))
q = np.ones(9)
q[0] = 0
q[-1] /= 2.0
q /= 8
assert_allclose(p, q)
def test_real_onesided_odd(self):
x = np.zeros(15)
x[0] = 1
f, p = periodogram(x)
assert_allclose(f, np.arange(8.0)/15.0)
q = np.ones(8)
q[0] = 0
q *= 2.0/15.0
assert_allclose(p, q, atol=1e-15)
def test_real_twosided(self):
x = np.zeros(16)
x[0] = 1
f, p = periodogram(x, return_onesided=False)
assert_allclose(f, fftfreq(16, 1.0))
q = np.full(16, 1/16.0)
q[0] = 0
assert_allclose(p, q)
def test_real_spectrum(self):
x = np.zeros(16)
x[0] = 1
f, p = periodogram(x, scaling='spectrum')
g, q = periodogram(x, scaling='density')
assert_allclose(f, np.linspace(0, 0.5, 9))
assert_allclose(p, q/16.0)
def test_integer_even(self):
x = np.zeros(16, dtype=int)
x[0] = 1
f, p = periodogram(x)
assert_allclose(f, np.linspace(0, 0.5, 9))
q = np.ones(9)
q[0] = 0
q[-1] /= 2.0
q /= 8
assert_allclose(p, q)
def test_integer_odd(self):
x = np.zeros(15, dtype=int)
x[0] = 1
f, p = periodogram(x)
assert_allclose(f, np.arange(8.0)/15.0)
q = np.ones(8)
q[0] = 0
q *= 2.0/15.0
assert_allclose(p, q, atol=1e-15)
def test_integer_twosided(self):
x = np.zeros(16, dtype=int)
x[0] = 1
f, p = periodogram(x, return_onesided=False)
assert_allclose(f, fftfreq(16, 1.0))
q = np.full(16, 1/16.0)
q[0] = 0
assert_allclose(p, q)
def test_complex(self):
x = np.zeros(16, np.complex128)
x[0] = 1.0 + 2.0j
f, p = periodogram(x, return_onesided=False)
assert_allclose(f, fftfreq(16, 1.0))
q = np.full(16, 5.0/16.0)
q[0] = 0
assert_allclose(p, q)
def test_unk_scaling(self):
assert_raises(ValueError, periodogram, np.zeros(4, np.complex128),
scaling='foo')
def test_nd_axis_m1(self):
x = np.zeros(20, dtype=np.float64)
x = x.reshape((2,1,10))
x[:,:,0] = 1.0
f, p = periodogram(x)
assert_array_equal(p.shape, (2, 1, 6))
assert_array_almost_equal_nulp(p[0,0,:], p[1,0,:], 60)
f0, p0 = periodogram(x[0,0,:])
assert_array_almost_equal_nulp(p0[np.newaxis,:], p[1,:], 60)
def test_nd_axis_0(self):
x = np.zeros(20, dtype=np.float64)
x = x.reshape((10,2,1))
x[0,:,:] = 1.0
f, p = periodogram(x, axis=0)
assert_array_equal(p.shape, (6,2,1))
assert_array_almost_equal_nulp(p[:,0,0], p[:,1,0], 60)
f0, p0 = periodogram(x[:,0,0])
assert_array_almost_equal_nulp(p0, p[:,1,0])
def test_window_external(self):
x = np.zeros(16)
x[0] = 1
f, p = periodogram(x, 10, 'hann')
win = signal.get_window('hann', 16)
fe, pe = periodogram(x, 10, win)
assert_array_almost_equal_nulp(p, pe)
assert_array_almost_equal_nulp(f, fe)
win_err = signal.get_window('hann', 32)
assert_raises(ValueError, periodogram, x,
10, win_err) # win longer than signal
def test_padded_fft(self):
x = np.zeros(16)
x[0] = 1
f, p = periodogram(x)
fp, pp = periodogram(x, nfft=32)
assert_allclose(f, fp[::2])
assert_allclose(p, pp[::2])
assert_array_equal(pp.shape, (17,))
def test_empty_input(self):
f, p = periodogram([])
assert_array_equal(f.shape, (0,))
assert_array_equal(p.shape, (0,))
for shape in [(0,), (3,0), (0,5,2)]:
f, p = periodogram(np.empty(shape))
assert_array_equal(f.shape, shape)
assert_array_equal(p.shape, shape)
def test_empty_input_other_axis(self):
for shape in [(3,0), (0,5,2)]:
f, p = periodogram(np.empty(shape), axis=1)
assert_array_equal(f.shape, shape)
assert_array_equal(p.shape, shape)
def test_short_nfft(self):
x = np.zeros(18)
x[0] = 1
f, p = periodogram(x, nfft=16)
assert_allclose(f, np.linspace(0, 0.5, 9))
q = np.ones(9)
q[0] = 0
q[-1] /= 2.0
q /= 8
assert_allclose(p, q)
def test_nfft_is_xshape(self):
x = np.zeros(16)
x[0] = 1
f, p = periodogram(x, nfft=16)
assert_allclose(f, np.linspace(0, 0.5, 9))
q = np.ones(9)
q[0] = 0
q[-1] /= 2.0
q /= 8
assert_allclose(p, q)
def test_real_onesided_even_32(self):
x = np.zeros(16, 'f')
x[0] = 1
f, p = periodogram(x)
assert_allclose(f, np.linspace(0, 0.5, 9))
q = np.ones(9, 'f')
q[0] = 0
q[-1] /= 2.0
q /= 8
assert_allclose(p, q)
assert_(p.dtype == q.dtype)
def test_real_onesided_odd_32(self):
x = np.zeros(15, 'f')
x[0] = 1
f, p = periodogram(x)
assert_allclose(f, np.arange(8.0)/15.0)
q = np.ones(8, 'f')
q[0] = 0
q *= 2.0/15.0
assert_allclose(p, q, atol=1e-7)
assert_(p.dtype == q.dtype)
def test_real_twosided_32(self):
x = np.zeros(16, 'f')
x[0] = 1
f, p = periodogram(x, return_onesided=False)
assert_allclose(f, fftfreq(16, 1.0))
q = np.full(16, 1/16.0, 'f')
q[0] = 0
assert_allclose(p, q)
assert_(p.dtype == q.dtype)
def test_complex_32(self):
x = np.zeros(16, 'F')
x[0] = 1.0 + 2.0j
f, p = periodogram(x, return_onesided=False)
assert_allclose(f, fftfreq(16, 1.0))
q = np.full(16, 5.0/16.0, 'f')
q[0] = 0
assert_allclose(p, q)
assert_(p.dtype == q.dtype)
class TestWelch(object):
def test_real_onesided_even(self):
x = np.zeros(16)
x[0] = 1
x[8] = 1
f, p = welch(x, nperseg=8)
assert_allclose(f, np.linspace(0, 0.5, 5))
q = np.array([0.08333333, 0.15277778, 0.22222222, 0.22222222,
0.11111111])
assert_allclose(p, q, atol=1e-7, rtol=1e-7)
def test_real_onesided_odd(self):
x = np.zeros(16)
x[0] = 1
x[8] = 1
f, p = welch(x, nperseg=9)
assert_allclose(f, np.arange(5.0)/9.0)
q = np.array([0.12477455, 0.23430933, 0.17072113, 0.17072113,
0.17072113])
assert_allclose(p, q, atol=1e-7, rtol=1e-7)
def test_real_twosided(self):
x = np.zeros(16)
x[0] = 1
x[8] = 1
f, p = welch(x, nperseg=8, return_onesided=False)
assert_allclose(f, fftfreq(8, 1.0))
q = np.array([0.08333333, 0.07638889, 0.11111111, 0.11111111,
0.11111111, 0.11111111, 0.11111111, 0.07638889])
assert_allclose(p, q, atol=1e-7, rtol=1e-7)
def test_real_spectrum(self):
x = np.zeros(16)
x[0] = 1
x[8] = 1
f, p = welch(x, nperseg=8, scaling='spectrum')
assert_allclose(f, np.linspace(0, 0.5, 5))
q = np.array([0.015625, 0.02864583, 0.04166667, 0.04166667,
0.02083333])
assert_allclose(p, q, atol=1e-7, rtol=1e-7)
def test_integer_onesided_even(self):
x = np.zeros(16, dtype=int)
x[0] = 1
x[8] = 1
f, p = welch(x, nperseg=8)
assert_allclose(f, np.linspace(0, 0.5, 5))
q = np.array([0.08333333, 0.15277778, 0.22222222, 0.22222222,
0.11111111])
assert_allclose(p, q, atol=1e-7, rtol=1e-7)
def test_integer_onesided_odd(self):
x = np.zeros(16, dtype=int)
x[0] = 1
x[8] = 1
f, p = welch(x, nperseg=9)
assert_allclose(f, np.arange(5.0)/9.0)
q = np.array([0.12477455, 0.23430933, 0.17072113, 0.17072113,
0.17072113])
assert_allclose(p, q, atol=1e-7, rtol=1e-7)
def test_integer_twosided(self):
x = np.zeros(16, dtype=int)
x[0] = 1
x[8] = 1
f, p = welch(x, nperseg=8, return_onesided=False)
assert_allclose(f, fftfreq(8, 1.0))
q = np.array([0.08333333, 0.07638889, 0.11111111, 0.11111111,
0.11111111, 0.11111111, 0.11111111, 0.07638889])
assert_allclose(p, q, atol=1e-7, rtol=1e-7)
def test_complex(self):
x = np.zeros(16, np.complex128)
x[0] = 1.0 + 2.0j
x[8] = 1.0 + 2.0j
f, p = welch(x, nperseg=8, return_onesided=False)
assert_allclose(f, fftfreq(8, 1.0))
q = np.array([0.41666667, 0.38194444, 0.55555556, 0.55555556,
0.55555556, 0.55555556, 0.55555556, 0.38194444])
assert_allclose(p, q, atol=1e-7, rtol=1e-7)
def test_unk_scaling(self):
assert_raises(ValueError, welch, np.zeros(4, np.complex128),
scaling='foo', nperseg=4)
def test_detrend_linear(self):
x = np.arange(10, dtype=np.float64) + 0.04
f, p = welch(x, nperseg=10, detrend='linear')
assert_allclose(p, np.zeros_like(p), atol=1e-15)
def test_no_detrending(self):
x = np.arange(10, dtype=np.float64) + 0.04
f1, p1 = welch(x, nperseg=10, detrend=False)
f2, p2 = welch(x, nperseg=10, detrend=lambda x: x)
assert_allclose(f1, f2, atol=1e-15)
assert_allclose(p1, p2, atol=1e-15)
def test_detrend_external(self):
x = np.arange(10, dtype=np.float64) + 0.04
f, p = welch(x, nperseg=10,
detrend=lambda seg: signal.detrend(seg, type='l'))
assert_allclose(p, np.zeros_like(p), atol=1e-15)
def test_detrend_external_nd_m1(self):
x = np.arange(40, dtype=np.float64) + 0.04
x = x.reshape((2,2,10))
f, p = welch(x, nperseg=10,
detrend=lambda seg: signal.detrend(seg, type='l'))
assert_allclose(p, np.zeros_like(p), atol=1e-15)
def test_detrend_external_nd_0(self):
x = np.arange(20, dtype=np.float64) + 0.04
x = x.reshape((2,1,10))
x = np.rollaxis(x, 2, 0)
f, p = welch(x, nperseg=10, axis=0,
detrend=lambda seg: signal.detrend(seg, axis=0, type='l'))
assert_allclose(p, np.zeros_like(p), atol=1e-15)
def test_nd_axis_m1(self):
x = np.arange(20, dtype=np.float64) + 0.04
x = x.reshape((2,1,10))
f, p = welch(x, nperseg=10)
assert_array_equal(p.shape, (2, 1, 6))
assert_allclose(p[0,0,:], p[1,0,:], atol=1e-13, rtol=1e-13)
f0, p0 = welch(x[0,0,:], nperseg=10)
assert_allclose(p0[np.newaxis,:], p[1,:], atol=1e-13, rtol=1e-13)
def test_nd_axis_0(self):
x = np.arange(20, dtype=np.float64) + 0.04
x = x.reshape((10,2,1))
f, p = welch(x, nperseg=10, axis=0)
assert_array_equal(p.shape, (6,2,1))
assert_allclose(p[:,0,0], p[:,1,0], atol=1e-13, rtol=1e-13)
f0, p0 = welch(x[:,0,0], nperseg=10)
assert_allclose(p0, p[:,1,0], atol=1e-13, rtol=1e-13)
def test_window_external(self):
x = np.zeros(16)
x[0] = 1
x[8] = 1
f, p = welch(x, 10, 'hann', nperseg=8)
win = signal.get_window('hann', 8)
fe, pe = welch(x, 10, win, nperseg=None)
assert_array_almost_equal_nulp(p, pe)
assert_array_almost_equal_nulp(f, fe)
assert_array_equal(fe.shape, (5,)) # because win length used as nperseg
assert_array_equal(pe.shape, (5,))
assert_raises(ValueError, welch, x,
10, win, nperseg=4) # because nperseg != win.shape[-1]
win_err = signal.get_window('hann', 32)
assert_raises(ValueError, welch, x,
10, win_err, nperseg=None) # win longer than signal
def test_empty_input(self):
f, p = welch([])
assert_array_equal(f.shape, (0,))
assert_array_equal(p.shape, (0,))
for shape in [(0,), (3,0), (0,5,2)]:
f, p = welch(np.empty(shape))
assert_array_equal(f.shape, shape)
assert_array_equal(p.shape, shape)
def test_empty_input_other_axis(self):
for shape in [(3,0), (0,5,2)]:
f, p = welch(np.empty(shape), axis=1)
assert_array_equal(f.shape, shape)
assert_array_equal(p.shape, shape)
def test_short_data(self):
x = np.zeros(8)
x[0] = 1
#for string-like window, input signal length < nperseg value gives
#UserWarning, sets nperseg to x.shape[-1]
with suppress_warnings() as sup:
sup.filter(UserWarning, "nperseg = 256 is greater than input length = 8, using nperseg = 8")
f, p = welch(x,window='hann') # default nperseg
f1, p1 = welch(x,window='hann', nperseg=256) # user-specified nperseg
f2, p2 = welch(x, nperseg=8) # valid nperseg, doesn't give warning
assert_allclose(f, f2)
assert_allclose(p, p2)
assert_allclose(f1, f2)
assert_allclose(p1, p2)
def test_window_long_or_nd(self):
assert_raises(ValueError, welch, np.zeros(4), 1, np.array([1,1,1,1,1]))
assert_raises(ValueError, welch, np.zeros(4), 1,
np.arange(6).reshape((2,3)))
def test_nondefault_noverlap(self):
x = np.zeros(64)
x[::8] = 1
f, p = welch(x, nperseg=16, noverlap=4)
q = np.array([0, 1./12., 1./3., 1./5., 1./3., 1./5., 1./3., 1./5.,
1./6.])
assert_allclose(p, q, atol=1e-12)
def test_bad_noverlap(self):
assert_raises(ValueError, welch, np.zeros(4), 1, 'hann', 2, 7)
def test_nfft_too_short(self):
assert_raises(ValueError, welch, np.ones(12), nfft=3, nperseg=4)
def test_real_onesided_even_32(self):
x = np.zeros(16, 'f')
x[0] = 1
x[8] = 1
f, p = welch(x, nperseg=8)
assert_allclose(f, np.linspace(0, 0.5, 5))
q = np.array([0.08333333, 0.15277778, 0.22222222, 0.22222222,
0.11111111], 'f')
assert_allclose(p, q, atol=1e-7, rtol=1e-7)
assert_(p.dtype == q.dtype)
def test_real_onesided_odd_32(self):
x = np.zeros(16, 'f')
x[0] = 1
x[8] = 1
f, p = welch(x, nperseg=9)
assert_allclose(f, np.arange(5.0)/9.0)
q = np.array([0.12477458, 0.23430935, 0.17072113, 0.17072116,
0.17072113], 'f')
assert_allclose(p, q, atol=1e-7, rtol=1e-7)
assert_(p.dtype == q.dtype)
def test_real_twosided_32(self):
x = np.zeros(16, 'f')
x[0] = 1
x[8] = 1
f, p = welch(x, nperseg=8, return_onesided=False)
assert_allclose(f, fftfreq(8, 1.0))
q = np.array([0.08333333, 0.07638889, 0.11111111,
0.11111111, 0.11111111, 0.11111111, 0.11111111,
0.07638889], 'f')
assert_allclose(p, q, atol=1e-7, rtol=1e-7)
assert_(p.dtype == q.dtype)
def test_complex_32(self):
x = np.zeros(16, 'F')
x[0] = 1.0 + 2.0j
x[8] = 1.0 + 2.0j
f, p = welch(x, nperseg=8, return_onesided=False)
assert_allclose(f, fftfreq(8, 1.0))
q = np.array([0.41666666, 0.38194442, 0.55555552, 0.55555552,
0.55555558, 0.55555552, 0.55555552, 0.38194442], 'f')
assert_allclose(p, q, atol=1e-7, rtol=1e-7)
assert_(p.dtype == q.dtype,
'dtype mismatch, %s, %s' % (p.dtype, q.dtype))
def test_padded_freqs(self):
x = np.zeros(12)
nfft = 24
f = fftfreq(nfft, 1.0)[:nfft//2+1]
f[-1] *= -1
fodd, _ = welch(x, nperseg=5, nfft=nfft)
feven, _ = welch(x, nperseg=6, nfft=nfft)
assert_allclose(f, fodd)
assert_allclose(f, feven)
nfft = 25
f = fftfreq(nfft, 1.0)[:(nfft + 1)//2]
fodd, _ = welch(x, nperseg=5, nfft=nfft)
feven, _ = welch(x, nperseg=6, nfft=nfft)
assert_allclose(f, fodd)
assert_allclose(f, feven)
def test_window_correction(self):
A = 20
fs = 1e4
nperseg = int(fs//10)
fsig = 300
ii = int(fsig*nperseg//fs) # Freq index of fsig
tt = np.arange(fs)/fs
x = A*np.sin(2*np.pi*fsig*tt)
for window in ['hann', 'bartlett', ('tukey', 0.1), 'flattop']:
_, p_spec = welch(x, fs=fs, nperseg=nperseg, window=window,
scaling='spectrum')
freq, p_dens = welch(x, fs=fs, nperseg=nperseg, window=window,
scaling='density')
# Check peak height at signal frequency for 'spectrum'
assert_allclose(p_spec[ii], A**2/2.0)
# Check integrated spectrum RMS for 'density'
assert_allclose(np.sqrt(np.trapz(p_dens, freq)), A*np.sqrt(2)/2,
rtol=1e-3)
def test_axis_rolling(self):
np.random.seed(1234)
x_flat = np.random.randn(1024)
_, p_flat = welch(x_flat)
for a in range(3):
newshape = [1,]*3
newshape[a] = -1
x = x_flat.reshape(newshape)
_, p_plus = welch(x, axis=a) # Positive axis index
_, p_minus = welch(x, axis=a-x.ndim) # Negative axis index
assert_equal(p_flat, p_plus.squeeze(), err_msg=a)
assert_equal(p_flat, p_minus.squeeze(), err_msg=a-x.ndim)
def test_average(self):
x = np.zeros(16)
x[0] = 1
x[8] = 1
f, p = welch(x, nperseg=8, average='median')
assert_allclose(f, np.linspace(0, 0.5, 5))
q = np.array([.1, .05, 0., 1.54074396e-33, 0.])
assert_allclose(p, q, atol=1e-7, rtol=1e-7)
assert_raises(ValueError, welch, x, nperseg=8,
average='unrecognised-average')
class TestCSD:
def test_pad_shorter_x(self):
x = np.zeros(8)
y = np.zeros(12)
f = np.linspace(0, 0.5, 7)
c = np.zeros(7,dtype=np.complex128)
f1, c1 = csd(x, y, nperseg=12)
assert_allclose(f, f1)
assert_allclose(c, c1)
def test_pad_shorter_y(self):
x = np.zeros(12)
y = np.zeros(8)
f = np.linspace(0, 0.5, 7)
c = np.zeros(7,dtype=np.complex128)
f1, c1 = csd(x, y, nperseg=12)
assert_allclose(f, f1)
assert_allclose(c, c1)
def test_real_onesided_even(self):
x = np.zeros(16)
x[0] = 1
x[8] = 1
f, p = csd(x, x, nperseg=8)
assert_allclose(f, np.linspace(0, 0.5, 5))
q = np.array([0.08333333, 0.15277778, 0.22222222, 0.22222222,
0.11111111])
assert_allclose(p, q, atol=1e-7, rtol=1e-7)
def test_real_onesided_odd(self):
x = np.zeros(16)
x[0] = 1
x[8] = 1
f, p = csd(x, x, nperseg=9)
assert_allclose(f, np.arange(5.0)/9.0)
q = np.array([0.12477455, 0.23430933, 0.17072113, 0.17072113,
0.17072113])
assert_allclose(p, q, atol=1e-7, rtol=1e-7)
def test_real_twosided(self):
x = np.zeros(16)
x[0] = 1
x[8] = 1
f, p = csd(x, x, nperseg=8, return_onesided=False)
assert_allclose(f, fftfreq(8, 1.0))
q = np.array([0.08333333, 0.07638889, 0.11111111, 0.11111111,
0.11111111, 0.11111111, 0.11111111, 0.07638889])
assert_allclose(p, q, atol=1e-7, rtol=1e-7)
def test_real_spectrum(self):
x = np.zeros(16)
x[0] = 1
x[8] = 1
f, p = csd(x, x, nperseg=8, scaling='spectrum')
assert_allclose(f, np.linspace(0, 0.5, 5))
q = np.array([0.015625, 0.02864583, 0.04166667, 0.04166667,
0.02083333])
assert_allclose(p, q, atol=1e-7, rtol=1e-7)
def test_integer_onesided_even(self):
x = np.zeros(16, dtype=int)
x[0] = 1
x[8] = 1
f, p = csd(x, x, nperseg=8)
assert_allclose(f, np.linspace(0, 0.5, 5))
q = np.array([0.08333333, 0.15277778, 0.22222222, 0.22222222,
0.11111111])
assert_allclose(p, q, atol=1e-7, rtol=1e-7)
def test_integer_onesided_odd(self):
x = np.zeros(16, dtype=int)
x[0] = 1
x[8] = 1
f, p = csd(x, x, nperseg=9)
assert_allclose(f, np.arange(5.0)/9.0)
q = np.array([0.12477455, 0.23430933, 0.17072113, 0.17072113,
0.17072113])
assert_allclose(p, q, atol=1e-7, rtol=1e-7)
def test_integer_twosided(self):
x = np.zeros(16, dtype=int)
x[0] = 1
x[8] = 1
f, p = csd(x, x, nperseg=8, return_onesided=False)
assert_allclose(f, fftfreq(8, 1.0))
q = np.array([0.08333333, 0.07638889, 0.11111111, 0.11111111,
0.11111111, 0.11111111, 0.11111111, 0.07638889])
assert_allclose(p, q, atol=1e-7, rtol=1e-7)
def test_complex(self):
x = np.zeros(16, np.complex128)
x[0] = 1.0 + 2.0j
x[8] = 1.0 + 2.0j
f, p = csd(x, x, nperseg=8, return_onesided=False)
assert_allclose(f, fftfreq(8, 1.0))
q = np.array([0.41666667, 0.38194444, 0.55555556, 0.55555556,
0.55555556, 0.55555556, 0.55555556, 0.38194444])
assert_allclose(p, q, atol=1e-7, rtol=1e-7)
def test_unk_scaling(self):
assert_raises(ValueError, csd, np.zeros(4, np.complex128),
np.ones(4, np.complex128), scaling='foo', nperseg=4)
def test_detrend_linear(self):
x = np.arange(10, dtype=np.float64) + 0.04
f, p = csd(x, x, nperseg=10, detrend='linear')
assert_allclose(p, np.zeros_like(p), atol=1e-15)
def test_no_detrending(self):
x = np.arange(10, dtype=np.float64) + 0.04
f1, p1 = csd(x, x, nperseg=10, detrend=False)
f2, p2 = csd(x, x, nperseg=10, detrend=lambda x: x)
assert_allclose(f1, f2, atol=1e-15)
assert_allclose(p1, p2, atol=1e-15)
def test_detrend_external(self):
x = np.arange(10, dtype=np.float64) + 0.04
f, p = csd(x, x, nperseg=10,
detrend=lambda seg: signal.detrend(seg, type='l'))
assert_allclose(p, np.zeros_like(p), atol=1e-15)
def test_detrend_external_nd_m1(self):
x = np.arange(40, dtype=np.float64) + 0.04
x = x.reshape((2,2,10))
f, p = csd(x, x, nperseg=10,
detrend=lambda seg: signal.detrend(seg, type='l'))
assert_allclose(p,
|
np.zeros_like(p)
|
numpy.zeros_like
|
from __future__ import annotations
import numpy as np
from numpy import ndarray as Array
from functools import total_ordering
from typing import List, Dict, Tuple, Iterable, Callable, NamedTuple, Union, Optional
class Point(NamedTuple):
"""
Position tuple
"""
x: int = 0
y: int = 0
def __add__(self, other: Union[Point, Tuple[int, int]]) -> Point:
"""
Component wise add onto the point
:param other: Other point to add
:return: The resulting point
"""
return Point(self.x + other[0], self.y + other[1])
def __sub__(self, other: Union[Point, Tuple[int, int]]) -> Point:
"""
Component wise subtract onto the point
:param other: Other point to subtract
:return: The resulting point
"""
return Point(self.x - other[0], self.y - other[1])
def __mod__(self, other: Union[Point, Tuple[int, int]]) -> Point:
"""
Component wise modulo onto the point
:param other: Other point to mod by
:return: The resulting point
"""
return Point(self.x % other[0], self.y % other[1])
@staticmethod
def manhattan_distance(a: Point, b: Point) -> Point:
"""
Manhattan distance between two points
:param a: First point
:param b: Second point
:return: The distance between the points
"""
return Point(abs(a.x - b.x), abs(a.y - b.y))
class State(NamedTuple):
"""
Board state, consisting of cost and associated board
"""
cost: int
target: int
board: Board
@total_ordering
class Board:
"""
Chi-Puzzle board
"""
def __init__(self, array: Array, zero: Point, goals: Tuple[Array, ...], heuristic: Optional[Callable[[Board], int]], sort_g: bool, cost: int = 0, parent: Optional[State] = None) -> None:
"""
Creates a new board with the specified parameters
:param array: Board array
:param zero: Position of the zero
:param goals: Tuple of goal states
:param heuristic: Heuristic function used
:param cost: Cost to reach this board
:param parent: Parent board
"""
self.height: int
self.width: int
self.height, self.width = array.shape
self.array = array
self.zero = zero
self.goals = goals
self._heuristic = heuristic
self._sort_g = sort_g
self.g = cost
self.parent = parent
self._hash: Optional[int] = None
self.is_goal = any(np.array_equal(self.array, goal) for goal in self.goals)
# Calculate heuristic
self.h = heuristic(self) if heuristic is not None else 0
self.f = self.g + self.h if self._sort_g else self.h
# region Ordering
def __hash__(self) -> int:
if self._hash is None:
self._hash = hash(tuple(self.array.flat))
return self._hash
def __eq__(self, other: Board) -> bool:
return np.array_equal(self.array, other.array)
def __lt__(self, other: Board) -> bool:
return self.f < other.f
def __str__(self) -> str:
return " ".join(map(str, self.array.flat))
# endregion
# region Move generation
def generate_moves(self) -> Iterable[State]:
"""
Generates all possible moves from the current board state
:return: Iterable of all the possible moves
"""
targets: Dict[Point, State] = {}
# Check cardinal direction moves
self._check_cardinal(self.zero + (1, 0), targets)
self._check_cardinal(self.zero - (1, 0), targets)
self._check_cardinal(self.zero + (0, 1), targets)
self._check_cardinal(self.zero - (0, 1), targets)
# Check the corner moves
max_x = self.height - 1
max_y = self.width - 1
if self.zero in ((0, 0), (max_x, max_y)):
# Top left/bottom right corners
self._check_corner(self.zero + (1, 1), targets)
self._check_corner(self.zero - (1, 1), targets)
elif self.zero in ((max_x, 0), (0, max_y)):
# Bottom left/top right corners
self._check_corner(self.zero + (1, -1), targets)
self._check_corner(self.zero - (1, -1), targets)
return targets.values()
def _check_cardinal(self, target: Point, targets: Dict[Point, State]) -> None:
"""
Checks for wrapping on a cardinal move and adjusts the position and cost
:param target: Target move
:param targets: Known targets so far
"""
# Check if we're wrapping with this move
if target.x in (-1, self.height) or target.y in (-1, self.width):
cost = 2
target %= self.array.shape
else:
cost = 1
self._check_target(target, cost, targets)
def _check_corner(self, target: Point, targets: Dict[Point, State]) -> None:
"""
Adjusts the wrapping on corner moves and sets the correct cost
:param target: Target move
:param targets: Known targets so far
"""
# Adjust wrapping bounds
target %= self.array.shape
self._check_target(target, 3, targets)
def _check_target(self, target: Point, cost: int, targets: Dict[Point, State]) -> None:
"""
Validates the target move and adds it to or adjusts known targets if possible
:param target: Target move
:param cost: Move cost
:param targets: Known targets so far
"""
# Check if not in targets
if target not in targets:
# Copy array, then apply the move
a = self.array.copy()
t = a[target]
a[self.zero], a[target] = t, a[self.zero]
board = Board(a, target, self.goals, self._heuristic, self._sort_g, self.g + cost, State(cost, t, self))
else:
# Check if we have a lower cost
state = targets[target]
if cost >= state.cost:
# If not do nothing
return
# Reuse the same board if possible
board = state.board
board.g = self.g + cost
# Create state
targets[target] = State(cost, 0, board)
# endregion
# region Heuristics
@staticmethod
def h0(self: Board) -> int:
"""
        Heuristic 0 - A naive heuristic based on the position of 0
:param self: The board to calculate the heuristic for
:return: The value of the heuristic
"""
return 0 if self.array[(self.height - 1, self.width - 1)] == 0 else 1
@staticmethod
def h1(self: Board) -> int:
"""
Heuristic 1 - Hamming Distance
:param self: The board to calculate the heuristic for
:return: The value of the heuristic
"""
# Find the lowest heuristic over all goal states, return 0 if a goal state
return min(map(self._heuristic_hamming, self.goals)) if not self.is_goal else 0
@staticmethod
def h2(self: Board) -> int:
"""
Heuristic 2 - Wrapped Manhattan Distance
We are using regular Manhattan Distance, and accounting for wraps.
If a wrap is the shorter path, one is also added to account for the more expensive move.
:param self: The board to calculate the heuristic for
:return: The value of the heuristic
"""
# Find the lowest heuristic over all goal states, return 0 if a goal state
return min(map(self._heuristic_manhattan, self.goals)) if not self.is_goal else 0
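    # For example, on a board of width 4, a tile that is 3 columns from its goal is
    # closer going around the edge: since 3 > 4 // 2, the distance becomes 4 - 3 = 1
    # and a single wrap penalty of 1 is added, giving 2 rather than 3.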
def _heuristic_hamming(self, goal: Array) -> int:
"""
Hamming Distance heuristic
        :param goal: Goal state to calculate the heuristic from
:return: The Hamming Distance from the given goal state
"""
# Running total
total = 0
for index in np.ndindex(goal.shape):
i = goal[index]
if i == 0:
                # Skip zero since it's our "empty" position
continue
# If the spots do not match, add one
if i != self.array[index]:
total += 1
return total
def _heuristic_manhattan(self, goal: Array) -> int:
"""
Manhattan Distance heuristic
        :param goal: Goal state to calculate the heuristic from
:return: The Manhattan Distance from the given goal state
"""
# Running total
total = 0
for index in np.ndindex(goal.shape):
i = goal[index]
if i == 0:
                # Skip zero since it's our "empty" position
continue
g = Point(*index)
t = self._find_point(self.array, i)
x, y = Point.manhattan_distance(g, t)
# Take care of wrapping
wraps = 0
if x > self.height // 2:
x = self.height - x
wraps = 1
if y > self.width // 2:
y = self.width - y
wraps = 1
# Make sure we don't add two wrapping penalties
total += x + y + wraps
return total
# endregion
# region Static methods
@staticmethod
def _find_point(array: Array, value: int) -> Point:
return Point(*np.asarray(np.where(array == value)).T[0])
@staticmethod
def from_list(data: List[int], shape: Tuple[int, int], heuristic: Optional[Callable[[Board], int]] = None, sort_g: bool = True, dtype: Optional[object] = np.int16) -> Board:
"""
Creates a new board from a list and a specified size
:param data: List to create the board from
:param shape: Shape of the board (height, width)
:param heuristic: Heuristic function
:param sort_g: If the sorting should account g(n)
:param dtype: Type used within the Numpy arrays
:return: The created board
"""
# Create the board array
array: Array = np.array(data, dtype=dtype).reshape(shape)
# Find the location of the zero
zero = Board._find_point(array, 0)
# Create both solution boards
g1: Array = np.roll(
|
np.arange(array.size, dtype=dtype)
|
numpy.arange
|
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# python3
"""NeuTra implementation."""
# pylint: disable=invalid-name,missing-docstring
import time
from typing import Any, Text, Tuple, NamedTuple
from absl import logging
import gin
import numpy as np
import tensorflow.compat.v2 as tf
import tensorflow_probability as tfp
from hmc_swindles import targets
from hmc_swindles import utils
from tensorflow.python.util import nest # pylint: disable=g-direct-tensorflow-import
# pylint: disable=g-import-not-at-top
USE_LOCAL_FUN_MC = True
if USE_LOCAL_FUN_MC:
from fun_mc import using_tensorflow as fun_mc # pylint: disable=reimported
tfd = tfp.distributions
tfb = tfp.bijectors
tfkl = tf.keras.layers
@gin.configurable("head_tail_bijector")
def MakeHeadTailBijectorFn(num_dims,
head_layers=(),
activation=tf.nn.elu,
train=False,
head_dims=3):
"""A RealNVP for stochastic volatility model."""
# pylint: disable=no-value-for-parameter
del train
tail_dims = num_dims - head_dims
@utils.MakeTFTemplate
def head_bijector_fn(x):
x.set_shape(list(x.shape)[:-1] + [head_dims])
input_shape = x.shape
for i, units in enumerate(head_layers):
x = utils.MaskedDense(
inputs=x,
units=units,
num_blocks=head_dims,
exclusive=True if i == 0 else False,
kernel_initializer=utils.L2HMCInitializer(factor=0.01),
activation=activation,
)
x = utils.MaskedDense(
inputs=x,
units=2 * head_dims,
num_blocks=head_dims,
activation=None,
kernel_initializer=utils.L2HMCInitializer(factor=0.01),
)
x = tf.reshape(x, shape=tf.concat([input_shape, [2]], axis=0))
shift, log_scale = tf.unstack(x, num=2, axis=-1)
return tfb.Chain([tfb.Shift(shift=shift), tfb.Scale(log_scale=log_scale)])
@utils.MakeTFTemplate
def head_to_tail_bijector_fn(x, _):
for units in head_layers:
x = tfkl.Dense(
units=units,
activation=activation,
kernel_initializer=utils.L2HMCInitializer(factor=0.01),
)(x,)
shift = tfkl.Dense(
units=1,
activation=None,
kernel_initializer=utils.L2HMCInitializer(factor=0.01),
)(x,)
return tfb.Chain(
[tfb.Shift(shift=shift), tfb.Scale(log_scale=tf.Variable(0.))])
@utils.MakeTFTemplate
def tail_to_head_bijector_fn(x, _):
x = tf.reduce_mean(x, axis=-1, keepdims=True)
for units in head_layers:
x = tfkl.Dense(units=units, activation=activation)(x)
shift = tfkl.Dense(units=head_dims, activation=None)(x)
return tfb.Chain(
[tfb.Shift(shift=shift),
tfb.Scale(log_scale=tf.Variable(tf.zeros(head_dims)))])
b = tfb.Identity()
b = tfb.Blockwise(
[
tfb.Invert(
tfb.MaskedAutoregressiveFlow(
bijector_fn=head_bijector_fn("head"))),
tfb.Identity(),
],
[head_dims, tail_dims],
)(b,)
b = tfb.RealNVP(
num_masked=head_dims,
bijector_fn=head_to_tail_bijector_fn("head_to_tail"))(
b)
b = tfb.Permute(list(reversed(range(num_dims))))(b)
b = tfb.RealNVP(
num_masked=tail_dims,
bijector_fn=tail_to_head_bijector_fn("tail_to_head"))(
b)
b = tfb.Permute(list(reversed(range(num_dims))))(b)
b = tfb.Blockwise(
[
tfb.Identity(),
tfb.Shift(shift=tf.Variable(tf.zeros([tail_dims])))
],
[head_dims, tail_dims],
)(b,)
# Construct the variables
_ = b.forward(tf.zeros([1, num_dims]))
return b
@gin.configurable("affine_bijector")
def MakeAffineBijectorFn(num_dims, train=False, use_tril=False):
mu = tf.Variable(tf.zeros([num_dims]), name="mean", trainable=train)
if use_tril:
tril_flat = tf.Variable(
tf.zeros([num_dims * (num_dims + 1) // 2]),
name="tril_flat",
trainable=train)
tril_raw = tfp.math.fill_triangular(tril_flat)
sigma = tf.nn.softplus(tf.linalg.diag_part(tril_raw))
tril = tf.linalg.set_diag(tril_raw, sigma)
return tfb.Shift(shift=mu)(tfb.ScaleMatvecTriL(scale_tril=tril))
else:
sigma = tf.nn.softplus(
tf.Variable(tf.zeros([num_dims]), name="invpsigma", trainable=train))
return tfb.Shift(shift=mu)(tfb.ScaleMatvecDiag(scale_diag=sigma))
@gin.configurable("rnvp_bijector")
def MakeRNVPBijectorFn(num_dims,
num_stages,
hidden_layers,
scale=1.0,
activation=tf.nn.elu,
train=False,
learn_scale=False,
dropout_rate=0.0):
swap = tfb.Permute(permutation=np.arange(num_dims - 1, -1, -1))
bijectors = []
for i in range(num_stages):
_rnvp_template = utils.DenseShiftLogScale(
"rnvp_%d" % i,
hidden_layers=hidden_layers,
activation=activation,
kernel_initializer=utils.L2HMCInitializer(factor=0.01),
dropout_rate=dropout_rate,
train=train)
def rnvp_template(x, output_units, t=_rnvp_template):
# # TODO(siege): I don't understand why the shape gets lost.
# x.set_shape([None, num_dims - output_units])
return t(x, output_units)
bijectors.append(
tfb.RealNVP(
num_masked=num_dims // 2, shift_and_log_scale_fn=rnvp_template))
bijectors.append(swap)
# Drop the last swap.
bijectors = bijectors[:-1]
if learn_scale:
scale = tf.Variable(tfp.math.softplus_inverse(scale),
name="isp_global_scale")
bijectors.append(tfb.Scale(scale=scale))
bijector = tfb.Chain(bijectors)
# Construct the variables
_ = bijector.forward(tf.zeros([1, num_dims]))
return bijector
@gin.configurable("iaf_bijector")
def MakeIAFBijectorFn(
num_dims,
num_stages,
hidden_layers,
scale=1.0,
activation=tf.nn.elu,
train=False,
dropout_rate=0.0,
learn_scale=False,
):
swap = tfb.Permute(permutation=np.arange(num_dims - 1, -1, -1))
bijectors = []
for i in range(num_stages):
_iaf_template = utils.DenseAR(
"iaf_%d" % i,
hidden_layers=hidden_layers,
activation=activation,
kernel_initializer=utils.L2HMCInitializer(factor=0.01),
dropout_rate=dropout_rate,
train=train)
def iaf_template(x, t=_iaf_template):
# # TODO(siege): I don't understand why the shape gets lost.
# x.set_shape([None, num_dims])
return t(x)
bijectors.append(
tfb.Invert(
tfb.MaskedAutoregressiveFlow(shift_and_log_scale_fn=iaf_template)))
bijectors.append(swap)
# Drop the last swap.
bijectors = bijectors[:-1]
if learn_scale:
scale = tf.nn.softplus(
tf.Variable(tfp.math.softplus_inverse(scale), name="isp_global_scale"))
bijectors.append(tfb.Scale(scale=scale))
bijector = tfb.Chain(bijectors)
# Construct the variables
_ = bijector.forward(tf.zeros([1, num_dims]))
return bijector
@utils.register_namedtuple
class TargetSpec(NamedTuple):
name: Any
num_dims: Any
x_min: Any
x_max: Any
y_min: Any
y_max: Any
stats: Any
bijector: Any
transforms: Any
@gin.configurable("target_spec")
def GetTargetSpec(
name,
**kwargs):
target_density = utils.VectorTargetDensity(getattr(targets, name)())
num_dims = target_density.event_shape.num_elements()
target = utils.LogProbDist(num_dims=num_dims, log_prob_fn=target_density)
spec = TargetSpec(
name=name,
num_dims=num_dims,
x_min=0.10,
x_max=0.15,
y_min=0.10,
y_max=0.15,
stats={
k + "_mean": v.ground_truth_mean.astype(np.float32)
for k, v in target_density.expectations.items()
if v.ground_truth_mean is not None
},
transforms=list(target_density.expectations.items()),
bijector=target_density.constraining_bijectors,
)
return target, spec._replace(**kwargs)
@utils.register_namedtuple
class MCMCOutputs(NamedTuple):
x_chain: Any = ()
xcv_chain: Any = ()
xa_chain: Any = ()
xcva_chain: Any = ()
p_accept: Any = ()
p_accept_cv: Any = ()
is_accepted: Any = ()
is_accepted_cv: Any = ()
is_accepted_a: Any = ()
log_accept_ratio: Any = ()
num_leapfrog_steps: Any = ()
step_size: Any = ()
extra: Any = ()
def GetIntegrator(integrator, step_size, num_steps, target_log_prob_fn):
integrators = {
"leapfrog": (fun_mc.leapfrog_step, 1),
"ruth4": (fun_mc.ruth4_step, 3),
"blanes_3_stage": (fun_mc.blanes_3_stage_step, 3),
"blanes_4_stage": (fun_mc.blanes_4_stage_step, 5),
}
integrator_step_fn, leapfrog_multiplier = integrators[integrator]
kinetic_energy_fn = fun_mc.make_gaussian_kinetic_energy_fn(1)
integrator_fn = lambda state: fun_mc.hamiltonian_integrator( # pylint: disable=g-long-lambda
state,
num_steps=num_steps,
integrator_step_fn=lambda state: integrator_step_fn( # pylint: disable=g-long-lambda
state,
step_size=step_size,
target_log_prob_fn=target_log_prob_fn,
kinetic_energy_fn=kinetic_energy_fn),
kinetic_energy_fn=kinetic_energy_fn)
return integrator_fn, leapfrog_multiplier
@gin.configurable("cva_neutra")
def MakeCVANeuTra(target,
q,
batch_size=32,
num_steps=100,
num_leapfrog_steps=2,
step_size=0.1,
integrator="leapfrog",
x_init=None):
if x_init is None:
x_init = q.sample(batch_size)
(transformed_log_prob_fn,
z_init) = fun_mc.transform_log_prob_fn(lambda x: (target.log_prob(x), ()),
q.bijector, x_init)
def joint_log_prob_fn(z_zcv):
# N.B. z is concatenated real + antithetic chain.
z, zcv = tf.split(z_zcv, [batch_size * 2, batch_size], axis=0)
lpz, (x, _) = transformed_log_prob_fn(z)
lpzcv = q.distribution.log_prob(zcv)
xcv = q.bijector.forward(zcv)
x_xcv = tf.concat([x, xcv], axis=0)
return tf.concat([lpz, lpzcv], axis=0), x_xcv
integrator, leapfrog_multiplier = GetIntegrator(integrator, step_size,
num_leapfrog_steps,
joint_log_prob_fn)
def transition_operator(hmc_state):
momentum = tf.random.normal(tf.shape(x_init))
log_uniform = tf.math.log(tf.random.uniform(shape=tf.shape(x_init)[:-1]))
# Share the momentum for everything, but negate it for antithetic chain.
# Share the log_uniform between z and zcv as is.
momentum = tf.concat([momentum, -momentum, momentum], axis=0)
log_uniform = tf.concat([log_uniform] * 3, axis=0)
return fun_mc.hamiltonian_monte_carlo_step(
hmc_state,
target_log_prob_fn=joint_log_prob_fn,
momentum=momentum,
log_uniform=log_uniform,
integrator_fn=integrator)
def trace_fn(state, extra):
x_xcv = state.state_extra
zcva = -state.state[-batch_size:]
return (x_xcv, zcva, extra.log_accept_ratio, extra.is_accepted)
(_, (x_xcv_chain, zcva_chain, log_accept_ratio,
is_accepted)) = fun_mc.trace(
state=fun_mc.hamiltonian_monte_carlo_init(
tf.concat([z_init, -z_init, z_init], axis=0), joint_log_prob_fn),
fn=transition_operator,
num_steps=num_steps,
trace_fn=trace_fn)
p_accept = tf.reduce_mean(
tf.cast(is_accepted[:, :2 * batch_size], tf.float32))
p_accept_cv = tf.reduce_mean(
tf.cast(is_accepted[:, -batch_size:], tf.float32))
is_accepted_a = is_accepted[:, batch_size:2 * batch_size]
is_accepted_cv = is_accepted[:, -batch_size:]
is_accepted = is_accepted[:, :batch_size]
x_chain, xa_chain, xcv_chain = tf.split(x_xcv_chain, 3, axis=1)
x_chain = tf.stop_gradient(tf.concat([x_init[tf.newaxis, Ellipsis], x_chain], 0))
xa_chain = tf.stop_gradient(tf.concat([x_init[tf.newaxis, Ellipsis], xa_chain], 0))
xcv_chain = tf.stop_gradient(
tf.concat([x_init[tf.newaxis, Ellipsis], xcv_chain], 0))
zcva_chain = tf.concat([-z_init[tf.newaxis, Ellipsis], zcva_chain], 0)
xcva_chain = tf.stop_gradient(q.bijector.forward(zcva_chain))
return MCMCOutputs(
x_chain=x_chain,
xcv_chain=xcv_chain,
xa_chain=xa_chain,
xcva_chain=xcva_chain,
p_accept=p_accept,
p_accept_cv=p_accept_cv,
is_accepted=is_accepted,
is_accepted_cv=is_accepted_cv,
is_accepted_a=is_accepted_a,
log_accept_ratio=log_accept_ratio,
num_leapfrog_steps=num_leapfrog_steps * leapfrog_multiplier)
@gin.configurable("a_neutra")
def MakeANeuTra(target,
q,
batch_size=32,
num_steps=100,
num_leapfrog_steps=2,
step_size=0.1,
integrator="leapfrog",
x_init=None):
if x_init is None:
x_init = q.sample(batch_size)
(transformed_log_prob_fn,
z_init) = fun_mc.transform_log_prob_fn(lambda x: (target.log_prob(x), ()),
q.bijector, x_init)
integrator, leapfrog_multiplier = GetIntegrator(integrator, step_size,
num_leapfrog_steps,
transformed_log_prob_fn)
def transition_operator(hmc_state):
momentum = tf.random.normal(tf.shape(x_init))
log_uniform = tf.math.log(tf.random.uniform(shape=tf.shape(x_init)[:-1]))
# Share the momentum for everything, but negate it for antithetic chain.
momentum = tf.concat([momentum, -momentum], axis=0)
log_uniform = tf.concat([log_uniform] * 2, axis=0)
return fun_mc.hamiltonian_monte_carlo_step(
hmc_state,
target_log_prob_fn=transformed_log_prob_fn,
momentum=momentum,
log_uniform=log_uniform,
integrator_fn=integrator)
def trace_fn(state, extra):
x_xa = state.state_extra[0]
return (x_xa, extra.log_accept_ratio, extra.is_accepted)
(_, (x_xa_chain, log_accept_ratio, is_accepted)) = fun_mc.trace(
state=fun_mc.hamiltonian_monte_carlo_init(
tf.concat([z_init, -z_init], axis=0), transformed_log_prob_fn),
fn=transition_operator,
num_steps=num_steps,
trace_fn=trace_fn)
p_accept = tf.reduce_mean(tf.cast(is_accepted, tf.float32))
is_accepted_a = is_accepted[:, -batch_size:]
is_accepted = is_accepted[:, :batch_size]
x_chain, xa_chain = tf.split(x_xa_chain, 2, axis=1)
x_chain = tf.stop_gradient(tf.concat([x_init[tf.newaxis, Ellipsis], x_chain], 0))
xa_chain = tf.stop_gradient(tf.concat([x_init[tf.newaxis, Ellipsis], xa_chain], 0))
return MCMCOutputs(
x_chain=x_chain,
xa_chain=xa_chain,
p_accept=p_accept,
is_accepted=is_accepted,
is_accepted_a=is_accepted_a,
log_accept_ratio=log_accept_ratio,
num_leapfrog_steps=num_leapfrog_steps * leapfrog_multiplier)
@gin.configurable("cv_neutra")
def MakeCVNeuTra(target,
q,
batch_size=32,
num_steps=100,
num_leapfrog_steps=2,
step_size=0.1,
integrator="leapfrog",
x_init=None):
if x_init is None:
x_init = q.sample(batch_size)
(transformed_log_prob_fn,
z_init) = fun_mc.transform_log_prob_fn(lambda x: (target.log_prob(x), ()),
q.bijector, x_init)
def joint_log_prob_fn(z_zcv):
z, zcv = tf.split(z_zcv, 2, axis=0)
lpz, (x, _) = transformed_log_prob_fn(z)
lpzcv = q.distribution.log_prob(zcv)
xcv = q.bijector.forward(zcv)
x_xcv = tf.concat([x, xcv], axis=0)
return tf.concat([lpz, lpzcv], axis=0), x_xcv
integrator, leapfrog_multiplier = GetIntegrator(integrator, step_size,
num_leapfrog_steps,
joint_log_prob_fn)
def transition_operator(hmc_state):
momentum = tf.random.normal(tf.shape(x_init))
momentum_cv = momentum
log_uniform = tf.math.log(tf.random.uniform(shape=tf.shape(x_init)[:-1]))
# Share the momentum and log_uniform between z and zcv
momentum = tf.concat([momentum, momentum_cv], axis=0)
log_uniform = tf.concat([log_uniform] * 2, axis=0)
return fun_mc.hamiltonian_monte_carlo_step(
hmc_state,
target_log_prob_fn=joint_log_prob_fn,
momentum=momentum,
log_uniform=log_uniform,
integrator_fn=integrator)
def trace_fn(state, extra):
x, xcv = tf.split(state.state_extra, 2, axis=0)
return (x, xcv, extra.log_accept_ratio, extra.is_accepted)
(_, (x_chain, xcv_chain, log_accept_ratio, is_accepted)) = fun_mc.trace(
state=fun_mc.hamiltonian_monte_carlo_init(
tf.concat([z_init] * 2, axis=0), joint_log_prob_fn),
fn=transition_operator,
num_steps=num_steps,
trace_fn=trace_fn)
p_accept = tf.reduce_mean(tf.cast(is_accepted[:, :batch_size], tf.float32))
p_accept_cv = tf.reduce_mean(
tf.cast(is_accepted[:, -batch_size:], tf.float32))
is_accepted_cv = is_accepted[:, -batch_size:]
is_accepted = is_accepted[:, :batch_size]
x_chain = tf.stop_gradient(tf.concat([x_init[tf.newaxis, Ellipsis], x_chain], 0))
xcv_chain = tf.stop_gradient(
tf.concat([x_init[tf.newaxis, Ellipsis], xcv_chain], 0))
return MCMCOutputs(
x_chain=x_chain,
xcv_chain=xcv_chain,
p_accept=p_accept,
p_accept_cv=p_accept_cv,
is_accepted=is_accepted,
is_accepted_cv=is_accepted_cv,
log_accept_ratio=log_accept_ratio,
num_leapfrog_steps=num_leapfrog_steps * leapfrog_multiplier)
@gin.configurable("neutra")
def MakeNeuTra(target,
q,
batch_size=32,
num_steps=100,
num_leapfrog_steps=2,
step_size=0.1,
integrator="leapfrog",
x_init=None):
if x_init is None:
x_init = q.sample(batch_size)
(transformed_log_prob_fn,
z_init) = fun_mc.transform_log_prob_fn(lambda x: (target.log_prob(x), ()),
q.bijector, x_init)
integrator, leapfrog_multiplier = GetIntegrator(integrator, step_size,
num_leapfrog_steps,
transformed_log_prob_fn)
def transition_operator(hmc_state):
return fun_mc.hamiltonian_monte_carlo_step(
hmc_state,
target_log_prob_fn=transformed_log_prob_fn,
integrator_fn=integrator)
def trace_fn(state, extra):
return (state.state_extra[0], extra.log_accept_ratio, extra.is_accepted)
(_, (x_chain, log_accept_ratio, is_accepted)) = fun_mc.trace(
state=fun_mc.hamiltonian_monte_carlo_init(z_init,
transformed_log_prob_fn),
fn=transition_operator,
num_steps=num_steps,
trace_fn=trace_fn)
p_accept = tf.reduce_mean(tf.cast(is_accepted, tf.float32))
x_chain = tf.stop_gradient(tf.concat([x_init[tf.newaxis, Ellipsis], x_chain], 0))
return MCMCOutputs(
x_chain=x_chain,
p_accept=p_accept,
is_accepted=is_accepted,
log_accept_ratio=log_accept_ratio,
num_leapfrog_steps=num_leapfrog_steps * leapfrog_multiplier)
@gin.configurable("neutra_rwm")
def MakeNeuTraRWM(target,
q,
batch_size=32,
num_steps=100,
step_size=0.1,
x_init=None,
**_):
if x_init is None:
x_init = q.sample(batch_size)
(transformed_log_prob_fn,
z_init) = fun_mc.transform_log_prob_fn(lambda x: (target.log_prob(x), ()),
q.bijector, x_init)
def proposal_fn(x, seed):
return x + step_size * tf.random.normal(x.shape, seed=seed), ((), 0.)
def transition_operator(rwm_state):
return fun_mc.random_walk_metropolis_step(
rwm_state,
target_log_prob_fn=transformed_log_prob_fn,
proposal_fn=proposal_fn)
def trace_fn(state, extra):
return (state.state_extra[0], extra.log_accept_ratio, extra.is_accepted)
(_, (x_chain, log_accept_ratio, is_accepted)) = fun_mc.trace(
state=fun_mc.random_walk_metropolis_init(z_init,
transformed_log_prob_fn),
fn=transition_operator,
num_steps=num_steps,
trace_fn=trace_fn)
p_accept = tf.reduce_mean(tf.cast(is_accepted, tf.float32))
x_chain = tf.stop_gradient(tf.concat([x_init[tf.newaxis, Ellipsis], x_chain], 0))
return MCMCOutputs(
x_chain=x_chain,
p_accept=p_accept,
is_accepted=is_accepted,
log_accept_ratio=log_accept_ratio,
num_leapfrog_steps=1)
@gin.configurable("cv_neutra_rwm")
def MakeCVNeuTraRWM(target,
q,
batch_size=32,
num_steps=100,
step_size=0.1,
x_init=None,
**_):
if x_init is None:
x_init = q.sample(batch_size)
(transformed_log_prob_fn,
z_init) = fun_mc.transform_log_prob_fn(lambda x: (target.log_prob(x), ()),
q.bijector, x_init)
def joint_log_prob_fn(z_zcv):
z, zcv = tf.split(z_zcv, 2, axis=0)
lpz, (x, _) = transformed_log_prob_fn(z)
lpzcv = q.distribution.log_prob(zcv)
xcv = q.bijector.forward(zcv)
x_xcv = tf.concat([x, xcv], axis=0)
return tf.concat([lpz, lpzcv], axis=0), x_xcv
def transition_operator(rwm_state):
log_uniform = tf.math.log(tf.random.uniform(shape=tf.shape(x_init)[:-1]))
# Share the log_uniform and proposal between z and zcv
log_uniform = tf.concat([log_uniform] * 2, axis=0)
def proposal_fn(x, seed):
proposal = tf.random.normal(tf.shape(x_init), seed=seed)
proposal = tf.concat([proposal] * 2, axis=0)
return x + step_size * proposal, ((), 0.)
return fun_mc.random_walk_metropolis_step(
rwm_state,
target_log_prob_fn=joint_log_prob_fn,
proposal_fn=proposal_fn,
log_uniform=log_uniform)
def trace_fn(state, extra):
x, xcv = tf.split(state.state_extra, 2, axis=0)
return (x, xcv, extra.log_accept_ratio, extra.is_accepted)
(_, (x_chain, xcv_chain, log_accept_ratio, is_accepted)) = fun_mc.trace(
state=fun_mc.random_walk_metropolis_init(
tf.concat([z_init] * 2, axis=0), joint_log_prob_fn),
fn=transition_operator,
num_steps=num_steps,
trace_fn=trace_fn)
p_accept = tf.reduce_mean(tf.cast(is_accepted[:, :batch_size], tf.float32))
p_accept_cv = tf.reduce_mean(
tf.cast(is_accepted[:, -batch_size:], tf.float32))
is_accepted_cv = is_accepted[:, -batch_size:]
is_accepted = is_accepted[:, :batch_size]
x_chain = tf.stop_gradient(tf.concat([x_init[tf.newaxis, Ellipsis], x_chain], 0))
xcv_chain = tf.stop_gradient(
tf.concat([x_init[tf.newaxis, Ellipsis], xcv_chain], 0))
return MCMCOutputs(
x_chain=x_chain,
xcv_chain=xcv_chain,
p_accept=p_accept,
p_accept_cv=p_accept_cv,
is_accepted=is_accepted,
is_accepted_cv=is_accepted_cv,
log_accept_ratio=log_accept_ratio,
num_leapfrog_steps=1)
@gin.configurable("a_neutra_rwm")
def MakeANeuTraRWM(target,
q,
batch_size=32,
num_steps=100,
step_size=0.1,
x_init=None,
**_):
if x_init is None:
x_init = q.sample(batch_size)
(transformed_log_prob_fn,
z_init) = fun_mc.transform_log_prob_fn(lambda x: (target.log_prob(x), ()),
q.bijector, x_init)
def transition_operator(rwm_state):
log_uniform = tf.math.log(tf.random.uniform(shape=tf.shape(x_init)[:-1]))
    # Share the log_uniform between the main and antithetic chains; the
    # proposal below is shared but negated for the antithetic chain.
log_uniform = tf.concat([log_uniform] * 2, axis=0)
def proposal_fn(x, seed):
proposal = tf.random.normal(tf.shape(x_init), seed=seed)
proposal = tf.concat([proposal, -proposal], axis=0)
return x + step_size * proposal, ((), 0.)
return fun_mc.random_walk_metropolis_step(
rwm_state,
target_log_prob_fn=transformed_log_prob_fn,
proposal_fn=proposal_fn,
log_uniform=log_uniform,
)
def trace_fn(state, extra):
x_xa = state.state_extra[0]
return (x_xa, extra.log_accept_ratio, extra.is_accepted)
(_, (x_xa_chain, log_accept_ratio, is_accepted)) = fun_mc.trace(
state=fun_mc.random_walk_metropolis_init(
tf.concat([z_init, -z_init], axis=0), transformed_log_prob_fn),
fn=transition_operator,
num_steps=num_steps,
trace_fn=trace_fn)
p_accept = tf.reduce_mean(tf.cast(is_accepted, tf.float32))
is_accepted_a = is_accepted[:, -batch_size:]
is_accepted = is_accepted[:, :batch_size]
x_chain, xa_chain = tf.split(x_xa_chain, 2, axis=1)
x_chain = tf.stop_gradient(tf.concat([x_init[tf.newaxis, Ellipsis], x_chain], 0))
xa_chain = tf.stop_gradient(tf.concat([x_init[tf.newaxis, Ellipsis], xa_chain], 0))
return MCMCOutputs(
x_chain=x_chain,
xa_chain=xa_chain,
p_accept=p_accept,
is_accepted=is_accepted,
is_accepted_a=is_accepted_a,
log_accept_ratio=log_accept_ratio,
num_leapfrog_steps=1)
@gin.configurable("cva_neutra_rwm")
def MakeCVANeuTraRWM(target,
q,
batch_size=32,
num_steps=100,
step_size=0.1,
x_init=None):
if x_init is None:
x_init = q.sample(batch_size)
(transformed_log_prob_fn,
z_init) = fun_mc.transform_log_prob_fn(lambda x: (target.log_prob(x), ()),
q.bijector, x_init)
def joint_log_prob_fn(z_zcv):
# N.B. z is concatenated real + antithetic chain.
z, zcv = tf.split(z_zcv, [batch_size * 2, batch_size], axis=0)
lpz, (x, _) = transformed_log_prob_fn(z)
lpzcv = q.distribution.log_prob(zcv)
xcv = q.bijector.forward(zcv)
x_xcv = tf.concat([x, xcv], axis=0)
return tf.concat([lpz, lpzcv], axis=0), x_xcv
def transition_operator(rwm_state):
log_uniform = tf.math.log(tf.random.uniform(shape=tf.shape(x_init)[:-1]))
    # Share the log_uniform across all three chains (z, antithetic z, and zcv).
log_uniform = tf.concat([log_uniform] * 3, axis=0)
# Share the proposal for everything, but negate it for antithetic chain.
def proposal_fn(x, seed):
proposal = tf.random.normal(tf.shape(x_init), seed=seed)
proposal = tf.concat([proposal, -proposal, proposal], axis=0)
return x + step_size * proposal, ((), 0.)
return fun_mc.random_walk_metropolis_step(
rwm_state,
target_log_prob_fn=joint_log_prob_fn,
proposal_fn=proposal_fn,
log_uniform=log_uniform)
def trace_fn(state, extra):
x_xcv = state.state_extra
zcva = -state.state[-batch_size:]
return (x_xcv, zcva, extra.log_accept_ratio, extra.is_accepted)
(_, (x_xcv_chain, zcva_chain, log_accept_ratio,
is_accepted)) = fun_mc.trace(
state=fun_mc.random_walk_metropolis_init(
tf.concat([z_init, -z_init, z_init], axis=0), joint_log_prob_fn),
fn=transition_operator,
num_steps=num_steps,
trace_fn=trace_fn)
p_accept = tf.reduce_mean(
tf.cast(is_accepted[:, :2 * batch_size], tf.float32))
p_accept_cv = tf.reduce_mean(
tf.cast(is_accepted[:, -batch_size:], tf.float32))
is_accepted_a = is_accepted[:, batch_size:2 * batch_size]
is_accepted_cv = is_accepted[:, -batch_size:]
is_accepted = is_accepted[:, :batch_size]
x_chain, xa_chain, xcv_chain = tf.split(x_xcv_chain, 3, axis=1)
x_chain = tf.stop_gradient(tf.concat([x_init[tf.newaxis, Ellipsis], x_chain], 0))
xa_chain = tf.stop_gradient(tf.concat([x_init[tf.newaxis, Ellipsis], xa_chain], 0))
xcv_chain = tf.stop_gradient(
tf.concat([x_init[tf.newaxis, Ellipsis], xcv_chain], 0))
zcva_chain = tf.concat([-z_init[tf.newaxis, Ellipsis], zcva_chain], 0)
xcva_chain = tf.stop_gradient(q.bijector.forward(zcva_chain))
return MCMCOutputs(
x_chain=x_chain,
xcv_chain=xcv_chain,
xa_chain=xa_chain,
xcva_chain=xcva_chain,
p_accept=p_accept,
p_accept_cv=p_accept_cv,
is_accepted=is_accepted,
is_accepted_cv=is_accepted_cv,
is_accepted_a=is_accepted_a,
log_accept_ratio=log_accept_ratio,
num_leapfrog_steps=1)
@gin.configurable("cv_hmc")
def MakeCVHMC(target,
q,
batch_size=32,
num_steps=100,
num_leapfrog_steps=2,
step_size=0.1,
integrator="leapfrog",
x_init=None):
if x_init is None:
x_init = q.sample(batch_size)
def joint_log_prob_fn(x_xcv):
x, xcv = tf.split(x_xcv, 2, axis=0)
lpx = target.log_prob(x)
lpxcv = q.log_prob(xcv)
return tf.concat([lpx, lpxcv], axis=0), ()
integrator, leapfrog_multiplier = GetIntegrator(integrator, step_size,
num_leapfrog_steps,
joint_log_prob_fn)
def transition_operator(hmc_state):
momentum = tf.random.normal(tf.shape(x_init))
log_uniform = tf.math.log(tf.random.uniform(shape=tf.shape(x_init)[:-1]))
# Share the momentum and log_uniform between z and zcv
momentum = tf.concat([momentum] * 2, axis=0)
log_uniform = tf.concat([log_uniform] * 2, axis=0)
return fun_mc.hamiltonian_monte_carlo_step(
hmc_state,
target_log_prob_fn=joint_log_prob_fn,
momentum=momentum,
log_uniform=log_uniform,
integrator_fn=integrator)
def trace_fn(state, extra):
x, xcv = tf.split(state.state, 2, axis=0)
return (x, xcv, extra.log_accept_ratio, extra.is_accepted)
(_, (x_chain, xcv_chain, log_accept_ratio, is_accepted)) = fun_mc.trace(
state=fun_mc.hamiltonian_monte_carlo_init(
tf.concat([x_init] * 2, axis=0), joint_log_prob_fn),
fn=transition_operator,
num_steps=num_steps,
trace_fn=trace_fn)
p_accept = tf.reduce_mean(tf.cast(is_accepted[:, :batch_size], tf.float32))
p_accept_cv = tf.reduce_mean(
tf.cast(is_accepted[:, -batch_size:], tf.float32))
is_accepted_cv = is_accepted[:, -batch_size:]
is_accepted = is_accepted[:, :batch_size]
x_chain = tf.stop_gradient(tf.concat([x_init[tf.newaxis, Ellipsis], x_chain], 0))
xcv_chain = tf.stop_gradient(
tf.concat([x_init[tf.newaxis, Ellipsis], xcv_chain], 0))
return MCMCOutputs(
x_chain=x_chain,
xcv_chain=xcv_chain,
p_accept=p_accept,
p_accept_cv=p_accept_cv,
is_accepted=is_accepted,
is_accepted_cv=is_accepted_cv,
log_accept_ratio=log_accept_ratio,
num_leapfrog_steps=num_leapfrog_steps * leapfrog_multiplier)
@gin.configurable("hmc")
def MakeHMC(target,
q,
batch_size=32,
num_steps=100,
num_leapfrog_steps=2,
step_size=0.1,
integrator="leapfrog",
x_init=None):
if x_init is None:
x_init = q.sample(batch_size)
def joint_log_prob_fn(x):
return target.log_prob(x), ()
integrator, leapfrog_multiplier = GetIntegrator(integrator, step_size,
num_leapfrog_steps,
joint_log_prob_fn)
def transition_operator(hmc_state):
return fun_mc.hamiltonian_monte_carlo_step(
hmc_state,
target_log_prob_fn=joint_log_prob_fn,
integrator_fn=integrator)
def trace_fn(state, extra):
return (state.state, extra.log_accept_ratio, extra.is_accepted)
(_, (x_chain, log_accept_ratio, is_accepted)) = fun_mc.trace(
state=fun_mc.hamiltonian_monte_carlo_init(x_init, joint_log_prob_fn),
fn=transition_operator,
num_steps=num_steps,
trace_fn=trace_fn)
p_accept = tf.reduce_mean(tf.cast(is_accepted, tf.float32))
x_chain = tf.stop_gradient(tf.concat([x_init[tf.newaxis, Ellipsis], x_chain], 0))
return MCMCOutputs(
x_chain=x_chain,
p_accept=p_accept,
is_accepted=is_accepted,
log_accept_ratio=log_accept_ratio,
num_leapfrog_steps=num_leapfrog_steps * leapfrog_multiplier)
@utils.register_namedtuple
class ChainLossState(NamedTuple):
z_state: Any
step_size: Any
@utils.register_namedtuple
class ChainLossOutputs(NamedTuple):
x_fin: Any
loss: Any
p_accept: Any
@gin.configurable("chain_loss")
def ChainLoss(chain_loss_state,
target,
q,
batch_size=32,
step_size=0.1,
trajectory_length=1.,
num_steps=1,
target_accept_prob=0.9):
if chain_loss_state is None:
x_init = q.sample(batch_size)
z_init = q.bijector.inverse(x_init)
chain_loss_state = ChainLossState(
z_state=z_init, step_size=tf.convert_to_tensor(step_size, tf.float32))
transformed_log_prob_fn = fun_mc.transform_log_prob_fn(
lambda x: (target.log_prob(x), ()), q.bijector)
def transition_operator(hmc_state):
num_leapfrog_steps = tf.cast(
tf.math.ceil(trajectory_length / chain_loss_state.step_size), tf.int32)
return fun_mc.hamiltonian_monte_carlo_step(
hmc_state,
target_log_prob_fn=transformed_log_prob_fn,
step_size=chain_loss_state.step_size,
num_integrator_steps=num_leapfrog_steps)
def trace_fn(_state, extra):
return (extra.log_accept_ratio, extra.is_accepted)
(final_state, (_, is_accepted)) = fun_mc.trace(
state=fun_mc.HamiltonianMonteCarloState(
state=chain_loss_state.z_state,
state_grads=None,
target_log_prob=None,
state_extra=None,
),
fn=transition_operator,
num_steps=num_steps,
trace_fn=trace_fn)
p_accept = tf.reduce_mean(tf.cast(is_accepted, tf.float32))
step_size = fun_mc.sign_adaptation(
control=chain_loss_state.step_size,
output=p_accept,
set_point=target_accept_prob,
adaptation_rate=0.01)
# step_size = chain_loss_state.step_size
chain_loss_state = chain_loss_state._replace(
z_state=final_state.state, step_size=step_size)
x_fin = q.bijector.forward(final_state.state)
x_fin = tf.stop_gradient(x_fin)
loss = -tf.reduce_mean(q.log_prob(x_fin))
return chain_loss_state, ChainLossOutputs(
x_fin=x_fin, loss=loss, p_accept=p_accept)
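# Rough reading of ChainLoss (interpretation, not original commentary): it runs
# `num_steps` HMC steps in the latent space of q's bijector, choosing
# num_leapfrog_steps so that step_size * num_leapfrog_steps is approximately
# `trajectory_length`, adapts the step size toward `target_accept_prob` via
# sign adaptation, and returns the negative mean log q(x) at the
# gradient-stopped chain states -- a cross-entropy style loss (used as "kl_pq"
# in TrainLoss below) that fits q to approximate samples from the target.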
@utils.register_namedtuple
class QStats(NamedTuple):
bias: Any
def ComputeQStats(q_samples, target_mean):
return QStats(bias=target_mean - tf.reduce_mean(q_samples, 0))
@utils.register_namedtuple
class ChainStats(NamedTuple):
bias: Any
inst_bias: Any
variance: Any
inst_variance: Any
error_sq: Any
ess: Any
ess_per_grad: Any
rhat: Any
autocorr: Any
warmupped_bias: Any
warmupped_variance: Any
overall_variance: Any
per_chain_variance: Any
@gin.configurable("chain_stats")
@utils.compile
def ComputeChainStats(chain,
target_mean,
num_leapfrog_steps,
compute_stats_over_time=False,
target_variance=None):
# Chain is [num_steps, batch, num_dims]
num_steps = tf.shape(chain)[0]
batch = tf.shape(chain)[1]
if compute_stats_over_time:
counts = tf.cast(tf.range(1, num_steps + 1), tf.float32)
chain_mean = tf.cumsum(chain, 0) / counts[:, tf.newaxis, tf.newaxis]
bias = target_mean - tf.reduce_mean(chain_mean, 1)
variance = tf.math.reduce_variance(chain_mean, 1)
inst_bias = target_mean - tf.reduce_mean(chain, 1)
# XXX: This looks wrong, why are we using target_mean here?
inst_variance = tf.reduce_sum(tf.square(target_mean - chain), 1) / tf.cast(
batch - 1, tf.float32)
def reducer(_, idx):
chain_mean = tf.reduce_mean(chain[idx // 2:idx], 0)
bias = tf.reduce_mean(target_mean - chain_mean, 0)
variance = tf.math.reduce_variance(chain_mean, 0)
return bias, variance
indices = 1 + tf.range(num_steps)
warmupped_bias, warmupped_variance = tf.scan(
reducer, indices, initializer=(chain[0, 0], chain[0, 0]))
half_steps = num_steps // 2
half_chain = chain[half_steps:]
error_sq = tf.reduce_mean(
tf.square(tf.reduce_mean(half_chain, 0) - target_mean), 0)
if target_variance is None:
target_variance = tf.math.reduce_variance(half_chain, [0, 1])
ess = utils.EffectiveSampleSize(
half_chain / tf.sqrt(target_variance), use_geyer=True,
normalize=False) / tf.cast(half_steps, tf.float32)
ess_per_grad = ess / tf.cast(num_leapfrog_steps, tf.float32)
rhat = tfp.mcmc.potential_scale_reduction(half_chain)
autocorr = utils.SanitizedAutoCorrelationMean(
half_chain, axis=0, reduce_axis=1, max_lags=300)
# Brute ESS is computed as the ratio of these two, NB these are not normalized
# by chain length.
overall_variance = tf.math.reduce_variance(half_chain, [0, 1])
per_chain_variance = tf.math.reduce_variance(tf.reduce_mean(half_chain, 0), 0)
return ChainStats(
bias=bias if compute_stats_over_time else (),
variance=variance if compute_stats_over_time else (),
error_sq=error_sq,
inst_bias=inst_bias if compute_stats_over_time else (),
inst_variance=inst_variance if compute_stats_over_time else (),
ess=ess,
ess_per_grad=ess_per_grad,
rhat=rhat,
warmupped_bias=warmupped_bias if compute_stats_over_time else (),
warmupped_variance=warmupped_variance if compute_stats_over_time else (),
autocorr=autocorr,
overall_variance=overall_variance,
per_chain_variance=per_chain_variance,
)
@utils.register_namedtuple
class VRChainOutputs(NamedTuple):
vr_chain: Any
cv_beta: Any
cv_rho: Any
def ChainCov(xs, ys):
n = tf.shape(xs)[0]
mx = tf.reduce_mean(xs, 0)
my = tf.reduce_mean(ys, 0)
return tf.einsum("abi,abj->ij", xs - mx, ys - my) / tf.cast(n - 1, tf.float32)
def ChainCovDiag(xs, ys):
n = tf.shape(xs)[0]
mx = tf.reduce_mean(xs, 0)
my = tf.reduce_mean(ys, 0)
return tf.einsum("abi,abi->i", xs - mx, ys - my) / tf.cast(n - 1, tf.float32)
def ChainCorr(xs, ys):
cov_ys_ys = ChainCovDiag(ys, ys)
cov_xs_ys = ChainCovDiag(xs, ys)
cov_xs_xs = ChainCovDiag(xs, xs)
return cov_xs_ys / tf.sqrt(cov_ys_ys * cov_xs_xs)
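# ChainCov/ChainCovDiag/ChainCorr treat their inputs as [num_steps, num_chains,
# num_dims] arrays and pool over the first two axes, so ChainCorr returns one
# Pearson correlation per dimension. A minimal usage sketch (hypothetical
# shapes, not from the original code):
#
#   xs = tf.random.normal([100, 8, 3])
#   ys = xs + 0.1 * tf.random.normal([100, 8, 3])
#   rho = ChainCorr(xs, ys)  # shape [3], entries close to 1.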
@utils.compile
def GetCVBeta(chain, cv_chain):
num_steps = tf.shape(chain)[0]
half_steps = num_steps // 2
half_chain = chain[half_steps:]
half_cv_chain = cv_chain[half_steps:]
cov_cv_chain = ChainCov(half_cv_chain, half_chain)
cov_cv_cv = ChainCov(half_cv_chain, half_cv_chain)
cov_cv_cv += tf.eye(cov_cv_cv.shape[-1]) * 1e-6
return tf.linalg.solve(cov_cv_cv, cov_cv_chain)
@utils.compile
def GetCVBetaVec(chain, cv_chain):
num_steps = tf.shape(chain)[0]
half_steps = num_steps // 2
half_chain = chain[half_steps:]
half_cv_chain = cv_chain[half_steps:]
cov_cv_chain = ChainCovDiag(half_cv_chain, half_chain)
cov_cv_cv = ChainCovDiag(half_cv_chain, half_cv_chain)
beta_vec = cov_cv_chain / cov_cv_cv
return beta_vec
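# GetCVBetaVec discards the first half of the chain as warmup and computes, per
# dimension, the ordinary-least-squares control-variate coefficient
#
#   beta_i = Cov(f_i(x), f_i(x_cv)) / Var(f_i(x_cv)),
#
# i.e. the coefficient minimizing the variance of
# f(x) - beta * (f(x_cv) - E[f(x_cv)]). GetCVBeta above is the full-matrix
# analogue, solved with a small diagonal jitter for numerical stability.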
@utils.compile
def GetVarianceReducedChain(chain, cv_chain, cv_mean, cv_beta):
num_steps = tf.shape(chain)[0]
half_steps = num_steps // 2
half_chain = chain[half_steps:]
half_cv_chain = cv_chain[half_steps:]
if cv_beta.shape.rank == 2:
vr_chain = chain - tf.einsum("abi,ij->abj", cv_chain - cv_mean, cv_beta)
else:
vr_chain = chain - (cv_chain - cv_mean) * cv_beta
cv_rho = ChainCorr(half_chain, half_cv_chain)
return VRChainOutputs(vr_chain=vr_chain, cv_beta=cv_beta, cv_rho=cv_rho)
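# The variance-reduced chain is f(x) - cv_beta * (f(x_cv) - cv_mean), where
# cv_mean is the mean of the control-variate statistic under q (estimated by
# sampling q in MCMCStats below). cv_rho is the per-dimension correlation
# between the two chains over the post-warmup steps; the closer |cv_rho| is to
# one, the larger the variance reduction.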
@utils.compile
def GetMHDecoupleRate(is_accepted_1, is_accepted_2):
return tf.reduce_mean(tf.cast(is_accepted_1 != is_accepted_2, tf.float32))
@utils.compile
def GetMHAgreeAcceptRate(is_accepted_1, is_accepted_2):
return tf.reduce_mean(tf.cast(is_accepted_1 & is_accepted_2, tf.float32))
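# Coupling diagnostics: GetMHDecoupleRate is the fraction of (step, chain)
# pairs where the two coupled chains made different accept/reject decisions,
# and GetMHAgreeAcceptRate the fraction where both accepted. A low decoupling
# rate indicates the shared log_uniform / shared proposal coupling is holding,
# which the control-variate and antithetic estimators rely on.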
@utils.register_namedtuple
class MCMCStats(NamedTuple):
chain_stats: Any = ()
chain_stats_tune: Any = ()
chain_stats_cv: Any = ()
chain_stats_a: Any = ()
chain_stats_vr_cv: Any = ()
chain_stats_vr_a: Any = ()
chain_stats_vr_cva: Any = ()
chain_stats_vr_cv_one: Any = ()
chain_stats_vr_cva_one: Any = ()
chain_stats_vr_cv_vec: Any = ()
chain_stats_vr_cva_vec: Any = ()
vr_outputs_cva: Any = ()
vr_outputs_cva_one: Any = ()
vr_outputs_cva_vec: Any = ()
vr_outputs_cv: Any = ()
vr_outputs_cv_one: Any = ()
vr_outputs_cv_vec: Any = ()
p_accept: Any = ()
p_accept_cv: Any = ()
p_mh_agree_accept_cv: Any = ()
p_mh_agree_accept_a: Any = ()
p_mh_decouple_cv: Any = ()
p_mh_decouple_a: Any = ()
a_corr: Any = ()
def AverageStats(stats):
def classify(path):
if "ess" in path:
return lambda x: 1. / np.mean(1. / np.array(x), 0)
else:
return lambda x: np.mean(x, 0)
def to_numpy(t):
if isinstance(t, tf.Tensor):
return t.numpy()
else:
return t
def is_sentinel(x):
return isinstance(x, tuple) and not x
stats = tf.nest.map_structure(to_numpy, stats)
avg_type = [
classify("".join(map(str, path))) # pylint: disable=g-complex-comprehension
for path, _ in nest.flatten_with_tuple_paths(stats[0])
]
flat_stats = [tf.nest.flatten(r) for r in stats]
trans_stats = zip(*flat_stats)
trans_mean_stats = [
r if is_sentinel(r[0]) else avg(r)
for avg, r in zip(avg_type, trans_stats)
]
mean_stats = tf.nest.pack_sequence_as(stats[0], trans_mean_stats)
return mean_stats
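# AverageStats combines a list of stats structures from repeated runs. Fields
# whose path contains "ess" are averaged with a harmonic mean
# (1 / mean(1 / ess)), a common convention for effective sample sizes; all
# other fields use an ordinary arithmetic mean, and empty-tuple sentinels are
# passed through unchanged.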
@utils.register_namedtuple
class TuneOutputs(NamedTuple):
num_leapfrog_steps: Any
step_size: Any
@utils.register_namedtuple
class TuneObjective(NamedTuple):
objective: Any
step_size: Any
num_leapfrog_steps: Any
@utils.register_namedtuple
class BenchmarkOutputs(NamedTuple):
mcmc_secs_per_step: Any
q_secs_per_sample: Any
@gin.configurable("neutra_experiment")
class NeuTraExperiment(tf.Module):
def __init__( # pylint: disable=dangerous-default-value
self,
mcmc_type = "neutra",
bijector = "iaf",
log_dir="/tmp/neutra",
base_learning_rate=1e-3,
q_base_scale=1.,
loss="kl_qp",
learning_rate_schedule=[[6000, 1e-1]],
do_polyak=False,
polyak_start=0,
polyak_rate=0.999,
):
target, target_spec = GetTargetSpec() # pylint: disable=no-value-for-parameter
self._target = target
self.target_spec = target_spec
with gin.config_scope("train"):
train_target, train_target_spec = GetTargetSpec() # pylint: disable=no-value-for-parameter
self.train_target = train_target
self.train_target_spec = train_target_spec
if bijector == "rnvp":
bijector_fn = utils.Template(
"bijector", MakeRNVPBijectorFn, num_dims=self.target_spec.num_dims)
elif bijector == "iaf":
bijector_fn = utils.Template(
"bijector", MakeIAFBijectorFn, num_dims=self.target_spec.num_dims)
elif bijector == "affine":
bijector_fn = utils.Template(
"bijector", MakeAffineBijectorFn, num_dims=self.target_spec.num_dims)
elif bijector == "head_tail":
bijector_fn = utils.Template(
"bijector",
MakeHeadTailBijectorFn,
num_dims=self.target_spec.num_dims)
else:
bijector_fn = utils.Template("bijector",
lambda *args, **kwargs: tfb.Identity())
if self.train_target_spec.bijector is not None:
print("Using train target bijector")
# For var tracking.
self.base_bijector_fn = bijector_fn
bijector_fn = lambda train: tfb.Chain( # pylint: disable=g-long-lambda
[train_target_spec.bijector,
self.base_bijector_fn(train=train)])
self.bijector_fn = bijector_fn
self.q_base_scale = q_base_scale
# Training
self.base_learning_rate = base_learning_rate
self.learning_rate_schedule = learning_rate_schedule
self.loss = loss
self.mcmc_type = mcmc_type
# Construct the variables
self.bijector_fn(train=True)
self.InitTargetStats()
self.do_polyak = do_polyak
self.polyak_rate = polyak_rate
self.polyak_start = polyak_start
if self.do_polyak:
self.polyak_variables = []
for v in self.trainable_variables:
self.polyak_variables.append(
tf.Variable(v, name=v.name[:-2] + "_poly", trainable=False))
self.checkpoint = tf.train.Checkpoint(experiment=self)
self.log_dir = log_dir
self.global_step = tf.Variable(0, dtype=tf.int64, trainable=False)
@property
def target(self):
# Some bug with nested TF functions, need to re-construct the target
# distribution to break the cache.
return self._target.copy()
def UpdatePolyVars(self, rate=None):
if rate is None:
rate = self.polyak_rate
for pv, v in zip(self.polyak_variables, self.trainable_variables):
pv.assign(rate * pv + (1. - rate) * v)
return rate
def UpdateFromPolyVars(self):
for pv, v in zip(self.polyak_variables, self.trainable_variables):
v.assign(pv)
@utils.compile
def QStats(self, num_samples=16384 * 8):
q_stats = {}
q_samples = self.Q().sample(num_samples)
for name, f in self.functions:
q_stats[name] = ComputeQStats(f(q_samples), self.target_mean[name])
return q_stats
def Q(self, bijector=None):
if bijector is None:
bijector = self.bijector_fn(train=False)
q_base = tfd.Independent(
tfd.Normal(
loc=tf.zeros(self.target_spec.num_dims),
scale=self.q_base_scale * tf.ones(self.target_spec.num_dims)), 1)
return tfd.TransformedDistribution(q_base, bijector)
@utils.compile
def MCMC(self,
batch_size=4096,
test_num_steps=1000,
test_num_leapfrog_steps=2,
test_step_size=0.1,
**kwargs):
if self.mcmc_type == "hmc":
mcmc_fn = MakeHMC
elif self.mcmc_type == "cv_hmc":
mcmc_fn = MakeCVHMC
elif self.mcmc_type == "neutra":
mcmc_fn = MakeNeuTra
elif self.mcmc_type == "a_neutra":
mcmc_fn = MakeANeuTra
elif self.mcmc_type == "cv_neutra":
mcmc_fn = MakeCVNeuTra
elif self.mcmc_type == "cva_neutra":
mcmc_fn = MakeCVANeuTra
elif self.mcmc_type == "neutra_rwm":
mcmc_fn = MakeNeuTraRWM
elif self.mcmc_type == "a_neutra_rwm":
mcmc_fn = MakeANeuTraRWM
elif self.mcmc_type == "cv_neutra_rwm":
mcmc_fn = MakeCVNeuTraRWM
elif self.mcmc_type == "cva_neutra_rwm":
mcmc_fn = MakeCVANeuTraRWM
return mcmc_fn(
target=self.target,
q=self.Q(),
batch_size=batch_size,
num_steps=test_num_steps,
num_leapfrog_steps=test_num_leapfrog_steps,
step_size=test_step_size,
**kwargs,
)
def InitTargetStats(self, batch_size=16384 * 8):
target_samples = utils.compile(
lambda: self.target.sample(batch_size))()
def variance(x, mean_stat):
x -= self.target_mean[mean_stat]
return tf.square(x)
transforms = []
transforms.extend(self.target_spec.transforms)
self.functions = []
    # A helper function is used here because Python closures bind loop
    # variables late: defining these lambdas directly inside the loop would
    # make every entry refer to the final (tname, transform) pair.
def body(tname, transform):
def get_name(fname):
if tname is not None:
return "_".join([tname, fname])
return fname
def make_fn(f):
return lambda x: f(transform(x))
self.functions.append((get_name("mean"), make_fn(tf.identity)))
self.functions.append(
(get_name("var"), make_fn(lambda x: variance(x, get_name("mean")))))
for tname, transform in transforms:
body(tname, transform)
self.target_mean = {}
for name, f in self.functions:
if self.target_spec.stats is not None and name in self.target_spec.stats:
target_mean = tf.convert_to_tensor(self.target_spec.stats[name])
else:
target_mean = tf.reduce_mean(f(target_samples), 0)
self.target_mean[name] = target_mean
@gin.configurable("mcmc_stats")
def MCMCStats(self,
neutra_outputs,
return_vr_chains=False,
num_q_samples=8192,
compute_mat_beta=False,
num_beta_chains=None):
ret = MCMCStats(
chain_stats={},
chain_stats_cv={},
chain_stats_a={},
chain_stats_vr_cv={},
chain_stats_vr_a={},
chain_stats_vr_cva={},
chain_stats_vr_cv_one={},
chain_stats_vr_cva_one={},
chain_stats_vr_cv_vec={},
chain_stats_vr_cva_vec={},
vr_outputs_cva={},
vr_outputs_cva_vec={},
vr_outputs_cva_one={},
vr_outputs_cv={},
vr_outputs_cv_vec={},
vr_outputs_cv_one={},
p_accept=neutra_outputs.p_accept,
p_accept_cv=neutra_outputs.p_accept_cv,
a_corr={},
)
ret = ret._replace(chain_stats_tune=ret.chain_stats)
# TODO(siege): We should compute these only once...
q_means = {}
for name, f in self.functions:
q_means[name] = tf.reduce_mean(f(self.Q().sample(num_q_samples)), 0)
for name, f in self.functions:
fx_chain = f(neutra_outputs.x_chain)
half_steps = fx_chain.shape[0] // 2
target_variance = tf.math.reduce_variance(fx_chain[half_steps:], [0, 1])
ret.chain_stats[name] = ComputeChainStats(
fx_chain, self.target_mean[name], neutra_outputs.num_leapfrog_steps)
if self.mcmc_type in [
"cv_neutra", "cv_hmc", "cv_neutra_rwm"
]:
ret = ret._replace(
p_mh_decouple_cv=GetMHDecoupleRate(
neutra_outputs.is_accepted,
neutra_outputs.is_accepted_cv,
),
p_mh_agree_accept_cv=GetMHAgreeAcceptRate(
neutra_outputs.is_accepted,
neutra_outputs.is_accepted_cv,
),
chain_stats_tune=ret.chain_stats_vr_cv_vec,
)
fxcv_chain = f(neutra_outputs.xcv_chain)
ret.chain_stats_cv[name] = ComputeChainStats(
fxcv_chain,
self.target_mean[name],
neutra_outputs.num_leapfrog_steps,
target_variance=target_variance)
if num_beta_chains is None:
fx_chain_beta = fx_chain
fxcv_chain_beta = fxcv_chain
else:
fx_chain_beta = fx_chain[:, :num_beta_chains]
fxcv_chain_beta = fxcv_chain[:, :num_beta_chains]
# TODO(siege): This introduces an annoying off-by-num_beta_chains
# shift. We should only do this when computing the stats...
fx_chain = fx_chain[:, num_beta_chains:]
fxcv_chain = fxcv_chain[:, num_beta_chains:]
cv_beta_vec = GetCVBetaVec(fx_chain_beta, fxcv_chain_beta)
cv_beta_one = tf.ones(cv_beta_vec.shape[-1])
vr_inputs = [
(ret.chain_stats_vr_cv_vec, ret.vr_outputs_cv_vec, cv_beta_vec),
(ret.chain_stats_vr_cv_one, ret.vr_outputs_cv_one, cv_beta_one),
]
fxcv_mean = q_means[name]
if compute_mat_beta:
cv_beta = GetCVBeta(fx_chain_beta, fxcv_chain_beta)
vr_inputs.append((ret.chain_stats_vr_cv, ret.vr_outputs_cv, cv_beta))
for chain_stats_vr_cv, vr_outputs_cv, cv_beta_val in vr_inputs:
vr_outputs_cv1 = GetVarianceReducedChain(fx_chain, fxcv_chain,
fxcv_mean, cv_beta_val)
if return_vr_chains:
vr_outputs_cv[name] = vr_outputs_cv1
else:
vr_outputs_cv[name] = vr_outputs_cv1._replace(vr_chain=())
chain_stats_vr_cv[name] = ComputeChainStats(
vr_outputs_cv1.vr_chain,
self.target_mean[name],
neutra_outputs.num_leapfrog_steps,
target_variance=target_variance)
elif self.mcmc_type in ["cva_neutra", "cva_neutra_rwm"]:
ret = ret._replace(
p_mh_decouple_cv=GetMHDecoupleRate(neutra_outputs.is_accepted,
neutra_outputs.is_accepted_cv),
p_mh_agree_accept_cv=GetMHAgreeAcceptRate(
neutra_outputs.is_accepted,
neutra_outputs.is_accepted_cv,
),
p_mh_decouple_a=GetMHDecoupleRate(
neutra_outputs.is_accepted,
neutra_outputs.is_accepted_a,
),
p_mh_agree_accept_a=GetMHAgreeAcceptRate(
neutra_outputs.is_accepted,
neutra_outputs.is_accepted_a,
),
chain_stats_tune=ret.chain_stats_vr_cva_vec,
)
fxa_chain = f(neutra_outputs.xa_chain)
fxcv_chain = f(neutra_outputs.xcv_chain)
fxcva_chain = f(neutra_outputs.xcva_chain)
ret.a_corr[name] = ChainCorr(fx_chain, fxa_chain)
ret.chain_stats_a[name] = ComputeChainStats(
fxa_chain,
self.target_mean[name],
neutra_outputs.num_leapfrog_steps,
target_variance=target_variance)
ret.chain_stats_cv[name] = ComputeChainStats(
fxcv_chain,
self.target_mean[name],
neutra_outputs.num_leapfrog_steps,
target_variance=target_variance)
ret.chain_stats_vr_a[name] = ComputeChainStats(
0.5 * (fx_chain + fxa_chain),
self.target_mean[name],
neutra_outputs.num_leapfrog_steps,
target_variance=target_variance)
if num_beta_chains is None:
fx_chain_beta = fx_chain
fxcv_chain_beta = fxcv_chain
fxa_chain_beta = fxa_chain
fxcva_chain_beta = fxcva_chain
else:
fx_chain_beta = fx_chain[:, :num_beta_chains]
fxcv_chain_beta = fxcv_chain[:, :num_beta_chains]
fxa_chain_beta = fxa_chain[:, :num_beta_chains]
fxcva_chain_beta = fxcva_chain[:, :num_beta_chains]
# TODO(siege): This introduces an annoying off-by-num_beta_chains
# shift. We should only do this when computing the stats...
fx_chain = fx_chain[:, num_beta_chains:]
fxcv_chain = fxcv_chain[:, num_beta_chains:]
fxa_chain = fxa_chain[:, num_beta_chains:]
fxcva_chain = fxcva_chain[:, num_beta_chains:]
fxcv_mean = q_means[name]
cv_beta_vec = GetCVBetaVec(fx_chain_beta, fxcv_chain_beta)
cv_beta_vec_a = GetCVBetaVec(fxa_chain_beta, fxcva_chain_beta)
cv_beta_one = tf.ones(cv_beta_vec.shape[-1])
vr_inputs = [
(ret.chain_stats_vr_cv_vec, ret.chain_stats_vr_cva_vec,
ret.vr_outputs_cv_vec, ret.vr_outputs_cva_vec, cv_beta_vec,
cv_beta_vec_a),
(ret.chain_stats_vr_cv_one, ret.chain_stats_vr_cva_one,
ret.vr_outputs_cv_one, ret.vr_outputs_cva_one, cv_beta_one,
cv_beta_one),
]
if compute_mat_beta:
cv_beta = GetCVBeta(fx_chain_beta, fxcv_chain_beta)
cv_beta_a = GetCVBeta(fxa_chain_beta, fxcva_chain_beta)
vr_inputs.append(
(ret.chain_stats_vr_cv, ret.chain_stats_vr_cva, ret.vr_outputs_cv,
ret.vr_outputs_cva, cv_beta, cv_beta_a))
for (chain_stats_vr_cv, chain_stats_vr_cva, vr_outputs_cv,
vr_outputs_cva, cv_beta_val, cv_beta_a_val) in vr_inputs:
vr_outputs_cv1 = GetVarianceReducedChain(fx_chain, fxcv_chain,
fxcv_mean, cv_beta_val)
vr_outputs_cv2 = GetVarianceReducedChain(fxa_chain, fxcva_chain,
fxcv_mean, cv_beta_a_val)
if return_vr_chains:
vr_outputs_cv[name] = vr_outputs_cv1
vr_outputs_cva[name] = vr_outputs_cv2
else:
vr_outputs_cv[name] = vr_outputs_cv1._replace(vr_chain=())
vr_outputs_cva[name] = vr_outputs_cv2._replace(vr_chain=())
chain_stats_vr_cv[name] = ComputeChainStats(
vr_outputs_cv1.vr_chain,
self.target_mean[name],
neutra_outputs.num_leapfrog_steps,
target_variance=target_variance)
chain_stats_vr_cva[name] = ComputeChainStats(
0.5 * (vr_outputs_cv1.vr_chain + vr_outputs_cv2.vr_chain),
self.target_mean[name],
neutra_outputs.num_leapfrog_steps,
target_variance=target_variance)
elif self.mcmc_type in ["a_neutra", "a_neutra_rwm"]:
ret = ret._replace(
p_mh_decouple_a=GetMHDecoupleRate(
neutra_outputs.is_accepted,
neutra_outputs.is_accepted_a,
),
p_mh_agree_accept_a=GetMHAgreeAcceptRate(
neutra_outputs.is_accepted,
neutra_outputs.is_accepted_a,
),
chain_stats_tune=ret.chain_stats_vr_a,
)
fxa_chain = f(neutra_outputs.xa_chain)
ret.a_corr[name] = ChainCorr(fx_chain, fxa_chain)
ret.chain_stats_a[name] = ComputeChainStats(
fxa_chain,
self.target_mean[name],
neutra_outputs.num_leapfrog_steps,
target_variance=target_variance)
ret.chain_stats_vr_a[name] = ComputeChainStats(
0.5 * (fx_chain + fxa_chain),
self.target_mean[name],
neutra_outputs.num_leapfrog_steps,
target_variance=target_variance)
return ret
@utils.compile
def TrainLoss(self, batch_size=4096, step=None, state=()):
bijector = self.bijector_fn(train=True)
q_x_train = self.Q(bijector)
if self.loss == "kl_qp":
train_q_samples = q_x_train.sample(batch_size)
train_log_q_x = q_x_train.log_prob(train_q_samples)
kl_q_p = tf.reduce_mean(train_log_q_x -
self.target.log_prob(train_q_samples))
loss = kl_q_p
tf.summary.scalar("kl_qp", kl_q_p, step=step)
elif self.loss == "kl_pq":
state, out = ChainLoss(state, self.target, q_x_train, batch_size)
loss = out.loss
tf.summary.scalar("xent", loss, step=step)
tf.summary.scalar("step_size", state.step_size, step=step)
tf.summary.scalar("p_accept", out.p_accept, step=step)
return loss, state
@gin.configurable("train")
def Train(self,
num_steps,
summary_every=500,
batch_size=4096,
plot_callback=None):
times =
|
np.zeros(num_steps)
|
numpy.zeros
|
import pickle
from configparser import ConfigParser
from itertools import repeat
from pathlib import Path
import numpy as np
import pandas as pd
from keras import Sequential
from keras.layers import Embedding
from torchnlp.word_to_vector import GloVe
from spacy_encoder import SpacyEncoder
def get_data(data_frame):
data = []
for idx, row in data_frame.iterrows():
if idx == 0:
continue
sentiment = (int(row['sentiment']) - 1)
title = row['title']
review = row['review']
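        # pandas represents missing titles/reviews as NaN, and NaN != NaN, so
        # the two checks below replace missing fields with empty strings.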
if title != title:
title = ""
if review != review:
review = ""
data.append((sentiment, title + " : " + review))
return data
def retrieve_texts(data):
return [tup[1] for tup in data]
def pad(max_len, idxs):
if len(idxs) > max_len:
return idxs[:max_len]
padding = max_len - len(idxs)
zeros = list(repeat(0, padding))
idxs = idxs + zeros
return idxs
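# pad() truncates or right-pads an index list to exactly max_len entries.
# Worked examples (added for clarity, not part of the original script):
#   pad(5, [3, 1])    -> [3, 1, 0, 0, 0]
#   pad(2, [7, 8, 9]) -> [7, 8]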
def docs2idxs(corpus, max_len=-1, encoder=None):
if encoder is None:
encoder = SpacyEncoder(corpus)
indices = [encoder.encode(doc).numpy().tolist() for doc in corpus]
if max_len <= 0:
max_len = max([len(lst) for lst in indices])
indices = [pad(max_len, idxs) for idxs in indices]
return encoder, indices, max_len
def weights(encoder, vectors):
ws = np.zeros((encoder.vocab_size, vectors.dim))
for index, word in enumerate(encoder.vocab):
ws[index] = vectors[word]
return ws
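# weights() builds the matrix consumed by the Keras Embedding layer below:
# row i holds the GloVe vector for the i-th entry of encoder.vocab, giving a
# (vocab_size, vectors.dim) array so that token indices produced by the encoder
# line up with their pretrained (frozen) vectors.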
config = ConfigParser()
config.read('config.INI')
train_csv = config.get('TRAINING', 'train_csv')
test_csv = config.get('TRAINING', 'test_csv')
amazon_training_csv = pd.read_csv(train_csv, header=None,
names=['sentiment', 'title', 'review'])
training_data = get_data(amazon_training_csv)
training_texts = retrieve_texts(training_data)
enc, training_indices, training_seq_len = docs2idxs(training_texts)
training_indices_and_sentiment = [(idxs, d[0]) for (idxs, d) in zip(training_indices, training_data)]
amazon_test_csv = pd.read_csv(test_csv, header=None,
names=['sentiment', 'title', 'review'])
test_data = get_data(amazon_test_csv)
test_texts = retrieve_texts(test_data)
_, test_indices, _ = docs2idxs(test_texts, max_len=training_seq_len, encoder=enc)
test_indices_and_sentiment = [(idxs, d[0]) for (idxs, d) in zip(test_indices, test_data)]
training_titles = set(amazon_training_csv['title'])
training_reviews = set(amazon_training_csv['review'])
for idx, item in amazon_test_csv.iterrows():
if item['review'] in training_reviews:
if item['title'] in training_titles:
raise AssertionError("Row w/ title {} redundant.".format(item['title']))
vecs = GloVe(cache=config.get('PREPARATION', 'word_vector_cache'))
embedding_weights = weights(enc, vecs)
embedding_model = Sequential()
embedding_model.add(Embedding(enc.vocab_size,
vecs.dim,
weights=[embedding_weights],
input_length=training_seq_len,
trainable=False))
embedding_model.compile('rmsprop', 'mse')
input_shape = (training_seq_len, vecs.dim, 1)
x_train_unshaped = [embedding_model.predict(np.array(sample[0]).reshape(1, -1)) for sample in
training_indices_and_sentiment] # shape n * (1 * seq_len * vector_dim)
x_test_unshaped = [embedding_model.predict(np.array(sample[0]).reshape(1, -1)) for sample in
test_indices_and_sentiment] # shape n * (1 * seq_len * vector_dim)
x_train = [sample.reshape(input_shape) for sample in x_train_unshaped]
x_train =
|
np.array(x_train)
|
numpy.array
|
import numpy as np
from argparse import ArgumentParser
from sklearn.datasets import load_breast_cancer, load_iris, load_boston, load_wine
from sklearn.model_selection import train_test_split
from sklearn.metrics import r2_score, roc_auc_score
from gp_lib.gp import ConstantMeanGP
from gp_lib.kernels import *
if __name__ == "__main__":
np.random.seed(123)
argparser = ArgumentParser()
argparser.add_argument("--lr", type=float, default=1.0)
argparser.add_argument("--r2", type=float, default=0.75)
argparser.add_argument("--tol", type=float, default=1e-3)
argparser.add_argument("--m", type=int, default=1000)
argparser.add_argument("--n", type=int, default=100)
argparser.add_argument("--verbose", action="store_true")
args = argparser.parse_args()
x = np.random.randn(args.m, args.n)
x = np.c_[x, np.ones(args.m)]
theta =
|
np.random.randn(args.n + 1)
|
numpy.random.randn
|
import numpy as np
import pandas as pd
from toolz.itertoolz import partition_all
def calculate_iq(
scattering_factors_df,
atom_distance_matrix_df,
qmin,
qmax,
qstep,
q_partition_size=500,
):
"""
Parameters
----------
scattering_factors_df: pandas.DataFrame
9 rows
atoms on columns
"""
# atom_element looks like
# ['O', 'Co', 'O', 'O', 'O', 'O', 'O', 'Co', 'Co',...]
atom_element = atom_distance_matrix_df.index
print("atom_element")
print(atom_element)
# set(atom_element) looks like {'O', 'Co'}
unique_elements = set(atom_element)
print(f"unique elements: {unique_elements}")
atom_distance_matrix = atom_distance_matrix_df.to_numpy()
# work with only the scattering_factors for elements
# in the atom distance matrix
# do a little extra work to keep the rows of
# reduced_scattering_factor_df in the same order as
# in scattering_factors_df
elements_of_interest = [
element
for element in scattering_factors_df.index
if element in atom_distance_matrix_df.index
]
reduced_scattering_factors_df = scattering_factors_df.loc[elements_of_interest]
reduced_scattering_factors = reduced_scattering_factors_df.to_numpy()
print(f"reduced_scattering_factors.shape: {reduced_scattering_factors.shape}")
# loop on q
q_range = np.arange(qmin, qmax, qstep)
# print(f"q_range: {q_range}")
print(f"q_range.shape: {q_range.shape}")
# how much memory are we looking at for all the q?
    q_reduced_scattering_size = len(q_range) * np.prod(
reduced_scattering_factors.shape
)
print(f"q_reduced_scattering_size: {q_reduced_scattering_size}")
# we need to expand the shape of q_range from (Q, ) to (Q, 1, 1)
# so that reduced_scattering_factors[:, 1:9:2] * qs_expanded
# has shape (E, 4) * (Q, 1, 1) -> (Q, E, 4)
# where E is the number of elements and Q is the number of qs
qs =
|
np.expand_dims(q_range, axis=(1, 2))
|
numpy.expand_dims
|
import numpy as np
import copy
import logging
from sklearn.neighbors import KDTree
# each action -> a lru_knn buffer
# alpha is for updating the internal reward i.e. count based reward
class LRU_KNN_PS(object):
def __init__(self, capacity, obs_shape, z_dim, env_name, action, num_actions=6, knn=4, debug=True, gamma=0.99, alpha=0.1,
beta=0.01):
self.obs =
|
np.empty((capacity,) + obs_shape, dtype=np.uint8)
|
numpy.empty
|
import h5py
import numpy as np
#######################################
#--- get information on data series ---#
#######################################
def get_attrs_from_dict(obj, meta):
def init_meta(p):
for key, value in p.items():
meta[key] = np.empty(nfiles, dtype=np.dtype(value[1]))
def get_attr(header, p):
try:
attr = p[1](header[p[0]])
except:
attr = 0
return attr
nfiles = len(meta['master'])
p = obj.attributes
init_meta(p)
for i, m in enumerate(meta.copy()['master']):
filename = obj.datdir + m
header = obj.get_header(filename)
for key, value in p.items():
meta[key][i] = get_attr(header, value)
def get_header_h5(*args, **kwargs):
return 0
def get_attrs_h5(obj, meta,):
def init_meta(p):
for key, value in p.items():
meta[key] = np.empty(nfiles, dtype=np.dtype(value[1]))
def get_attr(f, p, key):
if p[0] == obj.h5opt['data']:
attr = f[p[0]].attrs[key]
else:
if p[0]:
attr = f[p[0]]
if attr.dtype.kind == 'V':
attr = np.mean(attr.value['value']).astype(
|
np.dtype(p[1])
|
numpy.dtype
|
import numpy as np
from .unet import UNet
from pathlib import Path
from random import sample
import pdb
import math
from math import ceil
import pickle
import cv2
from ..tools.pytorch_batchsize import *
from ..tools.heatmap_to_points import *
from ..tools.helper import *
from ..tools.image_tools import *
from .basic import *
from .unet_revised import SE_Res_UNet
from PIL import Image
import glob
import sys
from fastprogress.fastprogress import master_bar, progress_bar
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms
#from .basic import *
from torch import nn
import random
import platform
import matplotlib.pyplot as plt
import pickle
import os
import sys
import warnings
from .hourglass import hg
__all__ = ["DataAugmentation", "HeatmapLearner", "HeatLoss_OldGen_0", "HeatLoss_OldGen_1", "HeatLoss_OldGen_2", "HeatLoss_OldGen_3", "HeatLoss_OldGen_4", "HeatLoss_NextGen_0", "HeatLoss_NextGen_1",
"HeatLoss_NextGen_2", "HeatLoss_NextGen_3", "Loss_weighted"]
class UnNormalize(object):
def __init__(self, mean, std):
self.mean = mean
self.std = std
def __call__(self, tensor):
"""
Args:
tensor (Tensor): Tensor image of size (C, H, W) to be normalized.
Returns:
Tensor: Normalized image.
"""
for t, m, s in zip(tensor, self.mean, self.std):
t.mul_(s).add_(m)
# The normalize code -> t.sub_(m).div_(s)
return tensor
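# UnNormalize inverts transforms.Normalize channel by channel, in place
# (t.mul_(std).add_(mean)). A small usage sketch (hypothetical tensor names,
# not from the original code) -- clone first if the normalized tensor is still
# needed:
#
#   un_norm = UnNormalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
#   img_for_plot = un_norm(img_tensor.clone())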
class CustomHeatmapDataset(Dataset):
"CustomImageDataset with `image_files`,`y_func`, `convert_mode` and `size(height, width)`"
def __init__(self, data, hull_path, size=(512,512), grayscale=False, normalize_mean=None, normalize_std=None, data_aug=None,
is_valid=False, do_normalize=True, clahe=True):
self.data = data
self.size = size
if normalize_mean is None:
if grayscale:
normalize_mean = [0.131]
else:
normalize_mean = [0.485, 0.456, 0.406]
if normalize_std is None:
if grayscale:
normalize_std = [0.308]
else:
normalize_std = [0.229, 0.224, 0.225]
self.normalize = transforms.Normalize(normalize_mean,normalize_std)
self.un_normalize = UnNormalize(mean=normalize_mean, std=normalize_std)
self.hull_path = hull_path
self.convert_mode = "L" if grayscale else "RGB"
self.data_aug = data_aug
self.to_t = transforms.ToTensor()
self.is_valid = is_valid
self.do_normalize = do_normalize
self.clahe = cv2.createCLAHE(clipLimit=20.0,tileGridSize=(30,30)) if clahe else None
def __len__(self):
return len(self.data)
def load_labels(self,idx):
heatmaps = []
for fname in self.data[idx][1]:
mask_file = fname.parent/(fname.stem + "_mask"+ fname.suffix)
heatmaps.append([load_heatmap(fname, size=self.size),load_heatmap(mask_file, size=self.size)])
return heatmaps
def load_hull(self, idx):
return load_heatmap(self.hull_path/self.data[idx][0].name, size=self.size)
def apply_clahe(self,img):
        if self.clahe is None:
return img
img = np.asarray(img)
img = self.clahe.apply(img)
return Image.fromarray(img)
def __getitem__(self, idx):
image = load_image(self.data[idx][0], size=self.size, convert_mode=self.convert_mode, to_numpy=False)
labels = self.load_labels(idx)
hull = self.load_hull(idx)
image = self.apply_clahe(image)
if (not self.is_valid) and (self.data_aug is not None):
image, labels, hull = self.data_aug.transform(image, labels, hull)
hull = torch.squeeze(self.to_t(hull),dim=0).type(torch.bool)
labels_extraced = [label[0] for label in labels]
masks_extraced = [label[1] for label in labels]
labels_extraced = self.to_t(np.stack(labels_extraced, axis=2))
masks_extraced = self.to_t(np.stack(masks_extraced, axis=2)).type(torch.bool)
image = self.to_t(image)
if self.do_normalize:
image = self.normalize(image)
return self.data[idx][0].stem, image, labels_extraced, masks_extraced, hull
class RandomRotationImageTarget(transforms.RandomRotation):
def __call__(self, img, targets, hull):
angle = self.get_params(self.degrees)
img = transforms.functional.rotate(img, angle, self.resample, self.expand, self.center)
hull = transforms.functional.rotate(hull, angle, self.resample, self.expand, self.center)
for idx in range(len(targets)):
targets[idx][0] = transforms.functional.rotate(targets[idx][0], angle, self.resample, self.expand, self.center)
targets[idx][1] = transforms.functional.rotate(targets[idx][1], angle, self.resample, self.expand, self.center)
return img, targets, hull
class RandomHorizontalFlipImageTarget(transforms.RandomHorizontalFlip):
def __call__(self, img, targets, hull):
if random.random() < self.p:
img = transforms.functional.hflip(img)
hull = transforms.functional.hflip(hull)
for idx in range(len(targets)):
targets[idx][0] = transforms.functional.hflip(targets[idx][0])
targets[idx][1] = transforms.functional.hflip(targets[idx][1])
return img,targets,hull
class RandomVerticalFlipImageTarget(transforms.RandomVerticalFlip):
def __call__(self, img, targets, hull):
if random.random() < self.p:
img = transforms.functional.vflip(img)
hull = transforms.functional.vflip(hull)
for idx in range(len(targets)):
targets[idx][0] = transforms.functional.vflip(targets[idx][0])
targets[idx][1] = transforms.functional.vflip(targets[idx][1])
return img,targets,hull
class RandomPerspectiveImageTarget(transforms.RandomPerspective):
def __call__(self, img, targets, hull):
if not transforms.functional._is_pil_image(img):
raise TypeError('img should be PIL Image. Got {}'.format(type(img)))
if random.random() < self.p:
with warnings.catch_warnings():
warnings.filterwarnings("ignore", message="torch.lstsq")
width, height = img.size
startpoints, endpoints = self.get_params(width, height, self.distortion_scale)
img = transforms.functional.perspective(img, startpoints, endpoints, self.interpolation, self.fill)
hull = transforms.functional.perspective(hull, startpoints, endpoints, self.interpolation, self.fill)
for idx in range(len(targets)):
targets[idx][0] = transforms.functional.perspective(targets[idx][0], startpoints, endpoints, self.interpolation, self.fill)
targets[idx][1] = transforms.functional.perspective(targets[idx][1], startpoints, endpoints, self.interpolation, self.fill)
return img,targets,hull
class ComposeImageTarget(transforms.Compose):
def __call__(self, img, targets, hull):
for t in self.transforms:
img,targets,hull = t(img, targets, hull)
return img,targets,hull
class DataAugmentation:
"DataAugmentation class with `size(height,width)`"
def __init__(self, rotation=20,horizontal_flip_p=0.5,
vertical_flip_p=0.5,warp=0.3,warp_p=0.5, zoom=0.8,
brightness=0.6, contrast=0.6, GaussianBlur=1):
self.lightning_transforms = transforms.Compose([transforms.ColorJitter(brightness=brightness,contrast=contrast),
#transforms.GaussianBlur(kernel_size=GaussianBlur)
])
self.affine_transforms = ComposeImageTarget([
RandomRotationImageTarget(degrees=(-rotation,rotation)),
RandomHorizontalFlipImageTarget(p=horizontal_flip_p),
RandomVerticalFlipImageTarget(p=vertical_flip_p),
RandomPerspectiveImageTarget(distortion_scale=warp, p=warp_p),
#transforms.RandomResizedCrop(size=size,scale=(zoom,1.0),ratio=(1.0,1.0))
])
def transform(self,features,labels, hull):
#do lighting transforms for features
features = self.lightning_transforms(features)
#do affine transforms for features and labels
features,labels,hull = self.affine_transforms(features, labels, hull)
return features,labels,hull
class heatmap_metric(LearnerCallback):
def __init__(self, features, true_positive_threshold=10, metric_counter=1):
self.__counter_epoch = 0
self.__metric_counter = metric_counter
self.__custom_metrics = {"metrics":[],"types":[]}
self.__features = features
self.__true_positive_threshold = true_positive_threshold
self.numeric_metric = 1
self.accuracy_metric = 2
for item in self.__features.keys():
self.__custom_metrics["metrics"].append(item+"_pos_train")
self.__custom_metrics["types"].append(self.numeric_metric)
self.__custom_metrics["metrics"].append(item+"_pos_valid")
self.__custom_metrics["types"].append(self.numeric_metric)
self.__custom_metrics["metrics"].append(item+"_accuracy_train")
self.__custom_metrics["types"].append(self.accuracy_metric)
self.__custom_metrics["metrics"].append(item+"_accuracy_valid")
self.__custom_metrics["types"].append(self.accuracy_metric)
def get_metric_names(self):
return self.__custom_metrics["metrics"]
def __calc_metrics(self, targets, outputs, metric_values, train):
ext = "train" if train else "valid"
for target,output,feature in zip(targets, outputs, list(self.__features.keys())):
type_of = self.__features[feature]["type"]
if (type_of == "circle"):
points_target = heatmap_to_circle(target)
points_output = heatmap_to_circle(output)
if (points_target is not None):
metric_values[feature+"_accuracy_"+ext]["total_targets"] += 1
if (points_target is not None) and (points_output is not None):
                    mid_point_output = np.round(np.sum(points_output, axis=0)/len(points_output)).astype(int)
                    mid_point_target = np.round(np.sum(points_target, axis=0)/len(points_target)).astype(int)
diff_circle_midpoint = np.sqrt(np.sum((mid_point_output - mid_point_target)**2))
metric_values[feature+"_pos_"+ext].append(diff_circle_midpoint)
if diff_circle_midpoint < self.__true_positive_threshold:
metric_values[feature+"_accuracy_"+ext]["total_true_positives"] += 1
elif type_of == "single_point":
center_point_target = heatmap_to_max_confidence_point(target)
center_point_output = heatmap_to_max_confidence_point(output)
if (center_point_target is not None):
metric_values[feature+"_accuracy_"+ext]["total_targets"] += 1
if (center_point_target is not None) and (center_point_output is not None):
diff_center = np.sqrt(np.sum((center_point_output - center_point_target)**2))
metric_values[feature+"_pos_"+ext].append(diff_center)
if diff_center < self.__true_positive_threshold:
metric_values[feature+"_accuracy_"+ext]["total_true_positives"] += 1
elif type_of == "multi_point":
all_peaks_target = heatmap_to_multiple_points(target)
all_peaks_output = heatmap_to_multiple_points(output)
if (all_peaks_target is not None):
metric_values[feature+"_accuracy_"+ext]["total_targets"] += len(all_peaks_target)
if (all_peaks_target is not None) and (all_peaks_output is not None):
diffs = []
for peak_target in all_peaks_target:
if len(all_peaks_output) == 0:
break
s = np.argmin(np.sqrt(np.sum((all_peaks_output - peak_target)**2, axis=1)))
diffs.append(np.sqrt(np.sum((all_peaks_output[s] - peak_target)**2)))
if diffs[-1] < self.__true_positive_threshold:
metric_values[feature+"_accuracy_"+ext]["total_true_positives"] += 1
all_peaks_output = np.delete(all_peaks_output, s, axis=0)
diff_nut_edges = np.array(diffs).mean()
metric_values[feature+"_pos_"+ext].append(diff_nut_edges)
else:
raise("The Heatmaptype " + type_of + " is not implemented yet.")
return metric_values
def on_batch_end(self, last_output, last_target, train):
if self.__counter_epoch % self.__metric_counter == 0:
last_target = last_target.numpy()
last_output = last_output.numpy()
for target_batch,output_batch in zip(last_target, last_output):
self.metrics_values = self.__calc_metrics(target_batch,output_batch,
self.metrics_values, train)
def on_epoch_begin(self):
if self.__counter_epoch % self.__metric_counter == 0:
self.metrics_values = {}
for idx,metric in enumerate(self.__custom_metrics["metrics"]):
if self.__custom_metrics["types"][idx] == self.numeric_metric:
self.metrics_values[metric] = []
else:
self.metrics_values[metric] = {"total_targets":0,"total_true_positives":0}
def on_epoch_end(self):
metrics = list(np.zeros(len(self.__custom_metrics["metrics"]), dtype=np.float32))
if self.__counter_epoch % self.__metric_counter == 0:
for idx,metric in enumerate(self.__custom_metrics["metrics"]):
if self.__custom_metrics["types"][idx] == self.numeric_metric:
if len(self.metrics_values[metric]) == 0:
metrics[idx] = 0
else:
metrics[idx] = np.array(self.metrics_values[metric]).mean()
else:
if self.metrics_values[metric]["total_targets"] != 0:
metrics[idx] = self.metrics_values[metric]["total_true_positives"] / self.metrics_values[metric]["total_targets"]
else:
metrics[idx] = 0
self.__counter_epoch += 1
return metrics
class HeatLoss_OldGen_0(nn.Module):
def __init__(self):
super().__init__()
r"""Class for HeatLoss calculation. This variant includes no masks, simple Mean absolute error over all pixles:
"""
def forward(self, input, target, masks, hull):
return torch.mean(torch.abs(input - target))
class HeatLoss_OldGen_1(nn.Module):
def __init__(self, print_out_losses=False):
super().__init__()
r"""Class for HeatLoss calculation. This variant includes the masks of following objects:
- specific features
"""
self.print_out_losses = print_out_losses
def forward(self, input, target, masks, hull):
m1 = (target > 0.0)
ret1 = torch.abs(input[m1] - target[m1])
mean1 = torch.mean(ret1)
if self.print_out_losses:
print("specific features:",mean1.item(), end="\r")
return mean1
class HeatLoss_OldGen_2(nn.Module):
def __init__(self, print_out_losses=False):
r"""Class for HeatLoss calculation. This variant includes the masks of following objects:
- Background (no heat at all)
- specific features
"""
super().__init__()
self.print_out_losses = print_out_losses
def forward(self, input, target, masks, hull):
m1 = (target > 0.0)
m2 = torch.logical_not(m1)
ret1 = torch.abs(input[m1] - target[m1])
ret2 = torch.abs(input[m2] - target[m2])
mean1 = torch.mean(ret1)
mean2 = torch.mean(ret2)
if self.print_out_losses:
print("specific features:",mean1.item(), "background:",mean2.item(), end="\r")
return (mean1+mean2)/2
class HeatLoss_OldGen_3(nn.Module):
def __init__(self, print_out_losses=False):
super().__init__()
r"""Class for HeatLoss calculation. This variant includes the masks of following objects:
- specific feature
- all features in a image
"""
self.print_out_losses = print_out_losses
def forward(self, input, target, masks, hull):
m1 = (target > 0.0)
m2 = torch.zeros(m1.shape, dtype=torch.bool, device=input.device)
for dset in range(input.shape[0]):
logor = torch.zeros((input.shape[2], input.shape[3]), dtype=torch.bool, device=input.device)
for i in range(input.shape[1]):
logor = logor | m1[dset,i,:,:]
for i in range(input.shape[1]):
m2[dset,i,:,:] = logor
ret1 = torch.abs(input[m1] - target[m1])
ret2 = torch.abs(input[m2] - target[m2])
mean1 = torch.mean(ret1)
mean2 = torch.mean(ret2)
if self.print_out_losses:
print("specific feature:",mean1.item(), "all features:",mean2.item(), end="\r")
return (mean1+mean2)/2
class HeatLoss_OldGen_4(nn.Module):
def __init__(self, print_out_losses=False):
super().__init__()
r"""Class for HeatLoss calculation. This variant includes the masks of following objects:
- specific feature
- all features in a image
- background
"""
self.print_out_losses = print_out_losses
def forward(self, input, target, masks, hull):
m1 = (target > 0.0)
m2 = torch.zeros(m1.shape, dtype=torch.bool, device=input.device)
m3 = torch.logical_not(m1)
for dset in range(input.shape[0]):
logor = torch.zeros((input.shape[2], input.shape[3]), dtype=torch.bool, device=input.device)
for i in range(input.shape[1]):
logor = logor | m1[dset,i,:,:]
for i in range(input.shape[1]):
m2[dset,i,:,:] = logor
ret1 = torch.abs(input[m1] - target[m1])
ret2 = torch.abs(input[m2] - target[m2])
ret3 = torch.abs(input[m3] - target[m3])
mean1 = torch.mean(ret1)
mean2 = torch.mean(ret2)
mean3 = torch.mean(ret3)
if self.print_out_losses:
print("specific feature:",mean1.item(), "all features:",mean2.item(), "background:",mean3.item(), end="\r")
return (mean1+mean2+mean3)/3
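# --- Added illustrative sketch (not part of the original source). ---
# All HeatLoss variants above share the signature forward(input, target, masks, hull)
# on [B, C, H, W] heatmap tensors; the OldGen variants ignore `masks` and `hull`.
# A minimal smoke test, assuming only that torch is available:
def _demo_heatloss_oldgen():
    import torch
    tgt = torch.rand(2, 3, 64, 64)
    tgt[tgt < 0.5] = 0.0                      # zero out background pixels so both masks are non-empty
    inp = torch.rand(2, 3, 64, 64)
    loss = HeatLoss_OldGen_2()(inp, tgt, None, None)   # masks/hull unused by this variant
    return loss.item()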
class HeatLoss_NextGen_0(nn.Module):
def __init__(self, print_out_losses=False):
super().__init__()
r"""Class for Next Generation HeatLoss calculation. This variant includes offline generated masks of following objects:
- specific feature with mask dilation (single loss calculation for every feature)
        - convex hull of all features with mask dilation
- background
"""
self.print_out_losses = print_out_losses
def forward(self, input, target, masks, hull):
hull_not = torch.logical_not(hull)
feature_count = target.shape[1]
loss_items = torch.zeros(feature_count, dtype=torch.float32, device=input.device)
for idx in range(feature_count):
diff = torch.abs(input[:,idx,:,:][masks[:,idx,:,:]] - target[:,idx,:,:][masks[:,idx,:,:]])
if len(diff) > 0:
loss_items[idx] = torch.mean(diff)
loss_hull = torch.mean(torch.abs(input[hull] - target[hull]))
loss_backgrond = torch.mean(torch.abs(input[hull_not] - target[hull_not]))
if self.print_out_losses:
# print loss begin
out_str = ""
print_items_loss = []
sum_items_loss = torch.zeros(1, dtype=torch.float32, device=input.device)
for idx in range(len(loss_items)):
out_str = out_str + "loss_item_"+str(idx) + " {:.4f} "
print_items_loss.append(round(loss_items[idx].item(),4))
sum_items_loss += loss_items[idx]
print_items_loss.append(round(loss_hull.item(),4))
print_items_loss.append(round(loss_backgrond.item(),4))
print((out_str+" loss_hull {:.4f} loss_backgrond {:.4f}").format(*print_items_loss), end="\r")
# print loss end
return (sum_items_loss+loss_hull+loss_backgrond)/(feature_count+2)
class HeatLoss_NextGen_1(nn.Module):
    def __init__(self, print_out_losses=False):
        super().__init__()
        r"""Class for Next Generation HeatLoss calculation. This variant includes offline generated masks of following objects:
        - specific feature with mask dilation (calculation of feature loss all the same)
        - convex hull of all features with mask dilation
        - background
        """
        self.print_out_losses = print_out_losses
def forward(self, input, target, masks, hull):
hull_not = torch.logical_not(hull)
loss_features = torch.mean(torch.abs(input[masks] - target[masks]))
loss_hull = torch.mean(torch.abs(input[hull] - target[hull]))
loss_backgrond = torch.mean(torch.abs(input[hull_not] - target[hull_not]))
if self.print_out_losses:
print(("loss_features {:.4f} loss_hull {:.4f} loss_backgrond {:.4f}").format(loss_features,loss_hull,loss_backgrond), end="\r")
return (loss_features+loss_hull+loss_backgrond)/3
class HeatLoss_NextGen_2(nn.Module):
def __init__(self, print_out_losses=False):
super().__init__()
r"""Class for Next Generation HeatLoss calculation. This variant includes offline generated masks of following objects:
- specific feature with mask dilation (calculation of feature loss all the same)
- all features in a image (calculation of feature loss all the same)
- background (calculation of feature loss all the same)
"""
self.print_out_losses = print_out_losses
def forward(self, input, target, masks, hull):
all_mask = torch.any(masks,dim=1)[:,None].repeat(1,target.shape[1],1,1)
mask_not = torch.logical_not(masks)
loss_features = torch.mean(torch.abs(input[masks] - target[masks]))
loss_all_features = torch.mean(torch.abs(input[all_mask] - target[all_mask]))
loss_backgrond = torch.mean(torch.abs(input[mask_not] - target[mask_not]))
if self.print_out_losses:
print(("loss_features {:.4f} loss_all_features {:.4f} loss_backgrond {:.4f}").format(loss_features.item(),loss_all_features.item(),loss_backgrond.item()), end="\r")
return (loss_features+loss_all_features+loss_backgrond)/3
class HeatLoss_NextGen_3(nn.Module):
def __init__(self, print_out_losses=False):
super().__init__()
r"""Class for Next Generation HeatLoss calculation. This variant includes offline generated masks of following objects:
- specific feature with mask dilation (single loss calculation for every feature)
- all features in a image (single loss calculation for every feature)
- background (single loss calculation for every feature)
"""
self.print_out_losses = print_out_losses
def forward(self, input, target, masks, hull):
feature_count = target.shape[1]
mask_not = torch.logical_not(masks)
all_mask = torch.any(masks,dim=1)[:,None].repeat(1,target.shape[1],1,1)
loss_features = torch.zeros(feature_count, dtype=torch.float32, device=input.device)
loss_backgrond = torch.zeros(feature_count, dtype=torch.float32, device=input.device)
loss_all_features = torch.zeros(feature_count, dtype=torch.float32, device=input.device)
for idx in range(feature_count):
diff = torch.abs(input[:,idx,:,:][masks[:,idx,:,:]] - target[:,idx,:,:][masks[:,idx,:,:]])
diff_not = torch.abs(input[:,idx,:,:][mask_not[:,idx,:,:]] - target[:,idx,:,:][mask_not[:,idx,:,:]])
diff_all = torch.abs(input[:,idx,:,:][all_mask[:,idx,:,:]] - target[:,idx,:,:][all_mask[:,idx,:,:]])
if len(diff) > 0:
loss_features[idx] = torch.mean(diff)
if len(diff_not) > 0:
loss_backgrond[idx] = torch.mean(diff_not)
if len(diff_all) > 0:
loss_all_features[idx] = torch.mean(diff_all)
loss_features = torch.mean(loss_features)
loss_backgrond = torch.mean(loss_backgrond)
loss_all_features = torch.mean(loss_all_features)
if self.print_out_losses:
print(("loss_features {:.4f} loss_all_features {:.4f} loss_backgrond {:.4f}").format(loss_features.item(),loss_all_features.item(),loss_backgrond.item()), end="\r")
return (loss_features+loss_all_features+loss_backgrond)/3
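# --- Added illustrative sketch (not part of the original source). ---
# The NextGen variants additionally expect boolean `masks` (one channel per feature)
# and, for some variants, a boolean convex-`hull` mask of the same [B, C, H, W] shape.
# A hedged example with synthetic tensors:
def _demo_heatloss_nextgen():
    import torch
    inp = torch.rand(2, 3, 64, 64)
    tgt = torch.rand(2, 3, 64, 64)
    masks = tgt > 0.5                                                 # per-feature foreground masks
    hull = torch.any(masks, dim=1, keepdim=True).repeat(1, 3, 1, 1)   # stand-in for the offline hull
    return HeatLoss_NextGen_2()(inp, tgt, masks, hull).item()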
class AWing(nn.Module):
def __init__(self, alpha=2.1, omega=14, epsilon=1, theta=0.5):
super().__init__()
self.alpha = float(alpha)
self.omega = float(omega)
self.epsilon = float(epsilon)
self.theta = float(theta)
def forward(self, y_pred , y):
lossMat = torch.zeros_like(y_pred)
A = self.omega * (1/(1+(self.theta/self.epsilon)**(self.alpha-y)))*(self.alpha-y)*((self.theta/self.epsilon)**(self.alpha-y-1))/self.epsilon
C = self.theta*A - self.omega*torch.log(1+(self.theta/self.epsilon)**(self.alpha-y))
case1_ind = torch.abs(y-y_pred) < self.theta
case2_ind = torch.abs(y-y_pred) >= self.theta
lossMat[case1_ind] = self.omega*torch.log(1+torch.abs((y[case1_ind]-y_pred[case1_ind])/self.epsilon)**(self.alpha-y[case1_ind]))
lossMat[case2_ind] = A[case2_ind]*torch.abs(y[case2_ind]-y_pred[case2_ind]) - C[case2_ind]
return lossMat
class Loss_weighted(nn.Module):
def __init__(self, W=10, alpha=2.1, omega=14, epsilon=1, theta=0.5):
super().__init__()
self.W = float(W)
self.Awing = AWing(alpha, omega, epsilon, theta)
def forward(self, y_pred , y, M, hull):
#pdb.set_trace()
M = M.float()
Loss = self.Awing(y_pred,y)
weighted = Loss * (self.W * M + 1.)
return weighted.mean()
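# --- Added illustrative sketch (not part of the original source). ---
# Loss_weighted applies the Adaptive Wing loss and up-weights pixels selected by the
# binary mask M by a factor (W * M + 1). A minimal example with synthetic heatmaps:
def _demo_loss_weighted():
    import torch
    y = torch.rand(2, 3, 64, 64)
    y_pred = torch.rand(2, 3, 64, 64)
    M = (y > 0.5)                          # foreground pixels that receive the extra weight
    crit = Loss_weighted(W=10)
    return crit(y_pred, y, M, hull=None).item()   # `hull` is accepted but unused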
class HeatmapLearner:
def __init__(self, features, root_path, images_path, hull_path, size=(512,512), bs=-1, items_count=-1, gpu_id=0,
norm_stats=None, data_aug=None, preload=False, sample_results_path="sample_results",
unet_init_features=16, valid_images_store="valid_images.npy", image_convert_mode="L", metric_counter=1,
sample_img=None, true_positive_threshold=0.02, ntype="unet", lr=1e-03, file_filters_include=None, file_filters_exclude=None, clahe=False,
disable_metrics=False, file_prefix="", loss_func=None, weight_decay=0, num_load_workers=None, dropout=True, dropout_rate=0.15, save_counter=10):
r"""Class for train an Unet-style Neural Network, for heatmap based image recognition
Args:
features : Heatmap features for the neural net. This must be a dict. The Keys must be the folder names for the heatmap features.
Every entry is a dict with the feature types: single_point, multi_point, circle
Example:
{"feature_1":{"type":"single_point"},
"feature_2":{"type":"multi_point"},
"feature_3":{"type":"circle"}}
root_path : The root path, where image files and label files are located
images_path : The path, where the images are located, in relation to the root_path
            size : Size of images to pass through the neural network. The sizes must be a power of two with the dimensions of (Height, Width).
bs : The Batch Size
norm_stats : Normalize values for images in the form (mean,std).
            file_filters_include : include file filter in images_path, must be a list with include search strings
file_filters_exclude : exclude file filter in images_path, must be a list with exclude search strings
Example:
"""
# check assertions
assert power_of_2(size), "size must be a power of 2, to work with this class"
#static variables (they are fix)
heatmap_paths = list(features.keys())
for idx in range(len(heatmap_paths)):
heatmap_paths[idx] = Path(heatmap_paths[idx])
self.features = features
self.__size = size
self.save_counter = save_counter
self.__num_load_workers = num_load_workers
self.__root_path = Path(root_path)
self.__images_path = Path(images_path)
self.__hull_path = self.__root_path/Path(hull_path)
self.__sample_results_path = Path(sample_results_path)
(self.__root_path/self.__sample_results_path).mkdir(parents=True, exist_ok=True)
self.stacked_net = True if ntype=="stacked_hourglass" else False
self.__gpu_id = gpu_id
self.__file_prefix = file_prefix
data_aug = DataAugmentation() if data_aug is None else data_aug
if norm_stats is None:
norm_stats = ([0.131],[0.308]) if image_convert_mode == "L" else ([0.485, 0.456, 0.406],[0.229, 0.224, 0.225])
if sample_img is None:
t_img_files = glob.glob(str(self.__root_path/self.__images_path/"*"))
idx = random.randint(0, len(t_img_files)-1)
self.__sample_img = self.__root_path/self.__images_path/Path(t_img_files[idx]).name
else:
self.__sample_img = self.__root_path/self.__images_path/sample_img
true_positive_threshold = round(true_positive_threshold*self.__size[0])
# dynamic variables (they can change during learning)
self.__epochs = 0
self.__train_losses = None
self.__valid_losses = None
self.__metrics = None
file_filters_include = np.array(file_filters_include) if file_filters_include is not None else None
file_filters_exclude = np.array(file_filters_exclude) if file_filters_exclude is not None else None
self.__create_learner(file_filters_include=file_filters_include, file_filters_exclude=file_filters_exclude, valid_images_store=valid_images_store, items_count=items_count,
features=features, bs=bs, data_aug=data_aug,image_convert_mode=image_convert_mode,
heatmap_paths=heatmap_paths, true_positive_threshold=true_positive_threshold, metric_counter=metric_counter,
lr=lr, clahe=clahe, norm_stats=norm_stats, unet_init_features=unet_init_features, ntype=ntype, disable_metrics=disable_metrics, loss_func=loss_func,
weight_decay = weight_decay, dropout=dropout, dropout_rate=dropout_rate)
def __create_learner(self, file_filters_include, file_filters_exclude, valid_images_store, items_count, features, bs, data_aug,
image_convert_mode, heatmap_paths, true_positive_threshold, metric_counter, lr, clahe, norm_stats,
unet_init_features, ntype, disable_metrics, loss_func, weight_decay, dropout, dropout_rate):
training_data, valid_data = self.__load_data(features=features,file_filters_include=file_filters_include,
file_filters_exclude = file_filters_exclude, valid_images_store=valid_images_store,
items_count=items_count)
self.__unet_in_channels = 1 if image_convert_mode == "L" else 3
self.__unet_out_channls = len(heatmap_paths)
heatmap_files_sample = []
for feat in features.keys():
heatmap_files_sample.append(self.__root_path/feat/self.__sample_img.name)
self.sample_dataset = CustomHeatmapDataset(data=[[self.__sample_img,heatmap_files_sample]], hull_path=self.__hull_path, grayscale=image_convert_mode == "L",
normalize_mean=norm_stats[0],normalize_std=norm_stats[1], is_valid=True,
clahe=clahe, size=self.__size)
self.train_dataset = CustomHeatmapDataset(data=training_data, hull_path=self.__hull_path, grayscale=image_convert_mode == "L",
normalize_mean=norm_stats[0],normalize_std=norm_stats[1], data_aug=data_aug, clahe=clahe,
size=self.__size)
self.valid_dataset = CustomHeatmapDataset(data=valid_data, hull_path=self.__hull_path, grayscale=image_convert_mode == "L",
normalize_mean=norm_stats[0],normalize_std=norm_stats[1], is_valid=True, clahe=clahe,
size=self.__size)
sample_img = None
if self.__sample_img is not None:
to_t = transforms.ToTensor()
img = to_t(load_image(self.__root_path/self.__images_path/self.__sample_img,
convert_mode=image_convert_mode, size=self.__size, to_numpy=False))
masks = []
for idx in range(len(heatmap_paths)):
heat = to_t(load_heatmap(self.__root_path/heatmap_paths[idx]/self.__sample_img))
masks.append(heat)
sample_img = (img,masks)
metric = None if disable_metrics else heatmap_metric(features = features, true_positive_threshold = true_positive_threshold, metric_counter = metric_counter)
net = self.__get_net(ntype, unet_init_features, dropout, dropout_rate).to(torch.device("cuda:"+str(self.__gpu_id)))
opt = torch.optim.Adam(net.parameters(), lr=lr, weight_decay=weight_decay)
loss_func = HeatLoss_NextGen_1() if loss_func is None else loss_func
loss_func = loss_func.to(torch.device("cuda:"+str(self.__gpu_id)))
if bs == -1:
batch_estimator = Batch_Size_Estimator(net=net, opt=opt,
loss_func=loss_func,
gpu_id=self.__gpu_id, dataset = self.train_dataset)
bs = batch_estimator.find_max_bs()
train_dl = DataLoader(self.train_dataset, batch_size=bs, shuffle=True, num_workers=num_workers() if self.__num_load_workers is None else self.__num_load_workers, pin_memory=False)
valid_dl = DataLoader(self.valid_dataset, batch_size=bs, shuffle=True, num_workers=num_workers() if self.__num_load_workers is None else self.__num_load_workers, pin_memory=False)
self.learner = Learner(model=net,loss_func=loss_func, train_dl=train_dl, valid_dl=valid_dl,
optimizer=opt, learner_callback= metric,gpu_id= self.__gpu_id,
predict_smaple_func=self.predict_sample, save_func=self.save_func,
stacked_net= self.stacked_net)
def __get_net(self, ntype, unet_init_features, dropout, dropout_rate):
if ntype == "res_unet++":
net = UNet(in_channels = self.__unet_in_channels, out_channels = self.__unet_out_channls,
init_features = unet_init_features, resblock=True, squeeze_excite=True,
aspp=True, attention=True, bn_relu_at_first=True, bn_relu_at_end=False)
elif ntype == "res_unet_bn_relu_end":
net = UNet(in_channels = self.__unet_in_channels, out_channels = self.__unet_out_channls,
init_features = unet_init_features, resblock=True, squeeze_excite=False,
aspp=False, attention=False, bn_relu_at_first=False, bn_relu_at_end=True)
elif ntype == "attention_unet":
net = UNet(in_channels = self.__unet_in_channels, out_channels = self.__unet_out_channls,
init_features = unet_init_features, resblock=False, squeeze_excite=False,
aspp=False, attention=True, bn_relu_at_first=False, bn_relu_at_end=True)
elif ntype == "aspp_unet":
net = UNet(in_channels = self.__unet_in_channels, out_channels = self.__unet_out_channls,
init_features = unet_init_features, resblock=False, squeeze_excite=False,
aspp=True, attention=False, bn_relu_at_first=False, bn_relu_at_end=True)
elif ntype == "squeeze_excite_unet":
net = UNet(in_channels = self.__unet_in_channels, out_channels = self.__unet_out_channls,
init_features = unet_init_features, resblock=False, squeeze_excite=True,
aspp=False, attention=False, bn_relu_at_first=False, bn_relu_at_end=True)
elif ntype == "res_unet_bn_relu_first":
net = UNet(in_channels = self.__unet_in_channels, out_channels = self.__unet_out_channls,
init_features = unet_init_features, resblock=True, squeeze_excite=False,
aspp=False, attention=False, bn_relu_at_first=True, bn_relu_at_end=False)
elif ntype == "unet":
net = UNet(in_channels = self.__unet_in_channels, out_channels = self.__unet_out_channls,
init_features = unet_init_features, resblock=False, squeeze_excite=False,
aspp=False, attention=False, bn_relu_at_first=False, bn_relu_at_end=True)
elif ntype == "res34":
net = UNet(in_channels = self.__unet_in_channels, out_channels = self.__unet_out_channls,
init_features = unet_init_features, resblock=True, squeeze_excite=False,
aspp=False, attention=False, bn_relu_at_first=True, bn_relu_at_end=False,
block_sizes_down = res34_downsample, downsample_method=downsample_stride,
blocksize_bottleneck = 2, block_sizes_up=[2,2,2,2])
elif ntype == "sp_unet":
net = SE_Res_UNet(n_channels=self.__unet_in_channels, n_classes= self.__unet_out_channls,
init_features=unet_init_features, dropout=dropout, rate=dropout_rate)
elif ntype == "stacked_hourglass":
net = hg(num_stacks=4,num_blocks=2,num_classes=self.__unet_out_channls, input_features=self.__unet_in_channels)
else:
raise("Net type ´"+ ntype+"´ is not implemented!")
return net
def __load_data(self, features, file_filters_include, file_filters_exclude, valid_images_store, items_count):
def find_valid_imgs_func(all_image_files):
if not (self.__root_path/valid_images_store).is_file():
valid_images = [img for img in sample(list(all_image_files), int(len(all_image_files)*0.2))]
np.save(self.__root_path/valid_images_store, valid_images, allow_pickle=True)
return list(np.load(self.__root_path/valid_images_store, allow_pickle=True))
def filter_files(all_image_files):
if file_filters_include is not None:
new_filenames = []
for filename in all_image_files:
for ffilter in file_filters_include:
if filename.find((ffilter)) != -1:
new_filenames.append(filename)
break
all_image_files = np.array(new_filenames)
if file_filters_exclude is not None:
new_filenames = []
for filename in all_image_files:
include_in = True
for ffilter in file_filters_exclude:
if filename.find((ffilter)) != -1:
include_in = False
break
if include_in:
new_filenames.append(filename)
all_image_files = np.array(new_filenames)
return all_image_files
all_image_files = [fname.name for fname in list((self.__root_path/self.__images_path).glob("*.png"))]
all_image_files = all_image_files if items_count == -1 else all_image_files[:items_count]
all_image_files = filter_files(all_image_files)
valid_images_files = find_valid_imgs_func(all_image_files)
training_data = []
valid_data = []
pbar = progress_bar(range(len(all_image_files)))
pbar.comment = "loading files"
for idx in pbar:
img_file = all_image_files[idx]
heatmap_files = []
for feat in features.keys():
heatmap_files.append(self.__root_path/feat/img_file)
add_file = self.__root_path/self.__images_path/img_file
if img_file in valid_images_files:
valid_data.append((add_file,heatmap_files))
else:
training_data.append((add_file,heatmap_files))
return training_data, valid_data
def save_losses(self, filename=None, train=True,valid=True):
        assert ((self.__train_losses is not None) and (self.__valid_losses is not None)), "Call `fit` before losses can be saved"
filename = "losses" if filename is None else filename
if train:
np.save(self.__root_path/self.__sample_results_path/(self.__file_prefix+filename+"_train.npy"), self.__train_losses)
if valid:
np.save(self.__root_path/self.__sample_results_path/(self.__file_prefix+filename+"_valid.npy"), self.__valid_losses)
def get_metric_names(self):
return np.array(self.learner.metric_names)
def save_metrics(self,filename=None):
assert ((self.__metrics is not None)),"Call `fit` Before metrics can be saved"
filename = "metrics" if filename is None else filename
data = {"names":self.get_metric_names(),"metrics":self.__metrics}
pickle.dump(data, open(self.__root_path/self.__sample_results_path/(self.__file_prefix+filename+".pkl"),"wb"))
def get_metrics(self, specific_metric_names=None):
assert ((self.__metrics is not None)), "Call `fit` Before metrics can be retrived"
if specific_metric_names is None:
return self.__metrics
        specific_metric_names = np.array(specific_metric_names)
"""
Mask R-CNN
Common utility functions and classes.
Copyright (c) 2017 Matterport, Inc.
Licensed under the MIT License (see LICENSE for details)
Written by <NAME>
"""
import sys
import os
import cv2
import math
import random
import numpy as np
import scipy.misc
import _pickle as cPickle
from ctypes import *
import copy
import glob
import time
from aligning import estimateSimilarityTransform
#sys.path.append('./cocoapi/PythonAPI')
#from pycocotools.cocoeval import COCOeval
#from pycocotools import mask as maskUtils
import matplotlib.pyplot as plt
############################################################
# Bounding Boxes
############################################################
def extract_bboxes(mask):
"""Compute bounding boxes from masks.
mask: [height, width, num_instances]. Mask pixels are either 1 or 0.
Returns: bbox array [num_instances, (y1, x1, y2, x2)].
"""
boxes = np.zeros([mask.shape[-1], 4], dtype=np.int32)
for i in range(mask.shape[-1]):
m = mask[:, :, i]
# Bounding box.
horizontal_indicies = np.where(np.any(m, axis=0))[0]
vertical_indicies = np.where(np.any(m, axis=1))[0]
if horizontal_indicies.shape[0]:
x1, x2 = horizontal_indicies[[0, -1]]
y1, y2 = vertical_indicies[[0, -1]]
# x2 and y2 should not be part of the box. Increment by 1.
x2 += 1
y2 += 1
else:
# No mask for this instance. Might happen due to
# resizing or cropping. Set bbox to zeros
x1, x2, y1, y2 = 0, 0, 0, 0
boxes[i] = np.array([y1, x1, y2, x2])
return boxes.astype(np.int32)
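# --- Added illustrative sketch (not part of the original source). ---
# extract_bboxes works on a [H, W, num_instances] binary mask stack and returns
# (y1, x1, y2, x2) boxes where (y2, x2) lies one pixel outside the mask:
def _demo_extract_bboxes():
    mask = np.zeros((8, 8, 1), dtype=np.uint8)
    mask[2:5, 3:7, 0] = 1                      # a single 3x4 instance
    return extract_bboxes(mask)                # -> array([[2, 3, 5, 7]])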
def compute_iou(box, boxes, box_area, boxes_area):
"""Calculates IoU of the given box with the array of the given boxes.
box: 1D vector [y1, x1, y2, x2]
boxes: [boxes_count, (y1, x1, y2, x2)]
box_area: float. the area of 'box'
boxes_area: array of length boxes_count.
Note: the areas are passed in rather than calculated here for
efficiency. Calculate once in the caller to avoid duplicate work.
"""
# Calculate intersection areas
y1 = np.maximum(box[0], boxes[:, 0])
y2 = np.minimum(box[2], boxes[:, 2])
x1 = np.maximum(box[1], boxes[:, 1])
x2 = np.minimum(box[3], boxes[:, 3])
intersection = np.maximum(x2 - x1, 0) * np.maximum(y2 - y1, 0)
union = box_area + boxes_area[:] - intersection[:]
iou = intersection / union
return iou
def compute_overlaps(boxes1, boxes2):
"""Computes IoU overlaps between two sets of boxes.
boxes1, boxes2: [N, (y1, x1, y2, x2)].
For better performance, pass the largest set first and the smaller second.
"""
# Areas of anchors and GT boxes
area1 = (boxes1[:, 2] - boxes1[:, 0]) * (boxes1[:, 3] - boxes1[:, 1])
area2 = (boxes2[:, 2] - boxes2[:, 0]) * (boxes2[:, 3] - boxes2[:, 1])
# Compute overlaps to generate matrix [boxes1 count, boxes2 count]
# Each cell contains the IoU value.
overlaps = np.zeros((boxes1.shape[0], boxes2.shape[0]))
for i in range(overlaps.shape[1]):
box2 = boxes2[i]
overlaps[:, i] = compute_iou(box2, boxes1, area2[i], area1)
return overlaps
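# --- Added illustrative sketch (not part of the original source). ---
# compute_overlaps returns an IoU matrix of shape [len(boxes1), len(boxes2)]:
def _demo_compute_overlaps():
    boxes1 = np.array([[0, 0, 10, 10], [5, 5, 15, 15]], dtype=np.float32)
    boxes2 = np.array([[0, 0, 10, 10]], dtype=np.float32)
    return compute_overlaps(boxes1, boxes2)    # ~[[1.0], [0.143]]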
def compute_overlaps_masks(masks1, masks2):
'''Computes IoU overlaps between two sets of masks.
masks1, masks2: [Height, Width, instances]
'''
# flatten masks
masks1 = np.reshape(masks1 > .5, (-1, masks1.shape[-1])).astype(np.float32)
masks2 = np.reshape(masks2 > .5, (-1, masks2.shape[-1])).astype(np.float32)
area1 = np.sum(masks1, axis=0)
area2 = np.sum(masks2, axis=0)
# intersections and union
intersections = np.dot(masks1.T, masks2)
union = area1[:, None] + area2[None, :] - intersections
overlaps = intersections / union
return overlaps
def compute_mean_l1_coord_diff(mask1, mask2, coord1, coord2, synset, cls_id):
'''Computes IoU overlaps between two sets of masks.
mask1, mask2: [Height, Width]
coord1, coord2: [Height, Width, 3]
'''
# flatten masks
num_pixels = mask1.shape[0] * mask1.shape[1]
mask1 = np.reshape(mask1 > .5, (-1)).astype(np.float32)
mask2 = np.reshape(mask2 > .5, (-1)).astype(np.float32)
coord1 = np.reshape(coord1, (-1, 3)).astype(np.float32)
coord2 = np.reshape(coord2, (-1, 3)).astype(np.float32)
# intersections and union
intersections = np.logical_and(mask1, mask2)
num_pixel_intersection = len(np.where(intersections)[0])
pts1 = coord1[intersections, :].transpose() - 0.5
pts2 = coord2[intersections, :].transpose() - 0.5
def rotation_y_matrix(theta):
rotation_matrix = \
np.array([ np.cos(theta), 0, np.sin(theta),
0, 1, 0,
-np.sin(theta), 0, np.cos(theta)])
rotation_matrix = np.reshape(rotation_matrix, (3, 3))
return rotation_matrix
if synset[cls_id] in ['bottle', 'bowl', 'can']:
M = 20
pts1_symmetry = np.zeros(pts1.shape+(M,)) ## shape: (3, N, 6)
for i in range(M):
rotated_pts1 = rotation_y_matrix(float(i)*np.float32(2*math.pi/M)) @ pts1
pts1_symmetry[:, :, i] = rotated_pts1
pts2_reshape = pts2.reshape([3, -1, 1])
mean_dists = np.mean(np.linalg.norm(pts1_symmetry - pts2_reshape, axis=0), axis=0)
mean_dist = np.amin(mean_dists)
elif synset[cls_id] in ['phone']:
pts1_symmetry = np.zeros(pts1.shape+(2,))
for i in range(2):
rotated_pts1 = rotation_y_matrix(float(i)*np.float32(2*math.pi/2)) @ pts1
#print(rotated_pts1)
pts1_symmetry[:, :, i] = rotated_pts1
pts2_reshape = pts2.reshape([3, -1, 1])
mean_dists = np.mean(np.linalg.norm(pts1_symmetry - pts2_reshape, axis=0), axis=0)
mean_dist = np.amin(mean_dists)
else:
#print(synset[cls_id])
diff = pts1 - pts2
dist = np.linalg.norm(diff, axis=0)
assert dist.shape[0] == num_pixel_intersection
mean_dist = np.mean(dist)
mean_l1_coord_diff = mean_dist
#print(mean_l1_coord_diff, pts1.shape[0])
return mean_l1_coord_diff
def compute_3d_iou(bbox_3d_1, bbox_3d_2, handle_visibility, class_name_1, class_name_2):
'''Computes IoU overlaps between two 3d bboxes.
bbox_3d_1, bbox_3d_1: [3, 8]
'''
# flatten masks
def asymmetric_3d_iou(bbox_3d_1, bbox_3d_2):
bbox_1_max = np.amax(bbox_3d_1, axis=0)
bbox_1_min = np.amin(bbox_3d_1, axis=0)
bbox_2_max = np.amax(bbox_3d_2, axis=0)
bbox_2_min = np.amin(bbox_3d_2, axis=0)
overlap_min = np.maximum(bbox_1_min, bbox_2_min)
overlap_max = np.minimum(bbox_1_max, bbox_2_max)
# intersections and union
if np.amin(overlap_max - overlap_min) <0:
intersections = 0
else:
intersections = np.prod(overlap_max - overlap_min)
union = np.prod(bbox_1_max - bbox_1_min) + np.prod(bbox_2_max - bbox_2_min) - intersections
overlaps = intersections / union
return overlaps
if bbox_3d_1 is None or bbox_3d_2 is None:
return -1
symmetry_flag = False
if class_name_1 in ['bottle', 'bowl', 'can'] and class_name_1 == class_name_2:
symmetry_flag = True
if class_name_1 == 'mug' and class_name_1 == class_name_2 and handle_visibility==0:
symmetry_flag = True
if symmetry_flag:
print('*'*10)
n = 20
theta = 2*math.pi/n
y_rotation_matrix = np.array([[np.cos(theta), 0, np.sin(theta)],
[0, 1, 0],
[-np.sin(theta), 0, np.cos(theta)]])
max_iou = 0
for i in range(n):
bbox_center = np.mean(bbox_3d_1, -1, keepdims=True)
bbox_3d_1 = y_rotation_matrix @ (bbox_3d_1 - bbox_center) + bbox_center
max_iou = max(max_iou, asymmetric_3d_iou(bbox_3d_1, bbox_3d_2))
return max_iou
else:
return asymmetric_3d_iou(bbox_3d_1, bbox_3d_2)
def compute_3d_iou_new(RT_1, RT_2, scales_1, scales_2, handle_visibility, class_name_1, class_name_2):
'''Computes IoU overlaps between two 3d bboxes.
bbox_3d_1, bbox_3d_1: [3, 8]
'''
# flatten masks
def asymmetric_3d_iou(RT_1, RT_2, scales_1, scales_2):
noc_cube_1 = get_3d_bbox(scales_1, 0)
bbox_3d_1 = transform_coordinates_3d(noc_cube_1, RT_1)
noc_cube_2 = get_3d_bbox(scales_2, 0)
bbox_3d_2 = transform_coordinates_3d(noc_cube_2, RT_2)
bbox_1_max = np.amax(bbox_3d_1, axis=0)
bbox_1_min = np.amin(bbox_3d_1, axis=0)
bbox_2_max = np.amax(bbox_3d_2, axis=0)
bbox_2_min = np.amin(bbox_3d_2, axis=0)
overlap_min = np.maximum(bbox_1_min, bbox_2_min)
overlap_max = np.minimum(bbox_1_max, bbox_2_max)
# intersections and union
if np.amin(overlap_max - overlap_min) <0:
intersections = 0
else:
intersections = np.prod(overlap_max - overlap_min)
union = np.prod(bbox_1_max - bbox_1_min) + np.prod(bbox_2_max - bbox_2_min) - intersections
overlaps = intersections / union
return overlaps
if RT_1 is None or RT_2 is None:
return -1
symmetry_flag = False
if (class_name_1 in ['bottle', 'bowl', 'can'] and class_name_1 == class_name_2) or (class_name_1 == 'mug' and class_name_1 == class_name_2 and handle_visibility==0):
print('*'*10)
noc_cube_1 = get_3d_bbox(scales_1, 0)
noc_cube_2 = get_3d_bbox(scales_2, 0)
bbox_3d_2 = transform_coordinates_3d(noc_cube_2, RT_2)
def y_rotation_matrix(theta):
return np.array([[np.cos(theta), 0, np.sin(theta), 0],
[0, 1, 0 , 0],
[-np.sin(theta), 0, np.cos(theta), 0],
[0, 0, 0 , 1]])
n = 20
max_iou = 0
for i in range(n):
rotated_RT_1 = RT_1@y_rotation_matrix(2*math.pi*i/float(n))
max_iou = max(max_iou,
asymmetric_3d_iou(rotated_RT_1, RT_2, scales_1, scales_2))
else:
max_iou = asymmetric_3d_iou(RT_1, RT_2, scales_1, scales_2)
return max_iou
def compute_RT_distances(RT_1, RT_2):
'''
:param RT_1: [4, 4]. homogeneous affine transformation
:param RT_2: [4, 4]. homogeneous affine transformation
:return: theta: angle difference of R in degree, shift: l2 difference of T in centimeter
'''
#print(RT_1[3, :], RT_2[3, :])
## make sure the last row is [0, 0, 0, 1]
if RT_1 is None or RT_2 is None:
return -1
try:
assert np.array_equal(RT_1[3, :], RT_2[3, :])
assert np.array_equal(RT_1[3, :], np.array([0, 0, 0, 1]))
except AssertionError:
print(RT_1[3, :], RT_2[3, :])
R1 = RT_1[:3, :3]/np.cbrt(np.linalg.det(RT_1[:3, :3]))
T1 = RT_1[:3, 3]
R2 = RT_2[:3, :3]/np.cbrt(np.linalg.det(RT_2[:3, :3]))
T2 = RT_2[:3, 3]
R = R1 @ R2.transpose()
theta = np.arccos((np.trace(R) - 1)/2) * 180/np.pi
shift = np.linalg.norm(T1-T2) * 100
# print(theta, shift)
if theta < 5 and shift < 5:
return 10 - theta - shift
else:
return -1
def compute_RT_degree_cm_symmetry(RT_1, RT_2, class_id, handle_visibility, synset_names):
'''
:param RT_1: [4, 4]. homogeneous affine transformation
:param RT_2: [4, 4]. homogeneous affine transformation
:return: theta: angle difference of R in degree, shift: l2 difference of T in centimeter
synset_names = ['BG', # 0
'bottle', # 1
'bowl', # 2
'camera', # 3
'can', # 4
'cap', # 5
'phone', # 6
'monitor', # 7
'laptop', # 8
'mug' # 9
]
synset_names = ['BG', # 0
'bottle', # 1
'bowl', # 2
'camera', # 3
'can', # 4
'laptop', # 5
'mug' # 6
]
'''
## make sure the last row is [0, 0, 0, 1]
if RT_1 is None or RT_2 is None:
return -1
try:
assert np.array_equal(RT_1[3, :], RT_2[3, :])
assert np.array_equal(RT_1[3, :], np.array([0, 0, 0, 1]))
except AssertionError:
print(RT_1[3, :], RT_2[3, :])
exit()
R1 = RT_1[:3, :3] / np.cbrt(np.linalg.det(RT_1[:3, :3]))
T1 = RT_1[:3, 3]
R2 = RT_2[:3, :3] / np.cbrt(np.linalg.det(RT_2[:3, :3]))
T2 = RT_2[:3, 3]
print(T1,T2)
# try:
# assert np.abs(np.linalg.det(R1) - 1) < 0.01
# assert np.abs(np.linalg.det(R2) - 1) < 0.01
# except AssertionError:
# print(np.linalg.det(R1), np.linalg.det(R2))
if synset_names[class_id] in ['bottle', 'can', 'bowl']: ## symmetric when rotating around y-axis
y = np.array([0, 1, 0])
y1 = R1 @ y
y2 = R2 @ y
theta = np.arccos(y1.dot(y2) / (np.linalg.norm(y1) * np.linalg.norm(y2)))
elif synset_names[class_id] == 'mug' and handle_visibility==0: ## symmetric when rotating around y-axis
y = np.array([0, 1, 0])
y1 = R1 @ y
y2 = R2 @ y
theta = np.arccos(y1.dot(y2) / (np.linalg.norm(y1) * np.linalg.norm(y2)))
elif synset_names[class_id] in ['phone', 'eggbox', 'glue']:
y_180_RT = np.diag([-1.0, 1.0, -1.0])
R = R1 @ R2.transpose()
R_rot = R1 @ y_180_RT @ R2.transpose()
theta = min(np.arccos((np.trace(R) - 1) / 2),
np.arccos((np.trace(R_rot) - 1) / 2))
else:
R = R1 @ R2.transpose()
theta = np.arccos((np.trace(R) - 1) / 2)
theta *= 180 / np.pi
shift = np.linalg.norm(T1 - T2) * 100
result = np.array([theta, shift])
return result
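# --- Added illustrative sketch (not part of the original source). ---
# compute_RT_degree_cm_symmetry returns [rotation error in degrees, translation error in cm].
# The synset list below mirrors the one quoted in the docstring and is only for illustration:
def _demo_pose_error():
    synset_names = ['BG', 'bottle', 'bowl', 'camera', 'can', 'laptop', 'mug']
    RT_1 = np.eye(4)
    RT_2 = np.eye(4)
    RT_2[:3, 3] = [0.0, 0.0, 0.05]             # 5 cm translation offset, identical rotation
    # class_id 3 ('camera') is asymmetric, so the full rotation difference is used
    return compute_RT_degree_cm_symmetry(RT_1, RT_2, 3, 1, synset_names)   # ~[0.0, 5.0]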
def compute_RT_projection_2d_symmetry(RT_1, RT_2, class_id, handle_visibility, mesh_vertices, intrinsics, synset_names, num_rotation=20):
'''
:param RT_1: [4, 4]. homogeneous affine transformation
:param RT_2: [4, 4]. homogeneous affine transformation
:param vertices: [3, N].
:param intrinsics: [4, 4]
:return: mean 2d projection distance in pixel
synset_names = ['BG', # 0
'bottle', # 1
'bowl', # 2
'camera', # 3
'can', # 4
'laptop', # 5
'mug' # 6
]
'''
## make sure the last row is [0, 0, 0, 1]
if RT_1 is None or RT_2 is None:
return -1
try:
assert np.array_equal(RT_1[3, :], RT_2[3, :])
assert np.array_equal(RT_1[3, :], np.array([0, 0, 0, 1]))
except AssertionError:
print(RT_1[3, :], RT_2[3, :])
exit()
RT_1[:3, :3] = RT_1[:3, :3]/np.cbrt(np.linalg.det(RT_1[:3, :3]))
R1 = RT_1[:3, :3]
#T1 = RT_1[:3, 3]
RT_2[:3, :3] = RT_2[:3, :3]/np.cbrt(np.linalg.det(RT_2[:3, :3]))
R2 = RT_2[:3, :3]
#T2 = RT_2[:3, 3]
try:
assert np.abs(np.linalg.det(R1) - 1) < 0.01
assert np.abs(np.linalg.det(R2) - 1) < 0.01
except AssertionError:
print(np.linalg.det(R1), np.linalg.det(R2))
# check the vertices are in meter unit
vertices = np.copy(mesh_vertices)/1000
assert np.amax(vertices) < 0.5, np.amax(vertices)
assert np.amax(vertices) > 0, np.amax(vertices)
assert np.amin(vertices) < 0, np.amin(vertices)
assert np.amin(vertices) > -0.5, np.amin(vertices)
assert vertices.shape[0] == 3
num_vertices = vertices.shape[1]
coords_3d_1 = transform_coordinates_3d(vertices, RT_1)
projected_1 = calculate_2d_projections(coords_3d_1, intrinsics)
coords_3d_2 = transform_coordinates_3d(vertices, RT_2)
projected_2 = calculate_2d_projections(coords_3d_2, intrinsics)
# calculate reprojection 2d error
dists = np.linalg.norm(projected_1 - projected_2, axis=1)
assert len(dists) == num_vertices
min_mean_dist = np.mean(dists)
## take care of symmetry categories
# freely rotate around y axis
if (synset_names[class_id] in ['bottle', 'can', 'bowl']) or (synset_names[class_id] == 'mug' and handle_visibility==0):
def y_rotation_matrix(theta):
return np.array([[np.cos(theta), 0, np.sin(theta)],
[0, 1, 0],
[-np.sin(theta), 0, np.cos(theta)]])
for i in range(1, num_rotation):
theta = 2*math.pi*i/float(num_rotation)
coords_3d_2 = transform_coordinates_3d(y_rotation_matrix(theta)@vertices, RT_2)
projected_2 = calculate_2d_projections(coords_3d_2, intrinsics)
dists = np.linalg.norm(projected_1 - projected_2, axis=1)
assert len(dists) == num_vertices
min_mean_dist = min(min_mean_dist, np.mean(dists))
# rotate 180 around y axis
elif synset_names[class_id] in ['phone']:
y_180_RT = np.diag([-1.0, 1.0, -1.0])
coords_3d_2 = transform_coordinates_3d(y_180_RT@vertices, RT_2)
projected_2 = calculate_2d_projections(coords_3d_2, intrinsics)
dists = np.linalg.norm(projected_1 - projected_2, axis=1)
assert len(dists) == num_vertices
min_mean_dist = min(min_mean_dist, np.mean(dists))
# rotate 180 around z axis
elif synset_names[class_id] in ['eggbox', 'glue']:
z_180_RT = np.diag([-1.0, -1.0, 1.0])
coords_3d_2 = transform_coordinates_3d(z_180_RT@vertices, RT_2)
projected_2 = calculate_2d_projections(coords_3d_2, intrinsics)
dists = np.linalg.norm(projected_1 - projected_2, axis=1)
assert len(dists) == num_vertices
min_mean_dist = min(min_mean_dist, np.mean(dists))
else: ## normal asymmetric objects
min_mean_dist = min_mean_dist
return min_mean_dist
def non_max_suppression(boxes, scores, threshold):
"""Performs non-maximum suppression and returns indices of kept boxes.
    boxes: [N, (y1, x1, y2, x2)]. Notice that (y2, x2) lies outside the box.
scores: 1-D array of box scores.
threshold: Float. IoU threshold to use for filtering.
"""
assert boxes.shape[0] > 0
if boxes.dtype.kind != "f":
boxes = boxes.astype(np.float32)
# Compute box areas
y1 = boxes[:, 0]
x1 = boxes[:, 1]
y2 = boxes[:, 2]
x2 = boxes[:, 3]
area = (y2 - y1) * (x2 - x1)
# Get indicies of boxes sorted by scores (highest first)
ixs = scores.argsort()[::-1]
pick = []
while len(ixs) > 0:
# Pick top box and add its index to the list
i = ixs[0]
pick.append(i)
# Compute IoU of the picked box with the rest
iou = compute_iou(boxes[i], boxes[ixs[1:]], area[i], area[ixs[1:]])
# Identify boxes with IoU over the threshold. This
# returns indicies into ixs[1:], so add 1 to get
# indicies into ixs.
remove_ixs = np.where(iou > threshold)[0] + 1
# Remove indicies of the picked and overlapped boxes.
ixs = np.delete(ixs, remove_ixs)
ixs = np.delete(ixs, 0)
return np.array(pick, dtype=np.int32)
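# --- Added illustrative sketch (not part of the original source). ---
# Greedy NMS keeps the highest-scoring box and drops boxes overlapping it above the threshold:
def _demo_non_max_suppression():
    boxes = np.array([[0, 0, 10, 10], [1, 1, 11, 11], [20, 20, 30, 30]], dtype=np.float32)
    scores = np.array([0.9, 0.8, 0.7])
    return non_max_suppression(boxes, scores, threshold=0.5)   # -> array([0, 2])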
def apply_box_deltas(boxes, deltas):
"""Applies the given deltas to the given boxes.
boxes: [N, (y1, x1, y2, x2)]. Note that (y2, x2) is outside the box.
deltas: [N, (dy, dx, log(dh), log(dw))]
"""
boxes = boxes.astype(np.float32)
# Convert to y, x, h, w
height = boxes[:, 2] - boxes[:, 0]
width = boxes[:, 3] - boxes[:, 1]
center_y = boxes[:, 0] + 0.5 * height
center_x = boxes[:, 1] + 0.5 * width
# Apply deltas
center_y += deltas[:, 0] * height
center_x += deltas[:, 1] * width
height *= np.exp(deltas[:, 2])
width *= np.exp(deltas[:, 3])
# Convert back to y1, x1, y2, x2
y1 = center_y - 0.5 * height
x1 = center_x - 0.5 * width
y2 = y1 + height
x2 = x1 + width
return np.stack([y1, x1, y2, x2], axis=1)
def box_refinement_graph(box, gt_box):
"""Compute refinement needed to transform box to gt_box.
box and gt_box are [N, (y1, x1, y2, x2)]
"""
box = tf.cast(box, tf.float32)
gt_box = tf.cast(gt_box, tf.float32)
height = box[:, 2] - box[:, 0]
width = box[:, 3] - box[:, 1]
center_y = box[:, 0] + 0.5 * height
center_x = box[:, 1] + 0.5 * width
gt_height = gt_box[:, 2] - gt_box[:, 0]
gt_width = gt_box[:, 3] - gt_box[:, 1]
gt_center_y = gt_box[:, 0] + 0.5 * gt_height
gt_center_x = gt_box[:, 1] + 0.5 * gt_width
dy = (gt_center_y - center_y) / height
dx = (gt_center_x - center_x) / width
dh = tf.log(gt_height / height)
dw = tf.log(gt_width / width)
result = tf.stack([dy, dx, dh, dw], axis=1)
return result
def box_refinement(box, gt_box):
"""Compute refinement needed to transform box to gt_box.
box and gt_box are [N, (y1, x1, y2, x2)]. (y2, x2) is
assumed to be outside the box.
"""
box = box.astype(np.float32)
gt_box = gt_box.astype(np.float32)
height = box[:, 2] - box[:, 0]
width = box[:, 3] - box[:, 1]
center_y = box[:, 0] + 0.5 * height
center_x = box[:, 1] + 0.5 * width
gt_height = gt_box[:, 2] - gt_box[:, 0]
gt_width = gt_box[:, 3] - gt_box[:, 1]
gt_center_y = gt_box[:, 0] + 0.5 * gt_height
gt_center_x = gt_box[:, 1] + 0.5 * gt_width
dy = (gt_center_y - center_y) / height
dx = (gt_center_x - center_x) / width
dh = np.log(gt_height / height)
dw = np.log(gt_width / width)
return np.stack([dy, dx, dh, dw], axis=1)
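# --- Added illustrative sketch (not part of the original source). ---
# box_refinement and apply_box_deltas are inverses: applying the computed deltas
# to the anchor boxes recovers the ground-truth boxes.
def _demo_box_refinement_roundtrip():
    box = np.array([[10., 10., 50., 60.]])
    gt_box = np.array([[12., 8., 48., 64.]])
    deltas = box_refinement(box, gt_box)
    return apply_box_deltas(box, deltas)       # ~= gt_box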
def get_3d_bbox(scale, shift = 0):
"""
Input:
scale: [3] or scalar
shift: [3] or scalar
Return
bbox_3d: [3, N]
"""
if hasattr(scale, "__iter__"):
bbox_3d = np.array([[scale[0] / 2, +scale[1] / 2, scale[2] / 2],
[scale[0] / 2, +scale[1] / 2, -scale[2] / 2],
[-scale[0] / 2, +scale[1] / 2, scale[2] / 2],
[-scale[0] / 2, +scale[1] / 2, -scale[2] / 2],
[+scale[0] / 2, -scale[1] / 2, scale[2] / 2],
[+scale[0] / 2, -scale[1] / 2, -scale[2] / 2],
[-scale[0] / 2, -scale[1] / 2, scale[2] / 2],
[-scale[0] / 2, -scale[1] / 2, -scale[2] / 2]]) + shift
else:
bbox_3d = np.array([[scale / 2, +scale / 2, scale / 2],
[scale / 2, +scale / 2, -scale / 2],
[-scale / 2, +scale / 2, scale / 2],
[-scale / 2, +scale / 2, -scale / 2],
[+scale / 2, -scale / 2, scale / 2],
[+scale / 2, -scale / 2, -scale / 2],
[-scale / 2, -scale / 2, scale / 2],
[-scale / 2, -scale / 2, -scale / 2]]) +shift
bbox_3d = bbox_3d.transpose()
return bbox_3d
def transform_coordinates_3d(coordinates, RT):
"""
Input:
coordinates: [3, N]
RT: [4, 4]
Return
new_coordinates: [3, N]
"""
assert coordinates.shape[0] == 3
coordinates = np.vstack([coordinates, np.ones((1, coordinates.shape[1]), dtype=np.float32)])
new_coordinates = RT @ coordinates
new_coordinates = new_coordinates[:3, :]/new_coordinates[3, :]
return new_coordinates
def calculate_2d_projections(coordinates_3d, intrinsics):
"""
Input:
coordinates: [3, N]
intrinsics: [3, 3]
Return
projected_coordinates: [N, 2]
"""
projected_coordinates = intrinsics @ coordinates_3d
projected_coordinates = projected_coordinates[:2, :] / projected_coordinates[2, :]
projected_coordinates = projected_coordinates.transpose()
projected_coordinates = np.array(projected_coordinates, dtype=np.int32)
return projected_coordinates
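# --- Added illustrative sketch (not part of the original source). ---
# Typical usage of the three helpers above: build a canonical 3D box, move it with a
# pose RT, and project it to pixels. The intrinsics matrix here is an assumed example.
def _demo_project_3d_bbox():
    intrinsics = np.array([[600., 0., 320.],
                           [0., 600., 240.],
                           [0., 0., 1.]])
    RT = np.eye(4)
    RT[2, 3] = 1.0                             # place the object 1 m in front of the camera
    bbox_3d = get_3d_bbox(np.array([0.2, 0.1, 0.3]), shift=0)   # [3, 8] box corners
    cam_pts = transform_coordinates_3d(bbox_3d, RT)
    return calculate_2d_projections(cam_pts, intrinsics)        # [8, 2] pixel coordinates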
############################################################
# IMAGE AUGMENTATION
############################################################
def calculate_rotation(image_size, angle):
image_center = tuple(np.array(image_size) / 2)
# Convert the OpenCV 3x2 rotation matrix to 3x3
rot_mat = np.vstack(
[cv2.getRotationMatrix2D(image_center, angle, 1.0), [0, 0, 1]]
)
rot_mat_notranslate = np.matrix(rot_mat[0:2, 0:2])
# Shorthand for below calcs
image_w2 = image_size[0] * 0.5
image_h2 = image_size[1] * 0.5
# Obtain the rotated coordinates of the image corners
rotated_coords = [
(np.array([-image_w2, image_h2]) * rot_mat_notranslate).A[0],
(np.array([image_w2, image_h2]) * rot_mat_notranslate).A[0],
(np.array([-image_w2, -image_h2]) * rot_mat_notranslate).A[0],
(np.array([image_w2, -image_h2]) * rot_mat_notranslate).A[0]
]
# Find the size of the new image
x_coords = [pt[0] for pt in rotated_coords]
x_pos = [x for x in x_coords if x > 0]
x_neg = [x for x in x_coords if x < 0]
y_coords = [pt[1] for pt in rotated_coords]
y_pos = [y for y in y_coords if y > 0]
y_neg = [y for y in y_coords if y < 0]
right_bound = max(x_pos)
left_bound = min(x_neg)
top_bound = max(y_pos)
bot_bound = min(y_neg)
new_w = int(abs(right_bound - left_bound))
new_h = int(abs(top_bound - bot_bound))
# We require a translation matrix to keep the image centred
trans_mat = np.matrix([
[1, 0, int(new_w * 0.5 - image_w2)],
[0, 1, int(new_h * 0.5 - image_h2)],
[0, 0, 1]
])
    # Compute the transform for the combined rotation and translation
affine_mat = (np.matrix(trans_mat) * np.matrix(rot_mat))[0:2, :]
return new_w, new_h, affine_mat
def rotate_image(image, new_w, new_h, affine_mat, interpolation=cv2.INTER_LINEAR):
"""
    Rotates an OpenCV 2 / NumPy image about its centre by the given angle
(in degrees). The returned image will be large enough to hold the entire
new image, with a black background
"""
    # Note: the image size and rotation matrix are computed in calculate_rotation;
    # NumPy stores image matrices as (height, width), i.e. "backwards" relative to (w, h)
# Apply the transform
result = cv2.warpAffine(
image,
affine_mat,
(new_w, new_h),
flags=interpolation
)
return result
def largest_rotated_rect(w, h, angle):
"""
Given a rectangle of size wxh that has been rotated by 'angle' (in
radians), computes the width and height of the largest possible
axis-aligned rectangle within the rotated rectangle.
Original JS code by 'Andri' and <NAME> from Stack Overflow
Converted to Python by <NAME>
"""
quadrant = int(math.floor(angle / (math.pi / 2))) & 3
sign_alpha = angle if ((quadrant & 1) == 0) else math.pi - angle
alpha = (sign_alpha % math.pi + math.pi) % math.pi
bb_w = w * math.cos(alpha) + h * math.sin(alpha)
bb_h = w * math.sin(alpha) + h * math.cos(alpha)
gamma = math.atan2(bb_w, bb_w) if (w < h) else math.atan2(bb_w, bb_w)
delta = math.pi - alpha - gamma
length = h if (w < h) else w
d = length * math.cos(alpha)
a = d * math.sin(alpha) / math.sin(delta)
y = a * math.cos(gamma)
x = y * math.tan(gamma)
return (
bb_w - 2 * x,
bb_h - 2 * y
)
def crop_around_center(image, width, height):
"""
Given a NumPy / OpenCV 2 image, crops it to the given width and height,
    around its centre point
"""
image_size = (image.shape[1], image.shape[0])
image_center = (int(image_size[0] * 0.5), int(image_size[1] * 0.5))
if(width > image_size[0]):
width = image_size[0]
if(height > image_size[1]):
height = image_size[1]
x1 = int(image_center[0] - width * 0.5)
x2 = int(image_center[0] + width * 0.5)
y1 = int(image_center[1] - height * 0.5)
y2 = int(image_center[1] + height * 0.5)
return image[y1:y2, x1:x2]
def rotate_and_crop(image, rotate_degree, interpolation):
image_height, image_width = image.shape[0:2]
new_w, new_h, affine_mat = calculate_rotation(image.shape[0:2][::-1], rotate_degree)
image_rotated = rotate_image(image, new_w, new_h, affine_mat, interpolation)
image_rotated_cropped = crop_around_center(
image_rotated,
*largest_rotated_rect(
image_width,
image_height,
math.radians(rotate_degree)
)
)
return image_rotated_cropped
def rotate_and_crop_images(image, masks, coords, rotate_degree):
image_height, image_width = image.shape[0:2]
new_w, new_h, affine_mat = calculate_rotation(image.shape[0:2][::-1], rotate_degree)
image_rotated = rotate_image(image, new_w, new_h, affine_mat, cv2.INTER_LINEAR)
mask_rotated = rotate_image(masks, new_w, new_h, affine_mat, cv2.INTER_NEAREST)
rect = largest_rotated_rect(
image_width,
image_height,
math.radians(rotate_degree)
)
image_rotated_cropped = crop_around_center(image_rotated, *rect)
mask_rotated_cropped = crop_around_center(mask_rotated, *rect)
image_rotated_cropped = cv2.resize(image_rotated_cropped, (image_width, image_height),interpolation=cv2.INTER_LINEAR)
mask_rotated_cropped = cv2.resize(mask_rotated_cropped, (image_width, image_height), interpolation=cv2.INTER_NEAREST)
if coords is not None:
coord_rotated = rotate_image(coords, new_w, new_h, affine_mat, cv2.INTER_NEAREST)
coord_rotated_cropped = crop_around_center(coord_rotated, *rect)
coord_rotated_cropped = cv2.resize(coord_rotated_cropped, (image_width, image_height), interpolation=cv2.INTER_NEAREST)
return image_rotated_cropped, mask_rotated_cropped, coord_rotated_cropped
else:
return image_rotated_cropped, mask_rotated_cropped
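# --- Added illustrative sketch (not part of the original source). ---
# rotate_and_crop_images rotates image and masks together, crops to the largest
# axis-aligned rectangle, and resizes back to the original resolution:
def _demo_rotate_and_crop_images():
    img = np.zeros((100, 120, 3), dtype=np.uint8)
    masks = np.zeros((100, 120), dtype=np.uint8)
    img_rot, mask_rot = rotate_and_crop_images(img, masks, coords=None, rotate_degree=15)
    return img_rot.shape, mask_rot.shape       # both back at the original (100, 120[, 3]) size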
############################################################
# Dataset
############################################################
class Dataset(object):
"""The base class for dataset classes.
To use it, create a new class that adds functions specific to the dataset
you want to use. For example:
class CatsAndDogsDataset(Dataset):
def load_cats_and_dogs(self):
...
def load_mask(self, image_id):
...
def image_reference(self, image_id):
...
See COCODataset and ShapesDataset as examples.
"""
def __init__(self, class_map=None):
self._image_ids = []
self.image_info = []
# Background is always the first class
self.class_info = [{"source": "", "id": 0, "name": "BG"}]
self.source_class_ids = {}
def add_class(self, source, class_id, class_name):
assert "." not in source, "Source name cannot contain a dot"
# Does the class exist already?
for info in self.class_info:
if info['source'] == source and info["id"] == class_id:
# source.class_id combination already available, skip
return
# Add the class
self.class_info.append({
"source": source,
"id": class_id,
"name": class_name,
})
def add_image(self, source, image_id, path, **kwargs):
image_info = {
"id": image_id,
"source": source,
"path": path,
}
image_info.update(kwargs)
self.image_info.append(image_info)
def image_reference(self, image_id):
"""Return a link to the image in its source Website or details about
the image that help looking it up or debugging it.
Override for your dataset, but pass to this function
if you encounter images not in your dataset.
"""
return ""
def prepare(self, class_map=None):
"""Prepares the Dataset class for use.d
"""
def clean_name(name):
"""Returns a shorter version of object names for cleaner display."""
return ",".join(name.split(",")[:1])
# Build (or rebuild) everything else from the info dicts.
#self.num_classes = len(self.class_info)
self.num_classes = 0
#self.class_ids = np.arange(self.num_classes)
self.class_ids = []
#self.class_names = [clean_name(c["name"]) for c in self.class_info]
self.class_names = []
#self.class_from_source_map = {"{}.{}".format(info['source'], info['id']): id
# for info, id in zip(self.class_info, self.class_ids)}
self.class_from_source_map = {}
for cls_info in self.class_info:
source = cls_info["source"]
if source == 'coco':
map_key = "{}.{}".format(cls_info['source'], cls_info['id'])
self.class_from_source_map[map_key] = self.class_names.index(class_map[cls_info["name"]])
else:
self.class_ids.append(self.num_classes)
self.num_classes += 1
self.class_names.append(cls_info["name"])
map_key = "{}.{}".format(cls_info['source'], cls_info['id'])
self.class_from_source_map[map_key] = self.class_ids[-1]
self.num_images = len(self.image_info)
self._image_ids = np.arange(self.num_images)
# Mapping from source class and image IDs to internal IDs
self.image_from_source_map = {"{}.{}".format(info['source'], info['id']): id
for info, id in zip(self.image_info, self.image_ids)}
# Map sources to class_ids they support
self.sources = list(set([i['source'] for i in self.class_info]))
'''
self.source_class_ids = {}
# Loop over datasets
for source in self.sources:
self.source_class_ids[source] = []
# Find classes that belong to this dataset
for i, info in enumerate(self.class_info):
# Include BG class in all datasets
if i == 0 or source == info['source']:
self.source_class_ids[source].append(i)
'''
print(self.class_names)
print(self.class_from_source_map)
print(self.sources)
#print(self.source_class_ids)
def map_source_class_id(self, source_class_id):
"""Takes a source class ID and returns the int class ID assigned to it.
For example:
dataset.map_source_class_id("coco.12") -> 23
"""
return self.class_from_source_map[source_class_id] if source_class_id in self.class_from_source_map else None
def get_source_class_id(self, class_id, source):
"""Map an internal class ID to the corresponding class ID in the source dataset."""
info = self.class_info[class_id]
assert info['source'] == source
return info['id']
def append_data(self, class_info, image_info):
self.external_to_class_id = {}
for i, c in enumerate(self.class_info):
for ds, id in c["map"]:
self.external_to_class_id[ds + str(id)] = i
# Map external image IDs to internal ones.
self.external_to_image_id = {}
for i, info in enumerate(self.image_info):
self.external_to_image_id[info["ds"] + str(info["id"])] = i
@property
def image_ids(self):
return self._image_ids
def source_image_link(self, image_id):
"""Returns the path or URL to the image.
        Override this to return a URL to the image if it's available online for easy
debugging.
"""
return self.image_info[image_id]["path"]
def load_image(self, image_id):
"""Load the specified image and return a [H,W,3] Numpy array.
"""
# Load image
image = scipy.misc.imread(self.image_info[image_id]['path'])
# If grayscale. Convert to RGB for consistency.
if image.ndim != 3:
image = skimage.color.gray2rgb(image)
return image
def load_mask(self, image_id):
"""Load instance masks for the given image.
Different datasets use different ways to store masks. Override this
        method to load instance masks and return them in the form of an
array of binary masks of shape [height, width, instances].
Returns:
masks: A bool array of shape [height, width, instance count] with
a binary mask per instance.
class_ids: a 1D array of class IDs of the instance masks.
"""
# Override this function to load a mask from your dataset.
# Otherwise, it returns an empty mask.
mask = np.empty([0, 0, 0])
class_ids = np.empty([0], np.int32)
return mask, class_ids
def resize_image(image, min_dim=None, max_dim=None, padding=False):
"""
Resizes an image keeping the aspect ratio.
min_dim: if provided, resizes the image such that it's smaller
dimension == min_dim
max_dim: if provided, ensures that the image longest side doesn't
exceed this value.
    padding: If true, pads image with zeros so its size is max_dim x max_dim
Returns:
image: the resized image
window: (y1, x1, y2, x2). If max_dim is provided, padding might
be inserted in the returned image. If so, this window is the
coordinates of the image part of the full image (excluding
the padding). The x2, y2 pixels are not included.
scale: The scale factor used to resize the image
padding: Padding added to the image [(top, bottom), (left, right), (0, 0)]
"""
# Default window (y1, x1, y2, x2) and default scale == 1.
h, w = image.shape[:2]
window = (0, 0, h, w)
scale = 1
# Scale?
if min_dim:
# Scale up but not down
scale = max(1, min_dim / min(h, w))
# Does it exceed max dim?
if max_dim:
image_max = max(h, w)
if round(image_max * scale) > max_dim:
scale = max_dim / image_max
# Resize image and mask
if scale != 1:
image = scipy.misc.imresize(
image, (round(h * scale), round(w * scale)))
# Need padding?
if padding:
# Get new height and width
h, w = image.shape[:2]
top_pad = (max_dim - h) // 2
bottom_pad = max_dim - h - top_pad
left_pad = (max_dim - w) // 2
right_pad = max_dim - w - left_pad
padding = [(top_pad, bottom_pad), (left_pad, right_pad), (0, 0)]
image = np.pad(image, padding, mode='constant', constant_values=0)
window = (top_pad, left_pad, h + top_pad, w + left_pad)
return image, window, scale, padding
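# --- Added illustrative sketch (not part of the original source). ---
# With padding=True the image is zero-padded to max_dim x max_dim and the returned
# window marks where the original pixels ended up (scale stays 1 in this example):
def _demo_resize_image():
    img = np.zeros((60, 80, 3), dtype=np.uint8)
    image, window, scale, padding = resize_image(img, min_dim=50, max_dim=100, padding=True)
    return image.shape, window, scale          # -> ((100, 100, 3), (20, 10, 80, 90), 1)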
def resize_mask(mask, scale, padding):
"""Resizes a mask using the given scale and padding.
Typically, you get the scale and padding from resize_image() to
    ensure that the image, the mask, and the coordinate map are resized consistently.
scale: mask scaling factor
padding: Padding to add to the mask in the form
[(top, bottom), (left, right), (0, 0)]
"""
h, w = mask.shape[:2]
# for instance mask
if len(mask.shape) == 3:
mask = scipy.ndimage.zoom(mask, zoom=[scale, scale, 1], order=0)
new_padding = padding
# for coordinate map
elif len(mask.shape) == 4:
mask = scipy.ndimage.zoom(mask, zoom=[scale, scale, 1, 1], order=0)
new_padding = padding + [(0, 0)]
else:
assert False
    mask = np.pad(mask, new_padding, mode='constant', constant_values=0)
    return mask
'''
FOREST CARBON BALANCE BY SPARSE GRID SAMPLING
'''
#%% Import modules
import os
import numpy as np
import pandas as pd
import geopandas as gpd
import fiona
import matplotlib.pyplot as plt
import numpy.matlib as ml
import time
from shapely.geometry import Polygon,Point
from fcgadgets.macgyver import utilities_general as gu
from fcgadgets.macgyver import utilities_gis as gis
from fcgadgets.macgyver import utilities_inventory as invu
from fcgadgets.cbrunner import cbrun_utilities
#%% Define paths
meta={}
meta['Paths']={}
meta['Paths']['Project']=r'D:\Data\FCI_Projects\FCI_SparseGrid'
#meta['Paths']['Project']=r'D:\Data\FCI_Projects\SparseGrid_HighRes'
meta['Paths']['Geospatial']=meta['Paths']['Project'] + '\\Geospatial'
meta['Paths']['Results']=r'C:\Users\rhember\Documents\Data\ForestInventory\Results\20210401'
meta['Paths']['VRI']=r'C:\Users\rhember\Documents\Data\ForestInventory\VRI\20210401'
meta['Paths']['Disturbances']=r'C:\Users\rhember\Documents\Data\ForestInventory\Disturbances\20210401'
meta['Paths']['LandUse']=r'C:\Users\rhember\Documents\Data\ForestInventory\LandUse\20210401'
meta['Paths']['Taz Datasets']=r'C:\Users\rhember\Documents\Data\Taz Datasets'
# Save
gu.opickle(meta['Paths']['Project'] + '\\Inputs\\Metadata.pkl',meta)
#%% Define sparse grid sample
# Import TSA maps
zTSA=gis.OpenGeoTiff(r'C:\Users\rhember\Documents\Data\BC1ha\Admin\tsa.tif')
lut_tsa=pd.read_excel(r'C:\Users\rhember\Documents\Data\BC1ha\Admin\lut_tsa.xlsx')
tsa_boundaries=gpd.read_file(r'C:\Users\rhember\Documents\Data\TSA\tsa_boundaries.shp')
# Import land cover
zLC2=gis.OpenGeoTiff(r'C:\Users\rhember\Documents\Data\BC1ha\VRI\lc2.tif')
# Define regular grid sampling frequency
sfreq=100 # 10 km
#sfreq=50 # 5 km
#sfreq=20 # High res
# Extract subgrid
zTSA['X']=zTSA['X'][0::sfreq,0::sfreq]
zTSA['Y']=zTSA['Y'][0::sfreq,0::sfreq]
zTSA['m'],zTSA['n']=zTSA['X'].shape
zTSA['Data']=zTSA['Data'][0::sfreq,0::sfreq]
zLC2['Data']=zLC2['Data'][0::sfreq,0::sfreq]
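# --- Added illustrative note (not from the original script). ---
# The sparse grid is simply every sfreq-th cell of the 1 ha raster in both directions;
# the same slicing on a tiny array shows the effect:
def _demo_subgrid_slicing():
    z = np.arange(16).reshape(4, 4)
    return z[0::2, 0::2]                       # -> [[0, 2], [8, 10]]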
# Define additional inclusion criteria
# Treed, province-wide
iIreg=np.where( (zLC2['Data']==4) )
# Treed, Williams Lake TSA only
#iTSA=lut_tsa.loc[lut_tsa.Name=='Williams Lake TSA','VALUE'].values
#ind=np.where( (zLC2.Data==4) & (zTSA.Data==iTSA) )
# Save grid
flg=0
if flg==1:
z=zTSA.copy()
z['Data']=np.zeros(zTSA['Data'].shape,dtype='int16')
z['Data'][iIreg]=1 # Treed = 1
gis.SaveGeoTiff(z,meta['Paths']['Project'] + '\\Geospatial\\GridSXY.tiff')
plt.matshow(z['Data'])
#%% Generate sparse grid
# Apply filters to BC1ha grid
sxy={}
sxy['x']=zTSA['X'][iIreg]
sxy['y']=zTSA['Y'][iIreg]
sxy['ID_TSA']=zTSA['Data'][iIreg]
# Save to pickle file
gu.opickle(meta['Paths']['Geospatial'] + '\\sxy.pkl',sxy)
# Save as geojson
flg=1
if flg==1:
points=[]
for k in range(sxy['x'].size):
points.append(Point(sxy['x'][k],sxy['y'][k]))
gdf_sxy=gpd.GeoDataFrame({'geometry':points,'ID_TSA':sxy['ID_TSA']})
gdf_sxy.crs=tsa_boundaries.crs
gdf_sxy.to_file(meta['Paths']['Geospatial'] + '\\sxy.geojson',driver='GeoJSON')
#%% Plot
# Load basemap
gdf_bm=gpd.read_file(r'C:\Users\rhember\Documents\Data\Basemaps\Basemaps.gdb',layer='NRC_POLITICAL_BOUNDARIES_1M_SP')
plt.close('all')
fig,ax=plt.subplots(figsize=gu.cm2inch(7.8,6.6))
#mngr=plt.get_current_fig_manager()
#mngr.window.setGeometry(700,20,620,600)
gdf_bm.plot(ax=ax,facecolor=[0.8,0.8,0.8],edgecolor=[0,0,0],label='Political Boundary',linewidth=0.25,alpha=1)
tsa_boundaries.plot(ax=ax,facecolor='none',edgecolor=[0,0,0],linewidth=0.25)
gdf_sxy.plot(ax=ax,markersize=1,facecolor=[0,0,0.75],edgecolor=None,linewidth=0.75,alpha=1)
ax.grid(color='k',linestyle='-',linewidth=0.25)
#iP=2000
#for iD in range(len(nddat[iP])):
# x,y=nddat[iP][iD]['Geometry'].exterior.xy
# plt.plot(x,y,'r-')
ax.set(position=[0.01,0.01,0.98,0.98],xticks=[],yticks=[])
#plt.savefig(PathProject + '\\SparseGrid_Map.png',format='png',dpi=900)
##%% Get N deposition time series for each sparse grid cell
#
#tv=np.arange(1971,2021,1)
#ndep=np.zeros((tv.size,len(gdf_sxy)))
#for iMP in range(len(nddat)):
# print(iMP)
# it=np.where(tv==nddat[iMP][0]['Year'])[0]
# if it.size==0:
# continue
# FlagDone=np.zeros(len(gdf_sxy))
# for iD in range(len(nddat[iMP])):
# InPoly=gdf_sxy.within(nddat[iMP][iD]['Geometry'])
# ind=np.where( (InPoly==True) & (FlagDone==0) )[0]
# #gdf_sxy.loc[InPoly].plot(ax=ax,markersize=1,facecolor=[1,0,0.25],edgecolor=None,linewidth=0.75,alpha=1)
# if ind.size>0:
# ndep[it,ind]=ndep[it,ind]+nddat[iMP][iD]['N deposition']
# FlagDone[ind]=1
#
#plt.plot(tv,np.prctile(ndep,axis=1),'-k.')
#
#gu.opickle(r'C:\Users\rhember\Documents\Data\FCI_Projects\FertilizationSummaryNdep\Geospatial\ndep.pkl',ndep)
#%% Open crosswalk between missing AT geometries and opening geometries
# If this doesn't work, you need to run the script that creates the crosswalk
missing_geo_atu_list=gu.ipickle(meta['Paths']['Results'] + '\\missing_geo_atu_list.pkl')
missing_geo_op_geos=gu.ipickle(meta['Paths']['Results'] + '\\missing_geo_op_geos.pkl')
missing_geo_fc_geos=gu.ipickle(meta['Paths']['Results'] + '\\missing_geo_fc_geos.pkl')
#%% Import inventory layer information (names, variables, LUTs)
InvLyrInfo=invu.DefineInventoryLayersAndVariables()
for iLyr in range(len(InvLyrInfo)):
lut=gu.ipickle(InvLyrInfo[iLyr]['Path'] + '\\LUTs_' + InvLyrInfo[iLyr]['Layer Name'] +'.pkl')
for key in lut.keys():
InvLyrInfo[iLyr]['LUT'][key]=lut[key]
del lut
#%% Compile inventory layers
for iLyr in range(len(InvLyrInfo)):
# Loop through features in layer
t_start=time.time()
# Define path
path=InvLyrInfo[iLyr]['Path'] + '\\' + InvLyrInfo[iLyr]['File Name']
# Define layer
lyr_nam=InvLyrInfo[iLyr]['Layer Name']
print(lyr_nam)
# Don't run this for planting - planting is done separately below
if lyr_nam=='RSLT_PLANTING_SVW':
continue
# Initialize index to inventory
IdxToInv=[None]*sxy['x'].size
# Initialize inventory dictionary
L=5000000
data={}
data['IdxToSXY']=np.zeros(L,dtype=int)
for fnam,flag,dtype in InvLyrInfo[iLyr]['Field List']:
if dtype=='<U20':
data[fnam]=np.zeros(L,dtype=dtype)
else:
data[fnam]=-999*np.ones(L,dtype=dtype)
cnt_inventory=0
# Scan through layer file to extract selected variables, and convert string
# variables to numeric based on LUTs
cc=0
with fiona.open(path,layer=lyr_nam) as source:
for feat in source:
# Extract attributes and geometry
prp=feat['properties']
geom=feat['geometry']
# Fill missing AT spatial with OP spatial
if (lyr_nam=='RSLT_ACTIVITY_TREATMENT_SVW'):
# No need to record surveys
#if prp['SILV_BASE_CODE']=='SU':
# continue
# Populate missing ATU layer geometry with geometry from
# OPENING or FC layer where possible.
flg_geom_from_op=0
flg_geom_from_fc=0
if (geom==None):
# Check to see if the opening is listed in the AT missing dictionary
indMis=np.where( (missing_geo_atu_list['ACTIVITY_TREATMENT_UNIT_ID']==prp['ACTIVITY_TREATMENT_UNIT_ID']) )[0]
if indMis.size>0:
idx2fc=missing_geo_atu_list['IdxToFC'][indMis[0]]
if len(idx2fc)>0:
# Use forest cover geometries
geom={}
geom['coordinates']=[]
for i in range(len(idx2fc)):
geo0=missing_geo_fc_geos[prp['OPENING_ID']][idx2fc[i]]
if type(geo0)==dict:
geo1=geo0['coordinates']
geom['coordinates'].append(geo1[0])
else:
for j in range(len(geo0)):
geo1=geo0[j]['coordinates']
geom['coordinates'].append(geo1[0])
flg_geom_from_fc=1
# Plot (not working)
#flg=0
#if flg==1:
# plt.close('all')
# fig,ax=plt.subplots(1)
# gdf_fc=gpd.GeoDataFrame.from_features(feat_fc)
# gdf_fc.plot(ax=ax,facecolor='None',edgecolor='r',linewidth=1.25,linestyle='--')
if prp['OPENING_ID'] in missing_geo_op_geos:
if len(missing_geo_op_geos[prp['OPENING_ID']])>0:
# Use opening geometry
geom={}
geom['coordinates']=[]
geo0=missing_geo_op_geos[prp['OPENING_ID']]
if type(geo0)==dict:
geo1=geo0['coordinates']
geom['coordinates'].append(geo1[0])
else:
for j in range(len(geo0)):
geo1=geo0[j]['coordinates']
geom['coordinates'].append(geo1[0])
flg_geom_from_op=1
else:
# Could not use either FC or opening layer
print('Missing spatial could not be recovered')
# Only continue if spatial info exists
if (geom==None) | (geom==[]):
continue
# Extract multipolygon
coords0=geom['coordinates']
# loop through multipolygon
for i in range(len(coords0)):
# Extract multipolygon
coords1=coords0[i]
# loop through multipolygon
for j in range(len(coords1)):
# Extract polygon
coords2=np.asarray(coords1[j])
"""
.. module:: skrf.calibration.deembedding
====================================================
deembedding (:mod:`skrf.calibration.deembedding`)
====================================================
De-embedding is the procedure of removing effects of the
test fixture that is often present in the measurement of a device
or circuit. It is based on a lumped element approximation of the
test fixture which needs removal from the raw data, and its
equivalent circuit often needs to be known a-priori. This is often
required since implementation of calibration methods such as
Thru-Reflect-Line (TRL) becomes too expensive for implementation
in on-wafer measurement environments where space is limited, or
insufficiently accurate as in the case of Short-Open-Load-Thru
(SOLT) calibration where the load cannot be manufactured accurately.
De-embedding is often performed as a second step, after a
SOLT, TRL or similar calibration to the end of a known reference
plane, like the probe-tips in on-wafer measurements.
This module provides objects to implement commonly used de-embedding
methods in on-wafer applications.
Each de-embedding method inherits from the common abstract base
class :class:`Deembedding`.
Base Class
----------
.. autosummary::
:toctree: generated/
Deembedding
De-embedding Methods
--------------------
.. autosummary::
:toctree: generated/
OpenShort
Open
ShortOpen
Short
SplitPi
SplitTee
AdmittanceCancel
ImpedanceCancel
"""
from abc import ABC, abstractmethod
from ..frequency import *
from ..network import *
import warnings
import numpy as np
from numpy import concatenate, conj, flip, real, angle, exp, zeros
from scipy.interpolate import interp1d
import matplotlib.pyplot as plt
class Deembedding(ABC):
"""
Abstract Base Class for all de-embedding objects.
This class implements the common mechanisms for all de-embedding
algorithms. Specific calibration algorithms should inherit this
class and over-ride the methods:
* :func:`Deembedding.deembed`
"""
def __init__(self, dummies, name=None, *args, **kwargs):
r"""
De-embedding Initializer
Notes
-----
Each de-embedding algorithm may use a different number of
dummy networks. We check that all of these dummy networks
have matching frequencies before performing de-embedding.
It should be known a-priori what the equivalent circuit
of the parasitic network looks like. The proper de-embedding
method should then be chosen accordingly.
Parameters
----------
dummies : list of :class:`~skrf.network.Network` objects
Network info of all the dummy structures used in a
given de-embedding algorithm.
name : string
Name of this de-embedding instance, like 'open-short-set1'
This is for convenience of identification.
\*args, \*\*kwargs : keyword arguments
stored in self.args and self.kwargs, which may be used
by sub-classes if needed.
"""
# ensure all the dummy Networks' frequencies are the same
for dmyntwk in dummies:
if dummies[0].frequency != dmyntwk.frequency:
raise(ValueError('Dummy Networks dont have matching frequencies.'))
# TODO: attempt to interpolate if frequencies do not match
self.frequency = dummies[0].frequency
self.dummies = dummies
self.args = args
self.kwargs = kwargs
self.name = name
def __str__(self):
if self.name is None:
name = ''
else:
name = self.name
output = '%s Deembedding: %s, %s, %s dummy structures'\
%(self.__class__.__name__, name, str(self.frequency),\
len(self.dummies))
return output
def __repr__(self):
return self.__str__()
@abstractmethod
def deembed(self, ntwk):
"""
Apply de-embedding correction to a Network
"""
pass
class OpenShort(Deembedding):
"""
Remove open parasitics followed by short parasitics.
This is a commonly used de-embedding method for on-wafer applications.
A deembedding object is created with two dummy measurements: `dummy_open`
and `dummy_short`. When :func:`Deembedding.deembed` is applied, the
Y-parameters of the dummy_open are subtracted from the DUT measurement,
followed by subtraction of the Z-parameters of dummy_short.
This method is applicable only when there is a-priori knowledge of the
equivalent circuit model of the parasitic network to be de-embedded,
where the series parasitics are closest to device under test,
followed by the parallel parasitics. For more information, see [1]_
References
------------
.. [1] <NAME>, <NAME> and <NAME>, "An improved
de-embedding technique for on-wafer high frequency characterization",
IEEE 1991 Bipolar Circuits and Technology Meeting, pp. 188-191, Sep. 1991.
Example
--------
>>> import skrf as rf
>>> from skrf.calibration import OpenShort
Create network objects for dummy structures and dut
>>> op = rf.Network('open_ckt.s2p')
>>> sh = rf.Network('short_ckt.s2p')
>>> dut = rf.Network('full_ckt.s2p')
Create de-embedding object
>>> dm = OpenShort(dummy_open = op, dummy_short = sh, name = 'test_openshort')
Remove parasitics to get the actual device network
>>> realdut = dm.deembed(dut)
"""
def __init__(self, dummy_open, dummy_short, name=None, *args, **kwargs):
"""
Open-Short De-embedding Initializer
Parameters
-----------
dummy_open : :class:`~skrf.network.Network` object
Measurement of the dummy open structure
dummy_short : :class:`~skrf.network.Network` object
Measurement of the dummy short structure
name : string
Optional name of de-embedding object
args, kwargs:
Passed to :func:`Deembedding.__init__`
See Also
---------
:func:`Deembedding.__init__`
"""
self.open = dummy_open.copy()
self.short = dummy_short.copy()
dummies = [self.open, self.short]
Deembedding.__init__(self, dummies, name, *args, **kwargs)
def deembed(self, ntwk):
"""
Perform the de-embedding calculation
Parameters
----------
ntwk : :class:`~skrf.network.Network` object
Network data of device measurement from which
parasitics needs to be removed via de-embedding
Returns
-------
caled : :class:`~skrf.network.Network` object
Network data of the device after de-embedding
"""
# check if the frequencies match with dummy frequencies
if ntwk.frequency != self.open.frequency:
raise(ValueError('Network frequencies dont match dummy frequencies.'))
# TODO: attempt to interpolate if frequencies do not match
caled = ntwk.copy()
# remove open parasitics
caled.y = ntwk.y - self.open.y
# remove short parasitics
caled.z = caled.z - self.short.z
return caled
class Open(Deembedding):
"""
Remove open parasitics only.
A deembedding object is created with just one open dummy measurement,
`dummy_open`. When :func:`Deembedding.deembed` is applied, the
Y-parameters of the open dummy are subtracted from the DUT measurement.
This method is applicable only when there is a-priori knowledge of the
equivalent circuit model of the parasitic network to be de-embedded,
where the series parasitics are assumed to be negligible,
but parallel parasitics are unwanted.
Example
--------
>>> import skrf as rf
>>> from skrf.calibration import Open
Create network objects for dummy structure and dut
>>> op = rf.Network('open_ckt.s2p')
>>> dut = rf.Network('full_ckt.s2p')
Create de-embedding object
>>> dm = Open(dummy_open = op, name = 'test_open')
Remove parasitics to get the actual device network
>>> realdut = dm.deembed(dut)
"""
def __init__(self, dummy_open, name=None, *args, **kwargs):
"""
Open De-embedding Initializer
Parameters
-----------
dummy_open : :class:`~skrf.network.Network` object
Measurement of the dummy open structure
name : string
Optional name of de-embedding object
args, kwargs:
Passed to :func:`Deembedding.__init__`
See Also
---------
:func:`Deembedding.__init__`
"""
self.open = dummy_open.copy()
dummies = [self.open]
Deembedding.__init__(self, dummies, name, *args, **kwargs)
def deembed(self, ntwk):
"""
Perform the de-embedding calculation
Parameters
----------
ntwk : :class:`~skrf.network.Network` object
Network data of device measurement from which
parasitics needs to be removed via de-embedding
Returns
-------
caled : :class:`~skrf.network.Network` object
Network data of the device after de-embedding
"""
# check if the frequencies match with dummy frequencies
if ntwk.frequency != self.open.frequency:
raise(ValueError('Network frequencies dont match dummy frequencies.'))
# TODO: attempt to interpolate if frequencies do not match
caled = ntwk.copy()
# remove open parasitics
caled.y = ntwk.y - self.open.y
return caled
class ShortOpen(Deembedding):
"""
Remove short parasitics followed by open parasitics.
A deembedding object is created with two dummy measurements: `dummy_open`
and `dummy_short`. When :func:`Deembedding.deembed` is applied, the
Z-parameters of the dummy_short are subtracted from the DUT measurement,
followed by subtraction of Y-parameters of dummy_open.
This method is applicable only when there is a-priori knowledge of the
equivalent circuit model of the parasitic network to be de-embedded,
where the parallel parasitics are closest to device under test,
followed by the series parasitics.
Example
--------
>>> import skrf as rf
>>> from skrf.calibration import ShortOpen
Create network objects for dummy structures and dut
>>> op = rf.Network('open_ckt.s2p')
>>> sh = rf.Network('short_ckt.s2p')
>>> dut = rf.Network('full_ckt.s2p')
Create de-embedding object
>>> dm = ShortOpen(dummy_short = sh, dummy_open = op, name = 'test_shortopen')
Remove parasitics to get the actual device network
>>> realdut = dm.deembed(dut)
"""
def __init__(self, dummy_short, dummy_open, name=None, *args, **kwargs):
"""
Short-Open De-embedding Initializer
Parameters
-----------
dummy_short : :class:`~skrf.network.Network` object
Measurement of the dummy short structure
dummy_open : :class:`~skrf.network.Network` object
Measurement of the dummy open structure
name : string
Optional name of de-embedding object
args, kwargs:
Passed to :func:`Deembedding.__init__`
See Also
---------
:func:`Deembedding.__init__`
"""
self.open = dummy_open.copy()
self.short = dummy_short.copy()
dummies = [self.open, self.short]
Deembedding.__init__(self, dummies, name, *args, **kwargs)
def deembed(self, ntwk):
"""
Perform the de-embedding calculation
Parameters
----------
ntwk : :class:`~skrf.network.Network` object
Network data of device measurement from which
parasitics needs to be removed via de-embedding
Returns
-------
caled : :class:`~skrf.network.Network` object
Network data of the device after de-embedding
"""
# check if the frequencies match with dummy frequencies
if ntwk.frequency != self.open.frequency:
raise(ValueError('Network frequencies dont match dummy frequencies.'))
# TODO: attempt to interpolate if frequencies do not match
caled = ntwk.copy()
# remove short parasitics
caled.z = ntwk.z - self.short.z
# remove open parasitics
caled.y = caled.y - self.open.y
return caled
class Short(Deembedding):
"""
Remove short parasitics only.
This is a useful method to remove pad contact resistances from measurement.
A deembedding object is created with just one dummy measurement: `dummy_short`.
When :func:`Deembedding.deembed` is applied, the
Z-parameters of the dummy_short are subtracted from the DUT measurement.
This method is applicable only when there is a-priori knowledge of the
equivalent circuit model of the parasitic network to be de-embedded,
where only series parasitics are to be removed while retaining all others.
Example
--------
>>> import skrf as rf
>>> from skrf.calibration import Short
Create network objects for dummy structures and dut
>>> sh = rf.Network('short_ckt.s2p')
>>> dut = rf.Network('full_ckt.s2p')
Create de-embedding object
>>> dm = Short(dummy_short = sh, name = 'test_short')
Remove parasitics to get the actual device network
>>> realdut = dm.deembed(dut)
"""
def __init__(self, dummy_short, name=None, *args, **kwargs):
"""
Short De-embedding Initializer
Parameters
-----------
dummy_short : :class:`~skrf.network.Network` object
Measurement of the dummy short structure
name : string
Optional name of de-embedding object
args, kwargs:
Passed to :func:`Deembedding.__init__`
See Also
---------
:func:`Deembedding.__init__`
"""
self.short = dummy_short.copy()
dummies = [self.short]
Deembedding.__init__(self, dummies, name, *args, **kwargs)
def deembed(self, ntwk):
"""
Perform the de-embedding calculation
Parameters
----------
ntwk : :class:`~skrf.network.Network` object
Network data of device measurement from which
parasitics needs to be removed via de-embedding
Returns
-------
caled : :class:`~skrf.network.Network` object
Network data of the device after de-embedding
"""
# check if the frequencies match with dummy frequencies
if ntwk.frequency != self.short.frequency:
raise(ValueError('Network frequencies dont match dummy frequencies.'))
# TODO: attempt to interpolate if frequencies do not match
caled = ntwk.copy()
# remove short parasitics
caled.z = ntwk.z - self.short.z
return caled
class SplitPi(Deembedding):
"""
Remove shunt and series parasitics assuming pi-type embedding network.
A deembedding object is created with just one thru dummy measurement `dummy_thru`.
The thru dummy is, for example, a direct cascaded connection of the left and right test pads.
When :func:`Deembedding.deembed` is applied,
the shunt admittance and series impedance of the thru dummy are removed.
This method is applicable only when there is a-priori knowledge of the
equivalent circuit model of the parasitic network to be de-embedded,
where the series parasitics are closest to device under test,
followed by the shunt parasitics. For more information, see [2]_
References
------------
.. [2] <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, and <NAME>,
“Experimental Characterization of the Effect of Metal Dummy Fills on Spiral Inductors,”
in 2007 IEEE Radio Frequency Integrated Circuits (RFIC) Symposium, Jun. 2007, pp. 307–310.
Example
--------
>>> import skrf as rf
>>> from skrf.calibration import SplitPi
Create network objects for dummy structure and dut
>>> th = rf.Network('thru_ckt.s2p')
>>> dut = rf.Network('full_ckt.s2p')
Create de-embedding object
>>> dm = SplitPi(dummy_thru = th, name = 'test_thru')
Remove parasitics to get the actual device network
>>> realdut = dm.deembed(dut)
"""
def __init__(self, dummy_thru, name=None, *args, **kwargs):
"""
SplitPi De-embedding Initializer
Parameters
-----------
dummy_thru : :class:`~skrf.network.Network` object
Measurement of the dummy thru structure
name : string
Optional name of de-embedding object
args, kwargs:
Passed to :func:`Deembedding.__init__`
See Also
---------
:func:`Deembedding.__init__`
"""
self.thru = dummy_thru.copy()
dummies = [self.thru]
Deembedding.__init__(self, dummies, name, *args, **kwargs)
def deembed(self, ntwk):
"""
Perform the de-embedding calculation
Parameters
----------
ntwk : :class:`~skrf.network.Network` object
Network data of device measurement from which
parasitics needs to be removed via de-embedding
Returns
-------
caled : :class:`~skrf.network.Network` object
Network data of the device after de-embedding
"""
# check if the frequencies match with dummy frequencies
if ntwk.frequency != self.thru.frequency:
raise(ValueError('Network frequencies dont match dummy frequencies.'))
# TODO: attempt to interpolate if frequencies do not match
left = self.thru.copy()
left_y = left.y
left_y[:,0,0] = (self.thru.y[:,0,0] - self.thru.y[:,1,0] + self.thru.y[:,1,1] - self.thru.y[:,0,1]) / 2
left_y[:,0,1] = self.thru.y[:,1,0] + self.thru.y[:,0,1]
left_y[:,1,0] = self.thru.y[:,1,0] + self.thru.y[:,0,1]
left_y[:,1,1] = - self.thru.y[:,1,0] - self.thru.y[:,0,1]
left.y = left_y
right = left.flipped()
caled = left.inv ** ntwk ** right.inv
return caled
class SplitTee(Deembedding):
"""
Remove series and shunt parasitics assuming tee-type embedding network.
A deembedding object is created with just one thru dummy measurement `dummy_thru`.
The thru dummy is, for example, a direct cascaded connection of the left and right test pads.
When :func:`Deembedding.deembed` is applied,
the shunt admittance and series impedance of the thru dummy are removed.
This method is applicable only when there is a-priori knowledge of the
equivalent circuit model of the parasitic network to be de-embedded,
where the shunt parasitics are closest to device under test,
followed by the series parasitics. For more information, see [3]_
References
------------
.. [3] <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, and <NAME>,
“Experimental validation of crosstalk simulations for on-chip interconnects using S-parameters,”
IEEE Transactions on Advanced Packaging, vol. 28, no. 1, pp. 57–62, Feb. 2005.
Example
--------
>>> import skrf as rf
>>> from skrf.calibration import SplitTee
Create network objects for dummy structure and dut
>>> th = rf.Network('thru_ckt.s2p')
>>> dut = rf.Network('full_ckt.s2p')
Create de-embedding object
>>> dm = SplitTee(dummy_thru = th, name = 'test_thru')
Remove parasitics to get the actual device network
>>> realdut = dm.deembed(dut)
"""
def __init__(self, dummy_thru, name=None, *args, **kwargs):
"""
SplitTee De-embedding Initializer
Parameters
-----------
dummy_thru : :class:`~skrf.network.Network` object
Measurement of the dummy thru structure
name : string
Optional name of de-embedding object
args, kwargs:
Passed to :func:`Deembedding.__init__`
See Also
---------
:func:`Deembedding.__init__`
"""
self.thru = dummy_thru.copy()
dummies = [self.thru]
Deembedding.__init__(self, dummies, name, *args, **kwargs)
def deembed(self, ntwk):
"""
Perform the de-embedding calculation
Parameters
----------
ntwk : :class:`~skrf.network.Network` object
Network data of device measurement from which
parasitics needs to be removed via de-embedding
Returns
-------
caled : :class:`~skrf.network.Network` object
Network data of the device after de-embedding
"""
# check if the frequencies match with dummy frequencies
if ntwk.frequency != self.thru.frequency:
raise(ValueError('Network frequencies dont match dummy frequencies.'))
# TODO: attempt to interpolate if frequencies do not match
left = self.thru.copy()
left_z = left.z
left_z[:,0,0] = (self.thru.z[:,0,0] + self.thru.z[:,1,0] + self.thru.z[:,1,1] + self.thru.z[:,0,1]) / 2
left_z[:,0,1] = self.thru.z[:,1,0] + self.thru.z[:,0,1]
left_z[:,1,0] = self.thru.z[:,1,0] + self.thru.z[:,0,1]
left_z[:,1,1] = self.thru.z[:,1,0] + self.thru.z[:,0,1]
left.z = left_z
right = left.flipped()
caled = left.inv ** ntwk ** right.inv
return caled
class AdmittanceCancel(Deembedding):
"""
Cancel shunt admittance by swapping (a.k.a Mangan's method).
A deembedding object is created with just one thru dummy measurement `dummy_thru`.
The thru dummy is, for example, a direct cascaded connection of the left and right test pads.
When :func:`Deembedding.deembed` is applied,
the shunt admittance of the thru dummy is canceled
from the DUT measurement by a left-right mirroring operation.
This method is applicable only to symmetric (i.e. S11=S22 and S12=S21) 2-port DUTs,
but is well suited to the characterization of transmission lines at mmW frequencies.
For more information, see [4]_
References
------------
.. [4] <NAME>, <NAME>, <NAME>, and <NAME>,
“De-embedding transmission line measurements for accurate modeling of IC designs,”
IEEE Trans. Electron Devices, vol. 53, no. 2, pp. 235–241, Feb. 2006.
Example
--------
>>> import skrf as rf
>>> from skrf.calibration import AdmittanceCancel
Create network objects for dummy structure and dut
>>> th = rf.Network('thru_ckt.s2p')
>>> dut = rf.Network('full_ckt.s2p')
Create de-embedding object
>>> dm = AdmittanceCancel(dummy_thru = th, name = 'test_thru')
Remove parasitics to get the actual device network
>>> realdut = dm.deembed(dut)
"""
def __init__(self, dummy_thru, name=None, *args, **kwargs):
"""
AdmittanceCancel De-embedding Initializer
Parameters
-----------
dummy_thru : :class:`~skrf.network.Network` object
Measurement of the dummy thru structure
name : string
Optional name of de-embedding object
args, kwargs:
Passed to :func:`Deembedding.__init__`
See Also
---------
:func:`Deembedding.__init__`
"""
self.thru = dummy_thru.copy()
dummies = [self.thru]
Deembedding.__init__(self, dummies, name, *args, **kwargs)
def deembed(self, ntwk):
"""
Perform the de-embedding calculation
Parameters
----------
ntwk : :class:`~skrf.network.Network` object
Network data of device measurement from which
parasitics needs to be removed via de-embedding
Returns
-------
caled : :class:`~skrf.network.Network` object
Network data of the device after de-embedding
"""
# check if the frequencies match with dummy frequencies
if ntwk.frequency != self.thru.frequency:
raise(ValueError('Network frequencies dont match dummy frequencies.'))
# TODO: attempt to interpolate if frequencies do not match
caled = ntwk.copy()
h = ntwk ** self.thru.inv
h_ = h.flipped()
caled.y = (h.y + h_.y) / 2
return caled
class ImpedanceCancel(Deembedding):
"""
Cancel series impedance by swapping.
A deembedding object is created with just one thru dummy measurement `dummy_thru`.
The thru dummy is, for example, a direct cascaded connection of the left and right test pads.
When :func:`Deembedding.deembed` is applied,
the series impedance of the thru dummy is canceled
from the DUT measurement by a left-right mirroring operation.
This method is applicable only to symmetric (i.e. S11=S22 and S12=S21) 2-port DUTs,
but is well suited to the characterization of transmission lines at mmW frequencies.
For more information, see [5]_
References
------------
.. [5] <NAME>, <NAME>, <NAME>, <NAME>, and <NAME>,
“Comparative analysis of on-chip transmission line de-embedding techniques,”
in 2015 IEEE International Symposium on Radio-Frequency Integration Technology,
Sendai, Japan, Aug. 2015, pp. 91–93.
Example
--------
>>> import skrf as rf
>>> from skrf.calibration import ImpedanceCancel
Create network objects for dummy structure and dut
>>> th = rf.Network('thru_ckt.s2p')
>>> dut = rf.Network('full_ckt.s2p')
Create de-embedding object
>>> dm = ImpedanceCancel(dummy_thru = th, name = 'test_thru')
Remove parasitics to get the actual device network
>>> realdut = dm.deembed(dut)
"""
def __init__(self, dummy_thru, name=None, *args, **kwargs):
"""
ImpedanceCancel De-embedding Initializer
Parameters
-----------
dummy_thru : :class:`~skrf.network.Network` object
Measurement of the dummy thru structure
name : string
Optional name of de-embedding object
args, kwargs:
Passed to :func:`Deembedding.__init__`
See Also
---------
:func:`Deembedding.__init__`
"""
self.thru = dummy_thru.copy()
dummies = [self.thru]
Deembedding.__init__(self, dummies, name, *args, **kwargs)
def deembed(self, ntwk):
"""
Perform the de-embedding calculation
Parameters
----------
ntwk : :class:`~skrf.network.Network` object
Network data of device measurement from which
parasitics needs to be removed via de-embedding
Returns
-------
caled : :class:`~skrf.network.Network` object
Network data of the device after de-embedding
"""
# check if the frequencies match with dummy frequencies
if ntwk.frequency != self.thru.frequency:
raise(ValueError('Network frequencies dont match dummy frequencies.'))
# TODO: attempt to interpolate if frequencies do not match
caled = ntwk.copy()
h = ntwk ** self.thru.inv
h_ = h.flipped()
caled.z = (h.z + h_.z) / 2
return caled
class IEEEP370_SE_NZC_2xThru(Deembedding):
"""
Creates error boxes from a test fixture 2x thru.
Based on https://opensource.ieee.org/elec-char/ieee-370/-/blob/master/TG1/IEEEP3702xThru.m
commit 49ddd78cf68ad5a7c0aaa57a73415075b5178aa6
A deembedding object is created with one 2x thru measurement,
`dummy_2xthru`, which is split into left and right fixtures with the IEEEP370
2xThru method. When :func:`Deembedding.deembed` is applied,
the S-parameters of the left and right fixtures are de-embedded from
the fixture-DUT-fixture measurement.
This method is applicable only when a 2x thru measurement is available.
Example
--------
>>> import skrf as rf
>>> from skrf.calibration import IEEEP370_SE_NZC_2xThru
Create network objects for dummy structure and dut
>>> s2xthru = rf.Network('2xthru.s2p')
>>> fdf = rf.Network('f-dut-f.s2p')
Create de-embedding object
>>> dm = IEEEP370_SE_NZC_2xThru(dummy_2xthru = s2xthru, name = '2xthru')
Remove parasitics to get the actual device network
>>> dut = dm.deembed(fdf)
"""
def __init__(self, dummy_2xthru, name=None,
z0 = 50, *args, **kwargs):
"""
IEEEP370_SE_NZC_2xThru De-embedding Initializer
Parameters
-----------
dummy_2xthru : :class:`~skrf.network.Network` object
Measurement of the 2x thru.
z0 :
reference impedance of the S-parameters (default: 50)
name : string
Optional name of de-embedding object
args, kwargs:
Passed to :func:`Deembedding.__init__`
See Also
---------
:func:`Deembedding.__init__`
"""
self.s2xthru = dummy_2xthru.copy()
self.z0 = z0
dummies = [self.s2xthru]
Deembedding.__init__(self, dummies, name, *args, **kwargs)
self.s_side1, self.s_side2 = self.split2xthru(self.s2xthru)
def deembed(self, ntwk):
"""
Perform the de-embedding calculation
Parameters
----------
ntwk : :class:`~skrf.network.Network` object
Network data of device measurement from which
thru fixtures needs to be removed via de-embedding
Returns
-------
caled : :class:`~skrf.network.Network` object
Network data of the device after de-embedding
"""
# check if the frequencies match with dummy frequencies
if ntwk.frequency != self.s2xthru.frequency:
raise(ValueError('Network frequencies dont match dummy frequencies.'))
# TODO: attempt to interpolate if frequencies do not match
return self.s_side1.inv ** ntwk ** self.s_side2.inv
def dc_interp(self, s, f):
"""
Enforce conjugate (Hermitian) symmetry on the first few frequency points and
interpolate the DC point; mirroring the data to negative frequencies keeps the
interpolated DC value essentially real.
"""
sp = s[0:9]
fp = f[0:9]
snp = concatenate((conj(flip(sp)), sp))
fnp = concatenate((-1*flip(fp), fp))
# mhuser : used cubic instead spline (not implemented)
snew = interp1d(fnp, snp, axis=0, kind = 'cubic')
return real(snew(0))
def COM_receiver_noise_filter(self, f,fr):
"""
receiver filter in COM defined by eq 93A-20
"""
fdfr = f / fr
# eq 93A-20
return 1 / (1 - 3.414214 * fdfr**2 + fdfr**4 + 1j*2.613126*(fdfr - fdfr**3))
def makeStep(self, impulse):
#mhuser : no need to call step function here, cumsum will be enough and efficient
#step = np.convolve(np.ones((len(impulse))), impulse)
#return step[0:len(impulse)]
return np.cumsum(impulse, axis=0)
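# Equivalence note (illustrative): for a finite impulse response h, np.cumsum(h) equals the
# first len(h) samples of np.convolve(np.ones(len(h)), h), so the cumulative sum above gives
# the same step response as the commented-out convolution.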
def makeSymmetric(self, nonsymmetric):
"""
this takes the nonsymmetric frequency domain input and makes it
symmetric.
The function assumes the DC point is in the nonsymmetric data
"""
symmetric_abs = concatenate((np.abs(nonsymmetric), flip(np.abs(nonsymmetric[1:]))))
symmetric_ang = concatenate((angle(nonsymmetric), -flip(angle(nonsymmetric[1:]))))
return symmetric_abs * exp(1j * symmetric_ang)
from __future__ import print_function
from ..sa_net_data_provider import AbstractDataProvider;
from numpy import random;
from distutils.util import strtobool;
import scipy.io as spio;
import numpy as np;
import glob;
import os;
from skimage import io;
from skimage import transform as sktransform;
import torchvision.transforms as transforms
import torchvision
import torch
import matplotlib;
# Force matplotlib to not use any Xwindows backend.
matplotlib.use('Agg');
import matplotlib.pyplot as plt;
import cv2 as cv;
import skimage.filters as skfilters;
import skimage.color as skcolor;
import pickle;
class MultiplexAutoencoderDataProviderRGB(AbstractDataProvider):
def __init__(self, is_test, filepath_data, filepath_label, n_channels, n_classes, do_preprocess, do_augment, data_var_name=None, label_var_name=None, permute=False, repeat=True, kwargs={}):
args = {'input_img_height':460, 'input_img_width': 700, 'file_name_suffix':''
, 'pre_resize':'False', 'pre_center':'False', 'pre_edge':'False'
, 'postprocess':'False', 'invert_img':'False', 'pad_y':0, 'pad_x':0};
args.update(kwargs);
self.input_img_height = int(args['input_img_height']);
self.input_img_width = int(args['input_img_width']);
self.file_name_suffix = args['file_name_suffix'];
self.pre_resize = bool(strtobool(args['pre_resize']));
self.pre_center = bool(strtobool(args['pre_center']));
self.pre_edge = bool(strtobool(args['pre_edge']));
self.do_postprocess = bool(strtobool(args['postprocess']));
self.invert_img = bool(strtobool(args['invert_img']));
self.pad_y = int(args['pad_y']);
self.pad_x = int(args['pad_x']);
self.pad_y1 = int(np.floor(self.pad_y / 2.0));
self.pad_y2 = int(np.ceil(self.pad_y / 2.0));
self.pad_x1 = int(np.floor(self.pad_x / 2.0));
self.pad_x2 = int(np.ceil(self.pad_x / 2.0));
#print('do_augment = {}'.format(do_augment));
if(do_augment):
self.create_augmentation_map(kwargs);
if(self.do_postprocess):
self.read_postprocess_parameters(kwargs);
self.is_test = is_test; # note that label will be None when is_test is true
self.filepath_data = filepath_data;
self.dir_data = os.path.split(self.filepath_data)[0] ;
if(filepath_label == None or filepath_label.strip() == ''):
self.filepath_label = None ;
else:
self.filepath_label = filepath_label ;
self.n_channels = n_channels;
self.n_classes = n_classes;
self.do_preprocess = do_preprocess;
self.do_augment = do_augment;
#self.data_var_name = data_var_name;
#self.label_var_name = label_var_name;
self.do_permute = permute;
self.do_repeat = repeat;
self.is_loaded = False;
##debug
self.tmp_index = 0;
def load_data(self):
self.data = None;
self.label = None;
self.last_fetched_indx = -1;
self.permutation = None;
self.data_count = 0;
#self.transform_data = transforms.Compose(
# [transforms.ToTensor(),
# transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]);
#self.transform_data = transforms.Compose(
# [transforms.ToTensor()]);
# https://discuss.pytorch.org/t/how-to-preprocess-input-for-pre-trained-networks/683/9
# https://github.com/pytorch/examples/blob/master/imagenet/main.py#L92-L93
#self.transform_data = transforms.Compose(
# [transforms.ToTensor(),
# transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])]);
#self.transform_data = transforms.Compose(
# [transforms.ToTensor(),
# transforms.Normalize(mean=[0.79, 0.62, 0.7], std=[1.0, 1.0, 1.0])]);
#self.transform_data = transforms.Compose(
# [transforms.ToTensor(),
# transforms.Normalize(mean=[0.0, 0.0, 0.0], std=[2.0, 2.0, 2.0])]);
self.transform_data = transforms.Compose(
[transforms.ToTensor()]);
img_path_files = glob.glob(os.path.join(self.filepath_data,'**', "*.png"), recursive=True);
#img_path_files = glob.glob(os.path.join(self.filepath_label, "*-img_rel_path*.pkl"));
#imgs_rel_path_list = [];
#for file in img_path_files:
# imgs_rel_path_list.append(pickle.load(open(file, 'rb')));
# filename = os.path.split(file)[1];
# wsi_name = filename[:filename.rfind("-")];
#imgs_rel_path = sum(imgs_rel_path_list, []);
self.data_count = len(img_path_files);
print('data_count')
print(self.data_count)
self.data = img_path_files;
# Permutation
if(self.do_permute == True):
self.permutation = np.random.permutation(self.data_count)
else:
self.permutation = None;
self.is_loaded = True;
def reset(self, repermute=None):
self.last_fetched_indx = -1;
if(repermute == True):
self.do_permute = True;
self.permutation = np.random.permutation(self.data_count);
def get_next_one(self):
## make sure data is loaded first
if(self.is_loaded == False):
self.load_data();
## get next data point
self.last_fetched_indx = (self.last_fetched_indx + 1);
if(self.do_repeat == False):
if (self.last_fetched_indx >= self.data_count):
return None;
else:
self.last_fetched_indx = self.last_fetched_indx % self.data_count;
actual_indx = self.last_fetched_indx ;
if(self.permutation is not None):
actual_indx = self.permutation[self.last_fetched_indx];
self.img_id = self.data[actual_indx]
## debug
#self.imshow_torch(data_point);
#print('img_id = {}'.format(img_id));
#data_point = self.pytorch_to_tf_format(data_point);
## debug
#self.imshow_tf(data_point);
data_point = self.load_image(self.img_id, do_match_size=(not self.do_preprocess and not self.do_postprocess));
#data_point = self.load_image(os.path.join(self.dir_data, str(self.img_id)+ self.file_name_suffix+ '.png'), do_match_size=(not self.do_preprocess and not self.do_postprocess));
##debug
#print(os.path.join(self.dir_data, str(img_id)+ self.file_name_suffix+ '.png'));
##debug
#print('load: ', data_point.shape);
# debug
#self.imshow(data_point);
## process the data
if(self.do_preprocess == True):
data_point = self.preprocess(data_point);
##debug
#print('preprocess: ', data_point.shape);
if(self.do_augment == True):
data_point = self.augment(data_point);
##debug
#print('augment: ', data_point.shape);
#self.imshow_torch(data_point);
##debug
#io.imsave('/gpfs/projects/KurcGroup/sabousamra/debug/'+ str(self.tmp_index) + '_' + str(self.img_id)+ '_aug.png', data_point);
if(self.do_postprocess):
data_point = self.postprocess(data_point);
##debug
#print('self.tmp_index = ', self.tmp_index);
#print('self.img_id = ', self.img_id);
#io.imsave('/gpfs/projects/KurcGroup/sabousamra/debug/'+ str(self.tmp_index) + '_' + str(self.img_id)+ '_post.png', data_point);
##a
#data_point = self.transform_data(data_point);
#print('data_point.shape', data_point.shape)
#print('dself.transform_data(data_point).shape', self.transform_data(data_point).size())
data_point = data_point.astype(np.float64);
##data_point /= 255;
#data_point /= 4;
#data_point -= 0.5;
#data_point *= 2;
##print('np.transpose(data_point, (1, 2, 0)).shape = ', np.transpose(data_point, (2, 0, 1)).shape)
#data_point = torch.tensor(np.transpose(data_point, (2,0, 1)));
data_point = np.transpose(data_point, (2,0, 1));
##b
#data_point = self.tf_to_pytorch_format(data_point);
#data_point = torch.tensor(data_point);
##debug
#print('torch transform: ', data_point.shape);
##debug
#print(data_point.shape);
return data_point;
## returns None, None if there is no more data to retrieve and repeat = false
def get_next_n(self, n:int):
## validate parameters
if(n <= 0):
return None, None;
## make sure data is loaded first
if(self.is_loaded == False):
self.load_data();
## Get number of data points to retrieve
if(self.do_repeat == False):
if (self.last_fetched_indx + n >= self.data_count):
n = self.data_count - self.last_fetched_indx - 1;
if(n <= 0):
return None, None;
## Get data shape
#data_size_x = self.data.shape[1];
#data_size_y = self.data.shape[2];
data_size_x = self.input_img_width;
data_size_y = self.input_img_height;
#print(data_size_x, data_size_y);
#data_points = torch.zeros((n, self.n_channels, data_size_y, data_size_x))
#data_points = np.zeros((n, self.n_channels, data_size_y, data_size_x))
data_points = [];
for i in range(0, n):
d = self.get_next_one(); # returns the rgb image
if(d is None):
break;
data_points.append(d);
data_points = np.stack(data_points, axis=0)
# label is rgb - transform to whatever in train/loss
labels = np.copy(data_points);
#print('data_points.shape before = ', data_points.shape)
# normalize the input
#data_points /= 255;
#data_points -= 0.5;
#data_points *= 2;
if(self.pad_y > 0 or self.pad_x > 0):
#data_points = np.pad(data_points, ((0,0),(0,0),(self.pad_y1, self.pad_y2),(self.pad_x1, self.pad_x2)),'constant');
#data_points = np.pad(data_points, ((0,0),(0,0),(self.pad_y1, self.pad_y2),(self.pad_x1, self.pad_x2)),'constant', constant_values=1);
data_points = np.pad(data_points, ((0,0),(0,0),(self.pad_y1, self.pad_y2),(self.pad_x1, self.pad_x2)),'constant', constant_values=128);
#print('labels max = ', np.max(labels))
#print('data_points max = ', np.max(data_points))
data_points = torch.tensor(data_points, dtype = torch.float); # to avoid the error: Input type (torch.cuda.DoubleTensor) and weight type (torch.cuda.FloatTensor) should be the same
labels = torch.tensor(labels, dtype = torch.float)
#print('data_points.shape after = ', data_points.size())
#print('labels.shape after = ', labels.size())
return data_points, labels;
def preprocess(self, data_point):
data_point2 = data_point;
if(not(data_point.shape[0] == self.input_img_height) or not(data_point.shape[1] == self.input_img_width)):
##debug
#print('before resize: ', data_point2.shape);
if(self.pre_resize):
data_point2 = sktransform.resize(data_point, (self.input_img_height, self.input_img_width), preserve_range=True, anti_aliasing=True).astype(np.uint8);
elif(self.pre_center):
diff_y = self.input_img_height - data_point.shape[0];
diff_x = self.input_img_width - data_point.shape[1];
diff_y_div2 = diff_y//2;
diff_x_div2 = diff_x//2;
data_point2 = np.zeros((self.input_img_height, self.input_img_width, data_point.shape[2]));
if(diff_y >= 0 and diff_x >= 0):
data_point2[diff_y_div2:diff_y_div2+data_point.shape[0], diff_x_div2:diff_x_div2+data_point.shape[1], :] = data_point;
if(self.pre_edge):
data_point2 = skcolor.rgb2gray(data_point2);
data_point2 = skfilters.sobel(data_point2);
data_point2 = data_point2.reshape(data_point2.shape[0], data_point2.shape[1], 1);
data_point2 = np.concatenate((data_point2, data_point2, data_point2), axis=2);
print(data_point2.shape);
##debug
#print('after resize: ', data_point2.shape);
return data_point2;
# prepare the mapping from allowed operations to available operations index
def create_augmentation_map(self, kwargs={}):
args = {'aug_flip_h': 'True', 'aug_flip_v': 'True', 'aug_flip_hv': 'True' \
, 'aug_rot180': 'True', 'aug_rot90': 'False', 'aug_rot270': 'False', 'aug_rot_rand': 'False' \
, 'aug_brightness': 'False', 'aug_brightness_min': -50, 'aug_brightness_max': 50 \
, 'aug_saturation': 'False', 'aug_saturation_min': -1.5, 'aug_saturation_max': 1.5 \
, 'aug_hue': 'False', 'aug_hue_min': -50, 'aug_hue_max': 50 \
, 'aug_scale': 'False', 'aug_scale_min': 1.0, 'aug_scale_max': 2.0 \
, 'aug_translate': 'False', 'aug_translate_y_min': -20, 'aug_translate_y_max': 20, 'aug_translate_x_min': -20, 'aug_translate_x_max': 20
};
print(args);
args.update(kwargs);
print(args);
self.aug_flip_h = bool(strtobool(args['aug_flip_h']));
self.aug_flip_v = bool(strtobool(args['aug_flip_v']));
self.aug_flip_hv = bool(strtobool(args['aug_flip_hv']));
self.aug_rot180 = bool(strtobool(args['aug_rot180']));
self.aug_rot90 = bool(strtobool(args['aug_rot90']));
self.aug_rot270 = bool(strtobool(args['aug_rot270']));
self.aug_rot_random = bool(strtobool(args['aug_rot_rand']));
self.aug_brightness = bool(strtobool(args['aug_brightness']));
self.aug_saturation = bool(strtobool(args['aug_saturation']));
self.aug_hue = bool(strtobool(args['aug_hue']));
self.aug_scale = bool(strtobool(args['aug_scale']));
self.aug_translate = bool(strtobool(args['aug_translate']));
'''
map allowed operation to the following values
0: same (none)
1: horizontal flip
2: vertical flip
3: horizontal and vertical flip
4: rotate 180
5: rotate 90
6: rotate 270 or -90
7: rotate random angle
'''
self.aug_map = {};
self.aug_map[0] = 0; # (same) none
i = 1;
if(self.aug_flip_h):
self.aug_map[i] = 1;
i += 1;
if(self.aug_flip_v):
self.aug_map[i] = 2;
i += 1;
if(self.aug_flip_hv):
self.aug_map[i] = 3;
i += 1;
if(self.aug_rot180):
self.aug_map[i] = 4;
i += 1;
if(self.aug_rot90):
#print('self.aug_rot90={}'.format(self.aug_rot90));
self.aug_map[i] = 5;
i += 1;
if(self.aug_rot270):
#print('self.aug_rot270={}'.format(self.aug_rot270));
self.aug_map[i] = 6;
i += 1;
if(self.aug_rot_random):
#self.aug_map[i] = 7;
self.aug_rot_min = int(args['aug_rot_min']);
self.aug_rot_max = int(args['aug_rot_max']);
if(self.aug_brightness):
# self.aug_map[i] = 7;
self.aug_brightness_min = int(args['aug_brightness_min']);
self.aug_brightness_max = int(args['aug_brightness_max']);
# i += 1;
if(self.aug_saturation):
self.aug_saturation_min = float(args['aug_saturation_min']);
self.aug_saturation_max = float(args['aug_saturation_max']);
if(self.aug_hue):
self.aug_hue_min = int(args['aug_hue_min']);
self.aug_hue_max = int(args['aug_hue_max']);
if(self.aug_scale):
self.aug_scale_min = float(args['aug_scale_min']);
self.aug_scale_max = float(args['aug_scale_max']);
if(self.aug_translate):
self.aug_translate_y_min = int(args['aug_translate_y_min']);
self.aug_translate_y_max = int(args['aug_translate_y_max']);
self.aug_translate_x_min = int(args['aug_translate_x_min']);
self.aug_translate_x_max = int(args['aug_translate_x_max']);
print(self.aug_map)
def augment(self, data_point):
'''
select augmentation:
0: same (none)
1: horizontal flip
2: vertical flip
3: horizontal and vertical flip
4: rotate 180
5: rotate 90
6: rotate 270 or -90
7: rotate random
'''
# because width and height are not equal cannot do rotation 90 and 270
#op = random.randint(0,7);
#print('data_point.shape');
#print(data_point.shape);
#op = random.randint(0,5);
# select one of the valid operations and map it to its index in the available operations
op = random.randint(0,len(self.aug_map));
op = self.aug_map[op];
## debug
#print('op = ', op);
data_point2 = data_point;
# important: use ndarray.copy() when indexing with negative
# It will alocate new memory for numpy array which make it normal, I mean the stride is not negative any more.
# Otherwise get the error: ValueError: some of the strides of a given numpy array are negative. This is currently not supported, but will be added in future releases.
if(op == 1):
data_point2 = data_point[:,::-1,:].copy();
elif(op == 2):
data_point2 = data_point[::-1,:,:].copy();
elif(op == 3):
data_point2 = data_point[:,::-1,:];
data_point2 = data_point2[::-1,:,:].copy();
elif(op == 4):
data_point2 = np.rot90(data_point, k=2, axes=(0,1)).copy();
elif(op == 5):
data_point2 = np.rot90(data_point, k=1, axes=(0,1)).copy();
elif(op == 6):
data_point2 = np.rot90(data_point, k=3, axes=(0,1)).copy();
if(self.aug_rot_random):
angle = random.randint(self.aug_rot_min, self.aug_rot_max);
##debug
#print('angle = ', angle);
#data_point2 = sktransform.rotate(data_point2, angle, mode='edge', preserve_range=True).astype(np.uint8);
data_point2 = sktransform.rotate(data_point2, angle, preserve_range=True).astype(np.uint8);
###debug
#print('self.tmp_index = ', self.tmp_index);
#print('self.img_id = ', self.img_id);
op_brightness = random.random();
op_saturation = random.random();
op_hue = random.random();
op_scale = random.random();
if((self.aug_saturation and op_saturation > 0.5) or (self.aug_hue and op_hue > 0.5) or (self.aug_brightness and op_brightness > 0.5)):
##debug
#print(data_point2.shape);
#print(data_point2.dtype);
data_point2 = data_point2.astype(np.uint8);
data_point2_hsv = cv.cvtColor(data_point2, cv.COLOR_RGB2HLS);
data_point2_hsv = data_point2_hsv.astype(np.float64)
saturation = 1.0;
hue = 0;
brightness = 0;
if(self.aug_hue and op_hue > 0.5):
#hue = random.random()*(self.aug_hue_max-self.aug_hue_min) + self.aug_hue_min;
hue = random.randint(self.aug_hue_min, self.aug_hue_max);
data_point2_hsv[:,:,0] += hue;
data_point2_hsv[:,:,0][np.where(data_point2_hsv[:,:,0] > 179)] = 179;
if(self.aug_saturation and op_saturation > 0.5):
#saturation = random.randint(self.aug_saturation_min, self.aug_saturation_max);
saturation = random.random() * (self.aug_saturation_max-self.aug_saturation_min) + self.aug_saturation_min;
data_point2_hsv[:,:,2] *= saturation;
data_point2_hsv[:,:,2][np.where(data_point2_hsv[:,:,2] > 255)] = 255;
if(self.aug_brightness and op_brightness > 0.5):
brightness = random.randint(self.aug_brightness_min, self.aug_brightness_max);
data_point2_hsv[:,:,1] += brightness;
data_point2_hsv[:,:,1][np.where(data_point2_hsv[:,:,1] > 255)] = 255;
##debug
#print('hu-sat-br = ', hue, ' - ', saturation, ' - ', brightness);
# The ranges that OpenCV manage for HSV format are the following:
# For HSV, Hue range is [0,179], Saturation range is [0,255] and Value range is [0,255]. Different softwares use different scales.
data_point2_hsv[np.where(data_point2_hsv < 0)] = 0;
data_point2_hsv = data_point2_hsv.astype(np.uint8);
data_point2 = cv.cvtColor(data_point2_hsv, cv.COLOR_HLS2RGB);
#elif(self.aug_saturation and op_saturation > 0.5):
# #saturation = random.randint(self.aug_saturation_min, self.aug_saturation_max);
# saturation = random.random() * (self.aug_saturation_max-self.aug_saturation_min) + self.aug_saturation_min;
# ##debug
# print('sat= ', saturation)
# data_point2 = data_point2.astype(np.uint8);
# data_point2_hsv = cv.cvtColor(data_point2, cv.COLOR_RGB2HSV);
# data_point2_hsv = data_point2_hsv.astype(np.float)
# data_point2_hsv[:,:,1] *= saturation;
# data_point2_hsv[:,:,1][np.where(data_point2_hsv[:,:,1] > 255)] = 255;
# data_point2_hsv[np.where(data_point2_hsv < 0)] = 0;
# data_point2_hsv = data_point2_hsv.astype(np.uint8);
# data_point2 = cv.cvtColor(data_point2_hsv, cv.COLOR_HSV2RGB);
#elif(self.aug_hue and op_hue > 0.5):
# #hue = random.random()*(self.aug_hue_max-self.aug_hue_min) + self.aug_hue_min;
# hue = random.randint(self.aug_hue_min, self.aug_hue_max);
# ##debug
# print('hu = ', hue)
# data_point2 = data_point2.astype(np.uint8);
# data_point2_hsv = cv.cvtColor(data_point2, cv.COLOR_RGB2HSV);
# data_point2_hsv = data_point2_hsv.astype(np.float)
# data_point2_hsv[:,:,0] += hue;
# data_point2_hsv[:,:,0][np.where(data_point2_hsv[:,:,0] > 179)] = 179;
# data_point2_hsv[np.where(data_point2_hsv < 0)] = 0;
# data_point2_hsv = data_point2_hsv.astype(np.uint8);
# data_point2 = cv.cvtColor(data_point2_hsv, cv.COLOR_HSV2RGB);
#if(self.aug_brightness and op_brightness > 0.5):
# brightness = random.randint(self.aug_brightness_min, self.aug_brightness_max);
# ##debug
# print('br = ', brightness)
# data_point2 = data_point2.astype(np.int16)
# data_point2 += brightness;
# data_point2[np.where(data_point2 > 255)] = 255;
# data_point2[np.where(data_point2 < 0)] = 0;
# data_point2 = data_point2.astype(np.uint8);
if(self.aug_translate):
#translate_y = random.random()*(self.aug_translate_y_max-self.aug_translate_y_min) + self.aug_translate_y_min;
#translate_x = random.random()*(self.aug_translate_x_max-self.aug_translate_x_min) + self.aug_translate_x_min;
translate_y = np.random.randint(self.aug_translate_y_min, high = self.aug_translate_y_max);
translate_x = np.random.randint(self.aug_translate_x_min, high = self.aug_translate_x_max);
translate_transform = sktransform.AffineTransform(translation = (translate_x, translate_y));
data_point2 = sktransform.warp(data_point2, translate_transform, preserve_range=True).astype(np.uint8);
if(self.aug_scale and op_scale > 0.5):
scale = random.random()*(self.aug_scale_max-self.aug_scale_min) + self.aug_scale_min;
###debug
#print('sc = ', scale)
data_point2 = sktransform.rescale(data_point2, scale, preserve_range=True).astype(np.uint8);
scale_height, scale_width,_ = data_point2.shape;
diff_height = scale_height - self.input_img_height;
diff_width = scale_width - self.input_img_width;
start_y = 0;
start_x = 0;
if(diff_height > 0):
start_y = random.randint(0, diff_height);
if(diff_width > 0):
start_x = random.randint(0, diff_width);
####debug
#io.imsave('/gpfs/projects/KurcGroup/sabousamra/debug/'+ str(self.tmp_index) + '_' + str(self.img_id)+ '_rescale.png', data_point2);
data_point2 = data_point2[start_y : start_y+self.input_img_height, start_x : start_x+self.input_img_width, : ]
#print('data_point2.shape');
#print(data_point2.shape);
###debug
#print('self.tmp_index = ', self.tmp_index);
#print('self.img_id = ', self.img_id);
#io.imsave('/gpfs/projects/KurcGroup/sabousamra/debug/'+ str(self.tmp_index) + '_' + str(self.img_id)+ '.png', data_point2);
self.tmp_index += 1;
return data_point2;
def load_image(self, filepath, do_match_size=False):
img = io.imread(filepath);
if(img.shape[2] > 3): # remove the alpha
img = img[:,:,0:3];
if(do_match_size):
if((not (img.shape[0] == self.input_img_height)) \
or (not (img.shape[1] == self.input_img_width))):
##debug
print('shape mismatch: {}'.format(img.shape));
print('filepath: ', filepath);
valid_height = self.input_img_height;
valid_width = self.input_img_width;
if(img.shape[0] < self.input_img_height):
valid_height = img.shape[0];
if(img.shape[1] < self.input_img_width):
valid_width = img.shape[1];
##debug
print('valid_height= {}, valid_width: {}'.format(valid_height, valid_width));
img_new = np.zeros((self.input_img_height, self.input_img_width, img.shape[2]));
img_new[0:valid_height, 0:valid_width, :] = img[0:valid_height, 0:valid_width, :];
img = img_new;
#filename = os.path.splitext(os.path.split(filepath)[1])[0];
#io.imsave(os.path.join("/gpfs/projects/KurcGroup/sabousamra/multiplex/models-ae", filename + '_before_' +'.png'), img.astype(np.uint8));
if(self.invert_img):
img = 255 - img;
#img[np.where(img <1)] = 1;
img[np.where(img <5)] = 5;
img[np.where(img > 250)] = 250;
from __future__ import division, print_function
import numpy as np
import cv2 as cv
import scipy.ndimage.interpolation as sii
import os
import signal
from DataUtil.VoxelizerUtil import load_volume_from_mat, rotate_volume
from Constants import consts
import threading
import sys
print(sys.version)
if sys.version_info[0] < 3:
    import Queue as queue
else:
    import queue
class DataLoader(threading.Thread):
def __init__(self,
batch_size,
data_dir,
data_indices,
vol_res_x_w=128, vol_res_x_h=192,
vol_res_y_w=128, vol_res_y_h=192,
augmentation=True):
super(DataLoader, self).__init__()
if vol_res_x_h != 192 or vol_res_x_w != 128 or vol_res_y_h != 192 or vol_res_y_w != 128:
print('Unsupported resolution!')
raise ValueError
self.batch_index = 0
self.batch_size = batch_size
self.vox_res_x = (vol_res_x_w, vol_res_x_h, vol_res_x_w)
self.vox_res_y = (vol_res_y_w, vol_res_y_h, vol_res_y_w)
self.data_dir = data_dir
self.data_num = len(data_indices)
self.data_indices = np.copy(data_indices)
self.augmentation = augmentation
self.reshuffle_indices()
if augmentation:
max_idx = np.max(self.data_indices)
self.alpha = np.random.rand(max_idx+1)*0.3 + 0.85 # random from [0.85, 1.15]
self.beta = np.random.rand(max_idx+1)*0.3 - 0.15 # random from [-0.15, 0.15]
self.crop_size = np.random.randint(0, 20, (max_idx+1, 4))
self.movement = np.random.randint(0, 11, (max_idx+1, 3)) - 5
self.queue = queue.Queue(8)
self.stop_queue = False
self.total_batch_num = int(len(self.data_indices) // self.batch_size)
def reshuffle_indices(self):
self.batch_index = 0
np.random.shuffle(self.data_indices)
if self.augmentation:
max_idx = np.max(self.data_indices)
self.alpha = np.random.rand(max_idx+1)*0.3 + 0.85 # random from [0.85, 1.15]
self.beta = np.random.rand(max_idx+1)*0.3 - 0.15 # random from [-0.15, 0.15]
self.crop_size = np.random.randint(0, 20, (max_idx+1, 4))
self.movement = np.random.randint(0, 11, (max_idx+1, 3)) - 5
def load_volume(self, idx):
volume_id = idx // 4 * 4
view_id = idx - volume_id
if consts.fill:
volume = load_volume_from_mat('%s/voxel2/voxel_%08d.mat' % (self.data_dir, volume_id))
else:
volume = load_volume_from_mat('%s/voxel/voxel_%08d.mat' % (self.data_dir, volume_id))
mesh_volume = rotate_volume(volume['mesh_volume'], view_id)
smpl_v_volume = rotate_volume(volume['smpl_v_volume'], view_id)
# convert from WHD format to DHW format (as required by tensorflow)
mesh_volume = np.transpose(mesh_volume, (2, 1, 0))
smpl_v_volume = np.transpose(smpl_v_volume, (2, 1, 0, 3))
# flip upside down
mesh_volume = np.flip(mesh_volume, axis=1)
smpl_v_volume = np.flip(smpl_v_volume, axis=1)
# if self.augmentation:
# movement = self.movement[idx, :]
# x_m, y_m, z_m = movement[0], movement[1], movement[2]
# smpl_v_volume = sii.shift(smpl_v_volume, (0, x_m, y_m, 0), cval=0)
return smpl_v_volume, mesh_volume
@staticmethod
def resize_and_crop_img(img):
img = cv.resize(img, (2*consts.dim_h, 2*consts.dim_h))
edg = (2*consts.dim_h - 2*consts.dim_w) // 2
if len(img.shape) == 2:
img = np.expand_dims(img, axis=-1)
import os, time, argparse, pickle
from inspect import Traceback
import numpy as np
from numpy.lib.function_base import append
import pybullet as p
import matplotlib.pyplot as plt
from PIL import Image
import cv2
from torch import set_default_tensor_type
from .env import AssistiveEnv
from .agents.human_mesh import HumanMesh
from gym.utils import seeding
class BeddingManipulationEnv(AssistiveEnv):
def __init__(self, robot, human, use_mesh=False):
if robot is None:
super(BeddingManipulationEnv, self).__init__(robot=None, human=human, task='bedding_manipulation', obs_robot_len=12, obs_human_len=0, frame_skip=1, time_step=0.01, deformable=True)
self.use_mesh = use_mesh
self.bm_config = self.configp[self.task]
if self.bm_config['target_limb_code'] == 'random':
self.fixed_target_limb = False
else:
self.fixed_target_limb = True
self.target_limb_code = int(self.bm_config['target_limb_code'])
self.body_shape = None if self.bm_config.getboolean('vary_body_shape') else np.zeros((1, 10))
if self.bm_config.getboolean('take_images'):
self.take_images = True
self.save_image_dir = self.bm_config['save_image_dir']
            # * all the parameters below are False unless specified otherwise by args
self.render_body_points = self.bm_config.getboolean('render_body_points')
self.fixed_pose = self.bm_config.getboolean('fixed_human_pose')
self.verbose = self.bm_config.getboolean('bm_verbose')
self.blanket_pose_var = self.bm_config.getboolean('vary_blanket_pose')
self.take_images = self.bm_config.getboolean('take_images')
self.cmaes_dc = self.bm_config.getboolean('cmaes_data_collect')
# * these parameters don't have cmd args to modify them
self.seed_val = 1001
self.save_pstate = False
self.pstate_file = None
def step(self, action):
obs = self._get_obs()
if self.verbose:
print("Target Limb Code:", self.target_limb_code)
print("Observation:\n", obs, obs.dtype)
print("Action: ", action)
# * scale bounds the 2D grasp and release locations to the area over the mattress (action nums only in range [-1, 1])
scale = [0.44, 1.05]
grasp_loc = action[0:2]*scale
release_loc = action[2:4]*scale
# * get points on the blanket, initial state of the cloth
data = p.getMeshData(self.blanket, -1, flags=p.MESH_DATA_SIMULATION_MESH, physicsClientId=self.id)
# * get rid of any nontarget points that are not covered by the initial state of the blanket (will not be considered in reward calculation at the end of the step)
self.non_target_initially_uncovered(data)
head_points = len(self.points_pos_nontarget_limb_world[self.human.head])
point_counts = [self.total_target_point_count, self.total_nontarget_point_count-head_points, head_points]
# * calculate distance between the 2D grasp location and every point on the blanket, anchor points are the 4 points on the blanket closest to the 2D grasp location
dist = []
for i, v in enumerate(data[1]):
v = np.array(v)
d = np.linalg.norm(v[0:2] - grasp_loc)
dist.append(d)
anchor_idx = np.argpartition(np.array(dist), 4)[:4]
# for a in anchor_idx:
# print("anchor loc: ", data[1][a])
# * if no points on the blanket are within 2.8 cm of the grasp location, track that it would have been clipped
clipped = False
if not np.any(np.array(dist) < 0.028):
clipped = True
# * update grasp_loc var with the location of the central anchor point on the cloth
grasp_loc = np.array(data[1][anchor_idx[0]][0:2])
# * move sphere down to the anchor point on the blanket, create anchor point (central point first, then remaining points) and store constraint ids
self.sphere_ee.set_base_pos_orient(data[1][anchor_idx[0]], np.array([0,0,0]))
constraint_ids = []
constraint_ids.append(p.createSoftBodyAnchor(self.blanket, anchor_idx[0], self.sphere_ee.body, -1, [0, 0, 0]))
for i in anchor_idx[1:]:
pos_diff = np.array(data[1][i]) - np.array(data[1][anchor_idx[0]])
constraint_ids.append(p.createSoftBodyAnchor(self.blanket, i, self.sphere_ee.body, -1, pos_diff))
# * take image after blanket grasped
if self.take_images: self.capture_images()
# * move sphere 40 cm from the top of the bed
current_pos = self.sphere_ee.get_base_pos_orient()[0]
delta_z = 0.4 # distance to move up (with respect to the top of the bed)
bed_height = 0.58 # height of the bed
final_z = delta_z + bed_height # global goal z position
while current_pos[2] <= final_z:
self.sphere_ee.set_base_pos_orient(current_pos + np.array([0, 0, 0.005]), np.array([0,0,0]))
p.stepSimulation(physicsClientId=self.id)
current_pos = self.sphere_ee.get_base_pos_orient()[0]
# * take image after blanket lifted up by 40 cm
if self.take_images: self.capture_images()
# * move sphere to the release location, release the blanket
travel_dist = release_loc - grasp_loc
        # * determine delta x and y so that each simulation step moves at most about 0.005
num_steps = np.abs(travel_dist//0.005).max()
delta_x, delta_y = travel_dist/num_steps
current_pos = self.sphere_ee.get_base_pos_orient()[0]
for _ in range(int(num_steps)):
self.sphere_ee.set_base_pos_orient(current_pos + np.array([delta_x, delta_y, 0]), np.array([0,0,0]))
p.stepSimulation(physicsClientId=self.id)
current_pos = self.sphere_ee.get_base_pos_orient()[0]
# * continue stepping simulation to allow the cloth to settle before release
for _ in range(20):
p.stepSimulation(physicsClientId=self.id)
# * take image after moving to grasp location, before releasing cloth
if self.take_images: self.capture_images()
# * release the cloth at the release point, sphere is at the same arbitrary z position in the air
for i in constraint_ids:
p.removeConstraint(i, physicsClientId=self.id)
for _ in range(50):
p.stepSimulation(physicsClientId=self.id)
# * take image after cloth is released and settled
if self.take_images: self.capture_images()
# * get points on the blanket, final state of the cloth
data = p.getMeshData(self.blanket, -1, flags=p.MESH_DATA_SIMULATION_MESH, physicsClientId=self.id)
# * compute rewards
reward_uncover_target, uncovered_target_count = self.uncover_target_reward(data)
reward_uncover_nontarget, uncovered_nontarget_count = self.uncover_nontarget_reward(data)
reward_distance_btw_grasp_release = -150 if np.linalg.norm(grasp_loc - release_loc) >= 1.5 else 0
reward_head_kept_uncovered, covered_head_count = self.keep_head_uncovered_reward(data)
# * sum and weight rewards from individual functions to get overall reward
reward = self.config('uncover_target_weight')*reward_uncover_target + self.config('uncover_nontarget_weight')*reward_uncover_nontarget + self.config('grasp_release_distance_max_weight')*reward_distance_btw_grasp_release + self.config('keep_head_uncovered_weight')*reward_head_kept_uncovered
if self.verbose:
print(f"Rewards for each measure:\n\tUncover Target: {reward_uncover_target}, Uncover Nontarget: {reward_uncover_nontarget}, Cover Head: {reward_head_kept_uncovered}, Excessive Distance: {reward_distance_btw_grasp_release}")
print("overall reward: ", reward)
# * prepare info
split_reward = [reward_uncover_target, reward_uncover_nontarget, reward_distance_btw_grasp_release, reward_head_kept_uncovered]
post_action_point_counts = [uncovered_target_count, uncovered_nontarget_count, covered_head_count]
info = {'split_reward':split_reward, 'total_point_counts':point_counts,'post_action_point_counts': post_action_point_counts, 'clipped':clipped}
self.iteration += 1
done = self.iteration >= 1
# * take image after reward computed
if self.take_images: self.capture_images()
# return 0, 0, 1, {}
return obs, reward, done, info
def change_point_color(self, points_target_limb, limb, ind, rgb = [0, 1, 0.5, 1]):
p.changeVisualShape(points_target_limb[limb][ind].body, -1, rgbaColor=rgb, flags=0, physicsClientId=self.id)
def uncover_target_reward(self, blanket_state):
'''
give the robot a reward for uncovering the target body part
'''
points_covered = 0
uncovered_rgb = [0, 1, 0.5, 1]
covered_rgb = [1, 1, 1, 1]
threshold = 0.028
total_points = self.total_target_point_count
        # * count # of target points covered by the blanket, subtract from the total to get the # of points uncovered
for limb, points_pos_target_limb_world in self.points_pos_target_limb_world.items():
for point in range(len(points_pos_target_limb_world)):
covered = False
for i, v in enumerate(blanket_state[1]):
if abs(np.linalg.norm(v[0:2]-points_pos_target_limb_world[point][0:2])) < threshold:
covered = True
points_covered += 1
break
if self.render_body_points:
rgb = covered_rgb if covered else uncovered_rgb
self.change_point_color(self.points_target_limb, limb, point, rgb = rgb)
points_uncovered = total_points - points_covered
if self.verbose:
print(f"Total Target Points: {total_points}, Target Uncovered: {points_uncovered}")
return (points_uncovered/total_points)*100, points_uncovered
def uncover_nontarget_reward(self, blanket_state):
'''
discourage the robot from learning policies that uncover nontarget body parts
'''
points_covered = 0
uncovered_rgb = [1, 0, 0, 1]
covered_rgb = [0, 0, 1, 1]
threshold = 0.028
total_points = self.total_nontarget_point_count - len(self.points_pos_nontarget_limb_world[self.human.head])
total_target_points = self.total_target_point_count
# account for case where all nontarget points were initially uncovered
if total_points == 0:
return 0, 0
        # count number of nontarget points covered by the blanket
for limb, points_pos_nontarget_limb_world in self.points_pos_nontarget_limb_world.items():
if limb != self.human.head:
# print(limb)
for point in range(len(points_pos_nontarget_limb_world)):
covered = False
for i, v in enumerate(blanket_state[1]):
if abs(np.linalg.norm(v[0:2]-points_pos_nontarget_limb_world[point][0:2])) < threshold:
covered = True
points_covered += 1
break
# print("limb", limb, "covered", covered)
if self.render_body_points:
rgb = covered_rgb if covered else uncovered_rgb
self.change_point_color(self.points_nontarget_limb, limb, point, rgb = rgb)
points_uncovered = total_points - points_covered
if self.verbose:
print(f"Total Nontarget Points: {total_points}, Nontarget Uncovered: {points_uncovered}")
# if the same number of target and nontarget points are uncovered, total reward is 0
return -(points_uncovered/total_target_points)*100, points_uncovered
def keep_head_uncovered_reward(self, blanket_state):
'''
discourage the robot from learning policies that cover the head
'''
points_covered = 0
uncovered_rgb = [0, 0, 1, 1]
covered_rgb = [1, 0, 0, 1]
threshold = 0.028
points_pos_head_world = self.points_pos_nontarget_limb_world[self.human.head]
total_points = len(points_pos_head_world)
if total_points == 0:
return 0, 0
        # count number of head points covered by the blanket
for point in range(len(self.points_pos_nontarget_limb_world[self.human.head])):
covered = False
for i, v in enumerate(blanket_state[1]):
if abs(np.linalg.norm(v[0:2]-points_pos_head_world[point][0:2])) < threshold:
covered = True
points_covered += 1
break
if self.render_body_points:
rgb = covered_rgb if covered else uncovered_rgb
self.change_point_color(self.points_nontarget_limb, self.human.head, point, rgb = rgb)
if self.verbose:
print(f"Total Head Points: {total_points}, Covered Head Points: {points_covered}")
# penalize on double the percentage of head points covered (doubled to increase weight of covering the head)
return -(points_covered/total_points)*200, points_covered
def non_target_initially_uncovered(self, blanket_state):
'''
        removes nontarget points on the body that are uncovered when the blanket is in its initial state from the nontarget point set
also handles points on the head that are initially covered
'''
points_covered = 0
threshold = 0.028
points_to_remove = {}
points_to_remove_count = 0
# * create a list of the nontarget points not covered by the blanket
for limb, points_pos_nontarget_limb_world in self.points_pos_nontarget_limb_world.items():
# if limb != self.human.head:
points_to_remove[limb] = []
for point in range(len(points_pos_nontarget_limb_world)):
covered = False
for i, v in enumerate(blanket_state[1]):
if abs(np.linalg.norm(v[0:2]-points_pos_nontarget_limb_world[point][0:2])) < threshold:
covered = True
points_covered += 1
break
if limb == self.human.head:
if covered == True:
points_to_remove[limb].append(point)
elif covered == False:
points_to_remove[limb].append(point)
# * remove the identified points from the list of all nontarget points for each limb
        # * points removed in reverse order so that indices of identified points don't shift
for limb in self.points_pos_nontarget_limb_world.keys():
# if limb != self.human.head:
points_to_remove_count += len(points_to_remove[limb])
# print("count of points on nontarget initially:", len(self.points_pos_nontarget_limb_world[limb]), len(self.points_nontarget_limb[limb]))
for point in reversed(points_to_remove[limb]):
self.points_pos_nontarget_limb_world[limb].pop(point)
if self.render_body_points:
p.removeBody(self.points_nontarget_limb[limb][point].body)
self.points_nontarget_limb[limb].pop(point)
# print("count of points on nontarget now:", len(self.points_pos_nontarget_limb_world[limb]), len(self.points_nontarget_limb[limb]))
# print(self.total_nontarget_point_count)
self.total_nontarget_point_count -= points_to_remove_count
def _get_obs(self, agent=None):
pose = []
for limb in self.human.obs_limbs:
pos, orient = self.human.get_pos_orient(limb)
# print("pose", limb, pos, orient)
pos2D = pos[0:2]
yaw = p.getEulerFromQuaternion(orient)[-1]
pose.append(np.concatenate((pos2D, np.array([yaw])), axis=0))
pose = np.concatenate(pose, axis=0)
        # * collect more information for cmaes data collection; enables training a model with different observations later if desired
if self.cmaes_dc:
output = [None]*12
all_joint_angles = self.human.get_joint_angles(self.human.all_joint_indices)
all_pos_orient = [self.human.get_pos_orient(limb) for limb in self.human.all_body_parts]
output[0], output[1], output[2] = pose, all_joint_angles, all_pos_orient
return output
return pose.astype('float32')
def set_seed_val(self, seed = 1001):
if seed != self.seed_val:
self.seed_val = seed
def set_target_limb_code(self, target_limb_code=None):
        if target_limb_code is None:
self.target_limb_code = self.np_random.randint(0,12)
else:
self.target_limb_code = target_limb_code
def set_pstate_file(self, filename):
if self.pstate_file != filename:
self.pstate_file = filename
self.save_pstate = True
def reset(self):
super(BeddingManipulationEnv, self).reset()
if not self.fixed_pose and not self.cmaes_dc:
self.set_seed_val(seeding.create_seed())
self.seed(self.seed_val)
self.build_assistive_env(fixed_human_base=False, gender='female', human_impairment='none', furniture_type='hospital_bed', body_shape=self.body_shape)
# * enable rendering
p.configureDebugVisualizer(p.COV_ENABLE_RENDERING, 1, physicsClientId=self.id)
# * configure directory to save captured images to
if self.take_images:
self.image_dir = os.path.join(self.save_image_dir, time.strftime("%Y%m%d-%H%M%S"))
if not os.path.exists(self.image_dir):
os.makedirs(self.image_dir)
        # * Set up the human in the air, with legs and arms slightly separated
joints_positions = [(self.human.j_left_hip_y, -10), (self.human.j_right_hip_y, 10), (self.human.j_left_shoulder_x, -20), (self.human.j_right_shoulder_x, 20)]
self.human.setup_joints(joints_positions, use_static_joints=False, reactive_force=None)
self.human.set_base_pos_orient([0, -0.2, 1.1], [-np.pi/2.0, 0, np.pi])
if not self.fixed_pose:
# * Add small variation to the body pose
motor_indices, motor_positions, motor_velocities, motor_torques = self.human.get_motor_joint_states()
# print(motor_positions)
self.human.set_joint_angles(motor_indices, motor_positions+self.np_random.uniform(-0.2, 0.2, size=len(motor_indices)))
# self.increase_pose_variation()
        # * Increase friction of joints so the human doesn't flail around excessively as they settle
# print([p.getDynamicsInfo(self.human.body, joint)[1] for joint in self.human.all_joint_indices])
self.human.set_whole_body_frictions(spinning_friction=2)
# * Let the person settle on the bed
p.setGravity(0, 0, -1, physicsClientId=self.id)
# * step the simulation a few times so that the human has some initial velocity greater than the at rest threshold
for _ in range(5):
p.stepSimulation(physicsClientId=self.id)
# * continue stepping the simulation until the human joint velocities are under the threshold
threshold = 1e-2
settling = True
numsteps = 0
while settling:
settling = False
for i in self.human.all_joint_indices:
if np.any(np.abs(self.human.get_velocity(i)) >= threshold):
p.stepSimulation(physicsClientId=self.id)
numsteps += 1
settling = True
break
if numsteps > 400:
break
# print("steps to rest:", numsteps)
# * Lock the person in place
self.human.control(self.human.all_joint_indices, self.human.get_joint_angles(), 0.05, 100)
self.human.set_mass(self.human.base, mass=0)
self.human.set_base_velocity(linear_velocity=[0, 0, 0], angular_velocity=[0, 0, 0])
# * take image after human settles
if self.take_images: self.capture_images()
if self.use_mesh:
# * we do not use a mesh in this work
# Replace the capsulized human with a human mesh
self.human = HumanMesh()
joints_positions = [(self.human.j_right_shoulder_z, 60), (self.human.j_right_elbow_y, 90), (self.human.j_left_shoulder_z, -10), (self.human.j_left_elbow_y, -90), (self.human.j_right_hip_x, -60), (self.human.j_right_knee_x, 80), (self.human.j_left_hip_x, -90), (self.human.j_left_knee_x, 80)]
body_shape = np.zeros((1, 10))
gender = 'female' # 'random'
self.human.init(self.directory, self.id, self.np_random, gender=gender, height=None, body_shape=body_shape, joint_angles=joints_positions, left_hand_pose=[[-2, 0, 0, -2, 0, 0]])
chair_seat_position = np.array([0, 0.1, 0.55])
self.human.set_base_pos_orient(chair_seat_position - self.human.get_vertex_positions(self.human.bottom_index), [0, 0, 0, 1])
# * select a target limb to uncover (may be fixed or random), generate points along the body accordingly
if not self.fixed_target_limb:
self.set_target_limb_code()
self.target_limb = self.human.all_possible_target_limbs[self.target_limb_code]
if self.body_shape is None:
self.generate_points_along_body_vary_body_shape()
else:
self.generate_points_along_body()
# * take image after points generated (only visible if --render_body_points flag is used)
if self.take_images: self.capture_images()
# * spawn blanket
self.blanket = p.loadSoftBody(os.path.join(self.directory, 'clothing', 'blanket_2089v.obj'), scale=0.75, mass=0.15, useBendingSprings=1, useMassSpring=1, springElasticStiffness=1, springDampingStiffness=0.0005, springDampingAllDirections=1, springBendingStiffness=0, useSelfCollision=1, collisionMargin=0.006, frictionCoeff=0.5, useFaceContact=1, physicsClientId=self.id)
        # * change alpha value so that it is a little more translucent, easier to see the relationship between the blanket and the human
p.changeVisualShape(self.blanket, -1, rgbaColor=[0, 0, 1, 0.75], flags=0, physicsClientId=self.id)
p.changeVisualShape(self.blanket, -1, flags=p.VISUAL_SHAPE_DOUBLE_SIDED, physicsClientId=self.id)
p.setPhysicsEngineParameter(numSubSteps=4, numSolverIterations = 4, physicsClientId=self.id)
if self.blanket_pose_var:
delta_y = self.np_random.uniform(-0.25, 0.05)
delta_x = self.np_random.uniform(-0.02, 0.02)
deg = 45
delta_rad = self.np_random.uniform(-np.radians(deg), np.radians(deg)) # * +/- degrees
p.resetBasePositionAndOrientation(self.blanket, [0+delta_x, 0.2+delta_y, 1.5], self.get_quaternion([np.pi/2.0, 0, 0+delta_rad]), physicsClientId=self.id)
else:
p.resetBasePositionAndOrientation(self.blanket, [0, 0.2, 1.5], self.get_quaternion([np.pi/2.0, 0, 0]), physicsClientId=self.id)
# * Drop the blanket on the person, allow to settle
p.setGravity(0, 0, -9.81, physicsClientId=self.id)
for _ in range(100):
p.stepSimulation(physicsClientId=self.id)
        # * Initialize environment variables
# * if using the sphere manipulator, spawn the sphere and run a modified version of init_env_variables()
if self.robot is None:
# * spawn sphere manipulator
position = np.array([-0.3, -0.86, 0.8])
self.sphere_ee = self.create_sphere(radius=0.025, mass=0.0, pos = position, visual=True, collision=True, rgba=[1, 0, 0, 1])
# * initialize env variables
from gym import spaces
# * update observation and action spaces
obs_len = len(self._get_obs())
self.observation_space.__init__(low=-
|
np.ones(obs_len, dtype=np.float32)
|
numpy.ones
|
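In step() above, the four blanket vertices closest (in 2-D) to the grasp location become the anchor points, and the grasp counts as clipped when no vertex lies within 2.8 cm. A small stand-alone illustration of that selection with np.argpartition; the vertex coordinates are made up:

import numpy as np

verts = np.array([[0.00, 0.00, 0.6], [0.10, 0.00, 0.6], [0.50, 0.50, 0.6],
                  [0.02, 0.01, 0.6], [0.30, 0.10, 0.6]])
grasp_loc = np.array([0.0, 0.0])

dist = np.linalg.norm(verts[:, :2] - grasp_loc, axis=1)   # 2-D distance of every vertex
anchor_idx = np.argpartition(dist, 4)[:4]                 # indices of the 4 closest vertices
clipped = not np.any(dist < 0.028)
print(sorted(anchor_idx.tolist()), clipped)               # [0, 1, 3, 4] False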
import numpy as np
import numpy.linalg as la
import pdb
class Map():
"""map object
Attributes:
getGlobalPosition: convert position from (s, ey) to (X,Y)
"""
def __init__(self, halfWidth):
"""Initialization
halfWidth: track halfWidth
Modify the vector spec to change the geometry of the track
"""
# Goggle-shaped track
# self.slack = 0.15
# self.halfWidth = halfWidth
# spec = np.array([[60 * 0.03, 0],
# [80 * 0.03, -80 * 0.03 * 2 / np.pi],
# # Note s = 1 * np.pi / 2 and r = -1 ---> Angle spanned = np.pi / 2
# [20 * 0.03, 0],
# [80 * 0.03, -80 * 0.03 * 2 / np.pi],
# [40 * 0.03, +40 * 0.03 * 10 / np.pi],
# [60 * 0.03, -60 * 0.03 * 5 / np.pi],
# [40 * 0.03, +40 * 0.03 * 10 / np.pi],
# [80 * 0.03, -80 * 0.03 * 2 / np.pi],
# [20 * 0.03, 0],
# [80 * 0.03, -80 * 0.03 * 2 / np.pi]])
# L-shaped track
self.halfWidth = 0.45
self.slack = 0.45
lengthCurve = 4.5
spec = np.array([[1.0, 0],
[lengthCurve, lengthCurve / np.pi],
# Note s = 1 * np.pi / 2 and r = -1 ---> Angle spanned = np.pi / 2
[lengthCurve / 2, -lengthCurve / np.pi],
[lengthCurve, lengthCurve / np.pi],
[lengthCurve / np.pi * 2, 0],
[lengthCurve / 2, lengthCurve / np.pi]])
# spec = np.array([[1.0, 0],
# [lengthCurve, lengthCurve / np.pi],
# # Note s = 1 * np.pi / 2 and r = -1 ---> Angle spanned = np.pi / 2
# [lengthCurve / 4, lengthCurve / (2 * np.pi)],
# [lengthCurve / 4, - lengthCurve / (2 * np.pi)],
# [lengthCurve / np.pi * 2, 0],
# [lengthCurve / 2, lengthCurve / np.pi]])
# Oval track
# spec = np.array([[2.0, 0],
# [4.5, -4.5 / np.pi],
# # Note s = 1 * np.pi / 2 and r = -1 ---> Angle spanned = np.pi / 2
# [4.0, 0],
# [4.5, -4.5 / np.pi],
# [2.0, 0]])
# Now given the above segments we compute the (x, y) points of the track and the angle of the tangent vector (psi) at
# these points. For each segment we compute the (x, y, psi) coordinate at the last point of the segment. Furthermore,
# we compute also the cumulative s at the starting point of the segment at signed curvature
# PointAndTangent = [x, y, psi, cumulative s, segment length, signed curvature]
########################################################
#SS: Need to do something to mark the coordinate points of the lane boundaries.
#SS: Use OpenCV to measure attitude of the car w.r.t. the lane boundary angle.
#########################################################
PointAndTangent = np.zeros((spec.shape[0] + 1, 6))
for i in range(0, spec.shape[0]):
if spec[i, 1] == 0.0: # If the current segment is a straight line
l = spec[i, 0] # Length of the segments
if i == 0:
ang = 0 # Angle of the tangent vector at the starting point of the segment
x = 0 + l * np.cos(ang) # x coordinate of the last point of the segment
y = 0 + l * np.sin(ang) # y coordinate of the last point of the segment
else:
ang = PointAndTangent[i - 1, 2] # Angle of the tangent vector at the starting point of the segment
x = PointAndTangent[i-1, 0] + l * np.cos(ang) # x coordinate of the last point of the segment
y = PointAndTangent[i-1, 1] + l * np.sin(ang) # y coordinate of the last point of the segment
psi = ang # Angle of the tangent vector at the last point of the segment
if i == 0:
NewLine = np.array([x, y, psi, PointAndTangent[i, 3], l, 0])
else:
NewLine =
|
np.array([x, y, psi, PointAndTangent[i-1, 3] + PointAndTangent[i-1, 4], l, 0])
|
numpy.array
|
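The comment block above defines each PointAndTangent row as [x, y, psi, cumulative s, segment length, signed curvature]. A tiny numpy sketch of how the endpoint of one straight segment is chained onto the previous row, mirroring the formula used inside the loop:

import numpy as np

# previous row: endpoint (1.0, 0.0), heading 0 rad, cumulative s = 0.0, segment length 1.0
prev = np.array([1.0, 0.0, 0.0, 0.0, 1.0, 0.0])
l = 4.5                         # length of the next straight segment
ang = prev[2]                   # heading is unchanged on a straight segment
x = prev[0] + l * np.cos(ang)
y = prev[1] + l * np.sin(ang)
new_row = np.array([x, y, ang, prev[3] + prev[4], l, 0.0])
print(new_row)                  # [5.5 0.  0.  1.  4.5 0. ]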
# Licensed under a ??? style license - see LICENSE.rst
import warnings
import numpy as np
def lat_lon(x,y,ob_lon,ob_lat,pixscale_km,np_ang,req,rpol):
'''
Projection of an ellipsoid onto a 2-D array with latitude and longitude grid
    Parameters
    ----------
    x, y : np.ndarray
        pixel grid coordinates relative to the planet center
    ob_lon, ob_lat : float
        sub-observer longitude and latitude (degrees)
    pixscale_km : float
        pixel scale (km per pixel)
    np_ang : float
        position angle of the planet's north pole (degrees)
    req, rpol : float
        equatorial and polar radii of the planet (km)
    Returns
    -------
    Examples
    --------
'''
np_ang = -np_ang
x1 = pixscale_km*(np.cos(np.radians(np_ang))*x - np.sin(np.radians(np_ang))*y)
y1 = pixscale_km*(np.sin(np.radians(np_ang))*x + np.cos(np.radians(np_ang))*y)
olrad = np.radians(ob_lat)
#set up quadratic equation for ellipsoid
r2 = (req/rpol)**2
a = 1 + r2*(np.tan(olrad))**2 #second order
b = 2*y1*r2*np.sin(olrad) / (np.cos(olrad)**2) #first order
c = x1**2 + r2*y1**2 / (np.cos(olrad))**2 - req**2 #constant
radical = b**2 - 4*a*c
#will equal nan outside planet since radical < 0
with warnings.catch_warnings():
warnings.simplefilter("ignore") #suppresses error for taking sqrt nan
x3s1=(-b+np.sqrt(radical))/(2*a)
x3s2=(-b-np.sqrt(radical))/(2*a)
z3s1=(y1+x3s1*np.sin(olrad))/np.cos(olrad)
z3s2=(y1+x3s2*np.sin(olrad))/np.cos(olrad)
odotr1=x3s1*np.cos(olrad)+z3s1*np.sin(olrad)
odotr2=x3s2*np.cos(olrad)+z3s2*
|
np.sin(olrad)
|
numpy.sin
|
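lat_lon above intersects each line of sight with the ellipsoid by solving a quadratic; where the discriminant ("radical") is negative the pixel lies off the planet and the square root becomes NaN, which is exactly what the warnings filter suppresses. A compact numeric check of that behaviour with toy coefficients:

import warnings
import numpy as np

a = 1.0
b = np.array([0.0, 0.0])
c = np.array([-1.0, 4.0])            # the second entry plays the role of an off-planet pixel
radical = b**2 - 4 * a * c           # [  4. -16.]
with warnings.catch_warnings():
    warnings.simplefilter("ignore")  # sqrt of a negative entry -> nan without a warning
    x3 = (-b + np.sqrt(radical)) / (2 * a)
print(x3)                            # [ 1. nan]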
import pickle
import numpy as np
import torch
from torch import nn
from torch.nn import functional as F
import utils
class Model(nn.Module):
def __init__(self, size=4, alpha=1, n=100, d=2, sigma=1, half_empty=True):
super().__init__()
self.alpha = alpha
self.d = d
self.sigma = sigma
self.size = size
self.half_empty = half_empty
self.locs = nn.Parameter(torch.randn(size, n), requires_grad=False)
if half_empty:
self.p = nn.Parameter(torch.randn(1, size // 2) * 5.0)
else:
self.p = nn.Parameter(torch.randn(1, size) * 5.0)
self.q = nn.Parameter(torch.randn(1, size) * 5.0)
def forward(self):
log_K = -(utils.batch_pdist(self.locs, self.locs) / self.sigma) ** self.d # RBF
# log_K = -2.0 * torch.log(1 + utils.batch_pdist(self.locs, self.locs)) # polynomial
if self.half_empty:
p = torch.cat([F.softmax(self.p, dim=1), torch.zeros_like(self.p)], dim=1)
else:
p = F.softmax(self.p, dim=1)
q = F.softmax(self.q, dim=1)
log_pK = torch.logsumexp(log_K[None, ...] + torch.log(p[:, None, :]), dim=2)
log_qK = torch.logsumexp(log_K[None, ...] + torch.log(q[:, None, :]), dim=2)
rat1 = (log_pK, log_qK)
rat2 = (log_qK, log_pK)
if np.abs(self.alpha - 1.0) < 1e-8:
dp1 = (p * (rat1[0] - rat1[1])).sum(-1)
dp2 = (q * (rat2[0] - rat2[1])).sum(-1)
loss = 0.5 * (dp1 + dp2)
else:
power_pq = torch.log(p) + (self.alpha - 1) * (rat1[0] - rat1[1])
power_qp = torch.log(q) + (self.alpha - 1) * (rat2[0] - rat2[1])
loss = 0.5 * (1 / (self.alpha - 1)) * (torch.logsumexp(power_pq, -1) + torch.logsumexp(power_qp, -1))
return loss #, p, q, torch.exp(log_K)
if __name__ == '__main__':
torch.set_default_dtype(torch.float64)
losses = []
for n in [100, 5]:
for half_empty in [False, True]:
for size in [1000, 10]:
for alpha in [0.5, 1, 2]:
local_losses = []
print('n', n, 'alpha', alpha, 'size', size, 'half_empty', half_empty)
model = Model(alpha=alpha, size=size, half_empty=half_empty, n=n).cuda()
optimizer = torch.optim.Adam(model.parameters(), lr=1e-3, betas=(0.9, 0.999))
for itr in range(10000):
optimizer.zero_grad()
loss = model()
if itr % 100 == 0:
print(loss)
local_losses.append(loss.item())
loss.backward()
optimizer.step()
losses.append((half_empty, size, alpha, n, local_losses))
print()
with open('losses_fixed_%d.pk' %
|
np.random.randint(1000)
|
numpy.random.randint
|
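forward() above evaluates the mixture terms log(Kp) and log(Kq) with torch.logsumexp instead of multiplying the kernel by the weights and then taking the log. A minimal numpy demonstration of why that formulation is numerically safer; the kernel values are arbitrary:

import numpy as np

log_K = np.array([[-1000.0, -1001.0],
                  [-1001.0, -1000.0]])          # tiny kernel values, stored in log space
p = np.array([0.5, 0.5])

with np.errstate(divide='ignore'):
    naive = np.log(np.exp(log_K) @ p)           # exp underflows to 0 -> -inf
stable = np.logaddexp.reduce(log_K + np.log(p), axis=1)
print(naive)    # [-inf -inf]
print(stable)   # ~[-1000.38 -1000.38]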
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import csv
#hhids=[86, 59, 77, 26, 93, 101, 114, 171, 1086, 1403]
hhids=['1202', '871', '1103', '585', '59', '2755', '2233', '86', '114', '2575', '1700', '974', '1800',
'370', '187', '1169', '1718', '545', '94', '2018', '744', '2859', '2925', '484', '2953', '171', '2818', '1953',
'1697', '1463', '499', '1790', '1507', '1642', '93', '1632',
'1500', '2472', '2072', '2378', '1415', '2986', '1403', '2945', '77', '1792',
'624', '379', '2557', '890', '1192', '26', '2787', '2965', '2980', '434', '2829',
'503', '2532', '946', '2401', '1801','2337','1086','1714','1283','252','2814']
scenarios=["sb4b64","sb4b135","sb8b64","sb8b135","sb10b64","sb10b135","sb20b64","sb20b135"]
#compile
for j in scenarios:
nos_list=[]
nob_list=[]
nob_fg_list=[]
nob_tg_list=[]
rbc_list=[]
rbc_fg_list=[]
rbc_tg_list=[]
mpc_list=[]
mpc_fg_list=[]
mpc_tg_list=[]
print("In scenarios",j)
csvfile = open('roi_table_all/panel4{}.csv'.format(j),'w', newline='')
writer = csv.writer(csvfile, delimiter=',')
writer.writerow(["Home","no_solar","no_battery","NoB FG","NoB TG","RBC","RBC FG","RBC TG","MPC","MPC FG","MPC TG"])
for i in hhids:
#no solar no battery
#table = pd.read_csv('nopv_4/{}_2_nopv/{}.csv'.format(i,j))
if j[2]=='4':
table = pd.read_csv('nopv_4/{}_4_nopv/sb4b0.csv'.format(i))
if j[2]=='8':
table = pd.read_csv('nopv_4/{}_4_nopv/sb8b0.csv'.format(i))
if j[2]=='1':
table = pd.read_csv('nopv_4/{}_4_nopv/sb10b0.csv'.format(i))
if j[2]=='2':
table = pd.read_csv('nopv_4/{}_4_nopv/sb20b0.csv'.format(i))
nos = table['Base_Bill'].iloc[8735]-table['Base_Bill'].iloc[167]
print("no solar bill: ",nos)
nos_list.append(nos)
#solar no battery
if j[2]=='4':
table = pd.read_csv('nostorage_4/{}_4_nostorage/sb4b0.csv'.format(i))
if j[2]=='8':
table = pd.read_csv('nostorage_4/{}_4_nostorage/sb8b0.csv'.format(i))
if j[2]=='1':
table = pd.read_csv('nostorage_4/{}_4_nostorage/sb10b0.csv'.format(i))
if j[2]=='2':
table = pd.read_csv('nostorage_4/{}_4_nostorage/sb20b0.csv'.format(i))
nob = table['Base_Bill'].iloc[8735]-table['Base_Bill'].iloc[167]
print("no battery bill: ",nob)
nob_list.append(nob)
nob_fg = nos
nob_fg_list.append(nob_fg)
nob_tg = -1*(nob_fg-nob)
nob_tg_list.append(nob_tg)
#Baseline bill
table = pd.read_csv('rbc_4_bill/{}_4_rbc_bill/{}.csv'.format(i,j))
rbc = table['Bill'].iloc[-1]
rbc_fg = table['Total FG bill'].iloc[-1]
rbc_tg = table['Total TG bill'].iloc[-1]
        print("rbc bill: ",rbc)
rbc_list.append(rbc)
rbc_fg_list.append(rbc_fg)
rbc_tg_list.append(rbc_tg)
#MPC
table = pd.read_csv('mpc_4_par/{}_4_mpc_par/{}.csv'.format(i,j))
mpc = table['Bill'].iloc[-1]
mpc_fg = table['Total FG bill'].iloc[-1]
mpc_tg = table['Total TG bill'].iloc[-1]
print("mpc bill: ",mpc)
mpc_list.append(mpc)
mpc_fg_list.append(mpc_fg)
mpc_tg_list.append(mpc_tg)
print("\n")
writer.writerow([i,nos,nob,nob_fg,nob_tg,rbc,rbc_fg,rbc_tg,mpc,mpc_fg,mpc_tg])
writer.writerow(["mean",np.mean(nos_list),
|
np.mean(nob_list)
|
numpy.mean
|
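The script above takes each home's annual cost as the difference between two rows of the Base_Bill column, which suggests that column is cumulative over the simulated year. A hedged miniature of that calculation; the tiny DataFrame and the cumulative interpretation are illustrative assumptions, not taken from the real CSVs:

import pandas as pd

table = pd.DataFrame({'Base_Bill': [0.0, 1.5, 3.0, 10.0, 12.5]})  # toy cumulative bill per hour
start_row, end_row = 1, 4          # stand-ins for iloc[167] and iloc[8735]
annual_bill = table['Base_Bill'].iloc[end_row] - table['Base_Bill'].iloc[start_row]
print(annual_bill)                 # 11.0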
import cv2 as cv
import numpy as np
# cascade face detector
def face_detector(image):
face_cascade = cv.CascadeClassifier('./haarcascade_frontalface_default.xml')
gray = cv.cvtColor(image, cv.COLOR_BGR2GRAY)
faces = face_cascade.detectMultiScale(gray, 1.2, 3)
if len(faces) == 0:
return 0, 0, 0, 0
return faces[0]
if __name__ == '__main__':
# read video
video = cv.VideoCapture('input.avi')
ret, frame = video.read()
# get video frame's width and height
frame_width = int(video.get(3))
frame_height = int(video.get(4))
if ret:
# create VideoWriter object.The output will be stored in 'output.avi' file.
out = cv.VideoWriter(
'output.avi',
cv.VideoWriter_fourcc('M', 'J', 'P', 'G'),
10, (frame_width, frame_height)
)
# get 4 points of face rectangle
x, y, w, h = face_detector(frame)
# initialize Kalman filter parameters
# 4 state(F), 2 measurement(H) and 0 control
kalman = cv.KalmanFilter(4, 2, 0)
# a rudimentary constant speed model:
# x_t+1 = x_t + v_t
kalman.transitionMatrix = np.array([[1., 0., .1, 0.],
[0., 1., 0., .1],
[0., 0., 1., 0.],
[0., 0., 0., 1.]])
# measurement matrix (H)
kalman.measurementMatrix = 1. * np.eye(2, 4)
# gaussian distribution for process error (Q)
kalman.processNoiseCov = 1e-5 * np.eye(4, 4)
# gaussian distribution for measurement error (R)
kalman.measurementNoiseCov = 1e-3 * np.eye(2, 2)
kalman.errorCovPost = 1e-1 * np.eye(4, 4)
kalman.statePost = np.array([x + w / 2, y + h / 2, 0, 0], dtype='float64')
lw = w
lh = h
lx = x
ly = y
# do prediction, measurement and correction on each frame
while True:
ret, frame = video.read()
if not ret:
break
x, y, w, h = face_detector(frame)
lw = w if w != 0 else lw
lh = h if h != 0 else lh
ly = y if y != 0 else ly
lx = x if x != 0 else lx
# draw red rectangle of image detection
img = cv.rectangle(frame, (lx, ly), (lx + lw, ly + lh), (0, 0, 255), 3)
# prediction
predicted = kalman.predict()
# measurement
measurement =
|
np.array([x + w / 2, y + h / 2], dtype='float64')
|
numpy.array
|
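The tracker above runs a constant-velocity Kalman filter on the face centre, with the transition, measurement and noise matrices described in the comments. Here is the same predict/correct cycle written out in plain numpy so the roles of F, H, Q and R are explicit; the matrix values mirror the snippet and the measurement is made up:

import numpy as np

F = np.array([[1., 0., .1, 0.],
              [0., 1., 0., .1],
              [0., 0., 1., 0.],
              [0., 0., 0., 1.]])     # state transition (constant velocity, dt = 0.1)
H = np.eye(2, 4)                      # only the position is observed
Q = 1e-5 * np.eye(4)                  # process noise
R = 1e-3 * np.eye(2)                  # measurement noise
x = np.array([50., 40., 0., 0.])      # state [cx, cy, vx, vy]
P = 1e-1 * np.eye(4)

# predict
x = F @ x
P = F @ P @ F.T + Q
# correct with a (made-up) detected face centre
z = np.array([52., 41.])
S = H @ P @ H.T + R
K = P @ H.T @ np.linalg.inv(S)
x = x + K @ (z - H @ x)
P = (np.eye(4) - K @ H) @ P
print(np.round(x, 2))                 # position pulled towards the measurement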
import ctypes
from ctypes import *
from ctypes.util import find_library
import numpy as np
from numpy import ma
class bson_error_t(Structure):
_fields_ = [('domain', c_uint32),
('code', c_uint32),
('msg', c_char * 504)]
bson_error_ptr = POINTER(bson_error_t)
class bson_t(Structure):
_fields_ = [('flags', c_uint32),
('len', c_uint32),
('padding', c_byte * 120)]
bson_ptr = POINTER(bson_t)
bson_iter_t = c_byte * 80
bson_iter_ptr = POINTER(bson_iter_t)
class bson_writer_t(Structure):
pass
bson_writer_ptr = POINTER(bson_writer_t)
class bson_reader_t(Structure):
pass
bson_reader_ptr = POINTER(bson_reader_t)
libbson = cdll.LoadLibrary(find_library(
"/Users/emptysquare/.virtualenvs/c-driver/libbson/.libs/libbson-1.0.0.dylib"
))
libbson.bson_strdup.argtypes = [c_char_p]
libbson.bson_strdup.restype = c_char_p
libbson.bson_new.restype = bson_ptr
for type_name in ['double', 'int32']:
ctypes_type = getattr(ctypes, 'c_' + type_name)
# E.g., bool bson_append_int32 (bson_t *, char *key, int key_len, int32_t).
append = getattr(libbson, 'bson_append_' + type_name)
    append.argtypes = [bson_ptr, c_char_p, c_int, ctypes_type]
append.restype = c_bool
# E.g., int32_t bson_iter_int32 (bson_iter_t *).
get = getattr(libbson, 'bson_iter_' + type_name)
get.argtypes = [bson_iter_ptr]
get.restype = ctypes_type
libbson.bson_as_json.argtypes = [bson_ptr, POINTER(c_size_t)]
libbson.bson_as_json.restype = c_char_p
libbson.bson_new_from_json.argtypes = [c_char_p, c_ssize_t, bson_error_ptr]
libbson.bson_new_from_json.restype = bson_ptr
libbson.bson_destroy.argtypes = [bson_ptr]
libbson.bson_destroy.restype = None
libbson.bson_iter_init.argtypes = [bson_iter_ptr, bson_ptr]
libbson.bson_iter_init.restype = c_bool
libbson.bson_iter_key.argtypes = [bson_iter_ptr]
libbson.bson_iter_key.restype = c_char_p
libbson.bson_iter_type.argtypes = [bson_iter_ptr]
libbson.bson_iter_type.restype = c_int
libbson.bson_get_data.argtypes = [bson_ptr]
libbson.bson_get_data.restype = POINTER(c_uint8)
libbson.bson_new_from_data.argtypes = [POINTER(c_uint8), c_size_t]
libbson.bson_new_from_data.restype = bson_ptr
libbson.bson_new_from_buffer.argtypes = [POINTER(POINTER(c_uint8)),
POINTER(c_size_t),
c_void_p,
c_void_p]
libbson.bson_new_from_buffer.restype = bson_ptr
libbson.bson_init_static.argtypes = [bson_ptr, POINTER(c_uint8), c_size_t]
libbson.bson_init_static.restype = c_bool
libbson.bson_reader_new_from_data.argtypes = [POINTER(c_uint8), c_size_t]
libbson.bson_reader_new_from_data.restype = bson_reader_ptr
libbson.bson_reader_read.argtypes = [bson_reader_ptr, POINTER(c_bool)]
libbson.bson_reader_read.restype = bson_ptr
libbson.bson_writer_new.argtypes = [POINTER(POINTER(c_uint8)),
POINTER(c_size_t),
c_size_t,
c_void_p,
c_void_p]
libbson.bson_writer_new.restype = bson_writer_ptr
# TODO: Decimal128, plus other types defined in Monary but not here
NUMPY_TYPES = {
'objectid': np.dtype('<V12'), # TODO: modern way to express this?
    'bool': np.bool_,
'int8': np.int8,
'int16': np.int16,
'int32': np.int32,
'int64': np.int64,
'uint8': np.uint8,
'uint16': np.uint16,
'uint32': np.uint32,
'uint64': np.uint64,
'float32': np.float32,
'float64': np.float64,
'date': np.int64,
'timestamp': np.uint64,
}
BSON_TYPES = dict([v, k] for k, v in NUMPY_TYPES.items())
ITER_FUNCS = {
np.float64: libbson.bson_iter_double,
np.int32: libbson.bson_iter_int32,
# TODO: the rest
}
def from_bson_buffer(buf, buf_len, dtype, fields=None):
"""Convert from buffer of catenated BSON documents to NumPy array."""
ret = []
mask = []
dtype = np.dtype(dtype) # Convert from list of tuples if necessary.
field_offsets = dict((field, i) for i, field in enumerate(dtype.fields))
it = byref(bson_iter_t())
eof = c_bool()
reader = libbson.bson_reader_new_from_data(buf, buf_len)
assert reader
b = libbson.bson_reader_read(reader, byref(eof))
while b:
assert libbson.bson_iter_init(it, b)
row = []
row_mask = []
for field, field_type in dtype.fields.items():
# All entries in this row are masked out to begin.
row.append(field_type[0].type())
row_mask.append(1)
while libbson.bson_iter_next(it):
field = libbson.bson_iter_key(it)
if field in dtype.fields:
field_type = dtype.fields[field][0]
fn = ITER_FUNCS[field_type.type]
row[field_offsets[field]] = fn(it)
row_mask[field_offsets[field]] = 0
ret.append(tuple(row))
mask.append(tuple(row_mask))
b = libbson.bson_reader_read(reader, byref(eof))
return ma.array(ret, mask=mask, dtype=dtype)
def from_bson_array(buf, buf_len, dtype, fields=None):
"""Convert from BSON array like {"0": doc, "1": doc, ...} to NumPy array.
The MongoDB "find" command and others return batches of documents like:
{"firstBatch": {"0": doc, "1": doc, ...}}
Or, from the "getMore" command:
{"nextBatch": {"0": doc, "1": doc, ...}}
The batch element is a BSON array, which is like a document whose keys are
    ASCII decimal numbers. Pull each document from the array and add its fields
    to the resulting NumPy array, converted according to "dtype".
"""
ret = []
mask = []
dtype = np.dtype(dtype) # Convert from list of tuples if necessary.
field_offsets = dict((field, i) for i, field in enumerate(dtype.fields))
bson_array_doc = bson_t()
assert libbson.bson_init_static(byref(bson_array_doc), buf, buf_len)
array_it = byref(bson_iter_t())
assert libbson.bson_iter_init(array_it, byref(bson_array_doc))
it = byref(bson_iter_t())
while libbson.bson_iter_next(array_it):
assert libbson.bson_iter_type(array_it) == 0x3 # BSON document.
row = []
row_mask = []
for field, field_type in dtype.fields.items():
# All entries in this row are masked out to begin.
row.append(field_type[0].type())
row_mask.append(1)
assert libbson.bson_iter_recurse(array_it, it)
while libbson.bson_iter_next(it):
field = libbson.bson_iter_key(it)
if field in dtype.fields:
field_type = dtype.fields[field][0]
fn = getattr(libbson, 'bson_iter_' + BSON_TYPES[field_type.type])
row[field_offsets[field]] = fn(it)
row_mask[field_offsets[field]] = 0
ret.append(tuple(row))
mask.append(tuple(row_mask))
return
|
ma.array(ret, mask=mask, dtype=dtype)
|
numpy.ma.array
|
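Both converters above assemble a numpy masked array from per-row tuples, masking out fields that were missing from a BSON document. A tiny stand-alone example of that ma.array(mask=...) pattern with a structured dtype:

import numpy as np
from numpy import ma

dtype = np.dtype([('x', np.float64), ('n', np.int32)])
rows = [(1.5, 7), (0.0, 3)]      # the second row is missing field 'x'
mask = [(0, 0), (1, 0)]          # 1 marks a missing value
arr = ma.array(rows, mask=mask, dtype=dtype)
print(arr['x'])                  # [1.5 --]
print(arr['n'])                  # [7 3]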
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
import time
import numpy as np
from numpy.random import uniform
import theano
import theano.tensor as T
from theano.tensor.nnet import sigmoid
from theano.tensor import tanh
from theano.tensor import exp
# from theano.tensor.nnet.nnet import softmax  # operates on a 2-D matrix, row-wise.
from theano.tensor.extra_ops import Unique
from GRU import GruBasic
__docformat__ = 'restructedtext en'
def exe_time(func):
def new_func(*args, **args2):
t0 = time.time()
print("-- @%s, {%s} start" % (time.strftime("%X", time.localtime()), func.__name__))
back = func(*args, **args2)
print("-- @%s, {%s} end" % (time.strftime("%X", time.localtime()), func.__name__))
print("-- @%.3fs taken for {%s}" % (time.time() - t0, func.__name__))
return back
return new_func
def softmax(x):
    # take the softmax along the vertical direction (axis=0).
    # Theano's softmax operates on 2-D matrices row-wise; one step inside scan handles a (n,) vector,
    # which would raise an error there, so it is re-implemented here.
    # handling a (n,) vector along axis=0 is much more convenient.
e_x = exp(x - x.max(axis=0, keepdims=True))
out = e_x / e_x.sum(axis=0, keepdims=True)
return out
# 'Obo' is one by one: train on one sequence at a time.
# ======================================================================================================================
class OboSpatialGru(GruBasic):
def __init__(self, train, test, dist, alpha_lambda, n_user, n_item, n_dists, n_in, n_hidden):
super(OboSpatialGru, self).__init__(train, test, alpha_lambda, n_user, n_item, n_in, n_hidden)
        # train/test distance sequences after mask padding, i.e. the interval bin that the gap between idx=1 and idx=0 in the sequence falls into.
tra_dist_masks, tes_dist_masks, tra_dist_neg_masks = dist
self.tra_dist_masks = theano.shared(borrow=True, value=np.asarray(tra_dist_masks, dtype='int32'))
self.tes_dist_masks = theano.shared(borrow=True, value=np.asarray(tes_dist_masks, dtype='int32'))
self.tra_dist_neg_masks = theano.shared(borrow=True, value=np.asarray(tra_dist_neg_masks, dtype='int32'))
rang = 0.5
        ui = uniform(-rang, rang, (3, n_hidden, 2 * n_in))  # the input is 40-d, the hidden layer is 20-d.
self.ui = theano.shared(borrow=True, value=ui.astype(theano.config.floatX))
# params --------------------------------------------------------------------------
        # vector representations of the distance intervals, shape=(381,).
n_dist, dd = n_dists
self.dd = dd
        di = uniform(-rang, rang, (n_dist+1, n_in))  # one extra entry (the padding symbol), used to pad user purchase sequences / items that do not actually exist
self.di = theano.shared(borrow=True, value=di.astype(theano.config.floatX))
        # survival analysis: based on h_t, produce the probability of each distance interval at time t+1. softmax(v*h+b)
vs = uniform(-rang, rang, (n_dist+1, n_hidden)) # shape=(381, 20)
bs = np.zeros((n_dist+1, ), dtype=theano.config.floatX)
self.vs = theano.shared(borrow=True, value=vs.astype(theano.config.floatX))
self.bs = theano.shared(borrow=True, value=bs)
        # when computing a user's score for an item, besides interest there is also the distance difference; add a weight for the influence of the distance interval.
        wd = uniform(0, rang)  # a scalar; used as a weight when scoring items for users at test time.
self.wd = theano.shared(borrow=True, value=wd)
        # constrain the loss weights to sum to 1 by applying a softmax
loss_weight = uniform(-rang, rang, (2,))
self.loss_weight = theano.shared(borrow=True, value=loss_weight.astype(dtype=theano.config.floatX))
        # after training: --------------------------------------------------------------
        # trained vector representations of the distance intervals,
trained_dists = uniform(-rang, rang, (n_dist + 1, n_in))
self.trained_dists = theano.shared(borrow=True, value=trained_dists.astype(theano.config.floatX))
        # each user's probability over POIs across the distance-interval bins.
prob = uniform(-rang, rang, (n_user, n_item))
self.prob = theano.shared(borrow=True, value=prob.astype(theano.config.floatX))
# params:-----------------------------------------------------------------
self.params = [
self.ui, self.wh, self.bi,
self.vs, self.bs, self.wd, self.loss_weight]
self.l2_sqr = (
            T.sum(self.lt ** 2) +  # vector representations of the POIs
            T.sum(self.di ** 2) +  # vector representations of the distance intervals
T.sum([T.sum(param ** 2) for param in self.params]))
self.l2 = (
0.5 * self.alpha_lambda[1] * self.l2_sqr)
self.__theano_train__(n_in, n_hidden)
self.__theano_predict__(n_in, n_hidden)
def load_params(self, loaded_objects):
self.loss_weight.set_value(np.asarray(loaded_objects[0], dtype=theano.config.floatX), borrow=True)
self.wd.set_value(np.asarray(loaded_objects[1], dtype=theano.config.floatX), borrow=True)
self.lt.set_value(np.asarray(loaded_objects[2], dtype=theano.config.floatX), borrow=True)
self.di.set_value(np.asarray(loaded_objects[3], dtype=theano.config.floatX), borrow=True)
self.ui.set_value(np.asarray(loaded_objects[4], dtype=theano.config.floatX), borrow=True)
self.wh.set_value(np.asarray(loaded_objects[5], dtype=theano.config.floatX), borrow=True)
self.bi.set_value(np.asarray(loaded_objects[6], dtype=theano.config.floatX), borrow=True)
self.vs.set_value(np.asarray(loaded_objects[7], dtype=theano.config.floatX), borrow=True)
self.bs.set_value(np.asarray(loaded_objects[8], dtype=theano.config.floatX), borrow=True)
def s_update_neg_masks(self, tra_buys_neg_masks, tes_buys_neg_masks, tra_dist_neg_masks):
        # resample the negative samples every epoch
self.tra_buys_neg_masks.set_value(np.asarray(tra_buys_neg_masks, dtype='int32'), borrow=True)
self.tes_buys_neg_masks.set_value(np.asarray(tes_buys_neg_masks, dtype='int32'), borrow=True)
self.tra_dist_neg_masks.set_value(np.asarray(tra_dist_neg_masks, dtype='int32'), borrow=True)
def update_trained_dists(self):
        # update the final distance representations
di = self.di.get_value(borrow=True)
self.trained_dists.set_value(np.asarray(di, dtype=theano.config.floatX), borrow=True) # update
def update_prob(self, prob):
self.prob.set_value(np.asarray(prob, dtype=theano.config.floatX), borrow=True) # update
    def compute_sub_all_scores(self, start_end):  # plain numeric arguments could also be passed in directly
        # compute users x items: every user's score for all items (the padding symbol must be excluded)
sub_all_scores = T.dot(self.trained_users[start_end], self.trained_items[:-1].T) + \
self.wd * self.prob[start_end]
# sub_all_scores = (1.0 - self.wd) * T.dot(self.trained_users[start_end], self.trained_items[:-1].T) + \
# self.wd * self.prob[start_end]
return sub_all_scores.eval() # shape=(sub_n_user, n_item)
def __theano_train__(self, n_in, n_hidden):
"""
        Run over the training sequence once during the training phase.
"""
ui, wh = self.ui, self.wh
vs, bs = self.vs, self.bs
tra_mask = T.ivector()
        seq_length = T.sum(tra_mask)  # valid length
h0 = self.h0
bi = self.bi
xpidxs = T.ivector()
xqidxs = T.ivector()
dpidxs = T.ivector()
dqidxs = T.ivector()
xps = self.lt[xpidxs] # shape=(seq_length, n_in)
xqs = self.lt[xqidxs]
xds = self.di[dpidxs]
xs = T.concatenate((xps, xds), axis=1)
        pqs = T.concatenate((xpidxs, xqidxs))  # concatenate first
        uiq_pqs = Unique(False, False, False)(pqs)  # then deduplicate
        uiq_x = self.lt[uiq_pqs]  # the corresponding item features
uiq_ds = Unique(False, False, False)(dpidxs)
uiq_d = self.di[uiq_ds]
wd = self.wd
ls = softmax(self.loss_weight)
"""
        Given the positive/negative samples at time t and the hidden state at time t-1, compute the current hidden state and the current loss. The time index t is omitted in the formulas.
        (A small numpy sketch of this GRU step is given after this snippet.)
        # property: T.dot((m, n), (n, )) gives shape=(m, ), i.e. each matrix row is multiplied by the (n, ) vector
        # GRU
        z = sigmoid(ux_z * xp + wh_z * h_pre1)
        r = sigmoid(ux_r * xp + wh_r * h_pre1)
        c = tanh(ux_c * xp + wh_c * (r * h_pre1))    (element-wise product)
        h = z * h_pre1 + (1.0 - z) * c
        # property: T.dot((n, ), (n, )) gives a scalar
        upq = h_pre1 * (xp - xq)
        loss = log(1.0 + e^(-upq))
"""
def recurrence(x_t, xp_t1, xq_t1, dp_t1, dq_t1,
h_t_pre1):
            # hidden layer
z_r = sigmoid(T.dot(ui[:2], x_t) +
T.dot(wh[:2], h_t_pre1) + bi[:2])
z, r = z_r[0], z_r[1]
c = tanh(T.dot(ui[2], x_t) +
T.dot(wh[2], (r * h_t_pre1)) + bi[2])
h_t = (T.ones_like(z) - z) * h_t_pre1 + z * c
            # probability of each interval bin at the next time step
s_t = softmax(T.dot(vs, h_t) + bs) # shape=(381, )
            # loss: use the samples of the next time step.
            # compute_sub_all_scores would need to be changed accordingly...
upq_t = T.dot(h_t, xp_t1 - xq_t1) + wd * (s_t[dp_t1] - s_t[dq_t1])
# upq_t = (1.0 - wd) * T.dot(h_t, xp_t1 - xq_t1) + wd * (s_t[dp_t1] - s_t[dq_t1])
loss_t_bpr = T.log(sigmoid(upq_t))
# loss_t_bpr = T.log(sigmoid(upq_t))
            loss_t_sur = T.sum(s_t[:dp_t1 + 1]) - T.log(s_t[dp_t1])  # this cannot use the BPR form, the loss would become NaN.
            # s_t[:d_t + 1]: sum of the probabilities of all bins from bin 0 up to this distance-interval bin.
return [h_t, loss_t_sur, loss_t_bpr]
[h, loss_sur, loss_bpr], _ = theano.scan(
fn=recurrence,
sequences=[xs, xps[1:], xqs[1:], dpidxs[1:], dqidxs[1:]],
outputs_info=[h0, None, None],
n_steps=seq_length-1)
# ----------------------------------------------------------------------------
# cost, gradients, learning rate, l2 regularization
lr, l2 = self.alpha_lambda[0], self.alpha_lambda[1]
seq_l2_sq = T.sum([T.sum(par ** 2) for par in [xps, xqs, ui, wh, bi,
xds, vs, bs, wd, ls]])
sur = T.sum(loss_sur)
upq = - T.sum(loss_bpr)
los = ls[0] * sur + ls[1] * upq
seq_costs = (
los +
0.5 * l2 * seq_l2_sq)
seq_grads = T.grad(seq_costs, self.params)
seq_updates = [(par, par - lr * gra) for par, gra in zip(self.params, seq_grads)]
update_x = T.set_subtensor(uiq_x, uiq_x - lr * T.grad(seq_costs, self.lt)[uiq_pqs])
update_d = T.set_subtensor(uiq_d, uiq_d - lr * T.grad(seq_costs, self.di)[uiq_ds])
        seq_updates.append((self.lt, update_x))  # modified directly inside seq_updates
seq_updates.append((self.di, update_d))
# ----------------------------------------------------------------------------
        # given the positive/negative sample sequences and the other parameters, update the variables and return the loss.
        uidx = T.iscalar()  # T.iscalar() has type TensorType(int32, )
self.seq_train = theano.function(
inputs=[uidx],
outputs=[los, sur, upq, ls],
updates=seq_updates,
givens={
                xpidxs: self.tra_buys_masks[uidx],  # type is TensorType(int32, matrix)
                xqidxs: self.tra_buys_neg_masks[uidx],  # negative poi
                dpidxs: self.tra_dist_masks[uidx],  # distance between the two places, encoded as an interval index
dqidxs: self.tra_dist_neg_masks[uidx],
tra_mask: self.tra_masks[uidx]})
def __theano_predict__(self, n_in, n_hidden):
"""
        At test time, run over the training sequences again to obtain the hidden states. All data is processed at once to get every user's representation.
"""
ui, wh = self.ui, self.wh
vs = self.vs
tra_mask = T.imatrix()
actual_batch_size = tra_mask.shape[0]
        seq_length = T.max(T.sum(tra_mask, axis=1))  # use the maximum sequence length within the mini-batch as seq_length
        h0 = T.alloc(self.h0, actual_batch_size, n_hidden)  # shape=(n, 20)
        bi = T.alloc(self.bi, actual_batch_size, 3, n_hidden)  # shape=(n, 3, 20), original dims are placed last
        bi = bi.dimshuffle(1, 2, 0)  # shape=(3, 20, n)
        bs = T.alloc(self.bs, actual_batch_size, self.bs.shape[0])  # shape=(n, lmd[0])=(n, 1520)
        # the hidden layer is a single GRU unit: this unified format works in all cases.
pidxs = T.imatrix()
didxs = T.imatrix()
xps = self.trained_items[pidxs] # shape((actual_batch_size, seq_length, n_hidden))
xbs = self.trained_dists[didxs]
ps = T.concatenate((xps, xbs), axis=2)
ps = ps.dimshuffle(1, 0, 2) # shape=(seq_length, batch_size, n_in)
def recurrence(p_t, h_t_pre1):
            # features and hidden states are both shaped (batch_size, n_hidden)=(n, 20)
z_r = sigmoid(T.dot(ui[:2], p_t.T) +
T.dot(wh[:2], h_t_pre1.T) + bi[:2])
z, r = z_r[0].T, z_r[1].T # shape=(n, 20)
c = tanh(T.dot(ui[2], p_t.T) +
T.dot(wh[2], (r * h_t_pre1).T) + bi[2]) # shape=(20, n)
h_t = (T.ones_like(z) - z) * h_t_pre1 + z * c.T # shape=(n, 20)
return h_t
h, _ = theano.scan( # h.shape=(157, n, 20)
fn=recurrence,
sequences=ps,
outputs_info=h0,
n_steps=seq_length)
        # batch_hts.shape=(n, 20): the representation ht of every user in this batch.
        # T.sum() is required here, otherwise it cannot be built into the theano graph and raises a 'length not known' error
        hs = h.dimshuffle(1, 0, 2)  # shape=(batch_size, seq_length, n_hidden)
        hts = hs[  # shape=(n, n_hidden)
            T.arange(actual_batch_size),  # rows; fancy indexing a[[1,2,3],[2,5,6]] needs both row and column indices
            T.sum(tra_mask, axis=1) - 1]  # columns; the mask must be of dtype 'int32'
        # probabilities over the distance intervals for the next step.
sts = softmax(T.dot(vs, hts.T) + bs.T).T # shape=(n, 381)
        # givens feed in the data
start_end = T.ivector()
self.seq_predict = theano.function(
inputs=[start_end],
outputs=[hts, sts],
givens={
                pidxs: self.tra_buys_masks[start_end],  # type is TensorType(int32, matrix)
didxs: self.tra_dist_masks[start_end],
tra_mask: self.tra_masks[start_end]})
def train(self, idx):
# consider the whole user sequence as a mini-batch and perform one update per sequence
return self.seq_train(idx)
# Backup version. Still usable.
# ======================================================================================================================
class OboSpatialGruBackUp(GruBasic):
def __init__(self, train, test, dist, alpha_lambda, n_user, n_item, n_dist, n_in, n_hidden):
super(OboSpatialGruBackUp, self).__init__(train, test, alpha_lambda, n_user, n_item, n_in, n_hidden)
        # train/test distance sequences after mask padding, i.e. the interval bin that the gap between idx=1 and idx=0 in the sequence falls into.
tra_dist_masks, tes_dist_masks = dist
self.tra_dist_masks = theano.shared(borrow=True, value=np.asarray(tra_dist_masks, dtype='int32'))
self.tes_dist_masks = theano.shared(borrow=True, value=np.asarray(tes_dist_masks, dtype='int32'))
rang = 0.5
        ui = uniform(-rang, rang, (3, n_hidden, 2 * n_in))  # the input is 40-d, the hidden layer is 20-d.
self.ui = theano.shared(borrow=True, value=ui.astype(theano.config.floatX))
# params --------------------------------------------------------------------------
        # vector representations of the distance intervals, shape=(381,).
        di = uniform(-rang, rang, (n_dist+1, n_in))  # one extra entry (the padding symbol), used to pad user purchase sequences / items that do not actually exist
self.di = theano.shared(borrow=True, value=di.astype(theano.config.floatX))
        # survival analysis: based on h_t, produce the probability of each distance interval at time t+1. softmax(v*h+b)
vs = uniform(-rang, rang, (n_dist+1, n_hidden)) # shape=(381, 20)
bs = np.zeros((n_dist+1, ), dtype=theano.config.floatX)
self.vs = theano.shared(borrow=True, value=vs.astype(theano.config.floatX))
self.bs = theano.shared(borrow=True, value=bs)
        # when computing a user's score for an item, besides interest there is also the distance difference; add a weight for the influence of the distance interval.
wd =
|
uniform(-rang, rang)
|
numpy.random.uniform
|
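The training docstring in OboSpatialGru spells out the GRU gate equations for z, r, c and h. A compact numpy rendering of one step, following those formulas exactly; the dimensions and random weights are placeholders, not the model's real parameters:

import numpy as np

def sigmoid(a):
    return 1.0 / (1.0 + np.exp(-a))

rng = np.random.default_rng(0)
n_in, n_hidden = 4, 3
ux = rng.uniform(-0.5, 0.5, (3, n_hidden, n_in))        # input weights for z, r, c
wh = rng.uniform(-0.5, 0.5, (3, n_hidden, n_hidden))    # hidden weights for z, r, c
bi = np.zeros((3, n_hidden))

def gru_step(xp, h_pre1):
    z = sigmoid(ux[0] @ xp + wh[0] @ h_pre1 + bi[0])
    r = sigmoid(ux[1] @ xp + wh[1] @ h_pre1 + bi[1])
    c = np.tanh(ux[2] @ xp + wh[2] @ (r * h_pre1) + bi[2])
    return z * h_pre1 + (1.0 - z) * c

print(gru_step(rng.standard_normal(n_in), np.zeros(n_hidden)).shape)  # (3,)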
import unittest
import numpy as np
import h5py
import os
import fastpli.model.solver
import fastpli.objects
TMP_FILE = os.path.join(os.path.dirname(__file__), 'tmp.fastpli.test')
class MainTest(unittest.TestCase):
def setUp(self):
self._test_fiber = fastpli.objects.Fiber([[0, 0, 0, 1], [0, 0, 1, 2]])
self._test_fiberbundles = fastpli.objects.FiberBundles(self._test_fiber)
self.solver = fastpli.model.solver.Solver()
self.solver.fiber_bundles = self._test_fiberbundles
def test_dict(self):
d = self.solver.get_dict()
self.assertWarns(UserWarning, self.solver.set_dict, d)
def test_number_of_fibers(self):
self.solver.drag = 0
self.solver.obj_min_radius = 0
self.solver.obj_mean_length = 0
self.solver.step()
fbs = self.solver.fiber_bundles
self.assertTrue(fbs[0][0].shape[0] == 2)
self.assertTrue(np.array_equal(self._test_fiber, fbs[0][0]))
def test_set_fiber_bundle(self):
self.solver.fiber_bundles = [[np.array([[0, 0, 0, 1], [0, 0, 2, 3]])]]
fbs = self.solver.fiber_bundles
self.assertTrue(isinstance(fbs, fastpli.objects.FiberBundles))
self.solver.fiber_bundles = self._test_fiberbundles
fbs = self.solver.fiber_bundles
self.assertTrue(isinstance(fbs, fastpli.objects.FiberBundles))
def test_split(self):
self.solver.drag = 0
self.solver.obj_min_radius = 0
self.solver.obj_mean_length = 0.5
self.solver.step()
fbs = self.solver.fiber_bundles
self.assertTrue(fbs[0][0].shape[0] == 3)
self.assertTrue(fbs[0][0][1, -1] == 1.5)
fbs = self.solver.apply_boundary_conditions(n_max=1)
self.assertTrue(fbs[0][0].shape[0] == 3)
self.assertTrue(np.isclose(fbs[0][0][1, -1], 1.5))
def test_combine(self):
self.fiber = np.array([[0, 0, 0, 1], [0, 0, 1, 1], [0, 0, 2, 1]])
self._test_fiberbundles = [[self._test_fiber]]
self.solver.fiber_bundles = self._test_fiberbundles
self.solver.drag = 0
self.solver.obj_min_radius = 0
self.solver.obj_mean_length = 2
self.solver.step()
fbs = self.solver.fiber_bundles
self.assertTrue(fbs[0][0].shape[0] == 2)
self.solver.drag = 0
self.solver.obj_min_radius = 0
self.solver.obj_mean_length = 20
self.solver.step()
fbs = self.solver.fiber_bundles
self.assertTrue(fbs[0][0].shape[0] == 2)
def test_fibers(self):
self.solver.drag = 0
self.solver.obj_min_radius = 0
self.solver.obj_mean_length = 2
fiber_0 = np.array([[0, 0, 0, 1], [0, 0, 1, 2]])
fiber_1 = np.array([[0, 0, 0.1, 1], [0, 0, 1.1, 2]])
self.solver.fiber_bundles = [[fiber_0, fiber_1]]
self.solver.step()
fbs = self.solver.fiber_bundles
self.assertFalse(np.array_equal(fiber_0[:, :3], fbs[0][0][:, :3]))
self.assertTrue(np.array_equal(fiber_0[:, -1], fbs[0][0][:, -1]))
self.assertFalse(np.array_equal(fiber_1[:, :3], fbs[0][1][:, :3]))
self.assertTrue(np.array_equal(fiber_1[:, -1], fbs[0][1][:, -1]))
def test_fiber_bundles(self):
self.solver.drag = 0
self.solver.obj_min_radius = 0
self.solver.obj_mean_length = 2
fiber_0 = np.array([[0, 0, 0, 1], [0, 0, 1, 2]])
fiber_1 = np.array([[0, 0, 0.1, 1], [0, 0, 1.1, 2]])
self.solver.fiber_bundles = [[fiber_0], [fiber_1]]
self.solver.step()
fbs = self.solver.fiber_bundles
self.assertFalse(np.array_equal(fiber_0[:, :3], fbs[0][0][:, :3]))
self.assertTrue(np.array_equal(fiber_0[:, -1], fbs[0][0][:, -1]))
self.assertFalse(np.array_equal(fiber_1[:, :3], fbs[1][0][:, :3]))
self.assertTrue(np.array_equal(fiber_1[:, -1], fbs[1][0][:, -1]))
def test_col_voi(self):
self.solver.drag = 0
self.solver.obj_min_radius = 0
self.solver.obj_mean_length = 2
fiber_0 = np.array([[0, 0, 0, 1], [0, 0, 1, 2]])
fiber_1 =
|
np.array([[0, 0, 0.1, 1], [0, 0, 1.1, 2]])
|
numpy.array
|
# Copyright (c) 2021 zfit
import itertools
import platform
from collections import OrderedDict
import numpy as np
import pytest
import scipy.optimize
from ordered_set import OrderedSet
import zfit.minimizers.optimizers_tf
from zfit.minimizers.base_tf import WrapOptimizer
from zfit.util.exception import OperationNotAllowedError
true_mu = 4.5
true_sigma = 2
true_lambda = -0.03
parameter_tol = 0.1
max_distance_to_min = 2.5
def create_loss(obs1):
mu_param = zfit.Parameter("mu", true_mu - 2.5, -5., 9.,
step_size=0.03
)
sigma_param = zfit.Parameter("sigma", true_sigma * 0.3, 0.01, 10,
step_size=0.03)
lambda_param = zfit.Parameter("lambda", true_lambda * 0.3, -0.5, -0.0003,
step_size=0.001)
gauss1 = zfit.pdf.Gauss(mu=mu_param, sigma=sigma_param, obs=obs1)
exp1 = zfit.pdf.Exponential(lam=lambda_param, obs=obs1)
sum_pdf1 = zfit.pdf.SumPDF([gauss1, exp1], 0.8)
# load params for sampling
with mu_param.set_value(true_mu):
with sigma_param.set_value(true_sigma):
with lambda_param.set_value(true_lambda):
sampled_data = sum_pdf1.create_sampler(n=25000)
sampled_data.resample()
loss = zfit.loss.UnbinnedNLL(model=sum_pdf1, data=sampled_data, options={'subtr_const': True})
minimum = loss.value().numpy()
return loss, minimum, (mu_param, sigma_param, lambda_param)
verbosity = None
def make_min_grad_hesse():
minimizers = [
zfit.minimize.ScipyTruncNCV1,
# zfit.minimize.ScipyTrustNCGV1, # Too bad
# zfit.minimize.ScipyTrustKrylovV1, # Too bad
zfit.minimize.ScipySLSQPV1,
zfit.minimize.ScipyLBFGSBV1,
zfit.minimize.ScipyTrustConstrV1,
]
min_options = []
for opt in minimizers:
grad = opt._VALID_SCIPY_GRADIENT
hess = opt._VALID_SCIPY_HESSIAN
if not grad:
grad = {None} # the default is None, so this will skip it
if not hess:
hess = {None}
product = itertools.product([opt], grad, hess)
min_options.extend(product)
return min_options
@pytest.mark.parametrize('minimizer_gradient_hessian', make_min_grad_hesse())
@pytest.mark.flaky(reruns=3)
def test_scipy_derivative_options(minimizer_gradient_hessian):
minimizer_cls, gradient, hessian = minimizer_gradient_hessian
loss, true_min, params = create_loss(obs1=obs1)
(mu_param, sigma_param, lambda_param) = params
kwargs = {}
if gradient is not None:
kwargs['gradient'] = gradient
if hessian is not None:
kwargs['hessian'] = hessian
try:
minimizer = minimizer_cls(**kwargs)
except ValueError as error: # we test a not allowed combination
if 'Whenever the gradient is estimated via finite-differences' in error.args[0]:
return
else:
raise
result = minimizer.minimize(loss=loss)
assert result.valid
found_min = loss.value().numpy()
assert true_min + max_distance_to_min >= found_min
aval, bval, cval = zfit.run((mu_param, sigma_param, lambda_param))
assert true_mu == pytest.approx(aval, abs=parameter_tol)
assert true_sigma == pytest.approx(bval, abs=parameter_tol)
assert true_lambda == pytest.approx(cval, abs=parameter_tol)
assert result.converged
do_errors_most = False
minimizers = [
# minimizers, minimizer_kwargs, do error estimation
# TensorFlow minimizers
# (zfit.minimizers.optimizers_tf.WrapOptimizer, dict(optimizer=tf.keras.optimizers.Adam(learning_rate=0.05)),
# False),
(zfit.minimize.Adam, dict(learning_rate=0.05, verbosity=verbosity, tol=0.00001), False), # works
# Minuit minimizer
(zfit.minimize.Minuit, {"verbosity": verbosity}, {'error': True, 'longtests': True}), # works
# Ipyopt minimizer
# TensorFlow Probability minimizer
# (BFGS, {}, True), # doesn't work as it uses the graph, violates assumption in minimizer
# SciPy Minimizer
(zfit.minimize.ScipyLBFGSBV1, {"verbosity": verbosity}, {'error': True,
'numgrad': False, 'approx': True}),
# (zfit.minimize.ScipyTrustNCGV1, {'tol': 1e-5, "verbosity": verbosity}, True),
# (zfit.minimize.ScipyTrustKrylovV1, {"verbosity": verbosity}, True), # Too unstable
(zfit.minimize.ScipyTrustConstrV1, {"verbosity": verbosity, },
{'error': True, 'longtests': bool(zfit.run.get_graph_mode())}),
(zfit.minimize.ScipyPowellV1, {"verbosity": verbosity, }, {'error': do_errors_most}),
(zfit.minimize.ScipySLSQPV1, {"verbosity": verbosity, }, {'error': do_errors_most}),
# (zfit.minimize.ScipyCOBYLAV1, {"verbosity": verbosity, }, {'error': do_errors_most}), # Too bad
# (zfit.minimize.ScipyDoglegV1, {'tol': 1e-5, "verbosity": verbosity}, do_errors_most), # works badly
# (zfit.minimize.ScipyNewtonCGV1, {"verbosity":verbosity,}, {'error': do_errors_most}), # Too sensitive? Fails in line-search?
(zfit.minimize.ScipyTruncNCV1, {"verbosity": verbosity, }, {'error': do_errors_most}),
# NLopt minimizer
(zfit.minimize.NLoptLBFGSV1, {"verbosity": verbosity, },
{'error': True, 'longtests': bool(zfit.run.get_graph_mode())}),
(zfit.minimize.NLoptTruncNewtonV1, {"verbosity": verbosity, }, {'error': do_errors_most}),
(zfit.minimize.NLoptSLSQPV1, {"verbosity": verbosity, }, {'error': do_errors_most}),
(zfit.minimize.NLoptMMAV1, {"verbosity": verbosity, }, {'error': do_errors_most}),
(zfit.minimize.NLoptCCSAQV1, {"verbosity": verbosity, }, {'error': do_errors_most}),
(zfit.minimize.NLoptSubplexV1, {"verbosity": verbosity, }, {'error': do_errors_most}),
(zfit.minimize.NLoptCOBYLAV1, {"verbosity": verbosity, }, {'error': do_errors_most}),
(zfit.minimize.NLoptMLSLV1, {"verbosity": verbosity, }, {'error': do_errors_most}),
(zfit.minimize.NLoptStoGOV1, {"verbosity": verbosity, }, {'error': do_errors_most}),
(zfit.minimize.NLoptBOBYQAV1, {"verbosity": verbosity, }, {'error': do_errors_most}),
(zfit.minimize.NLoptISRESV1, {"verbosity": verbosity, }, {'error': do_errors_most}),
(zfit.minimize.NLoptESCHV1, {"verbosity": verbosity, }, {'error': do_errors_most}),
(zfit.minimize.NLoptShiftVarV1, {"verbosity": verbosity, }, {'error': do_errors_most}),
# (zfit.minimize.Scipy, {'tol': 1e-8, 'algorithm': 'CG'}, False),
# (zfit.minimize.Scipy, {'tol': 1e-8, 'algorithm': 'BFGS'}, False), # too bad
# (zfit.minimize.NLopt, {'tol': 1e-8, 'algorithm': nlopt.LN_NELDERMEAD}, True), # performs too bad
]
# minimizers = [(zfit.minimize.Minuit, {"verbosity": verbosity, 'gradient': True}, {'error': True, 'longtests': True})]
# minimizers = [(zfit.minimize.IpyoptV1, {'verbosity': 7}, True)]
# minimizers = [(zfit.minimize.ScipyLBFGSBV1, {'verbosity': 7}, True)]
# minimizers = [(zfit.minimize.ScipyPowellV1, {'verbosity': 7}, True)]
# minimizers = [(zfit.minimize.ScipySLSQPV1, {'verbosity': 7}, True)]
# minimizers = [(zfit.minimize.ScipyNelderMeadV1, {'verbosity': 7}, True)]
# minimizers = [(zfit.minimize.ScipyCOBYLAV1, {'verbosity': 7}, True)]
# minimizers = [(zfit.minimize.ScipyNewtonCGV1, {'verbosity': 7}, True)]
# minimizers = [(zfit.minimize.ScipyTrustNCGV1, {'tol': 1e-3, 'verbosity': 7}, True)]
# minimizers = [(zfit.minimize.ScipyTruncNCV1, {'tol': 1e-5, 'verbosity': 7}, True)]
# minimizers = [(zfit.minimize.ScipyDoglegV1, {'tol': 1e3, 'verbosity': 7}, True)]
# minimizers = [(zfit.minimize.ScipyTrustConstrV1, {'tol': 1e-5, 'verbosity': 7}, True)]
# minimizers = [(zfit.minimize.ScipyTrustKrylovV1, {'verbosity': 7}, True)]
# minimizers = [(zfit.minimize.NLoptLBFGSV1, {'verbosity': 7}, {'error': True, 'longtests': True})]
# minimizers = [(zfit.minimize.NLoptTruncNewtonV1, {'verbosity': 7}, True)]
# minimizers = [(zfit.minimize.NLoptSLSQPV1, {'verbosity': 7}, True)]
# minimizers = [(zfit.minimize.NLoptMMAV1, {'verbosity': 7}, True)]
# minimizers = [(zfit.minimize.NLoptCCSAQV1, {'verbosity': 7}, True)]
# minimizers = [(zfit.minimize.NLoptMLSLV1, {'verbosity': 7}, {'error': True, 'longtests': True})]
# minimizers = [(zfit.minimize.NLoptStoGOV1, {'verbosity': 7}, {'error': True, 'longtests': True})] # DOESN'T WORK!
# minimizers = [(zfit.minimize.NLoptSubplexV1, {'verbosity': 7}, {'error': True, 'longtests': True})]
# minimizers = [(zfit.minimize.NLoptESCHV1, {'verbosity': 7}, {'error': True, 'longtests': True})]
# minimizers = [(zfit.minimize.NLoptISRESV1, {'verbosity': 7}, {'error': True, 'longtests': True})]
# minimizers = [(zfit.minimize.NLoptBOBYQAV1, {'verbosity': 7}, {'error': True, 'longtests': True})]
# minimizers = [(zfit.minimize.NLoptShiftVarV1, {'verbosity': 7, 'rank': 2}, {'error': True, 'longtests': True})]
# minimizers = [(zfit.minimize.Minuit, {'verbosity': 6}, True)]
# minimizers = [(zfit.minimize.BFGS, {'verbosity': 6}, True)]
minimizers_small = [
(zfit.minimize.NLoptLBFGSV1, {}, True),
(zfit.minimize.ScipyTrustConstrV1, {}, True),
(zfit.minimize.Minuit, {}, True),
]
if platform.system() not in ('Darwin', 'Windows'): # TODO: Ipyopt installation on macosx not working
minimizers_small.append((zfit.minimize.IpyoptV1, {}, False))
minimizers.append((zfit.minimize.IpyoptV1, {"verbosity": verbosity}, {'error': True, 'longtests': True}))
# sort for xdist: https://github.com/pytest-dev/pytest-xdist/issues/432
minimizers = sorted(minimizers, key=lambda val: repr(val))
minimizers_small = sorted(minimizers_small, key=lambda val: repr(val))
obs1 = zfit.Space(obs='obs1', limits=(-2.4, 9.1))
obs1_split = (zfit.Space(obs='obs1', limits=(-2.4, 1.3))
+ zfit.Space(obs='obs1', limits=(1.3, 2.1))
+ zfit.Space(obs='obs1', limits=(2.1, 9.1)))
def test_floating_flag():
obs = zfit.Space("x", limits=(-2, 3))
mu = zfit.Parameter("mu", 1.2, -4, 6)
sigma = zfit.Parameter("sigma", 1.3, 0.1, 10)
sigma.floating = False
gauss = zfit.pdf.Gauss(mu=mu, sigma=sigma, obs=obs)
normal_np = np.random.normal(loc=2., scale=3., size=10000)
data = zfit.Data.from_numpy(obs=obs, array=normal_np)
nll = zfit.loss.UnbinnedNLL(model=gauss, data=data)
minimizer = zfit.minimize.Minuit()
result = minimizer.minimize(nll, params=[mu, sigma])
assert list(result.params.keys()) == [mu]
assert sigma not in result.params
@pytest.mark.parametrize("params", [
# np.random.normal(size=5),
[1.4, 0.6, 1.5],
{'value': [1.4, 0.6, 1.5],
'lower': np.ones(3) * (-5),
'upper': np.ones(3) * (9),
'step_size':
|
np.linspace(0.1, 0.2, 3)
|
numpy.linspace
|
import os
from os.path import dirname
import numpy as np
from pystella.rf.rad_func import MagAB2Flux, Flux2MagAB
from pystella.util.phys_var import phys
__author__ = 'bakl'
# see bands: http://svo2.cab.inta-csic.es/theory/fps3/index.php?mode=browse&gname=GALEX
# see personal page <NAME>: http://www.mso.anu.edu.au/~brad/filters.html
class Band(object):
IsLoad = False
Cache = dict()
Alias = None
FileFilters = 'filters.ini'
FileSettings = 'settings.ini'
NameBol = 'bol'
NameUBVRI = 'ubvri'
NameBolQuasi = 'bolq'
NameZp = 'zp'
NameJy = 'Jy'
DirRoot = os.path.join(dirname(dirname(dirname(os.path.realpath(__file__)))), 'data/bands')
def __init__(self, name=None, fname=None, zp=None, jy=None, is_load=False):
"""Creates a band instance. Required parameters: name and file."""
self._fwhm = None
self.name = name
self._fname = fname # location of the filter response
self._zp = zp # zero points for mag
self._jy = jy # zero points for flux [Jansky] # 1 Jy = 1.51e7 photons sec^-1 m^-2 (dlambda/lambda)^-1
self.sync_zp()
self.__freq = None # frequencies of response [Hz]
self.__wl = None # wavelength of response [cm]
self._resp = None # response
self._is_load = False
self._wl_eff = None # the effective wavelength
self._norm = None
self._normWl = None
if is_load: # and os.path.isfile(self.fname):
self.load()
@property
def is_load(self):
return self._is_load
@property
def fname(self):
return self._fname
@property
def zp(self):
if self._zp is None:
return 0.
return self._zp
@property
def is_zp(self):
return self._zp is not None
@property
def Jy(self):
if self._jy is None:
return 0.
return self._jy
@property
def is_Jy(self):
return self._jy is not None
@property
def freq(self):
return self.__freq
@property
def resp_fr(self):
return self._resp[::-1]
@property
def resp_wl(self):
return self._resp
@property
def wl(self):
return self.__wl
@property
def wl2args(self):
return self.__wl * phys.cm_to_angs
@property
def wlrange(self):
return np.min(self.wl),
|
np.max(self.wl)
|
numpy.max
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A simple MNIST classifier which displays summaries in TensorBoard.
This is an unimpressive MNIST model, but it is a good example of using
tf.name_scope to make a graph legible in the TensorBoard graph explorer, and of
naming summary tags so that they are grouped meaningfully in TensorBoard.
It demonstrates the functionality of every TensorBoard dashboard.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import matplotlib.pyplot as plt
import matplotlib, seaborn
import numpy
import scipy.stats
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
import os, sys
from tfutil import listify, log, restore_latest
from tfutil_deprecated import LayerManager
from mnist_basic import classifier
flags = tf.app.flags
FLAGS = flags.FLAGS
flags.DEFINE_integer('max_steps', 100000, 'Number of steps to run trainer.')
flags.DEFINE_float('learning_rate', 0.001, 'Initial learning rate.')
flags.DEFINE_string('data_dir', '/tmp/data', 'Directory for storing data')
flags.DEFINE_string('summaries_dir', '/tmp/mnist_vae/logs', 'Summaries directory')
flags.DEFINE_string('train_dir', '/tmp/mnist_vae/save', 'Saves directory')
flags.DEFINE_string('viz_dir', '/tmp/mnist_vae/viz', 'Viz directory')
SOURCE_URL = 'http://yann.lecun.com/exdb/mnist/'
IMAGE_SIZE = 28
IMAGE_AREA = IMAGE_SIZE*IMAGE_SIZE
SEED = 66478 # Set to None for random seed.
BATCH_SIZE = 64
PRIOR_BATCH_SIZE = 10
NUM_RUNS_FOR_ENTROPY_ESTIMATES = 100
TRAIN = True
BNAE = False
CONV = False
IND_ERROR = False # encourage normality of Q(z|X) across entire training set
# LATENT_DIM = 20
LATENT_DIM = 2
NUM_HIDDEN_LAYERS = 2
HIDDEN_LAYER_SIZE = 500
if CONV:
small_image_size = IMAGE_SIZE // 4
small_image_area = small_image_size * small_image_size
HIDDEN_LAYER_SIZE = (HIDDEN_LAYER_SIZE // small_image_area) * small_image_area
def stat_summary(a):
a = numpy.array(a)
return [numpy.mean(a <= 0.0), numpy.mean(a <= 0.25), numpy.mean(a <= 0.5), numpy.mean(a <= 1.0), numpy.mean(a<=2.0)]
#return [numpy.min(a), numpy.percentile(a, 25), numpy.percentile(a, 50), numpy.percentile(a, 75), numpy.max(a)]
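# Small illustrative check of stat_summary (added for clarity): it reports the fraction
# of entries at or below 0, 0.25, 0.5, 1.0 and 2.0 respectively.
def _stat_summary_example():
    # for [0.1, 0.6, 1.5, 3.0] the expected output is [0.0, 0.25, 0.25, 0.5, 0.75]
    return stat_summary([0.1, 0.6, 1.5, 3.0])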
def train():
print("\nSource code of training file {}:\n\n{}".format(__file__, open(__file__).read()))
log('loading MNIST')
# Import data
mnist = input_data.read_data_sets(FLAGS.data_dir, one_hot=False)
TRAIN_SIZE=mnist.train.images.shape[0]
lm = LayerManager(forward_biased_estimate=False)
lm_classifier = LayerManager(forward_biased_estimate=False, is_training=False)
batch = tf.Variable(0)
prior_batch_size = tf.placeholder(tf.int64, [])
with tf.name_scope('input'):
all_train_data_initializer = tf.placeholder(tf.float32, [TRAIN_SIZE, IMAGE_AREA])
all_train_data = tf.Variable(all_train_data_initializer, trainable=False, collections=[])
random_training_example = tf.train.slice_input_producer([all_train_data])
training_batch = tf.train.batch([random_training_example], batch_size=BATCH_SIZE, enqueue_many=True)
fed_input_data = tf.placeholder(tf.float32, [None, IMAGE_AREA])
def id_act(z):
return z
def log_std_act(z):
return tf.clip_by_value(z, -5.0, 5.0)
def double_relu(z):
return [tf.nn.relu(z), tf.nn.relu(-z)]
default_act = tf.nn.relu # double_relu
do_bn = dict(bn=False)
def encoder(data):
last = data - 0.5
if CONV:
last = tf.reshape(last, [-1, IMAGE_SIZE, IMAGE_SIZE, 1])
last = lm.conv_layer(last, 3, 3, 32, 'encoder/hidden/conv0', act=default_act, **do_bn)
last = lm.conv_layer(last, 3, 3, 32, 'encoder/hidden/conv1', act=default_act, **do_bn)
last = lm.conv_layer(last, 3, 3, 32, 'encoder/hidden/conv2', act=default_act, **do_bn)
last = lm.max_pool(last, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
last = lm.conv_layer(last, 3, 3, 64, 'encoder/hidden/conv3', act=default_act, **do_bn)
last = lm.conv_layer(last, 3, 3, 64, 'encoder/hidden/conv4', act=default_act, **do_bn)
last = lm.conv_layer(last, 3, 3, 64, 'encoder/hidden/conv5', act=default_act, **do_bn)
last = lm.max_pool(last, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
shape = last.get_shape().as_list()
last = tf.reshape(last, [-1, shape[1] * shape[2] * shape[3]])
for i in range(NUM_HIDDEN_LAYERS):
last = lm.nn_layer(last, HIDDEN_LAYER_SIZE, 'encoder/hidden/fc{}'.format(i), act=default_act, **do_bn)
if BNAE:
latent_mean = lm.nn_layer(last, LATENT_DIM, 'latent/mean', act=id_act, **do_bn) #, bias=False, scale=False)
else:
latent_mean = lm.nn_layer(last, LATENT_DIM, 'latent/mean', act=id_act, **do_bn)
latent_log_std = lm.nn_layer(last, LATENT_DIM, 'latent/log_std', act=log_std_act, **do_bn)
return latent_mean, latent_log_std
def decoder(code):
last = code
for i in range(NUM_HIDDEN_LAYERS):
last = lm.nn_layer(last, HIDDEN_LAYER_SIZE, 'decoder/hidden/fc{}'.format(i), act=default_act, **do_bn)
if CONV:
last_num_filters = HIDDEN_LAYER_SIZE // ((IMAGE_SIZE // 4)*(IMAGE_SIZE // 4))
last = [tf.reshape(val, [-1, IMAGE_SIZE // 4, IMAGE_SIZE // 4, last_num_filters]) for val in listify(last)]
last = lm.conv_layer(last, 3, 3, 64, 'decoder/hidden/conv0', act=default_act, **do_bn)
last = [tf.image.resize_images(val, IMAGE_SIZE // 2, IMAGE_SIZE // 2) for val in listify(last)]
last = lm.conv_layer(last, 3, 3, 32, 'decoder/hidden/conv1', act=default_act, **do_bn)
last = lm.conv_layer(last, 3, 3, 32, 'decoder/hidden/conv2', act=default_act, **do_bn)
last = lm.conv_layer(last, 3, 3, 32, 'decoder/hidden/conv3', act=default_act, **do_bn)
last = [tf.image.resize_images(val, IMAGE_SIZE, IMAGE_SIZE) for val in listify(last)]
last = lm.conv_layer(last, 3, 3, 8, 'decoder/hidden/conv4', act=default_act, **do_bn)
last = lm.conv_layer(last, 3, 3, 8, 'decoder/hidden/conv5', act=default_act, **do_bn)
output_mean_logit = lm.conv_layer(last, 3, 3, 1, 'output/mean', act=id_act, **do_bn)
output_log_std = lm.conv_layer(last, 3, 3, 1, 'output/log_std', act=log_std_act, **do_bn)
output_mean_logit = tf.reshape(output_mean_logit, [-1, IMAGE_SIZE*IMAGE_SIZE])
output_log_std = tf.reshape(output_log_std, [-1, IMAGE_SIZE*IMAGE_SIZE])
else:
output_mean_logit = lm.nn_layer(last, IMAGE_SIZE*IMAGE_SIZE, 'output/mean', act=id_act, **do_bn)
output_log_std = lm.nn_layer(last, IMAGE_SIZE*IMAGE_SIZE, 'output/log_std', act=log_std_act, **do_bn)
return output_mean_logit, output_log_std
def full_model(data):
latent_mean, latent_log_std = encoder(data)
if BNAE:
latent = latent_mean
else:
latent = lm.reparam_normal_sample(latent_mean, latent_log_std, 'latent/sample')
output_mean_logit, output_log_std = decoder(latent)
output_mean = tf.nn.sigmoid(output_mean_logit)
with tf.name_scope('likelihood_bound'):
minus_kl = 0.5 * tf.reduce_sum(
1.0 + 2.0 * latent_log_std - tf.square(latent_mean) - tf.exp(2.0 * latent_log_std),
reduction_indices=[1])
# Normal
# reconstruction_error = tf.reduce_sum(
# -0.5 * numpy.log(2 * numpy.pi) - output_log_std - 0.5 * tf.square(output_mean - data) / tf.exp(
# 2.0 * output_log_std), reduction_indices=[1])
# Laplace
# reconstruction_error = tf.reduce_sum(-numpy.log(2.0) - output_log_std - abs(output_mean-data)/tf.exp(output_log_std), reduction_indices=[1])
# Cross Entropy
reconstruction_error = -tf.reduce_sum(tf.nn.sigmoid_cross_entropy_with_logits(output_mean_logit, data), reduction_indices=[1])
with tf.name_scope('total'):
likelihood_bound = tf.reduce_mean(minus_kl + reconstruction_error)
lm.summaries.scalar_summary('likelihood bound', likelihood_bound) # Easier to parse graphs if giant negative values of first few iterations are omitted
# likelihood_bound = tf.reduce_mean(tf.clip_by_value(tf.cast(batch, tf.float32)/10000.0 - 2.0, 0.0, 1.0)*minus_kl + reconstruction_error)
with tf.name_scope('error'):
squared_error = tf.reduce_mean(tf.square(data - output_mean))
lm.summaries.scalar_summary('squared_error', squared_error)
with tf.name_scope('independence_error'):
num_normal_constraints = 20*LATENT_DIM # Who knows what this should be
unit = tf.nn.l2_normalize(tf.random_normal((LATENT_DIM, num_normal_constraints)), 0)
z = tf.matmul(latent, unit) # random orthogonal projection of latent
center = tf.truncated_normal([num_normal_constraints])
width = 0.4 # Who knows what this should be
g = tf.nn.tanh((z - center) / (width/2))*tf.exp(tf.square(center)/2) # any family of univariate functions of z
gprime, = tf.gradients(g, z)
# zero for all g iff z is unit normal by Stein's Lemma
stein_lemma_err = tf.reduce_mean(z * g - gprime, reduction_indices=[0], keep_dims=True)
#ind_err = tf.squeeze(tf.matmul(tf.nn.softmax(0.1*abs(stein_lemma_err)), tf.square(stein_lemma_err), transpose_b=True))
ind_err = tf.sqrt(tf.cast(tf.shape(latent)[0], tf.float32)) * tf.reduce_mean(tf.square(stein_lemma_err))
# nonlin = tf.nn.relu(tf.sign(tf.random_normal((LATENT_DIM,)))*latent - tf.random_normal((LATENT_DIM,)))
# nonlin_mean = tf.reduce_mean(nonlin, reduction_indices=[0], keep_dims=True)
# nonlin_cov = tf.matmul(nonlin, nonlin, transpose_a=True)/tf.cast(tf.shape(latent)[0], tf.float32) - tf.matmul(nonlin_mean, nonlin_mean, transpose_a=True)
# ind_err = tf.reduce_sum(tf.square(nonlin_cov)) - tf.reduce_sum(tf.diag_part(tf.square(nonlin_cov)))
lm.summaries.scalar_summary('ind_err', ind_err)
lm.summaries.image_summary('posterior/mean', tf.reshape(output_mean, [-1, IMAGE_SIZE, IMAGE_SIZE, 1]), 10)
cov = tf.matmul(latent, latent, transpose_a=True)/tf.cast(tf.shape(latent)[0], tf.float32)
eye = tf.diag(tf.ones((LATENT_DIM,)))
lm.summaries.image_summary('cov', tf.expand_dims(tf.expand_dims(cov, 0), -1), 1)
lm.summaries.image_summary('cov_error', tf.expand_dims(tf.expand_dims(cov-eye, 0), -1), 1)
weight_decay = sum([tf.reduce_sum(t**2) for t in lm.weight_factory.variables + lm.bias_factory.variables])
if BNAE:
error = squared_error
else:
error = -likelihood_bound
if IND_ERROR:
error += ind_err
return output_mean, output_log_std, error
def prior_model(latent=None): # option to call with latent as numpy array of shape 1xLATENT_DIM
if latent is None:
latent = tf.random_normal((prior_batch_size, LATENT_DIM))
else:
latent = tf.convert_to_tensor(latent, dtype=tf.float32)
output_mean_logit, output_log_std = decoder(latent)
output_mean = tf.nn.sigmoid(output_mean_logit)
sample_image = lm.summaries.image_summary('prior/mean', tf.reshape(output_mean, [-1, IMAGE_SIZE, IMAGE_SIZE, 1]), 10)
return output_mean, output_log_std, sample_image
classifier_logits = classifier(lm_classifier, fed_input_data)
classifier_saver = tf.train.Saver([var for var in tf.trainable_variables() + tf.get_collection('BatchNormInternal') if var != batch])
with tf.name_scope('posterior'):
reconstruction, _, error = full_model(training_batch)
training_merged = lm.summaries.merge_all_summaries()
lm.is_training = False
tf.get_variable_scope().reuse_variables()
with tf.name_scope('prior'):
prior_sample, _, prior_sample_image = prior_model()
lm.summaries.reset()
with tf.name_scope('test'):
test_reconstruction, _, test_error = full_model(fed_input_data)
test_merged = lm.summaries.merge_all_summaries() + lm_classifier.summaries.merge_all_summaries()
saver = tf.train.Saver(tf.trainable_variables() + tf.get_collection('BatchNormInternal'))
learning_rate = tf.train.exponential_decay(FLAGS.learning_rate, batch, 5000, 0.8, staircase=True)
train_step = tf.train.AdamOptimizer(learning_rate).minimize(error, global_step=batch, var_list=lm.filter_factory.variables + lm.weight_factory.variables + lm.bias_factory.variables + lm.scale_factory.variables)
def feed_dict(mode):
"""Make a TensorFlow feed_dict: maps data onto Tensor placeholders."""
if mode == 'test':
return {fed_input_data: mnist.test.images, prior_batch_size: 1000}
else:
return {prior_batch_size: PRIOR_BATCH_SIZE}
with tf.Session() as sess:
train_writer = tf.train.SummaryWriter(FLAGS.summaries_dir + '/train', sess.graph)
test_writer = tf.train.SummaryWriter(FLAGS.summaries_dir + '/test')
sess.run(tf.initialize_all_variables())
sess.run(all_train_data.initializer, feed_dict={all_train_data_initializer: mnist.train.images})
sess.run(tf.initialize_variables(tf.get_collection('BatchNormInternal')))
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(sess=sess, coord=coord)
restore_latest(classifier_saver, sess, '/tmp/mnist_basic')
runs = []
for _ in range(NUM_RUNS_FOR_ENTROPY_ESTIMATES):
new_output_probs, = sess.run([classifier_logits], feed_dict={fed_input_data: mnist.test.images[:1000, :]})
new_output =
|
numpy.argmax(new_output_probs, 1)
|
numpy.argmax
|
"""
Example 1 - Single Variable Linear Regression
NOTE: The example and sample data is being taken from the "Machine Learning course by <NAME>" in Coursera.
Problem:
Suppose you are selling your house and you
want to know what a good market price would be. One way to do this is to
first collect information on recent houses sold and make a model of housing
prices.
"""
import matplotlib.pyplot as plt
# initial imports
import numpy as np
plt.ion()
from models.linear_regression import compute_cost, gradient_descent
# ----------------Loading X and y matrix ---------------
print('Loading data ...')
data = np.loadtxt('data/ex1data1.txt', delimiter=',')
X = data[:, 0:-1]
y = data[:, -1:]
m = y.shape[0]
# printing first 5 elements
print(X[0:5])
# ----------------Plotting Data-----------------
fig = plt.figure("data")
axes1 = fig.add_subplot()
axes1.scatter(X, y, marker="x", c='r')
# ---------------Cost and Gradient descent------------
# adding bias units to X
X = np.hstack((np.ones([m, 1]), X))
# Some gradient descent settings
iterations = 1500
alpha = 0.01
theta = np.zeros([X.shape[1], 1])
print('\nTesting the cost function ...')
# compute and display initial cost
J = compute_cost(X, y, theta)
print('With theta = [0 ; 0]\nCost computed = {}'.format(J))
print('Expected cost value (approx) 32.07')
# further testing of the cost function
J = compute_cost(X, y, np.array([[-1], [2]]))
print('\nWith theta = [-1 ; 2]\nCost computed = {}'.format(J))
print('Expected cost value (approx) 54.24')
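# Hedged cross-check, assuming compute_cost implements the usual squared-error cost
# J(theta) = 1/(2m) * sum((X @ theta - y)^2); a local NumPy version should then agree
# with the values printed above.
def cost_check(X, y, theta):
    m = y.shape[0]
    residual = X.dot(theta) - y
    return float(np.sum(residual ** 2) / (2 * m))
print('Local cross-check of the cost at theta = zeros: {}'.format(cost_check(X, y, theta)))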
print('\nRunning Gradient Descent ...')
# run gradient descent
theta, _ = gradient_descent(X, y, theta, alpha, iterations)
# print theta to screen
print('Theta found by gradient descent:')
print(theta)
print('Expected theta values (approx)')
print(' [-3.6303\n 1.1664]')
# ---------------- plotting the linear model---------------------------
plt.plot(X[:, 1:], np.dot(X, theta), lw=1)
plt.show()
# ------------------ plotting the J(theta0,theta1)---------------------
print('Visualizing J(theta_0, theta_1) ...\n')
# Grid over which we will calculate J
theta0_vals = np.linspace(-10, 10, 100)
theta1_vals = np.linspace(-1, 4, 100)
# initialize J_vals to a matrix of 0's
J_vals = np.zeros([theta0_vals.size, theta1_vals.size])
# Fill out J_vals
for i in range(theta0_vals.size):
for j in range(theta1_vals.size):
t = np.array([theta0_vals[i], theta1_vals[j]]).reshape(2, 1)
J_vals[i, j] = compute_cost(X, y, t)
# Because of the way meshgrids work in the surf command, we need to
# transpose J_vals before calling surf, or else the axes will be flipped
J_vals = J_vals.T
theta0_vals, theta1_vals =
|
np.meshgrid(theta0_vals, theta1_vals)
|
numpy.meshgrid
|
"""
pyrad.util.radar_utils
======================
Miscellaneous functions dealing with radar data
.. autosummary::
:toctree: generated/
get_data_along_rng
get_data_along_azi
get_data_along_ele
get_ROI
rainfall_accumulation
time_series_statistics
find_contiguous_times
join_time_series
get_range_bins_to_avg
belongs_roi_indices
find_ray_index
find_rng_index
find_ang_index
find_nearest_gate
find_neighbour_gates
find_colocated_indexes
get_target_elevations
time_avg_range
get_closest_solar_flux
get_fixed_rng_data
create_sun_hits_field
create_sun_retrieval_field
compute_quantiles
compute_quantiles_from_hist
compute_quantiles_sweep
compute_histogram
compute_histogram_sweep
get_histogram_bins
compute_2d_stats
compute_1d_stats
compute_2d_hist
quantize_field
compute_profile_stats
compute_directional_stats
project_to_vertical
"""
from warnings import warn
from copy import deepcopy
import datetime
import numpy as np
import scipy
try:
import shapely
_SHAPELY_AVAILABLE = True
except ImportError:
warn('shapely not available')
_SHAPELY_AVAILABLE = False
try:
import pandas as pd
_PANDAS_AVAILABLE = True
except ImportError:
warn('Pandas not available')
_PANDAS_AVAILABLE = False
import pyart
from .stat_utils import quantiles_weighted
def get_data_along_rng(radar, field_name, fix_elevations, fix_azimuths,
ang_tol=1., rmin=None, rmax=None):
"""
Get data at particular (azimuths, elevations)
Parameters
----------
radar : radar object
the radar object where the data is
field_name : str
name of the field to filter
fix_elevations, fix_azimuths: list of floats
List of elevations, azimuths couples [deg]
ang_tol : float
Tolerance between the nominal angle and the radar angle [deg]
rmin, rmax: float
Min and Max range of the obtained data [m]
Returns
-------
xvals : list of float arrays
The ranges of each azi, ele pair
yvals : list of float arrays
The values
valid_azi, valid_ele : float arrays
The azi, ele pairs
"""
if rmin is None:
rmin = 0.
if rmax is None:
rmax = np.max(radar.range['data'])
rng_mask = np.logical_and(
radar.range['data'] >= rmin, radar.range['data'] <= rmax)
x = radar.range['data'][rng_mask]
xvals = []
yvals = []
valid_azi = []
valid_ele = []
if radar.scan_type == 'ppi':
for ele, azi in zip(fix_elevations, fix_azimuths):
ind_sweep = find_ang_index(
radar.fixed_angle['data'], ele, ang_tol=ang_tol)
if ind_sweep is None:
warn('No elevation angle found for fix_elevation '+str(ele))
continue
new_dataset = radar.extract_sweeps([ind_sweep])
try:
dataset_line = pyart.util.cross_section_ppi(
new_dataset, [azi], az_tol=ang_tol)
except EnvironmentError:
warn(' No data found at azimuth '+str(azi) +
' and elevation '+str(ele))
continue
yvals.append(dataset_line.fields[field_name]['data'][0, rng_mask])
xvals.append(x)
valid_azi.append(dataset_line.azimuth['data'][0])
valid_ele.append(dataset_line.elevation['data'][0])
else:
for ele, azi in zip(fix_elevations, fix_azimuths):
ind_sweep = find_ang_index(
radar.fixed_angle['data'], azi, ang_tol=ang_tol)
if ind_sweep is None:
warn('No azimuth angle found for fix_azimuth '+str(azi))
continue
new_dataset = radar.extract_sweeps([ind_sweep])
try:
dataset_line = pyart.util.cross_section_rhi(
new_dataset, [ele], el_tol=ang_tol)
except EnvironmentError:
warn(' No data found at azimuth '+str(azi) +
' and elevation '+str(ele))
continue
yvals.append(
dataset_line.fields[field_name]['data'][0, rng_mask])
xvals.append(x)
valid_azi.append(dataset_line.azimuth['data'][0])
valid_ele.append(dataset_line.elevation['data'][0])
return xvals, yvals, valid_azi, valid_ele
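# Hedged usage sketch: the file name and field name below are illustrative assumptions,
# not values taken from this module.
def _example_get_data_along_rng():
    radar = pyart.io.read('radar_volume.nc')  # hypothetical radar volume file
    return get_data_along_rng(
        radar, 'reflectivity', fix_elevations=[1.0, 3.0], fix_azimuths=[45., 135.])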
def get_data_along_azi(radar, field_name, fix_ranges, fix_elevations,
rng_tol=50., ang_tol=1., azi_start=None,
azi_stop=None):
"""
Get data at particular (ranges, elevations)
Parameters
----------
radar : radar object
the radar object where the data is
field_name : str
name of the field to filter
fix_ranges, fix_elevations: list of floats
List of ranges [m], elevations [deg] couples
rng_tol : float
Tolerance between the nominal range and the radar range [m]
ang_tol : float
Tolerance between the nominal angle and the radar angle [deg]
azi_start, azi_stop: float
Start and stop azimuth angle of the data [deg]
Returns
-------
xvals : list of float arrays
The ranges of each rng, ele pair
yvals : list of float arrays
The values
valid_rng, valid_ele : float arrays
The rng, ele pairs
"""
if azi_start is None:
azi_start = np.min(radar.azimuth['data'])
if azi_stop is None:
azi_stop = np.max(radar.azimuth['data'])
yvals = []
xvals = []
valid_rng = []
valid_ele = []
for rng, ele in zip(fix_ranges, fix_elevations):
ind_rng = find_rng_index(radar.range['data'], rng, rng_tol=rng_tol)
if ind_rng is None:
warn('No range gate found for fix_range '+str(rng))
continue
if radar.scan_type == 'ppi':
ind_sweep = find_ang_index(
radar.fixed_angle['data'], ele, ang_tol=ang_tol)
if ind_sweep is None:
warn('No elevation angle found for fix_elevation ' +
str(ele))
continue
new_dataset = radar.extract_sweeps([ind_sweep])
else:
try:
new_dataset = pyart.util.cross_section_rhi(
radar, [ele], el_tol=ang_tol)
except EnvironmentError:
warn(
' No data found at range '+str(rng) +
' and elevation '+str(ele))
continue
if azi_start < azi_stop:
azi_mask = np.logical_and(
new_dataset.azimuth['data'] >= azi_start,
new_dataset.azimuth['data'] <= azi_stop)
else:
azi_mask = np.logical_or(
new_dataset.azimuth['data'] >= azi_start,
new_dataset.azimuth['data'] <= azi_stop)
yvals.append(
new_dataset.fields[field_name]['data'][azi_mask, ind_rng])
xvals.append(new_dataset.azimuth['data'][azi_mask])
valid_rng.append(new_dataset.range['data'][ind_rng])
valid_ele.append(new_dataset.elevation['data'][0])
return xvals, yvals, valid_rng, valid_ele
def get_data_along_ele(radar, field_name, fix_ranges, fix_azimuths,
rng_tol=50., ang_tol=1., ele_min=None,
ele_max=None):
"""
Get data at particular (ranges, azimuths)
Parameters
----------
radar : radar object
the radar object where the data is
field_name : str
name of the field to filter
fix_ranges, fix_azimuths: list of floats
List of ranges [m], azimuths [deg] couples
rng_tol : float
Tolerance between the nominal range and the radar range [m]
ang_tol : float
Tolerance between the nominal angle and the radar angle [deg]
ele_min, ele_max: float
Min and max elevation angle [deg]
Returns
-------
xvals : list of float arrays
The ranges of each rng, ele pair
yvals : list of float arrays
The values
valid_rng, valid_ele : float arrays
The rng, ele pairs
"""
if ele_min is None:
ele_min = np.min(radar.elevation['data'])
if ele_max is None:
ele_max = np.max(radar.elevation['data'])
yvals = []
xvals = []
valid_rng = []
valid_azi = []
for rng, azi in zip(fix_ranges, fix_azimuths):
ind_rng = find_rng_index(radar.range['data'], rng, rng_tol=rng_tol)
if ind_rng is None:
warn('No range gate found for fix_range '+str(rng))
continue
if radar.scan_type == 'ppi':
try:
new_dataset = pyart.util.cross_section_ppi(
radar, [azi], az_tol=ang_tol)
except EnvironmentError:
warn(
' No data found at range '+str(rng) +
' and elevation '+str(azi))
continue
else:
ind_sweep = find_ang_index(
radar.fixed_angle['data'], azi, ang_tol=ang_tol)
if ind_sweep is None:
warn('No azimuth angle found for fix_azimuth '+str(azi))
continue
new_dataset = radar.extract_sweeps([ind_sweep])
ele_mask = np.logical_and(
new_dataset.elevation['data'] >= ele_min,
new_dataset.elevation['data'] <= ele_max)
yvals.append(
new_dataset.fields[field_name]['data'][ele_mask, ind_rng])
xvals.append(new_dataset.elevation['data'][ele_mask])
valid_rng.append(new_dataset.range['data'][ind_rng])
valid_azi.append(new_dataset.elevation['data'][0])
return xvals, yvals, valid_rng, valid_azi
def get_ROI(radar, fieldname, sector):
"""
filter out any data outside the region of interest defined by sector
Parameters
----------
radar : radar object
the radar object where the data is
fieldname : str
name of the field to filter
sector : dict
a dictionary defining the region of interest
Returns
-------
roi_flag : ndarray
a field array with ones in gates that are in the Region of Interest
"""
roi_flag = np.ma.ones((radar.nrays, radar.ngates), dtype=int)
# check for altitude limits
if sector['hmin'] is not None:
roi_flag[radar.gate_altitude['data'] < sector['hmin']] = 0
if sector['hmax'] is not None:
roi_flag[radar.gate_altitude['data'] > sector['hmax']] = 0
# check for range limits
if sector['rmin'] is not None:
roi_flag[:, radar.range['data'] < sector['rmin']] = 0
if sector['rmax'] is not None:
roi_flag[:, radar.range['data'] > sector['rmax']] = 0
# check elevation angle limits
if sector['elmin'] is not None:
roi_flag[radar.elevation['data'] < sector['elmin'], :] = 0
if sector['elmax'] is not None:
roi_flag[radar.elevation['data'] > sector['elmax'], :] = 0
# check min and max azimuth angle
if sector['azmin'] is not None and sector['azmax'] is not None:
if sector['azmin'] <= sector['azmax']:
roi_flag[radar.azimuth['data'] < sector['azmin'], :] = 0
roi_flag[radar.azimuth['data'] > sector['azmax'], :] = 0
if sector['azmin'] > sector['azmax']:
roi_flag[np.logical_and(
radar.azimuth['data'] < sector['azmin'],
radar.azimuth['data'] > sector['azmax']), :] = 0
elif sector['azmin'] is not None:
roi_flag[radar.azimuth['data'] < sector['azmin'], :] = 0
elif sector['azmax'] is not None:
roi_flag[radar.azimuth['data'] > sector['azmax'], :] = 0
return roi_flag
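# Illustrative sector dictionary for get_ROI; the keys mirror exactly those read in the
# function body and setting a value to None disables the corresponding limit.
_EXAMPLE_ROI_SECTOR = {
    'hmin': 500., 'hmax': 6000.,    # altitude limits [m]
    'rmin': 5000., 'rmax': 80000.,  # range limits [m]
    'elmin': 0.5, 'elmax': 20.,     # elevation limits [deg]
    'azmin': 300., 'azmax': 30.,    # azimuth limits [deg]; wrapping across north is handled
}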
def rainfall_accumulation(t_in_vec, val_in_vec, cum_time=3600.,
base_time=0., dropnan=False):
"""
Computes the rainfall accumulation of a time series over a given period
Parameters
----------
t_in_vec : datetime array
the input date and time array
val_in_vec : float array
the input values array [mm/h]
cum_time : int
accumulation time [s]
base_time : int
base time [s]
dropnan : boolean
if True remove NaN from the time series
Returns
-------
t_out_vec : datetime array
the output date and time array
val_out_vec : float array
the output values array
np_vec : int array
the number of samples at each period
"""
# get the number of samples per interval
t_out_vec, np_vec = time_series_statistics(
t_in_vec, np.ones(len(val_in_vec), dtype=float), avg_time=cum_time,
base_time=base_time, method='sum', dropnan=dropnan)
np_vec[np.isnan(np_vec)] = 0
np_vec = np_vec.astype(int)
t_out_vec, val_out_vec = time_series_statistics(
t_in_vec, val_in_vec, avg_time=cum_time, base_time=base_time,
method='sum', dropnan=dropnan)
t_sample = cum_time/np_vec # find accumulation time of each sample
val_out_vec *= (t_sample/3600.) # conversion to mm in cum_time period
val_out_vec = np.ma.asarray(val_out_vec)
val_out_vec[np.isnan(val_out_vec)] = np.ma.masked
return t_out_vec, val_out_vec, np_vec
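# Worked example of the unit logic above (assuming all samples fall in one accumulation
# window): four 15-minute samples of a steady 6 mm/h rate give val_out_vec = 24 after the
# sum, np_vec = 4, t_sample = 3600/4 = 900 s, so the accumulated depth is
# 24 * (900/3600.) = 6 mm, as expected for one hour of 6 mm/h rainfall.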
def time_series_statistics(t_in_vec, val_in_vec, avg_time=3600,
base_time=1800, method='mean', dropnan=False):
"""
Computes statistics over a time-averaged series. Only available if the pandas
package is installed; otherwise returns None
Parameters
----------
t_in_vec : datetime array
the input date and time array
val_in_vec : float array
the input values array
avg_time : int
averaging time [s]
base_time : int
base time [s]
method : str
statistical method
dropnan : boolean
if True remove NaN from the time series
Returns
-------
t_out_vec : datetime array
the output date and time array
val_out_vec : float array
the output values array
"""
if not _PANDAS_AVAILABLE:
warn('Pandas not available. Unable to compute time series statistics')
return None, None
df_in = pd.DataFrame(data=val_in_vec, index=pd.DatetimeIndex(t_in_vec))
df_out = getattr(df_in.resample(
str(avg_time)+'S', closed='right', label='right', base=base_time),
method)()
if dropnan is True:
df_out = df_out.dropna(how='any')
t_out_vec = df_out.index.to_pydatetime()
val_out_vec = df_out.values.flatten()
return t_out_vec, val_out_vec
def find_contiguous_times(times, step=600):
"""
Given and array of ordered times, find those contiguous according to
a maximum time step
Parameters
----------
times : array of datetimes
The array of times
step : float
The time step [s]
Returns
-------
start_times, end_times : array of date times
The start and end of each consecutive time period
"""
run = []
periods = []
expect = None
for time in times:
if expect is None:
run.append(time)
elif time <= expect:
run.append(time)
else:
run = [time]
periods.append(run)
expect = time+datetime.timedelta(seconds=step)
if not periods:
periods = [times]
elif periods[0][0] != times[0]:
periods.insert(0, [times[0]])
print('number of consecutive periods: '+str(len(periods)))
start_times = np.array([], dtype=datetime.datetime)
end_times = np.array([], dtype=datetime.datetime)
for period in periods:
start_times = np.append(
start_times, period[0]-datetime.timedelta(seconds=step))
end_times = np.append(end_times, period[-1])
return start_times, end_times
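# Minimal illustration (made-up times): samples closer together than `step` seconds stay
# in one period, while a larger gap starts a new one.
def _find_contiguous_times_example():
    t0 = datetime.datetime(2024, 1, 1, 0, 0)
    times = [t0,
             t0 + datetime.timedelta(seconds=300),   # within the default 600 s step
             t0 + datetime.timedelta(seconds=4000)]  # gap larger than 600 s
    return find_contiguous_times(times)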
def join_time_series(t1, val1, t2, val2, dropnan=False):
"""
Joins two time series. Only available if the pandas package is installed;
otherwise returns None.
Parameters
----------
t1 : datetime array
time of first series
val1 : float array
value of first series
t2 : datetime array
time of second series
val2 : float array
value of second series
dropnan : boolean
if True remove NaN from the time series
Returns
-------
t_out_vec : datetime array
the resultant date time after joining the series
val1_out_vec : float array
value of first series
val2_out_vec : float array
value of second series
"""
if not _PANDAS_AVAILABLE:
warn('Pandas not available. Unable to join time series')
return None, None, None
df1 = pd.DataFrame(data=val1, index=pd.DatetimeIndex(t1))
df2 = pd.DataFrame(data=val2, index=pd.DatetimeIndex(t2))
df_out = pd.concat([df1, df2], join='outer', axis=1)
if dropnan is True:
df_out = df_out.dropna(how='any')
t_out_vec = df_out.index.to_pydatetime()
val1_out_vec = df_out.values[:, 0].flatten()
val2_out_vec = df_out.values[:, 1].flatten()
return t_out_vec, val1_out_vec, val2_out_vec
def get_range_bins_to_avg(rad1_rng, rad2_rng):
"""
Compares the resolution of two radars and determines if and which radar
has to be averaged and the length of the averaging window
Parameters
----------
rad1_rng : array
the range of radar 1
rad2_rng : array
the range of radar 2
Returns
-------
avg_rad1, avg_rad2 : Boolean
Booleans specifying if the radar data has to be average in range
avg_rad_lim : array with two elements
the limits to the average (centered on each range gate)
"""
rad1_res = rad1_rng[1]-rad1_rng[0]
rad2_res = rad2_rng[1]-rad2_rng[0]
res_ratio = rad1_res/rad2_res
avg_rad1 = False
avg_rad2 = False
avg_rad_lim = None
if res_ratio > 1.5:
avg_rad2 = True
nbins = int(res_ratio)
if nbins % 2 == 0:
avg_rad_lim = [-int(nbins/2)-1, int(nbins/2)]
else:
avg_rad_lim = [-int((nbins-1)/2), int((nbins-1)/2)]
elif res_ratio < 1./1.5:
avg_rad1 = True
nbins = int(1./res_ratio)
if nbins % 2 == 0:
avg_rad_lim = [-int(nbins/2)-1, int(nbins/2)]
else:
avg_rad_lim = [-int((nbins-1)/2), int((nbins-1)/2)]
return avg_rad1, avg_rad2, avg_rad_lim
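# Worked example for the logic above: a 500 m resolution radar compared with a 100 m
# resolution radar gives res_ratio = 5, so the finer radar 2 is averaged over 5 bins
# centred on each gate of radar 1.
def _get_range_bins_to_avg_example():
    rad1_rng = np.arange(0., 5000., 500.)
    rad2_rng = np.arange(0., 5000., 100.)
    return get_range_bins_to_avg(rad1_rng, rad2_rng)  # -> (False, True, [-2, 2])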
def belongs_roi_indices(lat, lon, roi):
"""
Get the indices of points that belong to roi in a list of points
Parameters
----------
lat, lon : float arrays
latitudes and longitudes to check
roi : dict
Dictionary describing the region of interest
Returns
-------
inds : array of ints
list of indices of points belonging to ROI
is_roi : str
Whether the list of points is within the region of interest.
Can be 'All', 'None', 'Some'
"""
if not _SHAPELY_AVAILABLE:
warn('shapely package not available. ' +
'Unable to determine if points belong to Region Of Interest')
return np.asarray([]), 'None'
lon_list = lon.flatten()
lat_list = lat.flatten()
polygon = shapely.geometry.Polygon(list(zip(roi['lon'], roi['lat'])))
points = shapely.geometry.MultiPoint(list(zip(lon_list, lat_list)))
inds = []
if polygon.contains(points):
warn('All points in the region of interest')
is_roi = 'All'
inds = np.indices(np.shape(lon))
elif polygon.disjoint(points):
warn('No points in the region of interest')
is_roi = 'None'
else:
points_roi = points.intersection(polygon)
if points_roi.geom_type == 'Point':
ind = np.where(
np.logical_and(lon == points_roi.x, lat == points_roi.y))
if len(ind) == 1:
ind = ind[0]
inds.extend(ind)
else:
points_roi_list = list(points_roi)
for point in points_roi_list:
ind = np.where(np.logical_and(lon == point.x, lat == point.y))
if len(ind) == 1:
ind = ind[0]
inds.extend(ind)
nroi = len(lat[inds])
npoint = len(lat_list)
warn(str(nroi)+' points out of '+str(npoint) +
' in the region of interest')
is_roi = 'Some'
return np.asarray(inds), is_roi
def find_ray_index(ele_vec, azi_vec, ele, azi, ele_tol=0., azi_tol=0.,
nearest='azi'):
"""
Find the ray index corresponding to a particular elevation and azimuth
Parameters
----------
ele_vec, azi_vec : float arrays
The elevation and azimuth data arrays where to look for
ele, azi : floats
The elevation and azimuth to search
ele_tol, azi_tol : floats
Tolerances [deg]
nearest : str
criterion to define which ray to keep if multiple rays are within
tolerance. azi: nearest azimuth, ele: nearest elevation
Returns
-------
ind_ray : int
The ray index
"""
ind_ray = np.where(np.logical_and(
np.logical_and(ele_vec <= ele+ele_tol, ele_vec >= ele-ele_tol),
np.logical_and(azi_vec <= azi+azi_tol, azi_vec >= azi-azi_tol)))[0]
if ind_ray.size == 0:
return None
if ind_ray.size == 1:
return ind_ray[0]
if nearest == 'azi':
ind_min = np.argmin(np.abs(azi_vec[ind_ray]-azi))
else:
ind_min = np.argmin(np.abs(ele_vec[ind_ray]-ele))
return ind_ray[ind_min]
def find_rng_index(rng_vec, rng, rng_tol=0.):
"""
Find the range index corresponding to a particular range
Parameters
----------
rng_vec : float array
The range data array where to look for
rng : float
The range to search
rng_tol : float
Tolerance [m]
Returns
-------
ind_rng : int
The range index
"""
dist = np.abs(rng_vec-rng)
ind_rng = np.argmin(dist)
if dist[ind_rng] > rng_tol:
return None
return ind_rng
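# Quick illustration: the nearest gate is returned only when it lies within the
# requested tolerance, otherwise None.
def _find_rng_index_example():
    rng_vec = np.array([0., 100., 200.])
    # 130 m is 30 m from the 100 m gate: found with a 50 m tolerance, not with 10 m
    return find_rng_index(rng_vec, 130., rng_tol=50.), find_rng_index(rng_vec, 130., rng_tol=10.)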
def find_ang_index(ang_vec, ang, ang_tol=0.):
"""
Find the angle index corresponding to a particular fixed angle
Parameters
----------
ang_vec : float array
The angle data array where to look for
ang : float
The angle to search
ang_tol : float
Tolerance [deg]
Returns
-------
ind_ang : int
The angle index
"""
dist = np.abs(ang_vec-ang)
ind_ang = np.argmin(dist)
if dist[ind_ang] > ang_tol:
return None
return ind_ang
def find_nearest_gate(radar, lat, lon, latlon_tol=0.0005):
"""
Find the radar gate closest to a lat,lon point
Parameters
----------
radar : radar object
the radar object
lat, lon : float
The position of the point
latlon_tol : float
The tolerance around this point
Returns
-------
ind_ray, ind_rng : int
The ray and range index
azi, rng : float
the range and azimuth position of the gate
"""
# find gates close to lat lon point
inds_ray_aux, inds_rng_aux = np.where(np.logical_and(
np.logical_and(
radar.gate_latitude['data'] < lat+latlon_tol,
radar.gate_latitude['data'] > lat-latlon_tol),
np.logical_and(
radar.gate_longitude['data'] < lon+latlon_tol,
radar.gate_longitude['data'] > lon-latlon_tol)))
if inds_ray_aux.size == 0:
warn('No data found at point lat '+str(lat)+' +- ' +
str(latlon_tol)+' lon '+str(lon)+' +- ' +
str(latlon_tol)+' deg')
return None, None, None, None
# find closest latitude
ind_min = np.argmin(np.abs(
radar.gate_latitude['data'][inds_ray_aux, inds_rng_aux]-lat))
ind_ray = inds_ray_aux[ind_min]
ind_rng = inds_rng_aux[ind_min]
azi = radar.azimuth['data'][ind_ray]
rng = radar.range['data'][ind_rng]
return ind_ray, ind_rng, azi, rng
def find_neighbour_gates(radar, azi, rng, delta_azi=None, delta_rng=None):
"""
Find the neighbouring gates within +-delta_azi and +-delta_rng
Parameters
----------
radar : radar object
the radar object
azi, rng : float
The azimuth [deg] and range [m] of the central gate
delta_azi, delta_rng : float
The azimuth and range extent of the search region around the central gate
Returns
-------
inds_ray_aux, ind_rng_aux : int
The indices (ray, rng) of the neighbouring gates
"""
# find gates close to lat lon point
if delta_azi is None:
inds_ray = np.ma.arange(radar.azimuth['data'].size)
else:
azi_max = azi+delta_azi
azi_min = azi-delta_azi
if azi_max > 360.:
azi_max -= 360.
if azi_min < 0.:
azi_min += 360.
if azi_max > azi_min:
inds_ray = np.where(np.logical_and(
radar.azimuth['data'] < azi_max,
radar.azimuth['data'] > azi_min))[0]
else:
inds_ray = np.where(np.logical_or(
radar.azimuth['data'] > azi_min,
radar.azimuth['data'] < azi_max))[0]
if delta_rng is None:
inds_rng = np.ma.arange(radar.range['data'].size)
else:
inds_rng = np.where(np.logical_and(
radar.range['data'] < rng+delta_rng,
radar.range['data'] > rng-delta_rng))[0]
return inds_ray, inds_rng
def find_colocated_indexes(radar1, radar2, rad1_ele, rad1_azi, rad1_rng,
rad2_ele, rad2_azi, rad2_rng, ele_tol=0.5,
azi_tol=0.5, rng_tol=50.):
"""
Given the theoretical elevation, azimuth and range of the co-located gates
of two radars and a given tolerance returns the indices of the gates for
the current radars
Parameters
----------
radar1, radar2 : radar objects
the two radar objects
rad1_ele, rad1_azi, rad1_rng : array of floats
the radar coordinates of the radar1 gates
rad2_ele, rad2_azi, rad2_rng : array of floats
the radar coordinates of the radar2 gates
ele_tol, azi_tol : floats
azimuth and elevation angle tolerance [deg]
rng_tol : float
range Tolerance [m]
Returns
-------
ind_ray_rad1, ind_rng_rad1, ind_ray_rad2, ind_rng_rad2 : array of ints
the ray and range indexes of each radar gate
"""
ngates = len(rad1_ele)
ind_ray_rad1 = np.ma.masked_all(ngates, dtype=int)
ind_rng_rad1 = np.ma.masked_all(ngates, dtype=int)
ind_ray_rad2 = np.ma.masked_all(ngates, dtype=int)
ind_rng_rad2 = np.ma.masked_all(ngates, dtype=int)
for i in range(ngates):
ind_ray_rad1_aux = find_ray_index(
radar1.elevation['data'], radar1.azimuth['data'], rad1_ele[i],
rad1_azi[i], ele_tol=ele_tol, azi_tol=azi_tol)
if ind_ray_rad1_aux is None:
continue
ind_rng_rad1_aux = find_rng_index(
radar1.range['data'], rad1_rng[i], rng_tol=rng_tol)
if ind_rng_rad1_aux is None:
continue
ind_ray_rad2_aux = find_ray_index(
radar2.elevation['data'], radar2.azimuth['data'], rad2_ele[i],
rad2_azi[i], ele_tol=ele_tol, azi_tol=azi_tol)
if ind_ray_rad2_aux is None:
continue
ind_rng_rad2_aux = find_rng_index(
radar2.range['data'], rad2_rng[i], rng_tol=rng_tol)
if ind_rng_rad2_aux is None:
continue
ind_ray_rad1[i] = ind_ray_rad1_aux
ind_rng_rad1[i] = ind_rng_rad1_aux
ind_ray_rad2[i] = ind_ray_rad2_aux
ind_rng_rad2[i] = ind_rng_rad2_aux
ind_ray_rad1 = ind_ray_rad1.compressed()
ind_rng_rad1 = ind_rng_rad1.compressed()
ind_ray_rad2 = ind_ray_rad2.compressed()
ind_rng_rad2 = ind_rng_rad2.compressed()
return ind_ray_rad1, ind_rng_rad1, ind_ray_rad2, ind_rng_rad2
def get_target_elevations(radar_in):
"""
Gets RHI target elevations
Parameters
----------
radar_in : Radar object
current radar object
Returns
-------
target_elevations : 1D-array
Elevation angles
el_tol : float
elevation tolerance
"""
sweep_start = radar_in.sweep_start_ray_index['data'][0]
sweep_end = radar_in.sweep_end_ray_index['data'][0]
target_elevations = np.sort(
radar_in.elevation['data'][sweep_start:sweep_end+1])
el_tol = np.median(target_elevations[1:]-target_elevations[:-1])
return target_elevations, el_tol
def time_avg_range(timeinfo, avg_starttime, avg_endtime, period):
"""
finds the new start and end time of an averaging
Parameters
----------
timeinfo : datetime
the current volume time
avg_starttime : datetime
the current average start time
avg_endtime: datetime
the current average end time
period: float
the averaging period
Returns
-------
new_starttime : datetime
the new average start time
new_endtime : datetime
the new average end time
"""
new_starttime = deepcopy(avg_starttime)
new_endtime = deepcopy(avg_endtime)
within_range = False
while not within_range:
if timeinfo > new_endtime:
new_starttime += datetime.timedelta(seconds=period)
new_endtime += datetime.timedelta(seconds=period)
else:
within_range = True
return new_starttime, new_endtime
def get_closest_solar_flux(hit_datetime_list, flux_datetime_list,
flux_value_list):
"""
finds the solar flux measurement closest to the sun hit
Parameters
----------
hit_datetime_list : datetime array
the date and time of the sun hit
flux_datetime_list : datetime array
the date and time of the solar flux measurement
flux_value_list: ndarray 1D
the solar flux values
Returns
-------
flux_datetime_closest_list : datetime array
the date and time of the solar flux measurement closest to sun hit
flux_value_closest_list : ndarray 1D
the solar flux values closest to the sun hit time
"""
flux_datetime_closest_list = list()
flux_value_closest_list = np.ma.masked_all(len(hit_datetime_list))
i = 0
for hit_dt in hit_datetime_list:
flux_datetime_closest = min(
flux_datetime_list, key=lambda x: abs(x-hit_dt))
flux_datetime_closest_list.append(flux_datetime_closest)
# solar flux observation within 24h of sun hit
time_diff = abs(flux_datetime_closest-hit_dt).total_seconds()
if time_diff < 86400.:
ind = flux_datetime_list.index(flux_datetime_closest)
flux_value_closest_list[i] = flux_value_list[ind]
else:
warn('Nearest solar flux observation further than ' +
str(time_diff)+' s in time')
i += 1
return flux_datetime_closest_list, flux_value_closest_list
def get_fixed_rng_data(radar, field_names, fixed_rng, rng_tol=50.,
ele_min=None, ele_max=None, azi_min=None,
azi_max=None):
"""
Creates a 2D-grid with (azi, ele) data at a fixed range
Parameters
----------
radar : radar object
The radar object containing the data
field_names : list of str
The field names
fixed_rng : float
The fixed range [m]
rng_tol : float
The tolerance between the nominal range and the actual radar range [m]
ele_min, ele_max, azi_min, azi_max : float or None
The limits of the grid [deg]. If None the limits will be the limits
of the radar volume
Returns
-------
radar : radar object
The radar object containing only the desired data
"""
radar_aux = deepcopy(radar)
ind_rng = find_rng_index(
radar_aux.range['data'], fixed_rng, rng_tol=rng_tol)
if ind_rng is None:
warn('No range bin at range '+str(fixed_rng)+' with tolerance ' +
str(rng_tol))
return None, None, None
# Determine angle limits
if radar_aux.scan_type == 'ppi':
if ele_min is None:
ele_min = np.min(radar_aux.fixed_angle['data'])
if ele_max is None:
ele_max = np.max(radar_aux.fixed_angle['data'])
if azi_min is None:
azi_min = np.min(radar_aux.azimuth['data'])
if azi_max is None:
azi_max = np.max(radar_aux.azimuth['data'])
else:
if ele_min is None:
ele_min = np.min(radar_aux.elevation['data'])
if ele_max is None:
ele_max = np.max(radar_aux.elevation['data'])
if azi_min is None:
azi_min = np.min(radar_aux.fixed_angle['data'])
if azi_max is None:
azi_max = np.max(radar_aux.fixed_angle['data'])
if radar_aux.scan_type == 'ppi':
# Get radar elevation angles within limits
ele_vec = np.sort(radar_aux.fixed_angle['data'])
ele_vec = ele_vec[
np.logical_and(ele_vec >= ele_min, ele_vec <= ele_max)]
if ele_vec.size == 0:
warn('No elevation angles between '+str(ele_min)+' and ' +
str(ele_max))
return None, None, None
# get sweeps corresponding to the desired elevation angles
ind_sweeps = []
for ele in ele_vec:
ind_sweeps.append(
np.where(radar_aux.fixed_angle['data'] == ele)[0][0])
radar_aux = radar_aux.extract_sweeps(ind_sweeps)
# Get indices of rays within limits
if azi_min < azi_max:
ind_rays = np.where(np.logical_and(
radar_aux.azimuth['data'] >= azi_min,
radar_aux.azimuth['data'] <= azi_max))[0]
else:
ind_rays = np.where(np.logical_or(
radar_aux.azimuth['data'] >= azi_min,
radar_aux.azimuth['data'] <= azi_max))[0]
else:
# Get radar azimuth angles within limits
azi_vec = radar_aux.fixed_angle['data']
if azi_min < azi_max:
azi_vec = np.sort(azi_vec[
np.logical_and(azi_vec >= azi_min, azi_vec <= azi_max)])
else:
azi_vec = azi_vec[
np.logical_or(azi_vec >= azi_min, azi_vec <= azi_max)]
azi_vec = np.append(
np.sort(azi_vec[azi_vec >= azi_min]),
np.sort(azi_vec[azi_vec < azi_min]))
if azi_vec.size == 0:
warn('No azimuth angles between '+str(azi_min)+' and ' +
str(azi_max))
return None, None, None
# get sweeps corresponding to the desired azimuth angles
ind_sweeps = []
for azi in azi_vec:
ind_sweeps.append(
np.where(radar_aux.fixed_angle['data'] == azi)[0][0])
radar_aux = radar_aux.extract_sweeps(ind_sweeps)
# Get indices of rays within limits
ind_rays = np.where(np.logical_and(
radar_aux.elevation['data'] >= ele_min,
radar_aux.elevation['data'] <= ele_max))[0]
# get new sweep start index and stop index
sweep_start_inds = deepcopy(radar_aux.sweep_start_ray_index['data'])
sweep_end_inds = deepcopy(radar_aux.sweep_end_ray_index['data'])
nrays = 0
for j in range(radar_aux.nsweeps):
# get azimuth indices for this elevation
rays_in_sweep = np.size(
ind_rays[np.logical_and(ind_rays >= sweep_start_inds[j],
ind_rays <= sweep_end_inds[j])])
radar_aux.rays_per_sweep['data'][j] = rays_in_sweep
if j == 0:
radar_aux.sweep_start_ray_index['data'][j] = 0
else:
radar_aux.sweep_start_ray_index['data'][j] = int(
radar_aux.sweep_end_ray_index['data'][j-1]+1)
radar_aux.sweep_end_ray_index['data'][j] = (
radar_aux.sweep_start_ray_index['data'][j]+rays_in_sweep-1)
nrays += rays_in_sweep
# Get new fields
for field_name in field_names:
if field_name not in radar_aux.fields:
warn('Field '+field_name+' not available')
continue
radar_aux.fields[field_name]['data'] = (
radar_aux.fields[field_name]['data'][:, ind_rng])
radar_aux.fields[field_name]['data'] = (
radar_aux.fields[field_name]['data'][ind_rays, np.newaxis])
# Update metadata
radar_aux.time['data'] = radar_aux.time['data'][ind_rays]
radar_aux.range['data'] = np.array([fixed_rng])
radar_aux.azimuth['data'] = radar_aux.azimuth['data'][ind_rays]
radar_aux.elevation['data'] = radar_aux.elevation['data'][ind_rays]
radar_aux.init_gate_x_y_z()
radar_aux.init_gate_longitude_latitude()
radar_aux.init_gate_altitude()
radar_aux.nrays = nrays
radar_aux.ngates = 1
return radar_aux
def create_sun_hits_field(rad_el, rad_az, sun_el, sun_az, data, imgcfg):
"""
creates a sun hits field from the position and power of the sun hits
Parameters
----------
rad_el, rad_az, sun_el, sun_az : ndarray 1D
azimuth and elevation of the radar and the sun respectively in degree
data : masked ndarray 1D
the sun hit data
imgcfg: dict
a dictionary specifying the ranges and resolution of the field to
create
Returns
-------
field : masked ndarray 2D
the sun hit field
"""
if data.compressed().size == 0:
warn('No valid sun hits to plot.')
return None
azmin = imgcfg['azmin']
azmax = imgcfg['azmax']
elmin = imgcfg['elmin']
elmax = imgcfg['elmax']
azres = imgcfg['azres']
elres = imgcfg['elres']
mask = np.ma.getmaskarray(data)
rad_el = rad_el[~mask]
rad_az = rad_az[~mask]
sun_el = sun_el[~mask]
sun_az = sun_az[~mask]
data = data[~mask]
d_el = rad_el-sun_el
d_az = (rad_az-sun_az)*np.cos(sun_el*np.pi/180.)
npix_az = int((azmax-azmin)/azres)
npix_el = int((elmax-elmin)/elres)
field = np.ma.masked_all((npix_az, npix_el))
ind_az = ((d_az+azmin)/azres).astype(int)
ind_el = ((d_el+elmin)/elres).astype(int)
field[ind_az, ind_el] = data
return field
def create_sun_retrieval_field(par, field_name, imgcfg, lant=0.):
"""
creates a sun retrieval field from the retrieval parameters
Parameters
----------
par : ndarray 1D
the 5 retrieval parameters
imgcfg: dict
a dictionary specifying the ranges and resolution of the field to
create
Returns
-------
field : masked ndarray 2D
the sun retrieval field
"""
azmin = imgcfg['azmin']
azmax = imgcfg['azmax']
elmin = imgcfg['elmin']
elmax = imgcfg['elmax']
azres = imgcfg['azres']
elres = imgcfg['elres']
npix_az = int((azmax-azmin)/azres)
npix_el = int((elmax-elmin)/elres)
field = np.ma.masked_all((npix_az, npix_el))
d_az = np.array(np.array(range(npix_az))*azres+azmin)
d_el = np.array(np.array(range(npix_el))*elres+elmin)
d_az_mat = np.broadcast_to(d_az.reshape(npix_az, 1), (npix_az, npix_el))
d_el_mat = np.broadcast_to(d_el.reshape(1, npix_el), (npix_az, npix_el))
field = (par[0]+par[1]*d_az_mat+par[2]*d_el_mat+par[3]*d_az_mat*d_az_mat +
par[4]*d_el_mat*d_el_mat)
if field_name in ('sun_est_power_h', 'sun_est_power_v'):
# account for polarization of the antenna and scanning losses
field += 3.+lant
return field
def compute_quantiles(field, quantiles=None):
"""
computes quantiles
Parameters
----------
field : ndarray 2D
the radar field
ray_start, ray_end : int
starting and ending ray indexes
quantiles: float array
list of quantiles to compute
Returns
-------
quantiles : float array
list of quantiles
values : float array
values at each quantile
"""
if quantiles is None:
quantiles = [10., 20., 30., 40., 50., 60., 70., 80., 90., 95.]
warn('No quantiles have been defined. Default ' + str(quantiles) +
' will be used')
nquantiles = len(quantiles)
values = np.ma.masked_all(nquantiles)
data_valid = field.compressed()
if
|
np.size(data_valid)
|
numpy.size
|
# change lists to arrays for performance --> Missing
from sudokuField import field as baseField
import numpy as np
import colored
import math
import time
import os
update = True
field = baseField.copy()
length = len(field)
size = int(math.sqrt(length))
index = 0
cF_field = []
for j in [0, 3, 6, 27, 30, 33, 54, 57, 60]: # hard coded :( --> Missing
temp = []
temp.append(j)
for i, x in enumerate(range(1, 9)):
if x % 3 == 0:
n = temp[i] + 7
else:
n = temp[i] + 1
temp.append(n)
cF_field.append(temp)
cF_field = np.array(cF_field)
cF_field = np.resize(cF_field, (9, 9))
def isPossible(index, nr):
# checks if nr can be put into field[index] using sudoku rules
ud_index = 0
for i, x in enumerate(range(9, length+1, size)):
# (x-size < index < x)
# output x: 9, 18, 27, 36, 45, 54, 63, 72, 81
if index > x-size and index < x:
ud_index = i
break
if index < 9:
lr_index = index
else:
lr_index = int(index/9) if ud_index==0 else index - 9 * ud_index
if index%9==0:
# why do I have to do this???
lr_index, ud_index = ud_index, lr_index
cF_index = 0
for i, x in enumerate(cF_field):
if index in x:
cF_index = i
temp_field = np.array(field, dtype=np.int32)
temp_lr = np.resize([temp_field[x::9] for x in range(9)], (size, size))
lr = temp_lr[lr_index]
temp_ud = np.resize(temp_field, (size, size))
ud = temp_ud[ud_index]
cF = np.array([], dtype=np.int32)
for x in cF_field[cF_index]:
cF = np.append(cF, field[x])
# print("Testing:", nr, "index:", index, lr_index, lr, ud_index, ud, cF_index, cF)
temp = np.concatenate((lr, ud, cF)) # an array containing all numbers relevant to the sudoku rules
temp = temp[temp!=0] # delete all zeros
return False if nr in temp else True
def last_number(index):
# returns last index that is 0 in baseField
while True:
index -= 1
# print(index, baseField[index])
if baseField[index] == 0:
if field[index] != 9:
return index
else:
field[index] = 0
def change_number(n):
global index
breaking = False
color_temp = "green"
for x in range(n+1, 10):
output = "Increase the number by 1"
if isPossible(index, x):
# x is working
color_temp = "green"
output = "Changing index {}: {} -> {}".format(index, field[index], x)
field[index] = x # --> colored output
index += 1
n = 0
breaking = True
elif x == 9:
# no hits for x: backtracking
color_temp = "red"
output = "Backtracking at:", index
field[index] = 0
index = last_number(index)
change_number(field[index])
breaking = True
else:
field[index] = x
temp_field = np.resize(field, (9, 9))
# print(output)
# print(temp_field) # better looking output --> Missing
for col_index, x in enumerate(temp_field):
for row_index, i in enumerate(x):
current_index = col_index*9+row_index
if current_index != index:
color = "white"
else:
color = color_temp
print(colored.stylize(i, colored.fg(color)), end=" ")
print()
if update: time.sleep(.0075) # timedelta?
if update: os.system("cls")
if breaking: break
while True:
if index > 80:
break
if baseField[index] == 0:
n = 0
change_number(n)
else:
index += 1 # index=index+1 if BaseField[index]!=0 else index
baseField =
|
np.resize(baseField, (9,9))
|
numpy.resize
|
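For reference, the same row/column/box rule check can be written compactly on a 9x9 numpy grid, without the flattened-index bookkeeping used above; this is only an illustrative sketch, not part of the solver.

import numpy as np

def is_possible(grid, row, col, nr):
    """Return True if nr can be placed at grid[row, col] under sudoku rules."""
    if nr in grid[row, :] or nr in grid[:, col]:
        return False
    r0, c0 = 3 * (row // 3), 3 * (col // 3)   # top-left corner of the 3x3 box
    return nr not in grid[r0:r0 + 3, c0:c0 + 3]

grid = np.zeros((9, 9), dtype=int)
grid[0, 0] = 5
print(is_possible(grid, 0, 8, 5))  # False: 5 already in row 0
print(is_possible(grid, 4, 4, 5))  # True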
import numpy as np
import torch
def plot_train_inout(x, mels, y, y_hat, fig_path):
import matplotlib.pyplot as plt
batch = 0
f, axarr = plt.subplots(4, 1)
axarr[0].plot(x[batch].cpu().numpy())
axarr[0].set_title('Original input audio')
axarr[1].imshow(mels[batch].cpu().numpy().T)
axarr[1].set_title('Original input mel spec')
axarr[2].plot(y[batch].cpu().numpy())
axarr[2].set_title('Original output audio')
axarr[3].plot(y_hat[batch].cpu().numpy())
axarr[3].set_title('Predicted output audio')
plt.tight_layout()
plt.savefig(fig_path)
def num_params_count(model):
parameters = filter(lambda p: p.requires_grad, model.parameters())
parameters = sum([np.prod(p.size()) for p in parameters]) / 1000000
return parameters
def num_params(model):
print('Trainable Parameters: %.3f million' % num_params_count(model))
# for mulaw encoding and decoding in torch tensors, modified from: https://github.com/pytorch/audio/blob/master/torchaudio/transforms.py
def mulaw_quantize(x, quantization_channels=256):
"""Encode signal based on mu-law companding. For more info see the
`Wikipedia Entry <https://en.wikipedia.org/wiki/%CE%9C-law_algorithm>`_
This algorithm assumes the signal has been scaled to between -1 and 1 and
returns a signal encoded with values from 0 to quantization_channels - 1
Args:
quantization_channels (int): Number of channels. default: 256
"""
mu = quantization_channels - 1
if isinstance(x, np.ndarray):
x_mu = np.sign(x) * np.log1p(mu *
|
np.abs(x)
|
numpy.abs
|
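A small numpy-only sketch of the mu-law companding the docstring describes (encode to 0..quantization_channels-1 and expand back); it mirrors the formula but is not the torch code path above.

import numpy as np

def mulaw_encode(x, quantization_channels=256):
    mu = quantization_channels - 1
    x_mu = np.sign(x) * np.log1p(mu * np.abs(x)) / np.log1p(mu)   # compress to [-1, 1]
    return ((x_mu + 1) / 2 * mu + 0.5).astype(np.int64)           # quantize to [0, mu]

def mulaw_decode(x_mu, quantization_channels=256):
    mu = quantization_channels - 1
    x = 2 * (x_mu.astype(np.float64) / mu) - 1                    # back to [-1, 1]
    return np.sign(x) * np.expm1(np.abs(x) * np.log1p(mu)) / mu   # expand

x = np.linspace(-1, 1, 5)
print(mulaw_decode(mulaw_encode(x)))  # approximately recovers x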
import os, sys, random, itertools, json
dir_path = os.path.dirname(os.path.realpath(__file__))
import scipy.stats
from scipy.stats import ks_2samp
from collections import defaultdict, Counter
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import igraph as ig
import seaborn as sns
# import networkx as nx
from matplotlib.ticker import ScalarFormatter
import matplotlib
matplotlib.rcParams['figure.figsize'] = (6.0, 4.0)
DEL_THRESHOLD = 0
sns.set_context(
"talk",
font_scale=1,
rc={
"lines.linewidth": 2.5,
"text.usetex": False,
"font.family": 'serif',
"font.serif": ['Palatino'],
"font.size": 16
})
sns.set_style('white')
################
# MISC
################
get_full_path = lambda p: os.path.join(os.path.dirname(os.path.abspath(__file__)), p)
def ig_to_nx(ig_graph, directed=False, nodes=None):
g = nx.DiGraph() if directed else nx.Graph()
nodes = nodes if nodes else ig_graph.vs
edges = ig_graph.induced_subgraph(nodes).es if nodes else ig_graph.es
for node in nodes: g.add_node(node.index, **node.attributes())
for edge in edges: g.add_edge(edge.source, edge.target)
return g
def _get_deg_dist(degree, del_threshold=DEL_THRESHOLD):
counter = Counter(degree)
for k, v in list(counter.items()):  # iterate over a copy: deleting while iterating raises RuntimeError in Python 3
if v <= del_threshold: del counter[k]
deg, freq = map(np.array, zip(*sorted(counter.items())))
prob = freq/float(np.sum(freq))
return deg, prob
def renormalize_deg_dist(xk, pk, xk2):
xk_, pk_ = [], []
xk2_set = set(xk2)
for x,p in zip(xk,pk):
if x in xk2_set:
xk_.append(x);pk_.append(p)
xk_, pk_ = map(np.array, [xk_,pk_])
pk_ /= np.sum(pk_)
return xk_, pk_
def get_avg_nbor_indeg(graph, out_to_in=True):
"""Avg nbor indegree given node outdegree"""
d = defaultdict(list)
for node in graph.vs:
deg = node.outdegree() if out_to_in else node.indegree()
for nbor in node.neighbors(mode='OUT'):
d[deg].append(nbor.indegree())
d = {k:np.mean(v) for k,v in dict(d).items()}
return map(np.array, zip(*sorted(d.items())))
def compute_indeg_map(graph, time_attr):
src_tgt_indeg_map = defaultdict(dict)
for node in graph.vs:
nid = node.index
nbors = [n for n in node.neighbors(mode='IN') if n[time_attr] == n[time_attr]]
nbors = sorted(nbors, key=lambda n: n[time_attr])
for indeg, nbor in enumerate(nbors): src_tgt_indeg_map[nbor.index][nid] = indeg
return dict(src_tgt_indeg_map)
def get_chunk_degree_sequence(graph, time_attr, return_keys=True, use_median=False, get_int=True):
outdegs, chunks = defaultdict(list), defaultdict(int)
for node in graph.vs:
time, outdeg = node[time_attr], node.outdegree()
outdegs[time].append(outdeg)
chunks[time] += 1
f = np.median if use_median else np.mean
outdegs = {k: int(round(f(v))) if get_int else f(v) for k,v in outdegs.items() if k in chunks}
skeys = sorted(chunks.keys())
chunks = [chunks[k] for k in skeys]
outdegs = [outdegs[k] for k in skeys]
if return_keys: return skeys, chunks, outdegs
return chunks, outdegs
def get_config_model(g):
config = ig.Graph.Degree_Sequence(g.vs.outdegree(), g.vs.indegree())
for attr in g.vs.attributes(): config.vs[attr] = g.vs[attr]
return config
def get_time_config_model(g, time_attr, debug=False):
"""
configuration model controlled for time
node u cannot link to nodes that join network in year > year_u
"""
indeg_map = dict(zip(g.vs.indices, g.vs.indegree()))
outdeg_map = dict(zip(g.vs.indices, g.vs.outdegree()))
year_nodes_map = defaultdict(list)
for node in g.vs: year_nodes_map[node[time_attr]].append(node.index)
years = sorted(year_nodes_map.keys())
instubs, new_edges = [], []
if debug: print ("{} time_attr vals".format(len(years)))
for year in years:
if debug: print (year, end=' ')
new_nodes = year_nodes_map[year]
for nid in new_nodes: instubs.extend([nid]*indeg_map[nid])
random.shuffle(instubs)
for nid in new_nodes:
for _ in range(outdeg_map[nid]):  # range, not Python-2 xrange
new_edges.append((nid, instubs.pop()))
config = g.copy()
config.delete_edges(None)
config.add_edges(new_edges)
return config
def bin_clustering(cc, step=0.005):
cc = pd.Series(cc)
bins = np.arange(0, 1+step, step)
cc = pd.cut(cc, bins=bins, right=False).value_counts()
cc = cc.astype(float)/np.sum(cc)
cc.index = np.arange(0, 1, step) #+ step/2.
return cc
def clustering(graph, undirected_max=False, get_indeg=False, del_threshold=DEL_THRESHOLD):
N = len(graph.vs)
E_counter = [0.]*N
# "parents" map
target_map = defaultdict(set)
for nhood in graph.neighborhood(mode='OUT'):
target_map[nhood[0]] = set(nhood[1:])
# increment edge counter of common parents
for edge in graph.es:
n1, n2 = edge.source, edge.target
common_targets = target_map[n1].intersection(target_map[n2])
for tgt in common_targets: E_counter[tgt] += 1
cc, nids, degs = [], [], []
for e, indeg, nid in zip(E_counter, graph.indegree(), graph.vs.indices):
if indeg <= 1: continue
degs.append(indeg)
cc.append(e/(indeg*(indeg-1)))
nids.append(nid)
cc, nids, degs = map(np.array, [cc, nids, degs])
cc, nids, degs = cc[degs>del_threshold], nids[degs>del_threshold], degs[degs>del_threshold]
if undirected_max: cc *= 2
if get_indeg: return cc, nids, degs
return cc, nids
def ego_clustering(graph, attr_name, same_attr=True, undirected_max=False, get_indeg=False, del_threshold=DEL_THRESHOLD):
N = len(graph.vs)
E_counter = [0.]*N
nid_attr_map = dict(zip(graph.vs.indices, graph.vs[attr_name]))
# "parents" map
target_map = defaultdict(set)
for nhood in graph.neighborhood(mode='OUT'):
target_map[nhood[0]] = set(nhood[1:])
# increment edge counter of common parents
for edge in graph.es:
n1, n2 = edge.source, edge.target
same_attr_val = nid_attr_map[n1] == nid_attr_map[n2]
if same_attr != same_attr_val: continue
common_targets = target_map[n1].intersection(target_map[n2])
for tgt in common_targets: E_counter[tgt] += 1
cc, nids, degs = [], [], []
for e, indeg, nid in zip(E_counter, graph.indegree(), graph.vs.indices):
if indeg <= 1: continue
degs.append(indeg)
cc.append(e/(indeg*(indeg-1)))
nids.append(nid)
cc, nids, degs = map(np.array, [cc, nids, degs])
cc, nids, degs = cc[degs>del_threshold], nids[degs>del_threshold], degs[degs>del_threshold]
if undirected_max: cc *= 2
if get_indeg: return cc, nids, degs
return cc, nids
def avg_clustering_degree(graph, undirected_max=False, del_threshold=DEL_THRESHOLD, data=None):
if data: cc, nids, indeg = data
else: cc, nids, indeg = clustering(graph, undirected_max=undirected_max, get_indeg=True)
indeg_map = defaultdict(list)
for cc_, indeg_ in zip(cc, indeg): indeg_map[indeg_].append(cc_)
indeg_map = {k:
|
np.mean(v)
|
numpy.mean
|
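To make _get_deg_dist concrete, here is what the Counter-based degree distribution looks like for a toy degree sequence (standard library plus numpy only):

import numpy as np
from collections import Counter

degree = [1, 1, 2, 2, 2, 3, 5, 5]
counter = Counter(degree)
deg, freq = map(np.array, zip(*sorted(counter.items())))
prob = freq / float(np.sum(freq))
print(deg)   # [1 2 3 5]
print(prob)  # [0.25 0.375 0.125 0.25]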
from mock import patch
import pyCGM_Single.pyCGM as pyCGM
import pytest
import numpy as np
rounding_precision = 6
class TestUpperBodyAxis():
"""
This class tests the upper body axis functions in pyCGM.py:
headJC
thoraxJC
findshoulderJC
shoulderAxisCalc
elbowJointCenter
wristJointCenter
handJointCenter
"""
nan_3d = [np.nan, np.nan, np.nan]
rand_coor = [np.random.randint(0, 10), np.random.randint(0, 10), np.random.randint(0, 10)]
@pytest.mark.parametrize(["frame", "vsk", "expected"], [
# Test from running sample data
({'LFHD': np.array([184.55158997, 409.68713379, 1721.34289551]), 'RFHD': np.array([325.82983398, 402.55450439, 1722.49816895]), 'LBHD': np.array([197.8621521 , 251.28889465, 1696.90197754]), 'RBHD': np.array([304.39898682, 242.91339111, 1694.97497559])},
{'HeadOffset': 0.2571990469310653},
[[[255.21685582510975, 407.11593887758056, 1721.8253843887082], [254.19105385179665, 406.146809183757, 1721.9176771191715], [255.19034370229795, 406.2160090443217, 1722.9159912851449]], [255.19071197509766, 406.1208190917969, 1721.9205322265625]]),
# Basic test with a variance of 1 in the x and y dimensions of the markers
({'LFHD': np.array([1, 1, 0]), 'RFHD': np.array([0, 1, 0]), 'LBHD': np.array([1, 0, 0]), 'RBHD': np.array([0, 0, 0])},
{'HeadOffset': 0.0},
[[[0.5, 2, 0], [1.5, 1, 0], [0.5, 1, -1]], [0.5, 1, 0]]),
# Setting the markers so there's no variance in the x-dimension
({'LFHD': np.array([0, 1, 0]), 'RFHD': np.array([0, 1, 0]), 'LBHD': np.array([0, 0, 0]), 'RBHD': np.array([0, 0, 0])},
{'HeadOffset': 0.0},
[[nan_3d, nan_3d, nan_3d], [0, 1, 0]]),
# Setting the markers so there's no variance in the y-dimension
({'LFHD': np.array([1, 0, 0]), 'RFHD': np.array([0, 0, 0]), 'LBHD': np.array([1, 0, 0]), 'RBHD': np.array([0, 0, 0])},
{'HeadOffset': 0.0},
[[nan_3d, nan_3d, nan_3d], [0.5, 0, 0]]),
# Setting each marker in a different xy quadrant
({'LFHD': np.array([-1, 1, 0]), 'RFHD': np.array([1, 1, 0]), 'LBHD': np.array([-1, -1, 0]), 'RBHD': np.array([1, -1, 0])},
{'HeadOffset': 0.0},
[[[0, 2, 0], [-1, 1, 0], [0, 1, 1]], [0, 1, 0]]),
# Setting values of the markers so that midpoints will be on diagonals
({'LFHD': np.array([-2, 1, 0]), 'RFHD': np.array([1, 2, 0]), 'LBHD': np.array([-1, -2, 0]), 'RBHD': np.array([2, -1, 0])},
{'HeadOffset': 0.0},
[[[-0.81622777, 2.4486833 , 0], [-1.4486833, 1.18377223, 0], [-0.5, 1.5, 1]], [-0.5, 1.5, 0]]),
# Adding the value of 1 in the z dimension for all 4 markers
({'LFHD': np.array([1, 1, 1]), 'RFHD': np.array([0, 1, 1]), 'LBHD': np.array([1, 0, 1]), 'RBHD': np.array([0, 0, 1])},
{'HeadOffset': 0.0},
[[[0.5, 2, 1], [1.5, 1, 1], [0.5, 1, 0]], [0.5, 1, 1]]),
# Setting the z dimension value higher for LFHD and LBHD
({'LFHD': np.array([1, 1, 2]), 'RFHD': np.array([0, 1, 1]), 'LBHD': np.array([1, 0, 2]), 'RBHD': np.array([0, 0, 1])},
{'HeadOffset': 0.0},
[[[0.5, 2, 1.5], [1.20710678, 1, 2.20710678], [1.20710678, 1, 0.79289322]], [0.5, 1, 1.5]]),
# Setting the z dimension value higher for LFHD and RFHD
({'LFHD': np.array([1, 1, 2]), 'RFHD': np.array([0, 1, 2]), 'LBHD': np.array([1, 0, 1]), 'RBHD': np.array([0, 0, 1])},
{'HeadOffset': 0.0},
[[[0.5, 1.70710678, 2.70710678], [1.5, 1, 2], [0.5, 1.70710678, 1.29289322]], [0.5, 1, 2]]),
# Adding a value for HeadOffset
({'LFHD': np.array([1, 1, 0]), 'RFHD': np.array([0, 1, 0]), 'LBHD': np.array([1, 0, 0]), 'RBHD': np.array([0, 0, 0])},
{'HeadOffset': 0.5},
[[[0.5, 1.87758256, 0.47942554], [1.5, 1, 0], [0.5, 1.47942554, -0.87758256]], [0.5, 1, 0]]),
# Testing that when frame is a list of ints and headOffset is an int
({'LFHD': [1, 1, 0], 'RFHD': [0, 1, 0], 'LBHD': [1, 0, 0], 'RBHD': [0, 0, 0]},
{'HeadOffset': 1.0},
[[[0.5, 1.5403023058681398, 0.8414709848078965], [1.5, 1, 0], [0.5, 1.8414709848078965, -0.5403023058681398]], [0.5, 1, 0]]),
# Testing that when frame is a numpy array of ints and headOffset is an int
({'LFHD': np.array([1, 1, 0], dtype='int'), 'RFHD': np.array([0, 1, 0], dtype='int'),
'LBHD': np.array([1, 0, 0], dtype='int'), 'RBHD': np.array([0, 0, 0], dtype='int')},
{'HeadOffset': 1},
[[[0.5, 1.5403023058681398, 0.8414709848078965], [1.5, 1, 0], [0.5, 1.8414709848078965, -0.5403023058681398]], [0.5, 1, 0]]),
# Testing that when frame is a list of floats and headOffset is a float
({'LFHD': [1.0, 1.0, 0.0], 'RFHD': [0.0, 1.0, 0.0], 'LBHD': [1.0, 0.0, 0.0], 'RBHD': [0.0, 0.0, 0.0]},
{'HeadOffset': 1.0},
[[[0.5, 1.5403023058681398, 0.8414709848078965], [1.5, 1, 0], [0.5, 1.8414709848078965, -0.5403023058681398]], [0.5, 1, 0]]),
# Testing that when frame is a numpy array of floats and headOffset is a float
({'LFHD': np.array([1.0, 1.0, 0.0], dtype='float'), 'RFHD': np.array([0.0, 1.0, 0.0], dtype='float'),
'LBHD': np.array([1.0, 0.0, 0.0], dtype='float'), 'RBHD': np.array([0.0, 0.0, 0.0], dtype='float')},
{'HeadOffset': 1.0},
[[[0.5, 1.5403023058681398, 0.8414709848078965], [1.5, 1, 0], [0.5, 1.8414709848078965, -0.5403023058681398]], [0.5, 1, 0]])])
def test_headJC(self, frame, vsk, expected):
"""
This test provides coverage of the headJC function in pyCGM.py, defined as headJC(frame, vsk)
This test takes 3 parameters:
frame: dictionary of marker lists
vsk: dictionary containing subject measurements from a VSK file
expected: the expected result from calling headJC on frame and vsk
The function uses the LFHD, RFHD, LBHD, and RBHD markers from the frame to calculate the midpoints of the front, back, left, and right center positions of the head.
The head axis vector components are then calculated using the aforementioned midpoints.
Afterwards, the axes are made orthogonal by calculating the cross product of each individual axis.
Finally, the head axis is rotated around the y axis based on the head offset angle in the VSK.
This test is checking to make sure the head joint center and head joint axis are calculated correctly given
the 4 coordinates given in frame. This includes testing when there is no variance in the coordinates,
when the coordinates are in different quadrants, when the midpoints will be on diagonals, and when the z
dimension is variable. It also checks to see the difference when a value is set for HeadOffSet in vsk.
Lastly, it checks that the resulting output is correct when frame is a list of ints, a numpy array of
ints, a list of floats, and a numpy array of floats and when headOffset is an int and a float.
"""
result = pyCGM.headJC(frame, vsk)
np.testing.assert_almost_equal(result[0], expected[0], rounding_precision)
np.testing.assert_almost_equal(result[1], expected[1], rounding_precision)
@pytest.mark.parametrize(["frame", "expected"], [
# Test from running sample data
({'C7': np.array([251.22619629, 229.75683594, 1533.77624512]), 'T10': np.array([228.64323425, 192.32041931, 1279.6418457]), 'CLAV': np.array([256.78051758, 371.28042603, 1459.70300293]), 'STRN': np.array([251.67492676, 414.10391235, 1292.08508301])},
[[[256.23991128535846, 365.30496976939753, 1459.662169500559], [257.1435863244796, 364.21960599061947, 1459.588978712983], [256.0843053658035, 364.32180498523223, 1458.6575930699294]], [256.149810236564, 364.3090603933987, 1459.6553639290375]]),
# Basic test with a variance of 1 in the x and y dimensions of the markers
({'C7': np.array([1, 1, 0]), 'T10': np.array([0, 1, 0]), 'CLAV': np.array([1, 0, 0]), 'STRN': np.array([0, 0, 0])},
[[[1, 6, 0], [1, 7, 1], [0, 7, 0]], [1, 7, 0]]),
# Setting the markers so there's no variance in the x-dimension
({'C7': np.array([0, 1, 0]), 'T10': np.array([0, 1, 0]), 'CLAV': np.array([0, 0, 0]), 'STRN': np.array([0, 0, 0])},
[[nan_3d, nan_3d, nan_3d], nan_3d]),
# Setting the markers so there's no variance in the y-dimension
({'C7': np.array([1, 0, 0]), 'T10': np.array([0, 0, 0]), 'CLAV': np.array([1, 0, 0]), 'STRN': np.array([0, 0, 0])},
[[nan_3d, nan_3d, nan_3d], nan_3d]),
# Setting each marker in a different xy quadrant
({'C7': np.array([-1, 1, 0]), 'T10': np.array([1, 1, 0]), 'CLAV': np.array([-1, -1, 0]), 'STRN': np.array([1, -1, 0])},
[[[-1, 5, 0], [-1, 6, -1], [0, 6, 0]], [-1, 6, 0]]),
# Setting values of the markers so that midpoints will be on diagonals
({'C7': np.array([-2, 1, 0]), 'T10': np.array([1, 2, 0]), 'CLAV': np.array([-1, -2, 0]), 'STRN': np.array([2, -1, 0])},
[[[-2.8973666, 3.69209979, 0], [-3.21359436, 4.64078309, -1], [-2.26491106, 4.95701085, 0]], [-3.21359436, 4.64078309, 0]]),
# Adding the value of 1 in the z dimension for all 4 markers
({'C7': np.array([1, 1, 1]), 'T10': np.array([0, 1, 1]), 'CLAV': np.array([1, 0, 1]), 'STRN': np.array([0, 0, 1])},
[[[1, 6, 1], [1, 7, 2], [0, 7, 1]], [1, 7, 1]]),
# Setting the z dimension value higher for C7 and CLAV
({'C7': np.array([1, 1, 2]), 'T10': np.array([0, 1, 1]), 'CLAV': np.array([1, 0, 2]), 'STRN': np.array([0, 0, 1])},
[[[1, 6, 2], [0.29289322, 7, 2.70710678], [0.29289322, 7, 1.29289322]], [1, 7, 2]]),
# Setting the z dimension value higher for C7 and T10
({'C7': np.array([1, 1, 2]), 'T10': np.array([0, 1, 2]), 'CLAV': np.array([1, 0, 1]), 'STRN': np.array([0, 0, 1])},
[[[1, 4.24264069, 5.24264069], [1, 4.24264069, 6.65685425], [0, 4.94974747, 5.94974747]], [1, 4.94974747, 5.94974747]]),
# Testing that when frame is a list of ints
({'C7': [1, 1, 2], 'T10': [0, 1, 2], 'CLAV': [1, 0, 1], 'STRN': [0, 0, 1]},
[[[1, 4.24264069, 5.24264069], [1, 4.24264069, 6.65685425], [0, 4.94974747, 5.94974747]],
[1, 4.94974747, 5.94974747]]),
# Testing that when frame is a numpy array of ints
({'C7': np.array([1, 1, 2], dtype='int'), 'T10': np.array([0, 1, 2], dtype='int'),
'CLAV': np.array([1, 0, 1], dtype='int'), 'STRN': np.array([0, 0, 1], dtype='int')},
[[[1, 4.24264069, 5.24264069], [1, 4.24264069, 6.65685425], [0, 4.94974747, 5.94974747]],
[1, 4.94974747, 5.94974747]]),
# Testing that when frame is a list of floats
({'C7': [1.0, 1.0, 2.0], 'T10': [0.0, 1.0, 2.0], 'CLAV': [1.0, 0.0, 1.0], 'STRN': [0.0, 0.0, 1.0]},
[[[1, 4.24264069, 5.24264069], [1, 4.24264069, 6.65685425], [0, 4.94974747, 5.94974747]],
[1, 4.94974747, 5.94974747]]),
# Testing that when frame is a numpy array of floats
({'C7': np.array([1.0, 1.0, 2.0], dtype='float'), 'T10': np.array([0.0, 1.0, 2.0], dtype='float'),
'CLAV': np.array([1.0, 0.0, 1.0], dtype='float'), 'STRN': np.array([0.0, 0.0, 1.0], dtype='float')},
[[[1, 4.24264069, 5.24264069], [1, 4.24264069, 6.65685425], [0, 4.94974747, 5.94974747]],
[1, 4.94974747, 5.94974747]])])
def test_thoraxJC(self, frame, expected):
"""
This test provides coverage of the thoraxJC function in pyCGM.py, defined as thoraxJC(frame)
This test takes 2 parameters:
frame: dictionary of marker lists
expected: the expected result from calling thoraxJC on frame
The function uses the CLAV, C7, STRN, and T10 markers from the frame to calculate the midpoints of the front, back, left, and right center positions of the thorax.
The thorax axis vector components are then calculated by subtracting the pairs (left to right, back to front) of the aforementioned midpoints.
Afterwards, the axes are made orthogonal by calculating the cross product of each individual axis.
Finally, the thorax axis is rotated around the x axis based on the thorax offset angle in the VSK.
This test is checking to make sure the thorax joint center and thorax joint axis are calculated correctly given
the 4 coordinates given in frame. This includes testing when there is no variance in the coordinates,
when the coordinates are in different quadrants, when the midpoints will be on diagonals, and when the z
dimension is variable. Lastly, it checks that the resulting output is correct when frame is a list of ints, a
numpy array of ints, a list of floats, and a numpy array of floats.
"""
result = pyCGM.thoraxJC(frame)
np.testing.assert_almost_equal(result[0], expected[0], rounding_precision)
np.testing.assert_almost_equal(result[1], expected[1], rounding_precision)
@pytest.mark.parametrize(["frame", "thorax", "wand", "vsk", "expected_args"], [
# Test from running sample data
({'RSHO': np.array([428.88476562, 270.552948, 1500.73010254]), 'LSHO': np.array([68.24668121, 269.01049805, 1510.1072998])},
[[[256.23991128535846, 365.30496976939753, 1459.662169500559], [257.1435863244796, 364.21960599061947, 1459.588978712983], [256.0843053658035, 364.32180498523223, 1458.6575930699294]], [256.149810236564, 364.3090603933987, 1459.6553639290375]],
[[255.92550222678443, 364.3226950497605, 1460.6297868417887], [256.42380097331767, 364.27770361353487, 1460.6165849382387]],
{'RightShoulderOffset': 40.0, 'LeftShoulderOffset': 40.0},
[[[255.92550222678443, 364.3226950497605, 1460.6297868417887], [256.149810236564, 364.3090603933987, 1459.6553639290375], np.array([ 428.88476562, 270.552948 , 1500.73010254]), 47.0],
[[256.42380097331767, 364.27770361353487, 1460.6165849382387], [256.149810236564, 364.3090603933987, 1459.6553639290375], np.array([68.24668121, 269.01049805, 1510.1072998]), 47.0]]),
# Basic test with zeros for all params
({'RSHO': np.array([0, 0, 0]), 'LSHO': np.array([0, 0, 0])},
[[rand_coor, rand_coor, rand_coor], [0, 0, 0]],
[[0, 0, 0], [0, 0, 0]],
{'RightShoulderOffset': 0.0, 'LeftShoulderOffset': 0.0},
[[[0, 0, 0], [0, 0, 0], np.array([0, 0, 0]), 7.0],
[[0, 0, 0], [0, 0, 0], np.array([0, 0, 0]), 7.0]]),
# Testing when values are added to RSHO and LSHO
({'RSHO': np.array([2, -1, 3]), 'LSHO': np.array([-3, 1, 2])},
[[rand_coor, rand_coor, rand_coor], [0, 0, 0]],
[[0, 0, 0], [0, 0, 0]],
{'RightShoulderOffset': 0.0, 'LeftShoulderOffset': 0.0},
[[[0, 0, 0], [0, 0, 0], np.array([2, -1, 3]), 7.0],
[[0, 0, 0], [0, 0, 0], np.array([-3, 1, 2]), 7.0]]),
# Testing when a value is added to thorax_origin
({'RSHO': np.array([0, 0, 0]), 'LSHO': np.array([0, 0, 0])},
[[rand_coor, rand_coor, rand_coor], [5, -2, 7]],
[[0, 0, 0], [0, 0, 0]],
{'RightShoulderOffset': 0.0, 'LeftShoulderOffset': 0.0},
[[[0, 0, 0], [5, -2, 7], np.array([0, 0, 0]), 7.0],
[[0, 0, 0], [5, -2, 7], np.array([0, 0, 0]), 7.0]]),
# Testing when a value is added to wand
({'RSHO': np.array([0, 0, 0]), 'LSHO': np.array([0, 0, 0])},
[[rand_coor, rand_coor, rand_coor], [0, 0, 0]],
[[2, 6, -4], [-3, 5, 2]],
{'RightShoulderOffset': 0.0, 'LeftShoulderOffset': 0.0},
[[[2, 6, -4], [0, 0, 0], np.array([0, 0, 0]), 7.0],
[[-3, 5, 2], [0, 0, 0], np.array([0, 0, 0]), 7.0]]),
# Testing when values are added to RightShoulderOffset and LeftShoulderOffset
({'RSHO': np.array([0, 0, 0]), 'LSHO': np.array([0, 0, 0])},
[[rand_coor, rand_coor, rand_coor], [0, 0, 0]],
[[0, 0, 0], [0, 0, 0]],
{'RightShoulderOffset': 20.0, 'LeftShoulderOffset': -20.0},
[[[0, 0, 0], [0, 0, 0], np.array([0, 0, 0]), 27.0],
[[0, 0, 0], [0, 0, 0], np.array([0, 0, 0]), -13.0]]),
# Adding when values are added to all params
({'RSHO': np.array([3, -5, 2]), 'LSHO': np.array([-7, 3 , 9])},
[[rand_coor, rand_coor, rand_coor], [-1, -9, -5]],
[[-7, -1, 5], [5, -9, 2]],
{'RightShoulderOffset': -6.0, 'LeftShoulderOffset': 42.0},
[[[-7, -1, 5], [-1, -9, -5], np.array([3, -5, 2]), 1.0],
[[5, -9, 2], [-1, -9, -5], np.array([-7, 3 , 9]), 49.0]]),
# Testing that when frame, thorax, wand and vsk are lists of ints
({'RSHO': [3, -5, 2], 'LSHO': [-7, 3, 9]},
[[rand_coor, rand_coor, rand_coor], [-1, -9, -5]],
[[-7, -1, 5], [5, -9, 2]],
{'RightShoulderOffset': -6, 'LeftShoulderOffset': 42},
[[[-7, -1, 5], [-1, -9, -5], np.array([3, -5, 2]), 1.0],
[[5, -9, 2], [-1, -9, -5], np.array([-7, 3, 9]), 49.0]]),
# Testing that when frame, wand and vsk are numpy arrays of ints
({'RSHO': np.array([3, -5, 2], dtype='int'), 'LSHO': np.array([-7, 3, 9], dtype='int')},
[[rand_coor, rand_coor, rand_coor], np.array([-1, -9, -5], dtype='int')],
np.array([[-7, -1, 5], [5, -9, 2]], dtype='int'),
{'RightShoulderOffset': -6, 'LeftShoulderOffset': 42},
[[[-7, -1, 5], [-1, -9, -5], np.array([3, -5, 2]), 1.0],
[[5, -9, 2], [-1, -9, -5], np.array([-7, 3, 9]), 49.0]]),
# Testing that when frame, thorax, wand and vsk are lists of floats
({'RSHO': [3.0, -5.0, 2.0], 'LSHO': [-7.0, 3.0, 9.0]},
[[rand_coor, rand_coor, rand_coor], [-1.0, -9.0, -5.0]],
[[-7.0, -1.0, 5.0], [5.0, -9.0, 2.0]],
{'RightShoulderOffset': -6.0, 'LeftShoulderOffset': 42.0},
[[[-7, -1, 5], [-1, -9, -5], np.array([3, -5, 2]), 1.0],
[[5, -9, 2], [-1, -9, -5], np.array([-7, 3, 9]), 49.0]]),
# Testing that when frame, wand and vsk are numpy arrays of floats
({'RSHO': np.array([3.0, -5.0, 2.0], dtype='float'), 'LSHO': np.array([-7.0, 3.0, 9.0], dtype='float')},
[[rand_coor, rand_coor, rand_coor], np.array([-1.0, -9.0, -5.0], dtype='float')],
np.array([[-7.0, -1.0, 5.0], [5.0, -9.0, 2.0]], dtype='float'),
{'RightShoulderOffset': -6.0, 'LeftShoulderOffset': 42.0},
[[[-7, -1, 5], [-1, -9, -5], np.array([3, -5, 2]), 1.0],
[[5, -9, 2], [-1, -9, -5], np.array([-7, 3, 9]), 49.0]])])
def test_findshoulderJC(self, frame, thorax, wand, vsk, expected_args):
"""
This test provides coverage of the findshoulderJC function in pyCGM.py, defined as findshoulderJC(frame, thorax, wand, vsk)
This test takes 5 parameters:
frame: dictionary of marker lists
thorax: array containing several x,y,z markers for the thorax
wand: array containing two x,y,z markers for wand
vsk: dictionary containing subject measurements from a VSK file
expected_args: the expected arguments used to call the mocked function, findJointC
The function uses the RSHO and LSHO markers from the frame given, as well as the thorax origin position and the wand.
The right shoulder joint center is found by using the RSHO marker, right wand position, and thorax origin position as positions in a
plane for the Rodrigues' rotation formula. The same is done for the left shoulder joint center,
although the left wand and LSHO markers are used instead.
This test is checking to make sure the shoulder joint center is calculated correctly given the input parameters.
This tests mocks findJointC to make sure the correct parameters are being passed into it given the parameters
passed into findshoulderJC.
Lastly, it checks that the resulting output is correct when frame and wand are a list of ints, a
numpy array of ints, a list of floats, and a numpy array of floats, vsk values are either an int or a float,
and thorax values are either an int or a float. Thorax cannot be a numpy array due it not being shaped like
a multi-dimensional array.
"""
rand_coor = [np.random.randint(0, 10), np.random.randint(0, 10), np.random.randint(0, 10)]
with patch.object(pyCGM, 'findJointC', return_value=rand_coor) as mock_findJointC:
result = pyCGM.findshoulderJC(frame, thorax, wand, vsk)
# Asserting that there were only 2 calls to findJointC
np.testing.assert_equal(mock_findJointC.call_count, 2)
# Asserting that the correct params were sent in the 1st (right) call to findJointC
np.testing.assert_almost_equal(expected_args[0][0], mock_findJointC.call_args_list[0][0][0], rounding_precision)
np.testing.assert_almost_equal(expected_args[0][1], mock_findJointC.call_args_list[0][0][1], rounding_precision)
np.testing.assert_almost_equal(expected_args[0][2], mock_findJointC.call_args_list[0][0][2], rounding_precision)
np.testing.assert_almost_equal(expected_args[0][3], mock_findJointC.call_args_list[0][0][3], rounding_precision)
# Asserting that the correct params were sent in the 2nd (left) call to findJointC
np.testing.assert_almost_equal(expected_args[1][0], mock_findJointC.call_args_list[1][0][0], rounding_precision)
np.testing.assert_almost_equal(expected_args[1][1], mock_findJointC.call_args_list[1][0][1], rounding_precision)
np.testing.assert_almost_equal(expected_args[1][2], mock_findJointC.call_args_list[1][0][2], rounding_precision)
np.testing.assert_almost_equal(expected_args[1][3], mock_findJointC.call_args_list[1][0][3], rounding_precision)
# Asserting that findShoulderJC returned the correct result given the return value given by mocked findJointC
np.testing.assert_almost_equal(result[0], rand_coor, rounding_precision)
np.testing.assert_almost_equal(result[1], rand_coor, rounding_precision)
@pytest.mark.parametrize(["thorax", "shoulderJC", "wand", "expected"], [
# Test from running sample data
([[[256.23991128535846, 365.30496976939753, 1459.662169500559], [257.1435863244796, 364.21960599061947, 1459.588978712983], [256.0843053658035, 364.32180498523223, 1458.6575930699294]], [256.149810236564, 364.3090603933987, 1459.6553639290375]],
[np.array([429.66951995, 275.06718615, 1453.95397813]), np.array([64.51952734, 274.93442161, 1463.6313334 ])],
[[255.92550222678443, 364.3226950497605, 1460.6297868417887], [256.42380097331767, 364.27770361353487, 1460.6165849382387]],
[[np.array([429.66951995, 275.06718615, 1453.95397813]), np.array([64.51952734, 274.93442161, 1463.6313334 ])],
[[[430.12731330596756, 275.9513661907463, 1454.0469882869343], [429.6862168456729, 275.1632337671314, 1452.9587414419757], [428.78061812142147, 275.5243518770602, 1453.9831850281803]],
[[64.10400324869988, 275.83192826468195, 1463.7790545425955], [64.59882848203122, 274.80838068265837, 1464.620183745389], [65.42564601518438, 275.3570272042577, 1463.6125331307376]]]]),
# Test with zeros for all params
([[rand_coor, rand_coor, rand_coor], [0, 0, 0]],
[np.array([0, 0, 0]), np.array([0, 0, 0])],
[[0, 0, 0], [0, 0, 0]],
[[np.array([0, 0, 0]), np.array([0, 0, 0])],
[[nan_3d, nan_3d, nan_3d],
[nan_3d, nan_3d, nan_3d]]]),
# Testing when adding values in thorax but zeros for all other params
([[rand_coor, rand_coor, rand_coor], [8, 2, -6]],
[np.array([0, 0, 0]), np.array([0, 0, 0])],
[[0, 0, 0], [0, 0, 0]],
[[np.array([0, 0, 0]), np.array([0, 0, 0])],
[[nan_3d, nan_3d, [0.78446454, 0.19611614, -0.58834841]],
[nan_3d, nan_3d, [0.78446454, 0.19611614, -0.58834841]]]]),
# Testing when adding values in shoulderJC but zeros for all other params
([[rand_coor, rand_coor, rand_coor], [0, 0, 0]],
[np.array([1, 5, -3]), np.array([0, -9, 2])],
[[0, 0, 0], [0, 0, 0]],
[[np.array([1, 5, -3]), np.array([0, -9, 2])],
[[nan_3d, nan_3d, [0.830969149054, 4.154845745271, -2.4929074471]],
[nan_3d, nan_3d, [0.0, -8.02381293981, 1.783069542181]]]]),
# Testing when adding values in wand but zeros for all other params
([[rand_coor, rand_coor, rand_coor], [0, 0, 0]],
[np.array([0, 0, 0]), np.array([0, 0, 0])],
[[1, 0, -7], [-3, 5, 3]],
[[np.array([0, 0, 0]), np.array([0, 0, 0])],
[[nan_3d, nan_3d, nan_3d],
[nan_3d, nan_3d, nan_3d]]]),
# Testing when adding values to thorax and shoulderJC
([[rand_coor, rand_coor, rand_coor], [8, 2, -6]],
[np.array([1, 5, -3]), np.array([0, -9, 2])],
[[0, 0, 0], [0, 0, 0]],
[[np.array([1, 5, -3]), np.array([0, -9, 2])],
[[[0.50428457, 4.62821343, -3.78488277], [1.15140320, 5.85290468, -3.49963055], [1.85518611, 4.63349167, -3.36650833]],
[[-0.5611251741, -9.179560055, 1.191979749], [-0.65430149, -8.305871473, 2.3001252440], [0.5069794004, -8.302903324, 1.493020599]]]]),
# Testing when adding values to thorax and wand
([[rand_coor, rand_coor, rand_coor], [8, 2, -6]],
[np.array([0, 0, 0]), np.array([0, 0, 0])],
[[1, 0, -7], [-3, 5, 3]],
[[np.array([0, 0, 0]), np.array([0, 0, 0])],
[[[-0.269430125, 0.96225044, -0.03849001], [0.55859, 0.18871284, 0.80769095], [0.78446454, 0.19611614, -0.58834841]],
[[-0.6130824329, 0.10218040549, -0.7833831087], [-0.09351638899, 0.9752423423, 0.20039226212], [0.7844645405, 0.19611613513, -0.5883484054]]]]),
# Testing when adding values to shoulderJC and wand
([[rand_coor, rand_coor, rand_coor], [0, 0, 0]],
[np.array([1, 5, -3]), np.array([0, -9, 2])],
[[1, 0, -7], [-3, 5, 3]],
[[np.array([1, 5, -3]), np.array([0, -9, 2])],
[[[1.98367400, 4.88758011, -2.85947514], [0.93824211, 5.52256679, -2.14964131], [0.83096915, 4.15484575, -2.49290745]],
[[-0.80094836, -9.12988352, 1.41552417], [-0.59873343, -8.82624991, 2.78187543], [0.0, -8.02381294, 1.78306954]]]]),
# Testing when adding values to thorax, shoulderJC and wand
([[rand_coor, rand_coor, rand_coor], [8, 2, -6]],
[np.array([1, 5, -3]), np.array([0, -9, 2])],
[[1, 0, -7], [-3, 5, 3]],
[[np.array([1, 5, -3]), np.array([0, -9, 2])],
[[[0.93321781, 5.62330046, -3.77912558], [1.51400083, 5.69077360, -2.49143833], [1.85518611, 4.63349167, -3.36650833]],
[[-0.64460664, -9.08385127, 1.24009787], [-0.57223612, -8.287942994, 2.40684228], [0.50697940, -8.30290332, 1.4930206]]]]),
# Testing that when thorax, shoulderJC, and wand are lists of ints
([[rand_coor, rand_coor, rand_coor], [8, 2, -6]],
[[1, 5, -3], [0, -9, 2]],
[[1, 0, -7], [-3, 5, 3]],
[[np.array([1, 5, -3]), np.array([0, -9, 2])],
[[[0.93321781, 5.62330046, -3.77912558], [1.51400083, 5.69077360, -2.49143833],
[1.85518611, 4.63349167, -3.36650833]],
[[-0.64460664, -9.08385127, 1.24009787], [-0.57223612, -8.287942994, 2.40684228],
[0.50697940, -8.30290332, 1.4930206]]]]),
# Testing that when thorax, shoulderJC and wand are numpy arrays of ints
([[rand_coor, rand_coor, rand_coor], np.array([8, 2, -6], dtype='int')],
np.array([np.array([1, 5, -3], dtype='int'), np.array([0, -9, 2], dtype='int')], dtype='int'),
np.array([np.array([1, 0, -7], dtype='int'), np.array([-3, 5, 3], dtype='int')], dtype='int'),
[[np.array([1, 5, -3]), np.array([0, -9, 2])],
[[[0.93321781, 5.62330046, -3.77912558], [1.51400083, 5.69077360, -2.49143833],
[1.85518611, 4.63349167, -3.36650833]],
[[-0.64460664, -9.08385127, 1.24009787], [-0.57223612, -8.287942994, 2.40684228],
[0.50697940, -8.30290332, 1.4930206]]]]),
# Testing that when thorax, shoulderJC and wand are lists of floats
([[rand_coor, rand_coor, rand_coor], [8.0, 2.0, -6.0]],
[[1.0, 5.0, -3.0], [0.0, -9.0, 2.0]],
[[1.0, 0.0, -7.0], [-3.0, 5.0, 3.0]],
[[np.array([1, 5, -3]), np.array([0, -9, 2])],
[[[0.93321781, 5.62330046, -3.77912558], [1.51400083, 5.69077360, -2.49143833],
[1.85518611, 4.63349167, -3.36650833]],
[[-0.64460664, -9.08385127, 1.24009787], [-0.57223612, -8.287942994, 2.40684228],
[0.50697940, -8.30290332, 1.4930206]]]]),
# Testing that when thorax, shoulderJC and wand are numpy arrays of floats
([[rand_coor, rand_coor, rand_coor], np.array([8.0, 2.0, -6.0], dtype='float')],
np.array([np.array([1.0, 5.0, -3.0], dtype='float'), np.array([0.0, -9.0, 2.0], dtype='float')], dtype='float'),
np.array([np.array([1.0, 0.0, -7.0], dtype='float'), np.array([-3.0, 5.0, 3.0], dtype='float')], dtype='float'),
[[np.array([1.0, 5.0, -3.0]), np.array([0.0, -9.0, 2.0])],
[[[0.93321781, 5.62330046, -3.77912558], [1.51400083, 5.69077360, -2.49143833],
[1.85518611, 4.63349167, -3.36650833]],
[[-0.64460664, -9.08385127, 1.24009787], [-0.57223612, -8.287942994, 2.40684228],
[0.50697940, -8.30290332, 1.4930206]]]])])
def test_shoulderAxisCalc(self, thorax, shoulderJC, wand, expected):
"""
This test provides coverage of the shoulderAxisCalc function in pyCGM.py, defined as shoulderAxisCalc(frame, thorax, shoulderJC, wand)
This test takes 4 parameters:
thorax: array containing several x,y,z markers for the thorax
shoulderJC: array containing x,y,z position of the shoulder joint center
wand: array containing two x,y,z markers for wand
expected: the expected result from calling shoulderAxisCalc on thorax, shoulderJC, and wand
For the left and right shoulder axis, the respective axis is calculated by taking the difference between the respective direction (left or right) and the thorax origin.
The difference is then used to get the direction of each respective shoulder joint center for each shoulder axis in the order of Z, X, Y.
The direction is then applied backwards to each shoulder joint center to account for variations in marker sizes.
Lastly, it checks that the resulting output is correct when shoulderJC and wand are a list of ints, a
numpy array of ints, a list of floats, and a numpy array of floats, and thorax values are either an int or a
float. Thorax cannot be a numpy array due to it not being shaped like a multi-dimensional array.
"""
result = pyCGM.shoulderAxisCalc(None, thorax, shoulderJC, wand)
np.testing.assert_almost_equal(result[0], expected[0], rounding_precision)
np.testing.assert_almost_equal(result[1], expected[1], rounding_precision)
@pytest.mark.parametrize(["frame", "thorax", "shoulderJC", "vsk", "mockReturnVal", "expectedMockArgs", "expected"], [
# Test from running sample data
({'RSHO': np.array([428.88476562, 270.552948, 1500.73010254]), 'LSHO': np.array([68.24668121, 269.01049805, 1510.1072998]),
'RELB': np.array([658.90338135, 326.07580566, 1285.28515625]), 'LELB': np.array([-156.32162476, 335.2583313, 1287.39916992]),
'RWRA': np.array([ 776.51898193, 495.68103027, 1108.38464355]), 'RWRB': np.array([ 830.9072876 , 436.75341797, 1119.11901855]),
'LWRA': np.array([-249.28146362, 525.32977295, 1117.09057617]), 'LWRB': np.array([-311.77532959, 477.22512817, 1125.1619873 ])},
[[rand_coor, [257.1435863244796, 364.21960599061947, 1459.588978712983], rand_coor],
[256.149810236564, 364.3090603933987, 1459.6553639290375]],
[np.array([429.66951995, 275.06718615, 1453.95397813]), np.array([64.51952734, 274.93442161, 1463.6313334])],
{'RightElbowWidth': 74.0, 'LeftElbowWidth': 74.0, 'RightWristWidth': 55.0, 'LeftWristWidth': 55.0},
[[633.66707588, 304.95542115, 1256.07799541], [-129.16952219, 316.8671644, 1258.06440717]],
[[[429.7839232523795, 96.8248244295684, 904.5644429627703], [429.66951995, 275.06718615, 1453.95397813], [658.90338135, 326.07580566, 1285.28515625], -44.0],
[[-409.6146956013004, 530.6280208729519, 1671.682014527917], [64.51952734, 274.93442161, 1463.6313334], [-156.32162476, 335.2583313, 1287.39916992], 44.0]],
[[np.array([633.66707587, 304.95542115, 1256.07799541]),
np.array([-129.16952218, 316.8671644, 1258.06440717])],
[[[633.8107013869995, 303.96579004975194, 1256.07658506845], [634.3524799178464, 305.0538658933253, 1256.799473014224], [632.9532180390149, 304.85083190737765, 1256.770431750491]],
[[-129.32391792749496, 315.8807291324946, 1258.008662931836], [-128.45117135279028, 316.79382333592827, 1257.3726028780698], [-128.49119037560908, 316.72030884193634, 1258.7843373067021]]],
[[793.3281430325068, 451.2913478825204, 1084.4325513020426], [-272.4594189740742, 485.801522109477, 1091.3666238350822]]]),
# Test with zeros for all params
({'RSHO': np.array([0, 0, 0]), 'LSHO': np.array([0, 0, 0]), 'RELB': np.array([0, 0, 0]), 'LELB': np.array([0, 0, 0]),
'RWRA': np.array([0, 0, 0]), 'RWRB': np.array([0, 0, 0]), 'LWRA': np.array([0, 0, 0]), 'LWRB': np.array([0, 0, 0])},
[[rand_coor, [0, 0, 0], rand_coor], [0, 0, 0]],
[np.array([0, 0, 0]), np.array([0, 0, 0])],
{'RightElbowWidth': 0.0, 'LeftElbowWidth': 0.0, 'RightWristWidth': 0.0, 'LeftWristWidth': 0.0},
[[0, 0, 0], [0, 0, 0]],
[[nan_3d, [0, 0, 0], [0, 0, 0], -7.0], [nan_3d, [0, 0, 0], [0, 0, 0], 7.0]],
[[np.array([0, 0, 0]), np.array([0, 0, 0])], [[nan_3d, nan_3d, nan_3d], [nan_3d, nan_3d, nan_3d]], [nan_3d, nan_3d]]),
# Testing when values are added to frame
({'RSHO': np.array([9, -7, -6]), 'LSHO': np.array([3, -8, 5]), 'RELB': np.array([-9, 1, -4]), 'LELB': np.array([-4, 1, -6]),
'RWRA': np.array([2, -3, 9]), 'RWRB': np.array([-4, -2, -7]), 'LWRA': np.array([-9, 1, -1]), 'LWRB': np.array([-3, -4, -9])},
[[rand_coor, [0, 0, 0], rand_coor], [0, 0, 0]],
[np.array([0, 0, 0]), np.array([0, 0, 0])],
{'RightElbowWidth': 0.0, 'LeftElbowWidth': 0.0, 'RightWristWidth': 0.0, 'LeftWristWidth': 0.0},
[[0, 0, 0], [0, 0, 0]],
[[[149.87576359540907, -228.48721408225754, -418.8422716102348], [0, 0, 0], [-9, 1, -4], -7.0], [[282.73117218166414, -326.69276820761615, -251.76957615571214], [0, 0, 0], [-4, 1, -6], 7.0]],
[[np.array([0, 0, 0]), np.array([0, 0, 0])],
[[nan_3d, nan_3d, nan_3d],
[nan_3d, nan_3d, nan_3d]],
[[4.7413281, -5.7386979, -1.35541665], [-4.96790631, 4.69256216, -8.09628108]]]),
# Testing when values are added to thorax
({'RSHO': np.array([0, 0, 0]), 'LSHO': np.array([0, 0, 0]), 'RELB': np.array([0, 0, 0]), 'LELB': np.array([0, 0, 0]),
'RWRA': np.array([0, 0, 0]), 'RWRB': np.array([0, 0, 0]), 'LWRA': np.array([0, 0, 0]), 'LWRB': np.array([0, 0, 0])},
[[rand_coor, [-9, 5, -5], rand_coor], [-5, -2, -3]],
[np.array([0, 0, 0]), np.array([0, 0, 0])],
{'RightElbowWidth': 0.0, 'LeftElbowWidth': 0.0, 'RightWristWidth': 0.0, 'LeftWristWidth': 0.0},
[[0, 0, 0], [0, 0, 0]],
[[nan_3d, [0, 0, 0], [0, 0, 0], -7.0],
[nan_3d, [0, 0, 0], [0, 0, 0], 7.0]],
[[np.array([0, 0, 0]), np.array([0, 0, 0])], [[nan_3d, nan_3d, nan_3d], [nan_3d, nan_3d, nan_3d]], [nan_3d, nan_3d]]),
# Testing when values are added to shoulderJC
({'RSHO': np.array([0, 0, 0]), 'LSHO': np.array([0, 0, 0]), 'RELB': np.array([0, 0, 0]), 'LELB': np.array([0, 0, 0]),
'RWRA': np.array([0, 0, 0]), 'RWRB': np.array([0, 0, 0]), 'LWRA': np.array([0, 0, 0]), 'LWRB': np.array([0, 0, 0])},
[[rand_coor, [0, 0, 0], rand_coor], [0, 0, 0]],
[np.array([-2, -8, -3]), np.array([5, -3, 2])],
{'RightElbowWidth': 0.0, 'LeftElbowWidth': 0.0, 'RightWristWidth': 0.0, 'LeftWristWidth': 0.0},
[[0, 0, 0], [0, 0, 0]],
[[nan_3d, [-2, -8, -3], [0, 0, 0], -7.0],
[nan_3d, [5, -3, 2], [0, 0, 0], 7.0]],
[[np.array([0, 0, 0]), np.array([0, 0, 0])],
[[nan_3d, nan_3d, [-0.2279211529192759, -0.9116846116771036, -0.3418817293789138]],
[nan_3d, nan_3d, [0.8111071056538127, -0.48666426339228763, 0.3244428422615251]]],
[nan_3d, nan_3d]]),
# Testing when values are added to vsk
({'RSHO': np.array([0, 0, 0]), 'LSHO': np.array([0, 0, 0]), 'RELB': np.array([0, 0, 0]), 'LELB': np.array([0, 0, 0]),
'RWRA': np.array([0, 0, 0]), 'RWRB': np.array([0, 0, 0]), 'LWRA': np.array([0, 0, 0]), 'LWRB': np.array([0, 0, 0])},
[[rand_coor, [0, 0, 0], rand_coor], [0, 0, 0]],
[np.array([0, 0, 0]), np.array([0, 0, 0])],
{'RightElbowWidth': -38.0, 'LeftElbowWidth': 6.0, 'RightWristWidth': 47.0, 'LeftWristWidth': -7.0},
[[0, 0, 0], [0, 0, 0]],
[[nan_3d, [0, 0, 0], [0, 0, 0], 12.0],
[nan_3d, [0, 0, 0], [0, 0, 0], 10.0]],
[[np.array([0, 0, 0]), np.array([0, 0, 0])], [[nan_3d, nan_3d, nan_3d], [nan_3d, nan_3d, nan_3d]], [nan_3d, nan_3d]]),
# Testing when values are added to mockReturnVal
({'RSHO': np.array([0, 0, 0]), 'LSHO': np.array([0, 0, 0]), 'RELB': np.array([0, 0, 0]), 'LELB': np.array([0, 0, 0]),
'RWRA': np.array([0, 0, 0]), 'RWRB': np.array([0, 0, 0]), 'LWRA': np.array([0, 0, 0]), 'LWRB': np.array([0, 0, 0])},
[[rand_coor, [0, 0, 0], rand_coor], [0, 0, 0]],
[np.array([0, 0, 0]), np.array([0, 0, 0])],
{'RightElbowWidth': 0.0, 'LeftElbowWidth': 0.0, 'RightWristWidth': 0.0, 'LeftWristWidth': 0.0},
[[5, 4, -4], [6, 3, 5]],
[[nan_3d, [0, 0, 0], [0, 0, 0], -7.0],
[nan_3d, [0, 0, 0], [0, 0, 0], 7.0]],
[[np.array([5, 4, -4]), np.array([6, 3, 5])],
[[nan_3d, nan_3d, [4.337733821467478, 3.4701870571739826, -3.4701870571739826]],
[nan_3d, nan_3d, [5.2828628343993635, 2.6414314171996818, 4.4023856953328036]]],
[nan_3d, nan_3d]]),
# Testing when values are added to frame and thorax
({'RSHO': np.array([9, -7, -6]), 'LSHO': np.array([3, -8, 5]), 'RELB': np.array([-9, 1, -4]), 'LELB': np.array([-4, 1, -6]),
'RWRA': np.array([2, -3, 9]), 'RWRB': np.array([-4, -2, -7]), 'LWRA': np.array([-9, 1, -1]), 'LWRB': np.array([-3, -4, -9])},
[[rand_coor, [-9, 5, -5], rand_coor], [-5, -2, -3]],
[np.array([0, 0, 0]), np.array([0, 0, 0])],
{'RightElbowWidth': 0.0, 'LeftElbowWidth': 0.0, 'RightWristWidth': 0.0, 'LeftWristWidth': 0.0},
[[0, 0, 0], [0, 0, 0]],
[[[149.87576359540907, -228.48721408225754, -418.8422716102348], [0, 0, 0], [-9, 1, -4], -7.0],
[[282.73117218166414, -326.69276820761615, -251.76957615571214], [0, 0, 0], [-4, 1, -6], 7.0]],
[[np.array([0, 0, 0]), np.array([0, 0, 0])], [[nan_3d, nan_3d, nan_3d], [nan_3d, nan_3d, nan_3d]],
[[4.7413281, -5.7386979, -1.35541665], [-4.96790631, 4.69256216, -8.09628108]]]),
# Testing when values are added to frame, thorax, and shoulderJC
({'RSHO': np.array([9, -7, -6]), 'LSHO': np.array([3, -8, 5]), 'RELB': np.array([-9, 1, -4]), 'LELB': np.array([-4, 1, -6]),
'RWRA': np.array([2, -3, 9]), 'RWRB': np.array([-4, -2, -7]), 'LWRA': np.array([-9, 1, -1]), 'LWRB': np.array([-3, -4, -9])},
[[rand_coor, [-9, 5, -5], rand_coor], [-5, -2, -3]],
[np.array([-2, -8, -3]), np.array([5, -3, 2])],
{'RightElbowWidth': 0.0, 'LeftElbowWidth': 0.0, 'RightWristWidth': 0.0, 'LeftWristWidth': 0.0},
[[0, 0, 0], [0, 0, 0]],
[[[-311.42865408643604, -195.76081109238007, 342.15327877363165], [-2, -8, -3], [-9, 1, -4], -7.0],
[[183.9753004933977, -292.7114070209339, -364.32791656553934], [5, -3, 2], [-4, 1, -6], 7.0]],
[[np.array([0, 0, 0]), np.array([0, 0, 0])],
[[[-0.9661174276011973, 0.2554279765068226, -0.03706298561739535], [0.12111591199009825, 0.3218504585188577, -0.9390118307103527], [-0.2279211529192759, -0.9116846116771036, -0.3418817293789138]],
[[-0.40160401780320154, -0.06011448807273248, 0.9138383123989052], [-0.4252287337918506, -0.8715182976051595, -0.24420561192811296], [0.8111071056538127, -0.48666426339228763, 0.3244428422615251]]],
[[4.7413281, -5.7386979, -1.35541665], [-4.96790631, 4.69256216, -8.09628108]]]),
# Testing when values are added to frame, thorax, shoulderJC, and vsk
({'RSHO': np.array([9, -7, -6]), 'LSHO': np.array([3, -8, 5]), 'RELB': np.array([-9, 1, -4]), 'LELB': np.array([-4, 1, -6]),
'RWRA': np.array([2, -3, 9]), 'RWRB': np.array([-4, -2, -7]), 'LWRA': np.array([-9, 1, -1]), 'LWRB': np.array([-3, -4, -9])},
[[rand_coor, [-9, 5, -5], rand_coor], [-5, -2, -3]],
[
|
np.array([-2, -8, -3])
|
numpy.array
|
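The mocking pattern these tests rely on, reduced to a self-contained toy example (the class and marker names here are hypothetical, not part of pyCGM); it shows how patch.object substitutes the expensive helper and how call_args_list exposes the arguments for assertion.

from unittest.mock import patch
import numpy as np

class Geometry:
    @staticmethod
    def find_joint_center(a, b, c, delta):
        raise RuntimeError("expensive computation we do not want in a unit test")

    @staticmethod
    def find_shoulder(markers):
        right = Geometry.find_joint_center(markers['wand'], markers['origin'], markers['RSHO'], 7.0)
        left = Geometry.find_joint_center(markers['wand'], markers['origin'], markers['LSHO'], 7.0)
        return right, left

markers = {'wand': [0, 0, 0], 'origin': [0, 0, 0], 'RSHO': [1, 2, 3], 'LSHO': [-1, 2, 3]}
with patch.object(Geometry, 'find_joint_center', return_value=[9, 9, 9]) as mock_jc:
    result = Geometry.find_shoulder(markers)

assert mock_jc.call_count == 2                                               # called once per shoulder
np.testing.assert_almost_equal(mock_jc.call_args_list[0][0][2], [1, 2, 3])   # RSHO went into the 1st call
print(result)  # ([9, 9, 9], [9, 9, 9])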
import anndata
import dask.array
import h5py
import numpy as np
import os
import pytest
import scipy.sparse
from sfaira.data import load_store
from sfaira.unit_tests.data_for_tests.loaders import PrepareData
@pytest.mark.parametrize("store_format", ["h5ad", "dao", "anndata"])
def test_fatal(store_format: str):
"""
Test if basic methods of stores abort.
"""
if store_format == "anndata":
stores = PrepareData().prepare_store_anndata()
else:
store_path = PrepareData().prepare_store(store_format=store_format)
stores = load_store(cache_path=store_path, store_format=store_format)
stores.subset(attr_key="organism", values=["Mus musculus"])
store = stores.stores["Mus musculus"]
# Test both single and multi-store:
for x in [store, stores]:
_ = x.n_obs
_ = x.n_vars
_ = x.var_names
_ = x.shape
_ = x.indices
_ = x.genome_container
@pytest.mark.parametrize("store_format", ["h5ad", "dao"])
def test_config(store_format: str):
"""
Test that data set config files can be set, written and recovered.
"""
store_path = PrepareData().prepare_store(store_format=store_format)
config_path = os.path.join(store_path, "config_lung")
store = load_store(cache_path=store_path, store_format=store_format)
store.subset(attr_key="organism", values=["Mus musculus"])
store.subset(attr_key="assay_sc", values=["10x technology"])
store.write_config(fn=config_path)
store2 = load_store(cache_path=store_path, store_format=store_format)
store2.load_config(fn=config_path + ".pickle")
assert np.all(store.indices.keys() == store2.indices.keys())
assert np.all([np.all(store.indices[k] == store2.indices[k])
for k in store.indices.keys()])
@pytest.mark.parametrize("store_format", ["h5ad", "dao"])
def test_store_data(store_format: str):
"""
Test if the data exposed by the store are the same as in the original Dataset instance after streamlining.
"""
data = PrepareData()
# Run standard streamlining workflow on dsg and compare to object relayed via store.
# Prepare dsg.
dsg = data.prepare_dsg(load=True)
# Prepare store.
# Rewriting store to avoid mismatch of randomly generated data in cache and store.
store_path = data.prepare_store(store_format=store_format, rewrite=False, rewrite_store=True)
store = load_store(cache_path=store_path, store_format=store_format)
store.subset(attr_key="doi_journal", values=["no_doi_mock1"])
dataset_id = store.adata_by_key[list(store.indices.keys())[0]].uns["id"]
adata_store = store.adata_by_key[dataset_id]
x_store = store.data_by_key[dataset_id]
adata_ds = dsg.datasets[dataset_id].adata
x_ds = adata_ds.X.todense()
if isinstance(x_store, dask.array.Array):
x_store = x_store.compute()
if isinstance(x_store, h5py.Dataset):
# Need to load sparse matrix into memory if it comes from a backed anndata object.
x_store = x_store[:, :]
if isinstance(x_store, anndata._core.sparse_dataset.SparseDataset):
# Need to load sparse matrix into memory if it comes from a backed anndata object.
x_store = x_store[:, :]
if isinstance(x_store, scipy.sparse.csr_matrix):
x_store = x_store.todense()
if isinstance(x_ds, anndata._core.sparse_dataset.SparseDataset):
# Need to load sparse matrix into memory if it comes from a backed anndata object.
x_ds = x_ds[:, :]
if isinstance(x_ds, scipy.sparse.csr_matrix):
x_ds = x_ds.todense()
# Check that non-zero elements are the same:
assert x_store.shape[0] == x_ds.shape[0]
assert x_store.shape[1] == x_ds.shape[1]
assert np.all(np.where(x_store > 0)[0] == np.where(x_ds > 0)[0]), (np.sum(x_store > 0), np.sum(x_ds > 0))
assert np.all(np.where(x_store > 0)[1] == np.where(x_ds > 0)[1]), (np.sum(x_store > 0), np.sum(x_ds > 0))
assert np.all(x_store - x_ds == 0.), (
|
np.sum(x_store)
|
numpy.sum
|
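A condensed sketch of the comparison logic at the end of test_store_data: densify whatever backend the matrix comes from, then compare the non-zero pattern and the values. Only numpy/scipy are used here; the sfaira store objects themselves are not needed to illustrate the idea.

import numpy as np
import scipy.sparse

x_ds = scipy.sparse.random(20, 10, density=0.3, format='csr', random_state=0)
x_store = x_ds.copy()   # stand-in for the matrix read back from the store

def densify(x):
    return np.asarray(x.todense()) if scipy.sparse.issparse(x) else np.asarray(x)

a, b = densify(x_store), densify(x_ds)
assert a.shape == b.shape
assert np.array_equal(np.where(a > 0), np.where(b > 0))   # same non-zero positions
assert np.allclose(a, b)                                  # same values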
import matplotlib.pyplot as plt
import numpy as np
"""
This code computes and plots the efficacy of contact tracing
as a function of app uptake among Android and iOS users.
The model for tracing efficacy is outlined in the Corona
paper draft, and the probabilities of detecting a contact
are taken from Smittestopp data.
"""
#probabilities of detecting a contact, i = iOS, a = Android:
#p_ii = 0.54; p_ai = 0.53; p_ia = 0.53; p_aa = 0.74
p_ii = 0.54; p_ai = 0.53; p_ia = 0.53; p_aa = 0.74
#p_ia = prob. that a contact between iOS and Android is detected by iOS
n = 31
a_i = np.linspace(0,1,n)
a_a = np.linspace(0,1,n)
#market share iOS, should be replaced with real data:
phi = 0.5
#create 2D arrays with identical rows/columns:
c_ii = np.outer(a_i*a_i*phi**2,
|
np.ones(n)
|
numpy.ones
|
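The np.outer call above just replicates the iOS-uptake term along the Android-uptake axis so the two uptake fractions can be combined on a 2D grid; a tiny sketch with hypothetical numbers:

import numpy as np

phi = 0.5                       # assumed iOS market share
a_i = np.linspace(0, 1, 3)      # iOS app uptake: 0, 0.5, 1
n = a_i.size

# probability that both parties of an iOS-iOS contact run the app,
# constant along the Android-uptake axis
c_ii = np.outer(a_i * a_i * phi ** 2, np.ones(n))
print(c_ii)
# [[0.      0.      0.    ]
#  [0.0625  0.0625  0.0625]
#  [0.25    0.25    0.25  ]]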
"""
retrieved from https://github.com/princewen/tensorflow_practice/blob/master/RL/Basic-MADDPG-Demo/replay_buffer.py
"""
import numpy as np
import random
class ReplayBuffer(object):
def __init__(self,size):
"""Create Replay buffer.
Parameters
----------
size: int
Max number of transitions to store in the buffer. When the buffer
overflows the old memories are dropped.
"""
self._storage = []
self._maxsize = size
self._next_idx = 0
def __len__(self):
return len(self._storage)
def add(self,obs_t,action_agent, action_others, reward,obs_tp1,done):
"""add a experince into storage"""
data = (obs_t, action_agent, action_others, reward, obs_tp1, done)
if self._next_idx >= len(self._storage):
self._storage.append(data)
else:
self._storage[self._next_idx] = data
self._next_idx = (self._next_idx + 1) % self._maxsize
def _encode_sample(self, idxes):
obses_t, agent_actions, other_actions, rewards, obses_tp1, dones = [], [], [], [], [], []
for i in idxes:
data = self._storage[i]
obs_t, action, other_action, reward, obs_tp1, done = data
obses_t.append(np.array(obs_t, copy=False))
agent_actions.append(np.array(action, copy=False))
other_actions.append(np.array(other_action, copy=False))
rewards.append(reward)
obses_tp1.append(np.array(obs_tp1, copy=False))
dones.append(done)
return
|
np.array(obses_t)
|
numpy.array
|
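A minimal usage sketch of the buffer defined above, showing the circular overwrite once _maxsize is reached; the transition values are dummies.

import numpy as np

buf = ReplayBuffer(size=3)
for step in range(5):
    obs_t = np.array([step, step])
    buf.add(obs_t, action_agent=0, action_others=[1], reward=1.0,
            obs_tp1=obs_t + 1, done=False)

print(len(buf))            # 3: the buffer never grows past its size
print(buf._storage[0][0])  # [3 3]: the oldest entries were overwritten (indices wrap via modulo)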
import sys
sys.path.append('..')
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm
from chebyspectral import *
import time
deg = 24 # degree of Chebyshev polynomial
# Source term
f = lambda xq, yq: 0*xq*yq
fHat = chebfit2d(f, deg)
# Boundary conditions
f_bc_x_a = lambda xq: 0
f_bc_y_a = lambda xq: 0
f_bc_x_b = lambda xq: 1/5.0*np.sin(3*np.pi*xq)
f_bc_y_b = lambda xq: np.sin(np.pi*xq)**4 * (xq<0)
# Boundary conditions
bc_derivative_order = 0 # Dirichlet, 0-th derivative
bc_position = -1
bc_axis = 0 # x-axis
bc_x_1 = [chebfit(f_bc_x_a, deg), bc_derivative_order, bc_position, bc_axis]
bc_x_2 = [chebfit(f_bc_x_b, deg), bc_derivative_order, -bc_position, bc_axis]
bc_y_1 = [chebfit(f_bc_y_a, deg), bc_derivative_order, bc_position, 1]
bc_y_2 = [chebfit(f_bc_y_b, deg), bc_derivative_order, -bc_position, 1]
bc = [bc_x_1, bc_x_2, bc_y_1, bc_y_2]
# Differentiation matrix
l_operator = [[0, 0, 1], [0, 0, 1]]
L = chebdiff(l_operator, deg)
L, fHat = chebbc(L, fHat, bc)
# Compute solution
t0 = time.time()
uHat = np.dot(np.linalg.pinv(L), fHat)
uHat = uHat.reshape((deg+1, deg+1))
print('Elapsed time: {}s'.format(np.round(time.time()-t0,2)))
u_analytical = 0.0495946503
u_sol = chebeval(uHat, 0, 0)
print('Absolute error at (0, 0): {}'.format(np.abs(u_analytical - u_sol)))
# Plot solution
N = 100
x = np.linspace(-1, 1, N)
y =
|
np.linspace(-1, 1, N)
|
numpy.linspace
|
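If the solution is wanted on the full plotting grid, one simple (if slow) option is to evaluate the coefficient matrix point by point; this sketch assumes chebeval(uHat, x, y) accepts scalar coordinates exactly as in the error check above, and reuses x, y, N, uHat from that script.

U = np.zeros((N, N))
for i, yi in enumerate(y):
    for j, xj in enumerate(x):
        U[i, j] = chebeval(uHat, xj, yi)   # row = y, column = x, matching contourf conventions

plt.contourf(x, y, U, 30, cmap=cm.viridis)
plt.colorbar()
plt.title('Laplace solution via Chebyshev collocation')
plt.show()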
import numpy as np
from numpy.linalg import inv
from numpy.linalg import solve
from numpy.linalg import eigvals, matrix_rank
from scipy.integrate import solve_ivp #odeint
from scipy.interpolate import interp1d
from scipy.optimize import OptimizeResult as OdeResultsClass
from scipy.linalg import expm
# Local
# --------------------------------------------------------------------------------}
# --- Simple statespace functions ltiss (linear time invariant state space)
# --------------------------------------------------------------------------------{
def state_function(t, x, u, p):
return p['A'].dot(x) + p['B'].dot(u)
def output_function(t, x, u, p):
return p['C'].dot(x) + p['D'].dot(u)
def integrate(t_eval, q0, A, B, fU, method='LSODA', **options):
"""
Perform time integration of an LTI state space system
INPUTS:
- q0: initial states, array of length nStates
- A: state matrix (nStates x nStates)
- B: input matrix (nStates x nInputs)
- fU: function/interpolants interface U=fU(t) or U=fU(t,q)
U : array of inputs
OUTPUTS:
- res: object with attributes `t` and `y` (states for now..) and other attributes from solve_ivp
"""
hasq=False
try:
fU(t_eval[0],q0)
hasq=True
except:
try:
fU(t_eval[0])
hasq=False
except:
raise
if hasq:
odefun = lambda t, q : np.dot(A, q) + np.dot(B, fU(t,q))
else:
odefun = lambda t, q : np.dot(A, q) + np.dot(B, fU(t) )
res = solve_ivp(fun=odefun, t_span=[t_eval[0], t_eval[-1]], y0=q0, t_eval=t_eval, method=method, vectorized=False, **options)
# TODO consider returning y
return res
def integrate_convolution(time, A, B, fU, C=None):
"""
Perform time integration of an LTI state space system using the convolution method
INPUTS:
- A: state matrix (nStates x nStates)
- B: input matrix (nStates x nInputs)
- fU: function/interpolants with interface U=fU(t) or U=fU(t,q)
where U is the array of inputs at t
OUTPUTS:
- x: state vector
"""
H = impulse_response_matrix(time, A, B)
x = np.zeros((A.shape[0], len(time)))
from welib.tools.signal import convolution_integral
# TODO inline and optimize
try:
U = fU(time)
except:
print('[WARN] Cannot evaluate fU for all time, doing a for loop...')
U = np.zeros((B.shape[1], len(time)))
for it,t in enumerate(time):
U[:,it] = fU(t) # NOTE: cannot do state dependency here
for i in np.arange(H.shape[0]):
x_sum=0
for j in np.arange(H.shape[1]):
x_sum += convolution_integral(time, U[j,:], H[i,j,:] )
x[i,:] = x_sum
return x
# TODO consider returning y
def impulse_response_matrix(time, A, B, C=None, outputBoth=False):
"""
Return the impulse response matrix for all time steps defined by `time`
H_x(t) = exp(At) B
H_y(t) = C exp(At) B
see e.g.
Friedland p 76
"""
H_x = np.zeros((A.shape[0], B.shape[1], len(time)))
for it, t in enumerate(time):
H_x[:,:, it] = expm(A*t).dot(B)
if outputBoth:
raise NotImplementedError()
if C is None:
raise Exception('Provide `C` to output both impulse response matrices H_x and H_y')
H_y = C.dot(H_x) # TODO verify
return H_x, H_y
else:
return H_x
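# Quick sanity check (illustrative, not part of the original module): for a
# scalar system A = [[-2]], B = [[1]] the impulse response is exp(-2 t).
def _example_impulse_response_scalar():
    time = np.array([0.0, 0.5, 1.0])
    H = impulse_response_matrix(time, np.array([[-2.0]]), np.array([[1.0]]))
    return H[0, 0, :]  # approximately [1.0, exp(-1.0), exp(-2.0)]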
# --------------------------------------------------------------------------------}
# --- Linear State Space system
# --------------------------------------------------------------------------------{
class LinearStateSpace():
"""
def setStateInitialConditions(self,q0=None):
def setInputTimeSeries(self,vTime,vU):
def setInputFunction(self,fn):
def Inputs(self,t,x=None):
def integrate(self, t_eval, method='RK4', y0=None, **options):
def dqdt(self, t, q):
def RHS(self,t,q):
def nStates(self):
def nInputs(self):
def nOuputs(self):
"""
def __init__(self,A,B,C=None,D=None,q0=None):
self.A=np.asarray(A)
self.B=np.asarray(B)
if C is None:
self.C=np.eye(A.shape[0]) # output all states
else:
self.C=np.asarray(C)
if D is None:
            self.D=np.zeros((self.C.shape[0],self.B.shape[1])) # no feedthrough by default
else:
self.D=np.asarray(D)
# Initial conditions
self.setStateInitialConditions(q0)
# Time integration results
self.res=None
@property
def nStates(self):
return self.A.shape[0]
@property
def nInputs(self):
return self.B.shape[1]
@property
def nOuputs(self):
if self.C is not None:
            return self.C.shape[0] # number of outputs = rows of C
else:
return 0
# --------------------------------------------------------------------------------}
# --- Time domain
# --------------------------------------------------------------------------------{
def setStateInitialConditions(self,q0=None):
self.q0 =
|
np.zeros(self.nStates)
|
numpy.zeros
|
"""
k-Nearest Neighbours Regression
"""
# Imports
import numpy as np
# Functions
def get_data(data_frame, features, output):
"""
Purpose: Extract features and prepare a feature matrix
Set the first feature x0 = 1
Input : Original Dataframe, list of feature variables, output variable
Output : Feature matrix array, output array
"""
data_frame['constant'] = 1.0
features = ['constant'] + features
features_matrix = np.array(data_frame[features])
    if output is not None:
output_array =
|
np.array(data_frame[output])
|
numpy.array
|
import os
import sys
# NOTE: check the postfix in the csv file
from PIL import Image
import numpy as np
from os import path, makedirs
import re
import pandas as pd
from sklearn.cluster import KMeans
from PIL import Image, ImageFont, ImageDraw, ImageEnhance
from sympy import Interval, Union
def ChangeToOtherMachine(filelist, repo="EQanalytics", remote_machine=""):
"""
    Takes a list of file names located in a repo and converts them to the local machine's file names. Must be executed from within the repository.
Example:
'/home/ubuntu/EQanalytics/Data/Street_View_Images/vulnerable/test.jpg'
    Gets converted to
'C:/Users/Anton/EQanalytics/Data/Street_View_Images/vulnerable/test.jpg'
"""
filelist = [x.replace("\\", "/") for x in filelist]
if repo[-1] == "/":
repo = repo[:-1]
if remote_machine:
prefix = remote_machine.replace("\\", "/")
else:
prefix = (
(os.path.dirname(
os.path.abspath(__file__)).split(repo))[0]).replace(
"\\", "/")
new_list = []
for file in filelist:
suffix = (file.split(repo))[1]
if suffix[0] == "/":
suffix = suffix[1:]
new_list.append(
os.path.join(
prefix,
repo +
"/",
suffix).replace(
"\\",
"/"))
return new_list
def get_files(directory, ending=".jpg"):
result = []
for file in os.listdir(directory):
if file.endswith(ending):
result.append(file)
return result
def load_and_filter_df(df_path, file_path, label_dict):
file_list = get_files(file_path)
file_list = [x[:-14] + ".jpg" for x in file_list]
df = pd.read_csv(df_path)
df["file"] = df["image"].apply(lambda x: (x.split("/"))[-1])
df["label_name"] = df["label"].apply(lambda x: label_dict[x])
return df[df["file"].isin(file_list)]
def get_features(
df,
label_dict,
label_names=[
"door",
"window",
"blind",
"shop"]):
df["label_name"] = df["label"].apply(lambda x: label_dict[x])
df.loc[:, "x_len"] = df["xmax"] - df["xmin"]
df.loc[:, "y_len"] = df["ymax"] - df["ymin"]
df.loc[:, "rel_y_center"] = (df["ymin"] + df["ymax"]) / 2 / df["y_size"]
df.loc[:, "area"] = df["x_len"] * df["y_len"]
return df[df["label_name"].isin(label_names)]
def get_intervall_union(data):
"""
    Given a list of intervals, e.g. a = [(7, 10), (11, 13), (11, 15), (14, 20), (23, 39)],
    this function returns the length of the interval union. In the example it takes the union
    as [Interval(7, 10), Interval(11, 20), Interval(23, 39)] and computes the length as 28
"""
# Convert to list of tuples if the input is list of list:
if not data:
return 0
if isinstance(data[0], type([])):
data = [tuple(l) for l in data]
intervals = [Interval(begin, end) for (begin, end) in data]
u = Union(*intervals)
union_list = [list(u.args[:2])] if isinstance(
u, Interval) else list(u.args)
length = 0
if isinstance(union_list[0], type([])):
union_list = [tuple(l) for l in union_list]
union_list = [Interval(begin, end) for (begin, end) in union_list]
for item in union_list:
length += item.end - item.start
return length
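# Quick check against the docstring example above (illustrative, not part of
# the original module).
def _example_interval_union():
    return get_intervall_union([(7, 10), (11, 13), (11, 15), (14, 20), (23, 39)])  # -> 28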
def get_iou(re1, re2):
"""
Calculate the Intersection over Union (IoU) of two bounding boxes.
Parameters
----------
    re1 : tuple
        (x1, x2, y1, y2) of the first box;
        the (x1, y1) position is at the top left corner,
        the (x2, y2) position is at the bottom right corner
    re2 : tuple
        (x1, x2, y1, y2) of the second box;
        the (x1, y1) position is at the top left corner,
        the (x2, y2) position is at the bottom right corner
Returns
-------
float
in [0, 1]
"""
bb1 = dict(zip(["x1", "x2", "y1", "y2"], re1))
bb2 = dict(zip(["x1", "x2", "y1", "y2"], re2))
assert bb1["x1"] < bb1["x2"]
assert bb1["y1"] < bb1["y2"]
assert bb2["x1"] < bb2["x2"]
assert bb2["y1"] < bb2["y2"]
# determine the coordinates of the intersection rectangle
x_left = max(bb1["x1"], bb2["x1"])
y_top = max(bb1["y1"], bb2["y1"])
x_right = min(bb1["x2"], bb2["x2"])
y_bottom = min(bb1["y2"], bb2["y2"])
if x_right < x_left or y_bottom < y_top:
return 0.0
# The intersection of two axis-aligned bounding boxes is always an
# axis-aligned bounding box
intersection_area = (x_right - x_left) * (y_bottom - y_top)
# compute the area of both AABBs
bb1_area = (bb1["x2"] - bb1["x1"]) * (bb1["y2"] - bb1["y1"])
bb2_area = (bb2["x2"] - bb2["x1"]) * (bb2["y2"] - bb2["y1"])
# compute the intersection over union by taking the intersection
# area and dividing it by the sum of prediction + ground-truth
# areas - the interesection area
iou = intersection_area / float(bb1_area + bb2_area - intersection_area)
assert iou >= 0.0
assert iou <= 1.0
return iou
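# Illustrative sketch (not part of the original module): IoU of two hypothetical
# boxes given as (x1, x2, y1, y2) tuples, matching how get_iou unpacks them.
def _example_get_iou():
    box_a = (0, 10, 0, 10)
    box_b = (5, 15, 5, 15)
    return get_iou(box_a, box_b)  # 25 / 175, roughly 0.143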
def find_index_to_keep(rect, rects, areas, iou_threshold=0.75):
rects = np.asarray(rects)
ious = []
for re in rects:
ious.append(get_iou(rect, re))
intersect =
|
np.array(ious)
|
numpy.array
|
#!/usr/bin/env python
# BSD 3-Clause License; see https://github.com/scikit-hep/aghast/blob/master/LICENSE
import unittest
import numpy
from aghast import *
class Test(unittest.TestCase):
def runTest(self):
pass
def test_validity_Metadata(self):
h = Collection(
{}, metadata=Metadata("""{"one": 1, "two": 2}""", language=Metadata.json)
)
h.checkvalid()
assert h.metadata.data == """{"one": 1, "two": 2}"""
assert h.metadata.language == Metadata.json
def test_validity_Decoration(self):
h = Collection(
{},
decoration=Decoration("""points { color: red }""", language=Decoration.css),
)
h.checkvalid()
assert h.decoration.data == """points { color: red }"""
        assert h.decoration.language == Decoration.css
def test_validity_RawInlineBuffer(self):
h = Ntuple(
[Column("one", Column.int32)],
[
NtupleInstance(
[
Chunk(
[
ColumnChunk(
[
Page(
RawInlineBuffer(
numpy.zeros(1, dtype=numpy.int32)
)
)
],
[0, 1],
)
]
)
]
)
],
)
h.checkvalid()
assert len(h.instances[0].chunks[0].column_chunks[0].pages[0].array) == 1
h = Ntuple(
[Column("one", Column.int32)],
[
NtupleInstance(
[
Chunk(
[
ColumnChunk(
[Page(RawInlineBuffer(b"\x05\x00\x00\x00"))], [0, 1]
)
]
)
]
)
],
)
h.checkvalid()
assert h.instances[0].chunks[0].column_chunks[0].pages[0].array.tolist() == [5]
def test_validity_RawExternalBuffer(self):
buf = numpy.zeros(1, dtype=numpy.int32)
h = Ntuple(
[Column("one", Column.int32)],
[
NtupleInstance(
[
Chunk(
[
ColumnChunk(
[
Page(
RawExternalBuffer(
buf.ctypes.data, buf.nbytes
)
)
],
[0, 1],
)
]
)
]
)
],
)
h.checkvalid()
assert len(h.instances[0].chunks[0].column_chunks[0].pages[0].array) == 1
buf = numpy.array([3.14], dtype=numpy.float64)
h = Ntuple(
[Column("one", Column.float64)],
[
NtupleInstance(
[
Chunk(
[
ColumnChunk(
[
Page(
RawExternalBuffer(
buf.ctypes.data, buf.nbytes
)
)
],
[0, 1],
)
]
)
]
)
],
)
h.checkvalid()
assert h.instances[0].chunks[0].column_chunks[0].pages[0].array.tolist() == [
3.14
]
def test_validity_InterpretedInlineBuffer(self):
h = BinnedEvaluatedFunction(
[Axis()],
InterpretedInlineBuffer(
numpy.zeros(1, dtype=numpy.int32), dtype=InterpretedInlineBuffer.int32
),
)
h.checkvalid()
assert h.values.array.tolist() == [0]
h = BinnedEvaluatedFunction(
[Axis()],
InterpretedInlineBuffer(
b"\x07\x00\x00\x00", dtype=InterpretedInlineBuffer.int32
),
)
h.checkvalid()
assert h.values.array.tolist() == [7]
def test_validity_InterpretedExternalBuffer(self):
buf = numpy.zeros(1, dtype=numpy.float64)
h = BinnedEvaluatedFunction(
[Axis()],
InterpretedExternalBuffer(
buf.ctypes.data, buf.nbytes, dtype=InterpretedInlineBuffer.float64
),
)
h.checkvalid()
assert h.values.array.tolist() == [0.0]
buf = numpy.array([3.14], dtype=numpy.float64)
h = BinnedEvaluatedFunction(
[Axis()],
InterpretedExternalBuffer(
buf.ctypes.data, buf.nbytes, dtype=InterpretedInlineBuffer.float64
),
)
h.checkvalid()
assert h.values.array.tolist() == [3.14]
def test_validity_IntegerBinning(self):
h = BinnedEvaluatedFunction(
[Axis(IntegerBinning(10, 20))],
InterpretedInlineBuffer(
numpy.zeros(11), dtype=InterpretedInlineBuffer.float64
),
)
h.checkvalid()
h = BinnedEvaluatedFunction(
[Axis(IntegerBinning(20, 10))],
InterpretedInlineBuffer(
numpy.zeros(11), dtype=InterpretedInlineBuffer.float64
),
)
assert not h.isvalid
h = BinnedEvaluatedFunction(
[
Axis(
IntegerBinning(
10,
20,
loc_underflow=IntegerBinning.nonexistent,
loc_overflow=IntegerBinning.nonexistent,
)
)
],
InterpretedInlineBuffer(
numpy.zeros(11), dtype=InterpretedInlineBuffer.float64
),
)
h.checkvalid()
assert h.values.array.tolist() == [0.0] * 11
h = BinnedEvaluatedFunction(
[
Axis(
IntegerBinning(
10,
20,
loc_underflow=IntegerBinning.below1,
loc_overflow=IntegerBinning.nonexistent,
)
)
],
InterpretedInlineBuffer(
numpy.zeros(12), dtype=InterpretedInlineBuffer.float64
),
)
h.checkvalid()
assert h.values.array.tolist() == [0.0] * 12
h = BinnedEvaluatedFunction(
[
Axis(
IntegerBinning(
10,
20,
loc_underflow=IntegerBinning.nonexistent,
loc_overflow=IntegerBinning.above1,
)
)
],
InterpretedInlineBuffer(
numpy.zeros(12), dtype=InterpretedInlineBuffer.float64
),
)
h.checkvalid()
assert h.values.array.tolist() == [0.0] * 12
h = BinnedEvaluatedFunction(
[
Axis(
IntegerBinning(
10,
20,
loc_underflow=IntegerBinning.below1,
loc_overflow=IntegerBinning.above1,
)
)
],
InterpretedInlineBuffer(
|
numpy.zeros(13)
|
numpy.zeros
|
"""*******************************************************
A python implementation of the celestial functions
******************************************************"""
#print __doc__
import math
import numpy as np
import pdb
import sys
#sys.setrecursionlimit(100000)
def cone_in_polysphere(PolesLong,PolesLat,Long,Lat,Radius):
"""
Check if a cone (small circle) is within a convex spherical polygon
Package:
    Description: Check if a cone (small circle) is within a convex spherical
    polygon whose sides are great circles.
    Input : - Numpy array 3-by-N, in which each column represents the longitude of the
    poles of the half-spaces of a spherical polygon, where the
    pole is directed into the polygon center of mass [rad].
    - Numpy array in which each column represents the latitude of the
    poles of the half-spaces of a spherical polygon, where the
    pole is directed into the polygon center of mass [rad].
- Vector of longitudes of the cones center [rad].
The size is either 1 or like the number of columns of the
first and second input arguments.
- Vector of latitudes of the cones center [rad].
The size is either 1 or like the number of columns of the
first and second input arguments.
- Vector of radii of the cones [rad].
The size is either 1 or like the number of columns of the
first and second input arguments.
    Output : - Logical flag indicating whether each cone is inside the polygon.
By : <NAME> (original Matlab function by <NAME>) Feb 2018
URL : http://weizmann.ac.il/home/eofek/matlab/
Example: HTM=celestial.htm.htm_build(4);
Flag=celestial.htm.cone_in_polysphere(HTM(end).PolesCoo(:,1),HTM(end).PolesCoo(:,2),5.5,-0.6,0.01);
PLong=rand(3,1000); PLat=rand(3,1000); Long=rand(1,1000); Lat=rand(1000,1); Radius=0.01.*rand(1000,1);
Flag=celestial.htm.cone_in_polysphere(PLong,PLat,Long,Lat,Radius);
Reliable:
"""
#Longitudes_circle=Long # N longitudes de cercles
#Latitudes_circle=Lat # N latitudes de cercles
#Radius_circles=Radius # N radius de cercles
Dist=np.arccos(np.multiply(np.sin(PolesLat),np.sin(Lat))+np.multiply(np.cos(PolesLat),np.cos(Lat))*np.cos(PolesLong-Long))
Flag=np.zeros(np.shape(Dist)[1])
for i in range(np.shape(Dist)[1]):#optimize
Flag[i]=all(Dist[:,i]<=0.5*math.pi+Radius) #1 if all distances are smaller than..
return Flag
def sphere_distance_fast(RA_1,Dec_1,RA_2,Dec_2):#RADIANS!
Dist = np.arccos(np.sin(Dec_1)*np.sin(Dec_2) + np.cos(Dec_1)* np.cos(Dec_2)* np.cos(RA_1 - RA_2))
return Dist
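# Sanity check (illustrative, not part of the original module): two points on the
# celestial equator separated by 90 degrees in RA are 90 degrees apart.
def _example_sphere_distance():
    return sphere_distance_fast(0.0, 0.0, np.pi/2, 0.0)  # approximately pi/2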
def sphere_dist_fast(RA_1,Dec_1,RA_2,Dec_2):#used by xmatch_2cats and match_cats
"""Description: Names after the function with the same name by Eran.
Calculate the angular distance between two points on the celestial sphere. Works only with radians and calculate only the distance.
Input: - np.array of longitudes for the first point [radians]
- np.array of latitudes for the first point [radians]
- np.array of longitudes for the second point [radians]
- np.array of latitudes for the second point [radians]
    Output: - np.array of distances between points [radians]
            - np.array of position angles [radians]"""
Dist = np.arccos(np.sin(Dec_1)*np.sin(Dec_2) + np.cos(Dec_1)* np.cos(Dec_2)* np.cos(RA_1 - RA_2))
dRA = RA_1 - RA_2
SinPA = np.sin(dRA)* np.cos(Dec_2)/np.sin(Dist)
CosPA = (np.sin(Dec_2)* np.cos(Dec_1) - np.cos(Dec_2)* np.sin(Dec_1) * np.cos(dRA))/ np.sin(Dist)
PA = np.arctan2(SinPA, CosPA)
#print(PA)
if type(PA) is np.ndarray:
#I = find(PA < 0);
PA[(PA<0)] = 2. * math.pi + PA[(PA<0)]
else:
if PA<0:
PA=2*math.pi+PA
#print('Dist before nan_to_num',Dist)
#print('RA1:',RA_1)
#print('DEC1:',Dec_1)
#print('RA2',RA_2)
#print('DEC2',Dec_2)
Distx=np.nan_to_num(Dist)
#print('Dist after nan_to_num',Distx)
return Distx,PA
def number_of_trixels_to_level(N_trixels):
"""Description: in Eran's library, this is called
Input: - N_trixels: number of trixels
Output: - number of levels
- number of trixels in the lowest level"""
number_of_levels=math.floor(math.log(N_trixels/2.)/math.log(4))
number_of_trixels_in_highest_level=2*4**number_of_levels
return number_of_levels,number_of_trixels_in_highest_level
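# Worked example (illustrative, not part of the original module): for 1000
# trixels, floor(log(500)/log(4)) = 4 levels, and that level holds 2*4**4 = 512
# trixels.
def _example_trixel_levels():
    return number_of_trixels_to_level(1000)  # -> (4, 512)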
def coo2cosined(Long,Lat):#TESTED compared with Eran's, ok
"""Description: Convert coordinates to cosine directions in the same reference frame.
Input:-np.array of longitudes [radians]
    -np.array of latitudes [radians]
Output: - np.array of first cosine directions
- np.array of second cosine directions
- np.array of third cosine directions"""
CosLat=np.cos(Lat)
CD1=np.cos(Long)*CosLat
CD2=np.sin(Long)*CosLat
CD3=np.sin(Lat)
return CD1,CD2,CD3
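# Quick check (illustrative, not part of the original module): the frame origin
# (Long = Lat = 0) maps to the x-axis direction.
def _example_coo2cosined():
    return coo2cosined(0.0, 0.0)  # -> (1.0, 0.0, 0.0)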
def cosined2coo(CD1,CD2,CD3):#TESTED compared with Eran's, ok
"""Description: Convert cosine directions into coordinated in the same reference frame.
Input: - np.array of first cosine directions
- np.array of second cosine directions
- np.array of third cosine directions
Output:-np.array of longitudes [radians]
    -np.array of latitudes [radians]
example: [RA,Dec]=cosined2coo(0.1,0,1)"""
if type(CD1) is np.ndarray:
#print(type(CD1))
#print(type(CD2))
#print(CD2[0])
#print(CD1[0])
#print(np.shape(CD1))
#print(np.shape(CD2))
Long=
|
np.arctan2(CD2,CD1)
|
numpy.arctan2
|
"""
Created: 2020-07-15
Author: <NAME>
"""
from collections.abc import Iterable
from pycqed.analysis import fitting_models as fit_mods
from pycqed.analysis import measurement_analysis as ma
import pycqed.measurement.hdf5_data as hd5
import lmfit
import numpy as np
import logging
# Filter and optimization tools
import pycqed.measurement.kernel_functions_ZI as kzi
from scipy import signal
import cma
log = logging.getLogger(__name__)
# ######################################################################
# Analysis utilities
# ######################################################################
def rough_freq_to_amp(
amp_pars, time_ns, freqs, plateau_time_start_ns=-25, plateau_time_end_ns=-5,
):
time_ = time_ns[: len(freqs)]
time_ref_start = time_[-1] if plateau_time_start_ns < 0 else 0
time_ref_stop = time_[-1] if plateau_time_end_ns < 0 else 0
where = np.where(
(time_ > time_ref_start + plateau_time_start_ns)
& (time_ < time_ref_stop + plateau_time_end_ns)
)
avg_f = np.average(freqs[where])
dac_amp = amp_pars["dac_amp"]
Vpp = amp_pars["vpp"]
cfg_amp = amp_pars["cfg_amp"]
amp = cfg_amp * Vpp / 2 * dac_amp
# coarse approximation of the arc assuming centered arc and quadratic fit
a = avg_f / (amp ** 2)
amps = np.sqrt(freqs / a)
amp_plateau = np.average(amps[where])
amps_norm = amps / amp_plateau
res = {
"amps": amps,
"step_response": amps_norm,
"amp_plateau": amp_plateau,
"frequency_plateau": avg_f,
}
return res
def moving_cos_fitting_window(
x_data_ns,
y_data,
fit_window_pnts_nr: int = 6,
init_guess: dict = {"phase": 0.0},
fixed_params: dict = {},
max_params: dict = {},
min_params: dict = {},
):
"""
NB: Intended to be used with input data in ns, this assumption is
used to generate educated guesses for the fitting
"""
model = lmfit.Model(fit_mods.CosFunc)
if "offset" not in init_guess.keys():
offset_guess = np.average(y_data)
init_guess["offset"] = offset_guess
if "amplitude" not in init_guess.keys():
amplitude_guess = np.max(y_data[len(y_data) // 2 :]) - init_guess["offset"]
init_guess["amplitude"] = amplitude_guess
if "frequency" not in init_guess.keys():
min_t = x_data_ns[0]
max_t = x_data_ns[-1]
        total_t = max_t - min_t  # total span; used to trim 10% off each end before the FFT
y_data_for_fft = y_data[
np.where(
(x_data_ns > min_t + 0.1 * total_t)
& (x_data_ns < max_t - 0.1 * total_t)
)[0]
]
w = np.fft.fft(y_data_for_fft)[
: len(y_data_for_fft) // 2
] # ignore negative values
f = np.fft.fftfreq(len(y_data_for_fft), x_data_ns[1] - x_data_ns[0])[: len(w)]
w[0] = 0 # ignore DC component
frequency_guess = f[np.argmax(np.abs(w))]
init_guess["frequency"] = frequency_guess
print("Frequency guess from FFT: {:.3g} GHz".format(frequency_guess))
warn_thr = 0.7 # GHz
if frequency_guess > warn_thr:
log.warning(
"\nHigh detuning above {} GHz detected. Cosine fitting may fail! "
"Consider using lower detuning!".format(warn_thr)
)
if "phase" not in init_guess.keys():
init_guess["phase"] = 0.0
params = model.make_params(**init_guess)
def fix_pars(params, i):
# The large range is just to allow the phase to move continuously
# between the adjacent fits even if it is not inside [-pi, pi]
params["phase"].min = -100.0 * np.pi
params["phase"].max = 100.0 * np.pi
params["amplitude"].min = 0.1 * init_guess["amplitude"]
params["amplitude"].max = 2.0 * init_guess["amplitude"]
# Not expected to be used for > 0.8 GHz
params["frequency"].min = 0.1
params["frequency"].max = 0.8
for par, val in fixed_params.items():
# iterable case is for the amplitude
params[par].value = val[i] if isinstance(val, Iterable) else val
params[par].vary = False
for par, val in max_params.items():
params[par].max = val
for par, val in min_params.items():
params[par].min = val
pnts_per_fit = fit_window_pnts_nr
pnts_per_fit_idx = pnts_per_fit + 1
max_num_fits = len(x_data_ns) - pnts_per_fit + 1
middle_fits_num = max_num_fits // 2
results = [None for i in range(max_num_fits)]
# print(results)
results_stderr = [None for i in range(max_num_fits)]
# We iterate from the middle of the data to avoid fitting issue
# This was verified to help!
# There is an iteration from the middle to the end and another one
# from the middle to the beginning
for fit_ref, iterator in zip(
[-1, +1],
[range(middle_fits_num, max_num_fits), reversed(range(middle_fits_num))],
):
for i in iterator:
if i != middle_fits_num:
# Take the adjacent fit as the initial guess for the next fit
params = model.make_params(
amplitude=results[i + fit_ref][0],
frequency=results[i + fit_ref][1],
phase=results[i + fit_ref][2],
offset=results[i + fit_ref][3],
)
fix_pars(params, i)
t_fit_data = x_data_ns[i : i + pnts_per_fit_idx]
fit_data = y_data[i : i + pnts_per_fit_idx]
res = model.fit(fit_data, t=t_fit_data, params=params)
res_pars = res.params.valuesdict()
results[i] = np.fromiter(res_pars.values(), dtype=np.float64)
results_stderr[i] = np.fromiter(
(param.stderr for par_name, param in res.params.items()),
dtype=np.float64,
)
results = np.array(results).T
results_stderr = np.array(results_stderr).T
results = {key: values for key, values in zip(res_pars.keys(), results)}
results_stderr = {
key: values for key, values in zip(res_pars.keys(), results_stderr)
}
return {
"results": results,
"results_stderr": results_stderr,
}
def cryoscope_v2_processing(
time_ns: np.array,
osc_data: np.array,
pnts_per_fit_first_pass: int = 4,
pnts_per_fit_second_pass: int = 3,
init_guess_first_pass: dict = {},
fixed_params_first_pass: dict = {},
init_guess_second_pass: dict = {},
max_params: dict = {},
min_params: dict = {},
vln: str = "",
insert_ideal_projection: bool = True,
osc_amp_envelop_poly_deg: int = 1,
):
"""
TBW
    Provide time in ns to avoid numerical issues; the data processing here relies on it.
    `pnts_per_fit_second_pass` shouldn't be smaller than 3, which is the limit
    for fitting the cosine (technically 2 is the limit, but that will probably not
    work very well)
"""
assert time_ns[0] != 0.0, "Cryoscope time should not start at zero!"
def add_ideal_projection_at_zero(time_ns, y_data, vln, offset, osc_amp):
"""
        Inserts an ideal point at t = 0 based on the type of projection
"""
if vln:
if "mcos" in vln:
time_ns = np.insert(time_ns, 0, 0)
y_data = np.insert(y_data, 0, offset - osc_amp)
elif "cos" in vln:
time_ns = np.insert(time_ns, 0, 0)
y_data = np.insert(y_data, 0, offset + osc_amp)
elif "sin" in vln or "msin" in vln:
time_ns = np.insert(time_ns, 0, 0)
y_data = np.insert(y_data, 0, offset)
else:
log.warning(
"Projection type not supported. Unexpected results may arise."
)
return time_ns, y_data
else:
log.warning("\nSkipping ideal projection!")
return time_ns, y_data
res_dict = moving_cos_fitting_window(
x_data_ns=time_ns,
y_data=osc_data,
fit_window_pnts_nr=pnts_per_fit_first_pass,
init_guess=init_guess_first_pass,
fixed_params=fixed_params_first_pass,
max_params=max_params,
min_params=min_params,
)
results = res_dict["results"]
amps_from_fit = results["amplitude"]
x_for_fit = time_ns[: len(amps_from_fit)]
    # Here we intentionally use a polynomial of degree 1 so that the fitted amplitude
    # cannot dip at the beginning, which would not be physical
line_fit = np.polyfit(x_for_fit, amps_from_fit, osc_amp_envelop_poly_deg)
poly1d = np.poly1d(line_fit)
fixed_offset = np.average(results["offset"])
if not len(init_guess_second_pass):
init_guess_second_pass = {
"offset": fixed_offset,
# "frequency": np.average(results["frequency"]),
"phase": 0.0,
}
if insert_ideal_projection:
        # This helps with the uncertainty about the amplitude of the
        # first point of the step response
time_ns, osc_data = add_ideal_projection_at_zero(
time_ns=time_ns,
y_data=osc_data,
vln=vln,
osc_amp=poly1d(0.0),
offset=fixed_offset,
)
res_dict = moving_cos_fitting_window(
x_data_ns=time_ns,
y_data=osc_data,
fit_window_pnts_nr=pnts_per_fit_second_pass,
init_guess=init_guess_second_pass,
fixed_params={"offset": fixed_offset, "amplitude": poly1d(time_ns)},
max_params=max_params,
min_params=min_params,
)
res_dict["time_ns"] = time_ns
res_dict["osc_data"] = osc_data
return res_dict
def extract_amp_pars(
qubit: str,
timestamp: str,
dac_amp_key: str = "Snapshot/instruments/flux_lm_{}/parameters/sq_amp",
vpp_key: str = "Snapshot/instruments/flux_lm_{}/parameters/cfg_awg_channel_range",
cfg_amp_key: str = "Snapshot/instruments/flux_lm_{}/parameters/cfg_awg_channel_amplitude",
):
"""
Assumes centered flux arc and converts cryoscope oscillation frequency
to amplitude
"""
dac_amp_key = dac_amp_key.format(qubit)
vpp_key = vpp_key.format(qubit)
cfg_amp_key = cfg_amp_key.format(qubit)
filepath = ma.a_tools.get_datafilepath_from_timestamp(timestamp)
exctraction_spec = {
"dac_amp": (dac_amp_key, "attr:value"),
"vpp": (vpp_key, "attr:value"),
"cfg_amp": (cfg_amp_key, "attr:value"),
}
extracted = hd5.extract_pars_from_datafile(filepath, param_spec=exctraction_spec)
return extracted
# ######################################################################
# IIRs (exponential filters) utilities
# ######################################################################
def pred_corrected_sig(sig, taus, amps):
"""
[2020-07-15 Victor] Not tested in a while, see old cryoscope notebooks
"""
for i, (tau, amp) in enumerate(zip(taus, amps)):
sig = kzi.exponential_decay_correction_hw_friendly(
sig, tau, amp, sampling_rate=2.4e9
)
return sig
def predicted_waveform(time, tau0, amp0, tau1, amp1, tau2, amp2, tau3, amp3):
"""
[2020-07-15 Victor] Not tested in a while, see old cryoscope notebooks
"""
taus = [tau0, tau1, tau2, tau3]
amps = [amp0, amp1, amp2, amp3]
    # NOTE: the original line referenced an undefined name here; assuming the
    # intent is to correct an ideal unit step sampled at `time`.
    y_pred = pred_corrected_sig(np.ones(len(time)), taus, amps)
# Normalized
y_pred /= np.mean(y_pred[-100:])
# Smooth tail
# y_pred[100:] = filtfilt(a=[1], b=1/20*np.ones(20), x=y_pred[100:])
# y_pred[50:100] = filtfilt(a=[1], b=1/5*np.ones(5), x=y_pred[50:100])
return y_pred
# ######################################################################
# FIRs utilities
# ######################################################################
def print_FIR_loading(qubit, model_num, FIR, real_time=False):
print(
(
"lin_dist_kern_{}.filter_model_0{:1d}({{'params': {{'weights': np."
+ repr(FIR)
+ "}}, 'model': 'FIR', 'real-time': {} }})"
).format(qubit, model_num, real_time)
)
def optimize_fir_software(
y,
baseline_start=100,
baseline_stop=None,
taps=72,
max_taps=72,
start_sample=0,
stop_sample=None,
cma_options={},
):
step_response = np.concatenate((np.array([0]), y))
baseline = np.mean(y[baseline_start:baseline_stop])
x0 = [1] + (max_taps - 1) * [0]
def objective_function_fir(x):
y = step_response
zeros = np.zeros(taps - max_taps)
x = np.concatenate((x, zeros))
yc = signal.lfilter(x, 1, y)
return np.mean(np.abs(yc[1 + start_sample : stop_sample] - baseline)) / np.abs(
baseline
)
return cma.fmin2(objective_function_fir, x0, 0.1, options=cma_options)
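# Illustrative call (not part of the original module; `y_meas` is a hypothetical
# measured step response longer than `baseline_start` samples): cma.fmin2 returns
# the optimised FIR taps and the evolution-strategy object.
def _example_optimize_fir(y_meas):
    xopt, es = optimize_fir_software(y_meas, baseline_start=100, taps=40, max_taps=40)
    return xopt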
def optimize_fir_HDAWG(
y,
baseline_start=100,
baseline_stop=None,
start_sample=0,
stop_sample=None,
cma_options={},
max_taps=40,
hdawg_taps=40,
):
step_response = np.concatenate((np.array([0]), y))
baseline = np.mean(y[baseline_start:baseline_stop])
x0 = [1] + (max_taps - 1) * [0]
def objective_function_fir(x):
y = step_response
zeros = np.zeros(hdawg_taps - max_taps)
x =
|
np.concatenate((x, zeros))
|
numpy.concatenate
|
import numpy as np
import time
import logging
def print_time(before):
logging.info(time.time()-before)
return time.time()
def linewidth_from_data_units(linewidth, axis, reference='y'):
"""
Convert a linewidth in data units to linewidth in points.
Parameters
----------
linewidth: float
Linewidth in data units of the respective reference-axis
axis: matplotlib axis
The axis which is used to extract the relevant transformation
data (data limits and size must not change afterwards)
reference: string
The axis that is taken as a reference for the data width.
Possible values: 'x' and 'y'. Defaults to 'y'.
Returns
-------
linewidth: float
Linewidth in points
"""
fig = axis.get_figure()
if reference == 'x':
length = fig.bbox_inches.width * axis.get_position().width
value_range = np.diff(axis.get_xlim())[0]
elif reference == 'y':
length = fig.bbox_inches.height * axis.get_position().height
value_range = np.diff(axis.get_ylim())[0]
# Convert length to points
length *= 72
# Scale linewidth to value range
return linewidth * (length / value_range)
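# Illustrative usage (not part of the original module; `ax`, `x`, `y` are
# hypothetical): draw a line whose width spans 0.5 data units on the y-axis.
def _example_linewidth(ax, x, y):
    lw_pts = linewidth_from_data_units(0.5, ax, reference='y')
    return ax.plot(x, y, linewidth=lw_pts)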
def add_walls_and_source_to_ax(ax, colors, S, scatter_size):
s_mat = np.array(S)
M, N = s_mat.shape[0], s_mat.shape[1]
scatter_dict = {
"color_item_-1_x_y":[]
}
for iter in range(np.sum(np.unique(s_mat) > 0)):
scatter_dict[f"color_item_{iter+1}_x_y"] = []
for i in range(M):
for j in range(N):
if S[i][j] != 0:
scatter_dict[f"color_item_{S[i][j]}_x_y"].append([j,i])
for key in scatter_dict:
color_item = int(key.split('_')[2])
mat = np.array(scatter_dict[key]).T
j = mat[0].tolist()
i = mat[1].tolist()
ax.scatter(j, i, s=scatter_size*2, color=colors[color_item] if color_item != 0 else "black", marker = "x" if color_item < 0 else None)
def plot_area(S):
import matplotlib
import matplotlib.pyplot as plt
from matplotlib import colors
ax = plt.gca()
color = [matplotlib.colors.to_hex(c) for c in plt.cm.tab20.colors]
s_mat = np.array(S)
M, N = s_mat.shape[0], s_mat.shape[1]
# create discrete colormap
# print(color[:s_mat.max()] + color[-1])
cmap = colors.ListedColormap([color[-1]] + color[:s_mat.max()+1])
# cmap = colors.ListedColormap(['red', 'blue', 'green', 'yellow', 'black'])
bounds = [i for i in np.arange(-1, s_mat.max()+2, 1)]
norm = colors.BoundaryNorm(bounds, cmap.N)
ax.imshow(s_mat + 0.5, cmap=cmap, norm=norm, alpha=0.5) # half is for the threshold
# draw gridlines
ax.set_facecolor('black')
ax.set_ylim(M - 0.5, -0.5)
ax.set_xlim(-0.5, N - 0.5)
return ax.figure
def plot_walls(S):
import matplotlib.pyplot as plt
plt.clf()
plt.cla()
ax = plt.gca()
colors = plt.cm.tab20.colors
s_mat = np.array(S)
M, N = s_mat.shape[0], s_mat.shape[1]
linewidth = linewidth_from_data_units(0.45, ax)
ax.set_ylim(M - 0.5, -0.5)
ax.set_xlim(-0.5, N - 0.5)
ax.set_aspect('equal')
ax.set_facecolor('black')
ax.set_yticks([i + 0.5 for i in range(M - 1)], minor=True)
ax.set_xticks([j + 0.5 for j in range(N - 1)], minor=True)
ax.grid(b=True, which='minor', color='white')
ax.set_xticks([])
ax.set_yticks([])
ax.tick_params(axis='both', which='both', length=0)
add_walls_and_source_to_ax(ax, colors, S, linewidth)
logging.info("finish plot_walls for right side")
return ax.figure
def put_num_on_matrix(m, walls, in_num):
matrix = np.copy(m)
for coords in walls:
x_min, x_max, y_min, y_max = coords[0], coords[2], coords[1], coords[3]
for x in range(x_min, x_max + 1 if (x_min < x_max) else x_max - 1 , 1 if (x_min < x_max) else -1):
x = int(x)
for y in range(y_min, y_max + 1 if (y_min < y_max) else y_max - 1 , 1 if (y_min < y_max) else -1):
y = int(y)
if y < len(matrix) and x < len(matrix[0]) and matrix is not None:
matrix[y][x] = in_num
return matrix
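# Illustrative usage (not part of the original module; values are hypothetical):
# mark a small rectangle of wall cells with -1. Wall coords are given as
# (x_min, y_min, x_max, y_max), matching how put_num_on_matrix unpacks them.
def _example_put_num_on_matrix():
    grid = np.zeros((5, 5), dtype=int)
    return put_num_on_matrix(grid, walls=[(1, 1, 3, 2)], in_num=-1)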
def put_rect_source_on_matrix(matrix, coords):
number_of_source = 0
x_min, x_max, y_min, y_max = coords[0], coords[2], coords[1], coords[3]
for x in range(x_min, x_max + 1 if (x_min < x_max) else x_max - 1 , 1 if (x_min < x_max) else -1):
x = int(x)
for y in range(y_min, y_max + 1 if (y_min < y_max) else y_max - 1 , 1 if (y_min < y_max) else -1):
y = int(y)
matrix[y][x] = int(number_of_source/2) + 1
number_of_source += 1
    assert number_of_source % 2 == 0, "Number of source outlets must be a multiple of 2!"
return matrix
def put_point_source_on_matrix(matrix, coords):
number_of_source = len(matrix)
    assert number_of_source % 2 == 0, "Number of source outlets must be a multiple of 2!"
print(coords)
for x,y,col in coords:
print(x,y,col)
matrix[int(y)][int(x)] = col
return matrix
def create_matrix_and_area_matrix(color_dict, canvas_size, pipe_size, source, arr):
matrix = np.zeros((np.array(canvas_size)/np.array(pipe_size)).astype('int').tolist()).astype('int')
area_matrix = None
color_set = list(set([item.split("_")[-1] for item in color_dict.keys()]))
for index in color_set:
for start_point, end_point in zip(color_dict[f"start_{index}"], color_dict[f"end_{index}"]):
# graph.draw_rectangle(start_point, end_point, fill_color=colors[int(index)], line_color=colors[int(index)])
start = np.array(start_point)
end = np.array(end_point)
start[1] = canvas_size[1]-start[1]
end[1] = canvas_size[1]-end[1]
start[0], start[1], end[0], end[1] = start[0]/pipe_size[0], start[1]/pipe_size[1], end[0]/pipe_size[0], end[1]/pipe_size[1]
arr = np.append(arr, [
|
np.append(start, end)
|
numpy.append
|
#!/usr/bin/env python
"""
faps -- Frontend for Automated Adsorption Analysis of Porous Solids.
aka
shpes -- Sorption analysis with a High throughput Python
frontend to Examine binding in Structures
Structure adsorption property analysis for high-throughput processing. Run
as a script, faps will automatically run complete analysis on a structure.
Sensible defaults are implemented, but calculations can be easily customised.
Faps also provides classes and methods for adapting the simulation or only
doing select parts.
"""
# Turn on keyword expansion to get revision numbers in version strings
# in .hg/hgrc put
# [extensions]
# keyword =
#
# [keyword]
# faps.py =
#
# [keywordmaps]
# Revision = {rev}
try:
__version_info__ = (1, 5, 0, int("$Revision$".strip("$Revision: ")))
except ValueError:
__version_info__ = (1, 5, 0, 0)
__version__ = "%i.%i.%i.%i" % __version_info__
import bz2
import code
try:
import configparser
except ImportError:
import ConfigParser as configparser
import gzip
import mmap
import os
import pickle
import re
import shlex
import shutil
import subprocess
import sys
import tarfile
import textwrap
import time
from copy import copy
from glob import glob
from itertools import count
from logging import warning, debug, error, info, critical
from math import ceil, log
from os import path
import numpy as np
from numpy import pi, cos, sin, sqrt, arccos, arctan2
from numpy import array, identity, dot, cross
from numpy.linalg import norm
from binding_sites.absl import calculate_binding_sites
from config import Options
from elements import WEIGHT, ATOMIC_NUMBER, UFF, VASP_PSEUDO_PREF
from elements import CCDC_BOND_ORDERS, GULP_BOND_ORDERS, OB_BOND_ORDERS, METALS
from elements import COVALENT_RADII, UFF_FULL, QEQ_PARAMS
from eos import peng_robinson
from job_handler import JobHandler
from logo import LOGO
# Global constants
DEG2RAD = pi / 180.0
BOHR2ANG = 0.52917720859
EV2KCAL = 23.060542301389
NAVOGADRO = 6.02214129E23
INFINITY = float('inf')
KCAL_TO_KJ = 4.1868 # Steam tables from openbabel
FASTMC_DEFAULT_GRID_SPACING = 0.1
# ID values for system state
NOT_RUN = 0
RUNNING = 1
FINISHED = 2
UPDATED = 3
SKIPPED = -1
NOT_SUBMITTED = -2
# Possible folder names; need these so that similar_ones_with_underscores are
# not globbed
FOLDER_SUFFIXES = ['gulp', 'gulp_opt', 'gulp_fit', 'siesta', 'vasp', 'egulp',
'repeat', 'fastmc', 'properties', 'absl', 'gromacs']
class PyNiss(object):
"""
PyNiss -- Negotiation of Intermediate System States
    A single property calculation for one structure. Instantiate with a set of
options, then run the job_dispatcher() to begin the calculation. The
calculation will pickle itself, or can be pickled at any time, by calling
dump_state().
"""
def __init__(self, options):
"""
        Instantiate an empty structure in the calculation; the dispatcher should
be called to fill it up with data, as needed.
"""
self.options = options
self.structure = Structure(options.get('job_name'))
self.state = {'init': (NOT_RUN, False),
'ff_opt': (NOT_RUN, False),
'dft': (NOT_RUN, False),
'esp': (NOT_RUN, False),
'charges': (NOT_RUN, False),
'properties': (NOT_RUN, False),
'absl': {},
'gcmc': {}}
self.job_handler = JobHandler(options)
def dump_state(self):
"""Write the .niss file holding the current system state."""
job_name = self.options.get('job_name')
info("Writing state file, %s.niss." % job_name)
os.chdir(self.options.get('job_dir'))
# Don't save the job handler in case it changes
save_handler = self.job_handler
self.job_handler = None
my_niss = open(job_name + ".niss", "wb")
pickle.dump(self, my_niss)
my_niss.close()
# put the job handler back and continue
self.job_handler = save_handler
def re_init(self, new_options):
"""Re initialize simulation (with updated options)."""
if new_options.getbool('update_opts'):
info("Using new options.")
self.options = new_options
self.structure.name = new_options.get('job_name')
else:
# Just update command line stuff
info("Using old options with new command line arguments.")
self.options.args = new_options.args
self.options.options = new_options.options
self.options.cmdopts = new_options.cmdopts
self.structure.name = new_options.get('job_name')
self.status(initial=True)
def job_dispatcher(self):
"""
        Run parts explicitly specified on the command line or do the next step
in an automated run. Drop to interactive mode, if requested.
"""
        # In case options have changed, re-initialize
self.job_handler = JobHandler(self.options)
if 'status' in self.options.args:
self.status(initial=True)
if self.options.getbool('interactive'):
code_locals = locals()
code_locals.update(globals())
console = code.InteractiveConsole(code_locals)
console.push('import rlcompleter, readline')
console.push('readline.parse_and_bind("tab: complete")')
banner = ("((-----------------------------------------------))\n"
"(( Interactive faps mode ))\n"
"(( ===================== ))\n"
"(( ))\n"
"(( WARNING: mode is designed for devs and ))\n"
"(( experts only! ))\n"
"(( Current simulation is accessed as 'self' and ))\n"
"(( associated methods. Type 'dir()' to see the ))\n"
"(( methods in the local namespace and 'help(x)' ))\n"
"(( for help on any object. ))\n"
"(( Use 'self.dump_state()' to save any changes. ))\n"
"((-----------------------------------------------))")
console.interact(banner=banner)
if self.options.getbool('import'):
info("Importing results from a previous simulation")
self.import_old()
self.dump_state()
if self.state['init'][0] == NOT_RUN:
info("Reading in structure")
# No structure, should get one
self.structure.from_file(
self.options.get('job_name'),
self.options.get('initial_structure_format'),
self.options)
if self.options.getbool('order_atom_types'):
info("Forcing atom re-ordering by types")
self.structure.order_by_types()
self.state['init'] = (UPDATED, False)
self.dump_state()
self.step_force_field()
self.step_dft()
self.step_charges()
if self.options.getbool('qeq_fit'):
if not 'qeq_fit' in self.state and self.state['charges'][0] == UPDATED:
info("QEq parameter fit requested")
self.run_qeq_gulp(fitting=True)
self.dump_state()
self.step_properties()
self.step_gcmc()
self.step_absl()
self.send_to_database()
self.post_summary()
def status(self, initial=False):
"""Print the current status to the terminal."""
valid_states = {NOT_RUN: 'Not run',
RUNNING: 'Running',
FINISHED: 'Finished',
UPDATED: 'Processed',
SKIPPED: 'Skipped',
NOT_SUBMITTED: 'Not submitted'}
if initial:
info("Previous system state reported from .niss file "
"(running jobs may have already finished):")
else:
info("Current system status:")
for step, state in self.state.items():
if step == 'gcmc':
if not state:
info(" * State of GCMC: Not run")
else:
for point, job in state.items():
if job[0] is RUNNING:
info(" * GCMC %s: Running, jobid: %s" %
(point, job[1]))
else:
info(" * GCMC %s: %s" %
(point, valid_states[job[0]]))
elif step == 'absl':
if not state:
info(" * State of ABSL: Not run")
else:
# ABSL used to be multiple jobs, still treat jobid as list
for point, jobs in state.items():
if jobs[0] is RUNNING:
info(" * ABSL %s: Running, jobids: %s" %
(point, ",".join('%s' % x for x in jobs[1])))
else:
info(" * ABSL %s: %s" %
(point, valid_states[jobs[0]]))
elif state[0] is RUNNING:
info(" * State of %s: Running, jobid: %s" % (step, state[1]))
else:
info(" * State of %s: %s" % (step, valid_states[state[0]]))
def send_to_database(self):
"""If using a database, store the results"""
# we can skip if not using a database
if not 'sql' in self.options.get('initial_structure_format'):
return
# extract the database and structure names
db_params = self.options.get('job_name').split('.')
# import this here so sqlalchemy is not required generally
from backend.sql import AlchemyBackend
database = AlchemyBackend(db_params[0])
info("Storing results in database")
database.store_results(db_params[1], int(db_params[2]), self.structure)
debug("Database finished")
def post_summary(self):
"""Summarise any results for GCMC, properties..."""
# Also includes excess calculation if void volume calculated
# N = pV/RT
all_csvs = {}
R_GAS = 8.3144621E25 / NAVOGADRO # A^3 bar K-1 molecule
job_name = self.options.get('job_name')
info("Summary of GCMC results")
info("======= ======= ======= ======= =======")
nguests = len(self.structure.guests)
for idx, guest in enumerate(self.structure.guests):
# Determine whether we can calculate the excess for
# any different probes
void_volume = self.structure.sub_property('void_volume')
he_excess, guest_excess = "", ""
if 1.0 in void_volume:
he_excess = 'He-xs-molc/uc,He-xs-mmol/g,He-xs-v/v,He-xs-wt%,'
if hasattr(guest, 'probe_radius'):
if guest.probe_radius != 1.0 and guest.probe_radius in void_volume:
guest_excess = 'xs-molc/uc,xs-mmol/g,xs-v/v,xs-wt%,'
if hasattr(guest, 'c_v') and guest.c_v:
#TODO(tdaff): Make standard in 2.0
# makes sure that c_v is there and not empty
cv_header = "C_v,stdev,"
else:
cv_header = ""
if hasattr(guest, 'fugacities') and guest.fugacities:
fuga_header = "f/bar,"
else:
fuga_header = ""
# Generate headers separately
csv = ["#T/K,p/bar,molc/uc,mmol/g,stdev,",
"v/v,stdev,wt%,stdev,hoa/kcal/mol,stdev,",
guest_excess, he_excess, cv_header, fuga_header,
",".join("p(g%i)" % gidx for gidx in range(nguests)), "\n"]
info(guest.name)
info("---------------------------------------")
info("molc/uc mmol/g vstp/v hoa T_P")
info("======= ======= ======= ======= =======")
for tp_point in sorted(guest.uptake):
# <N>, sd, supercell
uptake = guest.uptake[tp_point]
uptake = [uptake[0]/uptake[2], uptake[1]/uptake[2]]
hoa = guest.hoa[tp_point]
# uptake in mmol/g
muptake = 1000*uptake[0]/self.structure.weight
muptake_stdev = 1000*uptake[1]/self.structure.weight
# volumetric uptake
vuptake = (guest.molar_volume*uptake[0]/
(6.023E-4*self.structure.volume))
vuptake_stdev = (guest.molar_volume*uptake[1]/
(6.023E-4*self.structure.volume))
# weight percent uptake
wtpc = 100*(1 - self.structure.weight/
(self.structure.weight + uptake[0]*guest.weight))
wtpc_stdev = 100*(1 - self.structure.weight/
(self.structure.weight + uptake[1]*guest.weight))
info("%7.2f %7.2f %7.2f %7.2f %s" % (
uptake[0], muptake, vuptake, hoa[0],
("T=%s" % tp_point[0] +
''.join(['P=%s' % x for x in tp_point[1]]))))
csv.append("%f,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f," % (
tp_point[0], tp_point[1][idx], uptake[0],
muptake, muptake_stdev,
vuptake, vuptake_stdev,
wtpc, wtpc_stdev,
hoa[0], hoa[1]))
if guest_excess:
guest_void = void_volume[guest.probe_radius]
n_bulk = (tp_point[1][idx]*guest_void)/(tp_point[0]*R_GAS)
xs_uptake = uptake[0]-n_bulk
# uptake in mmol/g
muptake = 1000*xs_uptake/self.structure.weight
# volumetric uptake
vuptake = (guest.molar_volume*xs_uptake/
(6.023E-4*self.structure.volume))
# weight percent uptake
wtpc = 100*(1 - self.structure.weight/
(self.structure.weight + xs_uptake*guest.weight))
csv.append("%f,%f,%f,%f," % (
xs_uptake, muptake, vuptake, wtpc,))
if he_excess:
guest_void = void_volume[1.0]
n_bulk = (tp_point[1][idx]*guest_void)/(tp_point[0]*R_GAS)
xs_uptake = uptake[0]-n_bulk
# uptake in mmol/g
muptake = 1000*xs_uptake/self.structure.weight
# volumetric uptake
vuptake = (guest.molar_volume*xs_uptake/
(6.023E-4*self.structure.volume))
# weight percent uptake
wtpc = 100*(1 - self.structure.weight/
(self.structure.weight + xs_uptake*guest.weight))
csv.append("%f,%f,%f,%f," % (
xs_uptake, muptake, vuptake, wtpc,))
if cv_header:
csv.append("%f,%f," % (guest.c_v[tp_point]))
if fuga_header:
try:
csv.append("%f," % (guest.fugacities[tp_point]))
except KeyError:
# Assume it was done without fugacity correction
csv.append("%f," % tp_point[1][idx])
# list all the other guest pressures and start a new line
csv.append(",".join("%f" % x for x in tp_point[1]) + "\n")
csv_filename = '%s-%s.csv' % (job_name, guest.ident)
csv_file = open(csv_filename, 'w')
csv_file.writelines(csv)
csv_file.close()
all_csvs[csv_filename] = "".join(csv)
info("======= ======= ======= ======= =======")
info("Structure properties")
# Internally calculated surface area
surf_area_results = self.structure.surface_area()
if surf_area_results:
info("Summary of faps surface areas")
info("========= ========= ========= =========")
info(" radius/A total/A^2 m^2/cm^3 m^2/g")
info("========= ========= ========= =========")
for probe, area in surf_area_results.items():
vol_area = 1E4*area/self.structure.volume
specific_area = NAVOGADRO*area/(1E20*self.structure.weight)
info("%9.3f %9.2f %9.2f %9.2f" %
(probe, area, vol_area, specific_area))
info("========= ========= ========= =========")
# Messy, but check individual properties that might not be there
# and dump them to the screen
info("weight (u): %f" % self.structure.weight)
if hasattr(self.structure, 'pore_diameter'):
info("pores (A): %f %f %f" % self.structure.pore_diameter)
channel_results = self.structure.sub_property('dimensionality')
if channel_results:
for probe, channels in channel_results.items():
info(("channels %.2f probe: " % probe) +
" ".join("%i" % x for x in channels))
# The table is copied from above as it does some calculating
surf_area_results = self.structure.sub_property('zeo_surface_area')
if surf_area_results:
info("Summary of zeo++ surface areas")
info("========= ========= ========= =========")
info(" radius/A total/A^2 m^2/cm^3 m^2/g")
info("========= ========= ========= =========")
for probe, area in surf_area_results.items():
vol_area = 1E4*area/self.structure.volume
specific_area = NAVOGADRO*area/(1E20*self.structure.weight)
info("%9.3f %9.2f %9.2f %9.2f" %
(probe, area, vol_area, specific_area))
info("========= ========= ========= =========")
info("volume (A^3): %f" % self.structure.volume)
void_volume_results = self.structure.sub_property('void_volume')
        if void_volume_results:
info("Summary of zeo++ void volumes")
info("========= ========= ========= =========")
info(" radius/A total/A^3 fraction cm^3/g")
info("========= ========= ========= =========")
for probe, void in void_volume_results.items():
void_fraction = void/self.structure.volume
specific_area = NAVOGADRO*void/(1E24*self.structure.weight)
info("%9.3f %9.2f %9.5f %9.4f" %
(probe, void, void_fraction, specific_area))
info("========= ========= ========= =========")
pxrd = self.structure.sub_property('pxrd')
if pxrd:
info("Summary of PXRD; see properties for cpi file")
for probe, pattern in pxrd.items():
info("%s Powder XRD:" % probe)
plot = [['|']*21]
# 1000 points makes this 75 columns wide
averaged = [sum(pattern[x:x+20])/20.0
for x in range(0, 1000, 20)]
# make peaks horizontal first
peak = max(averaged)
for point in averaged:
height = int(round(15*point/peak))
plot.append([' ']*(15-height) + ['|']*height + ['-'])
# transpose for printing
plot = zip(*plot)
for line in plot:
info(''.join(line))
# Email at the end so everything is in the .flog
self.email(all_csvs)
def email(self, csvs=None):
"""Send an email, if one has not already been sent."""
job_name = self.options.get('job_name')
email_addresses = self.options.gettuple('email')
if email_addresses:
info("Emailing results to %s" % ", ".join(email_addresses))
else:
# nobody to email to, why bother?
return
import smtplib
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
# Construct an email, thanks documentation!
sender = '<NAME> <<EMAIL>>'
outer = MIMEMultipart()
outer['Subject'] = 'Results for faps job on %s' % job_name
outer['To'] = ', '.join(email_addresses)
outer['From'] = sender
outer.preamble = 'This is a MIME multipart message\n'
# Just attach all the csv files
if csvs is not None:
for csv in csvs:
msg = MIMEText(csvs[csv], _subtype='csv')
msg.add_header('Content-Disposition', 'attachment',
filename=csv)
outer.attach(msg)
# Include a cif file
msg_cif = MIMEText("".join(self.structure.to_cif()))
msg_cif.add_header('Content-Disposition', 'attachment',
filename="%s.faps.cif" % job_name)
outer.attach(msg_cif)
# And the flog file
try:
flog = open("%s.flog" % job_name)
msg_flog = MIMEText(flog.read())
flog.close()
msg_flog.add_header('Content-Disposition', 'attachment',
filename="%s.flog" % job_name)
outer.attach(msg_flog)
except IOError:
# Error reading the file, don't care
pass
# Send via local SMTP server
s = smtplib.SMTP('localhost')
s.sendmail(sender, email_addresses, outer.as_string())
s.quit()
def step_force_field(self):
"""Check the force field step of the calculation."""
end_after = False
if 'ff_opt' not in self.state:
self.state['ff_opt'] = (NOT_RUN, False)
if self.state['ff_opt'][0] not in [UPDATED, SKIPPED]:
if self.options.getbool('no_force_field_opt'):
info("Skipping force field optimisation")
self.state['ff_opt'] = (SKIPPED, False)
elif self.state['ff_opt'][0] == RUNNING:
job_state = self.job_handler.jobcheck(self.state['ff_opt'][1])
if not job_state:
info("Queue reports force field optimisation has finished")
self.state['ff_opt'] = (FINISHED, False)
else:
# Still running
info("Force field optimisation still in progress")
end_after = True
if self.state['ff_opt'][0] == NOT_RUN or 'ff_opt' in self.options.args:
jobid = self.run_ff_opt()
sys_argv_strip('ff_opt')
end_after = self.postrun(jobid)
self.dump_state()
if self.state['ff_opt'][0] == FINISHED:
self.structure.update_pos(self.options.get('ff_opt_code'),
options=self.options)
self.state['ff_opt'] = (UPDATED, False)
self.dump_state()
# If postrun is submitted then this script is done!
if end_after:
terminate(0)
def step_dft(self):
"""Check the DFT step of the calculation."""
end_after = False
if self.state['dft'][0] not in [UPDATED, SKIPPED]:
if self.options.getbool('no_dft'):
info("Skipping DFT step completely")
info("Job might fail later if you need the ESP")
self.state['dft'] = (SKIPPED, False)
elif self.state['dft'][0] == RUNNING:
job_state = self.job_handler.jobcheck(self.state['dft'][1])
if not job_state:
info("Queue reports DFT step has finished")
self.state['dft'] = (FINISHED, False)
else:
# Still running
info("DFT still in progress")
end_after = True
if self.state['dft'][0] == NOT_RUN or 'dft' in self.options.args:
jobid = self.run_dft()
sys_argv_strip('dft')
end_after = self.postrun(jobid)
self.dump_state()
if self.state['dft'][0] == FINISHED:
self.structure.update_pos(self.options.get('dft_code'))
self.state['dft'] = (UPDATED, False)
self.dump_state()
# If postrun is submitted then this script is done!
if end_after:
terminate(0)
def step_charges(self):
"""Check the charge step of the calculation."""
end_after = False
if self.state['charges'][0] not in [UPDATED, SKIPPED]:
if self.options.getbool('no_charges'):
info("Skipping charge calculation")
self.state['charges'] = (SKIPPED, False)
elif self.state['charges'][0] == RUNNING:
job_state = self.job_handler.jobcheck(self.state['charges'][1])
if not job_state:
info("Queue reports charge calculation has finished")
self.state['charges'] = (FINISHED, False)
else:
info("Charge calculation still running")
end_after = True
if self.state['charges'][0] == NOT_RUN or 'charges' in self.options.args:
jobid = self.run_charges()
sys_argv_strip('charges')
end_after = self.postrun(jobid)
self.dump_state()
if self.state['charges'][0] == FINISHED:
self.structure.update_charges(self.options.get('charge_method'),
self.options)
self.state['charges'] = (UPDATED, False)
self.dump_state()
# If postrun is submitted then this script is done!
if end_after:
terminate(0)
def step_gcmc(self):
"""Check the GCMC step of the calculation."""
end_after = False
jobids = {}
postrun_ids = []
if self.options.getbool('no_gcmc'):
info("Skipping GCMC simulation")
return
elif not self.state['gcmc'] or 'gcmc' in self.options.args:
# The dictionary is empty before any runs
info("Starting gcmc step")
jobids = self.run_fastmc()
sys_argv_strip('gcmc')
self.dump_state()
for tp_point, jobid in jobids.items():
if jobid is True:
self.state['gcmc'][tp_point] = (FINISHED, False)
elif jobid is False:
self.state['gcmc'][tp_point] = (SKIPPED, False)
else:
info("FastMC job in queue. Jobid: %s" % jobid)
self.state['gcmc'][tp_point] = (RUNNING, jobid)
postrun_ids.append(jobid)
# unfinished GCMCs
end_after = True
else:
# when the loop completes write out the state
self.dump_state()
for tp_point in self.state['gcmc']:
tp_state = self.state['gcmc'][tp_point]
if tp_state[0] == RUNNING:
new_state = self.job_handler.jobcheck(tp_state[1])
if not new_state:
info("Queue reports GCMC %s finished" % (tp_point,))
# need to know we have finished to update below
tp_state = (FINISHED, False)
self.state['gcmc'][tp_point] = tp_state
self.dump_state()
else:
info("GCMC %s still running" % (tp_point,))
# unfinished GCMC so exit later
end_after = True
# any states that need to be updated should have been done by now
if tp_state[0] == FINISHED:
startdir = os.getcwd()
# wooki seems slow to copy output files back
# so we give them a few chances to appear
max_attempts = 6
for attempt_count in range(max_attempts):
time.sleep(attempt_count)
try:
self.structure.update_gcmc(tp_point, self.options)
self.state['gcmc'][tp_point] = (UPDATED, False)
self.dump_state()
break
except IOError:
os.chdir(startdir)
else:
error('OUTPUT file never appeared')
if postrun_ids:
self.postrun(postrun_ids)
if end_after:
info("GCMC run has not finished completely")
terminate(0)
def step_properties(self):
"""Run the properties calculations if required."""
if self.state['properties'][0] not in [UPDATED, SKIPPED]:
if self.options.getbool('no_properties'):
info("Skipping all properties calculations")
self.state['properties'] = (SKIPPED, False)
if self.state['properties'][0] == NOT_RUN or 'properties' in self.options.args:
self.calculate_properties()
self.state['properties'] = (UPDATED, False)
self.dump_state()
def step_absl(self):
"""Check the binding site step of the calculation."""
end_after = False
jobids = {}
postrun_ids = []
# check for old simulations with no absl state
# TODO(tdaff): remove eventually
if 'absl' not in self.state:
self.state['absl'] = {}
if self.options.getbool('no_absl'):
info("Skipping ABSL calculation")
return
elif self.options.getbool('no_gcmc'):
info("no_gcmc requested, can't do ABSL, skipping")
return
elif not self.options.getbool('mc_probability_plot'):
info("No probability plot; Skipping ABSL calculation")
return
elif not self.options.getbool('find_maxima'):
info("No TABASCO maxima; Skipping ABSL calculation")
return
elif not self.state['absl'] or 'absl' in self.options.args:
# The dictionary is empty before any runs
info("Starting absl step")
jobids = self.run_absl()
sys_argv_strip('absl')
self.dump_state()
for tp_point, jobid in jobids.items():
if set(jobid) == set([True]):
self.state['absl'][tp_point] = (FINISHED, False)
elif set(jobid) == set([False]):
self.state['absl'][tp_point] = (SKIPPED, False)
else:
info("ABSL job in queue. Jobid: %s" % jobid)
self.state['absl'][tp_point] = (RUNNING, jobid)
postrun_ids.extend(jobid)
# unfinished ABSL calculations
end_after = True
else:
# when the loop completes write out the state
self.dump_state()
for tp_point in self.state['absl']:
tp_state = self.state['absl'][tp_point]
if tp_state[0] == RUNNING:
new_state = set([self.job_handler.jobcheck(job)
for job in tp_state[1]])
if new_state == set([False]):
info("Queue reports ABSL %s finished" % (tp_point,))
# need to know we have finished to update below
tp_state = (FINISHED, False)
self.state['absl'][tp_point] = tp_state
self.dump_state()
else:
info("ABSL %s still running" % (tp_point,))
# unfinished ABSL so exit later
end_after = True
# any states that need to be updated should have been done by now
if tp_state[0] == FINISHED:
startdir = os.getcwd()
# wooki seems slow to copy output files back
# so we give them a few chances to appear
max_attempts = 6
for attempt_count in range(max_attempts):
time.sleep(attempt_count)
try:
self.structure.update_absl(tp_point, self.options)
self.state['absl'][tp_point] = (UPDATED, False)
self.dump_state()
break
except IOError:
os.chdir(startdir)
else:
#TODO(tdaff): does this matter here?
error('ABSL output never appeared')
if postrun_ids:
self.postrun(postrun_ids)
if end_after:
info("ABSL run has not finished completely")
terminate(0)
def import_old(self):
"""Try and import any data from previous stopped simulation."""
job_name = self.options.get('job_name')
job_dir = self.options.get('job_dir')
try:
self.structure.from_file(
job_name,
self.options.get('initial_structure_format'),
self.options)
warning("Ensure that order_atom_types is on for pre-1.4 data")
if self.options.getbool('order_atom_types'):
info("Forcing atom re-ordering by types")
self.structure.order_by_types()
self.state['init'] = (UPDATED, False)
except IOError:
info("No initial structure found to import")
try:
self.structure.update_pos(self.options.get('ff_opt_code'))
self.state['ff_opt'] = (UPDATED, False)
except IOError:
info("No force field optimised structure found to import")
try:
self.structure.update_pos(self.options.get('dft_code'))
self.state['dft'] = (UPDATED, False)
except IOError:
info("No optimized structure found to import")
try:
self.structure.update_charges(self.options.get('charge_method'),
self.options)
self.state['charges'] = (UPDATED, False)
except IOError:
info("No charges found to import")
# Need to generate supercell here on import so that it is set, and
# is based on the cell from dft, if changed
self.structure.gen_supercell(self.options)
guests = [Guest(x) for x in self.options.gettuple('guests')]
if not same_guests(self.structure.guests, guests):
info("Replacing old guests with %s" % " ".join(guest.ident for
guest in guests))
self.structure.guests = guests
else:
# use existing guests that may have data
debug("Retaining previous guests")
guests = self.structure.guests
temps = self.options.gettuple('mc_temperature', float)
presses = self.options.gettuple('mc_pressure', float)
indivs = self.options.gettuple('mc_state_points', float)
for tp_point in state_points(temps, presses, indivs, len(guests)):
try:
self.structure.update_gcmc(tp_point, self.options)
self.state['gcmc'][tp_point] = (UPDATED, False)
except (IOError, OSError):
info("GCMC point %s not found" % str(tp_point))
# Reset directory at end
os.chdir(job_dir)
def postrun(self, jobid):
"""Determine if we need the job handler to post submit itself."""
# update the job tracker
if jobid is not False and jobid is not True:
if self.options.getbool('run_all'):
debug('Submitting postrun script')
os.chdir(self.options.get('job_dir'))
self.job_handler.postrun(jobid)
return True
else:
debug('Postrun script not submitted')
return False
else:
return False
def run_ff_opt(self):
"""Prepare the system and run the selected force field optimisation."""
ff_opt_code = self.options.get('ff_opt_code')
info("Checking connectivity/types")
if self.structure.check_connectivity():
if self.options.getbool('infer_types_from_bonds'):
self.structure.gen_types_from_bonds()
else:
warning("No types; try with infer_types_from_bonds")
else:
info("Bonds and types, provided")
info("Running a %s calculation" % ff_opt_code)
if ff_opt_code == 'gromacs':
jobid = self.run_optimise_gromacs()
elif ff_opt_code == 'gulp':
jobid = self.run_optimise_gulp()
else:
critical("Unknown force field method: %s" % ff_opt_code)
terminate(91)
if jobid is True:
# job run and finished
self.state['ff_opt'] = (FINISHED, False)
else:
info("Running %s job in queue. Jobid: %s" % (ff_opt_code, jobid))
self.state['ff_opt'] = (RUNNING, jobid)
return jobid
def run_dft(self):
"""Select correct method for running the dft/optim."""
dft_code = self.options.get('dft_code')
info("Running a %s calculation" % dft_code)
if dft_code == 'vasp':
jobid = self.run_vasp()
elif dft_code == 'siesta':
jobid = self.run_siesta()
else:
critical("Unknown dft method: %s" % dft_code)
terminate(92)
# update the job tracker
#if jobid is False:
# self.state['dft'] = (NOT_SUBMITTED, False)
# submission skipped
if jobid is True:
# job run and finished
self.state['dft'] = (FINISHED, False)
else:
info("Running %s job in queue. Jobid: %s" % (dft_code, jobid))
self.state['dft'] = (RUNNING, jobid)
return jobid
def run_charges(self):
"""Select correct charge processing methods."""
chg_method = self.options.get('charge_method')
info("Calculating charges with %s" % chg_method)
if chg_method == 'repeat':
# Get ESP
self.esp_to_cube()
jobid = self.run_repeat()
elif chg_method == 'gulp':
jobid = self.run_qeq_gulp()
elif chg_method == 'egulp':
jobid = self.run_qeq_egulp()
else:
critical("Unknown charge calculation method: %s" % chg_method)
terminate(93)
# update the job tracker
if jobid is True:
# job run and finished
self.state['charges'] = (FINISHED, False)
else:
info("Running %s job in queue. Jobid: %s" % (chg_method, jobid))
self.state['charges'] = (RUNNING, jobid)
return jobid
## Methods for specific codes start here
def run_optimise_gromacs(self):
"""Run GROMACS to do a UFF optimisation."""
job_name = self.options.get('job_name')
optim_code = self.options.get('ff_opt_code')
g_verbose = self.options.getbool('gromacs_verbose')
# Run in a subdirectory
optim_dir = path.join(self.options.get('job_dir'),
'faps_%s_%s' % (job_name, optim_code))
mkdirs(optim_dir)
os.chdir(optim_dir)
debug("Running in %s" % optim_dir)
metal_geom = self.options.get('gromacs_metal_geometry')
gro_file, top_file, itp_file = self.structure.to_gromacs(metal_geom)
# We use default names so we don't have to specify
# anything extra on the command line
filetemp = open('conf.gro', 'w')
filetemp.writelines(gro_file)
filetemp.close()
filetemp = open('topol.top', 'w')
filetemp.writelines(top_file)
filetemp.close()
filetemp = open('topol.itp', 'w')
filetemp.writelines(itp_file)
filetemp.close()
filetemp = open('grompp.mdp', 'w')
filetemp.writelines(mk_gromacs_mdp(self.structure.cell, mode='bfgs',
verbose=g_verbose))
filetemp.close()
filetemp = open('pcoupl.mdp', 'w')
filetemp.writelines(mk_gromacs_mdp(self.structure.cell, mode='pcoupl',
verbose=g_verbose))
filetemp.close()
# prepare for simulation!
# Codes we need; comment out the trjconv if being quiet
grompp = self.options.get('grompp_exe')
mdrun = self.options.get('mdrun_exe')
if g_verbose:
trjconv = "echo 0 | %s" % self.options.get('trjconv_exe')
else:
trjconv = "#echo 0 | %s" % self.options.get('trjconv_exe')
# everything runs in a script -- too many steps otherwise
# only make the g96 file at the end so we can tell if it breaks
gromacs_faps = open('gromacs_faps', 'w')
gromacs_faps.writelines([
"#!/bin/bash\n\n",
"export OMP_NUM_THREADS=1\n\n",
"# preprocess first bfgs\n",
"%s -maxwarn 2 &>> g.log\n\n" % grompp,
"# bfgs step\n",
"%s -nt 1 &>> g.log\n\n" % mdrun,
"%s -o traject1.gro -f traj.trr &>> g.log\n" % trjconv,
"# overwrite with pcoupl step\n",
"%s -maxwarn 2 -t traj.trr -f pcoupl.mdp &>> g.log\n\n" % grompp,
"# pcoupl step\n",
"%s -nt 1 &>> g.log\n\n" % mdrun,
"%s -o traject2.gro -f traj.trr &>> g.log\n" % trjconv,
"# overwrite with final bfgs\n",
"%s -maxwarn 2 -t traj.trr &>> g.log\n\n" % grompp,
"# generate final structure\n",
"%s -nt 1 -c confout.g96 &>> g.log\n" % mdrun,
"%s -o traject3.gro -f traj.trr &>> g.log\n" % trjconv])
gromacs_faps.close()
os.chmod('gromacs_faps', 0o755)
# Leave the run to the shell
info("Generated gromcas inputs and run script")
if self.options.getbool('no_submit'):
info("GROMACS input files generated; skipping job submission")
jobid = False
else:
jobid = self.job_handler.submit(optim_code, self.options)
# Tidy up at the end
os.chdir(self.options.get('job_dir'))
return jobid
def run_optimise_gulp(self):
"""Run GULP to do a UFF optimisation."""
job_name = self.options.get('job_name')
optim_code = 'gulp'
terse = self.options.getbool('gulp_terse')
# put an opt in path to distinguish from the charge calculation
optim_dir = path.join(self.options.get('job_dir'),
'faps_%s_%s_opt' % (job_name, optim_code))
mkdirs(optim_dir)
os.chdir(optim_dir)
debug("Running in %s" % optim_dir)
filetemp = open('%s.gin' % job_name, 'w')
filetemp.writelines(self.structure.to_gulp(optimise=True, terse=terse))
filetemp.close()
if 'GULP_LIB' not in os.environ:
warning("gulp library directory not set; optimisation might fail")
if self.options.getbool('no_submit'):
info("GULP input files generated; skipping job submission")
jobid = False
else:
jobid = self.job_handler.submit(optim_code, self.options,
input_file='%s.gin' % job_name)
# Tidy up at the end
os.chdir(self.options.get('job_dir'))
return jobid
def run_vasp(self):
"""Make inputs and run vasp job."""
job_name = self.options.get('job_name')
nproc = self.options.getint('vasp_ncpu')
# Keep things tidy in a subdirectory
dft_code = self.options.get('dft_code')
vasp_dir = path.join(self.options.get('job_dir'),
'faps_%s_%s' % (job_name, dft_code))
mkdirs(vasp_dir)
os.chdir(vasp_dir)
debug("Running in %s" % vasp_dir)
info("Running on %i nodes" % nproc)
filetemp = open("POSCAR", "w")
filetemp.writelines(self.structure.to_vasp(self.options))
filetemp.close()
esp_grid = self.esp_grid
#TODO(jlo): self.structure.types gives you each type
# e.g ['C', 'C', 'O'... ]
# self.options.get('...') to get charge or something set a default
# in default.ini
# calculate nelect
filetemp = open("INCAR", "w")
if self.esp_reduced:
# Let VASP do the grid if we don't need to
filetemp.writelines(mk_incar(self.options, esp_grid=esp_grid))
else:
filetemp.writelines(mk_incar(self.options))
filetemp.close()
filetemp = open("KPOINTS", "w")
filetemp.writelines(mk_kpoints(self.options.gettuple('kpoints', int)))
filetemp.close()
potcar_types = unique(self.structure.types)
filetemp = open("POTCAR", "w")
potcar_dir = self.options.get('potcar_dir')
previous_type = ""
for at_type in self.structure.types:
if at_type == previous_type:
continue
# Try and get the preferred POTCARS
debug("Using %s pseudopotential for %s" %
(VASP_PSEUDO_PREF.get(at_type, at_type), at_type))
potcar_src = path.join(potcar_dir,
VASP_PSEUDO_PREF.get(at_type, at_type),
"POTCAR")
shutil.copyfileobj(open(potcar_src), filetemp)
previous_type = at_type
filetemp.close()
if self.options.getbool('no_submit'):
info("Vasp input files generated; skipping job submission")
# act as if job completed
jobid = False
else:
self.job_handler.env(dft_code, options=self.options)
jobid = self.job_handler.submit(dft_code, self.options)
# Tidy up at the end and pass on job id
os.chdir(self.options.get('job_dir'))
return jobid
def run_siesta(self):
"""Make siesta input and run job."""
job_name = self.options.get('job_name')
nproc = self.options.getint('siesta_ncpu')
# Keep things tidy in a subdirectory
dft_code = self.options.get('dft_code')
siesta_dir = path.join(self.options.get('job_dir'),
'faps_%s_%s' % (job_name, dft_code))
mkdirs(siesta_dir)
os.chdir(siesta_dir)
debug("Running in %s" % siesta_dir)
info("Running on %i nodes" % nproc)
filetemp = open('%s.fdf' % job_name, 'w')
filetemp.writelines(self.structure.to_siesta(self.options))
filetemp.close()
psf_types = unique(self.structure.types)
psf_dir = self.options.get('psf_dir')
for at_type in psf_types:
psf_atm = '%s.psf' % at_type
psf_src = path.join(psf_dir, psf_atm)
psf_dest = path.join(siesta_dir, psf_atm)
try:
if not path.exists(psf_atm):
os.symlink(psf_src, psf_dest)
# symlinks not available pre 3.2 on windows
except AttributeError:
shutil.copy(psf_src, siesta_dir)
filetemp.close()
if self.options.getbool('no_submit'):
info("Siesta input files generated; skipping job submission")
jobid = False
else:
# sharcnet does weird things for siesta
self.job_handler.env(dft_code, options=self.options)
jobid = self.job_handler.submit(dft_code, self.options,
input_file='%s.fdf' % job_name)
# Tidy up at the end
os.chdir(self.options.get('job_dir'))
return jobid
def run_qeq_gulp(self, fitting=False):
"""Run GULP to calculate charge equilibration charges."""
job_name = self.options.get('job_name')
qeq_code = 'gulp'
if fitting:
qeq_dir = path.join(self.options.get('job_dir'),
'faps_%s_%s_fit' % (job_name, qeq_code))
else:
qeq_dir = path.join(self.options.get('job_dir'),
'faps_%s_%s' % (job_name, qeq_code))
mkdirs(qeq_dir)
os.chdir(qeq_dir)
debug("Running in %s" % qeq_dir)
qeq_dict = parse_qeq_params(self.options.gettuple('qeq_parameters'))
filetemp = open('%s.gin' % job_name, 'w')
filetemp.writelines(self.structure.to_gulp(qeq_fit=fitting, qeq_dict=qeq_dict))
filetemp.close()
if self.options.getbool('no_submit'):
info("GULP input files generated; skipping job submission")
jobid = False
elif fitting:
jobid = self.job_handler.submit(qeq_code, self.options,
input_file='%s.gin' % job_name)
info("Running GULP fitting job in queue. Jobid: %s" % jobid)
self.state['qeq_fit'] = (RUNNING, jobid)
else:
jobid = self.job_handler.submit(qeq_code, self.options,
input_file='%s.gin' % job_name)
# Tidy up at the end
os.chdir(self.options.get('job_dir'))
return jobid
def run_qeq_egulp(self):
"""Run EGULP to calculate charge equilibration charges."""
job_name = self.options.get('job_name')
qeq_code = 'egulp'
qeq_dir = path.join(self.options.get('job_dir'),
'faps_%s_%s' % (job_name, qeq_code))
typed_atoms = self.options.getbool('egulp_typed_atoms')
mkdirs(qeq_dir)
os.chdir(qeq_dir)
debug("Running in %s" % qeq_dir)
filetemp = open('%s.geo' % job_name, 'w')
filetemp.writelines(self.structure.to_egulp(typed_atoms))
filetemp.close()
# EGULP defaults to GULP parameters if not specified
egulp_parameters = self.options.gettuple('qeq_parameters')
if 'mepo' in egulp_parameters:
from parameters import mepo_qeq
info("Using MEPO-QEq base parameters")
egulp_parameters = [x for x in egulp_parameters if x != 'mepo']
for element, parameters in mepo_qeq.items():
# Put MEPO parameters at the beginning so they can be
# overridden
plist = [element, parameters[0], parameters[1]]
egulp_parameters = plist + egulp_parameters
if not egulp_parameters:
# parameters are mandatory in new egulp
egulp_parameters = ('H', QEQ_PARAMS['H'][0], QEQ_PARAMS['H'][1])
else:
info("Custom EGULP parameters selected")
filetemp = open('%s.param' % job_name, 'w')
filetemp.writelines(mk_egulp_params(egulp_parameters))
filetemp.close()
filetemp = open('%s.ini' % job_name, 'w')
filetemp.writelines(mk_egulp_ini(self.options))
filetemp.close()
egulp_args = ['%s.geo' % job_name,
'%s.param' % job_name,
'%s.ini' % job_name]
if self.options.getbool('no_submit'):
info("EGULP input files generated; skipping job submission")
jobid = False
else:
jobid = self.job_handler.submit(qeq_code, self.options,
input_args=egulp_args)
# Tidy up at the end
os.chdir(self.options.get('job_dir'))
return jobid
def esp_to_cube(self):
"""Make the cube for repeat input."""
job_name = self.options.get('job_name')
# No case where the esp source will be different from the dft code
esp_src = self.options.get('dft_code')
repeat_dir = path.join(self.options.get('job_dir'),
'faps_%s_repeat' % job_name)
mkdirs(repeat_dir)
src_dir = path.join(self.options.get('job_dir'),
'faps_%s_%s' % (job_name, esp_src))
os.chdir(src_dir)
if esp_src == 'vasp':
esp_to_cube_args = shlex.split(self.options.get('vasp_to_cube'))
info("Converting vasp esp to cube, this might take a minute...")
try:
fix_vasp_wrapped_types('LOCPOT')
except IOError:
error("Couldn't find the LOCPOT file; did VASP fail?")
submit = subprocess.Popen(esp_to_cube_args)
submit.wait()
# Cube should have job_name, but can get truncated;
# therefore we try to look for it first
cube_file = glob('*.cube')
if len(cube_file) == 1:
cube_file = cube_file[0]
elif len(cube_file) > 1:
cube_file = cube_file[0]
warning("More or than one .cube found; using %s" % cube_file)
else:
error("No cube files found; check vasp_to_cube output")
# Move it to the repeat directory and give a proper name
move_and_overwrite(cube_file,
path.join(repeat_dir, job_name + '.cube'))
unneeded_files = self.options.gettuple('vasp_delete_files')
remove_files(unneeded_files)
keep_files = self.options.gettuple('vasp_compress_files')
compress_files(keep_files)
elif esp_src == 'siesta':
esp_to_cube_args = shlex.split(self.options.get('siesta_to_cube'))
esp_grid = self.esp_grid
info("Generating ESP grid of %ix%ix%i" % esp_grid)
siesta_to_cube_input = [
"%s\n" % job_name,
"%f %f %f\n" % (0.0, 0.0, 0.0),
"%i %i %i\n" % esp_grid]
info("Converting siesta esp to cube, this might take a minute...")
submit = subprocess.Popen(esp_to_cube_args, stdin=subprocess.PIPE)
submit.communicate(input=''.join(siesta_to_cube_input))
move_and_overwrite(job_name + '.cube', repeat_dir)
unneeded_files = self.options.gettuple('siesta_delete_files')
remove_files(unneeded_files)
keep_files = self.options.gettuple('siesta_compress_files')
compress_files(keep_files)
os.chdir(self.options.get('job_dir'))
def run_repeat(self):
"""Submit the repeat calc to the queue."""
job_name = self.options.get('job_name')
charge_code = self.options.get('charge_method')
repeat_dir = path.join(self.options.get('job_dir'),
'faps_%s_%s' % (job_name, charge_code))
mkdirs(repeat_dir)
os.chdir(repeat_dir)
if self.options.getbool('symmetry'):
mk_repeat(cube_name=job_name + '.cube', symmetry=True)
mk_connectivity_ff(self.structure.symmetry_tree)
else:
mk_repeat(cube_name=job_name + '.cube', symmetry=False)
if self.options.getbool('no_submit'):
info("REPEAT input files generated; skipping job submission")
jobid = False
else:
jobid = self.job_handler.submit(charge_code, self.options)
os.chdir(self.options.get('job_dir'))
return jobid
def run_fastmc(self):
"""Submit a fastmc job to the queue."""
job_name = self.options.get('job_name')
mc_code = self.options.get('mc_code')
# Set the guests before generating the files
# Load here as options may change in each run
# and before changing directory, or it will not find guests.lib
guests = [Guest(x) for x in self.options.gettuple('guests')]
if not same_guests(self.structure.guests, guests):
info("Replacing old guests with %s" % " ".join(guest.ident for
guest in guests))
self.structure.guests = guests
else:
# use existing guests that may have data
debug("Retaining previous guests")
guests = self.structure.guests
gcmc_dir = path.join(self.options.get('job_dir'),
'faps_%s_%s' % (job_name, mc_code))
mkdirs(gcmc_dir)
os.chdir(gcmc_dir)
config, field = self.structure.to_config_field(self.options, fastmc=True)
filetemp = open("CONFIG", "w")
filetemp.writelines(config)
filetemp.close()
filetemp = open("FIELD", "w")
filetemp.writelines(field)
filetemp.close()
temps = self.options.gettuple('mc_temperature', float)
presses = self.options.gettuple('mc_pressure', float)
indivs = self.options.gettuple('mc_state_points', float)
jobids = {}
for tp_point in state_points(temps, presses, indivs, len(guests)):
temp = tp_point[0]
press = tp_point[1]
info("Running GCMC: T=%.1f " % temp +
" ".join(["P=%.2f" % x for x in press]))
tp_path = format_tp_path(tp_point)
mkdirs(tp_path)
os.chdir(tp_path)
try_symlink(path.join('..', 'CONFIG'), 'CONFIG')
try_symlink(path.join('..', 'FIELD'), 'FIELD')
filetemp = open("CONTROL", "w")
# Calculate fugacities for the input if required
if self.options.get('equation_of_state').lower() == 'peng-robinson':
info("Using Peng-Robinson EOS gas fugacities")
ideal = {}
for guest, pressure in zip(guests, press):
if not hasattr(guest, 'species'):
try:
guest.species = Guest(guest.ident).species
except AttributeError:
error("Unable to use equation of state with guest"
"%s. Failure imminent." % guest.name)
if not hasattr(guest, 'fugacities'):
guest.fugacities = {}
ideal[guest.species] = pressure
# Apply the correction
fugacities = peng_robinson(ideal, temp)
fuga = []
for guest, pressure in zip(guests, press):
info("Fugacity correction for %s: %f bar -> %f bar" %
(guest.ident, pressure, fugacities[guest.species]))
fuga.append(fugacities[guest.species])
guest.fugacities[tp_point] = fugacities[guest.species]
# Expects single guest not in a list
if len(guests) == 1:
fuga = fuga[0]
else:
info("Using ideal gas fugacities")
for guest, pressure in zip(guests, press):
guest.fugacities[tp_point] = pressure
fuga = press
# make control with fugacities
filetemp.writelines(mk_gcmc_control(temp, fuga, self.options,
guests, self.structure.gcmc_supercell))
filetemp.close()
if self.options.getbool('no_submit'):
info("FastMC input files generated; "
"skipping job submission")
jobids[(temp, press)] = False
else:
jobid = self.job_handler.submit(mc_code, self.options)
jobids[(temp, press)] = jobid
os.chdir('..')
os.chdir(self.options.get('job_dir'))
return jobids
def run_absl(self):
"""Submit absl jobs to the queue."""
job_name = self.options.get('job_name')
guests = self.structure.guests
# run in the gcmc directories
absl_dir = path.join(self.options.get('job_dir'),
'faps_%s_%s' % (job_name, 'absl'))
mkdirs(absl_dir)
os.chdir(absl_dir)
temps = self.options.gettuple('mc_temperature', float)
presses = self.options.gettuple('mc_pressure', float)
indivs = self.options.gettuple('mc_state_points', float)
jobids = {}
for tp_point in state_points(temps, presses, indivs, len(guests)):
temp = tp_point[0]
press = tp_point[1]
info("Running ABSL: T=%.1f " % temp +
" ".join(["P=%.2f" % x for x in press]))
tp_path = format_tp_path(tp_point)
mkdirs(tp_path)
os.chdir(tp_path)
# make the dummy;
dummy_guest = self.structure.guests[0]
dummy_include = {dummy_guest.ident: [[[x, 0.0, 0.0] for x in
range(dummy_guest.natoms)]]}
with open("CONFIG", "w") as config:
with open("FIELD", "w") as field:
dlp_files = self.structure.to_config_field(
self.options, include_guests=dummy_include, dummy=True)
config.writelines(dlp_files[0])
field.writelines(dlp_files[1])
with open("CONTROL", "w") as control:
control.writelines(mk_dl_poly_control(self.options, dummy=True))
# Keep track of directories so that we can run jobs at once
individual_directories = ['.']
# calculate binding sites here and submit
for guest in self.structure.guests:
binding_sites = calculate_binding_sites(guest, tp_point,
self.structure.cell)
if hasattr(guest, 'binding_sites'):
guest.binding_sites[tp_point] = binding_sites
else:
guest.binding_sites = {tp_point: binding_sites}
for bs_idx, binding_site in enumerate(binding_sites):
bs_directory = "%s_bs_%04d" % (guest.ident, bs_idx)
mkdirs(bs_directory)
os.chdir(bs_directory)
include_guests = {guest.ident: [guest.aligned_to(*binding_site)]}
dlp_files = self.structure.to_config_field(
self.options, include_guests=include_guests)
with open("CONFIG", "w") as config:
config.writelines(dlp_files[0])
if bs_idx > 0:
# symlink on FIELD to save space
zero_directory = "%s_bs_%04d" % (guest.ident, 0)
try_symlink(path.join('..', zero_directory, 'FIELD'),
'FIELD')
try_symlink(path.join('..', zero_directory, 'CONTROL'),
'CONTROL')
else:
# Always put the FIELD and CONTROL in zero to symlink to
with open("FIELD", "w") as field:
field.writelines(dlp_files[1])
with open("CONTROL", "w") as control:
control.writelines(mk_dl_poly_control(self.options))
individual_directories.append(bs_directory)
os.chdir('..')
# Make the script to run all the jobs now, using the individual
# directories
dl_poly_exe = self.options.get('dl_poly_exe')
# Try and delete REVIVE files while running the DL_POLY jobs
# we need to keep a few for processing, like OUTPUT, REVCON, CONFIG and
# STATIS; FIELD and CONTROL will hopefully be symlinks, so we
# can't delete them, but REVIVE is never needed
absl_delete_files = self.options.get('absl_delete_files')
if 'REVIVE' in absl_delete_files or '*_bs_*' in absl_delete_files:
rm_line = 'rm REVIVE\n'
else:
rm_line = ''
absl_script = ["#!/bin/bash\n\n", "export FORT_BUFFERED=true\n\n",
"export OMP_NUM_THREADS=1\n\n"]
for directory in individual_directories:
absl_script.extend(["pushd %s > /dev/null\n" % directory,
"%s\n" % dl_poly_exe,
rm_line,
"popd > /dev/null\n"])
absl_faps = open('absl_faps', 'w')
absl_faps.writelines(absl_script)
absl_faps.close()
os.chmod('absl_faps', 0o755)
# Submit this script
if self.options.getbool('no_submit'):
info("ABSL input files generated; skipping job submission")
jobids[(temp, press)] = [False]
else:
jobid = self.job_handler.submit('absl', self.options)
jobids[(temp, press)] = [jobid]
os.chdir('..')
os.chdir(self.options.get('job_dir'))
return jobids
def calculate_properties(self):
"""Calculate general structural properties."""
job_name = self.options.get('job_name')
job_dir = self.options.get('job_dir')
props_dir = path.join(job_dir, 'faps_%s_properties' % job_name)
mkdirs(props_dir)
os.chdir(props_dir)
# Neighbour list is only used by surface area, uncomment if needed
# for anything else
#self.structure.gen_neighbour_list()
# Since this runs before fastmc, and can run without it, check if the
# guests are initialised here
guests = [Guest(x) for x in self.options.gettuple('guests')]
if not same_guests(self.structure.guests, guests):
info("Replacing old guests with %s" % " ".join(guest.ident for
guest in guests))
self.structure.guests = guests
##
# Surface area calculations
##
surf_probes = self.options.gettuple('surface_area_probe', dtype=float)
for probe in surf_probes:
if self.structure.surface_area(probe) is None:
self.structure.surface_area(probe, value=self.calc_surface_area(probe))
# Neighbour list makes .niss too big; remove them
for atom in self.structure.atoms:
atom.neighbours = None
del atom.neighbours
# Zeoplusplus gives fast access to many properties
if self.options.getbool('zeo++'):
try:
self.calculate_zeo_properties()
except (OSError, IOError):
error("Error running zeo++; skipping")
# PLATON can calculate the PXRD pattern
if self.options.getbool('platon_pxrd'):
try:
self.calculate_pxrd()
except (OSError, IOError):
error("Error running platon; skipping")
os.chdir(job_dir)
def calculate_zeo_properties(self):
"""Run the zeo++ and update properties with no error trapping."""
job_name = self.options.get('job_name')
zeofiles = self.structure.to_zeoplusplus()
filetemp = open("%s.cssr" % job_name, 'w')
filetemp.writelines(zeofiles[0])
filetemp.close()
filetemp = open("%s.rad" % job_name, 'w')
filetemp.writelines(zeofiles[1])
filetemp.close()
filetemp = open("%s.mass" % job_name, 'w')
filetemp.writelines(zeofiles[2])
filetemp.close()
probes = set([1.0]) # Always have a helium probe
for guest in self.structure.guests:
if hasattr(guest, 'probe_radius'):
probes.add(guest.probe_radius)
zeo_exe = shlex.split(self.options.get('zeo++_exe'))
zeo_exe += ['-mass', '%s.mass' % job_name, '-r', '%s.rad' % job_name]
cssr_file = ['%s.cssr' % job_name]
# included sphere, free sphere, included sphere along free path
zeo_command = zeo_exe + ['-res'] + cssr_file
info("Running zeo++ pore diameters")
debug("Running command: '" + " ".join(zeo_command) + "'")
zeo_process = subprocess.Popen(zeo_command, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
zeo_process.wait()
zeo_stderr = " ".join(x.strip() for x in zeo_process.stderr.readlines())
debug(zeo_stderr)
if "Voronoi volume check failed" in zeo_stderr:
warning("Structure is likely bad; zeo++ is unable to complete")
warning(zeo_stderr)
self.structure.bad_structure = True
res_file = open('%s.res' % job_name).read().split()
self.structure.pore_diameter = tuple(float(x) for x in res_file[1:])
atom_samples = '%i' % 2000
volume_samples = '%i' % (20*self.structure.cell.volume)
for probe in probes:
zeo_command = zeo_exe + [
'-chan', '%f' % probe,
'-sa', '%f' % probe, '%f' % probe, atom_samples,
'-vol', '%f' % probe, '%f' % probe, volume_samples] + cssr_file
debug("Running command: '" + " ".join(zeo_command) + "'")
zeo_process = subprocess.Popen(zeo_command, stdout=subprocess.PIPE)
zeo_process.wait()
# channel dimensionality
channels = [int(x) for x in open('%s.chan' % job_name).read().split()[5:]]
self.structure.sub_property('dimensionality', probe, channels)
# surface area
for line in open('%s.sa' % job_name):
if 'A^2' in line:
self.structure.sub_property('zeo_surface_area', probe,
value=float(line.split()[-1]))
# accessible volume
for line in open('%s.vol' % job_name):
if 'A^3' in line:
self.structure.sub_property('void_volume', probe,
value=float(line.split()[-1]))
def calculate_pxrd(self):
"""Run platon PXRD and update properties with no error trapping."""
job_name = self.options.get('job_name')
out_cif = self.structure.to_cif()
filetemp = open("%s.faps.cif" % job_name, 'w')
filetemp.writelines(out_cif)
filetemp.close()
platon_exe = self.options.get('platon_exe')
platon_cmd = [platon_exe, '-Q', '-o', '%s.faps.cif' % job_name]
info("Running PLATON PXRD")
debug("Running command: '" + " ".join(platon_cmd) + "'")
platon_process = subprocess.Popen(platon_cmd, stdout=subprocess.PIPE)
platon_process.wait()
cpi_file = open('%s.faps.cpi' % job_name).readlines()
probe = cpi_file[4].strip() # source metal, e.g. Cu, Mo
try:
xrd = [int(x) for x in cpi_file[10:]]
self.structure.sub_property('pxrd', probe=probe, value=xrd)
except ValueError:
warning("PXRD gave weird result, check structure")
# These are big and useless?
remove_files(['%s.faps.lis' % job_name, '%s.faps.eld' % job_name,
'%s.faps.ps' % job_name])
@property
def esp_grid(self):
"""Estimate the esp grid based on resolution and memory."""
# If repeat is using double precision, use 4 for single
repeat_prec = 8
# User defined resolution, try to use this
resolution = self.options.getfloat('esp_resolution')
repeat_ncpu = self.options.getint('repeat_ncpu')
if repeat_ncpu == 1:
vmem = self.options.getfloat('serial_memory')
else:
vmem = self.options.getfloat('threaded_memory')
# Nice even grids might scale better in parallel repeat
esp_grid = tuple([int(4*np.ceil(x/(4*resolution)))
for x in self.structure.cell.params[:3]])
memory_guess = prod(esp_grid)*self.structure.natoms*repeat_prec/1e9
self._esp_reduced = False
if memory_guess > vmem:
warning("ESP at this resolution might need up to %.1f GB of "
"memory but calculation will only request %.1f" %
(memory_guess, vmem))
resolution = resolution/pow(vmem/memory_guess, 1.0/3)
esp_grid = tuple([int(4*np.ceil(x/(4*resolution)))
for x in self.structure.cell.params[:3]])
warning("Reduced grid to %.2f A resolution to fit" % resolution)
self._esp_reduced = True
elif resolution != 0.1:
# VASP defaults to grids of around 0.1, so check if user has
# requested a reduced grid
info("User requested esp resolution %f" % resolution)
self._esp_reduced = True
self._esp_grid = esp_grid
return esp_grid
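# Illustrative sizing check for the estimate above (numbers are examples
# only): a 20 A cubic cell at 0.1 A resolution gives a 200x200x200 grid;
# with 500 atoms and repeat_prec = 8 bytes,
#     memory_guess = 8e6 * 500 * 8 / 1e9 = 32 GB.
# If only 4 GB is available the resolution is stretched by
# (4/32)**(-1.0/3) = 2, i.e. 0.1 -> 0.2 A, giving a 100x100x100 grid
# that needs roughly 4 GB.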
@property
def esp_reduced(self):
"""Has the esp been reduced to fit the memory requirements?"""
if not hasattr(self, '_esp_reduced'):
# generate the esp and check memory requirements
self.esp_grid
return self._esp_reduced
def calc_surface_area(self, rprobe=0.0):
"""Accessible surface area by uniform or Monte Carlo sampling."""
self.structure.gen_neighbour_list()
xyz = []
resolution = self.options.getfloat('surface_area_resolution')
uniform = self.options.getbool('surface_area_uniform_sample')
info("Calculating surface area: %.3f probe, %s points, %.3f res" %
(rprobe, ("random","uniform")[uniform], resolution))
total_area = 0.0
hydrophilic_area = 0.0
# gromacs default of 0.2 seems very constrained
hydrophilic_threshold = 0.3
cell = self.structure.cell.cell
inv_cell = np.linalg.inv(cell.T)
# Pre-calculate and organise the in-cell atoms
atoms = [(atom.ipos(cell, inv_cell).tolist(),
atom.ifpos(inv_cell),
atom.vdw_radius+rprobe,
atom.neighbours,
atom) for atom in self.structure.atoms]
# sigma is the vdw_radius plus distance to center of probe, which
# gives accessible surface area;
all_samples = []
for a1_pos, a1_fpos, a1_sigma, neighbours, atom in atoms:
surface_area = 4*pi*(a1_sigma**2)
nsamples = int(surface_area/resolution)
if nsamples not in all_samples:
debug("Atom type with %i samples" % nsamples)
all_samples.append(nsamples)
ncount = 0
if uniform:
# uniform spiral sample of surface
z_vals = np.linspace(1, -1, nsamples, endpoint=True)
r_vals = sqrt(1-z_vals**2)
t_vals = np.linspace(0, pi*(3-(5**0.5))*nsamples,
nsamples, endpoint=False)
points = array([r_vals*cos(t_vals),
r_vals*sin(t_vals),
z_vals]).transpose()*a1_sigma + a1_pos
else:
# random MC sampling
phi = 2*np.random.random(nsamples)*pi
costheta = np.random.random(nsamples)*2 - 1
theta = arccos(costheta)
points = array([sin(theta)*cos(phi),
sin(theta)*sin(phi),
cos(theta)]).transpose()*a1_sigma + a1_pos
# All points are brought into the cell
points = [dot(inv_cell, x) for x in points]
fpoints = np.mod(points, 1.0)
points = [list(
|
dot(x, cell)
|
numpy.dot
|
#Author: <NAME> (c) 2018
from collections import Counter
import dill
import glob
import gzip
import igraph as ig
import itertools
import leidenalg
#import magic
import matplotlib
from matplotlib import pyplot
import numba
import numpy
import os
import pickle
from plumbum import local
import random
import re
import scipy
from scipy.cluster import hierarchy
import scipy.sparse as sps
from scipy.spatial import distance
import scipy.stats as stats
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.decomposition import TruncatedSVD
from sklearn import neighbors
from sklearn import metrics
import sys
import umap
def find_nearest_genes(peak_files, out_subdir, refseq_exon_bed):
#get unix utilities
bedtools, sort, cut, uniq, awk = local['bedtools'], local['sort'], local['cut'], local['uniq'], local['awk']
#process the peak files to find nearest genes
nearest_genes = []
for path in sorted(peak_files):
out_path = os.path.join(out_subdir, os.path.basename(path).replace('.bed', '.nearest_genes.txt'))
cmd = (bedtools['closest', '-D', 'b', '-io', '-id', '-a', path, '-b', refseq_exon_bed] |
cut['-f1,2,3,5,9,12'] | #fields are chrom, start, stop, peak sum, gene name, distance
awk['BEGIN{OFS="\t"}{if($6 > -1200){print($1, $2, $3, $6, $5, $4);}}'] |
sort['-k5,5', '-k6,6nr'] |
cut['-f5,6'])()
with open(out_path, 'w') as out:
prev_gene = None
for idx, line in enumerate(str(cmd).strip().split('\n')):
if prev_gene is None or not line.startswith(prev_gene):
# print(line)
line_split = line.strip().split()
prev_gene = line_split[0]
out.write(line + '\n')
nearest_genes.append(out_path)
return nearest_genes
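# Minimal sketch of the plumbum piping pattern used in find_nearest_genes
# above. This helper is illustrative only (it assumes the coreutils sort,
# uniq and wc binaries are on PATH) and is not called by the pipeline.
def _count_unique_lines(path):
    sort, uniq, wc = local['sort'], local['uniq'], local['wc']
    # commands bound with [...] compose lazily with | and only execute when
    # the resulting pipeline object is called
    pipeline = sort[path] | uniq | wc['-l']
    return int(pipeline().strip())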
def load_expr_db(db_path):
if os.path.basename(db_path) == 'RepAvgGeneTPM.csv':
with open(db_path) as lines_in:
db_headers = lines_in.readline().strip().split(',')[1:]
db_vals = numpy.loadtxt(db_path, delimiter=',', skiprows=1, dtype=object)[:,1:]
elif os.path.basename(db_path) == 'gexplore_l2_tissue_expr.jonathan_updated.txt.gz':
db_headers, db_vals = load_expr_db2(db_path)
else:
with open(db_path) as lines_in:
db_headers = lines_in.readline().strip().split('\t')
db_vals = numpy.loadtxt(db_path, delimiter='\t', skiprows=1, dtype=object)
print('Loaded DB shape: {!s}'.format(db_vals.shape))
return (db_headers, db_vals)
def load_expr_db2(db_path):
db_vals = numpy.loadtxt(db_path, delimiter='\t', skiprows=1, dtype=object)
gene_set = sorted(set(db_vals[:,0]))
tissue_set = sorted(set(db_vals[:,2]))
db_data = numpy.zeros((len(gene_set), len(tissue_set)))
gene_idx = None
gene_name = ''
gene_vals = []
tissue_idx = []
for idx in range(db_vals.shape[0]):
db_elt = db_vals[idx]
#rule out any genes with 95% CI lower bound of zero
if float(db_elt[6]) <= 0:
continue
if db_elt[0] != gene_name:
if gene_idx is not None:
db_data[gene_idx,tissue_idx] = gene_vals
gene_name = db_elt[0]
gene_idx = gene_set.index(gene_name)
gene_vals = []
tissue_idx = []
#use the bootstrap median value
gene_vals.append(float(db_elt[5]))
tissue_idx.append(tissue_set.index(db_elt[2]))
return (['gene_name'] + tissue_set,
numpy.hstack([numpy.array(gene_set, dtype=object)[:,None], db_data.astype(object)]))
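# Toy illustration (not real data) of the shape returned by the loaders
# above: vals[:, 0] holds gene names, the remaining columns hold per-tissue
# expression values in an object array.
_example_vals = numpy.array([['geneX', 1.5, 0.0],
                             ['geneY', 0.2, 3.1]], dtype=object)
assert _example_vals[:, 1:].astype(float).shape == (2, 2)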
TOPN=500
def get_gene_data(genes_path, gene_expr_db, topn=TOPN):
if isinstance(genes_path, list):
genes_list = genes_path
else:
with open(genes_path) as lines_in:
genes_list = [elt.strip().split()[:2] for elt in lines_in]
gene_idx = [(numpy.where(gene_expr_db[:,0] == elt[0])[0],elt[1]) for elt in genes_list]
gene_idx_sorted = sorted(gene_idx, key=lambda x:float(x[1]), reverse=True)
gene_idx, gene_weights = zip(*[elt for elt in gene_idx_sorted if len(elt[0]) > 0][:topn])
gene_idx = [elt[0] for elt in gene_idx]
gene_data = gene_expr_db[:,1:].astype(float)[gene_idx,:]
denom = numpy.sum(gene_data, axis=1)[:,None] + 1e-8
gene_norm = gene_data/denom
return gene_idx, gene_data, gene_norm, len(genes_list),
|
numpy.array(gene_weights, dtype=float)
|
numpy.array
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import numpy as np
import numpy.linalg as la
import fenics
# CLASS: Bisection Mesh Refinement
class BisectionRefinement:
def __init__(self, polyhedron):
self.polyhedron = polyhedron
self.factor = 0.49 # must be < 0.5
self.R0 = self.calc_R0()
# converts the geometry corner c_k to fenics.Point(c_k)
def convert_to_fenics_format(self, z):
x1 = z[0]
x2 = z[1]
x3 = z[2]
return fenics.Point(x1, x2, x3)
# calculates R0 for corners
def calc_R0(self):
num_corners = len(self.polyhedron.corners)
R0 = np.zeros(num_corners)
for j in range(0, num_corners):
d_min = 1e+10
for i in range(0, num_corners):
if i != j:
d = la.norm(self.polyhedron.corners[i] - self.polyhedron.corners[j])
d_min = min(d_min, d)
R0[j] = d_min * self.factor
return R0
# area of triangle; Heron's formula
def area_triangle(self, a, b, c):
p = (a + b + c) / 2
area = np.sqrt(p * (p - a) * (p - b) * (p - c))
return area
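# Worked check of Heron's formula above: sides 3, 4, 5 give p = 6 and
# area = sqrt(6 * 3 * 2 * 1) = 6, which matches the right-triangle value
# (3 * 4) / 2.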
# distance of given cell from edge
def distance_from_edge(self, cell, p1, p2, e):
vertices = cell.get_vertex_coordinates()
num_vertices = vertices.shape[0] // 3
d2e = np.zeros(num_vertices)
for k in range(num_vertices):
v = self.convert_to_fenics_format(vertices[k * 3:(k + 1) * 3])
r1 = fenics.Point.distance(v, p1)
r2 = fenics.Point.distance(v, p2)
triArea = self.area_triangle(e, r1, r2)
d2e[k] = 2 * triArea / e
# print(k, d2e[k])
return np.amin(d2e)
# uniform refinement
def uniform(self, h0, mesh):
while mesh.hmax() > h0:
cell_markers = fenics.MeshFunction("bool", mesh, mesh.topology().dim())
cell_markers.set_all(True)
mesh = fenics.refine(mesh, cell_markers)
return mesh
# local refinement
def local(self, deg, h0, mesh):
dim = 3 # space dimensions
TOL = fenics.DOLFIN_EPS_LARGE
singular_edges = self.polyhedron.edges[self.polyhedron.singular_edges - 1, :]
num_sin_edges = len(singular_edges)
# hmax_init = mesh.hmax()
for k in range(num_sin_edges):
if self.polyhedron.refine_flags[k] == 1:
v1 = singular_edges[k][0]
v2 = singular_edges[k][1]
p1 = self.polyhedron.corners[v1]
p2 = self.polyhedron.corners[v2]
R0 = (self.R0[v1] + self.R0[v2]) / 2
p1f = self.convert_to_fenics_format(p1)
p2f = self.convert_to_fenics_format(p2)
e = fenics.Point.distance(p1f, p2f)
gamma = 1. - self.polyhedron.refine_weights[k]
K0 = -np.log2(h0) * (2 * deg + dim) / (2 * gamma + dim - 2) - 1
K = np.ceil(K0)
NLocRef = int(dim * (K + 1) - 1)
print(gamma, K, NLocRef)
for l in range(NLocRef):
weight = 1. - gamma
expo = -2 * l * (deg + weight) / (dim * (2 * deg + dim))
h_min = h0 * np.power(2., expo)
R_max = R0 *
|
np.power(2., -l / dim)
|
numpy.power
|
# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for utils."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
import tensorflow.compat.v1 as tf
from saccader import utils
def _construct_images(batch_size):
image_shape = (50, 50, 3)
images = tf.convert_to_tensor(
np.random.randn(*((batch_size,) + image_shape)), dtype=tf.float32)
return images
def _construct_locations_list(batch_size, num_times):
locations_list = [
tf.convert_to_tensor(
np.random.rand(batch_size, 2) * 2 - 1, dtype=tf.float32)
for _ in range(num_times)
]
return locations_list
def _count_parameters(vars_list):
count = 0
for v in vars_list:
count += np.prod(v.get_shape().as_list())
return count
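# Worked example for _count_parameters above: a kernel of shape [3, 3, 16]
# plus a bias of shape [16] counts 3 * 3 * 16 + 16 = 160 parameters.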
class UtilsTest(tf.test.TestCase, parameterized.TestCase):
def test_draw_bounding_boxes(self):
batch_size = 5
images = _construct_images(batch_size)
image_shape = tuple(images.get_shape().as_list()[1:])
num_times = 2
box_shape = (12, 12)
locations_list = _construct_locations_list(batch_size, num_times)
normalized_box_size = box_shape[0] / float(image_shape[0])
images_with_boxes = utils.draw_bounding_boxes(images, locations_list,
normalized_box_size)
self.assertEqual((batch_size,) + image_shape,
self.evaluate(images_with_boxes).shape)
def test_location_diversity(self):
# Zero distance and sqrt(2) distance cases.
locations_list = [
tf.convert_to_tensor([[1, 1]], tf.float32),
tf.convert_to_tensor([[1, 1]], tf.float32),
tf.convert_to_tensor([[0, 0]], tf.float32)
]
diversity = utils.location_diversity(locations_list)
expected_result = np.mean([np.sqrt(2.), np.sqrt(2.), 0])
self.assertAlmostEqual(self.evaluate(diversity), expected_result, 5)
def test_vectors_alignment(self):
# Aligned and orthogonal case
locations_list = [
tf.convert_to_tensor([[1, 1], [1, 1], [-1, 1]], tf.float32)
]
alignment = utils.vectors_alignment(locations_list)
expected_result = np.mean([1, 0, 0])
self.assertAlmostEqual(self.evaluate(alignment), expected_result, 5)
def test_normalize_range(self):
min_value = -2
max_value = 5
x = tf.convert_to_tensor(np.random.randn(100), dtype=tf.float32)
x = utils.normalize_range(x, min_value=min_value, max_value=max_value)
x = self.evaluate(x)
self.assertEqual(x.min(), min_value)
self.assertEqual(x.max(), max_value)
def test_position_channels(self):
corner_locations = [
(-1, -1), # Upper left.
(-1, 1), # Upper right.
(1, -1), # Lower left.
(1, 1), # Lower right.
]
batch_size = len(corner_locations)
images = _construct_images(batch_size)
channels = utils.position_channels(images)
# Corner positions.
upper_left = channels[0][0, 0] # Should be position [-1, -1].
upper_right = channels[1][0, -1] # Should be position [-1, 1].
lower_left = channels[2][-1, 0] # Should be position [1, -1].
lower_right = channels[3][-1, -1] # Should be position [1, 1].
corners = (upper_left, upper_right, lower_left, lower_right)
corner_locations = tf.convert_to_tensor(corner_locations, dtype=tf.float32)
glimpses = tf.image.extract_glimpse(
channels,
size=(1, 1),
offsets=corner_locations,
centered=True,
normalized=True)
# Check shape.
self.assertEqual(channels.shape.as_list(),
images.shape.as_list()[:-1] + [
2,
])
corners, glimpses, corner_locations = self.evaluate((corners, glimpses,
corner_locations))
glimpses = np.squeeze(glimpses)
# Check correct corners
self.assertEqual(tuple(corners[0]), tuple(corner_locations[0]))
self.assertEqual(tuple(corners[1]), tuple(corner_locations[1]))
self.assertEqual(tuple(corners[2]), tuple(corner_locations[2]))
self.assertEqual(tuple(corners[3]), tuple(corner_locations[3]))
# Check match with extract_glimpse function.
self.assertEqual(tuple(corners[0]), tuple(glimpses[0]))
self.assertEqual(tuple(corners[1]), tuple(glimpses[1]))
self.assertEqual(tuple(corners[2]), tuple(glimpses[2]))
self.assertEqual(tuple(corners[3]), tuple(glimpses[3]))
def test_index_to_normalized_location(self):
image_size = 40
ground_truth = np.array([[-1, -1], [-1, 1], [1, -1], [1, 1]],
dtype="float32")
corner_ixs = tf.constant(
[[0, 0], [0, image_size], [image_size, 0], [image_size, image_size]],
dtype=tf.int32)
normalized_locations = utils.index_to_normalized_location(
corner_ixs, image_size)
normalized_locations = self.evaluate(normalized_locations)
self.assertEqual(np.sum(np.abs(normalized_locations - ground_truth)), 0.)
@parameterized.named_parameters(
("uniform_noise_cover", True),
("zero_cover", False),
)
def test_location_guide(self, uniform_noise):
image_shape = (20, 20, 3)
image = tf.constant(np.random.randn(*image_shape), dtype=tf.float32)
image, location, blocked_indicator = utils.location_guide(
image,
image_size=20,
open_fraction=0.2,
uniform_noise=uniform_noise,
block_probability=0.5)
image, location, blocked_indicator = self.evaluate((image, location,
blocked_indicator))
self.assertEqual(image.shape, image_shape)
self.assertEqual(location.shape, (2,))
def test_extract_glimpse(self):
batch_size = 50
glimpse_shape = (8, 8)
images = _construct_images(batch_size)
location_scale = 1. - float(glimpse_shape[0]) / float(
images.shape.as_list()[1])
locations = tf.convert_to_tensor(
2 *
|
np.random.rand(batch_size, 2)
|
numpy.random.rand
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# registration.py
"""
Classes for registering point sets.
Based on:
- Myronenko and <NAME> - 2010 - Point Set Registration Coherent Point Drift
DOI: 10.1109/TPAMI.2010.46
Copyright (c) 2018, <NAME>
"""
import itertools
import numpy as np
import scipy.spatial as spatial
import scipy.spatial.distance as distance
import scipy.linalg as la
from skimage.transform._geometric import _umeyama
# plotting
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
# get a logger
import logging
logger = logging.getLogger(__name__)
class BaseCPD(object):
"""Base class for the coherent point drift algorithm.
Based on:
Myronenko and <NAME> - 2010 - Point Set Registration Coherent Point Drift
DOI: 10.1109/TPAMI.2010.46
"""
def __init__(self, X: np.ndarray, Y: np.ndarray):
"""Set up the registration class that will actually perform the CPD algorithm.
Parameters
----------
X : ndarray (N, D)
Fixed point cloud, an N by D array of N original observations in an n-dimensional space
Y : ndarray (M, D)
Moving point cloud, an M by D array of M original observations in an n-dimensional space
"""
# save an internal copy to make sure nothing is being mucked with
self.X = self._X = X
self.Y = self._Y = Y
# extract the dimensions
self.N, self.D = self.X.shape
self.M, D = self.Y.shape
assert D == self.D, "Point clouds have different dimensions"
assert self.N, "X has no points"
assert self.M, "Y has no points"
def __repr__(self) -> str:
"""Nice representation of the model."""
basestr = "Model = {}, X = {}, Y = {}".format(self.__class__, self.X.shape, self.Y.shape)
try:
extrastr = ", w = {}, i = {}, Q = {}, B = {}, t = {}".format(
self.w, self.iteration, self.Q, self.B, self.translation
)
except AttributeError:
extrastr = ", registration not run"
return basestr + extrastr
@property
def scale(self):
"""Return the estimated scale of the transformation matrix"""
return self.B.mean(axis=1)
@property
def matches(self) -> np.ndarray:
"""Return X, Y matches."""
return np.where(self.p_old > max(min(self.w, 0.9), np.finfo(float).eps))[::-1]
def _estimate(self):
"""Overload in the child classes."""
raise NotImplementedError
def estimate(self):
"""Estimate the simple transform for matching pairs."""
logger.debug(
"Doing a simple estimation of the transformation for {}".format(self.__class__)
)
self._estimate()
# this assumes it's being called from a child class
self.updateTY()
# the match matrix is just the identity matrix by definition
self.p_old = np.eye(self.N, self.M)
# these need to be filled in for the plotting and str functions to work
self.iteration = "N/A"
self.w = 0
self.Q = "N/A"
def plot(self, only2d=False):
"""Plot the results of the registration."""
if self.X.shape[-1] > 1:
if self.X.shape[-1] > 2 and not only2d:
projection = "3d"
s = slice(None, 3)
else:
projection = None
s = slice(None, 2)
fig = plt.figure(figsize=(8, 4))
ax0 = fig.add_subplot(121, projection=projection)
ax1 = fig.add_subplot(122)
axs = (ax0, ax1)
ax0.scatter(*self.Y.T[s], marker=".", c="g")
ax0.scatter(*self.TY.T[s], marker="o", c="b")
ax0.scatter(*self.X.T[s], marker="x", c="r")
ax0.quiver(*self.Y.T[s], *(self.TY.T[s] - self.Y.T[s]), color="orange", pivot="tail")
if projection is None:
ax0.set_aspect("equal")
ax0.set_title(
"RMSE = {:.3f}, i = {}\ntvec = {}".format(
self.rmse, self.iteration, self.translation
)
)
else:
fig, ax1 = plt.subplots(1)
axs = (ax1,)
ax1.matshow(self.p_old)
ax1.set_aspect("auto")
ax1.set_title(
"Num pnts = {}, numcorr = {}".format(len(self.TY), (self.p_old > self.w).sum())
)
return fig, axs
def transform(self, other: np.ndarray) -> np.ndarray:
"""Transform `other` point cloud via the Y -> X registration."""
return other @ self.B.T + self.translation
def updateTY(self):
"""Update the transformed point cloud and distance matrix."""
self.TY = self.transform(self.Y)
# we need to update the distance matrix too
# This gives us a matrix of ||x - T(y)||**2, eq (1)
# But we want the rows to be m and columns n
self.dist_matrix = distance.cdist(self.TY, self.X, "sqeuclidean")
# make sure we have the right shape
assert self.dist_matrix.shape == (self.M, self.N), "Error with dist_matrix"
# these are defined in the paper but not used, included here for completeness
# @property
# def pGMM(self):
# """The probability density of the gaussian mixture model along the fixed points"""
# norm_factor = self.M * (2 * np.pi * self.var) ** (self.D / 2)
# p_mat = np.exp(- self.dist_matrix / 2 / self.var) / norm_factor
# # sum along the fixed points
# return p_mat.sum(0)
# @property
# def p(self):
# """The total probability including the uniform distribution"""
# return self.w / self.N + (1 - self.w) * self.pGMM
def estep(self):
"""Do expectation step were we calculate the posterior probability of the GMM centroids."""
# calculate "P_old" via equation 6
p_mat = np.exp(-self.dist_matrix / 2 / self.var)
c = (2 * np.pi * self.var) ** (self.D / 2)
c *= self.w / (1 - self.w)
c *= self.M / self.N
# sum along the moving points, i.e. along M
denominator = p_mat.sum(0, keepdims=True)
assert denominator.shape == (1, self.N), "Calculation of denominator failed {}".format(
denominator.shape
)
# check if denominator is all zeros, which means p_mat is all zeros
if (denominator <= np.finfo(float).eps).all():
# then the final p should just be a uniform distribution
# should log or warn user this is happening
logger.debug("P_mat is null, resetting to uniform probabilities")
p_old = np.ones_like(p_mat) / self.M
else:
if c < np.finfo(float).eps:
logger.debug("c is small, setting to eps")
c = np.finfo(float).eps
p_old = p_mat / (denominator + c)
# compute Np, make sure it's neither zero nor more than N
self.Np = min(self.N, max(p_old.sum(), np.finfo(float).eps))
# update Q so we can track convergence using equation (5)
self.Q = (p_old * self.dist_matrix).sum() / 2 / self.var + self.Np * self.D * np.log(
self.var
) / 2
# store p_old
self.p_old = p_old
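# Reading of the E-step above (eq. (6) of the paper): with squared
# distances d_mn = ||x_n - T(y_m)||^2,
#     P_mn = exp(-d_mn / (2 var)) / (sum_k exp(-d_kn / (2 var)) + c)
#     c    = (2 pi var)^(D/2) * (w / (1 - w)) * (M / N)
# so the uniform-outlier weight w absorbs probability mass that the GMM
# centroids cannot explain.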
def updateB(self):
"""Update B matrix.
This is the only method that needs to be overloaded for the various linear transformation subclasses,
more will need to be done for non-rigid transformation models.
"""
raise NotImplementedError
def mstep(self):
"""Maximization step.
Update the transformation and variance; these are the transposes of the equations on p. 2265 and 2266.
"""
# calculate intermediate values
mu_x = (self.p_old @ self.X).sum(0, keepdims=True) / self.Np
mu_y = (self.p_old.T @ self.Y).sum(0, keepdims=True) / self.Np
assert mu_x.size == mu_y.size == self.D, "Dimensions on mu's are wrong"
Xhat = self.Xhat = self.X - mu_x
Yhat = self.Yhat = self.Y - mu_y
# calculate A
self.A = A = Xhat.T @ self.p_old.T @ Yhat
# calculate B
B = self.updateB()
# calculate translation
self.translation = mu_x - mu_y @ B.T
# calculate estimate of variance
self.var = np.trace(Xhat.T @ np.diag(self.p_old.sum(0)) @ Xhat) - np.trace(A @ B.T)
self.var /= self.Np * self.D
logger.debug("Variance is {}".format(self.var))
# make sure self.var is positive
if self.var < np.finfo(float).eps:
# self.var = np.finfo(float).eps
self.var = self.tol
logger.warning(
"Variance has dropped below machine precision, setting to {}".format(self.var)
)
# self.var = self.init_var = self.init_var * 2
# self.translation = -self.Y.mean(axis=0) + self.X.mean(axis=0)
# print("Var small resetting to", self.var)
def calc_var(self):
"""Calculate variance in transform."""
return self.dist_matrix.sum() / (self.D * self.N * self.M)
@property
def rmse(self):
"""Return RMSE between X and transformed Y."""
# need to weight the RMSE by the probability matrix ...
return np.sqrt((self.p_old * self.dist_matrix).mean())
# return np.sqrt(((self.X - self.TY)**2).sum(1)).mean()
def calc_init_scale(self):
"""Need to overloaded in child classes."""
raise NotImplementedError
def norm_data(self):
"""Normalize data to mean 0 and unit variance."""
# calculate mean displacement
logger.debug("Normalizing data")
self.ty = self.Y.mean(0, keepdims=True)
self.tx = self.X.mean(0, keepdims=True)
logger.debug("tx = {}, ty = {}".format(self.tx, self.ty))
# move point clouds
self._Y_orig = self.Y
self._X_orig = self.X
self.Y = self.Y - self.ty
self.X = self.X - self.tx
# calculate scale
self.calc_init_scale()
logger.debug("scale_x = {}, scale_y = {}".format(self.scale_x, self.scale_y))
# apply scale
Sx = np.diag(self.scale_x)
Sy = np.diag(self.scale_y)
Sx_1 = np.diag(1 / self.scale_x)
Sy_1 = np.diag(1 / self.scale_y)
self.Y = self.Y @ Sy
self.X = self.X @ Sx
self.translation = (self.ty @ self.B.T + self.translation - self.tx) @ Sx
logger.debug(f"B = {self.B}")
self.B = Sx @ self.B @ Sy_1
logger.debug(f"B = {self.B}")
def unnorm_data(self):
"""Undo the intial normalization."""
logger.debug("Undoing normalization")
logger.debug("tx = {}, ty = {}".format(self.tx, self.ty))
logger.debug("scale_x = {}, scale_y = {}".format(self.scale_x, self.scale_y))
Sx = np.diag(self.scale_x)
Sy = np.diag(self.scale_y)
Sx_1 = np.diag(1 / self.scale_x)
Sy_1 = np.diag(1 / self.scale_y)
# the scale matrices are diagonal so S.T == S
self.Y = self.Y @ Sy_1 + self.ty
self.X = self.X @ Sx_1 + self.tx
assert np.allclose(self.Y, self._Y_orig), "Failed to revert"
assert np.allclose(self.X, self._X_orig), "Failed to revert"
# B doesn't need to be transposed
self.B = Sx_1 @ self.B @ Sy
self.translation = -self.ty @ self.B.T + self.translation @ Sx_1 + self.tx
def __call__(
self, tol=1e-6, dist_tol=0, maxiters=1000, init_var=None, weight=0, normalization=True
):
"""Perform the actual registration.
Parameters
----------
tol : float
dist_tol : float
Stop the iteration if the average distance between matching points is
less than this number. This is really only necessary for synthetic data
with no noise
maxiters : int
init_var : float
weight : float
B : ndarray (D, D)
translation : ndarray (1, D)
"""
# initialize transform
self.translation = np.ones((1, self.D))
self.B = np.eye(self.D)
self.tol = tol
# update to the initial position
if normalization:
self.norm_data()
self.updateTY()
# set up initial variance
if init_var is None:
init_var = self.calc_var()
self.var = self.init_var = init_var
logger.debug("self.init_var = {}".format(self.var))
# initialize the weight of the uniform distribution
assert 0 <= weight < 1, "Weight must be between 0 and 1"
self.w = weight
for self.iteration in range(maxiters):
# do iterations expectation, maximization followed by transformation
self.estep()
self.mstep()
self.updateTY()
if self.iteration > 0:
# now update Q to follow convergence
# we want to minimize Q so Q_old should be more positive than the new Q
Q_delta = np.abs(self.Q_old - self.Q) # / np.abs(self.Q_old)
if Q_delta < 0:
logger.warning("Q_delta = {}".format(Q_delta))
logger.debug("Q_delta = {}".format(Q_delta))
if Q_delta <= tol:
logger.info("Objective function converged, Q_delta = {:.3e}".format(Q_delta))
break
if self.rmse <= dist_tol:
logger.info("Average distance converged")
break
self.Q_old = self.Q
else:
logger.warning(
(
"Maximum iterations ({}) reached without" + " convergence, final Q = {:.3e}"
).format(self.iteration, self.Q)
)
# update p matrix once more
self.estep()
# unnorm the data and apply the final transformation.
if normalization:
self.unnorm_data()
self.updateTY()
return self.TY
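# Usage sketch (synthetic data, not part of the original module): build a
# rotated, shifted copy of a point cloud and recover the transform with the
# rigid model defined below. Names and numbers here are illustrative only.
def _demo_rigid_registration(n_points=50, seed=0):
    rng = np.random.RandomState(seed)
    X = rng.randn(n_points, 2)
    theta = np.deg2rad(15.0)
    R = np.array([[np.cos(theta), -np.sin(theta)],
                  [np.sin(theta), np.cos(theta)]])
    t = np.array([0.3, -0.1])
    # choose Y so that X == Y @ R.T + t, the convention used by transform()
    Y = (X - t) @ R
    reg = RigidCPD(X, Y)
    reg(tol=1e-8, maxiters=500, weight=0.0)
    # reg.B should be close to R and reg.translation close to t
    return reg.B, reg.translation, reg.rmse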
class TranslationCPD(BaseCPD):
"""Coherent point drift with a translation only transformation model."""
def updateB(self):
"""Update step.
Translation only means that B should be identity.
"""
self.B = np.eye(self.D)
return self.B
def calc_init_scale(self):
"""For translation only we need to calculate a uniform scaling."""
anisotropic_scale = np.concatenate((self.X, self.Y)).std(0)
self.scale_x = self.scale_y = 1 / anisotropic_scale
def _estimate(self):
"""Estimate the translation transform."""
self.B, self.translation = np.eye(self.D), (self.X - self.Y).mean(0, keepdims=True)
class SimilarityCPD(BaseCPD):
"""Coherent point drift with a similarity (translation, rotation and isotropic scaling) transformation model."""
# this class is specifically designed so that it can be easily subclassed to represent
# a rigid transformation model.
def calculateR(self):
"""Calculate the estimated rotation matrix, eq. (9)."""
U, S, VT = la.svd(self.A)
c = np.ones_like(S)
c[-1] = la.det(U @ VT)
C = np.diag(c)
self.R = U @ C @ VT
return self.R
def calculateS(self):
"""Calculate the scale factor, Fig 2 p. 2266."""
a = self.Yhat.T @ np.diag(self.p_old.sum(1)) @ self.Yhat
self.s = np.trace(self.A.T @ self.R) / np.trace(a)
return self.s
def updateB(self):
"""Update B: in this case is just the rotation matrix multiplied by the scale factor."""
R = self.calculateR()
s = self.calculateS()
self.B = s * R
return self.B
def calc_init_scale(self):
"""Calculate scale: for similarity we have isotropic scaling for each point cloud."""
# we can prescale by the same anisotropic scaling factor we use in
# TranslationCPD and then augment it by an isotropic scaling factor
# for each point cloud.
anisotropic_scale = np.concatenate((self.X, self.Y)).std()
# self.scale_x = anisotropic_scale / self.X.var()
# self.scale_y = anisotropic_scale / self.Y.var()
# NOTE: the above doesn't work
self.scale_x = self.scale_y = 1 / np.array((anisotropic_scale, anisotropic_scale))
def _umeyama(self):
"""Calculate Umeyama: for similarity we want to have scaling."""
# the call signature for _umeyama is (src, dst)
# which is the reverse of ours
return _umeyama(self.Y, self.X, True)
def _estimate(self):
"""Estimate the similarity transform."""
T = self._umeyama()
D = self.D
# T is in the usual orientation
B = T[:D, :D]
translation = T[:D, -1:].T
assert np.allclose(
T[-1, :], np.concatenate((np.zeros(D), np.ones(1)))
), "Error, T = {}".format(T)
self.B, self.translation = B, translation
class RigidCPD(SimilarityCPD):
"""Coherent point drift with a rigid or Euclidean (translation and rotation) transformation model."""
def calculateS(self):
"""No scaling for this guy."""
return 1
def _umeyama(self):
"""For this class we want to have _umeyama without scaling."""
# the call signature for _umeyama is (src, dst)
# which is the reverse of ours
return _umeyama(self.Y, self.X, False)
# for rigid we also want to avoid anything other than uniform scaling
calc_init_scale = TranslationCPD.calc_init_scale
EuclideanCPD = RigidCPD
class AffineCPD(BaseCPD):
"""Coherent point drift with a similarity (translation, rotation, shear and anisotropic scaling) transformation model."""
def updateB(self):
"""Solve for B using equations in Fig. 3 p. 2266."""
a = self.Yhat.T @ np.diag(self.p_old.sum(1)) @ self.Yhat
# solve B = self.A @ np.inv(a) == B @ a = self.A == a.T @ B.T = self.A.T
# self.B = la.solve(a.T, self.A.T).T
# a is a symmetric matrix
self.B = la.solve(a, self.A.T).T
return self.B
def calc_init_scale(self):
"""Calculate scale."""
# For affine we have anisotropic scaling for each point cloud along each dimension
self.scale_x = 1 / self.X.std(0)
self.scale_y = 1 / self.Y.std(0)
def _estimate(self):
"""Estimate the affine transformation for a set of corresponding points."""
# affine is quite simple, we want to solve the equation A @ Y = X
# or Y.T @ A.T = X.T
# where Y and X are augmented matrices (an extra row of ones)
# https://en.wikipedia.org/wiki/Affine_transformation#Augmented_matrix
aug_X = np.hstack((self.X, np.ones((self.N, 1))))
aug_Y = np.hstack((self.Y, np.ones((self.N, 1))))
# pull the dimension out
D = self.D
# solve for matrix transforming Y to X
T, res, rank, s = la.lstsq(aug_Y, aug_X)
# remember that B = A.T (A not augmented)
B = T[:D, :D].T
# we want to keep the extra dimension for translation
translation = T[-1:, :D]
# make sure that the solution makes sense (last column should be 1 | 0)
assert np.allclose(
T[:, -1], np.concatenate((np.zeros(D), np.ones(1)))
), "Error\nT = {}\nX = {}\nY = {}".format(T, self.X, self.Y)
self.B, self.translation = B, translation
# a dictionary to choose models from
model_dict = {
"translation": TranslationCPD,
"rigid": RigidCPD,
"euclidean": EuclideanCPD,
"similarity": SimilarityCPD,
"affine": AffineCPD,
}
def choose_model(model):
"""Choose model if string."""
if isinstance(model, str):
model = model_dict[model.lower()]
elif not issubclass(model, BaseCPD):
raise ValueError("Model {} is not recognized".format(model))
return model
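# Illustrative sketch (not part of the original module): choose_model accepts either a
# key of model_dict (case-insensitive string) or a BaseCPD subclass and returns the class.
#     cls = choose_model("affine")        # -> AffineCPD
#     cls = choose_model(TranslationCPD)  # subclasses pass through unchanged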
def auto_weight(X, Y, model, resolution=0.01, limits=0.05, **kwargs):
"""Automatically determine the weight to use in the CPD algorithm.
Parameters
----------
X : ndarray (N, D)
Fixed point cloud, an N by D array of N observations in a D-dimensional space
Y : ndarray (M, D)
Moving point cloud, an M by D array of M observations in a D-dimensional space
model : str or BaseCPD child class
The transformation model to use, available types are:
Translation
Rigid
Euclidean
Similarity
Affine
resolution : float
the resolution at which to sample the weights
limits : float or length 2 iterable
The limits of weight to search
kwargs : dictionary
Keyword arguments to pass to the model when it is called.
"""
# test inputs
model = choose_model(model)
try:
# the user has passed low and high limits
limit_low, limit_high = limits
except TypeError:
# the user has passed a single limit
limit_low = limits
limit_high = 1 - limits
# generate weights to test
ws = np.arange(limit_low, limit_high, resolution)
# container for various registrations
regs = []
# iterate through weights
for w in ws:
kwargs.update(weight=w)
reg = model(X, Y)
reg(**kwargs)
regs.append(reg)
# if the dimension of the data is less than 3 use the 1 norm
# else use the frobenius norm. This is a heuristic based on simulated data.
if reg.D < 3:
norm_type = 1
else:
norm_type = "fro"
# look at all the norms of the match matrices (The match matrix should be sparse
# and the norms we've chosen maximize sparsity)
norm = np.asarray([np.linalg.norm(reg.p_old, norm_type) for reg in regs])
# find the weight that maximizes sparsity
w = ws[norm.argmax()]
# update and run the model
kwargs.update(weight=w)
reg = model(X, Y)
reg(**kwargs)
# return the model to the user
return reg
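# Hedged usage sketch (X and Y are assumed (N, D) and (M, D) float arrays):
#     reg = auto_weight(X, Y, model="rigid", resolution=0.02, limits=0.1)
#     aligned = reg.TY  # transformed moving point cloud of the best-weight registration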
def nearest_neighbors(fids0, fids1, r=100, transform=lambda x: x, coords=["x0", "y0"]):
"""Find nearest neighbors in both sets."""
idx00, idx01 = closest_point_matches(
fids0[coords].values, transform(fids1[coords].values), r=r
)
idx11, idx10 = closest_point_matches(
transform(fids1[coords].values), fids0[coords].values, r=r
)
fids0_filt = fids0.iloc[idx10]
fids1_filt = fids1.iloc[idx01]
idx0, idx1 = closest_point_matches(
fids0_filt[coords].values, transform(fids1_filt[coords].values), r=r
)
return fids0_filt.iloc[idx0], fids1_filt.iloc[idx1]
# def normed_rmse(reg):
# reg.norm_data()
# reg.updateTY()
# rmse = reg.rmse
# reg.unnorm_data()
# reg.updateTY()
# return rmse
def align(
fids0,
fids1,
atol=1,
rtol=1e-3,
diagnostics=False,
model="translation",
only2d=False,
iters=100,
):
"""Align two slabs fiducials, assumes that z coordinate has been normalized."""
model = choose_model(model)
def register(fids0_filt, fids1_filt, coords):
reg = model(fids0_filt[coords].values, fids1_filt[coords].values)
try:
reg.estimate()
except ValueError:
reg(weight=0.05)
return reg
def sub_func(rmse, transform, coords):
for i in range(iters):
r = max(rmse * 2, 1)
try:
fids0_filt, fids1_filt = nearest_neighbors(
fids0, fids1, r=r, transform=transform, coords=coords
)
except ValueError:
rmse *= 2
continue
reg = register(fids0_filt, fids1_filt, coords)
transform = reg.transform
rmse_new = reg.rmse
rmse_rel = (rmse - rmse_new) / rmse
if rmse_new < atol or rmse_rel < rtol:
break
rmse = rmse_new
else:
logger.error(
"{} failed, rmse = {}, rel = {}, i = {}".format(coords, rmse_new, rmse_rel, i)
)
logger.info(
"{} succeeded, rmse = {}, rel = {}, i = {}".format(coords, rmse_new, rmse_rel, i)
)
if diagnostics:
reg.plot()
return reg, fids0_filt, fids1_filt
reg2d, fids0_filt, fids1_filt = sub_func(50, lambda x: x, ["x0", "y0"])
if only2d:
return reg2d
new_transform = register(fids0_filt, fids1_filt, ["x0", "y0", "z0"]).transform
reg3d, _, _ = sub_func(reg2d.rmse, new_transform, ["x0", "y0", "z0"])
return reg3d
def closest_point_matches(X, Y, method="tree", **kwargs):
"""Keep determine the nearest neighbors in two point clouds.
Parameters
----------
X : ndarray (N, D)
Y : ndarray (M, D)
kwargs
------
r : float
The search radius for nearest neighbors
Returns
-------
xpoints : ndarray
indices of points in X that have a neighbor in Y
ypoints : ndarray
indices of points in Y that have a neighbor in X
"""
if method.lower() == "tree":
return _keepclosesttree(X, Y, **kwargs)
elif method.lower() == "brute":
return _keepclosestbrute(X, Y, **kwargs)
else:
raise ValueError("Method {} not recognized".format(method))
def _keepclosestbrute(X, Y, r=10, percentile=None):
# calculate the distance matrix
dist_matrix = distance.cdist(X, Y)
# if user requests percentile
if percentile is not None:
r = np.percentile(dist_matrix, percentile * (len(X) + len(Y)) / (len(X) * len(Y)))
logger.debug(
"r = {}, fraction pairs kept = {}".format(r, (dist_matrix < r).sum() / dist_matrix.size)
)
result = [np.unique(a) for a in np.where(dist_matrix < r)]
# log percentages
logger.debug(
"percentage x kept = {}, y kept = {}".format(
*[len(a) / len(aa) for a, aa in zip(result, (X, Y))]
)
)
return result
def _keepclosesttree(X, Y, r=10):
# build the trees
ytree = spatial.cKDTree(Y)
# find nearest neighbors to each point in X in Y, ii are the indices of ytree that match X
dd, ii = ytree.query(X)
# filter
filt = dd < r
xpoints = np.arange(len(X))[filt]
ypoints = ii[filt]
# check for duplicate indices
uypoints = np.unique(ypoints)
uxpoints = np.unique(xpoints)
if uxpoints.size < xpoints.size or uypoints.size < ypoints.size:
logger.debug("taking unique points")
xpoints, ypoints = uxpoints, uypoints
logger.debug(
"percentage x kept = {}, y kept = {}".format(len(xpoints) / len(X), len(ypoints) / len(Y))
)
return xpoints, ypoints
# These are tools to propagate slab to slab alignment
def to_augmented(B, t):
"""Convert transform matrix and translation vector to an augmented transformation matrix.
https://en.wikipedia.org/wiki/Affine_transformation#Augmented_matrix
"""
# get the dimension of the transformation
d = len(B)
# build augmented matrix
aug_B =
|
np.zeros((d + 1, d + 1))
|
numpy.zeros
|
from functools import cached_property
import os.path
import numpy as np
import pandas as pd
from scipy.optimize import curve_fit
class MeasOsc:
"""Wavesurfer 3024z oscilloscope measurement files"""
def __init__(self, path: str):
data = pd.read_csv(
path,
header=None, skiprows=6,
names=["0", "1", "2", "t", "V"],
engine="c"
)
self.t = data["t"].to_numpy()
self.voltage = data["V"].to_numpy()
@property
def zero_level(self) -> float:
return self.voltage[:self.voltage.size//3].mean()
@property
def peak_height(self) -> float:
peak_ind =
|
np.argmax(self.voltage)
|
numpy.argmax
|
import basevcstest
import vcs
import numpy
class TestVCSBoxfill10x10Numpy(basevcstest.VCSBaseTest):
def testBoxfill10x10Numpy(self):
s = numpy.sin(numpy.arange(100))
s =
|
numpy.reshape(s, (10, 10))
|
numpy.reshape
|
import os
import math
import time
from datetime import datetime
import numpy as np
from sqlalchemy import create_engine
# global variable
RADIUS_EARTH = 6371e3
def get_env_variable(var_name):
'''Get the environment variable or return exception.'''
try:
return os.environ[var_name]
except KeyError:
error_msg = 'Set the {} environment variable'.format(var_name)
raise KeyError(error_msg)
def create_db_engine(user, password, host, port, db):
'''Return database engine from user, password, db, host and port'''
url_pattern = 'postgresql://{user}:{password}@{host}:{port}/{db}'
url = url_pattern.format(
user=user,
password=password,
host=host,
port=port,
db=db)
return create_engine(url, client_encoding='utf8')
def distance_two_dimensions_coordinates(this_coordinate, that_coordinate):
'''Measure the great-circle distance in meters between two 2-d points (latitude, longitude).
3-d points (latitude, longitude, altitude) are also accepted, but the altitude is ignored.
For variable naming refer to: https://www.movable-type.co.uk/scripts/latlong.html'''
lat1, lon1, *_rest = this_coordinate
lat2, lon2, *_rest = that_coordinate
dlat = math.radians(lat2-lat1)
dlon = math.radians(lon2-lon1)
lat1, lat2 = math.radians(lat1), math.radians(lat2)
a = math.sin(dlat/2)**2 + (
math.cos(lat1) * math.cos(lat2) * (math.sin(dlon/2)**2))
c = 2 * math.atan2(math.sqrt(a), math.sqrt(1-a))
d = RADIUS_EARTH * c
return d
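# Worked example (hedged, rounded): the great-circle distance between Paris
# (48.8566, 2.3522) and London (51.5074, -0.1278) computed this way is roughly
# 3.4e5 m (about 340 km).
#     distance_two_dimensions_coordinates((48.8566, 2.3522), (51.5074, -0.1278))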
def distance_three_dimensions_coordinates(this_coordinate, that_coordinate):
'''Measure distance in meters between two 3-d points (latitude, longitude, altitude).
For variable naming refer to: https://www.movable-type.co.uk/scripts/latlong.html'''
this_rect_coordinate = get_cartesian_coordinates(this_coordinate)
that_rect_coordinate = get_cartesian_coordinates(that_coordinate)
distance = np.linalg.norm(
np.array(this_rect_coordinate)-
|
np.array(that_rect_coordinate)
|
numpy.array
|
import numpy as np
from typing import Union
from numpy import vectorize, ndarray
def getOpticsKernels(name: str, alpha: float = 1.):
"""
#TODO: docs
Set of kernels function from physical optics
Parameters
----------
name : {"rectangular", "diffraction", "gaussian", "triangular", "dispersive"}
name of kernels
alpha : float
parameter of kernels
Returns
-------
"""
if name=="rectangular":
return lambda x,y:rectangular(x-y, alpha)
elif name=="diffraction":
return lambda x, y: diffraction(x - y, alpha)
elif name=="gaussian":
return lambda x, y: gaussian(x - y, alpha)
elif name=="triangular":
return lambda x, y: triangular(x - y, alpha)
elif name=="dispersive":
return lambda x, y: dispersive(x - y, alpha)
elif name=="exponential":
return lambda x, y: exponential(x - y, alpha)
else:
raise ValueError('Bad name of kernel')
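# Hedged usage sketch: alpha acts as the kernel width (for the Gaussian kernel it plays
# the role of the FWHM) and the returned object is a two-argument callable K(x, y).
#     K = getOpticsKernels("gaussian", alpha=0.5)
#     value = K(0.1, 0.0)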
## Set of differential kernels from optics
# slit-shaped (rectangular)
def rectangular(x : Union[ndarray, float], alpha: float = 1.) -> Union[ndarray, float]:
if type(x) != np.ndarray:
if (np.abs(x)/alpha < 0.5):
return 1./alpha
else:
return 0.
else:
indx = np.abs(x)/alpha < 0.5
return (indx)/alpha
# diffraction
def diffraction(x : Union[ndarray, float], alpha: float = 1.) -> Union[ndarray, float]:
s0 = alpha/0.886
res = (np.sin(np.pi * x / s0) / (np.pi * x / s0)) ** 2 / (s0)
return res
# Gaussian
def gaussian(x : Union[ndarray, float], alpha: float = 1.) -> Union[ndarray, float]:
return (2/alpha)*np.sqrt(np.log(2)/np.pi)*np.exp(-4*
|
np.log(2)
|
numpy.log
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
# This is the core script used for all experiments in the Turn-taking prediction
# of the EgoCom paper. This script requires the preprocessed hd5 named as:
# 'egocom_feature_data_normalized_history_{}_future_{}_binary.hdf5'
# These are already pre-computed in the Dataset release, but can be recomputed
# via `nn_preprocess_data_for_training`
# Example calls of this script are provided in `turntaking_script_examples.bash`
# Imports
# Data processing
import pickle
import pandas as pd
import numpy as np
# Multiprocessing
import itertools
import torch
import torch.nn as nn
# Need these to use EgoCom validation dataset
from skorch.helper import predefined_split
from skorch.dataset import Dataset
from skorch.callbacks import Checkpoint
# Other imports
import datetime
import os
import argparse
# Used to parse command line arguments
parser = argparse.ArgumentParser(description='EgoCom Turn-Taking Prediction')
parser.add_argument('--param-idx', default=0, type=int,
help='Specifies which block of hyper parameters '
'you will train here, among the num-param-blocks. '
'The index is zero-based, '
'e.g. if num-param-blocks is 4, then this value can '
'be 0, 1, 2, or 3.')
parser.add_argument('--num-param-blocks', default=1, type=int,
help='If you want to break up hyper-param optimization '
'across multiple GPUs, specify the number of GPUs '
'here and be sure to also specify --param-idx, '
'which specifies which block of the parameters '
'you will train here, among the num-param-blocks.')
parser.add_argument('--use-all-perspectives', default=False,
action='store_true',
help='Only applies for binary prediction tasks. '
'If True, combine all three speakers synchronized '
'perspectives, such that at each second of data, '
'all three speakers features are present. This '
'effectively widens the training data to three '
'times but reduces the number of data points '
'by a third.')
parser.add_argument('--predict-only-host', default=False, action='store_true',
help='Only applies for binary prediction tasks. '
'If True, only predict the hosts labels using either '
'(1) only the host data or (2) the combined data '
'if use_all_perspectives == True.')
parser.add_argument('--include-prior', default=None, type=str,
help='By default (None) this will train both a model '
'with a prior and a model without a prior. '
'Set to "true" to include the label of the current '
'speaker when predicting who will be speaking in '
'the future. Set to "false" to not include prior label '
'information. You can think of this as a prior on '
'the person speaking, since the person who will be '
'speaking is highly related to the person who is '
'currently speaking.')
parser.add_argument('--prediction-task', default='binary', type=str,
help='Set to "multi" to predict the label of the person'
'who will be speaking the future, a multi-class task.'
'Set to "binary" to predict if a given speaker'
'will be speaking in the future.')
parser.add_argument('--epochs', default=20, type=int,
help='Number of epochs for training.')
parser.add_argument('--use-crossval', default=False, action='store_true',
help='Optimize hyper-parameters with cross-validation. '
'This script **no longer** supports cross-validation '
'because EgoCom has a predefined test set. '
'Never set this to True. Included for compatibility.')
parser.add_argument('--seed', default=0, type=int,
help='Seed for stochastic code for reproducibility.')
# Extract argument flags
args = parser.parse_args()
param_idx = args.param_idx
num_param_blocks = args.num_param_blocks
use_all_perspectives = args.use_all_perspectives
predict_only_host = args.predict_only_host
if args.include_prior is None:
include_prior_list = [True, False]
elif args.include_prior.lower() == 'true':
include_prior_list = [True]
elif args.include_prior.lower() == 'false':
include_prior_list = [False]
else:
raise ValueError('--include-prior should be None, "true", or "false"')
prediction_task = args.prediction_task
epochs = args.epochs
use_crossval = args.use_crossval
seed = args.seed
# Make sure flag values are valid.
assert (use_all_perspectives, predict_only_host) in [
(True, True), (False, True), (False, False)]
# PyTorch and Skorch imports needed based on prediction task.
if prediction_task == 'multi':
from skorch import NeuralNetClassifier
elif prediction_task == 'binary':
from skorch import NeuralNetBinaryClassifier
else:
assert args.prediction_task in ['binary', 'multi']
flag_settings = {
'param_idx': param_idx,
'num_param_blocks': num_param_blocks,
'use_all_perspectives': use_all_perspectives,
'predict_only_host': predict_only_host,
'include_prior': include_prior_list,
'prediction_task': prediction_task,
'epochs': epochs,
'use_crossval': use_crossval,
'seed': seed,
}
print('Running with settings:', flag_settings)
# Location where dataset and pre-processed data is stored.
egocom_loc = "/datasets/cgn/EGOCOM/egocom_features/no_audio/"
# Seed everything for reproducibility
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
|
np.random.seed(seed)
|
numpy.random.seed
|
import gtimer as gt
import numpy as np
import scipy as sp
import torch
import math
import copy
import logging
from robolearn.algorithms.rl_algos import RLAlgorithm
from robolearn.utils.logging import logger
import robolearn.torch.utils.pytorch_util as ptu
# from robolearn.utils.plots.core import subplots
from collections import OrderedDict
from robolearn.algorithms.rl_algos import ConstantPolicyPrior
from robolearn.algorithms.rl_algos import generate_noise
from robolearn.algorithms.rl_algos import IterationData
from robolearn.algorithms.rl_algos import TrajectoryInfo
from robolearn.algorithms.rl_algos import PolicyInfo
from robolearn.algorithms.rl_algos import DynamicsLRPrior
from robolearn.algorithms.rl_algos import DynamicsPriorGMM
from robolearn.algorithms.rl_algos import TrajOptLQR
class MDGPS(RLAlgorithm):
def __init__(self,
env,
local_policies,
global_policy,
cost_fcn,
eval_env=None,
train_cond_idxs=None,
test_cond_idxs=None,
num_samples=1,
test_samples=1,
noisy_samples=True,
noise_hyperparams=None,
seed=10,
base_kl_step=0.1,
global_opt_iters=5000,
global_opt_batch_size=64,
global_opt_lr=1e-5,
traj_opt_prev='nn_pol',
traj_opt_iters=1,
traj_opt_min_eta=1e-8,
traj_opt_max_eta=1e16,
**kwargs):
# TO DEFINE
self._fit_dynamics = True
self._initial_state_var = 1.0e-2
self._global_opt_batch_size = global_opt_batch_size
self._global_opt_iters = global_opt_iters
self._global_opt_ent_reg = 0.0 # For update pol variance
self._global_pol_sample_mode = 'add'
self._global_opt_lr = global_opt_lr
self._global_samples_counter = 0
self._first_global_eval = False
self.base_kl_step = base_kl_step
self._max_step_mult = 3.0
self._min_step_mult = 0.5
self._kl_step_rule = 'laplace'
self._traj_opt_iters = traj_opt_iters
self._max_ent_traj = 0.0
self._traj_opt_prev = traj_opt_prev
self.T = kwargs['max_path_length']
self._num_samples = num_samples
self._test_samples = test_samples
self._train_cond_idxs = train_cond_idxs
self._test_cond_idxs = test_cond_idxs
# Get dimensions from the environment
self.dU = env.action_dim
self.dX = env.obs_dim # TODO: DOING THIS TEMPORARILY
self.dO = env.obs_dim
# Number of initial conditions
self.M = len(local_policies)
exploration_policy = global_policy
RLAlgorithm.__init__(
self,
env=env,
exploration_policy=exploration_policy,
eval_env=eval_env,
eval_policy=global_policy,
eval_sampler=self.sample_global_pol,
**kwargs
)
# Rename for GPS
self.global_policy = self.eval_policy
self.local_policies = local_policies
# Noise to be used with trajectory distributions
self.noise_data = np.zeros((self.num_epochs, self.M,
self._num_samples,
self.T, self.dU))
self._noisy_samples = noisy_samples
if self._noisy_samples:
for ii in range(self.num_epochs):
for cond in range(self.M):
for n in range(self._num_samples):
self.noise_data[ii, cond, n, :, :] = \
generate_noise(self.T, self.dU, noise_hyperparams)
# IterationData objects for each condition.
self.cur = [IterationData() for _ in range(self.M)]
self.prev = [IterationData() for _ in range(self.M)]
# Trajectory Info
for m in range(self.M):
self.cur[m].traj_info = TrajectoryInfo()
if self._fit_dynamics:
sigma_regu = 1e-6
prior = DynamicsPriorGMM(
min_samples_per_cluster=40,
max_clusters=20,
max_samples=20,
strength=1.,
)
self.cur[m].traj_info.dynamics = \
DynamicsLRPrior(prior=prior, sigma_regu=sigma_regu)
self.cur[m].traj_distr = local_policies[m]
# Cost Fcn
self._cost_fcn = cost_fcn
# Global Policy Optimization
self.global_pol_optimizer = torch.optim.Adam(
self.global_policy.parameters(),
lr=self._global_opt_lr,
betas=(0.9, 0.999),
eps=1e-08, # Term added to the denominator for numerical stability
# weight_decay=0.005,
weight_decay=0.5,
amsgrad=True,
)
# Local Trajectory Information
self._local_pol_optimizer = TrajOptLQR(
cons_per_step=False,
use_prev_distr=False,
update_in_bwd_pass=True,
min_eta=traj_opt_min_eta,
max_eta=traj_opt_max_eta,
)
level = logging.INFO
self.logger = logging.getLogger(__name__)
self.logger.setLevel(level)
console = logging.StreamHandler()
self.logger.addHandler(console)
for handler in self.logger.handlers:
handler.setLevel(level)
self.eval_statistics = None
self._return_fig = None
self._return_axs = None
self._return_lines = [None for _ in range(self.n_test_conds)]
# MDGPS data #
# ---------- #
for m in range(self.M):
# Same policy prior type for all conditions
self.cur[m].pol_info = PolicyInfo(
T=self.T,
dU=self.dU,
dX=self.dX,
init_pol_wt=0.01,
)
self.cur[m].pol_info.policy_prior = ConstantPolicyPrior()
def train(self, start_epoch=0):
# Get snapshot of initial stuff
if start_epoch == 0:
self.training_mode(False)
params = self.get_epoch_snapshot(-1)
logger.save_itr_params(-1, params)
self._n_env_steps_total = start_epoch * self.num_train_steps_per_epoch
gt.reset()
gt.set_def_unique(False)
for epoch in gt.timed_for(
range(start_epoch, self.num_epochs),
save_itrs=True,
):
self._start_epoch(epoch)
# self._current_path_builder = PathBuilder()
# Sample from environment using current trajectory distributions
noise = self.noise_data[epoch]
self.logger.info('')
self.logger.info('%s: itr:%02d | '
'Sampling from local trajectories...'
% (type(self).__name__, epoch))
paths = self.sample_local_pol(noise=noise)
self._exploration_paths = paths
# self._handle_path(paths)
self._n_env_steps_total += int(self.n_train_conds*self._num_samples*self.T)
# Iterative learning step
gt.stamp('sample')
self._try_to_train()
gt.stamp('train')
# Evaluate if requirements are met
self._try_to_eval(epoch)
gt.stamp('eval')
self._end_epoch()
def _do_training(self):
epoch = self._n_epochs
# batch = self.get_batch()
paths = self.get_exploration_paths()
self.logger.info('')
self.logger.info('')
self.logger.info('%s: itr:%02d | '
'Creating Sample List...'
% (type(self).__name__, epoch))
for m, m_train in enumerate(self._train_cond_idxs):
self.cur[m_train].sample_list = SampleList(paths[m])
# Update dynamics model using all samples.
self.logger.info('')
self.logger.info('%s: itr:%02d | '
'Updating dynamics linearization...'
% (type(self).__name__, epoch))
self._update_dynamic_model()
# Evaluate sample costs
self.logger.info('')
self.logger.info('%s: itr:%02d | '
'Evaluating samples costs...'
% (type(self).__name__, epoch))
self._eval_iter_samples_costs()
# Update Networks
# On the first iteration, need to catch policy up to init_traj_distr.
if self._n_epochs == 1:
self.logger.info("\n"*2)
self.logger.info('%s: itr:%02d | '
'S-step for init_traj_distribution (iter=0)...'
% (type(self).__name__, epoch))
self.new_traj_distr = [self.cur[cond].traj_distr
for cond in range(self.M)]
self._update_global_policy()
# TODO:
self.sample_global_pol()
# Update global policy linearizations.
self.logger.info('')
self.logger.info('%s: itr:%02d | '
'Updating global policy linearization...'
% (type(self).__name__, epoch))
self._update_local_policies_fit()
# Update KL step
if self._n_epochs > 1:
self.logger.info('')
self.logger.info('%s: itr:%02d | '
'Updating KL step size with GLOBAL policy...'
% (type(self).__name__, epoch))
self._update_kl_step_size()
# C-step
self.logger.info('')
self.logger.info('%s: itr:%02d | '
'Updating trajectories...'
% (type(self).__name__, epoch))
for ii in range(self._traj_opt_iters):
self.logger.info('-%s: itr:%02d | Inner iteration %d/%d'
% (type(self).__name__, epoch, ii+1,
self._traj_opt_iters))
self._update_local_policies()
# S-step
self.logger.info('')
self.logger.info('%s:itr:%02d | ->| S-step |<-'
% (type(self).__name__, epoch))
self._update_global_policy()
# if self.eval_statistics is None:
# """
# Eval should set this to None.
# This way, these statistics are only computed for one batch.
# """
# self.eval_statistics = OrderedDict()
# # self.eval_statistics['Bellman Residual (QFcn)'] = \
# # np.mean(ptu.get_numpy(bellman_residual))
# self.eval_statistics['Surrogate Reward (Policy)'] = \
# np.mean(ptu.get_numpy(surrogate_cost))
def _can_evaluate(self):
return True
def evaluate(self, epoch):
statistics = OrderedDict()
self._update_logging_data()
statistics.update(self.eval_statistics)
self.eval_statistics = None
paths = self.sample_global_pol()
if paths is None:
print("NO LOGGING LAST SAMPLING")
return
cond_returns_mean = np.zeros(len(paths))
cond_returns_std = np.zeros(len(paths))
for cc, cond_path in enumerate(paths):
sample_list = SampleList(cond_path)
true_cost, cost_estimate, cost_compo = \
self._eval_sample_list_cost(sample_list, self._cost_fcn)
cond_returns_mean[cc] = np.mean(np.sum(true_cost, axis=-1))
cond_returns_std[cc] = np.std(np.sum(true_cost, axis=-1))
stat_txt = '[Cond-%02d] Global Mean Return' % cc
statistics[stat_txt] = cond_returns_mean[cc]
stat_txt = '[Cond-%02d] Global Std Return' % cc
statistics[stat_txt] = cond_returns_std[cc]
stat_txt = '[Cond-%02d] Eta' % cc
statistics[stat_txt] = self.cur[cc].eta
# stat_txt = 'Mean Return'
# statistics[stat_txt] = np.mean(cond_returns_mean)
# Record the data
for key, value in statistics.items():
logger.record_tabular(key, value)
self._update_plot(statistics)
def _update_plot(self, statistics):
# if self._return_fig is None:
# # self._return_fig, self._return_axs = subplots(1, self.n_test_conds+1)
# self._return_fig, self._return_axs = plt.subplots(1, self.n_test_conds+1)
# for aa, ax in enumerate(self._return_axs[:-1]):
# self._return_lines = \
# ax.plot(self._n_epochs,
# statistics['[Cond-%02d] Mean Return' % aa],
# color='b',
# marker='o',
# markersize=2
# )
# # plt.show(block=False)
# else:
# for aa, line in enumerate(self._return_lines[:-1]):
# line.set_xdata(
# np.append(line.get_xdata(),
# self._n_epochs)
# )
# line.set_ydata(
# np.append(line.get_ydata(),
# statistics['[Cond-%02d] Mean Return' % aa])
# )
# self._return_fig.canvas.draw()
# plt_pause(0.01)
# self._return_fig, self._return_axs = plt.subplots(1, self.n_test_conds+1)
# for aa, ax in enumerate(self._return_axs[:-1]):
# self._return_lines = \
# ax.plot(self._n_epochs,
# statistics['[Cond-%02d] Mean Return' % aa],
# color='b',
# marker='o',
# markersize=2
# )
# self._return_fig.savefig('tempo/fig%02d.png' % self._n_epochs)
#
# del self._return_fig
# del self._return_axs
# del self._return_lines
pass
def _update_logging_data(self):
if self.eval_statistics is None:
"""
Eval should set this to None.
This way, these statistics are only computed for one batch.
"""
self.eval_statistics = OrderedDict()
def _end_epoch(self):
# TODO: change IterationData to reflect new stuff better
del self.prev
self.prev = copy.deepcopy(self.cur)
for m in range(self.M):
self.prev[m].new_traj_distr = self.new_traj_distr[m]
# NEW IterationData object, and remove new_traj_distr
self.cur = [IterationData() for _ in range(self.M)]
for m in range(self.M):
self.cur[m].traj_info = TrajectoryInfo()
self.cur[m].traj_info.dynamics = \
copy.deepcopy(self.prev[m].traj_info.dynamics)
self.cur[m].step_mult = self.prev[m].step_mult
self.cur[m].eta = self.prev[m].eta
self.cur[m].traj_distr = self.new_traj_distr[m]
self.cur[m].traj_info.last_kl_step = \
self.prev[m].traj_info.last_kl_step
# MDGPS
self.cur[m].pol_info = copy.deepcopy(self.prev[m].pol_info)
self.new_traj_distr = None
RLAlgorithm._end_epoch(self)
def _update_dynamic_model(self):
"""
Instantiate dynamics objects and update prior.
Fit dynamics to current samples.
"""
for m in range(self.M):
cur_data = self.cur[m].sample_list
X = cur_data['observations']
U = cur_data['actions']
# Update prior and fit dynamics.
self.cur[m].traj_info.dynamics.update_prior(X, U)
self.cur[m].traj_info.dynamics.fit(X, U)
# Fm = self.cur[m].traj_info.dynamics.Fm
# fv = self.cur[m].traj_info.dynamics.fv
# T = -2
# N = 0
# oo = X[N, T, :]
# uu = U[N, T, :]
# oo_uu = np.concatenate((oo, uu), axis=0)
# oop1 = Fm[T].dot(oo_uu) + fv[T]
# print('real', X[N, T+1, :])
# print('pred', oop1)
# input('fds')
# Fit x0mu/x0sigma.
x0 = X[:, 0, :]
x0mu = np.mean(x0, axis=0)
self.cur[m].traj_info.x0mu = x0mu
self.cur[m].traj_info.x0sigma = \
np.diag(np.maximum(np.var(x0, axis=0),
self._initial_state_var))
prior = self.cur[m].traj_info.dynamics.get_prior()
if prior:
mu0, Phi, priorm, n0 = prior.initial_state()
N = len(cur_data)
self.cur[m].traj_info.x0sigma += \
Phi + (N*priorm) / (N+priorm) * \
np.outer(x0mu-mu0, x0mu-mu0) / (N+n0)
def _eval_iter_samples_costs(self):
for cond in range(self.M):
sample_list = self.cur[cond].sample_list
true_cost, cost_estimate, cost_compo = \
self._eval_sample_list_cost(sample_list, self._cost_fcn)
# Cost sample
self.cur[cond].cs = true_cost # True value of cost.
# Cost composition
self.cur[cond].cost_compo = cost_compo # Cost 'composition'.
# Cost estimate.
self.cur[cond].traj_info.Cm = cost_estimate[0] # Quadratic term (matrix).
self.cur[cond].traj_info.cv = cost_estimate[1] # Linear term (vector).
self.cur[cond].traj_info.cc = cost_estimate[2] # Constant term (scalar).
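# Hedged note (not in the original source): together Cm, cv and cc form the local
# quadratic expansion of the cost used by the LQR trajectory optimizer, roughly
# c_t(x, u) ~ cc_t + [x; u]^T cv_t + 0.5 * [x; u]^T Cm_t [x; u].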
def _eval_sample_list_cost(self, sample_list, cost_fcn):
"""
Evaluate costs for a sample_list using a specific cost function.
Args:
cost: self.cost_function[cond]
cond: Condition to evaluate cost on.
"""
# Constants.
T, dX, dU = self.T, self.dX, self.dU
N = len(sample_list)
# Compute cost.
cs = np.zeros((N, T))
cc = np.zeros((N, T))
cv =
|
np.zeros((N, T, dX+dU))
|
numpy.zeros
|
import numpy as np
import scipy.linalg,scipy.misc
import os,time
import tensorflow as tf
import matplotlib.pyplot as plt
import csv
import warp
# load GTSRB data
def loadGTSRB(opt,fname):
if not os.path.exists(fname):
# download and preprocess GTSRB dataset
os.makedirs(os.path.dirname(fname))
os.system("wget -O data/GTSRB_Final_Training_Images.zip http://benchmark.ini.rub.de/Dataset/GTSRB_Final_Training_Images.zip")
os.system("wget -O data/GTSRB_Final_Test_Images.zip http://benchmark.ini.rub.de/Dataset/GTSRB_Final_Test_Images.zip")
os.system("wget -O data/GTSRB_Final_Test_GT.zip http://benchmark.ini.rub.de/Dataset/GTSRB_Final_Test_GT.zip")
os.system("cd data && unzip GTSRB_Final_Training_Images.zip")
os.system("cd data && unzip GTSRB_Final_Test_Images.zip")
os.system("cd data && unzip GTSRB_Final_Test_GT.zip")
# training data
print("preparing training data...")
images,bboxes,labels = [],[],[]
for c in range(43):
prefix = "data/GTSRB/Final_Training/Images/{0:05d}".format(c)
with open("{0}/GT-{1:05d}.csv".format(prefix,c)) as file:
reader = csv.reader(file,delimiter=";")
next(reader)
for line in reader:
img = plt.imread(prefix+"/"+line[0])
rawH,rawW = img.shape[0],img.shape[1]
scaleH,scaleW = float(opt.fullH)/rawH,float(opt.fullW)/rawW
imgResize = scipy.misc.imresize(img,(opt.fullH,opt.fullW,3))
images.append(imgResize)
bboxes.append([float(line[3])*scaleW,float(line[4])*scaleH,
float(line[5])*scaleW,float(line[6])*scaleH])
labels.append(int(line[7]))
trainData = {
"image": np.array(images),
"bbox":
|
np.array(bboxes)
|
numpy.array
|
import numpy as np
#Here we define some extra utilities needed for spin-summed, unrestricted CCSD
def solveccd(F_a,F_b,G,T,nocca,noccb,nvirta,nvirtb,x=4.0):
#Solve for opposite-spin amplitudes
Tnew = np.zeros(np.shape(T))
for i in range(nocca):
for j in range(noccb):
for a in range(nvirta):
aa = a + nocca
for b in range(nvirtb):
bb = b + noccb
d = (F_a[i,i] + F_b[j,j] - F_a[aa,aa] - F_b[bb,bb])
Tnew[i,j,a,b] = G[i,j,a,b]/d
#Damp amplitudes to improve convergence
return(Tnew/x + T*(x-1.0)/x)
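# Hedged note (not in the original source): d above is the Moller-Plesset-like orbital
# energy denominator F_a[i,i] + F_b[j,j] - F_a[aa,aa] - F_b[bb,bb], and the return line
# applies simple linear damping, T_new <- T_new/x + T_old*(x-1)/x, to aid convergence.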
def Ecorr(F_a,F_b,Eri_aa,Eri_ab,Eri_bb,T2_aa,T2_ab,T2_bb,T1_a,T1_b,nocca,noccb):
#unrestricted, spin-summation of the CCSD correlation energy
#CCD piece
Eaa = 0.25e0*np.einsum('ijab,abij',T2_aa,Eri_aa[nocca:,nocca:,:nocca,:nocca])
Ebb = 0.25e0*np.einsum('ijab,abij',T2_bb,Eri_bb[noccb:,noccb:,:noccb,:noccb])
Eab = np.einsum('ijab,abij',T2_ab,Eri_ab[nocca:,noccb:,:nocca,:noccb])
# print("E2aa =", Eaa)
# print("E2ab =", Eab)
# print("E2bb =", Ebb)
E2 = Eaa + Ebb + Eab
#linear in singles
E1 = np.einsum('ia,ai',T1_a,F_a[nocca:,:nocca])
E1 += np.einsum('ia,ai',T1_b,F_b[noccb:,:noccb])
#quadratic in singles
E1 += 0.5e0*np.einsum('ia,jb,abij',T1_a,T1_a,Eri_aa[nocca:,nocca:,:nocca,:nocca])
E1 += 0.5e0*np.einsum('ia,jb,abij',T1_a,T1_b,Eri_ab[nocca:,noccb:,:nocca,:noccb])
E1 += 0.5e0*np.einsum('ia,jb,baji',T1_b,T1_a,Eri_ab[nocca:,noccb:,:nocca,:noccb])
E1 += 0.5e0*np.einsum('ia,jb,abij',T1_b,T1_b,Eri_bb[noccb:,noccb:,:noccb,:noccb])
# print("E1 =", E1)
return E1+E2
def diis_setup(diis_start,diis_dim,nocca,noccb,nvirta,nvirtb):
#use direct inversion of the iterative subspace (Pulay Chem Phys Lett 73(390), 1980) to extrapolate CC amplitudes.
#This function sets up the various arrays we need for the extrapolation.
Errors = np.zeros([diis_dim,nocca,noccb,nvirta,nvirtb])
Ts = np.zeros([diis_dim,nocca,noccb,nvirta,nvirtb])
Err_vec = np.zeros([nocca,noccb,nvirta,nvirtb])
return Errors, Ts, Err_vec
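# Hedged note: in a standard Pulay DIIS scheme the last diis_dim error vectors (residuals
# of the amplitude update) are stored in Errors, the matching amplitudes in Ts, and the
# extrapolated amplitudes are the linear combination of Ts whose coefficients minimize
# the norm of the combined error subject to the coefficients summing to one.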
def get_Err(F_a,F_b,G,T,nocca,noccb,noccl,noccu,variant):
#Calculate the residual for the CC equations at a given value of the T2 amplitudes. Just use full contractions, in case we're
# doing non-canonical CC
if (variant == 'roccsd0'):
F_c = 0.5e0*(F_a+F_b)
soa = noccu-nocca
sob = noccu-noccb
Ax = -np.einsum('ca,ijcb->ijab',F_a[nocca:,nocca:],T)
Ax -= np.einsum('cb,ijac->ijab', F_b[noccb:,noccb:],T)
Ax += np.einsum('ki,kjab->ijab', F_a[:nocca,:nocca],T)
Ax += np.einsum('kj,ikab->ijab', F_b[:noccb,:noccb],T)
#Symmetrize if doing singlet-paired CC
if (variant == 'roccsd0'):
Ax[:noccl,:noccl,soa:,sob:] = -
|
np.einsum('ca,ijcb->ijab', F_c[noccu:,noccu:],T[:noccl,:noccl,soa:,sob:])
|
numpy.einsum
|
# Copyright 2017 University of Maryland.
#
# This file is part of Sesame. It is subject to the license terms in the file
# LICENSE.rst found in the top-level directory of this distribution.
import numpy as np
from .observables import *
from .defects import defectsF
def getF(sys, v, efn, efp, veq):
###########################################################################
# organization of the right hand side vector #
###########################################################################
# A site with coordinates (i,j,k) corresponds to a site number s as follows:
# k = s//(Nx*Ny)
# j = (s - k*Nx*Ny)//Nx
# i = s - j*Nx - k*Nx*Ny
#
# Rows for (efn_s, efp_s, v_s)
# ----------------------------
# fn_row = 3*s
# fp_row = 3*s+1
# fv_row = 3*s+2
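# Worked example (illustrative): with Nx=4, Ny=3, Nz=2 the site s=17 decomposes as
# k = 17//12 = 1, j = (17 - 1*12)//4 = 1, i = 17 - 1*4 - 1*12 = 1, i.e. (i,j,k) = (1,1,1),
# and its three unknowns occupy rows 51, 52 and 53 of the right hand side vector.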
Nx, Ny, Nz = sys.xpts.shape[0], sys.ypts.shape[0], sys.zpts.shape[0]
# right hand side vector
global vec
vec = np.zeros((3*Nx*Ny*Nz,))
def update(fn, fp, fv, sites):
global vec
vec[3*sites] = fn
vec[3*sites+1] = fp
vec[3*sites+2] = fv
###########################################################################
# For all sites in the system #
###########################################################################
# carrier densities
n = sys.Nc * np.exp(+sys.bl + efn + v)
p = sys.Nv * np.exp(-sys.Eg - sys.bl - efp - v)
# equilibrium carrier densities
n_eq = sys.Nc * np.exp(+sys.bl + veq)
p_eq = sys.Nv * np.exp(-sys.Eg - sys.bl - veq)
# bulk charges
rho = sys.rho - n + p
# recombination rates
r = get_bulk_rr(sys, n, p)
# charge defects
if len(sys.defects_list) != 0:
defectsF(sys, sys.defects_list, n, p, rho, r)
# charge divided by epsilon
rho = rho / sys.epsilon
# reshape the array as array[y-indices, x-indices]
_sites = np.arange(Nx*Ny*Nz, dtype=int).reshape(Nz, Ny, Nx)
def currents(sys, efn, efp, v, dx, dxm1, dy, dym1, dz, dzm1, sites):
jnx_s, jnx_sm1, jny_s, jny_smN, jnz_s, jnz_smNN = 0, 0, 0, 0, 0, 0
jpx_s, jpx_sm1, jpy_s, jpy_smN, jpz_s, jpz_smNN = 0, 0, 0, 0, 0, 0
if dx.all() != 0:
jnx_s = get_jn(sys, efn, v, sites, sites + 1, dx)
jpx_s = get_jp(sys, efp, v, sites, sites + 1, dx)
if dxm1.all() != 0:
jnx_sm1 = get_jn(sys, efn, v, sites - 1, sites, dxm1)
jpx_sm1 = get_jp(sys, efp, v, sites - 1, sites, dxm1)
if dy.all() != 0:
jny_s = get_jn(sys, efn, v, sites, sites + Nx, dy)
jpy_s = get_jp(sys, efp, v, sites, sites + Nx, dy)
if dym1.all() != 0:
jny_smN = get_jn(sys, efn, v, sites - Nx, sites, dym1)
jpy_smN = get_jp(sys, efp, v, sites - Nx, sites, dym1)
if dz.all() != 0:
jnz_s = get_jn(sys, efn, v, sites, sites + Nx*Ny, dz)
jpz_s = get_jp(sys, efp, v, sites, sites + Nx*Ny, dz)
if dzm1.all() != 0:
jnz_smNN = get_jn(sys, efn, v, sites - Nx*Ny, sites, dzm1)
jpz_smNN = get_jp(sys, efp, v, sites - Nx*Ny, sites, dzm1)
return jnx_s, jnx_sm1, jny_s, jny_smN, jnz_s, jnz_smNN,\
jpx_s, jpx_sm1, jpy_s, jpy_smN, jpz_s, jpz_smNN
def ddp(sys, efn, efp, v, dx, dxm1, dy, dym1, dz, dzm1, sites):
# Drift diffusion Poisson equations that determine fn, fp, fv
# lattice distances
dxbar = (dx + dxm1) / 2.
dybar = (dy + dym1) / 2.
dzbar = (dz + dzm1) / 2.
# compute currents
jnx_s, jnx_sm1, jny_s, jny_smN, jnz_s, jnz_smNN,\
jpx_s, jpx_sm1, jpy_s, jpy_smN, jpz_s, jpz_smNN = \
currents(sys, efn, efp, v, dx, dxm1, dy, dym1, dz, dzm1, sites)
# drift diffusion
u = sys.g[sites] - r[sites]
fn = (jnx_s - jnx_sm1) / dxbar + (jny_s - jny_smN) / dybar \
+ (jnz_s - jnz_smNN) / dzbar + u
fp = (jpx_s - jpx_sm1) / dxbar + (jpy_s - jpy_smN) / dybar \
+ (jpz_s - jpz_smNN) / dzbar - u
# Poisson
dv_sm1, dv_sp1, dv_smN, dv_spN, dv_smNN, dv_spNN = 0, 0, 0, 0, 0, 0
v_s = v[sites]
if dx.all() != 0:
dv_sp1 = (v[sites+1] - v_s) / dx
if dxm1.all() != 0:
dv_sm1 = (v_s - v[sites-1]) / dxm1
if dy.all() != 0:
dv_spN = (v[sites+Nx] - v_s) / dy
if dym1.all() != 0:
dv_smN = (v_s - v[sites-Nx]) / dym1
if dz.all() != 0:
dv_spNN = (v[sites+Nx*Ny] - v_s) / dz
if dzm1.all() != 0:
dv_smNN = (v_s - v[sites-Nx*Ny]) / dzm1
fv = (dv_sm1 - dv_sp1) / dxbar + (dv_smN - dv_spN) / dybar\
+ (dv_smNN - dv_spNN) / dzbar - rho[sites]
# update vector
update(fn, fp, fv, sites)
def right_bc(sys, efn, efp, v, dy, dym1, dz, dzm1, sites):
# Boundary conditions on the right contact
# lattice distances and sites
dx = np.array([0])
dxm1 = sys.dx[-1]
dxbar = (dx + dxm1) / 2.
dybar = (dy + dym1) / 2.
dzbar = (dz + dzm1) / 2.
# compute currents
_, jnx_sm1, jny_s, jny_smN, jnz_s, jnz_smNN,\
_, jpx_sm1, jpy_s, jpy_smN, jpz_s, jpz_smNN = \
currents(sys, efn, efp, v, dx, dxm1, dy, dym1, dz, dzm1, sites)
# compute jx_s with continuity equation
jnx_s = jnx_sm1 + dxbar * (r[sites] - sys.g[sites] - (jny_s - jny_smN)/dybar\
- (jnz_s - jnz_smNN)/dzbar)
jpx_s = jpx_sm1 + dxbar * (sys.g[sites] - r[sites] - (jpy_s - jpy_smN)/dybar\
- (jpz_s - jpz_smNN)/dzbar)
# b_n, b_p and b_v values
bn = jnx_s + sys.Scn[1] * (n[sites] - n_eq[sites])
bp = jpx_s - sys.Scp[1] * (p[sites] - p_eq[sites])
bv = 0 # Dirichlet BC
# update right hand side vector
update(bn, bp, bv, sites)
###########################################################################
# inside the system: 0 < i < Nx-1, 0 < j < Ny-1, 0 < k < Nz-1 #
###########################################################################
# We compute fn, fp, fv on the inner part of the system.
# list of the sites inside the system
sites = _sites[1:Nz-1, 1:Ny-1, 1:Nx-1].flatten()
# lattice distances
dx = np.tile(sys.dx[1:], (Ny-2)*(Nz-2))
dy = np.repeat(sys.dy[1:], (Nx-2)*(Nz-2))
dz = np.repeat(sys.dz[1:], (Nx-2)*(Ny-2))
dxm1 = np.tile(sys.dx[:-1], (Ny-2)*(Nz-2))
dym1 = np.repeat(sys.dy[:-1], (Nx-2)*(Nz-2))
dzm1 = np.repeat(sys.dz[:-1], (Nx-2)*(Ny-2))
# compute fn, fp, fv and update vector
ddp(sys, efn, efp, v, dx, dxm1, dy, dym1, dz, dzm1, sites)
###########################################################################
# left boundary: i = 0, 0 <= j <= Ny-1, 0 <= k <= Nz-1 #
###########################################################################
# list of the sites on the left side
sites = _sites[:, :, 0].flatten()
# compute the currents
jnx = get_jn(sys, efn, v, sites, sites + 1, sys.dx[0])
jpx = get_jp(sys, efp, v, sites, sites + 1, sys.dx[0])
# compute an, ap, av
an = jnx - sys.Scn[0] * (n[sites] - n_eq[sites])
ap = jpx + sys.Scp[0] * (p[sites] - p_eq[sites])
av = 0 # to ensure Dirichlet BCs
update(an, ap, av, sites)
###########################################################################
# right boundaries #
###########################################################################
###########################################################################
# right boundary: i = Nx-1, 0 < j < Ny-1, 0 < k < Nz-1 #
###########################################################################
# list of the sites on the right side
sites = _sites[1:Nz-1, 1:Ny-1, Nx-1].flatten()
# lattice distances
dy = np.repeat(sys.dy[1:], Nz-2)
dym1 = np.repeat(sys.dy[:-1], Nz-2)
dz = np.repeat(sys.dz[1:], Ny-2)
dzm1 = np.repeat(sys.dz[:-1], Ny-2)
# compute the BC and update the right hand side vector
right_bc(sys, efn, efp, v, dy, dym1, dz, dzm1, sites)
###########################################################################
# right boundary: i = Nx-1, j = Ny-1, 0 < k < Nz-1 #
###########################################################################
# list of the sites on the right side
sites = _sites[1:Nz-1, Ny-1, Nx-1].flatten()
# lattice distances
dy = np.array([0])
dym1 = np.repeat(sys.dy[-1], Nz-2)
dz = sys.dz[1:]
dzm1 = sys.dz[:-1]
# compute the BC and update the right hand side vector
right_bc(sys, efn, efp, v, dy, dym1, dz, dzm1, sites)
###########################################################################
# right boundary: i = Nx-1, j = 0, 0 < k < Nz-1 #
###########################################################################
# list of the sites on the right side
sites = _sites[1:Nz-1, 0, Nx-1].flatten()
# lattice distances
dy = np.repeat(sys.dy[-1], Nz-2)
dym1 = np.array([0])
dz = sys.dz[1:]
dzm1 = sys.dz[:-1]
# compute the BC and update the right hand side vector
right_bc(sys, efn, efp, v, dy, dym1, dz, dzm1, sites)
###########################################################################
# right boundary: i = Nx-1, 0 < j < Ny-1, k = Nz-1 #
###########################################################################
# list of the sites on the right side
sites = _sites[Nz-1, 1:Ny-1, Nx-1].flatten()
# lattice distances
dy = sys.dy[1:]
dym1 = sys.dy[:-1]
dz = np.array([0])
dzm1 = np.repeat(sys.dz[-1], Ny-2)
# compute the BC and update the right hand side vector
right_bc(sys, efn, efp, v, dy, dym1, dz, dzm1, sites)
###########################################################################
# right boundary: i = Nx-1, 0 < j < Ny-1, k = 0 #
###########################################################################
# list of the sites on the right side
sites = _sites[0, 1:Ny-1, Nx-1].flatten()
# lattice distances
dy = sys.dy[1:]
dym1 = sys.dy[:-1]
dz = np.repeat(sys.dz[0], Ny-2)
dzm1 = np.array([0])
# compute the BC and update the right hand side vector
right_bc(sys, efn, efp, v, dy, dym1, dz, dzm1, sites)
###########################################################################
# right boundary: i = Nx-1, j = Ny-1, k = 0 #
###########################################################################
# list of the sites on the right side
sites = _sites[0, Ny-1, Nx-1].flatten()
# lattice distances
dy = np.array([0])
dym1 = sys.dy[-1]
dz = sys.dz[0]
dzm1 = np.array([0])
# compute the BC and update the right hand side vector
right_bc(sys, efn, efp, v, dy, dym1, dz, dzm1, sites)
###########################################################################
# right boundary: i = Nx-1, j = Ny-1, k = Nz-1 #
###########################################################################
# list of the sites on the right side
sites = _sites[Nz-1, Ny-1, Nx-1].flatten()
# lattice distances
dy = np.array([0])
dym1 = sys.dy[-1]
dz = np.array([0])
dzm1 = sys.dz[-1]
# compute the BC and update the right hand side vector
right_bc(sys, efn, efp, v, dy, dym1, dz, dzm1, sites)
###########################################################################
# right boundary: i = Nx-1, j = 0, k = Nz-1 #
###########################################################################
# list of the sites on the right side
sites = _sites[Nz-1, 0, Nx-1].flatten()
# lattice distances
dy = sys.dy[0]
dym1 = np.array([0])
dz = np.array([0])
dzm1 = sys.dz[-1]
# compute the BC and update the right hand side vector
right_bc(sys, efn, efp, v, dy, dym1, dz, dzm1, sites)
###########################################################################
# right boundary: i = Nx-1, j = 0, k = 0 #
###########################################################################
# list of the sites on the right side
sites = _sites[0, 0, Nx-1].flatten()
# lattice distances
dy = sys.dy[0]
dym1 = np.array([0])
dz = sys.dz[0]
dzm1 = np.array([0])
# compute the BC and update the right hand side vector
right_bc(sys, efn, efp, v, dy, dym1, dz, dzm1, sites)
###########################################################################
# faces between contacts: 0 < i < Nx-1, j or k fixed #
###########################################################################
# Here we focus on the faces between the contacts.
###########################################################################
# z-face top: 0 < i < Nx-1, 0 < j < Ny-1, k = Nz-1 #
###########################################################################
# list of the sites
sites = _sites[Nz-1, 1:Ny-1, 1:Nx-1].flatten()
# lattice distances
dx = np.tile(sys.dx[1:], Ny-2)
dy = np.repeat(sys.dy[1:], Nx-2)
dz = np.array([0])
dxm1 = np.tile(sys.dx[:-1], Ny-2)
dym1 = np.repeat(sys.dy[:-1], Nx-2)
dzm1 = np.repeat(sys.dz[-1], (Nx-2)*(Ny-2))
# compute fn, fp, fv and update vector
ddp(sys, efn, efp, v, dx, dxm1, dy, dym1, dz, dzm1, sites)
###########################################################################
# z- face bottom: 0 < i < Nx-1, 0 < j < Ny-1, k = 0 #
###########################################################################
# list of the sites
sites = _sites[0, 1:Ny-1, 1:Nx-1].flatten()
# lattice distances
dx = np.tile(sys.dx[1:], Ny-2)
dy = np.repeat(sys.dy[1:], Nx-2)
dz =
|
np.repeat(sys.dz[0], (Nx-2)*(Ny-2))
|
numpy.repeat
|
# -*- coding: utf-8 -*-
"""
@Author: <NAME>
@Site: https://github.com/liushaoweihua
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
class LoggingMetricsHook(tf.train.SessionRunHook):
def __init__(self, metric_ops, label_map_reverse, save_steps, output_dir):
self.metric_ops = metric_ops
self.metrics_num = len({item: metric_ops[item]
for item in metric_ops
if not item.endswith('update')})
self.label_map_reverse = label_map_reverse
self.save_steps = save_steps
self.output_dir = output_dir
self.metric_save_path = os.path.join(self.output_dir, 'train_metrics.txt')
self.figure_save_path = os.path.join(self.output_dir, 'train_metrics.png')
if not tf.gfile.Exists(self.output_dir):
tf.gfile.MakeDirs(self.output_dir)
def begin(self):
self.step = -1
self.metric_results = {metric_name: [] for metric_name in self.metric_ops}
self.metric_writer = tf.gfile.GFile(self.metric_save_path, 'w')
def before_run(self, run_context):
self.step += 1
return tf.train.SessionRunArgs(self.metric_ops)
def after_run(self, run_context, run_values):
if self.step % self.save_steps == 0:
results = run_values.results
for metric_name in results:
self.metric_results[metric_name].append(results[metric_name])
results = {item: results[item] for item in results if not item.endswith('update')}
results_list = {item: results[item] for item in results if isinstance(results[item], list)}
results_scalar = {item: results[item] for item in results if not isinstance(results[item], list)}
logging_info = 'step = %d, %s, %s' \
% (self.step,
', '.join(['%s = %.6f' % item for item in results_scalar.items()]),
', '.join(['%s = %s' % (item[0],
[float(i) for i in list(map(lambda x: '%.6f' % x, item[1]))])
for item in results_list.items()]))
print(logging_info)
self.metric_writer.write(logging_info + '\n')
def end(self, session):
self.metric_writer.close()
self.metric_results = {item: self.metric_results[item]
for item in self.metric_results
if not item.endswith('update')}
fig, axs = plt.subplots(self.metrics_num, 1, sharex=True)
fig.set_size_inches(16, 4.5 * self.metrics_num)
for i, metric_name in enumerate(self.metric_results):
metric_result = self.metric_results[metric_name]
steps = np.arange(0, self.save_steps * len(metric_result), self.save_steps)
p = axs[i].plot(steps, self.metric_results[metric_name])
axs[i].set_ylabel(metric_name)
axs[i].grid(True)
if
|
np.array(self.metric_results[metric_name])
|
numpy.array
|
from warpkern import Anim
import numpy as np
import math
import colorsys
class TestAnim1(Anim):
def __init__(self, ringcount: int, ledcount: int):
self.ringcount = ringcount
self.ledcount = ledcount
self.totalLeds = ringcount * ledcount
self.counter = 0
def tick(self, data: np.array, time: float, dt: float):
self.data = data
self.counter += 1
if self.counter > self.totalLeds:
self.counter = 0
self.data = np.transpose(self.data)
for r in range(self.ringcount):
#ring = np.arange(self.ledcount)
indxs = self.ledcount * r
indxe = self.ledcount * (r + 1)
self.data[1][indxs:indxe] = np.zeros(self.ledcount)
self.data[2][indxs:indxe] =
|
np.zeros(self.ledcount)
|
numpy.zeros
|
import os
import sys
import numpy as np
from pickle import dump, load
import matplotlib.pyplot as plt
import pandas as pd
from information_metrics import *
from scipy.stats import spearmanr, pearsonr
from matplotlib.lines import Line2D
sites_params = pd.read_csv( '../../DATA/EEGS/sel2_sites_params.csv')
sites = sites_params['site_id'].values
sel = 'revision_0'
res_dir = '../../DATA/EEGS/fitted_models_%s' % sel
fig_dir = '../../PROJECTS/Stomatal_conductance_eval/NP_figures'
timescale = 'd'
mod_list = ['WUE_%s' % timescale, 'CM_%s4' % timescale, 'SOX_%s3' % timescale]
mod_list_labels = [r'WUE', r'CM', r'SOX']
mod_list_nparams= [2, 4, 3]
model_colors = ['#04ABC2', '#B7CE63', '#FDE10D']
markers = [ 'o', 's', '^']
target_m = 'LE'
target_o = 'LE_F_MDS'
source_1 = 'S'
source_2 = 'VPD_a'
sensitivity_var = source_1
n_bins = 15
whis = [5, 95]
def get_result_dict():
result = {}
for l in ['site', 'general']:
result[l] = {}
for k in [ 'Wet', 'Dry', 'Mesic', 'Full']:
result[l][k] = {}
for a in ['aic', 'a_p', 'a_ft', 'a_fu1', 'a_fu2', 'a_fs', 'a_fr', 'a_fp']:
result[l][k][a] = []
for site_i in sites:
data_file = [os.path.join(res_dir, fi) for fi in os.listdir(res_dir)
if fi.endswith('gen.csv') and (site_i in fi)][0]
data_0 = pd.read_csv(data_file, header = 0, index_col = 0, parse_dates = True,
infer_datetime_format = True,)
vv = [source_1, source_2, target_o, ]
for mi in mod_list:
vv.append('%s_%s' % (target_m, mi))
vv.append('%s_%s_gen' % (target_m, mi))
data_0 = data_0[vv].dropna()
p25 = np.percentile(data_0[sensitivity_var].values, 25)
p75 = np.percentile(data_0[sensitivity_var].values, 75)
data_dry = data_0[data_0[sensitivity_var] <= p25]
data_med = data_0[(data_0[sensitivity_var] < p75) & (data_0[sensitivity_var] > p25)]
data_wet = data_0[data_0[sensitivity_var] >= p75]
for tag, dd in zip(['Mesic', 'Wet', 'Dry', 'Full'], [data_med, data_wet, data_dry, data_0]):
site_crit = []
for mi, nparams in zip(mod_list, mod_list_nparams):
target_g = '%s_%s_gen' % (target_m, mi)
aic = aikaike_criterion_rss(dd[target_o].values, dd[target_g].values, nparams)
site_crit.append(aic)
x_s = list(np.sort(site_crit))
result['general'][tag]['aic'].append([x_s.index(xi) for xi in site_crit])
for tag, dd in zip(['Mesic', 'Wet', 'Dry', 'Full'], [data_med, data_wet, data_dry, data_0]):
site_crit = []
for mi, nparams in zip(mod_list, mod_list_nparams):
target = '%s_%s' % (target_m, mi)
aic = aikaike_criterion_rss(dd[target_o].values, dd[target].values, nparams)
site_crit.append(aic)
x_s = list(np.sort(site_crit))
result['site'][tag]['aic'].append([x_s.index(xi) for xi in site_crit])
for tag, dd in zip(['Mesic', 'Wet', 'Dry', 'Full'], [data_med, data_wet, data_dry, data_0]):
site_crit = []
for mi in mod_list:
target_g = '%s_%s_gen' % (target_m, mi)
a = cal_it_performance(dd, target_g, target_o, source_1, source_2, nbins=n_bins, norm=1)
site_crit.append(a)
site_crit = list(zip(*site_crit))
for ai, k in zip(site_crit, ['a_p', 'a_fu1', 'a_fu2', 'a_fs', 'a_fr', 'a_ft', 'a_fp']):
result['general'][tag][k].append(ai)
for tag, dd in zip(['Mesic', 'Wet', 'Dry' ,'Full'], [data_med, data_wet, data_dry, data_0]):
site_crit = []
for mi in mod_list:
target = '%s_%s' % (target_m, mi)
a = cal_it_performance(dd, target, target_o, source_1, source_2, nbins=n_bins, norm=1)
site_crit.append(a)
site_crit = list(zip(*site_crit))
for ai, k in zip(site_crit, ['a_p', 'a_fu1', 'a_fu2', 'a_fs', 'a_fr', 'a_ft', 'a_fp']):
result['site'][tag][k].append(ai)
for l in ['site', 'general']:
for k in ['Wet', 'Dry', 'Mesic', 'Full']:
for a in ['aic', 'a_p', 'a_ft', 'a_fu1', 'a_fu2', 'a_fs', 'a_fr', 'a_fp']:
result[l][k][a] = list(zip(*result[l][k][a]))
return result
def fig_prediction(fig_name):
fig_name = os.path.join(fig_dir, fig_name)
fig = plt.figure(figsize=(5.5, 5.5))
result = get_result_dict()
positions=range(1, len(mod_list_labels) + 1)
ax = fig.add_subplot(3, 3, 7)
ax.axhline(0, linestyle=':', color='k')
ax = fig.add_subplot(3, 3, 8)
ax.axhline(0, linestyle=':', color='k')
ax = fig.add_subplot(3, 3, 9)
ax.axhline(0, linestyle=':', color='k')
for spi, (a_i, a_i_name, ymin, ymax) in enumerate([['aic', '(a) AIC rank', 0.5, len(mod_list_labels) + 0.5],
['a_p', r'(b) $A_{p}$', 0.4, 0.85],
['d_a_p', r'(c) $\Delta$ $A_{p}$', -6, 6],
]):
for cii, condition in enumerate(['Dry', 'Mesic', 'Wet']):
ax = fig.add_subplot(3, 3, 1 + cii + 3 * spi)
if a_i == 'aic':
ax.title.set_text(condition)
for i, c in enumerate(model_colors):
if a_i == 'd_a_p':
x = (np.array(result['site'][condition]['a_p'][i]) - np.array(result['general'][condition]['a_p'][i])) * 100
else:
x = result['general'][condition][a_i][i]
if a_i == 'aic':
x = np.array(x) + 1
ax.boxplot([x, ], positions=[positions[i], ], showfliers=False,
whis=whis, widths=0.5, patch_artist=True,
boxprops=dict(facecolor=c, color=c),
medianprops=dict(color='w', lw=0), flierprops=dict(markeredgecolor=c),
capprops=dict(color=c, lw=2), whiskerprops=dict(color=c, lw=2))
else:
ax.boxplot([x, ], positions=[positions[i], ], showfliers=False,
whis=whis, widths=0.5, patch_artist=True,
boxprops=dict(facecolor=c, color=c),
medianprops=dict(color='w', lw=2), flierprops=dict(markeredgecolor=c),
capprops=dict(color=c, lw=2), whiskerprops=dict(color=c, lw=2))
ax.set_xlim([0.5, len(mod_list_labels) + 0.5])
if a_i == 'aic':
ax.set_xticks(positions)
ax.set_yticks(range(1, len(mod_list_labels) + 1))
ax.set_xticklabels([])
elif a_i == 'a_p':
ax.set_xticks(positions)
ax.set_xticklabels([])
ax.set_yticks([0.4, 0.6, 0.8])
else:
ax.set_xticks(positions)
ax.set_yticks([-5, 0, 5])
ax.set_xticklabels(mod_list_labels, fontsize=10)
if condition != 'Dry':
ax.set_yticklabels([])
else:
ax.set_ylabel(a_i_name, fontsize=12)
ax.get_yaxis().set_label_coords(-0.25,0.5)
ax.tick_params(direction='inout')
ax.tick_params(direction='inout')
ax.set_ylim([ymin, ymax])
plt.tight_layout()
plt.savefig('%s.png' % fig_name, dpi=600)
plt.savefig('%s.eps' % fig_name)
def fig_function(fig_name):
fig_name = os.path.join(fig_dir, fig_name)
fig = plt.figure(figsize=(5.5, 8))
result = get_result_dict()
positions=range(1, len(mod_list_labels) + 1)
iter_criteria = [['a_ft', r'(a) $A_{f, T}$', -0.1, 0.85],
['a_fu1', r'(b) $A_{f, \theta}$', -0.125, 0.125],
['a_fu2', r'(c) $A_{f, D}$', -0.05, 0.35],
['a_fs', r'(d) $A_{f, S}$', -0.35, 0.05],
['a_fr', r'(e) $A_{f, R}$', -0.007, 0.007],
]
for spi, (a_i, a_i_name, ymin, ymax) in enumerate(iter_criteria):
for cii, condition in enumerate(['Dry', 'Mesic', 'Wet']):
ax = fig.add_subplot(5, 3, 1 + cii + 3 * spi)
if a_i == 'a_ft':
ax.title.set_text(condition)
ax.axhline(0, linestyle=':', color='k')
for i, c in enumerate(model_colors):
x = result['general'][condition][a_i][i]
ax.boxplot([x, ], positions=[positions[i], ], showfliers=False,
whis=whis, widths=0.5, patch_artist=True,
boxprops=dict(facecolor=c, color=c),
medianprops=dict(color='w', lw=2), flierprops=dict(markeredgecolor=c),
capprops=dict(color=c, lw=2), whiskerprops=dict(color=c, lw=2))
ax.tick_params(direction='inout')
ax.set_ylim([ymin, ymax])
ax.set_xlim([0.5, len(mod_list_labels) + 0.5])
if a_i == 'a_fr':
ax.set_xticks(positions)
ax.set_xticklabels(mod_list_labels, fontsize=10)
else:
ax.set_xticks(positions)
ax.set_xticklabels([])
if condition != 'Dry':
ax.set_yticklabels([])
else:
ax.set_ylabel(a_i_name, fontsize=12)
ax.get_yaxis().set_label_coords(-0.35,0.5)
if (a_i == 'a_fr') and (condition == 'Dry'):
ax.set_yticks([-0.005, 0, 0.005])
ax.set_yticklabels([-0.005, '0', 0.005])
plt.tight_layout()
plt.savefig('%s.png' % fig_name, dpi=600)
plt.savefig('%s.eps' % fig_name)
def fig_scatter_A(fig_name):
fig_name = os.path.join(fig_dir, fig_name)
fig = plt.figure(figsize=(7.5, 7.5))
pft_list = ['NF', 'BF', 'G-C3', 'C-C3']
pft_subix = [3, 4, 7, 8]
mod_list_ = ['SOX_d3', 'CM_d4', 'WUE_d']
colors_ = [model_colors[2], model_colors[1], model_colors[0]]
markers_ = [markers[2], markers[1], markers[0]]
legend_elements = [Line2D([0], [0], marker= markers[0], linestyle='', color=model_colors[0], label=r'$WUE$'),
Line2D([0], [0], marker=markers[1], linestyle='', color=model_colors[1], label=r'$CM$'),
Line2D([0], [0], marker=markers[2], linestyle='', color=model_colors[2], label=r'$SOX$')]
for mod, color, marker in zip(mod_list_, colors_, markers_):
results = []
for site_i in sites:
pft = sites_params[(sites_params['site_id'] == site_i)]['pft'].values[0]
subix = pft_subix[pft_list.index(pft)]
data_file = [os.path.join(res_dir, fi) for fi in os.listdir(res_dir)
if fi.endswith('gen.csv') and (site_i in fi)][0]
data_0 = pd.read_csv(data_file, header = 0, index_col = 0, parse_dates = True,
infer_datetime_format = True,)
target_g = 'LE_%s_gen' % mod
vv = ['LE_F_MDS', 'S', 'VPD_a', target_g ]
data_0 = data_0[vv].dropna()
p25 = np.percentile(data_0['S'].values, 25)
p75 = np.percentile(data_0['S'].values, 75)
data_dry = data_0[data_0['S'] <= p25]
data_med = data_0[(data_0['S'] < p75) & (data_0['S'] > p25)]
data_wet = data_0[data_0['S'] >= p75]
x_fp = []
x_ft = []
x_p = []
for dd in [data_wet, data_dry, data_med]:
a_p, a_fu1, a_fu2, a_fs, a_fr, a_ft, a_f = cal_it_performance(dd, target_g, 'LE_F_MDS', 'S', 'VPD_a', nbins=15, norm=1)
x_fp.append(np.abs(a_fu1) + np.abs(a_fu2) + np.abs(a_fs) + np.abs(a_fr))
x_ft.append(np.abs(a_ft))
x_p.append(np.abs(a_p))
x_fp = np.mean(x_fp)
x_ft = np.mean(x_ft)
x_p = np.mean(x_p)
results.append([x_fp, x_ft, x_p])
ax = fig.add_subplot(2, 2, 1)
ax.scatter(x_fp, x_p, marker=marker, color=color, s=30)
ax = fig.add_subplot(2, 2, 3)
ax.scatter(x_ft, x_p, marker=marker, color=color, s=30)
ax = fig.add_subplot(2, 2, 4)
ax.scatter(x_ft, x_fp, marker=marker, color=color, s=30)
ax = fig.add_subplot(4, 4, subix)
ax.scatter(x_fp, x_p, marker=marker, color=color, s=20)
x_fp, x_ft, x_p = list(zip(*results))
#print (mod[:3], np.round(spearmanr(x_fp, x_p)[0], 2), np.round(spearmanr(x_ft, x_p)[0], 2), np.round(spearmanr(x_fp, x_ft)[0], 2))
ax = fig.add_subplot(2, 2, 1)
ax.set_xlim([0, 0.8])
ax.set_ylim([0.35, 0.85])
ax.set_xticks([0, 0.3, 0.6])
ax.set_yticks([0.4, 0.6, 0.8])
ax.tick_params(direction='inout')
ax.set_ylabel('$A_{p}$', fontsize=14)
ax.set_xlabel(r'$A_{f,P}$', fontsize=14)
plt.legend(handles=legend_elements, frameon=False, ncol=1, fontsize=10, loc='lower right')
ax.text(0.03, 0.80, '(a)', fontsize=14)
ax = fig.add_subplot(2, 2, 3)
ax.set_xlim([0, 0.8])
ax.set_ylim([0.35, 0.85])
ax.set_xticks([0, 0.3, 0.6])
ax.set_yticks([0.4, 0.6, 0.8])
ax.tick_params(direction='inout')
ax.set_ylabel('$A_{p}$', fontsize=14)
ax.set_xlabel('$|A_{f,T}|$', fontsize=14)
ax.text(0.03, 0.80, '(b)', fontsize=14)
ax = fig.add_subplot(2, 2, 4)
ax.set_xlim([0, 0.8])
ax.set_ylim([0, 0.8])
ax.set_xticks([0, 0.3, 0.6])
ax.set_yticks([0, 0.3, 0.6])
ax.tick_params(direction='inout')
ax.set_xlabel('$|A_{f,T}|$', fontsize=14)
ax.set_ylabel(r'$A_{f,P}$', fontsize=14)
ax.text(0.03, 0.73, '(c)', fontsize=14)
for pft, xi in zip(['Needleleaf', 'Broadleaf', 'Grass', 'Crop'], pft_subix):
ax = fig.add_subplot(4, 4, xi)
ax.set_xlim([0, 0.8])
ax.set_ylim([0.35, 0.85])
ax.set_xticks([0, 0.3, 0.6])
ax.set_yticks([0.4, 0.6, 0.8])
ax.tick_params(direction='inout')
if (xi == 3) or (xi == 4):
ax.set_xticklabels([])
if (xi == 8) or (xi == 4):
ax.set_yticklabels([])
ax.plot(0,0, marker='', lw=0, label=pft)
ax.legend(loc='lower right', fontsize=10, frameon=False)
plt.savefig('%s.png' % fig_name, dpi=600)
plt.savefig('%s.eps' % fig_name)
def delta_cond(fig_name=None):
result = get_result_dict()
fig = plt.figure(figsize=(5.5, 10.5))
positions=range(1, len(mod_list_labels) + 1)
for spi, (a_i, a_i_name, ymin, ymax) in enumerate([['a_p', r'(a) $\Delta A_{p}$', None, None],
['a_ft', r'(b) $\Delta A_{f, T}$', None, None],
['a_fp', r'(c) $\Delta A_{f}$', None, None],
['a_fu1', r'(d) $\Delta A_{f, \theta}$', None, None],
['a_fu2', r'(e) $\Delta A_{f, D}$', None, None],
['a_fs', r'(f) $\Delta A_{f, S}$', None, None],
['a_fr', r'(g) $\Delta A_{f, R}$', None, None],
]):
for cii, condition in enumerate(['Wet - Dry', 'Mesic - Dry', 'Mesic - Wet']):
ax = fig.add_subplot(7, 3, 1 + cii + 3 * spi)
ax.axhline(0, linestyle=':', color='k')
if a_i == 'a_p':
ax.title.set_text(condition)
for i, c in enumerate(model_colors):
if condition == 'Wet - Dry':
x = (np.array(result['general']['Wet'][a_i][i]) - np.array(result['general']['Dry'][a_i][i]))
elif condition == 'Mesic - Dry':
x = (np.array(result['general']['Mesic'][a_i][i]) - np.array(result['general']['Dry'][a_i][i]))
elif condition == 'Mesic - Wet':
x = (np.array(result['general']['Mesic'][a_i][i]) - np.array(result['general']['Wet'][a_i][i]))
ax.boxplot([x, ], positions=[positions[i], ], showfliers=False,
whis=whis, widths=0.5, patch_artist=True,
boxprops=dict(facecolor=c, color=c),
medianprops=dict(color='w', lw=2), flierprops=dict(markeredgecolor=c),
capprops=dict(color=c, lw=2), whiskerprops=dict(color=c, lw=2))
print('%s\t%s\t%s\t%-5.1f\t%-5.1f\t%-5.1f\t%-5.1f'%(a_i, mod_list[i], condition, np.mean(x)*100,
|
np.median(x)
|
numpy.median
|
__author__ = 'sibirrer'
import numpy as np
class Catalogue(object):
"""
class which analyses data and fits a psf
"""
def get_source_cat(self, HDUFile):
"""
:param HDUFile:
:return: catalogue
"""
return HDUFile[2]
def get_background(self, HDUFile):
"""
        extracts the mean and rms value of the background computed by SExtractor
        :param HDUFile:
:return: mean, rms
"""
mean, rms = False, False
list = HDUFile[1].data[0][0]
for line in list:
line = line.strip()
line = line.split()
if len(line) > 0:
if line[0] == 'SEXBKGND' or line[0] == 'SEXBKGND=':
mean = float(line[1])
if line[0] == 'SEXBKDEV' or line[0] == 'SEXBKDEV=':
rms = float(line[1])
if mean is False or rms is False:
raise ValueError('no mean and rms value found in list.')
return mean, rms
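    # Illustration (hypothetical header card, not taken from a real file): the loop
    # above splits each card on whitespace, so a line such as
    #   "SEXBKGND=        0.003562 / Median background level"
    # yields line[0] == 'SEXBKGND=' and line[1] == '0.003562', which is parsed as the
    # background mean; 'SEXBKDEV' is handled the same way for the background rms.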
def estimate_star_thresholds(self, cat):
"""
estimates the cuts in the different sextractor quantities
:param cat:
:return:
"""
mag = np.array(cat.data['MAG_BEST'],dtype=float)
size = np.array(cat.data['FLUX_RADIUS'],dtype=float)
#ellipticity = cat.data['ELLIPTICITY']
kwargs_cuts = {}
mag_max = min(np.max(mag), 34)
mag_min = np.min(mag)
delta_mag = mag_max - mag_min
kwargs_cuts['MagMaxThresh'] = mag_max - 0.7*delta_mag
kwargs_cuts['MagMinThresh'] = mag_min #+ 0.01*delta_mag
mask = (mag<mag_max-0.5*delta_mag)
kwargs_cuts['SizeMinThresh'] = max(0, np.min(size[mask]))
kwargs_cuts['SizeMaxThresh'] = max(0, np.min(size[mask])+4)
kwargs_cuts['EllipticityThresh'] = 0.1
kwargs_cuts['ClassStarMax'] = 1.
kwargs_cuts['ClassStarMin'] = 0.5
return kwargs_cuts
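    # Minimal sketch of how the returned cuts could be applied to the catalogue to
    # select star-like sources (hypothetical usage; the actual selection routine is
    # not part of this class, and the ellipticity/CLASS_STAR cuts are omitted here):
    #   cuts = catalogue.estimate_star_thresholds(cat)
    #   mag = np.array(cat.data['MAG_BEST'], dtype=float)
    #   size = np.array(cat.data['FLUX_RADIUS'], dtype=float)
    #   is_star = ((mag > cuts['MagMinThresh']) & (mag < cuts['MagMaxThresh']) &
    #              (size > cuts['SizeMinThresh']) & (size < cuts['SizeMaxThresh']))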
def estimate_galaxy_thresholds(self, cat):
"""
estimates the cuts for selecting some galaxies
        :param cat:
:return:
"""
mag =
|
np.array(cat.data['MAG_BEST'],dtype=float)
|
numpy.array
|
#%% [markdown]
# Functions which need passing to CHL
import glob
from logger import log_init, log, log_dict
import os
os.environ['NUMBA_DISABLE_JIT'] = "0"
import pickle
import platform
from shutil import rmtree
import time
import matplotlib
import numpy as np
from colorama import Fore, Style, init
from numba import jit, njit
from config import Config
from network2 import Network
from methods import mean_squared_error
from printing import (Lexicon, generate_rpm_2_by_2_matrix,
generate_rpm_2_by_3_matrix, generate_rpm_3_by_3_matrix,
is_running_from_ipython, is_paperspace, target, test_matrix)
if not is_running_from_ipython():
if "Darwin" not in platform.platform():
# Must be run before importing matplotlib.pyplot
matplotlib.use('agg')
import seaborn as sns
sns.set(font_scale=0.8)
sns.set_style("whitegrid")
import matplotlib.pyplot as plt
if is_paperspace():
print('{"chart": "Total Loss", "axis": "Epoch"}')
print('{"chart": "Pattern accuracy", "axis": "Epoch"}')
print('{"chart": "Transformation accuracy", "axis": "Epoch"}')
# Colorama init() fixes Windows console, but prevents colours in IPython
#init()
class Plots:
fig1: plt.Figure
ax1: plt.Axes
ax2: plt.Axes
ax3: plt.Axes
@njit
def calculate_error(p1, p2):
"""Loss function loss(target, prediction)"""
return mean_squared_error(p1, p2[:len(p1)])
# features_error = mean_squared_error(p1[6:11], p2[6:11])
# shape_error = cross_entropy(p1[0:6], p2[0:6])
# #loss = 2 * features_error + 0.5 * shape_error
# loss = features_error + shape_error
# return loss
@njit
def calculate_transformation_error(t1, t2):
"""Loss function loss(target, prediction)"""
return mean_squared_error(t1, t2)
@njit
def closest_node_index(node: np.array, nodes: np.ndarray) -> int:
deltas = np.subtract(nodes, node)
distance = np.sum(deltas ** 2, axis=1)
return np.argmin(distance)
@njit
def closest_node(node: np.array, nodes: np.ndarray) -> np.array:
index: int = closest_node_index(node, nodes)
return nodes[index]
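# Worked example for the two helpers above (nearest neighbour by squared Euclidean
# distance; values chosen for illustration only):
#   nodes = np.array([[1.0, 1.0], [0.0, 0.5], [3.0, 3.0]])
#   node = np.array([0.0, 0.0])
# The squared distances are [2.0, 0.25, 18.0], so closest_node_index(node, nodes) == 1
# and closest_node(node, nodes) returns array([0.0, 0.5]).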
def color_on(color: str, condition: bool) -> str:
if condition:
return color
else:
return ''
def color_off() -> str:
return Fore.RESET
def calculate_is_correct(p1, p2, targets):
closest = closest_node(p1, targets)
return np.allclose(closest, p2[:len(p1)])
def collect_statistics(network: Network, E: np.ndarray, P: np.ndarray, A: np.ndarray, epoch: int, data: dict):
"""Reporting function collect_statistics(
E = loss by epoch,
P = num training patterns correct
A = num test patterns [analogies] correct)"""
if epoch == 0:
log(f'Experiment: {network.config.experiment_name}')
log(f'Description: {network.config.experiment_description}')
log()
log('Configuration:')
log_dict(vars(network.config))
checkpoint_frequency = 500
plot_frequency = 500
statistics_frequency = 50 # report every n epochs
if epoch % checkpoint_frequency == 0:
checkpoint = {
'epoch' : epoch,
'network' : network,
'E' : E,
'P' : P,
'A' : A,
'data' : data
}
with open(f'{get_checkpoints_folder(network.config)}/checkpoint.{epoch:05}.pickle', 'wb') as f:
# Pickle the 'data' dictionary using the highest protocol available.
pickle.dump(checkpoint, f, pickle.HIGHEST_PROTOCOL)
f.close()
# prevent memory leak
del checkpoint
if epoch % statistics_frequency == 0:
        # Initialise the statistics series on first use (same keys and order as before).
        for key in ('a',
                    'by0', 'by1', 'by2', 'by3',
                    'aby0', 'aby1', 'aby2', 'aby3',
                    'eby0', 'eby1', 'eby2', 'eby3',
                    'eaby0', 'eaby1', 'eaby2', 'eaby3',
                    '2by2', '2by2_loss', '2by2s', '2by2s_loss',
                    '2by2v', '2by2v_loss', '2by3', '2by3_loss',
                    '3by3', '3by3_loss',
                    't_error', 'tf',
                    'tby0', 'tby1', 'tby2', 'tby3',
                    '22by0', '22by1', '22by2', '22by3',
                    '22by0s', '22by1s',
                    'o_error', 'a_error'):
            if key not in data:
                data[key] = []
e = 0. # total loss for this epoch
sum_t_error = 0. # loss for transformation
sum_o_error = 0. # loss for output
sum_a_error = 0. # loss for analogies
min_error = network.config.min_error
max_epochs = network.config.max_epochs
num_correct = 0
num_correct_by_num_modifications = [0, 0, 0, 0]
num_correct_22_by_num_modifications = [0, 0, 0, 0]
num_correct_22s_by_size = [0, 0]
is_max_num_correct_by_num_modifications = [False, False, False, False]
is_max_num_correct_22_by_num_modifications = [False, False, False, False]
is_max_num_analogies_correct_by_num_modifications = [False, False, False, False]
is_min_e_by_num_modifications = [False, False, False, False]
is_min_e_analogies_by_num_modifications = [False, False, False, False]
is_max_num_correct_22s_by_size = [False, False]
e_by_num_modifications = [0., 0., 0., 0.]
num_analogies_correct = 0
num_analogies_correct_by_num_modifications = [0, 0, 0, 0]
e_analogies_by_num_modifications = [0., 0., 0., 0.]
num_total_patterns_by_num_modifications = [0, 0, 0, 0]
num_total_22_by_num_modifications = [0, 0, 0, 0]
num_total_22s_by_size = [0, 0]
num_transformations_correct = 0
num_total_transformations_by_type = [0, 0, 0, 0]
num_correct_by_transformation = [0, 0, 0, 0]
is_max_num_correct_by_transformation = [False, False, False, False]
targets = np.asarray([target(p)[:network.n_inputs] for p in network.patterns])
analogy_targets = np.asarray([target(a)[:network.n_inputs] for a in network.analogies])
#a_targets = np.asarray([target(a) for a in network.analogies])
for p, a, c in zip(network.patterns, network.analogies, np.asarray(network.candidates)):
t = target(p)
t_error = 0 # the amount of error for the current transformation
o_error = 0 # the amount of error for the current output object
process_transformation_error = True
process_analogy_error = True
process_2_by_2 = True
process_2_by_2_bysize = True and hasattr(network, 'tuples_22s')
process_2_by_3 = True
process_3_by_3 = True
process_2_by_2_vertical = False
# Calculate loss on the training data.
# Present the network with input and transformation.
# Clamp input and transformation.
# Let the network settle.
# Calculate loss (i.e., distance of prediction from target)
# r = network.calculate_transformation(p, t)
# o_error = calculate_transformation_error(p[-network.n_transformation:], network.t[0])
# is_correct = False
r = network.calculate_response(p)
o_error = calculate_error(r, t)
is_correct = calculate_is_correct(r, t, targets)
sum_o_error += o_error
num_modifications = (p[-4:] != 0.5).sum()
num_total_patterns_by_num_modifications[num_modifications] += 1
if is_correct:
num_correct += 1
num_correct_by_num_modifications[num_modifications] += 1
e_by_num_modifications[num_modifications] += o_error + t_error
if process_transformation_error:
# Prime the network, that is, present object p and output target(p).
# Do not present any transformation. Set the transformation to rest.
# Clamp input and output. Do not clamp transformation.
# Let the network settle.
target_tf = p[-network.n_transformation:]
num_total_transformations_by_type = np.add(num_total_transformations_by_type, [x != 0.5 for x in target_tf])
tf = network.calculate_transformation(p, t)
is_correct_tf = calculate_is_correct(tf, target_tf, network.transformations)
if is_correct_tf:
num_transformations_correct += 1
num_correct_by_transformation = np.add(num_correct_by_transformation, [x != 0.5 for x in target_tf])
t_error = calculate_transformation_error(tf, target_tf)
sum_t_error += t_error
# total error for object + transformation
e += o_error + t_error
if process_analogy_error:
# Now calculate the response of the primed network for new input a.
# Clamp input only. Set output to rest.
# (Leech paper says to set transformation to rest too.)
# Let the network settle.
r, t = complete_analogy_22(network, p, a)
a_error = calculate_error(r, t)
at_error = calculate_transformation_error(a[-network.n_transformation:], network.t[0])
is_correct = calculate_is_correct(r, t, analogy_targets)
num_modifications = (p[-4:] != 0.5).sum()
if is_correct:
num_analogies_correct += 1
num_analogies_correct_by_num_modifications[num_modifications] += 1
e_analogies_by_num_modifications[num_modifications] += a_error + at_error
sum_a_error += a_error
E.append(e)
P.append(num_correct)
A.append(num_analogies_correct)
data['a'].append(100 * num_analogies_correct / len(network.analogies))
data['tf'].append(num_transformations_correct)
data['t_error'].append(sum_t_error)
data['o_error'].append(sum_o_error)
data['a_error'].append(sum_a_error)
percentage_breakdown = [100*x[0]/x[1] if x[1] > 0 else 0 for x in zip(num_correct_by_num_modifications, num_total_patterns_by_num_modifications)]
for i, x in enumerate(percentage_breakdown):
label = f'by{i}'
data[label].append(percentage_breakdown[i])
is_max_num_correct_by_num_modifications[i] = percentage_breakdown[i] > 0.0 and percentage_breakdown[i] == max(data[label])
percentage_breakdown = [100*x[0]/x[1] if x[1] > 0 else 0 for x in zip(num_analogies_correct_by_num_modifications, num_total_patterns_by_num_modifications)]
for i, x in enumerate(percentage_breakdown):
label = f'aby{i}'
data[label].append(percentage_breakdown[i])
is_max_num_analogies_correct_by_num_modifications[i] = percentage_breakdown[i] > 0.0 and percentage_breakdown[i] == max(data[label])
for i, x in enumerate(e_by_num_modifications):
label = f'eby{i}'
data[label].append(e_by_num_modifications[i])
is_min_e_by_num_modifications[i] = any(data[label]) and e_by_num_modifications[i] == min(data[label])
for i, x in enumerate(e_analogies_by_num_modifications):
label = f'eaby{i}'
data[label].append(e_analogies_by_num_modifications[i])
is_min_e_analogies_by_num_modifications[i] = any(data[label]) and e_analogies_by_num_modifications[i] == min(data[label])
for i, x in enumerate(num_correct_by_transformation):
label = f'tby{i}'
data[label].append(num_correct_by_transformation[i])
is_max_num_correct_by_transformation[i] = num_correct_by_transformation[i] > 0 and num_correct_by_transformation[i] == max(data[label])
correct_by_num_modifications = [f'{color_on(Fore.GREEN, x[2])}{x[0]}{color_off()}/{x[1]} {color_on(Fore.GREEN, x[2])}{100*x[0]/x[1] if x[1] > 0 else 0:.1f}%{color_off()}' for x in zip(num_correct_by_num_modifications, num_total_patterns_by_num_modifications, is_max_num_correct_by_num_modifications)]
analogies_by_num_modifications = [f'{color_on(Fore.GREEN, x[2])}{x[0]}{color_off()}/{x[1]} {color_on(Fore.GREEN, x[2])}{100*x[0]/x[1] if x[1] > 0 else 0:.1f}%{color_off()}' for x in zip(num_analogies_correct_by_num_modifications, num_total_patterns_by_num_modifications, is_max_num_analogies_correct_by_num_modifications)]
loss_by_num_modifications = [f'{color_on(Fore.RED, x[1])}{x[0]:.3f}{color_off()}' for x in zip(e_by_num_modifications, is_min_e_by_num_modifications)]
loss_analogies_by_num_modifications = [f'{color_on(Fore.RED, x[1])}{x[0]:.3f}{color_off()}' for x in zip(e_analogies_by_num_modifications, is_min_e_analogies_by_num_modifications)]
correct_transformations_by_type = [f'{color_on(Fore.GREEN, x[2])}{x[0]}{color_off()}/{x[1]} {color_on(Fore.GREEN, x[2])}{100*x[0]/x[1] if x[1] > 0 else 0:.1f}%{color_off()}' for x in zip(num_correct_by_transformation, num_total_transformations_by_type, is_max_num_correct_by_transformation)]
tuples_22 = network.tuples_22
if hasattr(network, 'tuples_22s'):
tuples_22s = network.tuples_22s
tuples_23 = network.tuples_23
tuples_33 = network.tuples_33
log()
log(f'Epoch = {epoch} of {max_epochs}, Loss = {color_on(Fore.RED, e == min(E[1:]))}{e:.3f}{color_off()}, O/T = {color_on(Fore.RED, sum_o_error == min(data["o_error"]))}{sum_o_error:.3f}{color_off()}/{color_on(Fore.RED, sum_t_error == min(data["t_error"]))}{sum_t_error:.3f}{color_off()}, Terminating when < {min_error * len(network.patterns):.3f}')
log(f'Patterns = {color_on(Fore.GREEN, num_correct == max(P))}{num_correct:>5}{color_off()}/{len(network.patterns):>5}, breakdown = {" ".join(correct_by_num_modifications)}')
log(f' Loss = {color_on(Fore.RED, any(data["o_error"]) and sum_o_error == min(data["o_error"]))}{sum_o_error:>11.3f}{color_off()}, breakdown = {" ".join(loss_by_num_modifications)}')
log(f'Transforms = {color_on(Fore.GREEN, num_transformations_correct == max(data["tf"]))}{num_transformations_correct:>5}{color_off()}/{len(network.patterns):>5}, breakdown = {" ".join(correct_transformations_by_type)} (sz, rt, sh, no)')
log(f' Loss = {color_on(Fore.RED, any(data["t_error"]) and sum_t_error == min(data["t_error"]))}{sum_t_error:>11.3f}{color_off()}')
log(f'Analogies = {color_on(Fore.GREEN, num_analogies_correct == max(A))}{num_analogies_correct:>5}{color_off()}/{len(network.analogies):>5}, breakdown = {" ".join(analogies_by_num_modifications)}')
log(f' Loss = {color_on(Fore.RED, any(data["a_error"]) and sum_a_error == min(data["a_error"]))}{np.sum(e_analogies_by_num_modifications):>11.3f}{color_off()}, breakdown = {" ".join(loss_analogies_by_num_modifications)}')
if process_2_by_2:
#matrix, test, transformation1, transformation2, analogy
num_correct_22 = 0
loss_22 = 0
patterns_22, analogies_22, candidates_22 = [np.concatenate((item[2], item[3])) for item in network.tuples_22], [np.concatenate((item[4], item[3])) for item in tuples_22], np.asarray([item[1] for item in tuples_22])
#targets_2_by_3 = np.asarray([target(np.concatenate([target(a), t2])) for a, t2 in zip(analogies_23, transformations2)])
for p, a, candidates_for_pattern in zip(patterns_22, analogies_22, candidates_22):
prediction, actual = complete_analogy_22(network, p, a)
loss_22 += calculate_error(prediction, actual)
is_correct_22 = calculate_is_correct(prediction, actual, candidates_for_pattern)
num_modifications = (p[-4:] != 0.5).sum()
num_total_22_by_num_modifications[num_modifications] += 1
if is_correct_22:
num_correct_22 += 1
num_correct_22_by_num_modifications[num_modifications] += 1
percentage_breakdown = [100*x[0]/x[1] if x[1] > 0 else 0 for x in zip(num_correct_22_by_num_modifications, num_total_22_by_num_modifications)]
for i, x in enumerate(percentage_breakdown):
label = f'22by{i}'
data[label].append(percentage_breakdown[i])
is_max_num_correct_22_by_num_modifications[i] = percentage_breakdown[i] > 0.0 and percentage_breakdown[i] == max(data[label])
correct_22_by_num_modifications = [f'{color_on(Fore.GREEN, x[2])}{x[0]}{color_off()}/{x[1]} {color_on(Fore.GREEN, x[2])}{100*x[0]/x[1] if x[1] > 0 else 0:.1f}%{color_off()}' for x in zip(num_correct_22_by_num_modifications, num_total_22_by_num_modifications, is_max_num_correct_22_by_num_modifications)]
data['2by2'].append(num_correct_22)
data['2by2_loss'].append(loss_22)
log(f'2x2 = {color_on(Fore.GREEN, num_correct_22 == max(data["2by2"]))}{num_correct_22:>5}{color_off()}/{100:>5}, breakdown = {" ".join(correct_22_by_num_modifications)}')
log(f' Loss = {color_on(Fore.RED, loss_22 == min(data["2by2_loss"]))}{loss_22:>11.3f}{color_off()}')
if process_2_by_2_bysize:
#matrix, test, transformation1, transformation2, analogy
num_correct_22s = 0
loss_22s = 0
patterns_22s, analogies_22s, candidates_22s = [np.concatenate((item[2], item[3])) for item in network.tuples_22s], [np.concatenate((item[4], item[3])) for item in tuples_22s],
|
np.asarray([item[1] for item in tuples_22s])
|
numpy.asarray
|
# -*- coding: utf-8 -*-
'''SHORT-CIRCUIT CURRENT CALCULATION MODULE (MRTKZ)
Version 3.15
Saratov, 27.01.2021
Change history
27.01.2021
 - Code refactoring: removed the intermediate array that was used to sum the B/2
   of the branches connected to a node together with the node's own admittance Y;
 - Added a method that detects dangling nodes, branches, mutual couplings and
   unbalances with no connection to ground, which make the assembled system of
   linear equations degenerate (singular), i.e. impossible to solve.
   Parallel branches with zero impedance (bus-coupler and bus-section breakers)
   also lead to singularity, but this method cannot detect them; to avoid the
   singularity, use a small impedance instead of 0 on such breakers,
   e.g. 0.001 Ohm.
   mdl.Test4Singularity();
 - Found and fixed a logic error in the condition that controls printing of the
   currents flowing through the node shunt admittance.
22.01.2021
 - Per-node result output adjusted to account for the newly supported node
   shunt admittance
 - The sign < was replaced with ∠ in the result tables
18.01.2021
 - Added support for admittances Y (S) connected to a node, e.g. compensating
   reactors (previously, and still, they can also be modelled as branches to
   ground);
 - Added support for current sources J (A), which is needed to represent wind
   and solar power plants and other sources with a DC link and subsequent
   inversion to the 50 Hz power frequency in the network equivalent circuit.
   Q(model,name,Y=(Y1,Y2,Y0))
   Q(model,name,J=(J1,J2,J0))
   Q(model,name,Y=(Y1,Y2,Y0),J=(J1,J2,J0))
03.12.2020
 - The mdl.Calc method was optimised for lower memory use and higher speed by
   abandoning python lists in favour of numpy vectors when building the
   coordinate-format sparse matrix;
 - Phase, phase-to-phase (line) and other quantities are now computed by matrix
   multiplication of the symmetrical-component results with the transformation
   matrices (Ms2f - for phase quantities, Ms2ff - for phase-to-phase (line)
   quantities);
 - When assembling the sparse matrix, the 'N0' fault and 'N0' open-phase types
   were moved to the front of the unbalance-type checks for speed, as they are
   the most frequent ones in a model;
 - The result-output code was optimised and the output order changed: phase
   values come first, then symmetrical components and finally phase-to-phase
   (line) quantities. Code duplication was reduced.
28.11.2020
1. The Model class gained the ImportFromPVL(PVL_Sech) method, intended for
   importing overhead-line equivalent-circuit parameters calculated with the
   PVL module
   mdl.ImportFromPVL(PVL_Sech)
   where PVL_Sech is a reference to a cross-section (an object of the sech
   class of the PVL module)
18.11.2020
1. Added descriptions of the MRTKZ functions, classes and methods, including a
   detailed description of the mdl.Calc() algorithm
2. Added special unbalance types:
 - zero-sequence fault - 'N0' for modelling neutral grounding behind a Yg/D
   transformer or behind the corresponding winding pair
 - zero-sequence open phase - 'N0' for modelling a network with an isolated
   neutral; it is placed on the branch separating the solidly or effectively
   grounded network from the isolated-neutral network
3. Verified the modelling of branches with shunt capacitive susceptance B:
   if pl62w+ or similar software reports, for example,
   B1 = B2 = 90 uS (1/Ohm*10^-6), B0 = 60 uS (1/Ohm*10^-6)
   then the branch should be created with the capacitive susceptance parameters
   B=(90e-6j,90e-6j,60e-6j)
4. Verified the modelling of transformer branches
   T=(Ktrans,GrT) - dimensionless parameters of a transformer branch:
   Ktrans - turns ratio of the power transformer, e.g. 115/11
   GrT - vector group of the winding connected to node 2 (0 to 11)
   For example, for a transformer with rated winding voltages of 115 and 10.5 kV
   and a Y/D-11 winding connection, the transformer branch parameters should be
   T=(115/10.5,11)
5. Added new Model class methods
 - AddNQ for adding nodes in bulk (a node cross-section)
   mdl.AddNQ(NQ,Nname)
   where NQ - number of nodes to create
   Nname - common node name; the number within the cross-section
   (from 1 to NQ) is appended to make each node name unique
 - AddNP for adding branches in bulk (a branch cross-section), including
   branches with shunt capacitive susceptance B and zero-sequence mutual
   coupling
   mdl.AddNP(self,Nname,listq1,listq2,Z12,Z0) - without capacitive susceptance
   mdl.AddNP(self,Nname,listq1,listq2,Z12,Z0,B12,B0) - with capacitive susceptance
   where listq1 and listq2 - cross-sections (lists) of nodes
   Nname - common name of the branch cross-section; the number within the
   cross-section (from 1 to N, where N is the number of branches created)
   is appended to make each branch name unique
   Z12 - numpy.ndarray vector of positive/negative-sequence branch impedances
   Z0 - square numpy.ndarray matrix of zero-sequence branch impedances and
   mutual couplings
   B12 - numpy.ndarray vector of positive/negative-sequence shunt capacitive
   susceptances
   B0 - square numpy.ndarray matrix of zero-sequence shunt capacitive
   susceptances
'''
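# A minimal usage sketch based on the documentation above, kept as comments. The
# Model constructor signature is not shown in this excerpt, so the call below is an
# assumption, and all numeric values are illustrative only:
#   mdl = Model()                                              # hypothetical constructor call
#   sys1 = Q(mdl, 'System bus')                                # nodes
#   bus2 = Q(mdl, 'Load bus')
#   P(mdl, 'System', 0, sys1, (1j, 1j, 2j), E=(63508, 0, 0))   # grounded equivalent source
#   P(mdl, 'Line 1-2', sys1, bus2, (2+8j, 2+8j, 6+24j))        # simple line branch
#   flt = N(mdl, 'K1', bus2, 'A0')                             # phase-A-to-ground fault
#   mdl.Calc()                                                 # solve the network
#   flt.res()                                                  # print fault results
#   bus2.res('3U0', 'M')                                       # magnitude of 3U0 at the node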
import numpy as np
from scipy.sparse import csc_matrix
from scipy.sparse.linalg import spsolve
Kf = -1j*np.pi/6
r2d = 180/np.pi
a = -0.5 + 0.5j*np.sqrt(3)
a2 = -0.5 - 0.5j*np.sqrt(3)
a0 = 1.0+0.0j
vA = np.array([a0,a0,a0])
vB = np.array([a2,a ,a0])
vC = np.array([a, a2,a0])
vAB = vA - vB
vBC = vB - vC
vCA = vC - vA
vABC = np.concatenate((vA,vB,vC))
vBCA = np.concatenate((vB,vC,vA))
vCAB = np.concatenate((vC,vA,vB))
Ms2f = np.array([vA,vB,vC])
Ms2ff = np.array([vAB,vBC,vCA])
arr012 = np.array([0,1,2])
arr000 = np.array([0,0,0])
arr111 = arr000+1
arr222 = arr000+2
arr_111 = -arr111
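# Note on the constants above: a = exp(j*120 deg), so Ms2f implements the standard
# symmetrical-components synthesis U_phase = Ms2f @ [U1, U2, U0], i.e.
#   UA = U1 + U2 + U0
#   UB = a2*U1 + a*U2 + U0
#   UC = a*U1 + a2*U2 + U0
# and Ms2ff gives the corresponding phase-to-phase (line) quantities
# UAB = UA - UB, UBC = UB - UC, UCA = UC - UA.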
class Q:
'''Класс трехфазного электрического узла, необходим для формирования расчетной
модели и получения результатов расчета
Создание узла с помощью конструктора
Q(model,name,desc='')
Q(model,name)
где:
model - объект расчетной модели в которой создается узел
name - краткое название узла, обращение к узлу по его имени не предусмотрено
desc - Примечание или любая другая текстовая информация, можно не задавать.
Результатом конструктора узла является объект узла, который используется для
формирования расчетной модели и вывода результатов расчетов
Пользовательские функции для объекта узла q
Вывод на экран параметров узла - его номера и названия
q.par()
Вывод сводной таблицы результатов расчетов для узла q
q.res()
Вывод конкретного параметра ParName в виде компексного числа
для последующего использования в расчетах
q.res(ParName)
где ParName может принимать значения:
'U1','U2','U0','3U0','UA','UB','UC','UABC','UAB','UBC','UCA','UAB_BC_CA'
Вывод конкретного параметра ParName в заданной форме Form:
q.res(ParName,Form)
где Form может принимать значения
'R' - Активная составляющая
'X' - Реактивная составляющая
'M' - Модуль комплексного числа
'<f' - Фаза вектора в градусах
'R+jX' - Текстовый вид комплексного числа
'M<f' - Текстовый вид комплексного числа
Еще один способ получения конкректного параметра результата в виде
компексного числа для его последующего использования в расчетах
q.ParName
где ParName может принимать значения:
U1,U2,U0,UA,UB,UC,UABC,UAB,UBC,UCA,UAB_BC_CA'''
def __init__(self,model,name,Y=(0,0,0),J=(0,0,0),desc=''):
''' Конструктор объекта узла
Q(model,name,desc='')
Q(model,name)
Q(model,name,Y=(Y1,Y2,Y0))
Q(model,name,J=(J1,J2,J0))
Q(model,name,Y=(Y1,Y2,Y0),J=(J1,J2,J0))
где:
model - объект расчетной модели в которой создается узел
name - краткое название узла, обращение к узлу по его имени не предусмотрено
Y = (Y1,Y2,Y0) - проводимость в узле на землю, См
J = (J1,J2,J0) - источник тока подключенный к узлу, А
(положительное направление источника тока - "в узел")
desc - Примечание или любая другая текстовая информация, можно не задавать.
Результатом конструктора узла является объект узла, который используется для
формирования расчетной модели и вывода результатов расчетов'''
if not isinstance(model, Model):
raise TypeError('Ошибка при добавлении узла -', name, '\n',
'Аргумент model должен иметь тип Model!')
model.nq += 1
model.bq.append(self)
self.id = model.nq
self.model = model
self.name = name
self.Y = Y
self.J = J
self.desc = desc
self.plist = []
self.kn = None
def Test4Singularity(self):
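        # (Added note) This service method appears to clear the 'singulare' flag on this
        # node and then recurse into every node reachable through its connected branches,
        # so that the Model-level mdl.Test4Singularity() check can flag as singular any
        # nodes that were never reached (i.e. parts of the network with no path to
        # ground). The flag itself is expected to be set beforehand by the Model-level
        # check rather than by this class.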
if self.singulare:
self.singulare = False
for pk in self.plist:
if pk.q1 is pk.q2:
pass
elif pk.q1 is self:
if isinstance(pk.q2, Q):
pk.q2.Test4Singularity()
elif pk.q2 is self:
if isinstance(pk.q1, Q):
pk.q1.Test4Singularity()
def addp(self,kp):
'''Служебный метод, предназачен для информирования узла о подключенных к нему ветвей'''
self.plist.append(kp)
def update(self):
'''Служебный метод, предназачен для проверки информации о подключенных к узлу ветвей'''
temp_plist = self.plist
self.plist = []
for kp in temp_plist:
if (kp.q1 is self) or (kp.q2 is self):
self.plist.append(kp)
def setn(self,kn):
'''Служебный метод, предназачен для информирования узла о наличии КЗ в данном узле'''
self.kn = kn
def par(self):
'''Вывод на экран параметров узла - его номера и названия'''
print('Узел №', self.id, ' - ', self.name)
def getres(self):
'''Служебный метод, возвращает результат расчета по данному узлу -
напряжения прямой, обратной и нулевой последовательностей, тоже что и q.res('U120')'''
if self.model is None:
raise ValueError('Ошибка при выводе результатов расчетов Узла №', self.id, ' - ', self.name, '\n',
'Узел не принадлежит какой либо модели!')
if self.model.X is None:
raise ValueError('Ошибка при выводе результатов расчетов Узла №', self.id, ' - ', self.name, '\n',
'Не произведен расчет электрических величин!')
qId = 3*(self.model.np+self.id-1)
return self.model.X[qId:qId+3]
def res(self,parname='',subpar=''):
'''Вывод сводной таблицы результатов расчетов для узла q
q.res()
Вывод конкретного параметра ParName в виде компексного числа
для последующего использования в расчетах
q.res(ParName)
где ParName может принимать значения:
'U1','U2','U0','3U0','UA','UB','UC','UABC','UAB','UBC','UCA','UAB_BC_CA'
Вывод конкретного параметра ParName в заданной форме Form:
q.res(ParName,Form)
где Form может принимать значения
'R' - Активная составляющая
'X' - Реактивная составляющая
'M' - Модуль комплексного числа
'<f' - Фаза вектора в градусах
'R+jX' - Текстовый вид комплексного числа
'M<f' - Текстовый вид комплексного числа'''
u120 = self.getres()
i120 = np.array(self.Y) * u120
if parname=='':
print('Узел № {} - {}'.format(self.id, self.name))
print(StrU(u120))
if (i120 != np.zeros(3)).any():
print("Значения токов проводимости узла")
print(StrI(i120))
else:
res = mselectz[parname](u120,i120)
if isinstance(res, np.ndarray):
res = mform3[subpar](res,parname)
else:
res = mform1[subpar](res,parname)
return res
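    # Examples of the calls described in the docstring above (illustrative only):
    #   q.res()              # prints the summary table for the node
    #   q.res('UA')          # phase-A voltage as a complex number
    #   q.res('3U0', 'M')    # magnitude of 3U0
    #   q.res('U1', 'M<f')   # positive-sequence voltage as a "magnitude∠angle" string
    #   q.U1                 # same value via attribute access (see __getattr__ below)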
def __getattr__(self, attrname):
'''Еще один способ получения конкректного параметра результата в виде
компексного числа для его последующего использования в расчетах
q.ParName
где ParName может принимать значения:
U1,U2,U0,UA,UB,UC,UABC,UAB,UBC,UCA,UAB_BC_CA'''
u120 = self.getres()
i120 = np.array(self.Y) * u120
return mselectz[attrname](u120,i120)
def __repr__(self):
'''Еще один способ вывода сводной таблицы результатов расчетов для узла q
В командной строке интерпретара набрать название переменной объекта узла и нажать Enter
q Enter'''
u120 = self.getres()
i120 = np.array(self.Y) * u120
strres = []
strres.append("Узел № {} - {}\n".format(self.id, self.name))
strres.append(StrU(u120))
if (i120 != np.zeros(3)).any():
strres.append("Значения токов проводимости узла")
strres.append(StrI(i120))
return ''.join(strres)
class P:
'''Класс трехфазной ветви, необходим для формирования расчетной модели
и получения результатов расчета
Создание ветви с помощью конструктора
P(model,name,q1,q2,Z) - простая ветвь
P(model,name,q1,q2,Z,desc='Примечание') - ветвь с текстовым примечанием
P(model,name,q1,q2,Z,E=(E1,E2,E0)) - ветвь представляющая энергосистему, генератор (Вольт - фазные)
P(model,name,q1,q2,Z,B=(B1,B2,B0)) - ветвь c наличием поперечной емкостной проводимостью B/2 (См)
P(model,name,q1,q2,Z,T=(Ktrans,GrT)) - ветвь представляющая трансформатор
где:
model - объект расчетной модели в которой создается ветвь
name - краткое название ветви, обращение к ветви по ее имени не предусмотрено
q1,q2 - число 0, что означает подключение ветви соответствующим концом к земле,
объект узла принадлежащего той же расчетной модели
desc - Примечание или любая другая текстовая информация, можно не задавать.
Z=(Z1,Z2,Z0) - комплексные сопротивление ветви (Ом) прямой, обратной и нулевой последовательностей
E=(E1,E2,E0) - комплексные фазные значения Э.Д.С. (Вольт) прямой, обратной и нулевой последовательностей
B=(B1,B2,B0) - комплексные значения поперечной емкостной проводимости B (См)
прямой, обратной и нулевой последовательностей,
если pl62w+ или аналогичная выдает Например
B1 = В2 = 90 мкСм (1/Ом*10^-6), B0 = 60 мкСм (1/Ом*10^-6)
то при создании ветви надо заполнять параметры ветви
B=(90e-6j,90e-6j,60e-6j)
T=(Ktrans,GrT) - безразмерные параметры трансформаторной ветви:
Ktrans - коэффициент трансформации силового трансформатора
GrT - группа обмоток обмотки подключенной к узлу 2 (от 0 до 11)
Результатом конструктора ветви является объект ветви, который используется для
формирования расчетной модели и вывода результатов расчетов
Изменить параметры ветви p можно с помощью метода
p.edit(name,q1,q2,Z)
p.edit(name,q1,q2,Z,desc='Примечание')
p.edit(name,q1,q2,Z,E=(E1,E2,E0))
p.edit(name,q1,q2,Z,B=(B1,B2,B0))
p.edit(name,q1,q2,Z,T=(Ktrans,GrT))
Пользовательские функции для объекта ветви p
Вывод на экран параметров ветви - ее номера, названия, номеров и наименований узлов к которым она подключена,
электрических параметров Z,E,B и T
p.par()
Вывод сводной таблицы результатов расчетов для ветви p
со стороны 1-ого и 2-ого узла соответственно (направление токов и пр. в линию)
p.res1()
p.res2()
Вывод конкретного параметра ParName в виде компексного числа
для последующего использования в расчетах
со стороны 1-ого и 2-ого узла соответственно (направление токов и пр. в линию)
p.res1(ParName)
p.res2(ParName)
где ParName может принимать значения:
'U1','U2','U0','3U0','UA','UB','UC','UABC','UAB','UBC','UCA','UAB_BC_CA',
'I1','I2','I0','3I0','IA','IB','IC','IABC','IAB','IBC','ICA','IAB_BC_CA',
'Z1','Z2','Z0','ZA','ZB','ZC','ZABC','ZAB','ZBC','ZCA','ZAB_BC_CA',
'S1','S2','S0','SA','SB','SC','SABC','SAB','SBC','SCA','SAB_BC_CA','S'
Вывод конкретного параметра ParName в заданной форме Form:
p.res1(ParName,Form)
p.res2(ParName,Form)
где Form может принимать значения
'R' - Активная составляющая
'X' - Реактивная составляющая
'M' - Модуль комплексного числа
'<f' - Фаза вектора в градусах
'R+jX' - Текстовый вид комплексного числа
'M<f' - Текстовый вид комплексного числа
Еще один способ получения конкректного параметра результата в виде
компексного числа для его последующего использования в расчетах
p.ParName
где ParName может принимать значения:
значения токов от 1-ого ко 2-ому узлу без учета емкостной проводимости
I1,I2,I0,I120,IA,IB,IC,IABC,IAB,IBC,ICA,IAB_BC_CA
со стороны 1-ого узла
q1U1,q1U2,q1U0,q1U120,q1UA,q1UB,q1UC,q1UABC,q1UAB,q1UBC,q1UCA,q1UAB_BC_CA,
q1I1,q1I2,q1I0,q1I120,q1IA,q1IB,q1IC,q1IABC,q1IAB,q1IBC,q1ICA,q1IAB_BC_CA,
q1Z1,q1Z2,q1Z0,q1Z120,q1ZA,q1ZB,q1ZC,q1ZABC,q1ZAB,q1ZBC,q1ZCA,q1ZAB_BC_CA,
q1S1,q1S2,q1S0,q1S120,q1SA,q1SB,q1SC,q1SABC,q1SAB,q1SBC,q1SCA,q1SAB_BC_CA,q1S
со стороны 2-ого узла
q2U1,q2U2,q2U0,q1U120,q2UA,q2UB,q2UC,q2UABC,q2UAB,q2UBC,q2UCA,q2UAB_BC_CA,
q2I1,q2I2,q2I0,q1I120,q2IA,q2IB,q2IC,q2IABC,q2IAB,q2IBC,q2ICA,q2IAB_BC_CA,
q2Z1,q2Z2,q2Z0,q1Z120,q2ZA,q2ZB,q2ZC,q2ZABC,q2ZAB,q2ZBC,q2ZCA,q2ZAB_BC_CA,
q2S1,q2S2,q2S0,q1S120,q2SA,q2SB,q2SC,q2SABC,q2SAB,q2SBC,q2SCA,q2SAB_BC_CA,q2S
'''
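    # Branch construction examples based on the docstring above (all numeric values
    # are illustrative only):
    #   P(mdl, 'Line', q1, q2, (2+8j, 2+8j, 6+24j),
    #     B=(90e-6j, 90e-6j, 60e-6j))                     # line with shunt susceptance B
    #   P(mdl, 'System', 0, q1, (1j, 1j, 2j),
    #     E=(63508, 0, 0))                                # grounded equivalent source
    #   P(mdl, 'T-1', q1, q2, (0.5+10j, 0.5+10j, 0.5+10j),
    #     T=(115/10.5, 11))                               # Y/D-11 transformer, 115/10.5 kV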
def __init__(self,model,name,q1,q2,Z,E=(0, 0, 0),T=(1, 0),B=(0, 0, 0),desc=''):
''' Конструктор ветви
P(model,name,q1,q2,Z) - простая ветвь
P(model,name,q1,q2,Z,desc='Примечание') - ветвь с текстовым примечанием
P(model,name,q1,q2,Z,E=(E1,E2,E0)) - ветвь представляющая энергосистему, генератор (Вольт - фазные)
P(model,name,q1,q2,Z,B=(B1,B2,B0)) - ветвь c наличием поперечной емкостной проводимостью B/2 (См)
P(model,name,q1,q2,Z,T=(Ktrans,GrT)) - ветвь представляющая трансформатор
где:
model - объект расчетной модели в которой создается ветвь
name - краткое название ветви, обращение к ветви по ее имени не предусмотрено
q1,q2 - число 0, что означает подключение ветви соответствующим концом к земле,
объект узла принадлежащего той же расчетной модели
desc - Примечание или любая другая текстовая информация, можно не задавать.
Z=(Z1,Z2,Z0) - комплексные сопротивление ветви (Ом) прямой, обратной и нулевой последовательностей
E=(E1,E2,E0) - комплексные фазные значения Э.Д.С. (Вольт) прямой, обратной и нулевой последовательностей
B=(B1,B2,B0) - комплексные значения поперечной емкостной проводимости B (См)
прямой, обратной и нулевой последовательностей,
если pl62w+ или аналогичная выдает Например
B1 = В2 = 90 мкСм (1/Ом*10^-6), B0 = 60 мкСм (1/Ом*10^-6)
то при создании ветви надо заполнять параметры ветви
B=(90e-6j,90e-6j,60e-6j)
T=(Ktrans,GrT) - безразмерные параметры трансформаторной ветви:
Ktrans - коэффициент трансформации силового трансформатора
GrT - группа обмоток обмотки подключенной к узлу 2 (от 0 до 11)
Результатом конструктора ветви является объект ветви, который используется для
формирования расчетной модели и вывода результатов расчетов'''
if not isinstance(model, Model):
raise TypeError('Ошибка при добавлении ветви -', name, '\n',
'Аргумент model должен иметь тип Model!')
if isinstance(q1, int):
if q1 != 0:
raise ValueError('Ошибка при добавлении ветви -', name, '\n',
'Для подключения ветви к земле q1=0')
elif isinstance(q1, Q):
if not q1.model is model:
raise ValueError('Ошибка при добавлении ветви -', name, '\n',
'Узел q1 должен принадлежать той-же модели!')
else:
raise TypeError('Ошибка при добавлении ветви -', name, '\n',
'Аргумент q1 должен иметь тип Q или int!')
if isinstance(q2, int):
if q2 != 0:
raise ValueError('Ошибка при добавлении ветви -', name, '\n',
'Для подключения ветви к земле q2=0')
elif isinstance(q2, Q):
if not q2.model is model:
raise ValueError('Ошибка при добавлении ветви -', name, '\n',
'Узел q2 должен принадлежать той-же модели!')
else:
raise TypeError('Ошибка при добавлении ветви -', name, '\n',
'Аргумент q2 должен иметь тип Q или int!')
if q1 is q2:
print('Предупреждение! при добавлении ветви -', name, '\n',
'Ветвь подключается обоими концами к одному и тому же узлу!')
model.np += 1
model.bp.append(self)
self.id = model.np
self.model = model
self.name = name
self.desc = desc
self.q1 = q1
if isinstance(q1, Q):
q1.addp(self)
self.q2 = q2
if isinstance(q2, Q):
q2.addp(self)
self.Z = Z
self.E = E
self.T = T
self.B = B
self.mlist = []
self.kn = None
def edit(self,name,q1,q2,Z,E=(0, 0, 0),T=(1, 0),B=(0, 0, 0),desc=''):
'''Изменить параметры ветви можно с помощью метода
p.edit(name,q1,q2,Z)
p.edit(name,q1,q2,Z,desc='Примечание')
p.edit(name,q1,q2,Z,E=(E1,E2,E0))
p.edit(name,q1,q2,Z,B=(B1,B2,B0))
p.edit(name,q1,q2,Z,T=(Ktrans,GrT))'''
if isinstance(q1, int):
if q1 != 0:
raise ValueError('Ошибка при редактировании ветви №', self.id, ' - ', self.name, '\n',
'Для подключения ветви к земле q1=0')
elif isinstance(q1, Q):
if not q1.model is self.model:
raise ValueError('Ошибка при редактировании ветви №', self.id, ' - ', self.name, '\n',
'Узел q1 должен принадлежать той-же модели!')
else:
raise TypeError('Ошибка при редактировании ветви №', self.id, ' - ', self.name, '\n',
'Аргумент q1 должен иметь тип Q или int!')
if isinstance(q2, int):
if q2 != 0:
raise ValueError('Ошибка при редактировании ветви №', self.id, ' - ', self.name, '\n',
'Для подключения ветви к земле q2=0')
elif isinstance(q2, Q):
if not q2.model is self.model:
raise ValueError('Ошибка при редактировании ветви №', self.id, ' - ', self.name, '\n',
'Узел q2 должен принадлежать той-же модели!')
else:
raise TypeError('Ошибка при редактировании ветви №', self.id, ' - ', self.name, '\n',
'Аргумент q2 должен иметь тип Q или int!')
if q1 is q2:
            print('Предупреждение! при редактировании ветви -', name, '\n',
'Ветвь подключается обоими концами к одному и тому же узлу!')
self.name = name
self.desc = desc
self.q1 = q1
if isinstance(q1, Q):
q1.addp(self)
q1.update()
self.q2 = q2
if isinstance(q2, Q):
q2.addp(self)
q2.update()
self.Z = Z
self.E = E
self.T = T
self.B = B
def addm(self,mid):
'''Служебный метод, предназачен для информирования ветви
о подключенных к ней взаимоиндуктивностей'''
self.mlist.append(mid)
def setn(self,kn):
'''Служебный метод, предназачен для информирования ветви
о наличии на ней обрыва'''
self.kn=kn
def par(self):
'''Вывод на экран параметров ветви - ее номера, названия, номеров и наименований узлов к которым она подключена,
электрических параметров Z,E,B и T
p.par()'''
if isinstance(self.q1, Q):
q1id = self.q1.id
q1name = self.q1.name
else:
q1id = 0; q1name = 'Земля'
if isinstance(self.q2, Q):
q2id = self.q2.id
q2name = self.q2.name
else:
q2id = 0
q2name = 'Земля'
print('Ветвь № {} - {} : {}({}) <=> {}({})'.format(self.id,self.name,q1id,q1name,q2id,q2name))
print('Z = {}; E = {}; T = {}; B = {}'.format(self.Z,self.E,self.T,self.B))
def getres(self):
'''Служебный метод, возвращает результат расчета по данной ветви
без учета наличия поперечной проводимости и направления от узла 1 к узлу 2
токов прямой, обратной и нулевой последовательностей, тоже что и p.res1('U120') если B=0'''
if self.model is None:
raise ValueError('Ошибка при выводе результатов расчетов Ветви №', self.id, ' - ', self.name, '\n',
'Ветвь не принадлежит какой либо модели!')
if self.model.X is None:
raise ValueError('Ошибка при выводе результатов расчетов Ветви №', self.id, ' - ', self.name, '\n',
'Не произведен расчет электрических величин!')
pId = 3*(self.id-1)
return self.model.X[pId:pId+3]
def getresq1(self,i120):
'''Служебный метод, возвращает результат расчета по данной ветви
c учетом наличия поперечной проводимости и направления от узла 1 к узлу 2
токов прямой, обратной и нулевой последовательностей, тоже что и p.res1('U120')'''
if isinstance(self.q1, Q):
u120 = self.q1.getres()
i120 += u120 * self.B/2
else:
            u120 = np.zeros(3, dtype=complex)
return [u120, i120]
def getresq2(self,i120):
'''Служебный метод, возвращает результат расчета по данной ветви
c учетом наличия поперечной проводимости и направления от узла 2 к узлу 1
токов прямой, обратной и нулевой последовательностей, тоже что и p.res2('U120')'''
if isinstance(self.q2, Q):
u120 = self.q2.getres()
else:
            u120 = np.zeros(3, dtype=complex)
Kt = self.T[0]*np.exp(Kf*self.T[1]*np.ones(3))
if self.T[1] % 2 != 0:
Kt[1] = np.conj(Kt[1])
i120 = -Kt * i120 + u120 * self.B/2
return [u120, i120]
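    # Note on the transformer correction above: with Kf = -1j*pi/6, the factor
    # Kt = Ktrans * exp(-1j*pi/6*GrT) scales the side-1 currents by the turns ratio
    # and rotates them by GrT x 30 degrees (one step per vector-group number); for
    # T = (115/10.5, 11) the -330 degree rotation is equivalent to +30 degrees, the
    # familiar shift of a Y/D-11 connection. The negative-sequence element (Kt[1]) is
    # conjugated for odd groups because negative-sequence quantities are shifted in
    # the opposite direction to positive-sequence ones.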
def res1(self,parname='',subpar=''):
'''Вывод сводной таблицы результатов расчетов для ветви p
со стороны 1-ого узла (направление токов и пр. в линию)
p.res1()
Вывод конкретного параметра ParName в виде компексного числа
для последующего использования в расчетах
со стороны 1-ого узла (направление токов и пр. в линию)
p.res1(ParName)
где ParName может принимать значения:
'U1','U2','U0','3U0','UA','UB','UC','UABC','UAB','UBC','UCA','UAB_BC_CA',
'I1','I2','I0','3I0','IA','IB','IC','IABC','IAB','IBC','ICA','IAB_BC_CA',
'Z1','Z2','Z0','ZA','ZB','ZC','ZABC','ZAB','ZBC','ZCA','ZAB_BC_CA',
'S1','S2','S0','SA','SB','SC','SABC','SAB','SBC','SCA','SAB_BC_CA','S'
Вывод конкретного параметра ParName в заданной форме Form:
p.res1(ParName,Form)
где Form может принимать значения
'R' - Активная составляющая
'X' - Реактивная составляющая
'M' - Модуль комплексного числа
'<f' - Фаза вектора в градусах
'R+jX' - Текстовый вид комплексного числа
'M<f' - Текстовый вид комплексного числа '''
i120 = self.getres()
if isinstance(self.q1, Q):
q1id = self.q1.id
q1name = self.q1.name
else:
q1id = 0
q1name = 'Земля'
u120,i120 = self.getresq1(i120)
if parname=='':
print("Ветвь № {} - {}".format(self.id, self.name))
print("Значения токов по ветви со стороны узла №{} - {}".format(q1id, q1name))
print(StrI(i120))
print("Значения напряжения в узле №{} - {}".format(q1id, q1name))
print(StrU(u120))
else:
res = mselectz[parname](u120,i120)
if isinstance(res, np.ndarray):
res = mform3[subpar](res,parname)
else:
res = mform1[subpar](res,parname)
return res
def res2(self,parname='',subpar=''):
'''Вывод сводной таблицы результатов расчетов для ветви p
со стороны 2-ого узла (направление токов и пр. в линию)
        p.res2()
Вывод конкретного параметра ParName в виде компексного числа
для последующего использования в расчетах
со стороны 2-ого узла (направление токов и пр. в линию)
p.res2(ParName)
где ParName может принимать значения:
'U1','U2','U0','3U0','U120','UA','UB','UC','UABC','UAB','UBC','UCA','UAB_BC_CA',
'I1','I2','I0','3I0','I120','IA','IB','IC','IABC','IAB','IBC','ICA','IAB_BC_CA',
'Z1','Z2','Z0','Z120','ZA','ZB','ZC','ZABC','ZAB','ZBC','ZCA','ZAB_BC_CA',
'S1','S2','S0','S120','SA','SB','SC','SABC','SAB','SBC','SCA','SAB_BC_CA','S'
Вывод конкретного параметра ParName в заданной форме Form:
p.res2(ParName,Form)
где Form может принимать значения
'R' - Активная составляющая
'X' - Реактивная составляющая
'M' - Модуль комплексного числа
'<f' - Фаза вектора в градусах
'R+jX' - Текстовый вид комплексного числа
'M<f' - Текстовый вид комплексного числа '''
i120 = self.getres()
if isinstance(self.q2, Q):
q2id = self.q2.id
q2name = self.q2.name
else:
q2id = 0
q2name = 'Земля'
u120,i120 = self.getresq2(i120)
if parname=='':
print("Ветвь № {} - {}".format(self.id, self.name))
print("Значения токов по ветви со стороны узла №{} - {}".format(q2id, q2name))
print(StrI(i120))
print("Значения напряжения в узле №{} - {}".format(q2id, q2name))
print(StrU(u120))
else:
res = mselectz[parname](u120,i120)
if isinstance(res, np.ndarray):
res = mform3[subpar](res,parname)
else:
res = mform1[subpar](res,parname)
return res
def __repr__(self):
'''Еще один способ вывода сводной таблицы результатов расчетов для ветви p
В командной строке интерпретара набрать название переменной объекта ветви и нажать Enter
p Enter, выводятся результаты с обоих концов ветви'''
i120p = self.getres()
if isinstance(self.q1, Q):
q1id = self.q1.id
q1name = self.q1.name
else:
q1id = 0
q1name = 'Земля'
u120,i120 = self.getresq1(i120p)
strres = []
strres.append("Ветвь № {} - {}\n".format(self.id, self.name))
strres.append("Значения токов по ветви со стороны узла №{} - {}\n".format(q1id, q1name))
strres.append(StrI(i120))
strres.append("Значения напряжения в узле №{} - {}\n".format(q1id, q1name))
strres.append(StrU(u120))
if isinstance(self.q2, Q):
q2id = self.q2.id
q2name = self.q2.name
else:
q2id = 0
q2name = 'Земля'
u120,i120 = self.getresq2(i120p)
strres.append("Значения токов по ветви со стороны узла №{} - {}\n".format(q2id, q2name))
strres.append(StrI(i120))
strres.append("Значения напряжения в узле №{} - {}\n".format(q2id, q2name))
strres.append(StrU(u120))
return (''.join(strres))
def __getattr__(self, attrname):
'''Еще один способ получения конкректного параметра результата в виде
компексного числа для его последующего использования в расчетах
p.ParName
где ParName может принимать значения:
значения токов от 1-ого ко 2-ому узлу без учета емкостной проводимости
I1,I2,I0,I120,IA,IB,IC,IABC,IAB,IBC,ICA,IAB_BC_CA
со стороны 1-ого узла
q1U1,q1U2,q1U0,q1U120,q1UA,q1UB,q1UC,q1UABC,q1UAB,q1UBC,q1UCA,q1UAB_BC_CA,
q1I1,q1I2,q1I0,q1I120,q1IA,q1IB,q1IC,q1IABC,q1IAB,q1IBC,q1ICA,q1IAB_BC_CA,
q1Z1,q1Z2,q1Z0,q1Z120,q1ZA,q1ZB,q1ZC,q1ZABC,q1ZAB,q1ZBC,q1ZCA,q1ZAB_BC_CA,
q1S1,q1S2,q1S0,q1S120,q1SA,q1SB,q1SC,q1SABC,q1SAB,q1SBC,q1SCA,q1SAB_BC_CA,q1S
со стороны 2-ого узла
q2U1,q2U2,q2U0,q1U120,q2UA,q2UB,q2UC,q2UABC,q2UAB,q2UBC,q2UCA,q2UAB_BC_CA,
q2I1,q2I2,q2I0,q1I120,q2IA,q2IB,q2IC,q2IABC,q2IAB,q2IBC,q2ICA,q2IAB_BC_CA,
q2Z1,q2Z2,q2Z0,q1Z120,q2ZA,q2ZB,q2ZC,q2ZABC,q2ZAB,q2ZBC,q2ZCA,q2ZAB_BC_CA,
q2S1,q2S2,q2S0,q1S120,q2SA,q2SB,q2SC,q2SABC,q2SAB,q2SBC,q2SCA,q2SAB_BC_CA,q2S'''
i120p = self.getres()
if not attrname[:2] in ('q1', 'q2'):
res = mselectz[attrname](np.zeros(3),i120p)
elif attrname[:2] == 'q1':
u120,i120 = self.getresq1(i120p)
res = mselectz[attrname[2:]](u120,i120)
elif attrname[:2] == 'q2':
u120,i120 = self.getresq2(i120p)
res = mselectz[attrname[2:]](u120,i120)
return res
class M:
'''Класс взаимоиндукции нулевой последовательности,
необходим для формирования расчетной модели
Создание ветви с помощью конструктора
M(model,name,p1,p2,M12,M21) - взаимоиндукция
M(model,name,p1,p2,M12,M21,desc='Примечание') - взаимоиндукция с текстовым примечанием
где:
model - объект расчетной модели в которой создается взаимоиндукция
name - краткое название взаимоиндукции, обращение к ветви по ее имени не предусмотрено
p1,p2 - объекты ветви принадлежащего той же расчетной модели между которыми создается взаимоиндукция
desc - Примечание или любая другая текстовая информация, можно не задавать.
M12 - взаимоиндукция влияния ветви p2 на ветвь p1
M21 - взаимоиндукция влияния ветви p1 на ветвь p2
Результатом конструктора ветви является объект взаимоиндукции, который используется для
формирования расчетной модели
Изменить параметры взаимоиндукции m можно с помощью метода
m.edit(name,M12,M21)
Пользовательские функции для объекта взаимоиндукции m
Вывод на экран параметров ветви - ее номера, названия, номеров и наименований ветвей
между которыми создана взаимоиндукция, электрических параметров M12,M21
m.par()'''
def __init__(self,model,name,p1,p2,M12,M21,desc=''):
''' Конструктор взаимоиндукции
Создание ветви с помощью конструктора
M(model,name,p1,p2,M12,M21) - взаимоиндукция
M(model,name,p1,p2,M12,M21,desc='Примечание') - взаимоиндукция с текстовым примечанием
где:
model - объект расчетной модели в которой создается взаимоиндукция
name - краткое название взаимоиндукции, обращение к ветви по ее имени не предусмотрено
p1,p2 - объекты ветви принадлежащего той же расчетной модели между которыми создается взаимоиндукция
desc - Примечание или любая другая текстовая информация, можно не задавать.
M12 - взаимоиндукция влияния ветви p2 на ветвь p1
M21 - взаимоиндукция влияния ветви p1 на ветвь p2
'''
if not isinstance(model, Model):
raise TypeError('Ошибка при добавлении взаимоиндукции -', name, '\n',
'Аргумент model должен иметь тип Model!')
if not isinstance(p1, P):
raise TypeError('Ошибка при добавлении взаимоиндукции -', name, '\n',
'Аргумент p1 должен иметь тип P!')
if not isinstance(p2, P):
raise TypeError('Ошибка при добавлении взаимоиндукции -', name, '\n',
'Аргумент p2 должен иметь тип P!')
if not p1.model is model:
raise ValueError('Ошибка при добавлении взаимоиндукции -', name, '\n',
'Ветвь p1 должна принадлежать той-же модели!')
if not p2.model is model:
raise ValueError('Ошибка при добавлении взаимоиндукции -', name, '\n',
'Ветвь p2 должна принадлежать той-же модели!')
if p1 is p2:
raise ValueError('Ошибка при добавлении взаимоиндукции -', name, '\n',
'Взаимоиндукция подключается к одной и той же ветви!')
model.nm += 1
model.bm.append(self)
self.id = model.nm
self.model = model
self.name = name
self.desc = desc
self.p1 = p1
p1.addm(self)
self.p2 = p2
p2.addm(self)
self.M12 = M12
self.M21 = M21
def edit(self,name,M12,M21):
''' Редактирование взаимоиндукции
m.edit(model,name,M12,M21)'''
self.name = name
self.M12 = M12
self.M21 = M21
def par(self):
'''Вывод на экран параметров ветви - ее номера, названия, номеров и наименований ветвей
между которыми создана взаимоиндукция, электрических параметров M12,M21
m.par()'''
print('Взаимоиндукция № {} - {} : {}({}) <=> {}({})'.format(self.id,self.name,self.p1.id,self.p1.name,self.p2.id,self.p2.name))
print('M12 = {}; M21 = {}'.format(self.M12,self.M21))
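# Example of attaching a zero-sequence mutual coupling between two existing branches
# (values are illustrative only):
#   M(mdl, 'M 1-2', p1, p2, 3j, 3j)   # M12 = M21 = j3 Ohm between branches p1 and p2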
class N:
'''Class of a longitudinal (open-phase) or transverse (short-circuit) asymmetry,
needed to build the network model and to obtain the calculation results
Creating an asymmetry with the constructor
N(model,name,qp,SC) - asymmetry
N(model,name,qp,SC,desc='Note') - asymmetry with a text note
N(model,name,qp,SC,r=Rd) - asymmetry in the form of a fault with a fault resistance
where:
model - the network model object in which the asymmetry is created
name - short name of the asymmetry; access by name is not provided
qp - node object (for a fault) or branch object (for an open phase) where the asymmetry is created
desc - a note or any other text information, may be omitted.
SC - kind of fault or open phase, may take the values:
'A0','B0','C0' - bolted single-phase-to-ground faults, or an open circuit of the corresponding phase
'A0r','B0r','C0r' - single-phase-to-ground faults through a fault resistance
'AB','BC','CA' - bolted phase-to-phase faults, or open circuits of the corresponding phases
'ABr','BCr','CAr' - phase-to-phase faults through a fault resistance
'AB0','BC0','CA0' - bolted two-phase-to-ground faults
'ABC' - three-phase fault without ground, or an open circuit of all three phases
'ABC0' - three-phase-to-ground fault
'N0' - grounding of a node in the zero-sequence network,
or a zero-sequence open circuit on a branch
The constructor returns an asymmetry object, which is used to
build the network model and to print the calculation results
The parameters of an asymmetry n can be changed with the method
n.edit(name,SC)
n.edit(name,SC,desc='')
n.edit(name,SC,r=0)
User functions of the asymmetry object n
Printing the asymmetry parameters - its number, name,
the number and name of the node or branch it is attached to,
and the kind of asymmetry
n.par()
Printing the summary table of calculation results for the asymmetry n
n.res()
Returning a specific parameter ParName as a complex number
for further use in calculations
n.res(ParName)
where ParName may take the values:
'U1','U2','U0','3U0','U120','UA','UB','UC','UABC','UAB','UBC','UCA','UAB_BC_CA',
'I1','I2','I0','3I0','I120','IA','IB','IC','IABC','IAB','IBC','ICA','IAB_BC_CA',
'Z1','Z2','Z0','Z120','ZA','ZB','ZC','ZABC','ZAB','ZBC','ZCA','ZAB_BC_CA',
'S1','S2','S0','S120','SA','SB','SC','SABC','SAB','SBC','SCA','SAB_BC_CA','S'
Returning a specific parameter ParName in a given form Form:
n.res(ParName,Form)
where Form may take the values
'R' - real part
'X' - imaginary part
'M' - magnitude of the complex number
'<f' - phase angle of the vector in degrees
'R+jX' - complex number as text
'M<f' - complex number as text
Another way to obtain a specific result parameter as a complex number
for further use in calculations
n.ParName
where ParName may take the values:
U1,U2,U0,UA,UB,UC,UABC,UAB,UBC,UCA,UAB_BC_CA
I1,I2,I0,IA,IB,IC,IABC,IAB,IBC,ICA,IAB_BC_CA
Z1,Z2,Z0,Z120,ZA,ZB,ZC,ZABC,ZAB,ZBC,ZCA,ZAB_BC_CA,
S1,S2,S0,S120,SA,SB,SC,SABC,SAB,SBC,SCA,SAB_BC_CA,S'''
def __init__(self,model,name,qp,SC,r=0,desc=''):
''' Fault (short-circuit or open-phase) constructor'''
if not isinstance(model, Model):
raise TypeError('Ошибка при добавлении несимметрии -', name, '\n',
'Аргумент model должен иметь тип Model!')
if not isinstance(qp, (Q,P)):
raise TypeError('Ошибка при добавлении несимметрии -', name, '\n',
'Аргумент qp должен иметь тип Q или P!')
if not qp.model is model:
raise ValueError('Ошибка при добавлении несимметрии -', name, '\n',
'Узел/Ветвь qp должны принадлежать той-же модели!')
model.nn += 1
model.bn.append(self)
self.id = model.nn
self.model = model
self.name = name
self.desc = desc
self.qp = qp
qp.setn(self)
self.SC = SC
self.r = r
def edit(self, name,SC,r=0,desc=''):
'''The parameters of the asymmetry n can be changed with the method
n.edit(name,SC)
n.edit(name,SC,desc='')
n.edit(name,SC,r=0)'''
self.name = name
self.desc = desc
self.SC = SC
self.r = r
def par(self):
'''Prints the asymmetry parameters - its number, name,
the number and name of the node or branch it is attached to,
and the kind of asymmetry
n.par()'''
if isinstance(self.qp, Q):
print('КЗ № {} - {} : {} (r={}) в узле № {}({})'.format(self.id,self.name,self.SC,self.r,self.qp.id,self.qp.name))
elif isinstance(self.qp, P):
print('Обрыв № {} - {} : {} на ветви № {}({})'.format(self.id,self.name,self.SC,self.qp.id,self.qp.name))
def getres(self):
'''Internal method, returns the calculation result for this asymmetry:
for faults - the positive-, negative- and zero-sequence fault currents;
for open phases - the positive-, negative- and zero-sequence voltages across the longitudinal asymmetry.'''
if self.model is None:
raise ValueError('Ошибка при выводе результатов расчетов несимметрии №', self.id, ' - ', self.name, '\n',
'Несимметрия не принадлежит какой либо модели!')
if self.model.X is None:
raise ValueError('Ошибка при выводе результатов расчетов несимметрии №', self.id, ' - ', self.name, '\n',
'Не произведен расчет электрических величин!')
nId = 3*(self.model.np+self.model.nq+self.id-1)
return self.model.X[nId:nId+3]
def res(self,parname='',subpar=''):
'''Prints the summary table of calculation results for the asymmetry n
n.res()
Returns a specific parameter ParName as a complex number
for further use in calculations
n.res(ParName)
where ParName may take the values:
'U1','U2','U0','3U0','U120','UA','UB','UC','UABC','UAB','UBC','UCA','UAB_BC_CA',
'I1','I2','I0','3I0','I120','IA','IB','IC','IABC','IAB','IBC','ICA','IAB_BC_CA',
'Z1','Z2','Z0','Z120','ZA','ZB','ZC','ZABC','ZAB','ZBC','ZCA','ZAB_BC_CA',
'S1','S2','S0','S120','SA','SB','SC','SABC','SAB','SBC','SCA','SAB_BC_CA','S'
Returns a specific parameter ParName in a given form Form:
n.res(ParName,Form)
where Form may take the values
'R' - real part
'X' - imaginary part
'M' - magnitude of the complex number
'<f' - phase angle of the vector in degrees
'R+jX' - complex number as text
'M<f' - complex number as text'''
if isinstance(self.qp, Q):
u120 = self.qp.getres()
i120 = self.getres()
if parname=='':
print('КЗ № {} - {} - {}'.format(self.id, self.name, self.SC))
print('В Узле № {} - {}'.format(self.qp.id, self.qp.name))
print(StrU(u120))
print('Суммарный ток КЗ в Узле № {} - {}'.format(self.qp.id, self.qp.name))
print(StrI(i120))
print('Подтекание токов по ветвям')
self.qp.update()
for kp in self.qp.plist:
i120 = kp.getres()
if self.qp is kp.q1:
u120,i120 = kp.getresq1(i120)
elif self.qp is kp.q2:
u120,i120 = kp.getresq2(i120)
i120 = -i120
print('Ветвь № {} - {}'.format(kp.id, kp.name))
print(StrI(i120, 0))
else:
res = mselectz[parname](u120,i120)
if isinstance(res, np.ndarray):
res = mform3[subpar](res,parname)
else:
res = mform1[subpar](res,parname)
return res
def __repr__(self):
'''Another way to print the summary table of calculation results for the asymmetry n
Type the name of the asymmetry object variable at the interpreter prompt and press Enter
n Enter'''
if isinstance(self.qp, Q):
u120 = self.qp.getres()
i120 = self.getres()
strres = []
strres.append('КЗ №{} - {} - {}\n'.format(self.id, self.name, self.SC))
strres.append('В Узле № {} - {}\n'.format(self.qp.id, self.qp.name))
strres.append(StrU(u120))
strres.append('\nСуммарный ток КЗ в Узле № {} - {}\n'.format(self.qp.id, self.qp.name))
strres.append(StrI(i120))
strres.append('\nПодтекание токов по ветвям')
for kp in self.qp.plist:
i120p = kp.getres()
if self.qp is kp.q1:
_,i120p = kp.getresq1(i120p)
elif self.qp is kp.q2:
_,i120p = kp.getresq2(i120p)
i120p = -i120p
strres.append('\nВетвь № {} - {}\n'.format(kp.id, kp.name))
strres.append(StrI(i120p,0))
strres = ''.join(strres)
elif isinstance(self.qp, P):
strres = self.qp.__repr__()
return (strres)
def __getattr__(self, attrname):
'''Another way to obtain a specific result parameter as a complex number
for further use in calculations
n.ParName
where ParName may take the values:
U1,U2,U0,UA,UB,UC,UABC,UAB,UBC,UCA,UAB_BC_CA
I1,I2,I0,IA,IB,IC,IABC,IAB,IBC,ICA,IAB_BC_CA
Z1,Z2,Z0,Z120,ZA,ZB,ZC,ZABC,ZAB,ZBC,ZCA,ZAB_BC_CA,
S1,S2,S0,S120,SA,SB,SC,SABC,SAB,SBC,SCA,SAB_BC_CA,S'''
if isinstance(self.qp, Q):
u120 = self.qp.getres()
i120 = self.getres()
elif isinstance(self.qp, P):
u120 = self.getres()
i120 = self.qp.getres()
res = mselectz[attrname](u120,i120)
return res
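# --- Illustrative sketch (commented out; not part of the original module) ---
# Creating a single-phase fault at a node and reading the results once Model.Calc()
# has been run; mdl and q stand for a previously built model and node object.
#
#   k1 = N(mdl, 'K1', q, 'A0')          # bolted phase-A-to-ground fault at node q
#   mdl.Calc()
#   k1.res()                            # summary table for the fault
#   i3I0 = k1.res('3I0')                # complex 3I0 for further calculations
#   print(k1.res('IA', 'M<f'))          # phase-A current as magnitude and angle
#   print(k1.I1)                        # same idea via attribute access (__getattr__)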
class Model:
'''Class representing the network model of an electrical grid,
needed to build and store the network model and to run the calculations
Network model constructor
Model()
Model(desc='Note')
User functions of a model mdl
Resetting the counters and clearing the lists (tables) of nodes, branches,
mutual inductances, asymmetries...
mdl.Clear()'''
def __init__(self,desc=''):
''' Network model constructor'''
self.desc = desc
self.nq = 0
self.np = 0
self.nm = 0
self.nn = 0
self.bq = []
self.bp = []
self.bm = []
self.bn = []
self.X = None
def AddNQ(self,NQ,Nname):
'''Bulk creation of nodes
NQ - number of nodes to create
Nname - common name of the nodes'''
listq = []
for ij in range(NQ):
listq.append(Q(self,'{} - №{}'.format(Nname,ij+1)))
return listq
def AddNP(self,Nname,listq1,listq2,Z12,Z0,B12=None,B0=None):
'''Bulk creation of branches and mutual inductances
Nname - common name of the branch cross-section (group)
listq1 - list of node objects to which the branches are connected on one side
listq2 - list of node objects to which the branches are connected on the other side
Z12 - np.ndarray vector of positive/negative-sequence branch impedances
Z0 - square np.ndarray matrix of zero-sequence branch impedances and mutual inductances
B12 - np.ndarray vector of positive/negative-sequence shunt capacitive susceptances
B0 - square np.ndarray matrix of zero-sequence shunt capacitive susceptances
AddNP(Nname,listq1,listq2,Z12,Z0) - without shunt capacitive susceptance
AddNP(Nname,listq1,listq2,Z12,Z0,B12,B0) - with shunt capacitive susceptance'''
listp = []
listm = []
nq1 = len(listq1)
nq2 = len(listq2)
if nq1 != nq2:
raise ValueError('Ошибка при добавлении сечения ветвей -', Nname, '\n',
'Количество узлов с обоих сторон должно совпадать!')
if not isinstance(Z12, np.ndarray):
raise TypeError('Ошибка при добавлении сечения ветвей -', Nname, '\n',
'Аргумент Z12 должен иметь тип np.ndarray!')
if not isinstance(Z0, np.ndarray):
raise TypeError('Ошибка при добавлении сечения ветвей -', Nname, '\n',
'Аргумент Z0 должен иметь тип np.ndarray!')
if nq1 != Z12.shape[0]:
raise ValueError('Ошибка при добавлении сечения ветвей -', Nname, '\n',
'Количество сопротивлений Z12 должно соответствовать количеству узлов!')
if nq1 != Z0.shape[0] or nq1 != Z0.shape[1]:
raise ValueError('Ошибка при добавлении сечения ветвей -', Nname, '\n',
'Количество сопротивлений Z0 должно соответствовать количеству узлов!')
if B12 is not None or B0 is not None:
if not isinstance(B12, np.ndarray):
raise TypeError('Ошибка при добавлении сечения ветвей -', Nname, '\n',
'Аргумент B12 должен иметь тип np.ndarray!')
if not isinstance(B0, np.ndarray):
raise TypeError('Ошибка при добавлении сечения ветвей -', Nname, '\n',
'Аргумент B0 должен иметь тип np.ndarray!')
if nq1 != B12.shape[0]:
raise ValueError('Ошибка при добавлении сечения ветвей -', Nname, '\n',
'Количество сопротивлений B12 должно соответствовать количеству узлов!')
if nq1 != B0.shape[0] or nq1 != B0.shape[1]:
raise ValueError('Ошибка при добавлении сечения ветвей -', Nname, '\n',
'Количество сопротивлений B0 должно соответствовать количеству узлов!')
for ij in range(nq1):
#(self,model,name,q1,q2,Z,E=(0, 0, 0),T=(1, 0),B=(0, 0, 0),desc='')
listp.append(P(self,'{} - №{}'.format(Nname,ij+1),listq1[ij],listq2[ij],(Z12[ij],Z12[ij],Z0[ij,ij]),B=(B12[ij],B12[ij],B0[ij,ij])))
for ij2 in range(ij):
listm.append(M(self,'{} - №{}<=>№{}'.format(Nname,ij+1,ij2+1),listp[ij],listp[ij2],Z0[ij,ij2],Z0[ij2,ij]))
else:
for ij in range(nq1):
listp.append(P(self,'{} - №{}'.format(Nname,ij+1),listq1[ij],listq2[ij],(Z12[ij],Z12[ij],Z0[ij,ij])))
for ij2 in range(ij):
listm.append(M(self,'{} - №{}<=>№{}'.format(Nname,ij+1,ij2+1),listp[ij],listp[ij2],Z0[ij,ij2],Z0[ij2,ij]))
return listp + listm
def ImportFromPVL(self,PVL_Sech):
'''Imports a cross-section (group) of branches from a PVL object'''
listp = []
listm = []
PVL_Sech.calc()
z1 = PVL_Sech.Len * PVL_Sech.Z1
z0 = PVL_Sech.Len * PVL_Sech.Z0
b1 = PVL_Sech.Len * PVL_Sech.B1
b0 = PVL_Sech.Len * PVL_Sech.B0
for ij,pk in enumerate(PVL_Sech.bp):
p1 = P(self, pk.name, pk.q1, pk.q2,
(z1[ij,0],z1[ij,0],z0[ij,ij]),
B=(b1[ij,0],b1[ij,0],b0[ij,ij]) )
listp.append(p1)
for ij2,pk2 in enumerate(PVL_Sech.bp[0:ij]):
mname = '{} - №{}<=>№{}'.format(PVL_Sech.name,pk.name,pk2.name)
p2 = listp[ij2]
m = M(self,mname,p1,p2,z0[ij,ij2],z0[ij2,ij])
listm.append(m)
return listp + listm
def Clear(self):
'''Complete clearing of the network model
Resets the counters and clears the lists (tables) of nodes, branches,
mutual inductances, asymmetries...
mdl.Clear()'''
self.X = None
self.nq = 0
self.np = 0
self.nm = 0
self.nn = 0
for kq in self.bq:
kq.model = None
kq.plist = []
kq.kn = None
for kp in self.bp:
kp.model = None
kp.q1 = None
kp.q2 = None
kp.mlist = []
kp.kn = None
for km in self.bm:
km.model = None
km.p1 = None
km.p2 = None
for kn in self.bn:
kn.model = None
kn.qp = None
self.bq = []
self.bp = []
self.bm = []
self.bn = []
def ClearN(self):
'''Removes all asymmetries (faults and open phases) from the network model,
except those of type 'N0' - zero-sequence groundings and zero-sequence open circuits
mdl.ClearN()'''
self.X = None
self.nn = 0
oldbn = self.bn
self.bn = []
for kn in oldbn:
if kn.SC == 'N0':
self.nn += 1
self.bn.append(kn)
kn.id = self.nn
else:
kn.model = None
kn.qp.kn = None
def List(self):
'''Prints the nodes, branches, mutual inductances and asymmetries
that make up the network model, together with their parameters...
Essentially applies the par() method to every element
of the network model in turn
mdl.List()'''
print('Количество узлов = {}; ветвей = {}; взаимоиндуктивностей = {}; несимметрий = {}'.format(self.nq,self.np,self.nm,self.nn))
for kq in self.bq:
kq.par()
for kp in self.bp:
kp.par()
for km in self.bm:
km.par()
for kn in self.bn:
kn.par()
def Test4Singularity(self):
'''Tests the model for conditions that make the matrix of the
nodal-voltage / branch-current equations singular (degenerate)
mdl.Test4Singularity()'''
for kq in self.bq:
kq.singulare = True
for kp in self.bp:
if isinstance(kp.q1, int) and isinstance(kp.q2, Q):
kp.q2.Test4Singularity()
elif isinstance(kp.q1, Q) and isinstance(kp.q2, int):
kp.q1.Test4Singularity()
listq = []
listp = []
for kq in self.bq:
if kq.singulare:
listq.append(kq)
for kp in self.bp:
if (kp.q1 in listq) or (kp.q2 in listq):
listp.append(kp)
if listq or listp:
print('\nСписок висящих узлов\n')
for kq in listq:
kq.par()
print('\nСписок висящих ветвей\n')
for kp in listp:
kp.par()
print('\nСписок взаимоиндукций между ветвями, хотя-бы одна из которых является висящей\n')
for km in self.bm:
if (km.p1 in listp) or (km.p2 in listp):
km.par()
print('\nСписок КЗ на висящем узле или обрывов на висящих ветвях\n')
for kn in self.bn:
if isinstance(kn.qp, Q): # Short circuits
if kn.qp in listq:
kn.par()
if isinstance(kn.qp, P): # Open phases
if kn.qp in listp:
kn.par()
raise ValueError('Выявлены висящие узлы, ветви!!! \nВыполнение расчетов электрических параметров невозможно! \nУдалите или закоментируйте висящие узлы, ветви,\n, взаимоиндукции, КЗ и обрывы!')
def Calc(self):
'''Main method of the МРТКЗ module, mdl.Calc()
Assembles a sparse system of linear algebraic equations (SLAE)
and then solves it with the scipy routine spsolve(LHS,RHS)
LHS * X = RHS
where LHS - sparse square matrix
RHS - column vector
X - the calculation result being sought
Three equations are written for every node and every branch:
one each for the positive-, negative- and zero-sequence networks
Three equations are written for every asymmetry:
the boundary-condition equations that define the asymmetry
The dimension (number of equations) equals 3*(np+nq+nn), where:
np - number of branches in the network model;
nq - number of nodes in the network model;
nn - number of asymmetries in the network model.
Without the equations describing the asymmetries, the system above
is the following set of matrix equations:
Z*Ip + At*Uq = E
A*Ip + (-B/2)*Uq = -J
where:
the 1st equation follows from Kirchhoff's voltage law (Zp*Ip - (Uq1 - Uq2) = Ep)
the 2nd equation follows from Kirchhoff's current law (the sum of currents at a node is zero)
Z - square matrix of branch impedances and mutual inductances
for the positive, negative and zero sequences, dimension - (3*np,3*np)
A - incidence matrix, dimension - (3*nq,3*np)
At - transposed incidence matrix, dimension - (3*np,3*nq)
(B/2) - square diagonal matrix of the sums of the shunt susceptances B/2
connected to each node, for the positive, negative and zero sequences, dimension - (3*nq,3*nq)
E - column vector of branch EMFs for the positive, negative and zero sequences, dimension - (3*np,1)
J - column vector of current sources connected to the nodes
Ip - unknown column vector of branch currents
for the positive, negative and zero sequences, dimension - (3*np,1)
Uq - unknown column vector of node voltages
for the positive, negative and zero sequences, dimension - (3*nq,1)
For every asymmetry three additional equations are written -
the boundary conditions (shown in phase quantities for clarity):
Short circuits
A0 => Uka=0;Ikb=0;Ikc=0
B0 => Ukb=0;Ikc=0;Ika=0
C0 => Ukc=0;Ika=0;Ikb=0
A0r => Uka-r*Ika=0;Ikb=0;Ikc=0
B0r => Ukb-r*Ikb=0;Ikc=0;Ika=0
C0r => Ukc-r*Ikc=0;Ika=0;Ikb=0
AB => Uka-Ukb=0;Ika+Ikb=0;Ikc=0
BC => Ukb-Ukc=0;Ikb+Ikc=0;Ika=0
CA => Ukc-Uka=0;Ikc+Ika=0;Ikb=0
ABr => Uka-Ukb-r*Ika=0;Ika+Ikb=0;Ikc=0
BCr => Ukb-Ukc-r*Ikb=0;Ikb+Ikc=0;Ika=0
CAr => Ukc-Uka-r*Ikc=0;Ikc+Ika=0;Ikb=0
AB0 => Uka=0;Ukb=0;Ikc=0
BC0 => Ukb=0;Ukc=0;Ika=0
CA0 => Ukc=0;Uka=0;Ikb=0
ABC => Uk1=0;Uk2=0;Ik0=0
ABC0 => Uk1=0;Uk2=0;Uk0=0
Neutral grounding N0 => Ik1=0;Ik2=0;Uk0=0
Open phases
A0 => Ia=0;dUb=0;dUc=0
B0 => Ib=0;dUc=0;dUa=0
C0 => Ic=0;dUa=0;dUb=0
AB => Ia=0;Ib=0;dUc=0
BC => Ib=0;Ic=0;dUa=0
CA => Ic=0;Ia=0;dUb=0
ABC => I1=0;I2=0;I0=0
Zero-sequence open circuit on a branch N0 => dU1=0;dU2=0;I0=0
and, in the new columns, for each sequence the following is also written:
- for faults, into the Kirchhoff's-current-law equation
A*Ip + (-B/2)*Uq - Ik = 0, where Ik is the current of the transverse asymmetry
- for open phases, into the Kirchhoff's-voltage-law equation
Z*Ip + At*Uq + dU = E, where dU is the voltage across the longitudinal asymmetry
The sparse matrix LHS is assembled in two stages
Stage 1: a coordinate (COO) version of the sparse matrix is built in cdata, ri and ci,
which hold the values of the non-zero matrix entries and their row and column indices
Stage 2: the CSC (compressed sparse column) matrix LHS is built with the scipy constructor
The sparse SLAE is solved with the spsolve(LHS,RHS) routine of the scipy library'''
# self.Test4Singularity()
n = 3*(self.nq+self.np+self.nn)# Dimension of the SLAE
maxnnz = 3*self.nq + 15*self.np + 2*self.nm + 15*self.nn# Maximum number of non-zero entries of the sparse matrix
RHS = np.zeros(n, dtype=complex)# Right-hand-side vector of the SLAE; holds the branch EMFs and the node current sources J
ij = 3*self.nq # Current write position; at the end, the amount of storage actually used
cdata = np.zeros(maxnnz, dtype=np.cdouble)# Vector holding the values of the non-zero entries of the SLAE
ri = np.zeros(maxnnz, dtype=np.int64)# Vector holding the row indices of the non-zero entries of the SLAE
ci = np.zeros(maxnnz, dtype=np.int64)# Vector holding the column indices of the non-zero entries of the SLAE
ri[0:ij] = 3*self.np + np.arange(ij)
ci[0:ij] = ri[0:ij]
for kp in self.bp: # Loop over all branches
pId = 3*(kp.id-1)#Здесь и далее номер строки, столбца относящегося к прямой последовательности ветви
lpId = pId+arr012#[pId,pId+1,pId+2]
#Запись сопротивлений ветви в разреженную матрицу
Dij = 3
ri[ij:ij+Dij] = lpId
ci[ij:ij+Dij] = lpId
cdata[ij:ij+Dij] = np.array(kp.Z)
ij += Dij
#Запись Э.Д.С. ветви в RHS
RHS[pId:pId+3] = np.array(kp.E)
#Расчет комплексных коэф-ов трансформации прямой, обратной и нулевой последовательностей
Kt1 = kp.T[0] * np.exp(Kf*kp.T[1])
if kp.T[1] % 2 == 0:
Kt2 = Kt1
else:
Kt2 = np.conj(Kt1)
Kt0 = Kt1
if isinstance(kp.q1, Q):
qId = 3*(self.np + kp.q1.id - 1)#Здесь и далее номер строки, столбца относящегося к прямой последовательности узла
lqId = qId+arr012#[qId,qId+1,qId+2]
qbId = 3*(kp.q1.id-1)
#Cуммирование B/2 подключенных ветвей к узлу
cdata[qbId:qbId+3] -= np.array(kp.B)/2
#Запись матриц соединений A и At в разреженную матрицу (для q1 -> -1)
Dij = 6
ri[ij:ij+Dij] = np.concatenate((lpId,lqId))#[pId,pId+1,pId+2,qId,qId+1,qId+2]
ci[ij:ij+Dij] = np.concatenate((lqId,lpId))#[qId,qId+1,qId+2,pId,pId+1,pId+2]
cdata[ij:ij+Dij] = np.concatenate((arr_111,arr_111))#[-1.0,-1.0,-1.0,-1.0,-1.0,-1.0]
ij += Dij
if isinstance(kp.q2, Q):
qId = 3*(self.np + kp.q2.id - 1)
lqId = qId+arr012#[qId,qId+1,qId+2]
qbId = 3*(kp.q2.id-1)
#Cуммирование B/2 подключенных ветвей к узлу
cdata[qbId:qbId+3] -= np.array(kp.B)/2
#Запись матриц соединений A и At в разреженную матрицу (для q2 -> 1 или Кт для трансформаторов)
Dij = 6
ri[ij:ij+Dij] = np.concatenate((lpId,lqId))#[pId,pId+1,pId+2,qId,qId+1,qId+2]
ci[ij:ij+Dij] = np.concatenate((lqId,lpId))#[qId,qId+1,qId+2,pId,pId+1,pId+2]
cdata[ij:ij+Dij] = np.array([Kt2,Kt1,Kt0,Kt1,Kt2,Kt0])
ij += Dij
for km in self.bm: # Loop over all mutual inductances
pId1 = 3*(km.p1.id-1)+2
pId2 = 3*(km.p2.id-1)+2
#Запись сопротивлений взаимоиндукции в разреженную матрицу
Dij = 2
ri[ij:ij+Dij] = np.array([pId1,pId2])
ci[ij:ij+Dij] = np.array([pId2,pId1])
cdata[ij:ij+Dij] = np.array([km.M12,km.M21])
ij += Dij
for kq in self.bq:# Loop over all nodes
qId = 3*(self.np + kq.id-1)
qbId = 3*(kq.id-1)
cdata[qbId:qbId+3] -= np.array(kq.Y)
RHS[qId:qId+3] = -np.array(kq.J)
for kn in self.bn: # Loop over all asymmetries
nId = 3*(self.nq+self.np+kn.id-1)#Здесь и далее номер строки, столбца относящегося к несимметрии
lnId = nId + arr012
if isinstance(kn.qp, Q): # Short circuits
qId = 3*(self.np+kn.qp.id-1)
lqId = qId + arr012
#Запись в разреженную матрицу в уравнения по 1-ому закону Кирхгофа наличие КЗ в узле
Dij = 3
ri[ij:ij+Dij] = lqId#[qId,qId+1,qId+2]
ci[ij:ij+Dij] = lnId#[nId,nId+1,nId+2]
cdata[ij:ij+Dij] = arr_111#[-1.0,-1.0,-1.0]
ij += Dij
if kn.SC=='N0' : #Заземление нейтрали Ik1=0;Ik2=0;Uk0=0
Dij = 3
ri[ij:ij+Dij] = lnId#[nId,nId+1,nId+2]
ci[ij:ij+Dij] = np.array([nId,nId+1,qId+2])
cdata[ij:ij+Dij] = vA#[1.0,1.0,1.0]
ij += Dij
elif kn.SC in ('A0','B0','C0'):
#Запись в разреженную матрицу граничных условий для КЗ
Dij = 9
ri[ij:ij+Dij] = nId + np.concatenate((arr000,arr111,arr222))#[nId,nId,nId,nId+1,nId+1,nId+1,nId+2,nId+2,nId+2]
ci[ij:ij+Dij] = np.concatenate((lqId,lnId,lnId))#[qId,qId+1,qId+2,nId,nId+1,nId+2,nId,nId+1,nId+2]
if kn.SC == 'A0':# Uka=0;Ikb=0;Ikc=0
cdata[ij:ij+Dij] = vABC#[1.0,1.0,1.0,a2,a,1.0,a,a2,1.0]
elif kn.SC == 'B0':# Ukb=0;Ikc=0;Ika=0
cdata[ij:ij+Dij] = vBCA#[a2,a,1.0,1.0,1.0,1.0,a,a2,1.0]
else : # 'C0' # Ukc=0;Ika=0;Ikb=0
cdata[ij:ij+Dij] = vCAB#[a,a2,1.0,1.0,1.0,1.0,a2,a,1.0]
ij += Dij
elif kn.SC in ('A0r','B0r','C0r'):
Dij = 12
ri[ij:ij+Dij] = nId + np.concatenate((arr000,arr111,arr222,arr000))#[nId,nId,nId,nId+1,nId+1,nId+1,nId+2,nId+2,nId+2,nId,nId,nId]
ci[ij:ij+Dij] = np.concatenate((lqId,lnId,lnId,lnId))#[qId,qId+1,qId+2,nId,nId+1,nId+2,nId,nId+1,nId+2,nId,nId+1,nId+2]
if kn.SC == 'A0r':# Uka-r*Ika=0;Ikb=0;Ikc=0
cdata[ij:ij+Dij] = np.concatenate((vABC, -kn.r*vA))#np.array(vA+vB+vC+[-kn.r,-kn.r,-kn.r])
elif kn.SC == 'B0r':# Ukb-r*Ikb=0;Ikc=0;Ika=0
cdata[ij:ij+Dij] = np.concatenate((vBCA, -kn.r*vB))#np.array(vB+vC+vA+[-kn.r*a2,-kn.r*a,-kn.r])#
else : # 'C0r'# Ukc-r*Ikc=0;Ika=0;Ikb=0
cdata[ij:ij+Dij] = np.concatenate((vCAB, -kn.r*vC))#np.array(vC+vA+vB+[-kn.r*a,-kn.r*a2,-kn.r])
ij += Dij
elif kn.SC in ('AB','BC','CA'):
Dij = 5
ri[ij:ij+Dij] = nId + np.array([0,0,1,1,2])
ci[ij:ij+Dij] = np.array([qId,qId+1,nId,nId+1,nId+2])
if kn.SC == 'AB':# Uka-Ukb=0;Ika+Ikb=0;Ikc=0
cdata[ij:ij+Dij] = np.array([1.0-a2,1.0-a,1.0+a2,1.0+a,1.0])
elif kn.SC == 'BC':# Ukb-Ukc=0;Ikb+Ikc=0;Ika=0
cdata[ij:ij+Dij] = np.array([a2-a,a-a2,a2+a,a+a2,1.0])
else : # 'CA'# Ukc-Uka=0;Ikc+Ika=0;Ikb=0
cdata[ij:ij+Dij] = np.array([a-1.0,a2-1.0,a+1.0,a2+1.0,1.0])
ij += Dij
elif kn.SC in ('ABr','BCr','CAr'):
Dij = 7
ri[ij:ij+Dij] = nId + np.array([0,0,1,1,2,0,0])
ci[ij:ij+Dij] = np.array([qId,qId+1,nId,nId+1,nId+2,nId,nId+1])
if kn.SC == 'ABr':# Uka-Ukb-r*Ika=0;Ika+Ikb=0;Ikc=0
cdata[ij:ij+Dij] = np.array([1.0-a2,1.0-a,1.0+a2,1.0+a,1.0,-kn.r,-kn.r])
elif kn.SC == 'BCr':# Ukb-Ukc-r*Ikb=0;Ikb+Ikc=0;Ika=0
cdata[ij:ij+Dij] = np.array([a2-a,a-a2,a2+a,a+a2,1.0,-kn.r*a2,-kn.r*a])
else : # 'CAr'# Ukc-Uka-r*Ikc=0;Ikc+Ika=0;Ikb=0
cdata[ij:ij+Dij] = np.array([a-1.0,a2-1.0,a+1.0,a2+1.0,1.0,-kn.r*a,-kn.r*a2])
ij += Dij
elif kn.SC in ('AB0','BC0','CA0'):
Dij = 9
ri[ij:ij+Dij] = nId + np.concatenate((arr000,arr111,arr222))#np.array([nId,nId,nId,nId+1,nId+1,nId+1,nId+2,nId+2,nId+2])
ci[ij:ij+Dij] = np.concatenate((lqId,lqId,lnId))#np.array([qId,qId+1,qId+2,qId,qId+1,qId+2,nId,nId+1,nId+2])
if kn.SC == 'AB0':# Uka=0;Ukb=0;Ikc=0
cdata[ij:ij+Dij] = vABC#[1.0,1.0,1.0,a2,a,1.0,a,a2,1.0]
elif kn.SC == 'BC0':# Ukb=0;Ukc=0;Ika=0
cdata[ij:ij+Dij] = vBCA#[a2,a,1.0,a,a2,1.0,1.0,1.0,1.0]
else : # 'CA0'# Ukc=0;Uka=0;Ikb=0
cdata[ij:ij+Dij] = vCAB#[a,a2,1.0,1.0,1.0,1.0,a2,a,1.0]
ij += Dij
elif kn.SC == 'ABC':# Uk1=0;Uk2=0;Ik0=0
Dij = 3
ri[ij:ij+Dij] = lnId#[nId,nId+1,nId+2]
ci[ij:ij+Dij] = np.array([qId,qId+1,nId+2])
cdata[ij:ij+Dij] = vA#[1.0,1.0,1.0]
ij += Dij
elif kn.SC == 'ABC0' : # Uk1=0;Uk2=0;Uk0=0
Dij = 3
ri[ij:ij+Dij] = lnId#[nId,nId+1,nId+2]
ci[ij:ij+Dij] = lqId#[qId,qId+1,qId+2]
cdata[ij:ij+Dij] = vA#[1.0,1.0,1.0]
ij += Dij
else :
raise TypeError('Неизвестный вид КЗ!')
elif isinstance(kn.qp, P): #Open phases
pId = 3*(kn.qp.id-1)
lpId = pId+arr012
#Запись в разреженную матрицу в уравнения по 2-ому закону Кирхгофа о наличии обрыва на ветви
Dij = 3
ri[ij:ij+Dij] = lpId#[pId,pId+1,pId+2]
ci[ij:ij+Dij] = lnId#[nId,nId+1,nId+2]
cdata[ij:ij+Dij] = vA#[1.0,1.0,1.0]
ij += Dij
if kn.SC == 'N0': #Обрыв ветви по нулевой последовательности dU1=0;dU2=0;I0=0
Dij = 3
ri[ij:ij+Dij] = lnId#[nId, nId+1, nId+2]
ci[ij:ij+Dij] = np.array([nId, nId+1, pId+2])
cdata[ij:ij+Dij] = vA#[1.0,1.0,1.0]
ij += Dij
elif kn.SC in ('A0','B0','C0'):
Dij = 9
ri[ij:ij+Dij] = nId + np.concatenate((arr000,arr111,arr222))#[nId,nId,nId,nId+1,nId+1,nId+1,nId+2,nId+2,nId+2]
ci[ij:ij+Dij] = np.concatenate((lpId,lnId,lnId))#[pId,pId+1,pId+2,nId,nId+1,nId+2,nId,nId+1,nId+2]
if kn.SC == 'A0':# Ia=0;dUb=0;dUc=0
cdata[ij:ij+Dij] = vABC#[1.0,1.0,1.0,a2,a,1.0,a,a2,1.0]
elif kn.SC=='B0':# Ib=0;dUc=0;dUa=0
cdata[ij:ij+Dij] = vBCA#[a2,a,1.0,a,a2,1.0,1.0,1.0,1.0]
else : # 'C0'# Ic=0;dUa=0;dUb=0
cdata[ij:ij+Dij] = vCAB#[a,a2,1.0,1.0,1.0,1.0,a2,a,1.0]
ij += Dij
elif kn.SC in ('AB','BC','CA'):
Dij = 9
ri[ij:ij+Dij] = nId + np.concatenate((arr000,arr111,arr222))#[nId,nId,nId,nId+1,nId+1,nId+1,nId+2,nId+2,nId+2]
ci[ij:ij+Dij] = np.concatenate((lpId,lpId,lnId))#[pId,pId+1,pId+2,pId,pId+1,pId+2,nId,nId+1,nId+2]
if kn.SC == 'AB':# Ia=0;Ib=0;dUc=0
cdata[ij:ij+Dij] = vABC#[1.0,1.0,1.0,a2,a,1.0,a,a2,1.0]
elif kn.SC == 'BC':# Ib=0;Ic=0;dUa=0
cdata[ij:ij+Dij] = vBCA#[a2,a,1.0,a,a2,1.0,1.0,1.0,1.0]
else : # 'CA'# Ic=0;Ia=0;dUb=0
cdata[ij:ij+Dij] = vCAB#[a,a2,1.0,1.0,1.0,1.0,a2,a,1.0]
ij += Dij
elif kn.SC == 'ABC' : # I1=0;I2=0;I0=0
Dij = 3
ri[ij:ij+Dij] = lnId#[nId, nId+1, nId+2]
ci[ij:ij+Dij] = lpId#[pId, pId+1, pId+2]
cdata[ij:ij+Dij] = vA#[1.0,1.0,1.0]
ij += Dij
else: raise TypeError('Неизвестный вид обрыва!')
else: raise TypeError('Неизвестный вид несимметрии!')
#Building the CSC sparse matrix (compressed sparse column format)
LHS = csc_matrix((cdata[0:ij], (ri[0:ij], ci[0:ij])), shape=(n, n))
#Solving the sparse SLAE with the scipy routine
self.X = spsolve(LHS,RHS)
return self.X
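# --- Illustrative end-to-end sketch (commented out; not part of the original module) ---
# A tiny radial network: an equivalent source branch feeding one line and a
# phase-to-phase fault at its far end. All numbers are made up, and the use of the
# integer 0 as the grounded terminal of the source branch is an assumption inferred
# from the isinstance(kp.q1, int) checks in Calc() and Test4Singularity().
#
#   mdl = Model(desc='demo')
#   bus_sys, bus_flt = mdl.AddNQ(2, 'Bus')
#   src = P(mdl, 'System', 0, bus_sys, (0.1+1j, 0.1+1j, 0.3+3j), E=(63500, 0, 0))
#   line = P(mdl, 'Line', bus_sys, bus_flt, (0.5+5j, 0.5+5j, 1.5+15j))
#   k1 = N(mdl, 'K1', bus_flt, 'AB')
#   mdl.Calc()                           # assemble and solve the sparse system
#   print(k1.res('I1', 'M<f'))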
mselectz=dict({'U120' : lambda uq,ip: uq,
'U1' : lambda uq,ip: uq[0],
'U2' : lambda uq,ip: uq[1],
'U0' : lambda uq,ip: uq[2],
'3U0' : lambda uq,ip: 3*uq[2],
'UA' : lambda uq,ip: vA @ uq,
'UB' : lambda uq,ip: vB @ uq,
'UC' : lambda uq,ip: vC @ uq,
'UAB' : lambda uq,ip: vAB @ uq,
'UBC' : lambda uq,ip: vBC @ uq,
'UCA' : lambda uq,ip: vCA @ uq,
'UABC' : lambda uq,ip: Ms2f @ uq,
'UAB_BC_CA' : lambda uq,ip: Ms2ff @ uq,
'I120' : lambda uq,ip: ip,
'I1' : lambda uq,ip: ip[0],
'I2' : lambda uq,ip: ip[1],
'I0' : lambda uq,ip: ip[2],
'3I0' : lambda uq,ip: 3*ip[2],
'IA' : lambda uq,ip: vA @ ip,
'IB' : lambda uq,ip: vB @ ip,
'IC' : lambda uq,ip: vC @ ip,
'IAB' : lambda uq,ip: vAB @ ip,
'IBC' : lambda uq,ip: vBC @ ip,
'ICA' : lambda uq,ip: vCA @ ip,
'IABC' : lambda uq,ip: Ms2f @ ip,
'IAB_BC_CA' : lambda uq,ip: Ms2ff @ ip,
'Z120' : lambda uq,ip: uq / ip,
'Z1' : lambda uq,ip: uq[0] / ip[0],
'Z2' : lambda uq,ip: uq[1] / ip[1],
'Z0' : lambda uq,ip: uq[2] / ip[2],
'ZA' : lambda uq,ip: (vA @ uq) / (vA @ ip),
'ZB' : lambda uq,ip: (vB @ uq) / (vB @ ip),
'ZC' : lambda uq,ip: (vC @ uq) / (vC @ ip),
'ZAB' : lambda uq,ip: (vAB @ uq) / (vAB @ ip),
'ZBC' : lambda uq,ip: (vBC @ uq) / (vBC @ ip),
'ZCA' : lambda uq,ip: (vCA @ uq) / (vCA @ ip),
'ZABC' : lambda uq,ip: (Ms2f @ uq) / (Ms2f @ ip),
'ZAB_BC_CA' : lambda uq,ip: (Ms2ff @ uq) / (Ms2ff @ ip),
'S120' : lambda uq,ip: uq * np.conj(ip),
'S1' : lambda uq,ip: uq[0] * np.conj(ip[0]),
'S2' : lambda uq,ip: uq[1] * np.conj(ip[1]),
'S0' : lambda uq,ip: uq[2] * np.conj(ip[2]),
'SA' : lambda uq,ip: (vA @ uq) * np.conj(vA @ ip),
'SB' : lambda uq,ip: (vB @ uq) * np.conj(vB @ ip),
'SC' : lambda uq,ip: (vC @ uq) * np.conj(vC @ ip),
'SAB' : lambda uq,ip: (vAB @ uq) * np.conj(vAB @ ip),
'SBC' : lambda uq,ip: (vBC @ uq) * np.conj(vBC @ ip),
'SCA' : lambda uq,ip: (vCA @ uq) * np.conj(vCA @ ip),
'SABC' : lambda uq,ip: (Ms2f @ uq) * np.conj(Ms2f @ ip),
'S' : lambda uq,ip: np.sum((Ms2f @ uq) * np.conj(Ms2f @ ip)),
'SAB_BC_CA' : lambda uq,ip: (Ms2ff @ uq) * np.conj(Ms2ff @ ip)
})
mform1=dict({'' : lambda res,parname: res,
'R' : lambda res,parname: np.real(res),
'X' : lambda res,parname: np.imag(res),
'M' : lambda res,parname: np.abs(res),
'<f' : lambda res,parname: r2d*np.angle(res),
'R+jX' : lambda res,parname: "{0:<4} = {1:>8.1f} + {2:>8.1f}j".format(parname, np.real(res),np.imag(res)),
'M<f' : lambda res,parname: "{0:<4} = {1:>8.1f} ∠ {2:>6.1f}".format(parname, np.abs(res),r2d*np.angle(res))
})
mform3=dict({'' : lambda res,parname: res,
'R' : lambda res,parname: np.real(res),
'X' : lambda res,parname: np.imag(res),
'M' : lambda res,parname: np.abs(res),
'<f' : lambda res,parname: r2d*np.angle(res),
'R+jX' : lambda res,parname: "{0:<4} = [{1:>8.1f} + {2:>8.1f}j, {3:>8.1f} + {4:>8.1f}j, {5:>8.1f} + {6:>8.1f}j]".format(parname, np.real(res[0]), np.imag(res[0]), np.real(res[1]), np.imag(res[1]), np.real(res[2]), np.imag(res[2])),
'M<f' : lambda res,parname: "{0:<4} = [{1:>8.1f} ∠ {2:>6.1f}, {3:>8.1f} ∠ {4:>6.1f}, {5:>8.1f} ∠ {6:>6.1f}]".format(parname, np.abs(res[0]), r2d*np.angle(res[0]), np.abs(res[1]), r2d*np.angle(res[1]), np.abs(res[2]), r2d*np.angle(res[2]))
})
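# Commented illustration (not part of the original module) of how res() composes these
# tables: mselectz[parname] maps the sequence vectors (uq, ip) to the requested quantity,
# and mform1/mform3 render a scalar or 3-element result. The numbers are made up.
#
#   u120 = np.array([60000+0j, 5000+0j, 2000+0j])        # U1, U2, U0
#   i120 = np.array([1000+0j, 900+0j, 800+0j])           # I1, I2, I0
#   ia = mselectz['IA'](u120, i120)                      # vA @ i120 -> complex phase-A current
#   print(mform1['M<f'](ia, 'IA'))                       # e.g. "IA = ... ∠ ..."
#   print(mform3['M<f'](mselectz['UABC'](u120, i120), 'UABC'))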
def StrU(u120):
strUABC = "| UA = {0:>7.0f} ∠ {1:>6.1f} | UB = {2:>7.0f} ∠ {3:>6.1f} | UC = {4:>7.0f} ∠ {5:>6.1f} |\n"
strU120 = "| U1 = {0:>7.0f} ∠ {1:>6.1f} | U2 = {2:>7.0f} ∠ {3:>6.1f} | 3U0 = {4:>7.0f} ∠ {5:>6.1f} |\n"
strUAB_BC_CA = "| UAB = {0:>7.0f} ∠ {1:>6.1f} | UBC = {2:>7.0f} ∠ {3:>6.1f} | UCA = {4:>7.0f} ∠ {5:>6.1f} |\n"
u1,u2,u0 = u120
uA,uB,uC = Ms2f @ u120
uAB,uBC,uCA = Ms2ff @ u120
resstr = []
resstr.append(strUABC.format(np.abs(uA),r2d*np.angle(uA),np.abs(uB),r2d*np.angle(uB),np.abs(uC),r2d*np.angle(uC)))
resstr.append(strU120.format(np.abs(u1),r2d*np.angle(u1),np.abs(u2),r2d*np.angle(u2),np.abs(3*u0),r2d*np.angle(3*u0)))
resstr.append(strUAB_BC_CA.format(np.abs(uAB),r2d*np.angle(uAB),np.abs(uBC),r2d*np.angle(uBC),np.abs(uCA),r2d*np.angle(uCA)))
return ''.join(resstr)

"""preprocess.py: module for data pre-processing."""
__author__ = "<NAME>, <NAME>, <NAME> and <NAME>"
__copyright__ = "Copyright (c) 2020, 2021, <NAME>, <NAME>, <NAME> and <NAME>"
__credits__ = ["Department of Chemical Engineering, University of Utah, Salt Lake City, Utah, USA", "Universite Libre de Bruxelles, Aero-Thermo-Mechanics Laboratory, Brussels, Belgium"]
__license__ = "MIT"
__version__ = "1.0.0"
__maintainer__ = ["<NAME>", "<NAME>"]
__email__ = ["<EMAIL>", "<EMAIL>", "<EMAIL>"]
__status__ = "Production"
import numpy as np
import random
import copy
import operator
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
from PCAfold.styles import *
################################################################################
#
# Data Manipulation
#
################################################################################
_scalings_list = ['none', '', 'auto', 'std', 'pareto', 'vast', 'range', '0to1', '-1to1', 'level', 'max', 'poisson', 'vast_2', 'vast_3', 'vast_4']
# ------------------------------------------------------------------------------
def center_scale(X, scaling, nocenter=False):
"""
Centers and scales the original data set, :math:`\mathbf{X}`.
In the discussion below, we understand that :math:`X_j` is the :math:`j^{th}` column
of :math:`\mathbf{X}`.
- **Centering** is performed by subtracting the center, :math:`c_j`, from\
:math:`X_j`, where centers for all columns are stored in the matrix :math:`\mathbf{C}`:
.. math::
\mathbf{X_c} = \mathbf{X} - \mathbf{C}
Centers for each column are computed as:
.. math::
c_j = mean(X_j)
with the only exceptions of ``'0to1'`` and ``'-1to1'`` scalings, which introduce a different
quantity to center each column.
- **Scaling** is performed by dividing :math:`X_j` by the scaling\
factor, :math:`d_j`, where scaling factors\
for all columns are stored in the diagonal matrix :math:`\mathbf{D}`:
.. math::
\mathbf{X_s} = \mathbf{X} \\cdot \mathbf{D}^{-1}
If both centering and scaling is applied:
.. math::
\mathbf{X_{cs}} = (\mathbf{X} - \mathbf{C}) \\cdot \mathbf{D}^{-1}
Several scaling options are implemented here:
+----------------------------+--------------------------+--------------------------------------------------------------------+
| Scaling method | ``scaling`` | Scaling factor :math:`d_j` |
+============================+==========================+====================================================================+
| None | ``'none'`` | 1 |
+----------------------------+--------------------------+--------------------------------------------------------------------+
| Auto :cite:`Berg2006` | ``'auto'`` or ``'std'`` | :math:`\sigma` |
+----------------------------+--------------------------+--------------------------------------------------------------------+
| Pareto :cite:`Noda2008` | ``'pareto'`` | :math:`\sqrt{\sigma}` |
+----------------------------+--------------------------+--------------------------------------------------------------------+
| VAST :cite:`Keun2003` | ``'vast'`` | :math:`\sigma^2 / mean(X_j)` |
+----------------------------+--------------------------+--------------------------------------------------------------------+
| Range :cite:`Berg2006` | ``'range'`` | :math:`max(X_j) - min(X_j)` |
+----------------------------+--------------------------+--------------------------------------------------------------------+
| | 0 to 1 | | ``'0to1'`` | | :math:`d_j = max(X_j) - min(X_j)` |
| | | | | | :math:`c_j = min(X_j)` |
+----------------------------+--------------------------+--------------------------------------------------------------------+
| | -1 to 1 | | ``'-1to1'`` | | :math:`d_j = 0.5 \cdot (max(X_j) - min(X_j))` |
| | | | | | :math:`c_j = 0.5 \cdot (max(X_j) + min(X_j))` |
+----------------------------+--------------------------+--------------------------------------------------------------------+
| Level :cite:`Berg2006` | ``'level'`` | :math:`mean(X_j)` |
+----------------------------+--------------------------+--------------------------------------------------------------------+
| Max | ``'max'`` | :math:`max(X_j)` |
+----------------------------+--------------------------+--------------------------------------------------------------------+
| Poisson :cite:`Keenan2004` |``'poisson'`` | :math:`\sqrt{mean(X_j)}` |
+----------------------------+--------------------------+--------------------------------------------------------------------+
| Vast-2 | ``'vast_2'`` | :math:`\sigma^2 k^2 / mean(X_j)` |
+----------------------------+--------------------------+--------------------------------------------------------------------+
| Vast-3 | ``'vast_3'`` | :math:`\sigma^2 k^2 / max(X_j)` |
+----------------------------+--------------------------+--------------------------------------------------------------------+
| Vast-4 | ``'vast_4'`` | :math:`\sigma^2 k^2 / (max(X_j) - min(X_j))` |
+----------------------------+--------------------------+--------------------------------------------------------------------+
where :math:`\sigma` is the standard deviation of :math:`X_j`
and :math:`k` is the kurtosis of :math:`X_j`.
The effect of data preprocessing (including scaling) on low-dimensional manifolds was studied
in :cite:`Parente2013`.
**Example:**
.. code:: python
from PCAfold import center_scale
import numpy as np
# Generate dummy data set:
X = np.random.rand(100,20)
# Center and scale:
(X_cs, X_center, X_scale) = center_scale(X, 'range', nocenter=False)
:param X:
``numpy.ndarray`` specifying the original data set, :math:`\mathbf{X}`. It should be of size ``(n_observations,n_variables)``.
:param scaling:
``str`` specifying the scaling methodology. It can be one of the following:
``'none'``, ``''``, ``'auto'``, ``'std'``, ``'pareto'``, ``'vast'``, ``'range'``, ``'0to1'``,
``'-1to1'``, ``'level'``, ``'max'``, ``'poisson'``, ``'vast_2'``, ``'vast_3'``, ``'vast_4'``.
:param nocenter: (optional)
``bool`` specifying whether data should be centered by mean. If set to ``True`` data will *not* be centered.
:return:
- **X_cs** - ``numpy.ndarray`` specifying the centered and scaled data set, :math:`\mathbf{X_{cs}}`. It has size ``(n_observations,n_variables)``.
- **X_center** - ``numpy.ndarray`` specifying the centers, :math:`c_j`, applied on the original data set :math:`\mathbf{X}`. It has size ``(n_variables,)``.
- **X_scale** - ``numpy.ndarray`` specifying the scales, :math:`d_j`, applied on the original data set :math:`\mathbf{X}`. It has size ``(n_variables,)``.
"""
if not isinstance(X, np.ndarray):
raise ValueError("Parameter `X` has to be of type `numpy.ndarray`.")
try:
(n_observations, n_variables) = np.shape(X)
except:
raise ValueError("Parameter `X` has to have size `(n_observations,n_variables)`.")
if not isinstance(scaling, str):
raise ValueError("Parameter `scaling` has to be a string.")
else:
if scaling.lower() not in _scalings_list:
raise ValueError("Unrecognized scaling method.")
if not isinstance(nocenter, bool):
raise ValueError("Parameter `nocenter` has to be a boolean.")
if scaling.lower() not in ['max', 'level', 'poisson']:
for i in range(0, n_variables):
if np.all(X[:,i] == X[0,i]):
raise ValueError("Constant variable(s) are detected in the original data set. This will cause division by zero for the selected scaling. Consider removing the constant variables using `preprocess.remove_constant_vars`.")
if scaling.lower() in ['max', 'level', 'poisson']:
for i in range(0, n_variables):
if np.all(X[:,i] == 0):
raise ValueError("Constant and zeroed variable(s) are detected in the original data set. This will cause division by zero for the selected scaling. Consider removing the constant variables using `preprocess.remove_constant_vars`.")
X_cs = np.zeros_like(X, dtype=float)
X_center = X.mean(axis=0)
dev = 0 * X_center
kurt = 0 * X_center
for i in range(0, n_variables):
if scaling.lower() in ['auto', 'std', 'vast', 'vast_2', 'vast_3', 'vast_4', 'pareto']:
# Calculate the standard deviation (required for some scalings):
dev[i] = np.std(X[:, i], ddof=0)
if scaling.lower() in ['vast_2', 'vast_3', 'vast_4']:
# Calculate the kurtosis (required for some scalings):
kurt[i] = np.sum((X[:, i] - X_center[i]) ** 4) / n_observations / (np.sum((X[:, i] - X_center[i]) ** 2) / n_observations) ** 2
scaling = scaling.upper()
eps = np.finfo(float).eps
if scaling == 'NONE' or scaling == '':
X_scale = np.ones(n_variables)
elif scaling == 'AUTO' or scaling == 'STD':
X_scale = dev
elif scaling == 'VAST':
X_scale = dev * dev / (X_center + eps)
elif scaling == 'VAST_2':
X_scale = dev * dev * kurt * kurt / (X_center + eps)
elif scaling == 'VAST_3':
X_scale = dev * dev * kurt * kurt / np.max(X, axis=0)
elif scaling == 'VAST_4':
X_scale = dev * dev * kurt * kurt / (np.max(X, axis=0) - np.min(X, axis=0))
elif scaling == 'RANGE':
X_scale = np.max(X, axis=0) - np.min(X, axis=0)
elif scaling == '0TO1':
X_center = np.min(X, axis=0)
X_scale = np.max(X, axis=0) - np.min(X, axis=0)
elif scaling == '-1TO1':
X_center = 0.5*(np.max(X, axis=0) + np.min(X, axis=0))
X_scale = 0.5*(np.max(X, axis=0) - np.min(X, axis=0))
elif scaling == 'LEVEL':
X_scale = X_center
elif scaling == 'MAX':
X_scale = np.max(X, axis=0)
elif scaling == 'PARETO':
X_scale = np.zeros(n_variables)
for i in range(0, n_variables):
X_scale[i] = np.sqrt(dev[i])
elif scaling == 'POISSON':
X_scale = np.sqrt(X_center)
else:
raise ValueError('Unsupported scaling option')
for i in range(0, n_variables):
if nocenter:
X_cs[:, i] = (X[:, i]) / X_scale[i]
else:
X_cs[:, i] = (X[:, i] - X_center[i]) / X_scale[i]
if nocenter:
X_center = np.zeros(n_variables)
return(X_cs, X_center, X_scale)
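# A quick consistency check (not part of the original module) of the scaling table above
# for 'auto' scaling: the result should match column-wise standardization with ddof=0.
#
#   X = np.random.rand(50, 3)
#   (X_cs, X_center, X_scale) = center_scale(X, 'auto')
#   assert np.allclose(X_cs, (X - X.mean(axis=0)) / X.std(axis=0))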
# ------------------------------------------------------------------------------
def invert_center_scale(X_cs, X_center, X_scale):
"""
Inverts whatever centering and scaling was done by the ``center_scale`` function:
.. math::
\mathbf{X} = \mathbf{X_{cs}} \\cdot \mathbf{D} + \mathbf{C}
**Example:**
.. code:: python
from PCAfold import center_scale, invert_center_scale
import numpy as np
# Generate dummy data set:
X = np.random.rand(100,20)
# Center and scale:
(X_cs, X_center, X_scale) = center_scale(X, 'range', nocenter=False)
# Uncenter and unscale:
X = invert_center_scale(X_cs, X_center, X_scale)
:param X_cs:
``numpy.ndarray`` specifying the centered and scaled data set, :math:`\mathbf{X_{cs}}`. It should be of size ``(n_observations,n_variables)``.
:param X_center:
``numpy.ndarray`` specifying the centers, :math:`c_j`, applied on the original data set, :math:`\mathbf{X}`. It should be of size ``(n_variables,)``.
:param X_scale:
``numpy.ndarray`` specifying the scales, :math:`d_j`, applied on the original data set, :math:`\mathbf{X}`. It should be of size ``(n_variables,)``.
:return:
- **X** - ``numpy.ndarray`` specifying the original data set, :math:`\mathbf{X}`. It has size ``(n_observations,n_variables)``.
"""
if not isinstance(X_cs, np.ndarray):
raise ValueError("Parameter `X_cs` has to be of type `numpy.ndarray`.")
try:
(n_observations, n_variables) = np.shape(X_cs)
except:
raise ValueError("Parameter `X_cs` has to have size `(n_observations,n_variables)`.")
if not isinstance(X_center, np.ndarray):
raise ValueError("Parameter `X_center` has to be of type `numpy.ndarray`.")
try:
(n_variables_centers,) = np.shape(X_center)
except:
raise ValueError("Parameter `X_center` has to have size `(n_variables,)`.")
if not isinstance(X_scale, np.ndarray):
raise ValueError("Parameter `X_scale` has to be of type `numpy.ndarray`.")
try:
(n_variables_scales,) = np.shape(X_scale)
except:
raise ValueError("Parameter `X_scale` has to have size `(n_variables,)`.")
if n_variables != n_variables_centers:
raise ValueError("Parameter `X_center` has different number of variables than parameter `X_cs`.")
if n_variables != n_variables_scales:
raise ValueError("Parameter `X_scale` has different number of variables than parameter `X_cs`.")
if n_variables == 1:
X = X_cs * X_scale + X_center
else:
X = np.zeros_like(X_cs, dtype=float)
for i in range(0, n_variables):
X[:, i] = X_cs[:, i] * X_scale[i] + X_center[i]
return(X)
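# Round-trip sanity check (not part of the original module): inverting the centering and
# scaling should recover the original data to numerical precision.
#
#   X = np.random.rand(100, 20)
#   (X_cs, X_center, X_scale) = center_scale(X, '-1to1')
#   assert np.allclose(X, invert_center_scale(X_cs, X_center, X_scale))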
# ------------------------------------------------------------------------------
def log_transform(X, method='log', threshold=1.e-6):
"""
Performs log transformation of the original data set, :math:`\mathbf{X}`.
For an example original function:
.. image:: ../images/log_transform-original-function.svg
:width: 700
:align: center
The symlog transformation can be obtained with ``method='symlog'``:
.. image:: ../images/log_transform-symlog.svg
:width: 700
:align: center
The continuous symlog transformation can be obtained with ``method='continuous-symlog'``:
.. image:: ../images/log_transform-continuous-symlog.svg
:width: 700
:align: center
**Example:**
.. code:: python
from PCAfold import log_transform
import numpy as np
# Generate dummy data set:
X = np.random.rand(100,20) + 1
# Perform log transformation:
X_log = log_transform(X)
# Perform symlog transformation:
X_symlog = log_transform(X, method='symlog', threshold=1.e-4)
:param X:
``numpy.ndarray`` specifying the original data set, :math:`\mathbf{X}`. It should be of size ``(n_observations,n_variables)``.
:param method: (optional)
``str`` specifying the log-transformation method. It can be one of the following: ``log``, ``ln``, ``symlog``, ``continuous-symlog``.
:param threshold: (optional)
``float`` or ``int`` specifying the threshold for symlog transformation.
:return:
- **X_transformed** - ``numpy.ndarray`` specifying the log-transformed data set. It has size ``(n_observations,n_variables)``.
"""
__methods = ['log', 'ln', 'symlog', 'continuous-symlog']
if not isinstance(X, np.ndarray):
raise ValueError("Parameter `X` has to be of type `numpy.ndarray`.")
try:
(n_observations, n_variables) = np.shape(X)
except:
raise ValueError("Parameter `X` has to have size `(n_observations,n_variables)`.")
if not isinstance(method, str):
raise ValueError("Parameter `method` has to be a string.")
else:
if method.lower() not in __methods:
raise ValueError("Unrecognized transformation method.")
if not isinstance(threshold, float):
if not isinstance(threshold, int):
raise ValueError("Parameter `threshold` has to be of type `float` or `int`.")
X_transformed = np.zeros_like(X)
if method == 'log':
for j in range(0, n_variables):
X_transformed[:,j] = np.log10(X[:,j])
elif method == 'ln':
for j in range(0, n_variables):
X_transformed[:,j] = np.log(X[:,j])
elif method == 'symlog':
for j in range(0, n_variables):
for i in range(0,n_observations):
if np.abs(X[i,j]) > threshold:
X_transformed[i,j] = np.sign(X[i,j]) * np.log10(np.abs(X[i,j]))
else:
X_transformed[i,j] = X[i,j]
elif method == 'continuous-symlog':
for j in range(0, n_variables):
for i in range(0,n_observations):
X_transformed[i,j] = np.sign(X[i,j]) * np.log10(1. + np.abs(X[i,j]/threshold))
return(X_transformed)
# ------------------------------------------------------------------------------
class PreProcessing:
"""
Performs a composition of data manipulation done by ``remove_constant_vars``
and ``center_scale`` functions on the original data set,
:math:`\mathbf{X}`. It can be used to store the result of that manipulation.
Specifically, it:
- checks for constant columns in a data set and removes them,
- centers and scales the data.
**Example:**
.. code:: python
from PCAfold import PreProcessing
import numpy as np
# Generate dummy data set with a constant variable:
X = np.random.rand(100,20)
X[:,5] = np.ones((100,))
# Instantiate PreProcessing class object:
preprocessed = PreProcessing(X, 'range', nocenter=False)
:param X:
``numpy.ndarray`` specifying the original data set, :math:`\mathbf{X}`. It should be of size ``(n_observations,n_variables)``.
:param scaling:
``str`` specifying the scaling methodology. It can be one of the following:
``'none'``, ``''``, ``'auto'``, ``'std'``, ``'pareto'``, ``'vast'``, ``'range'``, ``'0to1'``,
``'-1to1'``, ``'level'``, ``'max'``, ``'poisson'``, ``'vast_2'``, ``'vast_3'``, ``'vast_4'``.
:param nocenter: (optional)
``bool`` specifying whether data should be centered by mean. If set to ``True`` data will *not* be centered.
**Attributes:**
- **X_removed** - (read only) ``numpy.ndarray`` specifying the original data set with any constant columns removed. It has size ``(n_observations,n_variables)``.
- **idx_removed** - (read only) ``list`` specifying the indices of columns removed from :math:`\mathbf{X}`.
- **idx_retained** - (read only) ``list`` specifying the indices of columns retained in :math:`\mathbf{X}`.
- **X_cs** - (read only) ``numpy.ndarray`` specifying the centered and scaled data set, :math:`\mathbf{X_{cs}}`. It should be of size ``(n_observations,n_variables)``.
- **X_center** - (read only) ``numpy.ndarray`` specifying the centers, :math:`c_j`, applied on the original data set :math:`\mathbf{X}`. It should be of size ``(n_variables,)``.
- **X_scale** - (read only) ``numpy.ndarray`` specifying the scales, :math:`d_j`, applied on the original data set :math:`\mathbf{X}`. It should be of size ``(n_variables,)``.
"""
def __init__(self, X, scaling='none', nocenter=False):
(self.__X_removed, self.__idx_removed, self.__idx_retained) = remove_constant_vars(X)
(self.__X_cs, self.__X_center, self.__X_scale) = center_scale(self.X_removed, scaling, nocenter=nocenter)
@property
def X_removed(self):
return self.__X_removed
@property
def idx_removed(self):
return self.__idx_removed
@property
def idx_retained(self):
return self.__idx_retained
@property
def X_cs(self):
return self.__X_cs
@property
def X_center(self):
return self.__X_center
@property
def X_scale(self):
return self.__X_scale
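# Commented usage sketch (not part of the original module): the constant column added
# below is removed by the constructor and reported through the read-only attributes.
#
#   X = np.random.rand(100, 20)
#   X[:, 5] = np.ones((100,))
#   preprocessed = PreProcessing(X, 'range')
#   preprocessed.idx_removed     # [5]
#   preprocessed.idx_retained    # the remaining 19 column indices
#   preprocessed.X_cs.shape      # (100, 19)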
# ------------------------------------------------------------------------------
def remove_constant_vars(X, maxtol=1e-12, rangetol=1e-4):
"""
Removes any constant columns from the original data set, :math:`\mathbf{X}`.
The :math:`j^{th}` column, :math:`X_j`, is considered constant if either of the following is true:
- The maximum of an absolute value of a column :math:`X_j` is less than ``maxtol``:
.. math::
max(|X_j|) < \\verb|maxtol|
- The ratio of the range of values in a column :math:`X_j` to :math:`max(|X_j|)` is less than ``rangetol``:
.. math::
\\frac{max(X_j) - min(X_j)}{max(|X_j|)} < \\verb|rangetol|
Specifically, it can be used as preprocessing for PCA so the eigenvalue
calculation doesn't break.
**Example:**
.. code:: python
from PCAfold import remove_constant_vars
import numpy as np
# Generate dummy data set with a constant variable:
X = np.random.rand(100,20)
X[:,5] = np.ones((100,))
# Remove the constant column:
(X_removed, idx_removed, idx_retained) = remove_constant_vars(X)
:param X:
``numpy.ndarray`` specifying the original data set, :math:`\mathbf{X}`. It should be of size ``(n_observations,n_variables)``.
:param maxtol: (optional)
``float`` specifying the tolerance for :math:`max(|X_j|)`.
:param rangetol: (optional)
``float`` specifying the tolerance for :math:`max(X_j) - min(X_j)` over :math:`max(|X_j|)`.
:return:
- **X_removed** - ``numpy.ndarray`` specifying the original data set, :math:`\mathbf{X}` with any constant columns removed. It has size ``(n_observations,n_variables)``.
- **idx_removed** - ``list`` specifying the indices of columns removed from :math:`\mathbf{X}`.
- **idx_retained** - ``list`` specifying the indices of columns retained in :math:`\mathbf{X}`.
"""
if not isinstance(X, np.ndarray):
raise ValueError("Parameter `X` has to be of type `numpy.ndarray`.")
try:
(n_observations, n_variables) = np.shape(X)
except:
raise ValueError("Parameter `X` has to have size `(n_observations,n_variables)`.")
if not isinstance(maxtol, float):
raise ValueError("Parameter `maxtol` has to be a `float`.")
if not isinstance(rangetol, float):
raise ValueError("Parameter `rangetol` has to be a `float`.")
idx_removed = []
idx_retained = []
for i in reversed(range(0, n_variables)):
min_value = np.min(X[:, i], axis=0)
max_value = np.max(X[:, i], axis=0)
maxabs = np.max(np.abs(X[:, i]), axis=0)
if (maxabs < maxtol) or ((max_value - min_value) / maxabs < rangetol):
X = np.delete(X, i, 1)
idx_removed.append(i)
else:
idx_retained.append(i)
X_removed = X
idx_removed = idx_removed[::-1]
idx_retained = idx_retained[::-1]
return(X_removed, idx_removed, idx_retained)
# ------------------------------------------------------------------------------
def order_variables(X, method='mean', descending=True):
"""
Orders variables in the original data set, :math:`\mathbf{X}`, using a selected method.
**Example:**
.. code::
from PCAfold import order_variables
import numpy as np
# Generate a dummy data set:
X = np.array([[100, 1, 10],
[200, 2, 20],
[300, 3, 30]])
# Order variables by the mean value in the descending order:
(X_ordered, idx) = order_variables(X, method='mean', descending=True)
The code above should return an ordered data set:
.. code-block:: text
array([[100, 10, 1],
[200, 20, 2],
[300, 30, 3]])
and the list of ordered variable indices:
.. code-block:: text
[1, 2, 0]
:param X:
``numpy.ndarray`` specifying the original data set, :math:`\mathbf{X}`. It should be of size ``(n_observations,n_variables)``.
:param method: (optional)
``str`` specifying the ordering method. It can be one of the following:
``'mean'``, ``'min'``, ``'max'``, ``'std'`` or ``'var'``.
:param descending: (optional)
``bool`` specifying whether variables should be ordered in the descending order.
If set to ``False``, variables will be ordered in the ascending order.
:return:
- **X_ordered** - ``numpy.ndarray`` specifying the original data set with ordered variables. It has size ``(n_observations,n_variables)``.
- **idx** - ``list`` specifying the indices of the ordered variables. It has length ``n_variables``.
"""
__method = ['mean', 'min', 'max', 'std', 'var']
if not isinstance(X, np.ndarray):
raise ValueError("Parameter `X` has to be of type `numpy.ndarray`.")
try:
(n_observations, n_variables) = np.shape(X)
except:
raise ValueError("Parameter `X` has to have size `(n_observations,n_variables)`.")
if not isinstance(method, str):
raise ValueError("Parameter `method` has to be a string.")
if method not in __method:
raise ValueError("Parameter `method` has to be a 'mean', 'min', 'max', 'std' or 'var'.")
if not isinstance(descending, bool):
raise ValueError("Parameter `descending` has to be a boolean.")
if method == 'mean':
criterion = np.mean(X, axis=0)
elif method == 'min':
criterion = np.min(X, axis=0)
elif method == 'max':
criterion = np.max(X, axis=0)
elif method == 'std':
criterion = np.std(X, axis=0)
elif method == 'var':
criterion = np.var(X, axis=0)
sorted_pairs = sorted(enumerate(criterion), key=operator.itemgetter(1))
sorted_indices = [index for index, element in sorted_pairs]
if descending:
idx = sorted_indices[::-1]
else:
idx = sorted_indices
X_ordered = X[:,idx]
return (X_ordered, idx)
# ------------------------------------------------------------------------------
def outlier_detection(X, scaling, method='MULTIVARIATE TRIMMING', trimming_threshold=0.5, quantile_threshold=0.9899, verbose=False):
"""
Finds outliers in the original data set, :math:`\mathbf{X}`, and returns
indices of observations without outliers as well as indices of the outliers
themselves. Two options are implemented here:
- ``'MULTIVARIATE TRIMMING'``
Outliers are detected based on multivariate Mahalanobis distance, :math:`D_M`:
.. math::
D_M = \\sqrt{(\mathbf{X} - \mathbf{\\bar{X}})^T \mathbf{S}^{-1} (\mathbf{X} - \mathbf{\\bar{X}})}
where :math:`\mathbf{\\bar{X}}` is a matrix of the same size as :math:`\mathbf{X}`
storing in each column a copy of the average value of the same column in :math:`\mathbf{X}`.
:math:`\mathbf{S}` is the covariance matrix computed as per ``PCA`` class.
Note that the scaling option selected will affect the covariance matrix :math:`\mathbf{S}`.
Since Mahalanobis distance takes into account covariance between variables,
observations with sufficiently large :math:`D_M` can be considered as outliers.
For more detailed information on Mahalanobis distance the user is referred
to :cite:`Bishop2006` or :cite:`DeMaesschalck2000`.
The threshold above which observations will be classified as outliers
can be specified using ``trimming_threshold`` parameter. Specifically,
the :math:`i^{th}` observation is classified as an outlier if:
.. math::
D_{M, i} > \\verb|trimming_threshold| \\cdot max(D_M)
- ``'PC CLASSIFIER'``
Outliers are detected based on major and minor principal components (PCs).
The method of principal component classifier (PCC) was first proposed in
:cite:`Shyu2003`. The application of this technique to combustion data sets
was studied in :cite:`Parente2013`. Specifically,
the :math:`i^{th}` observation is classified as an outlier
if the *first PC classifier* based on :math:`q`-first (major) PCs:
.. math::
\sum_{j=1}^{q} \\frac{z_{ij}^2}{L_j} > c_1
or if the *second PC classifier* based on :math:`(Q-k+1)`-last (minor) PCs:
.. math::
\sum_{j=k}^{Q} \\frac{z_{ij}^2}{L_j} > c_2
where :math:`z_{ij}` is the :math:`i^{th}, j^{th}` element from the principal
components matrix :math:`\mathbf{Z}` and :math:`L_j` is the :math:`j^{th}`
eigenvalue from :math:`\mathbf{L}` (as per ``PCA`` class).
Major PCs are selected such that the total variance explained is 50%.
Minor PCs are selected such that the remaining variance they explain is 20%.
Coefficients :math:`c_1` and :math:`c_2` are found such that they
represent the ``quantile_threshold`` (by default 98.99%) quantile of the
empirical distributions of the first and second PC classifier respectively.
**Example:**
.. code::
from PCAfold import outlier_detection
import numpy as np
# Generate dummy data set:
X = np.random.rand(100,20)
# Find outliers:
(idx_outliers_removed, idx_outliers) = outlier_detection(X, scaling='auto', method='MULTIVARIATE TRIMMING', trimming_threshold=0.8, verbose=True)
# New data set without outliers can be obtained as:
X_outliers_removed = X[idx_outliers_removed,:]
# Observations that were classified as outliers can be obtained as:
X_outliers = X[idx_outliers,:]
:param X:
``numpy.ndarray`` specifying the original data set, :math:`\mathbf{X}`. It should be of size ``(n_observations,n_variables)``.
:param scaling:
``str`` specifying the scaling methodology. It can be one of the following:
``'none'``, ``''``, ``'auto'``, ``'std'``, ``'pareto'``, ``'vast'``, ``'range'``, ``'0to1'``,
``'-1to1'``, ``'level'``, ``'max'``, ``'poisson'``, ``'vast_2'``, ``'vast_3'``, ``'vast_4'``.
:param method: (optional)
``str`` specifying the outlier detection method to use. It should be
``'MULTIVARIATE TRIMMING'`` or ``'PC CLASSIFIER'``.
:param trimming_threshold: (optional)
``float`` specifying the trimming threshold to use in combination with ``'MULTIVARIATE TRIMMING'`` method.
:param quantile_threshold: (optional)
``float`` specifying the quantile threshold to use in combination with ``'PC CLASSIFIER'`` method.
:param verbose: (optional)
``bool`` for printing verbose details.
:return:
- **idx_outliers_removed** - ``list`` specifying the indices of observations without outliers.
- **idx_outliers** - ``list`` specifying the indices of observations that were classified as outliers.
"""
from PCAfold import PCA
_detection_methods = ['MULTIVARIATE TRIMMING', 'PC CLASSIFIER']
if not isinstance(X, np.ndarray):
raise ValueError("Parameter `X` has to be of type `numpy.ndarray`.")
try:
(n_observations, n_variables) = np.shape(X)
except:
raise ValueError("Parameter `X` has to have size `(n_observations,n_variables)`.")
if not isinstance(scaling, str):
raise ValueError("Parameter `scaling` has to be a string.")
else:
if scaling.lower() not in _scalings_list:
raise ValueError("Unrecognized scaling method.")
if not isinstance(method, str):
raise ValueError("Parameter `method` has to be a string.")
else:
if method.upper() not in _detection_methods:
raise ValueError("Unrecognized outlier detection method.")
if not isinstance(trimming_threshold, float):
raise ValueError("Parameter `trimming_threshold` has to be a `float`.")
if trimming_threshold < 0 or trimming_threshold > 1:
raise ValueError("Parameter `trimming_threshold` has to be between 0 and 1.")
if not isinstance(quantile_threshold, float):
raise ValueError("Parameter `quantile_threshold` has to be a `float`.")
if not isinstance(verbose, bool):
raise ValueError("Parameter `verbose` has to be a boolean.")
(n_observations, n_variables) = np.shape(X)
idx_full = np.arange(0, n_observations)
idx_outliers_removed = []
idx_outliers = []
pca_X = PCA(X, scaling=scaling, n_components=0)
if method.upper() == 'MULTIVARIATE TRIMMING':
means_of_X = np.mean(X, axis=0)
covariance_matrix = pca_X.S
inverse_covariance_matrix = np.linalg.inv(covariance_matrix)
mahalanobis_distances = np.zeros((n_observations,))
for n_obs in range(0, n_observations):
mahalanobis_distance = np.sqrt(np.dot((X[n_obs,:] - means_of_X), np.dot(inverse_covariance_matrix, (X[n_obs,:] - means_of_X))))
mahalanobis_distances[n_obs,] = mahalanobis_distance
minimum_mahalanobis_distance = np.min(mahalanobis_distances)
maximum_mahalanobis_distance = np.max(mahalanobis_distances)
range_mahalanobis_distance = maximum_mahalanobis_distance - minimum_mahalanobis_distance
(idx_outliers, ) = np.where(mahalanobis_distances > trimming_threshold * maximum_mahalanobis_distance)
idx_outliers_removed = np.setdiff1d(idx_full, idx_outliers)
if verbose:
n_outliers = len(idx_outliers)
print('Number of observations classified as outliers: ' + str(n_outliers))
elif method.upper() == 'PC CLASSIFIER':
principal_components = pca_X.transform(X)
eigenvalues = pca_X.L
n_components = pca_X.n_components
# Select major components based on 50% of the original data variance:
pca_major = pca_X.set_retained_eigenvalues(method='TOTAL VARIANCE', option=0.5)
n_major_components = pca_major.n_components
# Select minor components based on 20% of the total variance in the data:
pca_minor = pca_X.set_retained_eigenvalues(method='TOTAL VARIANCE', option=0.8)
n_minor_components = pca_minor.n_components
if verbose:
print("Major components that will be selected are: " + ', '.join([str(i) for i in range(1, n_major_components+1)]))
if verbose:
print("Minor components that will be selected are: " + ', '.join([str(i) for i in range(n_minor_components, n_components+1)]))
scaled_squared_PCs = np.divide(np.square(principal_components), eigenvalues)
distances_major = np.sum(scaled_squared_PCs[:,0:n_major_components], axis=1)
distances_minor = np.sum(scaled_squared_PCs[:,(n_minor_components-1):n_components], axis=1)
# Threshold coefficient c_1 (for major PCs):
threshold_coefficient_major = np.quantile(distances_major, quantile_threshold)
# Threshold coefficient c_2 (for minor PCs):
threshold_coefficient_minor = np.quantile(distances_minor, quantile_threshold)
(idx_outliers_major, ) = np.where((distances_major > threshold_coefficient_major))
(idx_outliers_minor, ) = np.where((distances_minor > threshold_coefficient_minor))
idx_outliers = np.vstack((idx_outliers_major[:,np.newaxis], idx_outliers_minor[:,np.newaxis]))
idx_outliers = np.unique(idx_outliers)
idx_outliers_removed = np.setdiff1d(idx_full, idx_outliers)
if verbose:
n_outliers = len(idx_outliers)
print('Number of observations classified as outliers: ' + str(n_outliers))
idx_outliers_removed = np.sort(idx_outliers_removed.astype(int))
idx_outliers = np.sort(idx_outliers.astype(int))
return (idx_outliers_removed, idx_outliers)
# ------------------------------------------------------------------------------
class ConditionalStatistics:
"""
Enables computing conditional statistics on the original data set, :math:`\\mathbf{X}`.
This includes:
- conditional mean
- conditional minimum
- conditional maximum
- conditional standard deviation
Other quantities can be added in the future at the user's request.
**Example:**
.. code:: python
from PCAfold import ConditionalStatistics
import numpy as np
# Generate dummy variables:
conditioning_variable = np.linspace(-1,1,100)
y = -conditioning_variable**2 + 1
# Instantiate object of the ConditionalStatistics class
# and compute conditional statistics in 10 bins of the conditioning variable:
cond = ConditionalStatistics(y[:,None], conditioning_variable, k=10)
# Access conditional statistics:
conditional_mean = cond.conditional_mean
conditional_min = cond.conditional_minimum
conditional_max = cond.conditional_maximum
conditional_std = cond.conditional_standard_deviation
# Access the centroids of the created bins:
centroids = cond.centroids
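# Alternatively (a sketch), condition on user-specified split values instead of k equal bins:
cond_custom = ConditionalStatistics(y[:,None], conditioning_variable, split_values=[-0.5, 0.0, 0.5])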
:param X:
``numpy.ndarray`` specifying the original data set, :math:`\\mathbf{X}`. It should be of size ``(n_observations,n_variables)``.
:param conditioning_variable:
``numpy.ndarray`` specifying a single variable to be used as a
conditioning variable. It should be of size ``(n_observations,1)`` or ``(n_observations,)``.
:param k:
``int`` specifying the number of bins to create in the conditioning variable.
It has to be a positive number.
:param split_values:
``list`` specifying values at which splits should be performed.
If set to ``None``, splits will be performed using :math:`k` equal variable bins.
:param verbose: (optional)
``bool`` for printing verbose details.
**Attributes:**
- **idx** - (read only) ``numpy.ndarray`` of cluster (bins) classifications. It has size ``(n_observations,)``.
- **borders** - (read only) ``list`` of values that define borders for the clusters (bins). It has length ``k+1``.
- **centroids** - (read only) ``list`` of values that specify bins centers. It has length ``k``.
- **conditional_mean** - (read only) ``numpy.ndarray`` specifying the conditional means of all original variables in the :math:`k` bins created. It has size ``(k,n_variables)``.
- **conditional_minimum** - (read only) ``numpy.ndarray`` specifying the conditional minimums of all original variables in the :math:`k` bins created. It has size ``(k,n_variables)``.
- **conditional_maximum** - (read only) ``numpy.ndarray`` specifying the conditional maximums of all original variables in the :math:`k` bins created. It has size ``(k,n_variables)``.
- **conditional_standard_deviation** - (read only) ``numpy.ndarray`` specifying the conditional standard deviations of all original variables in the :math:`k` bins created. It has size ``(k,n_variables)``.
"""
def __init__(self, X, conditioning_variable, k=20, split_values=None, verbose=False):
if not isinstance(X, np.ndarray):
raise ValueError("Parameter `X` has to be of type `numpy.ndarray`.")
try:
(n_observations_X, n_variables_X) = np.shape(X)
except:
raise ValueError("Parameter `X` has to have size `(n_observations,n_variables)`.")
if not isinstance(conditioning_variable, np.ndarray):
raise ValueError("Parameter `conditioning_variable` has to be of type `numpy.ndarray`.")
try:
(n_observations, n_variables) = np.shape(conditioning_variable)
except:
(n_observations,) = np.shape(conditioning_variable)
n_variables = 1
if n_observations_X != n_observations:
raise ValueError("The original data set `X` and the `conditioning_variable` should have the same number of observations.")
if n_variables != 1:
raise ValueError("Parameter `conditioning_variable` has to have shape `(n_observations,1)` or `(n_observations,)`.")
if not (isinstance(k, int) and k > 0):
raise ValueError("Parameter `k` has to be a positive `int`.")
if split_values is not None:
if not isinstance(split_values, list):
raise ValueError("Parameter `split_values` has to be of type `None` or `list`.")
if not isinstance(verbose, bool):
raise ValueError("Parameter `verbose` has to be of type `bool`.")
if split_values is None:
if verbose:
print('Conditioning the data set based on equal bins of the conditioning variable.')
(idx, borders) = variable_bins(conditioning_variable, k, verbose=verbose)
if split_values is not None:
if verbose:
print('Conditioning the data set based on user-specified bins of the conditioning variable.')
(idx, borders) = predefined_variable_bins(conditioning_variable, split_values=split_values, verbose=verbose)
true_k = len(np.unique(idx))
conditional_mean = np.zeros((true_k, n_variables_X))
conditional_minimum = np.zeros((true_k, n_variables_X))
conditional_maximum = np.zeros((true_k, n_variables_X))
conditional_standard_deviation = np.zeros((true_k, n_variables_X))
centroids = []
for i in range(0,true_k):
# Compute the centroids of all bins:
centroids.append((borders[i] + borders[i+1])/2)
# Compute conditional statistics in the generated bins:
conditional_mean[i,:] = np.mean(X[idx==i,:], axis=0)[None,:]
conditional_minimum[i,:] = np.min(X[idx==i,:], axis=0)[None,:]
conditional_maximum[i,:] = np.max(X[idx==i,:], axis=0)[None,:]
conditional_standard_deviation[i,:] = np.std(X[idx==i,:], axis=0)[None,:]
self.__idx = idx
self.__borders = borders
self.__centroids = np.array(centroids)
self.__conditional_mean = conditional_mean
self.__conditional_minimum = conditional_minimum
self.__conditional_maximum = conditional_maximum
self.__conditional_standard_deviation = conditional_standard_deviation
@property
def idx(self):
return self.__idx
@property
def borders(self):
return self.__borders
@property
def centroids(self):
return self.__centroids
@property
def conditional_mean(self):
return self.__conditional_mean
@property
def conditional_minimum(self):
return self.__conditional_minimum
@property
def conditional_maximum(self):
return self.__conditional_maximum
@property
def conditional_standard_deviation(self):
return self.__conditional_standard_deviation
# ------------------------------------------------------------------------------
class KernelDensity:
"""
Enables kernel density weighting of the original data set, :math:`\mathbf{X}`,
based on *single-variable* or *multi-variable* case as proposed in
:cite:`Coussement2012`.
The goal of both cases is to obtain a vector of weights, :math:`\\mathbf{W_c}`, that
has the same number of elements as there are observations in the original
data set, :math:`\mathbf{X}`.
Each observation will then get multiplied by the corresponding weight from
:math:`\mathbf{W_c}`.
.. note::
Kernel density weighting technique is usually very expensive, even
on data sets with a relatively small number of observations.
Since the *single-variable* case is a cheaper option than the *multi-variable*
case, it is recommended that this technique is tried first for larger data
sets.
Gaussian kernel is used in both approaches:
.. math::
K_{c, c'} = \sqrt{\\frac{1}{2 \pi h^2}} exp(- \\frac{d^2}{2 h^2})
:math:`h` is the kernel bandwidth:
.. math::
h = \Big( \\frac{4 \hat{\sigma}}{3 n} \Big)^{1/5}
where :math:`\hat{\sigma}` is the standard deviation of the considered variable
and :math:`n` is the number of observations in the data set.
:math:`d` is the distance between two observations :math:`c` and :math:`c'`:
.. math::
d = |x_c - x_{c'}|
**Single-variable**
If the ``conditioning_variable`` argument is a single vector, weighting will be performed
according to the *single-variable* case. It begins by summing Gaussian kernels:
.. math::
\mathbf{K_c} = \sum_{c' = 1}^{c' = n} \\frac{1}{n} K_{c, c'}
and weights are then computed as:
.. math::
\mathbf{W_c} = \\frac{\\frac{1}{\mathbf{K_c}}}{max(\\frac{1}{\mathbf{K_c}})}
**Multi-variable**
If the ``conditioning_variable`` argument is a matrix of multiple variables, weighting will
be performed according to the *multi-variable* case. It begins by summing
Gaussian kernels for a :math:`k^{th}` variable:
.. math::
\mathbf{K_c}_{, k} = \sum_{c' = 1}^{c' = n} \\frac{1}{n} K_{c, c', k}
Global density taking into account all variables is then obtained as:
.. math::
\mathbf{K_{c}} = \prod_{k=1}^{k=Q} \mathbf{K_c}_{, k}
where :math:`Q` is the total number of conditioning variables, and weights are computed as:
.. math::
\mathbf{W_c} = \\frac{\\frac{1}{\mathbf{K_c}}}{max(\\frac{1}{\mathbf{K_c}})}
**Example:**
.. code:: python
from PCAfold import KernelDensity
import numpy as np
# Generate dummy data set:
X = np.random.rand(100,20)
# Perform kernel density weighting based on the first variable:
kerneld = KernelDensity(X, X[:,0])
# Access the weighted data set:
X_weighted = kerneld.X_weighted
# Access the weights used to scale the data set:
weights = kerneld.weights
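# Alternatively (a sketch), weight using several conditioning variables (multi-variable case):
kerneld_multi = KernelDensity(X, X[:,0:3])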
:param X:
``numpy.ndarray`` specifying the original data set, :math:`\mathbf{X}`. It should be of size ``(n_observations,n_variables)``.
:param conditioning_variable:
``numpy.ndarray`` specifying either a single variable or multiple variables to be used as a
conditioning variable for kernel weighting procedure. Note that it can also
be passed as the data set :math:`\mathbf{X}`.
**Attributes:**
- **weights** - ``numpy.ndarray`` specifying the computed weights, :math:`\mathbf{W_c}`. It has size ``(n_observations,1)``.
- **X_weighted** - ``numpy.ndarray`` specifying the weighted data set (each observation in\
:math:`\mathbf{X}` is multiplied by the corresponding weight in :math:`\mathbf{W_c}`). It has size ``(n_observations,n_variables)``.
"""
def __init__(self, X, conditioning_variable, verbose=False):
if not isinstance(X, np.ndarray):
raise ValueError("Parameter `X` has to be of type `numpy.ndarray`.")
try:
(n_observations_X, n_variables_X) = np.shape(X)
except:
raise ValueError("Parameter `X` has to have size `(n_observations,n_variables)`.")
if not isinstance(conditioning_variable, np.ndarray):
raise ValueError("Parameter `conditioning_variable` has to be of type `numpy.ndarray`.")
try:
(n_observations, n_variables) = np.shape(conditioning_variable)
except:
(n_observations, n_variables) = np.shape(conditioning_variable[:,np.newaxis])
if n_observations_X != n_observations:
raise ValueError("The data set to weight and the conditioning variable should have the same number of observations.")
if n_variables == 1:
if verbose: print('Single-variable case will be applied.')
self.__weights = self.__single_variable_observation_weights(conditioning_variable)
elif n_variables > 1:
if verbose: print('Multi-variable case will be applied.')
self.__weights = self.__multi_variable_observation_weights(conditioning_variable)
self.__X_weighted = np.multiply(X, self.weights)
@property
def weights(self):
return self.__weights
@property
def X_weighted(self):
return self.__X_weighted
# Computes eq.(26):
def __bandwidth(self, n, mean_standard_deviation):
"""
This function computes kernel bandwidth as:
.. math::
h = \Big( \\frac{4 \hat{\sigma}}{3 n} \Big)^{1/5}
:param n:
number of observations in a data set or a variable vector.
:param mean_standard_deviation:
mean standard deviation in the entire data set or a variable vector.
:returns:
- **h** - kernel bandwidth, scalar.
"""
h = (4*mean_standard_deviation/(3*n))**(1/5)
return(h)
# Computes eq.(21):
def __distance(self, x_1, x_2):
"""
This function computes distance between two observations:
.. math::
d = |x_1 - x_2|
:param x_1:
first observation.
:param x_2:
second observation.
:returns:
- **d** - distance between the first and second observation.
"""
d = abs(x_1 - x_2)
return(d)
# Computes eq.(22):
def __gaussian_kernel(self, x1, x2, n, mean_standard_deviation):
"""
This function computes a Gaussian kernel:
.. math::
K = \sqrt{\\frac{1}{2 \pi h^2}} exp(- \\frac{d^2}{2 h^2})
:param x_1:
first observation.
:param x_2:
second observation.
:param n:
number of observations in a data set or a variable vector.
:param mean_standard_deviation:
mean standard deviation in the entire data set or a variable vector.
:returns:
- **K** - Gaussian kernel.
"""
d = self.__distance(x1, x2)
h = self.__bandwidth(n, mean_standard_deviation)
K = (1/(2*np.pi*h**2))**0.5 * np.exp(- d**2/(2*h**2))  # squared distance, consistent with the documented Gaussian kernel
return(K)
# Computes eq.(23):
def __variable_density(self, x, mean_standard_deviation):
"""
This function computes a vector of variable densities for all observations.
:param x:
single variable vector.
:param mean_standard_deviation:
mean standard deviation in the entire data set or a variable vector.
:returns:
- **Kck** - a vector of variable densities for all observations, it has the same size as the variable vector `x`.
"""
n = len(x)
Kck = np.zeros((n,1))
for i in range(0,n):
gaussian_kernel_sum = 0
for j in range(0,n):
gaussian_kernel_sum = gaussian_kernel_sum + self.__gaussian_kernel(x[i], x[j], n, mean_standard_deviation)
Kck[i] = 1/n * gaussian_kernel_sum
return(Kck)
# Computes eq.(24):
def __multi_variable_global_density(self, X):
"""
This function computes a vector of variable global densities for a
multi-variable case, for all observations.
:param X:
multi-variable data set matrix.
:returns:
- **Kc** - a vector of global densities for all observations.
"""
(n, n_vars) = np.shape(X)
mean_standard_deviation = np.mean(np.std(X, axis=0))
Kck_matrix = np.zeros((n, n_vars))
for variable in range(0, n_vars):
Kck_matrix[:,variable] = np.reshape(self.__variable_density(X[:,variable], mean_standard_deviation), (n,))
# Compute the global densities vector:
Kc = np.zeros((n,1))
K = 1
for i in range(0,n):
Kc[i] = K * np.prod(Kck_matrix[i,:])
return(Kc)
# Computes eq.(25):
def __multi_variable_observation_weights(self, X):
"""
This function computes a vector of observation weights for a
multi-variable case.
:param X:
multi-variable data set matrix.
:returns:
- **W_c** - a vector of observation weights.
"""
(n, n_vars) = np.shape(X)
W_c = np.zeros((n,1))
Kc = self.__multi_variable_global_density(X)
Kc_inv = 1/Kc
for i in range(0,n):
W_c[i] = Kc_inv[i] / np.max(Kc_inv)
return(W_c)
# Computes eq.(20):
def __single_variable_observation_weights(self, x):
"""
This function computes a vector of observation weights for a
single-variable case.
:param x:
single variable vector.
:returns:
- **W_c** - a vector of observation weights.
"""
n = len(x)
mean_standard_deviation = np.std(x)
W_c = np.zeros((n,1))
Kc = self.__variable_density(x, mean_standard_deviation)
Kc_inv = 1/Kc
for i in range(0,n):
W_c[i] = Kc_inv[i] / np.max(Kc_inv)
return(W_c)
################################################################################
#
# Data Sampling
#
################################################################################
class DataSampler:
"""
Enables selecting train and test data samples.
**Example:**
.. code::
from PCAfold import DataSampler
import numpy as np
# Generate dummy idx vector:
idx = np.array([0, 0, 0, 0, 0, 0, 1, 1, 1, 1])
# Instantiate DataSampler class object:
selection = DataSampler(idx, idx_test=np.array([5,9]), random_seed=100, verbose=True)
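# Train and test indices can then be generated, for instance (a sketch):
(idx_train, idx_test) = selection.number(40, test_selection_option=1)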
:param idx:
``numpy.ndarray`` of cluster classifications. It should be of size ``(n_observations,)`` or ``(n_observations,1)``.
:param idx_test: (optional)
``numpy.ndarray`` specifying the user-provided indices for test data. If specified, train
data will be selected ignoring the indices in ``idx_test`` and the test
data will be returned the same as the user-provided ``idx_test``.
If not specified, test samples will be selected according to the
``test_selection_option`` parameter (see documentation for each sampling function).
Setting fixed ``idx_test`` parameter may be useful if training a machine
learning model on specific test samples is desired.
It should be of size ``(n_test_samples,)`` or ``(n_test_samples,1)``.
:param random_seed: (optional)
``int`` specifying random seed for random sample selection.
:param verbose: (optional)
``bool`` for printing verbose details.
"""
def __init__(self, idx, idx_test=None, random_seed=None, verbose=False):
if isinstance(idx, np.ndarray):
if not all(isinstance(i, np.integer) for i in idx.ravel()):
raise ValueError("Parameter `idx` can only contain integers.")
try:
(n_observations, n_dim) = np.shape(idx)
except:
(n_observations,) = np.shape(idx)
n_dim = 1
if n_dim != 1:
raise ValueError("Parameter `idx` has to have size `(n_observations,)` or `(n_observations,1)`.")
if n_observations == 0:
raise ValueError("Parameter `idx` is an empty array.")
self.__idx = idx
else:
raise ValueError("Parameter `idx` has to be of type `numpy.ndarray`.")
if idx_test is not None:
if isinstance(idx_test, np.ndarray):
if not all(isinstance(i, np.integer) for i in idx_test.ravel()):
raise ValueError("Parameter `idx_test` can only contain integers.")
try:
(n_test_samples, n_dim) = np.shape(idx_test)
except:
(n_test_samples,) = np.shape(idx_test)
n_dim = 1
if n_dim != 1:
raise ValueError("Parameter `idx_test` has to have size `(n_test_samples,)` or `(n_test_samples,1)`.")
self.__idx_test = idx_test
else:
raise ValueError("Parameter `idx_test` has to be of type `numpy.ndarray`.")
if len(np.unique(idx_test)) > len(idx):
raise ValueError("Parameter `idx_test` has more unique observations than `idx`.")
else:
self.__idx_test = idx_test
if random_seed is not None:
if not isinstance(random_seed, int):
raise ValueError("Parameter `random_seed` has to be an integer or None.")
if isinstance(random_seed, bool):
raise ValueError("Parameter `random_seed` has to be an integer or None.")
else:
self.__random_seed = random_seed
else:
self.__random_seed = random_seed
if not isinstance(verbose, bool):
raise ValueError("Parameter `verbose` has to be a boolean.")
else:
self.__verbose = verbose
if idx_test is not None:
if len(np.unique(idx_test)) != 0:
self.__using_user_defined_idx_test = True
if self.verbose==True:
print('User defined test samples will be used. Parameter `test_selection_option` will be ignored.\n')
else:
self.__using_user_defined_idx_test = False
else:
self.__using_user_defined_idx_test = False
@property
def idx(self):
return self.__idx
@property
def idx_test(self):
return self.__idx_test
@property
def random_seed(self):
return self.__random_seed
@property
def verbose(self):
return self.__verbose
@idx.setter
def idx(self, new_idx):
if isinstance(new_idx, np.ndarray):
if not all(isinstance(i, np.integer) for i in new_idx.ravel()):
raise ValueError("Parameter `idx` can only contain integers.")
try:
(n_observations, n_dim) = np.shape(new_idx)
except:
(n_observations,) = np.shape(new_idx)
n_dim = 1
if n_dim != 1:
raise ValueError("Parameter `idx` has to have size `(n_observations,)` or `(n_observations,1)`.")
if n_observations == 0:
raise ValueError("Parameter `idx` is an empty array.")
self.__idx = new_idx
else:
raise ValueError("Parameter `idx` has to be of type `numpy.ndarray`.")
if len(np.unique(self.idx_test)) > len(new_idx):
raise ValueError("Parameter `idx` has less observations than current `idx_test`.")
@idx_test.setter
def idx_test(self, new_idx_test):
if new_idx_test is not None:
if len(new_idx_test) > len(self.idx):
raise ValueError("Parameter `idx_test` has more unique observations than `idx`.")
else:
if isinstance(new_idx_test, np.ndarray):
if not all(isinstance(i, np.integer) for i in new_idx_test.ravel()):
raise ValueError("Parameter `idx_test` can only contain integers.")
try:
(n_test_samples, n_dim) = np.shape(new_idx_test)
except:
(n_test_samples,) = np.shape(new_idx_test)
n_dim = 1
if n_dim != 1:
raise ValueError("Parameter `idx_test` has to have size `(n_test_samples,)` or `(n_test_samples,1)`.")
self.__idx_test = new_idx_test
else:
raise ValueError("Parameter `idx_test` has to be of type `numpy.ndarray`.")
if len(np.unique(new_idx_test)) != 0:
self.__using_user_defined_idx_test = True
if self.verbose==True:
print('User defined test samples will be used. Parameter `test_selection_option` will be ignored.\n')
else:
self.__using_user_defined_idx_test = False
else:
self.__idx_test = new_idx_test
self.__using_user_defined_idx_test = False
@random_seed.setter
def random_seed(self, new_random_seed):
if new_random_seed is not None:
if not isinstance(new_random_seed, int):
raise ValueError("Parameter `random_seed` has to be an integer or None.")
if isinstance(new_random_seed, bool):
raise ValueError("Parameter `random_seed` has to be an integer or None.")
else:
self.__random_seed = new_random_seed
else:
self.__random_seed = new_random_seed
@verbose.setter
def verbose(self, new_verbose):
if not isinstance(new_verbose, bool):
raise ValueError("Parameter `verbose` has to be a boolean.")
else:
self.__verbose = new_verbose
def __print_verbose_information_sampling(self, idx, idx_train, idx_test):
"""
Prints detailed information on train and test sampling when
``verbose=True``.
:param idx:
``numpy.ndarray`` of cluster classifications. It should be of size ``(n_observations,)`` or ``(n_observations,1)``.
:param idx_train:
indices of the train data.
:param idx_test:
indices of the test data.
"""
cluster_populations = get_populations(idx)
k = np.size(np.unique(idx))
n_observations = len(idx)
for cl_id in range(0,k):
train_indices = [t_id for t_id in idx_train if idx[t_id,]==cl_id]
if cluster_populations[cl_id] != 0:
print("Cluster " + str(cl_id) + ": taking " + str(len(train_indices)) + " train samples out of " + str(cluster_populations[cl_id]) + " observations (%.1f" % (len(train_indices)/(cluster_populations[cl_id])*100) + "%).")
else:
print("Cluster " + str(cl_id) + ": taking " + str(len(train_indices)) + " train samples out of " + str(cluster_populations[cl_id]) + " observations (%.1f" % (0) + "%).")
print("")
for cl_id in range(0,k):
train_indices = [t_id for t_id in idx_train if idx[t_id,]==cl_id]
test_indices = [t_id for t_id in idx_test if idx[t_id,]==cl_id]
if (cluster_populations[cl_id] - len(train_indices)) != 0:
print("Cluster " + str(cl_id) + ": taking " + str(len(test_indices)) + " test samples out of " + str(cluster_populations[cl_id] - len(train_indices)) + " remaining observations (%.1f" % (len(test_indices)/(cluster_populations[cl_id] - len(train_indices))*100) + "%).")
else:
print("Cluster " + str(cl_id) + ": taking " + str(len(test_indices)) + " test samples out of " + str(cluster_populations[cl_id] - len(train_indices)) + " remaining observations (%.1f" % (0) + "%).")
print('\nSelected ' + str(np.size(idx_train)) + ' train samples (%.1f' % (np.size(idx_train)*100/n_observations) + '%) and ' + str(np.size(idx_test)) + ' test samples (%.1f' % (np.size(idx_test)*100/n_observations) + '%).\n')
# ------------------------------------------------------------------------------
def number(self, perc, test_selection_option=1):
"""
Uses classifications into :math:`k` clusters and samples
a fixed number of observations from every cluster as training data.
In general, this results in a balanced representation of features
identified by a clustering algorithm.
**Example:**
.. code::
from PCAfold import DataSampler
import numpy as np
# Generate dummy idx vector:
idx = np.array([0, 0, 0, 0, 0, 0, 1, 1, 1, 1])
# Instantiate DataSampler class object:
selection = DataSampler(idx, verbose=True)
# Generate sampling:
(idx_train, idx_test) = selection.number(20, test_selection_option=1)
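# With 10 observations, 2 clusters and perc=20, one train sample is drawn from each cluster (two in total).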
**Train data:**
The number of train samples is estimated based on the percentage
``perc`` provided.
First, the total number of samples for training is estimated as a
percentage ``perc`` from the total number of observations ``n_observations`` in a data set.
Next, this number is divided equally into :math:`k` clusters. The
result ``n_of_samples`` is the number of samples that will be selected
from each cluster:
.. math::
\\verb|n_of_samples| = \\verb|int| \Big( \\frac{\\verb|perc| \cdot \\verb|n_observations|}{k \cdot 100} \Big)
**Test data:**
Two options for sampling test data are implemented. If you select
``test_selection_option=1`` all remaining samples that were not taken as
train data become the test data. If you select ``test_selection_option=2``,
the smallest cluster is found and the remaining number of observations
:math:`m` are taken as test data in that cluster. Next, the same number
of samples :math:`m` is taken from all remaining larger clusters.
The scheme below presents graphically how train and test data can be selected using ``test_selection_option`` parameter:
.. image:: ../images/sampling-test-selection-option-number.svg
:width: 700
:align: center
Here :math:`n` and :math:`m` are fixed numbers for each cluster.
In general, :math:`n \\neq m`.
:param perc:
percentage of data to be selected as training data from the entire data set.
For instance, set ``perc=20`` if you want to select 20%.
:param test_selection_option: (optional)
``int`` specifying the option for how the test data is selected.
Select ``test_selection_option=1`` if you want all remaining samples
to become test data.
Select ``test_selection_option=2`` if you want to select a subset
of the remaining samples as test data.
:return:
- **idx_train** - ``numpy.ndarray`` of indices of the train data. It has size ``(n_train,)``.
- **idx_test** - ``numpy.ndarray`` of indices of the test data. It has size ``(n_test,)``.
"""
# Check if `perc` parameter was passed correctly:
if (perc < 0) or (perc > 100):
raise ValueError("Percentage has to be between 0-100.")
# Check if `test_selection_option` parameter was passed correctly:
_test_selection_option = [1,2]
if test_selection_option not in _test_selection_option:
raise ValueError("Test selection option can only be 1 or 2.")
# Degrade clusters if needed:
if (len(np.unique(self.idx)) != (np.max(self.idx)+1)) or (np.min(self.idx) != 0):
(self.idx, _) = degrade_clusters(self.idx, verbose=False)
# Initialize vector of indices 0..n_observations:
n_observations = len(self.idx)
idx_full = np.arange(0, n_observations)
if self.idx_test is not None:
idx_test = np.unique(np.array(self.idx_test))
else:
idx_test = np.array([])
idx_full_no_test = np.setdiff1d(idx_full, idx_test)
# Find the number of clusters:
k = np.size(np.unique(self.idx))
# Calculate fixed number of samples that will be taken from every cluster as the training data:
n_of_samples = int(perc*n_observations/k/100)
# Initialize auxiliary variables:
idx_train = []
cluster_test = []
# Get clusters and split them into train and test indices:
for cl_id in range(0,k):
if self.random_seed != None:
random.seed(self.random_seed)
# Variable `cluster` contains indices of observations that are allowed to be selected as train samples in a particular cluster:
cluster = []
for i, id in enumerate(self.idx[idx_full_no_test]):
if id == cl_id:
cluster.append(idx_full_no_test[i])
# Selection of training data:
if int(len(cluster)) < n_of_samples:
raise ValueError("The requested percentage requires taking more samples from cluster " + str(cl_id) + " than there are available observations in that cluster. Consider lowering the percentage or use a different sampling function.")
else:
cluster_train = np.array(random.sample(cluster, n_of_samples))
idx_train = np.concatenate((idx_train, cluster_train))
if self.__using_user_defined_idx_test==False:
# Selection of test data - all data that remains is test data:
if test_selection_option == 1:
cluster_test = np.setdiff1d(cluster, cluster_train)
idx_test = np.concatenate((idx_test, cluster_test))
# Selection of test data - equal samples from each cluster:
if test_selection_option == 2:
cluster_test.append(np.setdiff1d(cluster, cluster_train))
if self.__using_user_defined_idx_test==False:
if test_selection_option == 2:
# Search for the smallest number of remaining observations in any cluster:
minimum_test_samples = n_observations
for cl_id in range(0,k):
if len(cluster_test[cl_id]) < minimum_test_samples:
minimum_test_samples = len(cluster_test[cl_id])
# Sample that amount from every cluster:
for cl_id in range(0,k):
idx_test = np.concatenate((idx_test, random.sample(list(cluster_test[cl_id]), minimum_test_samples)))
idx_train = np.sort(idx_train.astype(int))
idx_test = np.sort(idx_test.astype(int))
# Print detailed information on sampling:
if self.verbose == True:
self.__print_verbose_information_sampling(self.idx, idx_train, idx_test)
return (idx_train, idx_test)
# ------------------------------------------------------------------------------
def percentage(self, perc, test_selection_option=1):
"""
Uses classifications into :math:`k` clusters and
samples a certain percentage ``perc`` from every cluster as the training data.
**Example:**
.. code:: python
from PCAfold import DataSampler
import numpy as np
# Generate dummy idx vector:
idx = np.array([0, 0, 0, 0, 0, 0, 1, 1, 1, 1])
# Instantiate DataSampler class object:
selection = DataSampler(idx, verbose=True)
# Generate sampling:
(idx_train, idx_test) = selection.percentage(20, test_selection_option=1)
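# With cluster populations of 6 and 4 and perc=20, int(6*0.2)=1 and int(4*0.2)=0 train samples are drawn, respectively.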
*Note:*
If the cluster sizes are comparable, this function will give a similar
train sample distribution as random sampling (``DataSampler.random``).
This sampling can be useful in cases where one cluster is significantly
smaller than others and there is a chance that this cluster will not get
covered in the train data if random sampling was used.
**Train data:**
The number of train samples is estimated based on the percentage ``perc`` provided.
First, the size of the :math:`i^{th}` cluster is estimated ``cluster_size_i``
and then a percentage ``perc`` of that number is selected.
**Test data:**
Two options for sampling test data are implemented. If you select
``test_selection_option=1`` all remaining samples that were not taken as
train data become the test data. If you select
``test_selection_option=2`` the same procedure will be used to select
test data as was used to select train data (only allowed if the number of samples
taken as train data from any cluster did not exceed 50% of observations
in that cluster).
The scheme below presents graphically how train and test data can be
selected using ``test_selection_option`` parameter:
.. image:: ../images/sampling-test-selection-option-percentage.svg
:width: 700
:align: center
Here :math:`p` is the percentage ``perc`` provided.
:param perc:
percentage of data to be selected as training data from each cluster.
For instance, set ``perc=20`` if you want to select 20%.
:param test_selection_option: (optional)
``int`` specifying the option for how the test data is selected.
Select ``test_selection_option=1`` if you want all remaining samples
to become test data.
Select ``test_selection_option=2`` if you want to select a subset
of the remaining samples as test data.
:return:
- **idx_train** - ``numpy.ndarray`` of indices of the train data. It has size ``(n_train,)``.
- **idx_test** - ``numpy.ndarray`` of indices of the test data. It has size ``(n_test,)``.
"""
# Check if `perc` parameter was passed correctly:
if (perc < 0) or (perc > 100):
raise ValueError("Percentage has to be between 0-100.")
# Check if `test_selection_option` parameter was passed correctly:
_test_selection_option = [1,2]
if test_selection_option not in _test_selection_option:
raise ValueError("Test selection option can only be 1 or 2.")
# Degrade clusters if needed:
if (len(np.unique(self.idx)) != (np.max(self.idx)+1)) or (np.min(self.idx) != 0):
(self.idx, _) = degrade_clusters(self.idx, verbose=False)
# Initialize vector of indices 0..n_observations:
n_observations = len(self.idx)
idx_full = np.arange(0, n_observations)
if self.idx_test is not None:
idx_test = np.unique(np.array(self.idx_test))
else:
idx_test = np.array([])
idx_full_no_test = np.setdiff1d(idx_full, idx_test)
# Find the number of clusters:
k = np.size(np.unique(self.idx))
# Initialize auxiliary variables:
idx_train = []
# Get cluster populations:
cluster_populations = get_populations(self.idx)
# Get clusters and split them into training and test indices:
for cl_id in range(0,k):
if self.random_seed != None:
random.seed(self.random_seed)
# Variable `cluster` contains indices of observations that are allowed to be selected as train samples in a particular cluster:
cluster = []
for i, id in enumerate(self.idx[idx_full_no_test]):
if id == cl_id:
cluster.append(idx_full_no_test[i])
# Selection of training data:
if int(len(cluster)) < int(cluster_populations[cl_id]*perc/100):
raise ValueError("The requested percentage requires taking more samples from cluster " + str(cl_id) + " than there are available observations in that cluster. Consider lowering the percentage or use a different sampling function.")
else:
cluster_train = np.array(random.sample(cluster, int(cluster_populations[cl_id]*perc/100)))
idx_train = np.concatenate((idx_train, cluster_train))
if self.__using_user_defined_idx_test==False:
# Selection of test data - all data that remains is test data:
if test_selection_option == 1:
cluster_test = np.setdiff1d(cluster, cluster_train)
idx_test = np.concatenate((idx_test, cluster_test))
if test_selection_option == 2:
# Check if there is enough test samples to select:
if perc > 50:
raise ValueError("Percentage is larger than 50% and test samples cannot be selected with `test_selection_option=2`.")
else:
cluster_test = np.setdiff1d(cluster, cluster_train)
cluster_test_sampled = np.array(random.sample(list(cluster_test), int(cluster_populations[cl_id]*perc/100)))
idx_test = np.concatenate((idx_test, cluster_test_sampled))
idx_train = np.sort(idx_train.astype(int))
idx_test = np.sort(idx_test.astype(int))
# Print detailed information on sampling:
if self.verbose == True:
self.__print_verbose_information_sampling(self.idx, idx_train, idx_test)
return (idx_train, idx_test)
# ------------------------------------------------------------------------------
def manual(self, sampling_dictionary, sampling_type='percentage', test_selection_option=1):
"""
Uses classifications into :math:`k` clusters
and a dictionary ``sampling_dictionary`` in which you manually specify what
``'percentage'`` (or what ``'number'``) of samples will be
selected as the train data from each cluster. The dictionary keys are
cluster classifications as per ``idx`` and the dictionary values are either
percentage or number of train samples to be selected. The default dictionary
values are percentage but you can select ``sampling_type='number'`` in order
to interpret the values as a number of samples.
**Example:**
.. code:: python
from PCAfold import DataSampler
import numpy as np
# Generate dummy idx vector:
idx = np.array([0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2])
# Instantiate DataSampler class object:
selection = DataSampler(idx, verbose=True)
# Generate sampling:
(idx_train, idx_test) = selection.manual({0:1, 1:1, 2:1}, sampling_type='number', test_selection_option=1)
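# With sampling_type='number', exactly one train sample is drawn from each of the three clusters.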
**Train data:**
The number of train samples selected from each cluster is estimated based
on the ``sampling_dictionary``. For ``key : value``, percentage ``value``
(or number ``value``) of samples will be selected from cluster ``key``.
**Test data:**
Two options for sampling test data are implemented.
If you select ``test_selection_option=1`` all remaining samples that
were not taken as train data become the test data.
If you select
``test_selection_option=2`` the same procedure will be used to select
test data as was used to select train data (only allowed if the number
of samples taken as train data from any cluster did not exceed 50%
of observations in that cluster).
The scheme below presents graphically how train and test data can be
selected using ``test_selection_option`` parameter:
.. image:: ../images/sampling-test-selection-option-manual.svg
:width: 700
:align: center
Here it is understood that :math:`n_1` train samples were requested from
the first cluster, :math:`n_2` from the second cluster and :math:`n_3`
from the third cluster, where :math:`n_i` can be interpreted as number
or as percentage. This can be achieved by setting:
.. code:: python
sampling_dictionary = {0:n_1, 1:n_2, 2:n_3}
:param sampling_dictionary:
``dict`` specifying manual sampling. Keys are cluster classifications and
values are either ``percentage`` or ``number`` of samples to be taken from
that cluster. Keys should match the cluster classifications as per ``idx``.
:param sampling_type: (optional)
``str`` specifying whether percentage or number is given in the
``sampling_dictionary``. Available options: ``percentage`` or ``number``.
The default is ``percentage``.
:param test_selection_option: (optional)
``int`` specifying the option for how the test data is selected.
Select ``test_selection_option=1`` if you want all remaining samples
to become test data.
Select ``test_selection_option=2`` if you want to select a subset
of the remaining samples as test data.
:return:
- **idx_train** - ``numpy.ndarray`` of indices of the train data. It has size ``(n_train,)``.
- **idx_test** - ``numpy.ndarray`` of indices of the test data. It has size ``(n_test,)``.
"""
# Check that sampling_type is passed correctly:
_sampling_type = ['percentage', 'number']
if sampling_type not in _sampling_type:
raise ValueError("Variable `sampling_type` has to be one of the following: 'percentage' or 'number'.")
# Degrade clusters if needed:
if (len(np.unique(self.idx)) != (np.max(self.idx)+1)) or (np.min(self.idx) != 0):
(self.idx, _) = degrade_clusters(self.idx, verbose=False)
# Check that the dictionary has a consistent number of entries with respect to `idx`:
if len(np.unique(self.idx)) != len(sampling_dictionary.keys()):
raise ValueError("The number of entries inside `sampling_dictionary` does not match the number of clusters specified in `idx`.")
# Check that keys and values are properly defined:
for key, value in sampling_dictionary.items():
# Check that all keys are present in the `idx`:
if key not in np.unique(self.idx):
raise ValueError("Key " + str(key) + " does not match an entry in `idx`.")
# Check that keys are non-negative integers:
if not (isinstance(key, int) and key >= 0):
raise ValueError("Error in cluster " + str(key) + ". Key must be a non-negative integer.")
# Check that percentage is between 0 and 100:
if sampling_type == 'percentage':
if not (value >= 0 and value <= 100):
raise ValueError("Error in cluster " + str(key) + ". The percentage has to be between 0-100.")
# Check that number is a non-negative integer:
if sampling_type == 'number':
if not (isinstance(value, int) and value >= 0):
raise ValueError("Error in cluster " + str(key) + ". The number must be a non-negative integer.")
# Check that `test_selection_option` parameter was passed correctly:
_test_selection_option = [1,2]
if test_selection_option not in _test_selection_option:
raise ValueError("Test selection option can only be 1 or 2.")
# Initialize vector of indices 0..n_observations:
n_observations = len(self.idx)
idx_full = np.arange(0,n_observations)
if self.idx_test is not None:
idx_test = np.unique(np.array(self.idx_test))
'''
description:
co-optimization of design and control for the finger flip task
'''
import os
import sys
example_base_dir = os.path.abspath(os.path.join(os.path.dirname(os.path.abspath(__file__)), '..'))
sys.path.append(example_base_dir)
from parameterization_torch import Design as Design
from parameterization import Design as Design_np
from renderer import SimRenderer
import numpy as np
import scipy.optimize
import redmax_py as redmax
import os
import argparse
import time
from common import *
import torch
import matplotlib.pyplot as plt
torch.set_default_dtype(torch.double)
if __name__ == '__main__':
parser = argparse.ArgumentParser('')
parser.add_argument("--model", type = str, default = 'rss_finger_flip')
parser.add_argument('--record', action = 'store_true')
parser.add_argument('--record-file-name', type = str, default = 'rss_finger_flip')
parser.add_argument('--seed', type=int, default = 0)
parser.add_argument('--save-dir', type=str, default = './results/tmp/')
parser.add_argument('--no-design-optim', action='store_true', help = 'whether control-only')
parser.add_argument('--visualize', type=str, default='True', help = 'whether visualize the simulation')
parser.add_argument('--load-dir', type = str, default = None, help = 'load optimized parameters')
parser.add_argument('--verbose', default = False, action = 'store_true', help = 'verbose output')
parser.add_argument('--test-derivatives', default = False, action = 'store_true')
asset_folder = os.path.abspath(os.path.join(example_base_dir, '..', 'assets'))
args = parser.parse_args()
if args.model[-4:] == '.xml':
model_path = os.path.join(asset_folder, args.model)
else:
model_path = os.path.join(asset_folder, args.model + '.xml')
optimize_design_flag = not args.no_design_optim
os.makedirs(args.save_dir, exist_ok = True)
visualize = (args.visualize == 'True')
play_mode = (args.load_dir is not None)
'''init sim and task'''
sim = redmax.Simulation(model_path, args.verbose)
if args.verbose:
sim.print_ctrl_info()
sim.print_design_params_info()
num_steps = 150
ndof_u = sim.ndof_u
ndof_r = sim.ndof_r
ndof_var = sim.ndof_var
ndof_p = sim.ndof_p
# set up camera
sim.viewer_options.camera_pos = np.array([2.5, -4, 1.8])
# init design params
design = Design()
design_np = Design_np()
cage_params = np.ones(9)
ndof_cage = len(cage_params)
design_params, meshes = design_np.parameterize(cage_params, True)
sim.set_design_params(design_params)
Vs = []
for i in range(len(meshes)):
Vs.append(meshes[i].V)
sim.set_rendering_mesh_vertices(Vs)
# init control sequence
sub_steps = 5
assert (num_steps % sub_steps) == 0
num_ctrl_steps = num_steps // sub_steps
if args.seed == 0:
action = np.zeros(ndof_u * num_ctrl_steps)
else:
np.random.seed(args.seed)
action = np.random.uniform(-0.5, 0.5, ndof_u * num_ctrl_steps)
if visualize:
print('ndof_p = ', ndof_p)
print('ndof_u = ', len(action))
print('ndof_cage = ', ndof_cage)
if not optimize_design_flag:
params = action
else:
params = np.zeros(ndof_u * num_ctrl_steps + ndof_cage)
params[0:ndof_u * num_ctrl_steps] = action
params[-ndof_cage:] = cage_params
# init optimization history
f_log = []
global num_sim
num_sim = 0
'''compute the objectives by forward pass'''
def forward(params, backward_flag = False):
global num_sim
num_sim += 1
action = params[:ndof_u * num_ctrl_steps]
u = np.tanh(action)
if optimize_design_flag:
cage_params = params[-ndof_cage:]
design_params = design_np.parameterize(cage_params)
sim.set_design_params(design_params)
sim.reset(backward_flag = backward_flag, backward_design_params_flag = optimize_design_flag)
# objectives coefficients
coef_u = 5.
coef_touch = 1.
coef_flip = 50.
f_u = 0.
f_touch = 0.
f_flip = 0.
f = 0.
if backward_flag:
df_dq = np.zeros(ndof_r * num_steps)
df_du = np.zeros(ndof_u * num_steps)
df_dvar = np.zeros(ndof_var * num_steps)
if optimize_design_flag:
df_dp = np.zeros(ndof_p)
for i in range(num_ctrl_steps):
sim.set_u(u[i * ndof_u:(i + 1) * ndof_u])
sim.forward(sub_steps, verbose = args.verbose)
variables = sim.get_variables()
q = sim.get_q()
# compute objective f
f_u_i = np.sum(u[i * ndof_u:(i + 1) * ndof_u] ** 2)
f_touch_i = 0.
if i < num_ctrl_steps // 2:
f_touch_i += np.sum((variables[0:3] - variables[3:6]) ** 2) # MSE
f_flip_i = 0.
f_flip_i += (q[-1] - np.pi / 2.) ** 2
f_u += f_u_i
f_touch += f_touch_i
f_flip += f_flip_i
f += coef_u * f_u_i + coef_touch * f_touch_i + coef_flip * f_flip_i
# backward info
if backward_flag:
df_du[i * sub_steps * ndof_u:(i * sub_steps + 1) * ndof_u] += \
coef_u * 2. * u[i * ndof_u:(i + 1) * ndof_u]
if i < num_ctrl_steps // 2:
df_dvar[((i + 1) * sub_steps - 1) * ndof_var:((i + 1) * sub_steps - 1) * ndof_var + 3] += \
coef_touch * 2. * (variables[0:3] - variables[3:6])
df_dvar[((i + 1) * sub_steps - 1) * ndof_var + 3:((i + 1) * sub_steps) * ndof_var] += \
-coef_touch * 2. * (variables[0:3] - variables[3:6]) # MSE
df_dq[((i + 1) * sub_steps) * ndof_r - 1] += coef_flip * 2. * (q[-1] - np.pi / 2.)
if backward_flag:
sim.backward_info.set_flags(False, False, optimize_design_flag, True)
sim.backward_info.df_du = df_du
sim.backward_info.df_dq = df_dq
sim.backward_info.df_dvar = df_dvar
if optimize_design_flag:
sim.backward_info.df_dp = df_dp
return f, {'f_u': f_u, 'f_touch': f_touch, 'f_flip': f_flip}
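# Example (a sketch): a single forward pass to evaluate the objective and its terms without gradients:
#   f0, terms = forward(params, backward_flag = False)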
'''compute loss and gradient'''
def loss_and_grad(params):
with torch.no_grad():
f, _ = forward(params, backward_flag = True)
sim.backward()
grad = np.zeros(len(params))
# gradient for control params
action = params[:ndof_u * num_ctrl_steps]
df_du_full = np.copy(sim.backward_results.df_du)
# ---- Imports ----- #
from nlb_tools.nwb_interface import NWBDataset
from nlb_tools.make_tensors import make_train_input_tensors, \
make_eval_input_tensors, make_eval_target_tensors, save_to_h5
from nlb_tools.evaluation import evaluate
import h5py
import sys, gc
import numpy as np
import pandas as pd
import scipy.signal as signal
from sklearn.linear_model import PoissonRegressor
from datetime import datetime
# ---- Run Params ---- #
dataset_name = "area2_bump" # one of {'area2_bump', 'dmfc_rsg', 'mc_maze', 'mc_rtt',
# 'mc_maze_large', 'mc_maze_medium', 'mc_maze_small'}
bin_size_ms = 5
kern_sds = np.linspace(30, 60, 4)
alphas = np.logspace(-3, 0, 4)
cv_fold = 5
log_offset = 1e-4 # amount to add before taking log to prevent log(0) error
# ---- Useful variables ---- #
binsuf = '' if bin_size_ms == 5 else f'_{bin_size_ms}'
dskey = f'mc_maze_scaling{binsuf}_split' if 'maze_' in dataset_name else (dataset_name + binsuf + "_split")
pref_dict = {'mc_maze_small': '[100] ', 'mc_maze_medium': '[250] ', 'mc_maze_large': '[500] '}
bpskey = pref_dict.get(dataset_name, '') + 'co-bps'
# ---- Data locations ----#
datapath_dict = {
'mc_maze': '~/data/000128/sub-Jenkins/',
'mc_rtt': '~/data/000129/sub-Indy/',
'area2_bump': '~/data/000127/sub-Han/',
'dmfc_rsg': '~/data/000130/sub-Haydn/',
'mc_maze_large': '~/data/000138/sub-Jenkins/',
'mc_maze_medium': '~/data/000139/sub-Jenkins/',
'mc_maze_small': '~/data/000140/sub-Jenkins/',
}
prefix_dict = {
'mc_maze': '*full',
'mc_maze_large': '*large',
'mc_maze_medium': '*medium',
'mc_maze_small': '*small',
}
datapath = datapath_dict[dataset_name]
prefix = prefix_dict.get(dataset_name, '')
# ---- Load data ---- #
dataset = NWBDataset(datapath, prefix,
skip_fields=['hand_pos', 'cursor_pos', 'eye_pos', 'force', 'muscle_vel', 'muscle_len', 'joint_vel', 'joint_ang'])
dataset.resample(bin_size_ms)
# ---- Prepare n folds ---- #
all_mask = np.isin(dataset.trial_info.split, ['train', 'val'])
all_idx = np.arange(all_mask.shape[0])[all_mask]
train_masks = []
eval_masks = []
for i in range(cv_fold):
eval_idx = all_idx[i::cv_fold] # take every n samples for each fold
train_idx = all_idx[~np.isin(all_idx, eval_idx)]
train_masks.append(np.isin(np.arange(all_mask.shape[0]), train_idx))
eval_masks.append(np.isin(np.arange(all_mask.shape[0]), eval_idx))
# ---- Extract data for each fold ---- #
fold_data = []
for i in range(cv_fold):
train_dict = make_train_input_tensors(dataset, dataset_name, train_masks[i], save_file=False)
eval_dict = make_eval_input_tensors(dataset, dataset_name, eval_masks[i], save_file=False)
train_spikes_heldin = train_dict['train_spikes_heldin']
train_spikes_heldout = train_dict['train_spikes_heldout']
eval_spikes_heldin = eval_dict['eval_spikes_heldin']
target_dict = make_eval_target_tensors(dataset, dataset_name, train_masks[i], eval_masks[i], include_psth=True, save_file=False)
fold_data.append((train_spikes_heldin, train_spikes_heldout, eval_spikes_heldin, target_dict))
del dataset
gc.collect()
# ---- Useful shape info ---- #
tlen = fold_data[0][0].shape[1]
num_heldin = fold_data[0][0].shape[2]
num_heldout = fold_data[0][1].shape[2]
results = []
# ---- Define helpers ---- #
flatten2d = lambda x: x.reshape(-1, x.shape[2]) # flattens 3d -> 2d array
def fit_poisson(train_factors_s, test_factors_s, train_spikes_s, test_spikes_s=None, alpha=0.0):
"""Fit Poisson GLM from factors to spikes and return rate predictions"""
train_in = train_factors_s if test_spikes_s is None else np.vstack([train_factors_s, test_factors_s])
train_out = train_spikes_s if test_spikes_s is None else np.vstack([train_spikes_s, test_spikes_s])
train_pred = []
test_pred = []
for chan in range(train_out.shape[1]):
pr = PoissonRegressor(alpha=alpha, max_iter=500)
pr.fit(train_in, train_out[:, chan])
while pr.n_iter_ == pr.max_iter and pr.max_iter < 10000:
print(f"didn't converge - retraining {chan} with max_iter={pr.max_iter * 5}")
oldmax = pr.max_iter
del pr
pr = PoissonRegressor(alpha=alpha, max_iter=oldmax * 5)
pr.fit(train_in, train_out[:, chan])
train_pred.append(pr.predict(train_factors_s))
test_pred.append(pr.predict(test_factors_s))
train_rates_s = np.vstack(train_pred).T
test_rates_s = np.vstack(test_pred).T
return np.clip(train_rates_s, 1e-9, 1e20), np.clip(test_rates_s, 1e-9, 1e20)
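# Example call (a sketch; `train_factors` and `eval_factors` are hypothetical 3d arrays of smoothed
# spikes with shape (trials, time, channels)):
#   train_rates, eval_rates = fit_poisson(flatten2d(train_factors), flatten2d(eval_factors),
#                                         flatten2d(train_spikes_heldin), alpha=alphas[0])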
# ---- Sweep kernel std ---- #
for ks in kern_sds:
print(f"Evaluating kern_sd = {ks}")
# ---- Prepare smoothing kernel ---- #
window = signal.gaussian(int(6 * ks / bin_size_ms), int(ks / bin_size_ms), sym=True)
window /= np.sum(window)
def filt(x):
return np.convolve(x, window, 'same')
# -*- coding: utf-8 -*-
"""
Created on Fri May 04 10:26:49 2018
@author: <NAME> <<EMAIL>>
Multi-Fidelity co-Kriging: recursive formulation with autoregressive model of
order 1 (AR1)
"""
from sys import exit
import copy
from types import FunctionType
import numpy as np
from sklearn.metrics.pairwise import manhattan_distances
from scipy.linalg import solve_triangular
from scipy import linalg
from scipy.spatial.distance import cdist
from smt.surrogate_models.krg_based import KrgBased
from smt.sampling_methods import LHS
from smt.utils.kriging_utils import (
cross_distances,
componentwise_distance,
standardization,
differences,
)
class NestedLHS(object):
def __init__(self, nlevel, xlimits):
"""
Constructor where values of options can be passed in.
Parameters
----------
nlevel : integer.
The number of design of experiments to be built
xlimits : ndarray
The interval of the domain in each dimension with shape (nx, 2)
"""
self.nlevel = nlevel
self.xlimits = xlimits
def __call__(self, nb_samples_hifi):
"""
Builds nlevel nested design of experiments of dimension dim and size n_samples.
Each DOE is built with the optimized LHS procedure.
Builds the highest level first; nested properties are ensured by deleting
the nearest neighbours in lower levels of fidelity.
Parameters
----------
nb_samples_hifi: The number of samples of the highest fidelity model.
nb_samples_fi(n-1) = 2 * nb_samples_fi(n)
Returns
------
list of length nlevel of designs of experiments from the lowest to the highest fidelity level.
"""
nt = []
for i in range(self.nlevel, 0, -1):
nt.append(pow(2, i - 1) * nb_samples_hifi)
if len(nt) != self.nlevel:
raise ValueError("nt must be a list of nlevel elements")
if np.allclose(np.sort(nt)[::-1], nt) == False:
raise ValueError("nt must be a list of decreasing integers")
doe = []
p0 = LHS(xlimits=self.xlimits, criterion="ese")
doe.append(p0(nt[0]))
for i in range(1, self.nlevel):
p = LHS(xlimits=self.xlimits, criterion="ese")
doe.append(p(nt[i]))
for i in range(1, self.nlevel)[::-1]:
ind = []
d = cdist(doe[i], doe[i - 1], "euclidean")
for j in range(doe[i].shape[0]):
dj = np.sort(d[j, :])
k = dj[0]
l = (np.where(d[j, :] == k))[0][0]
m = 0
while l in ind:
m = m + 1
k = dj[m]
l = (np.where(d[j, :] == k))[0][0]
ind.append(l)
doe[i - 1] = np.delete(doe[i - 1], ind, axis=0)
doe[i - 1] = np.vstack((doe[i - 1], doe[i]))
return doe
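# Example usage of NestedLHS (a minimal sketch, assuming a 1D domain and two fidelity levels):
#   xlimits = np.array([[0.0, 1.0]])
#   x_lf, x_hf = NestedLHS(nlevel=2, xlimits=xlimits)(5)  # 10 nested low-fidelity points, 5 high-fidelity points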
class MFK(KrgBased):
def _initialize(self):
super(MFK, self)._initialize()
declare = self.options.declare
declare(
"rho_regr",
"constant",
values=("constant", "linear", "quadratic"),
desc="Regression function type for rho",
)
declare(
"theta0", [1e-2], types=(list, np.ndarray), desc="Initial hyperparameters"
)
declare(
"optim_var",
False,
types=bool,
values=(True, False),
desc="Turning this option to True, forces variance to zero at HF samples ",
)
declare(
"noise0", 1e-6, types=(float, list), desc="Initial noise hyperparameters"
)
self.name = "MFK"
def _check_list_structure(self, X, y):
"""
        Checks whether the data structure is compatible with MFK and sets class
        attributes (number of fidelity levels, training points per level, ...).
        Arguments
        ---------
        X : list of arrays, one per fidelity level, ordered from lowest to highest fidelity
        y : same structure as X
"""
if type(X) is not list:
nlevel = 1
X = [X]
else:
nlevel = len(X)
if type(y) is not list:
y = [y]
if len(X) != len(y):
raise ValueError("X and y must have the same length.")
n_samples = np.zeros(nlevel, dtype=int)
n_features = np.zeros(nlevel, dtype=int)
n_samples_y = np.zeros(nlevel, dtype=int)
for i in range(nlevel):
n_samples[i], n_features[i] = X[i].shape
            if i > 0 and n_features[i] != n_features[i - 1]:
raise ValueError("All X must have the same number of columns.")
y[i] = np.asarray(y[i]).ravel()[:, np.newaxis]
n_samples_y[i] = y[i].shape[0]
if n_samples[i] != n_samples_y[i]:
raise ValueError("X and y must have the same number of rows.")
self.nx = n_features[0]
self.nt_all = n_samples
self.nlvl = nlevel
self.ny = y[0].shape[1]
self.X = X[:]
self.y = y[:]
def _new_train(self):
"""
Overrides KrgBased implementation
Trains the Multi-Fidelity model
"""
xt = []
yt = []
i = 0
while self.training_points.get(i, None) is not None:
xt.append(self.training_points[i][0][0])
yt.append(self.training_points[i][0][1])
i = i + 1
xt.append(self.training_points[None][0][0])
yt.append(self.training_points[None][0][1])
self._check_list_structure(xt, yt)
self._check_param()
X = self.X
y = self.y
_, _, self.X_offset, self.y_mean, self.X_scale, self.y_std = standardization(
np.concatenate(xt, axis=0), np.concatenate(yt, axis=0)
)
nlevel = self.nlvl
n_samples = self.nt_all
# initialize lists
self.noise = nlevel * [0]
self.D_all = nlevel * [0]
self.F_all = nlevel * [0]
self.p_all = nlevel * [0]
self.q_all = nlevel * [0]
self.optimal_rlf_value = nlevel * [0]
self.optimal_par = nlevel * [{}]
self.optimal_theta = nlevel * [0]
self.X_norma_all = [(x - self.X_offset) / self.X_scale for x in X]
self.y_norma_all = [(f - self.y_mean) / self.y_std for f in y]
if isinstance(self.options["noise0"], float):
self.options["noise0"] = self.nlvl * [self.options["noise0"]]
noise0 = self.options["noise0"].copy()
if (
isinstance(self.options["theta0"], list)
or len(self.options["theta0"].shape) == 1
):
if len(self.options["theta0"]) == self.nx:
self.options["theta0"] = np.repeat(
np.array(self.options["theta0"]).reshape(1, -1), self.nlvl, axis=0
)
elif len(self.options["theta0"]) == self.nlvl:
self.options["theta0"] = np.repeat(
np.array(self.options["theta0"]).reshape(-1, 1), self.nx, axis=1
)
theta0 = self.options["theta0"].copy()
for lvl in range(nlevel):
self.options["noise0"] = noise0[lvl]
self.options["theta0"] = theta0[lvl, :]
self.X_norma = self.X_norma_all[lvl]
self.y_norma = self.y_norma_all[lvl]
# Calculate matrix of distances D between samples
self.D_all[lvl] = cross_distances(self.X_norma)
# Regression matrix and parameters
self.F_all[lvl] = self._regression_types[self.options["poly"]](self.X_norma)
self.p_all[lvl] = self.F_all[lvl].shape[1]
# Concatenate the autoregressive part for levels > 0
if lvl > 0:
F_rho = self._regression_types[self.options["rho_regr"]](self.X_norma)
self.q_all[lvl] = F_rho.shape[1]
self.F_all[lvl] = np.hstack(
(
F_rho
* np.dot(
self._predict_intermediate_values(
self.X_norma, lvl, descale=False
),
np.ones((1, self.q_all[lvl])),
),
self.F_all[lvl],
)
)
else:
self.q_all[lvl] = 0
n_samples_F_i = self.F_all[lvl].shape[0]
if n_samples_F_i != n_samples[lvl]:
raise Exception(
"Number of rows in F and X do not match. Most "
"likely something is going wrong with the "
"regression model."
)
if int(self.p_all[lvl] + self.q_all[lvl]) >= n_samples_F_i:
                raise Exception(
                    (
                        "Ordinary least squares problem is underdetermined: "
                        "n_samples=%d must be greater than the regression"
                        " model size p+q=%d."
                    )
                    % (n_samples[lvl], self.p_all[lvl] + self.q_all[lvl])
                )
# Determine Gaussian Process model parameters
self.F = self.F_all[lvl]
D, self.ij = self.D_all[lvl]
self._lvl = lvl
self.nt = self.nt_all[lvl]
self.q = self.q_all[lvl]
self.p = self.p_all[lvl]
(
self.optimal_rlf_value[lvl],
self.optimal_par[lvl],
self.optimal_theta[lvl],
) = self._optimize_hyperparam(D)
if self.options["eval_noise"]:
tmp_list = self.optimal_theta[lvl]
self.optimal_theta[lvl] = tmp_list[:-1]
self.noise[lvl] = tmp_list[-1]
del self.y_norma, self.D
self.options["noise0"] = noise0
self.options["theta0"] = theta0
if self.options["eval_noise"] and self.options["optim_var"]:
for lvl in range(self.nlvl - 1):
self.set_training_values(
X[lvl], self._predict_intermediate_values(X[lvl], lvl + 1), name=lvl
)
self.set_training_values(
X[-1], self._predict_intermediate_values(X[-1], self.nlvl)
)
self.options["eval_noise"] = False
self._new_train()
def _componentwise_distance(self, dx, opt=0):
d = componentwise_distance(dx, self.options["corr"], self.nx)
return d
def _predict_intermediate_values(self, X, lvl, descale=True):
"""
Evaluates the model at a set of points.
        Used when training the model at level lvl.
        Allows the ordering problem to be relaxed.
Arguments
---------
x : np.ndarray [n_evals, dim]
Evaluation point input variable values
lvl : level at which the prediction is made
Returns
-------
y : np.ndarray
Evaluation point output variable values
"""
n_eval, _ = X.shape
# if n_features_X != self.n_features:
# raise ValueError("Design must be an array of n_features columns.")
# Calculate kriging mean and variance at level 0
mu = np.zeros((n_eval, lvl))
# if self.normalize:
if descale:
X = (X - self.X_offset) / self.X_scale
## X = (X - self.X_offset[0]) / self.X_scale[0]
f = self._regression_types[self.options["poly"]](X)
f0 = self._regression_types[self.options["poly"]](X)
dx = differences(X, Y=self.X_norma_all[0])
d = self._componentwise_distance(dx)
# Get regression function and correlation
F = self.F_all[0]
C = self.optimal_par[0]["C"]
beta = self.optimal_par[0]["beta"]
Ft = solve_triangular(C, F, lower=True)
yt = solve_triangular(C, self.y_norma_all[0], lower=True)
r_ = self._correlation_types[self.options["corr"]](
self.optimal_theta[0], d
).reshape(n_eval, self.nt_all[0])
gamma = self.optimal_par[0]["gamma"]
# Scaled predictor
        # Kriging mean at level 0: trend term F*beta plus correlation term r*gamma
        mu[:, 0] = (np.dot(f, beta) + np.dot(r_, gamma)).ravel()
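# A minimal usage sketch (assumed, not part of this module) of how the classes
# above are typically combined: a nested DOE supplies the two training sets,
# the low-fidelity data is registered with name=0 and the high-fidelity data
# with the default name, then the co-Kriging model is trained and queried.
# The toy functions lf_model/hf_model are hypothetical.
def lf_model(x):
    return 0.5 * ((x * 6 - 2) ** 2) * np.sin(x * 12 - 4) + 10 * (x - 0.5) - 5

def hf_model(x):
    return ((x * 6 - 2) ** 2) * np.sin(x * 12 - 4)

xlimits_1d = np.array([[0.0, 1.0]])
x_lf, x_hf = NestedLHS(2, xlimits_1d)(nb_samples_hifi=7)
mfk = MFK(theta0=xlimits_1d.shape[0] * [1e-2])
mfk.set_training_values(x_lf, lf_model(x_lf), name=0)  # low fidelity
mfk.set_training_values(x_hf, hf_model(x_hf))          # highest fidelity
mfk.train()
y_pred = mfk.predict_values(np.linspace(0.0, 1.0, 101).reshape(-1, 1))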
import typing as T
from ..data.utils import create_data_object, LabeledData
from ..networks import SingleSensorNetwork
from ..utils.reshape import nd_to_columns
import numpy as np
import cv2
from skimage import util as sk_util
from tsaug import AddNoise, Drift, TimeWarp
from torch_geometric.data import Data
def augment_time(
ldata: LabeledData,
p: T.Any,
xaug: np.ndarray,
nbands: int,
add_noise: bool,
warper: T.Union[AddNoise, Drift, TimeWarp]
) -> np.ndarray:
"""Applies temporal augmentation to a dataset
"""
# Get the segment
min_row, min_col, max_row, max_col = p.bbox
xseg = xaug[:, min_row:max_row, min_col:max_col].copy()
seg = ldata.segments[min_row:max_row, min_col:max_col].copy()
mask = np.uint8(seg == p.label)[np.newaxis]
ntime, nrows, ncols = xseg.shape
    # Reshape from (T x H x W) -> (H*W x T x C)
xseg = (xseg.transpose(1, 2, 0)
.reshape(nrows*ncols, ntime)
.reshape(nrows*ncols, int(ntime/nbands), nbands))
# if xseg.shape[1] < VegetationIndices().n_vis:
# raise ValueError('The time series stack should have 3 layers.')
# Warp the segment
xseg_warped = warper.augment(xseg)
if add_noise:
noise_warper = AddNoise(scale=np.random.uniform(low=0.01, high=0.05))
xseg_warped = noise_warper.augment(xseg_warped)
# Reshape back from (H*W x T x C) -> (T x H x W)
xseg_warped = (xseg_warped.transpose(0, 2, 1)
.reshape(nrows*ncols, ntime)
.T.reshape(ntime, nrows, ncols)
.clip(0, 1))
xseg = xseg.reshape(nrows*ncols, ntime).T.reshape(ntime, nrows, ncols)
# Insert back into full array
    xaug[:, min_row:max_row, min_col:max_col] = np.where(mask == 1, xseg_warped, xseg)
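# A self-contained sketch (assumed, not part of this module) of the warping
# step used above: tsaug augmenters operate on arrays shaped
# (n_series, n_timesteps, n_channels), which is why each segment is reshaped
# to (H*W, T, C) before augmentation.
import numpy as np
from tsaug import AddNoise, TimeWarp

series = np.random.rand(16, 24, 3)  # hypothetical (pixels x time steps x bands)
warper = TimeWarp(n_speed_change=2, max_speed_ratio=2.0)
warped = warper.augment(series)
warped = AddNoise(scale=np.random.uniform(low=0.01, high=0.05)).augment(warped)
warped = warped.clip(0, 1)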
"""Module containing functions to compute the autocorrelation function
and estimate the associated autocorrelation length of series.
The estimate of the autocorrelation function is based on the method
described at http://www.math.nyu.edu/faculty/goodman/software/acor/
and implemented in the associated ``acor`` C++ code, though the
implementation details differ.
"""
import matplotlib.pyplot as plt
import numpy as np
import numpy.ma as ma
def _next_power_of_two(i):
pt = 2
while pt < i:
pt *= 2
return pt
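# Note (added): e.g. _next_power_of_two(1000) == 1024. It is used below so the
# zero-padded FFT length is at least twice the series length, which avoids
# circular-correlation wrap-around when estimating the ACF.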
def autocorrelation_function(series, axis=0):
"""Returns the autocorrelation function of the given series. The
function is normalised so that it is 1 at zero lag.
If ``series`` is an N-dimensional array, the ACF will be computed
along ``axis`` and the result will have the same shape as
``series``.
"""
series = np.atleast_1d(series)
shape = np.array(series.shape)
m = [slice(None)] * len(shape)
n0 = shape[axis]
n = _next_power_of_two(shape[axis]*2)
m[axis] = slice(0, n0)
shape[axis] = n
    padded_series = np.zeros(shape)
    # NumPy requires a tuple (not a list) for multidimensional slicing.
    padded_series[tuple(m)] = series - np.expand_dims(series.mean(axis=axis), axis=axis)
    ps_tilde = np.fft.fft(padded_series, axis=axis)
    acf = np.real(np.fft.ifft(ps_tilde*np.conj(ps_tilde), axis=axis))[tuple(m)]
    m[axis] = 0
    shape[axis] = 1
    acf /= acf[tuple(m)].reshape(shape).repeat(n0, axis)
return acf
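# A quick sanity check (assumed, not part of the module): white noise has an
# ACF near zero away from lag 0, while an AR(1) series with coefficient 0.9
# decays slowly, so its autocorrelation length is much larger.
rng = np.random.default_rng(0)
white = rng.standard_normal(4096)
ar1 = np.zeros(4096)
for t in range(1, 4096):
    ar1[t] = 0.9 * ar1[t - 1] + rng.standard_normal()
print(autocorrelation_function(white)[1])  # ~0
print(autocorrelation_function(ar1)[1])    # ~0.9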
def autocorrelation_length_estimate(series, acf=None, M=5, axis=0):
r"""Returns an estimate of the autocorrelation length of the given
series:
.. math::
L = \int_{-\infty}^\infty \rho(t) dt
The estimate is the smallest :math:`L` such that
.. math::
L = \rho(0) + 2 \sum_{j = 1}^{M L} \rho(j)
In words: the ACL is estimated over a window that is at least
:math:`M` ACLs long, with the constraint that :math:`ML < N/2`.
Defined in this way, the ACL gives the reduction factor between
the number of samples and the "effective" number of samples. In
particular, the variance of the estimated mean of the series is
given by
.. math::
\left\langle \left( \frac{1}{N} \sum_{i=0}^{N-1} x_i - \mu
\right)^2 \right\rangle = \frac{\left\langle \left(x_i -
\mu\right)^2 \right\rangle}{N/L}
Returns ``nan`` if there is no such estimate possible (because
the series is too short to fit :math:`2M` ACLs).
For an N-dimensional array, returns an array of ACLs of the same
shape as ``series``, but with the dimension along ``axis``
removed.
"""
if acf is None:
acf = autocorrelation_function(series, axis=axis)
    m = [slice(None)] * len(acf.shape)
    nmax = acf.shape[axis] // 2  # integer division so nmax can be used as a length
    # Generate ACL candidates.
    m[axis] = slice(0, nmax)
    acl_ests = 2.0*np.cumsum(acf[tuple(m)], axis=axis) - 1.0
# Build array of lags (like arange, but N-dimensional).
shape = acf.shape[:axis] + (nmax,) + acf.shape[axis+1:]
lags = np.cumsum(np.ones(shape), axis=axis) - 1.0
# Mask out unwanted lags and set corresponding ACLs to nan.
mask = M*acl_ests >= lags
acl_ests[mask] = np.nan
    i = ma.masked_greater(mask, lags, copy=False)